cardsLeftString = list(map(str, cardsLeft))
# print("Cards left as list of strings: ",cardsLeftString)
# return cardsLeftString
def show_cards_left_buttons(cardsLefts):
print(cardsLefts)
cardsLefts = update_left_cards_in_deck(cardsLefts)
print(cardsLefts)
for i in range(0,len(cardsLefts),1):
# print("Thats the input to add",select_counter(cardsLeft[i]))
buttonCal = tk.Button(MainWindow, text=(cardsLefts[i].rank.name +' ' + cardsLefts[i].suit.name), command=lambda i = i:add(select_counter(cardsLefts[i]))).grid(row=13, column=i)
return
def pick_random_card_from_left_cards(cardsLeft):
global RANDOMLYPICKEDCARDCOUNTER ##### this variable is for future use to calculate points with random cards
print(counterRandomCardsToPick.get())
for i in range(0,counterRandomCardsToPick.get(),1):
RANDOMLYPICKEDCARDCOUNTER = RANDOMLYPICKEDCARDCOUNTER + 1
Random_card_from_left_cards = cardsLeft[random.randint(0, (len(cardsLeft)-1))]
print(Random_card_from_left_cards)
# buttonCal = tk.Button(MainWindow, text=(Random_card_from_left_cards.rank.name +' ' + Random_card_from_left_cards.suit.name), command=lambda Random_card_from_left_cards = Random_card_from_left_cards:add(select_counter(Random_card_from_left_cards))).grid(row=15, column=RANDOMLYPICKEDCARDCOUNTER)
cardsLeft.remove(Random_card_from_left_cards)
pickedRandomCards.append(Random_card_from_left_cards)
show_card_as_button_with_add_counter_when_clicked(pickedRandomCards)
return
previously_clicked=None
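# previously_clicked remembers the most recently highlighted card button so that
# add_with_button_highlight_after_click (below) can restore its colours and
# relief when another card is clicked.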
def show_card_as_button_with_add_counter_when_clicked(cards):
for i, card in enumerate(cards, 2):
btn = tk.Button(MainWindow, text=(card.rank.name + ' ' + card.suit.name))
u=select_counter(card)
btn.config(command=lambda u=u:add(u))
btn.grid(row=15, column=i, sticky='w')
return
def show_card_as_button_with_add_counter_when_clicked_with_highlight(cards):
for i, card in enumerate(cards, 2):
btn = tk.Button(MainWindow, text=(card.rank.name + ' ' + card.suit.name))
u=select_counter(card)
btn.config(command=lambda u=u,arg=btn:add_with_button_highlight_after_click(u,arg))
btn.grid(row=15, column=i, sticky='w')
return
###################### POINTS MEASURE
def calculate_single_points(Hand):
singlesum = 0
for i in range(0,len(Hand),1):
singlesum = singlesum + Hand[i].rank.value + PreferencePoints[Hand[i].suit.value]
print("Single points: ",singlesum)
return singlesum
def calculate_pair_points(Hand):
pairsum = 0
if is_pair(Hand) == True:
pairsum = pairsum + PAIRPOINTS
print("Pair points: ",pairsum)
return pairsum
def calculate_double_pair_points(Hand):
doublepairsum = 0
if is_two_pair(Hand) == True:
doublepairsum = doublepairsum + DOUBLEPAIRPOINTS
print("Double pair points: ",doublepairsum)
return doublepairsum
def calculate_trio_points(Hand):
triosum = 0
if is_three_of_kind(Hand) == True:
triosum = triosum + TRIOPOINTS
print("Trio points: ",triosum)
return triosum
def calculate_straight_points(Hand):
straightsum = 0
if is_straight(Hand) == True:
straightsum = straightsum + STRAIGHTPOINTS
print("Straight points: ",straightsum)
return straightsum
def calculate_flush_points(Hand):
flushsum = 0
if is_flush(Hand) == True:
flushsum = flushsum + FLUSHPOINTS
print("Flush points: ",flushsum)
return flushsum
def calculate_full_house_points(Hand):
fullhousesum = 0
if is_full_house(Hand) == True:
fullhousesum = fullhousesum + FULLHOUSEPOINTS
print("Full house points: ",fullhousesum)
return fullhousesum
def calculate_four_points(Hand):
foursum = 0
if is_four_of_kind(Hand) == True:
foursum = foursum + FOURPOINTS
print("Four points: ",foursum)
return foursum
def calculate_straight_flush_points(Hand):
straightflushsum = 0
if is_straightflush(Hand) == True:
straightflushsum = straightflushsum + STRAIGHTFLUSHPOINTS
print("Straight flush points: ",straightflushsum)
return straightflushsum
def calculate_royal_flush_points(Hand):
royalflushsum = 0
if is_royalflush(Hand) == True:
royalflushsum = royalflushsum + ROYALFLUSHPOINTS
print("Royal flush points: ",royalflushsum)
return royalflushsum
##################### Scores, label results here to avoid partial
def show_single_points(singleSum,labelRow = 7, labelColumn = 2):
singleResultLabel = tk.Label(MainWindow)
singleResultLabel.grid(row=labelRow, column=labelColumn)
print("Single points: ",singleSum)
singleResultLabel.config(text="Result from single points %d" %singleSum)
return
def show_pair_points(pairSum, labelRow = 8, labelColumn = 2):
    pairResultLabel = tk.Label(MainWindow)
    pairResultLabel.grid(row=labelRow, column=labelColumn)
    print("Pair points: ",pairSum)
    pairResultLabel.config(text="Result from pair points %d" %pairSum)
    return
def show_double_pair_points(doublePairSum, labelRow = 9, labelColumn = 2):
    doublepairResultLabel = tk.Label(MainWindow)
    doublepairResultLabel.grid(row=labelRow, column=labelColumn)
    print("Double pair points: ",doublePairSum)
    doublepairResultLabel.config(text="Result from double pair points %d" %doublePairSum)
    return
def show_trio_points(trioSum, labelRow = 10, labelColumn = 2):
    trioResultLabel = tk.Label(MainWindow)
    trioResultLabel.grid(row=labelRow, column=labelColumn)
    print("Trio points: ",trioSum)
    trioResultLabel.config(text="Result from trio points %d" %trioSum)
    return
def call_result(label_result, n1, n2):
num1 = (n1.get())
num2 = (n2.get())
result = int(num1)+int(num2)
label_result.config(text="Result is %d" % result)
print(label_result,n1,n2)
return
def substract_one(label_result, n1):
num1 = (n1.get())
result = int(num1)-1
label_result.config(text="Result is %d" % result)
return
def check_value(label_result, stringVariable):
value = stringVariable.get()
value = int(value)
label_result.config(text="Value = %d" % value)
return
def add(intVariable, maximum=1):
    # Increment the counter without exceeding `maximum`.
    if intVariable.get() < maximum:
        intVariable.set(intVariable.get() + 1)
    return
def add_with_button_highlight_after_click(intVariable,widget):
intVariable.set(intVariable.get() + 1)
global previously_clicked
if previously_clicked:
previously_clicked['bg'] = widget['bg']
previously_clicked['activebackground'] = widget['activebackground']
previously_clicked['relief'] = widget['relief']
widget['bg'] = 'green'
widget['activebackground'] = 'green'
widget['relief'] = 'sunken'
previously_clicked = widget
return
def sub(intVariable):
if intVariable.get() >0:
intVariable.set(intVariable.get() - 1)
################### FUZZY LOGIC BRO
def calculate_fuzzy_scope():
global PreferencePoints
fSuits = ctrl.Antecedent(np.arange(0, 4.5, 0.001), 'fSuits')
pointsMaximum=20
fPoints = ctrl.Consequent(np.arange(0, pointsMaximum, 0.1), 'fPoints')
    #### membership functions for the fuzzy output (points)
fPoints['poor'] = fuzz.gbellmf(fPoints.universe, 0.025, 0.95, pointsMaximum/5)
fPoints['mediocre'] = fuzz.gbellmf(fPoints.universe, 0.025, 0.95, pointsMaximum*2/5)
fPoints['average'] = fuzz.gbellmf(fPoints.universe, 0.025, 0.95, pointsMaximum*3/5)
fPoints['decent'] = fuzz.gbellmf(fPoints.universe, 0.025, 0.95, pointsMaximum*4/5)
    #### membership functions for the fuzzy inputs (suits)
fSuits['hearts'] = fuzz.gbellmf(fSuits.universe, 0.025, 0.95, 1)
fSuits['tiles'] = fuzz.gbellmf(fSuits.universe, 0.025, 0.95, 2)
fSuits['clovers'] = fuzz.gbellmf(fSuits.universe, 0.025, 0.95, 3)
fSuits['pikes'] = fuzz.gbellmf(fSuits.universe, 0.025, 0.95, 4)
# fPoints.automf(5)
orderedFInput = list(fSuits.terms.values())
orderedFOutput = list(fPoints.terms.values())
rulez=[]
for i,name in enumerate(orderedFInput):
if any(i==x for x in prefered):
rulez.append(ctrl.Rule(orderedFInput[i], orderedFOutput[3]))
print(i)
else:
rulez.append(ctrl.Rule(orderedFInput[i], orderedFOutput[0]))
rule1 = ctrl.Rule(fSuits['hearts'], orderedFOutput[0])
rule2 = ctrl.Rule(fSuits['tiles'], orderedFOutput[0])
rule3 = ctrl.Rule(fSuits['clovers'], orderedFOutput[0])
rule4 = ctrl.Rule(fSuits['pikes'], orderedFOutput[0])
rules = [rule1, rule2, rule3, rule4]
suitsPreferencesRulebase = ctrl.ControlSystem(rulez)
suitsPreferences = ctrl.ControlSystemSimulation(suitsPreferencesRulebase)
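    # Defuzzify once per suit: the crisp inputs 1..4 correspond to the membership
    # centres defined above, so preferred suits land in the 'decent' output set
    # and all other suits in 'poor'.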
for i in range(1,5):
suitsPreferences.input['fSuits'] = i
suitsPreferences.compute()
PreferencePoints[i] = int(suitsPreferences.output['fPoints'])
return
def save_preferences_and_close_window():
global prefered
preferedLocal = []
for i in range(0,4):
if box_preferences_counter[i].get() == 1:
preferedLocal = preferedLocal +[i]
prefered = list(set(preferedLocal))
window.destroy()
return
def save_preferences_and_close_window_and_calculate_fuzzy_scope():
global prefered
preferedLocal = []
for i in range(0,4):
if box_preferences_counter[i].get() == 1:
preferedLocal = preferedLocal +[i]
prefered = list(set(preferedLocal))
window.destroy()
calculate_fuzzy_scope()
return
def create_window():
global window
window = tk.Toplevel(MainWindow)
labelTitle = tk.Label(window, text="Preferences").grid(row=0, column=2)
labelNum9 = tk.Label(window, text="Hearts").grid(row=1, column=0)
labelNum10 = tk.Label(window, text="Tiles").grid(row=2, column=0)
labelNumJ = tk.Label(window, text="Clovers").grid(row=3, column=0)
labelNumQ = tk.Label(window, text="Pikes").grid(row=4, column=0)
for i,name in enumerate(box_preferences_counter):
maximum=1
entryNumH9 = tk.Entry(window, textvariable=name, width = 4).grid(row=i+1, column=2)
buttonCal = tk.Button(window, text="+", command=lambda name=name:add(name,maximum)).grid(row=i+1, column=3)
buttonCal = tk.Button(window, text="-", command=lambda name = name:sub(name)).grid(row=i+1, column=4)
buttonCal = tk.Button(window, text="save&quit", command=save_preferences_and_close_window_and_calculate_fuzzy_scope).grid(row=5, column=2)
return
## text signs near buttons HEARTS $$$$$$$$$$$$$$$$$$$
labelTitle = tk.Label(MainWindow, text="Hearts").grid(row=0, column=2)
labelNum9 = tk.Label(MainWindow, text="9").grid(row=1, column=0)
labelNum10 = tk.Label(MainWindow, text="10").grid(row=2, column=0)
labelNumJ = tk.Label(MainWindow, text="J").grid(row=3, column=0)
labelNumQ = tk.Label(MainWindow, text="Q").grid(row=4, column=0)
labelNumK = tk.Label(MainWindow, text="K").grid(row=5, column=0)
labelNumA = tk.Label(MainWindow, text="A").grid(row=6, column=0)
##############################
## text signs near buttons TILES ^^^^^^^^^^^^^^^^^^
labelTitle = tk.Label(MainWindow, text="TILES").grid(row=0, column=2 + SHIFTBETWEENCARDS)
labelNum9 = tk.Label(MainWindow, text="9").grid(row=1, column=0 + SHIFTBETWEENCARDS + SHIFTADJUSTMENT, padx = PADXBETWEENCARDNAMES)
labelNum10 = tk.Label(MainWindow, text="10").grid(row=2, column=0 + SHIFTBETWEENCARDS + SHIFTADJUSTMENT, padx = PADXBETWEENCARDNAMES )
labelNumJ = tk.Label(MainWindow, text="J").grid(row=3, column=0 + SHIFTBETWEENCARDS + SHIFTADJUSTMENT, padx = PADXBETWEENCARDNAMES)
labelNumQ = tk.Label(MainWindow, text="Q").grid(row=4, column=0 + SHIFTBETWEENCARDS + SHIFTADJUSTMENT, padx = PADXBETWEENCARDNAMES)
labelNumK = tk.Label(MainWindow, text="K").grid(row=5, column=0 + SHIFTBETWEENCARDS + SHIFTADJUSTMENT, padx = PADXBETWEENCARDNAMES)
labelNumA = tk.Label(MainWindow, text="A").grid(row=6, column=0 + SHIFTBETWEENCARDS + SHIFTADJUSTMENT, padx = PADXBETWEENCARDNAMES)
##############################
## text signs near buttons CLOVERS &&&&&&&&&&&&&&&&&&&
labelTitle = tk.Label(MainWindow, text="CLOVERS").grid(row=0, column=2 + SHIFTBETWEENCARDS * 2)
labelNum9 = tk.Label(MainWindow, text="9").grid(row=1, column=0 + SHIFTBETWEENCARDS * 2 + SHIFTADJUSTMENT, padx = PADXBETWEENCARDNAMES)
CnineEntry = tk.Entry(MainWindow)
labelNum10 = tk.Label(MainWindow, text="10").grid(row=2, column=0 + SHIFTBETWEENCARDS * 2 + SHIFTADJUSTMENT, padx = PADXBETWEENCARDNAMES)
CtenEntry = tk.Entry(MainWindow)
labelNumJ = tk.Label(MainWindow, text="J").grid(row=3, column=0 + SHIFTBETWEENCARDS * 2 + SHIFTADJUSTMENT, padx = PADXBETWEENCARDNAMES)
CjackEntry = tk.Entry(MainWindow)
labelNumQ = tk.Label(MainWindow, text="Q").grid(row=4, column=0 + SHIFTBETWEENCARDS * 2 + SHIFTADJUSTMENT, padx = PADXBETWEENCARDNAMES)
CquuenEntry = tk.Entry(MainWindow)
labelNumK = tk.Label(MainWindow, text="K").grid(row=5, column=0 + SHIFTBETWEENCARDS * 2 + SHIFTADJUSTMENT, padx = PADXBETWEENCARDNAMES)
CkingEntry = tk.Entry(MainWindow)
labelNumA = tk.Label(MainWindow, text="A").grid(row=6, column=0 + SHIFTBETWEENCARDS * 2 + SHIFTADJUSTMENT, padx = PADXBETWEENCARDNAMES)
CaceEntry = tk.Entry(MainWindow)
##############################
## text signs near buttons PIKES **********************
labelTitle = tk.Label(MainWindow, text="PIKES").grid(row=0, column=2 + SHIFTBETWEENCARDS * 3)
labelNum9 = tk.Label(MainWindow, text="9").grid(row=1, column=0 + SHIFTBETWEENCARDS * 3 + SHIFTADJUSTMENT, padx = PADXBETWEENCARDNAMES)
nineEntry = tk.Entry(MainWindow)
labelNum10 = tk.Label(MainWindow, text="10").grid(row=2, column=0 + SHIFTBETWEENCARDS * 3 + SHIFTADJUSTMENT, padx = PADXBETWEENCARDNAMES)
CtenEntry = tk.Entry(MainWindow)
labelNumJ = tk.Label(MainWindow, text="J").grid(row=3, column=0 + SHIFTBETWEENCARDS * 3 + SHIFTADJUSTMENT, padx = PADXBETWEENCARDNAMES)
CjackEntry = tk.Entry(MainWindow)
labelNumQ = tk.Label(MainWindow, text="Q").grid(row=4, column=0 + SHIFTBETWEENCARDS * 3 + SHIFTADJUSTMENT, padx = PADXBETWEENCARDNAMES)
CquuenEntry = tk.Entry(MainWindow)
labelNumK = tk.Label(MainWindow, text="K").grid(row=5, column=0 + SHIFTBETWEENCARDS * 3 + SHIFTADJUSTMENT, padx = PADXBETWEENCARDNAMES)
CkingEntry = tk.Entry(MainWindow)
labelNumA = tk.Label(MainWindow, text="A").grid(row=6, column=0 + SHIFTBETWEENCARDS * 3 + SHIFTADJUSTMENT, padx = PADXBETWEENCARDNAMES)
CaceEntry = tk.Entry(MainWindow)
##############################
#### text sign for RANDOM CARDS AMOUNT
labelTitle = tk.Label(MainWindow, text="Random Cards Amount").grid(row=0, column=2 + SHIFTBETWEENCARDS * 4)
labelTitle = tk.Label(MainWindow, text="User actions?").grid(row=0, column=2 + SHIFTBETWEENCARDS * 6)
labelTitle = tk.Label(MainWindow, text="Possible hands check").grid(row=0, column=2 + SHIFTBETWEENCARDS * 7)
labelTitle = tk.Label(MainWindow, text="++ as multitask").grid(row=0, column=2 + SHIFTBETWEENCARDS * 5)
labelTitle = tk.Label(MainWindow, text="Combo checking").grid(row=0, column=2 + SHIFTBETWEENCARDS * 8)
## space to set amount of HEARTS $$$$$$$$$$$$
entryNumH9 = tk.Entry(MainWindow, textvariable=counterH9, width = 4).grid(row=1, column=2)
entryNumH10 = tk.Entry(MainWindow, textvariable=counterH10, width = 4).grid(row=2, column=2)
entryNumHJ = tk.Entry(MainWindow, textvariable=counterHJ, width = 4).grid(row=3, column=2)
entryNumHQ = tk.Entry(MainWindow, textvariable=counterHQ, width = 4).grid(row=4, column=2)
entryNumHK = tk.Entry(MainWindow, textvariable=counterHK, width = 4).grid(row=5, column=2)
self._test_everything(self.hitX.strict_can_append, default=True)
def test_strict_can_prepend(self):
self._test_everything(self.hitX.strict_can_prepend, default=True)
def test_hitX_0(self):
"""PartInXEnsemble treatment of zero-length trajectory"""
assert_equal(self.hitX(paths.Trajectory([])), False)
assert_equal(self.hitX.can_append(paths.Trajectory([])), True)
assert_equal(self.hitX.can_prepend(paths.Trajectory([])), True)
def test_hitX_str(self):
volstr = "{x|Id(x) in [0.1, 0.5]}"
assert_equal(self.hitX.__str__(),
"exists t such that x[t] in "+volstr)
class TestSequentialEnsemble(EnsembleTest):
def setup(self):
self.inX = AllInXEnsemble(vol1)
self.outX = AllOutXEnsemble(vol1)
self.hitX = PartInXEnsemble(vol1)
self.leaveX = PartOutXEnsemble(vol1)
self.inInterface = AllInXEnsemble(vol2)
self.leaveX0 = PartOutXEnsemble(vol2)
self.inX0 = AllInXEnsemble(vol2)
self.length1 = LengthEnsemble(1)
# pseudo_tis and pseudo_minus assume that the interface is equal to
# the state boundary
self.pseudo_tis = SequentialEnsemble( [
self.inX & self.length1,
self.outX,
self.inX & self.length1 ]
)
self.pseudo_minus = SequentialEnsemble( [
self.inX & self.length1,
self.outX,
self.inX,
self.outX,
self.inX & self.length1 ]
)
self.tis = SequentialEnsemble([
self.inX & self.length1,
self.outX & self.leaveX0,
self.inX & self.length1,
])
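        # In words: pseudo_tis accepts paths with exactly one initial frame in the
        # state, an excursion of any length outside it, and exactly one final frame
        # back inside; pseudo_minus requires a second such excursion, and self.tis
        # additionally requires the excursion to leave the interface volume vol2.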
@raises(ValueError)
def test_maxminoverlap_size(self):
"""SequentialEnsemble errors if max/min overlap sizes different"""
SequentialEnsemble([self.inX, self.outX, self.inX], (0,0), (0,0,0))
@raises(ValueError)
def test_maxoverlap_ensemble_size(self):
"""SequentialEnsemble errors if overlap sizes don't match ensemble size"""
SequentialEnsemble([self.inX, self.outX, self.inX], (0,0,0), (0,0,0))
@raises(ValueError)
def test_minmax_order(self):
"""SequentialEnsemble errors if min_overlap > max_overlap"""
SequentialEnsemble([self.inX, self.outX, self.inX], (0,1), (0,0))
def test_allowed_initializations(self):
"""SequentialEnsemble initializes correctly with defaults"""
A = SequentialEnsemble([self.inX, self.outX, self.inX], (0,0), (0,0))
B = SequentialEnsemble([self.inX, self.outX, self.inX],0,0)
C = SequentialEnsemble([self.inX, self.outX, self.inX])
assert_equal(A.min_overlap,B.min_overlap)
assert_equal(A.min_overlap,C.min_overlap)
assert_equal(A.max_overlap,B.max_overlap)
assert_equal(A.max_overlap,C.max_overlap)
def test_overlap_max(self):
"""SequentialEnsemble allows overlaps up to overlap max, no more"""
raise SkipTest
def test_overlap_min(self):
"""SequentialEnsemble requires overlaps of at least overlap min"""
raise SkipTest
def test_overlap_max_inf(self):
"""SequentialEnsemble works if max overlap in infinite"""
raise SkipTest
def test_overlap_min_gap(self):
"""SequentialEnsemble works in mix overlap is negative (gap)"""
raise SkipTest
def test_overlap_max_gap(self):
"""SequentialEnsemble works if max overlap is negative (gap)"""
raise SkipTest
def test_seqens_order_combo(self):
# regression test for #229
import numpy as np
op = paths.FunctionCV(name="x", f=lambda snap : snap.xyz[0][0])
bigvol = paths.CVDefinedVolume(collectivevariable=op,
lambda_min=-100.0, lambda_max=100.0)
traj = paths.Trajectory([
paths.engines.toy.Snapshot(
coordinates=np.array([[-0.5, 0.0]]),
velocities=np.array([[0.0,0.0]])
)
])
vol_ens = paths.AllInXEnsemble(bigvol)
len_ens = paths.LengthEnsemble(5)
combo1 = vol_ens & len_ens
combo2 = len_ens & vol_ens
seq1 = SequentialEnsemble([combo1])
seq2 = SequentialEnsemble([combo2])
logger.debug("Checking combo1")
assert_equal(combo1.can_append(traj), True)
logger.debug("Checking combo2")
assert_equal(combo2.can_append(traj), True)
logger.debug("Checking seq1")
assert_equal(seq1.can_append(traj), True)
logger.debug("Checking seq2")
assert_equal(seq2.can_append(traj), True)
def test_can_append_tis(self):
"""SequentialEnsemble as TISEnsemble knows when it can append"""
results = { 'upper_in_out' : True,
'lower_in_out' : True,
'upper_in_out_in' : False,
'lower_in_out_in' : False,
'upper_in' : True,
'lower_in' : True,
'upper_in_in_in' : False,
'lower_in_in_in' : False,
'upper_out_out_out' : True,
'lower_out_out_out' : True,
'upper_out_in' : False,
'lower_out_in' : False,
'upper_out' : True,
'lower_out' : True,
'upper_in_out_in_in' : False,
'lower_in_out_in_in' : False,
'upper_in_out_in_out_in' : False,
'lower_in_out_in_out_in' : False
}
for test in list(results.keys()):
failmsg = "Failure in "+test+"("+str(ttraj[test])+"): "
self._single_test(self.pseudo_tis.can_append,
ttraj[test], results[test], failmsg)
def test_strict_can_append_tis(self):
results = {
'upper_in_out' : True,
'lower_in_out' : True,
'upper_in_out_in' : False,
'lower_in_out_in' : False,
'upper_in' : True,
'lower_in' : True,
'upper_in_in_in' : False,
'lower_in_in_in' : False,
'upper_out_out_out' : False,
'lower_out_out_out' : False,
'upper_out_in' : False,
'lower_out_in' : False,
'upper_out' : False,
'lower_out' : False,
'upper_in_out_in_in' : False,
'lower_in_out_in_in' : False,
'upper_in_out_in_out_in' : False,
'lower_in_out_in_out_in' : False
}
for test in list(results.keys()):
failmsg = "Failure in "+test+"("+str(ttraj[test])+"): "
self._single_test(self.pseudo_tis.strict_can_append,
ttraj[test], results[test], failmsg)
def test_can_append_pseudominus(self):
"""SequentialEnsemble as Pseudo-MinusEnsemble knows when it can append"""
results = {
'upper_in_out' : True,
'lower_in_out' : True,
'upper_in_out_in' : True,
'lower_in_out_in' : True,
'upper_in' : True,
'lower_in' : True,
'upper_in_in_in' : True,
'lower_in_in_in' : True,
'upper_out_out_out' : True,
'lower_out_out_out' : True,
'upper_out_in' : True,
'lower_out_in' : True,
'upper_out' : True,
'lower_out' : True,
'upper_in_out_in_in' : True,
'lower_in_out_in_in' : True,
'upper_in_out_in_out_in' : False,
'lower_in_out_in_out_in' : False,
'upper_in_out_in_in_out' : True,
'lower_in_out_in_in_out' : True,
'upper_out_in_out' : True,
'lower_out_in_out' : True,
'upper_out_in_in_out' : True,
'lower_out_in_in_out' : True,
'upper_out_in_out_in': False,
'lower_out_in_out_in': False,
'upper_out_in_in_out_in' : False,
'lower_out_in_in_out_in' : False,
'upper_in_cross_in' : True,
'lower_in_cross_in' : True,
'upper_in_cross_in_cross' : True,
'lower_in_cross_in_cross' : True,
'upper_cross_in_cross_in' : False,
'lower_cross_in_cross_in' : False,
'upper_in_cross_in_cross_in' : False,
'lower_in_cross_in_cross_in' : False
}
for test in list(results.keys()):
failmsg = "Failure in "+test+"("+str(ttraj[test])+"): "
self._single_test(self.pseudo_minus.can_append,
ttraj[test], results[test], failmsg)
def test_strict_can_append_pseudominus(self):
results = {
'upper_in_out' : True,
'lower_in_out' : True,
'upper_in_out_in' : True,
'lower_in_out_in' : True,
'upper_in' : True,
'lower_in' : True,
'upper_in_in_in' : False,
'lower_in_in_in' : False,
'upper_out_out_out' : False,
'lower_out_out_out' : False,
'upper_out_in' : False,
'lower_out_in' : False,
'upper_out' : False,
'lower_out' : False,
'upper_in_out_in_in' : True,
'lower_in_out_in_in' : True,
'upper_in_out_in_out_in' : False,
'lower_in_out_in_out_in' : False,
'upper_in_out_in_in_out' : True,
'lower_in_out_in_in_out' : True,
'upper_out_in_out' : False,
'lower_out_in_out' : False,
'upper_out_in_in_out' : False,
'lower_out_in_in_out' : False,
'upper_out_in_out_in': False,
'lower_out_in_out_in': False,
'upper_out_in_in_out_in' : False,
'lower_out_in_in_out_in' : False,
'upper_in_cross_in' : True,
'lower_in_cross_in' : True,
'upper_in_cross_in_cross' : True,
'lower_in_cross_in_cross' : True,
'upper_cross_in_cross_in' : False,
'lower_cross_in_cross_in' : False,
'upper_in_cross_in_cross_in' : False,
'lower_in_cross_in_cross_in' : False
}
for test in list(results.keys()):
failmsg = "Failure in "+test+"("+str(ttraj[test])+"): "
self._single_test(self.pseudo_minus.strict_can_append,
ttraj[test], results[test], failmsg)
def test_can_prepend_pseudo_tis(self):
"""SequentialEnsemble as Pseudo-TISEnsemble knows when it can prepend"""
results = {
'upper_in_out' : False,
'lower_in_out' : False,
'upper_in_out_in' : False,
'lower_in_out_in' : False,
'upper_in' : True,
'lower_in' : True,
'upper_in_in_in' : False,
'lower_in_in_in' : False,
'upper_out_out_out' : True,
'lower_out_out_out' : True,
'upper_out_in' : True,
'lower_out_in' : True,
'upper_out' : True,
'lower_out' : True,
'upper_in_out_in_in' : False,
'lower_in_out_in_in' : False,
'upper_in_out_in_out_in' : False,
'lower_in_out_in_out_in' : False
}
for test in list(results.keys()):
failmsg = "Failure in "+test+"("+str(ttraj[test])+"): "
self._single_test(self.pseudo_tis.can_prepend,
ttraj[test], results[test], failmsg)
def test_strict_can_prepend_pseudo_tis(self):
results = {
'upper_in_out' : False,
'lower_in_out' : False,
'upper_in_out_in' : False,
'lower_in_out_in' : False,
'upper_in' : True,
'lower_in' : True,
'upper_in_in_in' : False,
'lower_in_in_in' : False,
'upper_out_out_out' : False,
'lower_out_out_out' : False,
'upper_out_in' : True,
'lower_out_in' : True,
'upper_out' : False,
'lower_out' : False,
'upper_in_out_in_in' : False,
'lower_in_out_in_in' : False,
'upper_in_out_in_out_in' : False,
'lower_in_out_in_out_in' : False
}
for test in list(results.keys()):
failmsg = "Failure in "+test+"("+str(ttraj[test])+"): "
self._single_test(self.pseudo_tis.strict_can_prepend,
ttraj[test], results[test], failmsg)
def test_can_prepend_pseudo_minus(self):
results = {
'upper_in_out' : True,
'lower_in_out' : True,
'upper_in_out_in' : True,
'lower_in_out_in' : True,
'upper_in' : True,
'lower_in' : True,
'upper_in_in_in' : True,
'lower_in_in_in' : True,
'upper_out_out_out' : True,
'lower_out_out_out' : True,
'upper_out_in' : True,
'lower_out_in' : True,
'upper_out' : True,
'lower_out' : True,
'upper_in_out_in_in' : False,
'lower_in_out_in_in' : False,
'upper_in_out_in_out_in' : False,
'lower_in_out_in_out_in' : False,
'upper_in_out_in_in_out' : False,
'lower_in_out_in_in_out' : False,
'upper_out_in_out' : True,
'lower_out_in_out' : True,
'upper_out_in_in_out' : True,
'lower_out_in_in_out' : True,
'upper_out_in_out_in': True,
'lower_out_in_out_in': True,
'upper_out_in_in_out_in' : True,
'lower_out_in_in_out_in' : True
}
for test in list(results.keys()):
failmsg = "Failure in "+test+"("+str(ttraj[test])+"): "
self._single_test(self.pseudo_minus.can_prepend,
ttraj[test], results[test], failmsg)
def test_strict_can_prepend_pseudo_minus(self):
results = {
'upper_in_out' : False,
'lower_in_out' : False,
'upper_in_out_in' : True,
'lower_in_out_in' : True,
'upper_in' : True,
'lower_in' : True,
'upper_in_in_in' : False,
'lower_in_in_in' : False,
'upper_out_out_out' : False,
'lower_out_out_out' : False,
'upper_out_in' : True,
'lower_out_in' : True,
'upper_out' : False,
'lower_out' : False,
'upper_in_out_in_in' : False,
'lower_in_out_in_in' : False,
'upper_in_out_in_out_in' : False,
'lower_in_out_in_out_in' : False,
'upper_in_out_in_in_out' : False,
'lower_in_out_in_in_out' : False,
'upper_out_in_out' : False,
'lower_out_in_out' : False,
'upper_out_in_in_out' : False,
'lower_out_in_in_out' : False,
'upper_out_in_out_in': True,
'lower_out_in_out_in': True,
'upper_out_in_in_out_in' : True,
'lower_out_in_in_out_in' : True
}
for test in list(results.keys()):
failmsg = "Failure in "+test+"("+str(ttraj[test])+"): "
self._single_test(self.pseudo_minus.strict_can_prepend,
ttraj[test], results[test], failmsg)
def test_sequential_transition_frames(self):
"""SequentialEnsemble identifies transitions frames correctly"""
ensemble = SequentialEnsemble([self.inX, self.outX])
results = {'upper_in_in_in' : [3],
'upper_out_out_out' : [],
'upper_in_out_in' : [1,2],
'upper_in_out' : [1,2]
}
for test in list(results.keys()):
failmsg = "Failure in "+test+"("+str(ttraj[test])+"): "
self._single_test(ensemble.transition_frames,
ttraj[test], results[test], failmsg)
def test_sequential_simple_in_out_call(self):
"""Simplest sequential ensemble identifies correctly"""
ensemble = SequentialEnsemble([self.inX, self.outX])
results = {'upper_in_in_in' : False,
'upper_out_out_out' : False,
'upper_in_out_in' : False,
'upper_in_out' : True
}
for test in list(results.keys()):
failmsg = "Failure in "+test+"("+str(ttraj[test])+"): "
self._single_test(ensemble,
ttraj[test], results[test], failmsg)
def test_sequential_in_out(self):
"""SequentialEnsembles based on In/AllOutXEnsemble"""
# idea: for each ttraj, use the key name to define in/out behavior,
# dynamically construct a SequentialEnsemble
ens_dict = {'in' : self.inX, 'out' : self.outX }
for test in list(ttraj.keys()):
ens_list = in_out_parser(test)
ens = []
# how to pick ensembles is specific to this test
for ens_type in ens_list:
ens.append(ens_dict[ens_type])
ensemble = SequentialEnsemble(ens)
failmsg = "Failure in "+test+"("+str(ttraj[test])+"): "
self._single_test(ensemble, ttraj[test], True, failmsg)
def test_sequential_pseudo_tis(self):
"""SequentialEnsemble as Pseudo-TISEnsemble identifies paths"""
results = {}
        for test in list(results.keys()):
# Copyright 2019 The TensorFlow Probability Authors.
# Copyright 2019 OpenAI (http://openai.com).
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""The Pixel CNN++ distribution class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import numpy as np
import tensorflow.compat.v2 as tf
from tensorflow_probability.python.bijectors import shift
from tensorflow_probability.python.distributions import categorical
from tensorflow_probability.python.distributions import distribution
from tensorflow_probability.python.distributions import independent
from tensorflow_probability.python.distributions import logistic
from tensorflow_probability.python.distributions import mixture_same_family
from tensorflow_probability.python.distributions import quantized_distribution
from tensorflow_probability.python.distributions import transformed_distribution
from tensorflow_probability.python.internal import prefer_static
from tensorflow_probability.python.internal import reparameterization
from tensorflow_probability.python.internal import tensorshape_util
from tensorflow_probability.python.layers import weight_norm
class PixelCNN(distribution.Distribution):
"""The Pixel CNN++ distribution.
Pixel CNN++ [(Salimans et al., 2017)][1] models a distribution over image
data, parameterized by a neural network. It builds on Pixel CNN and
Conditional Pixel CNN, as originally proposed by [(van den Oord et al.,
2016)][2, 3]. The model expresses the joint distribution over pixels as
the product of conditional distributions:
`p(x|h) = prod{ p(x[i] | x[0:i], h) : i=0, ..., d }`,
in which `p(x[i] | x[0:i], h) : i=0, ..., d` is the
probability of the `i`-th pixel conditional on the pixels that preceded it in
raster order (color channels in RGB order, then left to right, then top to
bottom). `h` is optional additional data on which to condition the image
distribution, such as class labels or VAE embeddings. The Pixel CNN++
network enforces the dependency structure among pixels by applying a mask to
the kernels of the convolutional layers that ensures that the values for each
pixel depend only on other pixels up and to the left (see
`tfd.PixelCnnNetwork`).
Pixel values are modeled with a mixture of quantized logistic distributions,
which can take on a set of distinct integer values (e.g. between 0 and 255
for an 8-bit image).
Color intensity `v` of each pixel is modeled as:
`v ~ sum{q[i] * quantized_logistic(loc[i], scale[i]) : i = 0, ..., k }`,
in which `k` is the number of mixture components and the `q[i]` are the
Categorical probabilities over the components.
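  A minimal sketch of such a mixture for a single pixel value, built directly
  with TFP (the component locations, scales and mixture logits here are
  arbitrary placeholders, not the network outputs this class uses):
  ```python
  import tensorflow.compat.v2 as tf
  import tensorflow_probability as tfp
  tfd = tfp.distributions
  k = 5  # number of mixture components
  pixel_dist = tfd.MixtureSameFamily(
      mixture_distribution=tfd.Categorical(logits=tf.zeros([k])),
      components_distribution=tfd.QuantizedDistribution(
          distribution=tfd.Logistic(loc=tf.linspace(0., 255., k),
                                    scale=10. * tf.ones([k])),
          low=0., high=255.))
  log_prob = pixel_dist.log_prob(128.)  # log-likelihood of intensity value 128
  ```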
#### Sampling
Pixels are sampled one at a time, in raster order. This enforces the
autoregressive dependency structure, in which the sample of pixel `i` is
conditioned on the samples of pixels `1, ..., i-1`. A single color image is
sampled as follows:
```python
samples = random_uniform([image_height, image_width, image_channels])
for i in image_height:
for j in image_width:
component_logits, locs, scales, coeffs = pixel_cnn_network(samples)
components = Categorical(component_logits).sample()
locs = gather(locs, components)
scales = gather(scales, components)
coef_count = 0
channel_samples = []
for k in image_channels:
loc = locs[k]
for m in range(k):
loc += channel_samples[m] * coeffs[coef_count]
coef_count += 1
channel_samp = Logistic(loc, scales[k]).sample()
channel_samples.append(channel_samp)
samples[i, j, :] = tf.stack(channel_samples, axis=-1)
samples = round(samples)
```
#### Examples
```python
# Build a small Pixel CNN++ model to train on MNIST.
import tensorflow as tf
import tensorflow_datasets as tfds
import tensorflow_probability as tfp
tfd = tfp.distributions
tfk = tf.keras
tfkl = tf.keras.layers
tf.enable_v2_behavior()
# Load MNIST from tensorflow_datasets
data = tfds.load('mnist')
train_data, test_data = data['train'], data['test']
def image_preprocess(x):
x['image'] = tf.cast(x['image'], tf.float32)
return (x['image'],) # (input, output) of the model
batch_size = 16
train_it = train_data.map(image_preprocess).batch(batch_size).shuffle(1000)
image_shape = (28, 28, 1)
# Define a Pixel CNN network
dist = tfd.PixelCNN(
image_shape=image_shape,
num_resnet=1,
num_hierarchies=2,
num_filters=32,
num_logistic_mix=5,
dropout_p=.3,
)
# Define the model input
image_input = tfkl.Input(shape=image_shape)
# Define the log likelihood for the loss fn
log_prob = dist.log_prob(image_input)
# Define the model
model = tfk.Model(inputs=image_input, outputs=log_prob)
model.add_loss(-tf.reduce_mean(log_prob))
# Compile and train the model
model.compile(
optimizer=tfk.optimizers.Adam(.001),
metrics=[])
model.fit(train_it, epochs=10, verbose=True)
# sample five images from the trained model
samples = dist.sample(5)
```
To train a class-conditional model:
```python
data = tfds.load('mnist')
train_data, test_data = data['train'], data['test']
def image_preprocess(x):
x['image'] = tf.cast(x['image'], tf.float32)
# return model (inputs, outputs): inputs are (image, label) and there are no
# outputs
return ((x['image'], x['label']),)
batch_size = 16
train_ds = train_data.map(image_preprocess).batch(batch_size).shuffle(1000)
optimizer = tfk.optimizers.Adam()
image_shape = (28, 28, 1)
label_shape = ()
dist = tfd.PixelCNN(
image_shape=image_shape,
conditional_shape=label_shape,
num_resnet=1,
num_hierarchies=2,
num_filters=32,
num_logistic_mix=5,
dropout_p=.3,
)
image_input = tfkl.Input(shape=image_shape)
label_input = tfkl.Input(shape=label_shape)
log_prob = dist.log_prob(image_input, conditional_input=label_input)
class_cond_model = tfk.Model(
inputs=[image_input, label_input], outputs=log_prob)
class_cond_model.add_loss(-tf.reduce_mean(log_prob))
class_cond_model.compile(
optimizer=tfk.optimizers.Adam(),
metrics=[])
class_cond_model.fit(train_ds, epochs=10)
# Take 10 samples of the digit '5'
samples = dist.sample(10, conditional_input=5.)
# Take 4 samples each of the digits '1', '2', '3'.
# Note that when a batch of conditional input is passed, the sample shape
# (the first argument of `dist.sample`) must have its last dimension(s) equal
# the batch shape of the conditional input (here, (3,)).
samples = dist.sample((4, 3), conditional_input=[1., 2., 3.])
```
Note: PixelCNN may also be trained using tfp.layers.DistributionLambda;
however, as of this writing, that method is much slower and has the
disadvantage of calling `sample()` upon construction, which causes the
`PixelCnnNetwork` to be initialized with random data (if data-dependent
initialization is used).
#### References
  [1]: Tim Salimans, Andrej Karpathy, Xi Chen, and Diederik P. Kingma.
PixelCNN++: Improving the PixelCNN with Discretized Logistic Mixture
Likelihood and Other Modifications. In _International Conference on
Learning Representations_, 2017.
https://pdfs.semanticscholar.org/9e90/6792f67cbdda7b7777b69284a81044857656.pdf
Additional details at https://github.com/openai/pixel-cnn
  [2]: Aaron van den Oord, Nal Kalchbrenner, Oriol Vinyals, Lasse Espeholt,
       Alex Graves, and Koray Kavukcuoglu. Conditional Image Generation with
PixelCNN Decoders. In _Neural Information Processing Systems_, 2016.
https://arxiv.org/abs/1606.05328
  [3]: Aaron van den Oord, Nal Kalchbrenner, and Koray Kavukcuoglu. Pixel
Recurrent Neural Networks. In _International Conference on Machine
Learning_, 2016. https://arxiv.org/pdf/1601.06759.pdf
"""
def __init__(self,
image_shape,
conditional_shape=None,
num_resnet=5,
num_hierarchies=3,
num_filters=160,
num_logistic_mix=10,
receptive_field_dims=(3, 3),
dropout_p=0.5,
resnet_activation='concat_elu',
use_weight_norm=True,
use_data_init=True,
high=255,
low=0,
dtype=tf.float32,
name='PixelCNN'):
"""Construct Pixel CNN++ distribution.
Args:
image_shape: 3D `TensorShape` or tuple for the `[height, width, channels]`
dimensions of the image.
conditional_shape: `TensorShape` or tuple for the shape of the
conditional input, or `None` if there is no conditional input.
num_resnet: `int`, the number of layers (shown in Figure 2 of [2]) within
each highest-level block of Figure 2 of [1].
      num_hierarchies: `int`, the number of highest-level blocks (separated by
        expansions/contractions of dimensions in Figure 2 of [1]).
num_filters: `int`, the number of convolutional filters.
num_logistic_mix: `int`, number of components in the logistic mixture
distribution.
receptive_field_dims: `tuple`, height and width in pixels of the receptive
field of the convolutional layers above and to the left of a given
pixel. The width (second element of the tuple) should be odd. Figure 1
(middle) of [2] shows a receptive field of (3, 5) (the row containing
the current pixel is included in the height). The default of (3, 3) was
used to produce the results in [1].
dropout_p: `float`, the dropout probability. Should be between 0 and 1.
resnet_activation: `string`, the type of activation to use in the resnet
blocks. May be 'concat_elu', 'elu', or 'relu'.
use_weight_norm: `bool`, if `True` then use weight normalization (works
only in Eager mode).
use_data_init: `bool`, if `True` then use data-dependent initialization
(has no effect if `use_weight_norm` is `False`).
high: `int`, the maximum value of the input data (255 for an 8-bit image).
low: `int`, the minimum value of the input data.
dtype: Data type of the `Distribution`.
name: `string`, the name of the `Distribution`.
"""
parameters = dict(locals())
with tf.name_scope(name) as name:
super(PixelCNN, self).__init__(
dtype=dtype,
reparameterization_type=reparameterization.NOT_REPARAMETERIZED,
validate_args=False,
allow_nan_stats=True,
parameters=parameters,
name=name)
if not tensorshape_util.is_fully_defined(image_shape):
raise ValueError('`image_shape` must be fully defined.')
if (conditional_shape is not None and
not tensorshape_util.is_fully_defined(conditional_shape)):
        raise ValueError('`conditional_shape` must be fully defined.')
if tensorshape_util.rank(image_shape) != 3:
raise ValueError('`image_shape` must have length 3, representing '
'[height, width, channels] dimensions.')
self._high = tf.cast(high, self.dtype)
self._low = tf.cast(low, self.dtype)
self._num_logistic_mix = num_logistic_mix
self.network = _PixelCNNNetwork(
dropout_p=dropout_p,
num_resnet=num_resnet,
num_hierarchies=num_hierarchies,
num_filters=num_filters,
num_logistic_mix=num_logistic_mix,
receptive_field_dims=receptive_field_dims,
resnet_activation=resnet_activation,
use_weight_norm=use_weight_norm,
use_data_init=use_data_init,
dtype=dtype)
image_shape = tensorshape_util.constant_value_as_shape(image_shape)
conditional_shape = (None if conditional_shape is None
else tensorshape_util.constant_value_as_shape(
conditional_shape))
image_input_shape = tensorshape_util.concatenate([None], image_shape)
# opencood/utils/box_utils.py (from the YuanYunshuang/OpenCOOD repository)
# -*- coding: utf-8 -*-
# Author: <NAME> <<EMAIL>>, <NAME> <<EMAIL>>,
# License: TDG-Attribution-NonCommercial-NoDistrib
"""
Bounding box related utility functions
"""
import sys
import numpy as np
import torch
import torch.nn.functional as F
import opencood.utils.common_utils as common_utils
from opencood.utils.transformation_utils import x1_to_x2
def corner_to_center(corner3d, order='lwh'):
"""
Convert 8 corners to x, y, z, dx, dy, dz, yaw.
Parameters
----------
corner3d : np.ndarray
(N, 8, 3)
order : str
'lwh' or 'hwl'
Returns
-------
box3d : np.ndarray
(N, 7)
"""
assert corner3d.ndim == 3
batch_size = corner3d.shape[0]
xyz = np.mean(corner3d[:, [0, 3, 5, 6], :], axis=1)
h = abs(np.mean(corner3d[:, 4:, 2] - corner3d[:, :4, 2], axis=1,
keepdims=True))
l = (np.sqrt(np.sum((corner3d[:, 0, [0, 1]] - corner3d[:, 3, [0, 1]]) ** 2,
axis=1, keepdims=True)) +
np.sqrt(np.sum((corner3d[:, 2, [0, 1]] - corner3d[:, 1, [0, 1]]) ** 2,
axis=1, keepdims=True)) +
np.sqrt(np.sum((corner3d[:, 4, [0, 1]] - corner3d[:, 7, [0, 1]]) ** 2,
axis=1, keepdims=True)) +
np.sqrt(np.sum((corner3d[:, 5, [0, 1]] - corner3d[:, 6, [0, 1]]) ** 2,
axis=1, keepdims=True))) / 4
w = (np.sqrt(
np.sum((corner3d[:, 0, [0, 1]] - corner3d[:, 1, [0, 1]]) ** 2, axis=1,
keepdims=True)) +
np.sqrt(np.sum((corner3d[:, 2, [0, 1]] - corner3d[:, 3, [0, 1]]) ** 2,
axis=1, keepdims=True)) +
np.sqrt(np.sum((corner3d[:, 4, [0, 1]] - corner3d[:, 5, [0, 1]]) ** 2,
axis=1, keepdims=True)) +
np.sqrt(np.sum((corner3d[:, 6, [0, 1]] - corner3d[:, 7, [0, 1]]) ** 2,
axis=1, keepdims=True))) / 4
theta = (np.arctan2(corner3d[:, 1, 1] - corner3d[:, 2, 1],
corner3d[:, 1, 0] - corner3d[:, 2, 0]) +
np.arctan2(corner3d[:, 0, 1] - corner3d[:, 3, 1],
corner3d[:, 0, 0] - corner3d[:, 3, 0]) +
np.arctan2(corner3d[:, 5, 1] - corner3d[:, 6, 1],
corner3d[:, 5, 0] - corner3d[:, 6, 0]) +
np.arctan2(corner3d[:, 4, 1] - corner3d[:, 7, 1],
corner3d[:, 4, 0] - corner3d[:, 7, 0]))[:,
np.newaxis] / 4
if order == 'lwh':
return np.concatenate([xyz, l, w, h, theta], axis=1).reshape(
batch_size, 7)
elif order == 'hwl':
return np.concatenate([xyz, h, w, l, theta], axis=1).reshape(
batch_size, 7)
else:
sys.exit('Unknown order')
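# Illustrative round trip: corner_to_center(boxes_to_corners_3d(boxes, 'lwh'), 'lwh')
# recovers the original (x, y, z, l, w, h, yaw) rows up to angle wrapping and
# floating-point precision.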
def boxes_to_corners2d(boxes3d, order):
"""
0 -------- 1
| |
| |
| |
3 -------- 2
Parameters
__________
boxes3d: np.ndarray or torch.Tensor
(N, 7) [x, y, z, dx, dy, dz, heading], (x, y, z) is the box center.
order : str
'lwh' or 'hwl'
Returns:
corners2d: np.ndarray or torch.Tensor
(N, 4, 3), the 4 corners of the bounding box.
"""
corners3d = boxes_to_corners_3d(boxes3d, order)
corners2d = corners3d[:, :4, :]
return corners2d
def boxes2d_to_corners2d(boxes2d, order="lwh"):
"""
0 -------- 1
| |
| |
| |
3 -------- 2
Parameters
__________
boxes2d: np.ndarray or torch.Tensor
(..., 5) [x, y, dx, dy, heading], (x, y) is the box center.
order : str
'lwh' or 'hwl'
Returns:
corners2d: np.ndarray or torch.Tensor
(..., 4, 2), the 4 corners of the bounding box.
"""
assert order == "lwh", \
"boxes2d_to_corners_2d only supports lwh order for now."
boxes2d, is_numpy = common_utils.check_numpy_to_torch(boxes2d)
template = boxes2d.new_tensor((
[1, -1], [1, 1], [-1, 1], [-1, -1]
)) / 2
input_shape = boxes2d.shape
boxes2d = boxes2d.view(-1, 5)
corners2d = boxes2d[:, None, 2:4].repeat(1, 4, 1) * template[None, :, :]
corners2d = common_utils.rotate_points_along_z_2d(corners2d.view(-1, 2),
boxes2d[:,
4].repeat_interleave(
4)).view(-1, 4,
2)
corners2d += boxes2d[:, None, 0:2]
corners2d = corners2d.view(*(input_shape[:-1]), 4, 2)
return corners2d
def boxes_to_corners_3d(boxes3d, order):
"""
4 -------- 5
/| /|
7 -------- 6 .
| | | |
. 0 -------- 1
|/ |/
3 -------- 2
Parameters
__________
boxes3d: np.ndarray or torch.Tensor
(N, 7) [x, y, z, dx, dy, dz, heading], (x, y, z) is the box center.
order : str
'lwh' or 'hwl'
Returns:
corners3d: np.ndarray or torch.Tensor
(N, 8, 3), the 8 corners of the bounding box.
"""
# ^ z
# |
# |
# | . x
# |/
# +-------> y
boxes3d, is_numpy = common_utils.check_numpy_to_torch(boxes3d)
boxes3d_ = boxes3d
if order == 'hwl':
boxes3d_ = boxes3d[:, [0, 1, 2, 5, 4, 3, 6]]
template = boxes3d_.new_tensor((
[1, -1, -1], [1, 1, -1], [-1, 1, -1], [-1, -1, -1],
[1, -1, 1], [1, 1, 1], [-1, 1, 1], [-1, -1, 1],
)) / 2
corners3d = boxes3d_[:, None, 3:6].repeat(1, 8, 1) * template[None, :, :]
corners3d = common_utils.rotate_points_along_z(corners3d.view(-1, 8, 3),
boxes3d_[:, 6]).view(-1, 8,
3)
corners3d += boxes3d_[:, None, 0:3]
return corners3d.numpy() if is_numpy else corners3d
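# Example (illustrative): a single box of size l=4, w=2, h=1.5 centred at the
# origin with zero heading yields the eight corners (+-2, +-1, +-0.75):
#   boxes = np.array([[0., 0., 0., 4., 2., 1.5, 0.]])
#   corners = boxes_to_corners_3d(boxes, order='lwh')  # shape (1, 8, 3)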
def box3d_to_2d(box3d):
"""
Convert 3D bounding box to 2D.
Parameters
----------
box3d : np.ndarray
(n, 8, 3)
Returns
-------
box2d : np.ndarray
(n, 4, 2), project 3d to 2d.
"""
box2d = box3d[:, :4, :2]
return box2d
def corner2d_to_standup_box(box2d):
"""
Find the minmaxx, minmaxy for each 2d box. (N, 4, 2) -> (N, 4)
x1, y1, x2, y2
Parameters
----------
box2d : np.ndarray
(n, 4, 2), four corners of the 2d bounding box.
Returns
-------
standup_box2d : np.ndarray
(n, 4)
"""
N = box2d.shape[0]
standup_boxes2d = np.zeros((N, 4))
standup_boxes2d[:, 0] = np.min(box2d[:, :, 0], axis=1)
standup_boxes2d[:, 1] = np.min(box2d[:, :, 1], axis=1)
standup_boxes2d[:, 2] = np.max(box2d[:, :, 0], axis=1)
standup_boxes2d[:, 3] = np.max(box2d[:, :, 1], axis=1)
return standup_boxes2d
def corner_to_standup_box_torch(box_corner):
"""
Find the minmax x and y for each bounding box.
Parameters
----------
box_corner : torch.Tensor
        Shape: (N, 8, 3) or (N, 4, 2)
Returns
-------
standup_box2d : torch.Tensor
(n, 4)
"""
N = box_corner.shape[0]
standup_boxes2d = torch.zeros((N, 4))
standup_boxes2d = standup_boxes2d.to(box_corner.device)
standup_boxes2d[:, 0] = torch.min(box_corner[:, :, 0], dim=1).values
standup_boxes2d[:, 1] = torch.min(box_corner[:, :, 1], dim=1).values
standup_boxes2d[:, 2] = torch.max(box_corner[:, :, 0], dim=1).values
standup_boxes2d[:, 3] = torch.max(box_corner[:, :, 1], dim=1).values
return standup_boxes2d
def project_box3d(box3d, transformation_matrix):
"""
Project the 3d bounding box to another coordinate system based on the
    transformation matrix.
Parameters
----------
box3d : torch.Tensor or np.ndarray
3D bounding box, (N, 8, 3)
transformation_matrix : torch.Tensor or np.ndarray
Transformation matrix, (4, 4)
Returns
-------
projected_box3d : torch.Tensor
The projected bounding box, (N, 8, 3)
"""
assert transformation_matrix.shape == (4, 4)
box3d, is_numpy = \
common_utils.check_numpy_to_torch(box3d)
transformation_matrix, _ = \
common_utils.check_numpy_to_torch(transformation_matrix)
# (N, 3, 8)
box3d_corner = box3d.transpose(1, 2)
# (N, 1, 8)
torch_ones = torch.ones((box3d_corner.shape[0], 1, 8))
torch_ones = torch_ones.to(box3d_corner.device)
# (N, 4, 8)
box3d_corner = torch.cat((box3d_corner, torch_ones),
dim=1)
# (N, 4, 8)
projected_box3d = torch.matmul(transformation_matrix,
box3d_corner)
# (N, 8, 3)
projected_box3d = projected_box3d[:, :3, :].transpose(1, 2)
return projected_box3d if not is_numpy else projected_box3d.numpy()
def project_points_by_matrix_torch(points, transformation_matrix):
"""
Project the points to another coordinate system based on the
    transformation matrix.
Parameters
----------
points : torch.Tensor
3D points, (N, 3)
transformation_matrix : torch.Tensor
Transformation matrix, (4, 4)
Returns
-------
projected_points : torch.Tensor
The projected points, (N, 3)
"""
# convert to homogeneous coordinates via padding 1 at the last dimension.
# (N, 4)
points_homogeneous = F.pad(points, (0, 1), mode="constant", value=1)
# (N, 4)
projected_points = torch.einsum("ik, jk->ij", points_homogeneous,
transformation_matrix)
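    # The einsum computes points_homogeneous @ transformation_matrix.T, i.e. each
    # homogeneous point is transformed by the 4x4 matrix before the homogeneous
    # coordinate is dropped in the return statement below.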
return projected_points[:, :3]
def get_mask_for_boxes_within_range_torch(boxes):
"""
Generate mask to remove the bounding boxes
outside the range.
Parameters
----------
boxes : torch.Tensor
Groundtruth bbx, shape: N,8,3 or N,4,2
Returns
-------
mask: torch.Tensor
The mask for bounding box -- True means the
bbx is within the range and False means the
bbx is outside the range.
"""
from opencood.data_utils.datasets import GT_RANGE
    # mask out the gt bounding boxes outside the fixed range (-140, -40, -3, 140, 40, 1)
device = boxes.device
boundary_lower_range = \
torch.Tensor(GT_RANGE[:2]).reshape(1, 1, -1).to(device)
boundary_higher_range = \
torch.Tensor(GT_RANGE[3:5]).reshape(1, 1, -1).to(device)
mask = torch.all(
torch.all(boxes[:, :, :2] >= boundary_lower_range,
dim=-1) & \
torch.all(boxes[:, :, :2] <= boundary_higher_range,
dim=-1), dim=-1)
return mask
def mask_boxes_outside_range_numpy(boxes, limit_range, order,
min_num_corners=8, return_mask=False):
"""
Parameters
----------
boxes: np.ndarray
(N, 7) [x, y, z, dx, dy, dz, heading], (x, y, z) is the box center
limit_range: list
[minx, miny, minz, maxx, maxy, maxz]
min_num_corners: int
The required minimum number of corners to be considered as in range.
order : str
'lwh' or 'hwl'
return_mask : bool
Whether return the mask.
Returns
-------
boxes: np.ndarray
The filtered boxes.
"""
assert boxes.shape[1] == 8 or boxes.shape[1] == 7
new_boxes = boxes.copy()
if boxes.shape[1] == 7:
new_boxes = boxes_to_corners_3d(new_boxes, order)
mask = ((new_boxes >= limit_range[0:3]) &
(new_boxes <= limit_range[3:6])).all(axis=2)
mask = mask.sum(axis=1) >= min_num_corners # (N)
if return_mask:
return boxes[mask], mask
return boxes[mask]
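# Example (illustrative): keep only the boxes whose eight corners all fall inside
# a 100 m x 40 m area around the ego vehicle:
#   kept = mask_boxes_outside_range_numpy(boxes, [-50, -20, -3, 50, 20, 1],
#                                         order='lwh', min_num_corners=8)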
def create_bbx(extent):
"""
    Create bounding box with 8 corners under obstacle vehicle
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2008-2011 Zuza Software Foundation
#
# This file is part of translate.
#
# translate is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# translate is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>.
#
"""
Interface to the Xapian indexing engine for the Translate Toolkit
Xapian v1.0 or higher is supported.
If you are interested in writing an interface for Xapian 0.x, then
you should checkout the following::
svn export -r 7235 https://translate.svn.sourceforge.net/svnroot/translate/src/branches/translate-search-indexer-generic-merging/translate/search/indexer/
It is not completely working, but it should give you a good start.
"""
# xapian module versions before 1.0.13 hang apache under mod_python
import sys
import re
# detect if running under apache
if 'apache' in sys.modules or '_apache' in sys.modules or 'mod_wsgi' in sys.modules:
def _str2version(version):
return [int(i) for i in version.split('.')]
import subprocess
# even checking xapian version leads to deadlock under apache, must figure version from command line
try:
command = subprocess.Popen(['xapian-check', '--version'], stdout=subprocess.PIPE)
stdout, stderr = command.communicate()
if _str2version(re.match('.*([0-9]+\.[0-9]+\.[0-9]+).*', stdout).groups()[0]) < [1, 0, 13]:
raise ImportError("Running under apache, can't load xapain")
except:
        #FIXME: report if the xapian-check command is missing?
raise ImportError("Running under apache, can't load xapian")
import CommonIndexer
import xapian
import os
import time
import logging
def is_available():
return xapian.major_version() > 0
# in xapian there is a length restriction for term strings
# see http://osdir.com/ml/search.xapian.general/2006-11/msg00210.html
# a maximum length of around 240 is described there - but we need less anyway
_MAX_TERM_LENGTH = 128
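# Typical usage of the XapianDatabase class defined below (an illustrative
# sketch: the index path is hypothetical, and the query helpers are the ones
# provided by this class and its CommonIndexer.CommonDatabase base class):
#
#   db = XapianDatabase("/var/lib/myapp/indexes")
#   query = db.make_query("hello world", require_all=True)
#   results = db.get_query_result(query)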
class XapianDatabase(CommonIndexer.CommonDatabase):
"""Interface to the `Xapian indexer <http://xapian.org>`_."""
QUERY_TYPE = xapian.Query
INDEX_DIRECTORY_NAME = "xapian"
def __init__(self, basedir, analyzer=None, create_allowed=True):
"""Initialize or open a Xapian database.
:raise ValueError: the given location exists, but the database type
is incompatible (e.g. created by a different indexing engine)
:raise OSError: the database failed to initialize
:param basedir: the parent directory of the database
:type basedir: str
:param analyzer: Bitwise combination of possible analyzer flags
to be used as the default analyzer for this
database. Leave it empty to use the system default
analyzer (self.ANALYZER_DEFAULT).
See self.ANALYZER_TOKENIZE, self.ANALYZER_PARTIAL, ...
:type analyzer: int
:param create_allowed: create the database, if necessary; default: True
:type create_allowed: bool
"""
# call the __init__ function of our parent
super(XapianDatabase, self).__init__(basedir, analyzer=analyzer,
create_allowed=create_allowed)
self.reader = None
self.writer = None
if os.path.exists(self.location):
# try to open an existing database
try:
self.reader = xapian.Database(self.location)
except xapian.DatabaseOpeningError, err_msg:
raise ValueError("Indexer: failed to open xapian database " \
+ "(%s) - maybe it is not a xapian database: %s" \
% (self.location, str(err_msg)))
else:
# create a new database
if not create_allowed:
raise OSError("Indexer: skipping database creation")
try:
# create the parent directory if it does not exist
parent_path = os.path.dirname(self.location)
if not os.path.isdir(parent_path):
# recursively create all directories up to parent_path
os.makedirs(parent_path)
except IOError, err_msg:
raise OSError("Indexer: failed to create the parent " \
+ "directory (%s) of the indexing database: %s" \
% (parent_path, str(err_msg)))
try:
self.writer = xapian.WritableDatabase(self.location,
xapian.DB_CREATE_OR_OPEN)
self.flush()
except xapian.DatabaseOpeningError, err_msg:
raise OSError("Indexer: failed to open or create a xapian " \
+ "database (%s): %s" % (self.location, str(err_msg)))
def __del__(self):
self.reader = None
self._writer_close()
def flush(self, optimize=False):
"""force to write the current changes to disk immediately
:param optimize: ignored for xapian
:type optimize: bool
"""
# write changes to disk (only if database is read-write)
if self._writer_is_open():
self._writer_close()
self._index_refresh()
def make_query(self, *args, **kwargs):
try:
return super(XapianDatabase, self).make_query(*args, **kwargs)
except xapian.DatabaseModifiedError:
self._index_refresh()
return super(XapianDatabase, self).make_query(*args, **kwargs)
def _create_query_for_query(self, query):
"""generate a query based on an existing query object
basically this function should just create a copy of the original
:param query: the original query object
:type query: xapian.Query
:return: the resulting query object
:rtype: xapian.Query
"""
# create a copy of the original query
return xapian.Query(query)
def _create_query_for_string(self, text, require_all=True,
analyzer=None):
"""generate a query for a plain term of a string query
basically this function parses the string and returns the resulting
query
:param text: the query string
:type text: str
:param require_all: boolean operator
(True -> AND (default) / False -> OR)
:type require_all: bool
:param analyzer: Define query options (partial matching, exact matching,
tokenizing, ...) as bitwise combinations of
*CommonIndexer.ANALYZER_???*.
This can override previously defined field
analyzer settings.
If analyzer is None (default), then the configured
analyzer for the field is used.
:type analyzer: int
:return: resulting query object
:rtype: xapian.Query
"""
qp = xapian.QueryParser()
qp.set_database(self.reader)
if require_all:
qp.set_default_op(xapian.Query.OP_AND)
else:
qp.set_default_op(xapian.Query.OP_OR)
if analyzer is None:
analyzer = self.analyzer
if analyzer & self.ANALYZER_PARTIAL > 0:
match_flags = xapian.QueryParser.FLAG_PARTIAL
return qp.parse_query(text, match_flags)
elif analyzer == self.ANALYZER_EXACT:
# exact matching -
return xapian.Query(text)
else:
# everything else (not partial and not exact)
match_flags = 0
return qp.parse_query(text, match_flags)
def _create_query_for_field(self, field, value, analyzer=None):
"""generate a field query
this functions creates a field->value query
:param field: the fieldname to be used
:type field: str
:param value: the wanted value of the field
:type value: str
:param analyzer: Define query options (partial matching, exact
matching, tokenizing, ...) as bitwise combinations of
*CommonIndexer.ANALYZER_???*.
This can override previously defined field
analyzer settings.
If analyzer is None (default), then the configured
analyzer for the field is used.
:type analyzer: int
:return: the resulting query object
:rtype: xapian.Query
"""
if analyzer is None:
analyzer = self.analyzer
if analyzer == self.ANALYZER_EXACT:
# exact matching -> keep special characters
return xapian.Query("%s%s" % (field.upper(), value))
# other queries need a parser object
qp = xapian.QueryParser()
qp.set_database(self.reader)
if (analyzer & self.ANALYZER_PARTIAL > 0):
# partial matching
match_flags = xapian.QueryParser.FLAG_PARTIAL
return qp.parse_query(value, match_flags, field.upper())
else:
# everything else (not partial and not exact)
match_flags = 0
return qp.parse_query(value, match_flags, field.upper())
def _create_query_combined(self, queries, require_all=True):
"""generate a combined query
:param queries: list of the original queries
:type queries: list of xapian.Query
:param require_all: boolean operator
(True -> AND (default) / False -> OR)
:type require_all: bool
:return: the resulting combined query object
:rtype: xapian.Query
"""
if require_all:
query_op = xapian.Query.OP_AND
else:
query_op = xapian.Query.OP_OR
return xapian.Query(query_op, queries)
def _create_empty_document(self):
"""create an empty document to be filled and added to the index later
:return: the new document object
:rtype: xapian.Document
"""
return xapian.Document()
def _add_plain_term(self, document, term, tokenize=True):
"""add a term to a document
:param document: the document to be changed
:type document: xapian.Document
:param term: a single term to be added
:type term: str
:param tokenize: should the term be tokenized automatically
:type tokenize: bool
"""
if tokenize:
term_gen = xapian.TermGenerator()
term_gen.set_document(document)
term_gen.index_text(term)
else:
document.add_term(_truncate_term_length(term))
def _add_field_term(self, document, field, term, tokenize=True):
"""add a field term to a document
:param document: the document to be changed
:type document: xapian.Document
:param field: name of the field
:type field: str
:param term: term to be associated to the field
:type term: str
:param tokenize: should the term be tokenized automatically
:type tokenize: bool
"""
if tokenize:
term_gen = xapian.TermGenerator()
term_gen.set_document(document)
term_gen.index_text(term, 1, field.upper())
else:
document.add_term(_truncate_term_length("%s%s" % \
(field.upper(), term)))
def _add_document_to_index(self, document):
"""add a prepared document to the index database
:param document: the document to be added
:type document: xapian.Document
"""
# open the database for writing
self._writer_open()
self.writer.add_document(document)
def begin_transaction(self):
"""Begin a transaction.
Xapian supports transactions to group multiple database modifications.
This avoids intermediate flushing and therefore increases performance.
"""
self._writer_open()
self.writer.begin_transaction()
def cancel_transaction(self):
"""cancel an ongoing transaction
        No changes since the last execution of 'begin_transaction' are written.
"""
self.writer.cancel_transaction()
self._writer_close()
def commit_transaction(self):
"""Submit the changes of an ongoing transaction.
All changes since the last execution of 'begin_transaction'
are written.
"""
self.writer.commit_transaction()
self._writer_close()
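    # Usage sketch (added for illustration; `indexer` is an assumed instance of this
    # class). Transactions group writes and avoid intermediate flushing;
    # cancel_transaction() discards everything since begin_transaction():
    #   indexer.begin_transaction()
    #   try:
    #       doc = indexer._create_empty_document()
    #       indexer._add_plain_term(doc, "example term")
    #       indexer._add_document_to_index(doc)
    #       indexer.commit_transaction()
    #   except Exception:
    #       indexer.cancel_transaction()
    #       raise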
def get_query_result(self, query):
"""Return an object containing the results of a query.
:param query: a pre-compiled xapian query
:type query: xapian.Query
:return: | |
continue
start = Util.current_time_in_millis ()
other_graph = Util.read_object (path_2)
load_time = Util.current_time_in_millis () - start
start = Util.current_time_in_millis ()
other_nodes = other_graph.get('nodes', [])
other_map = { n['id'] : n for n in other_nodes }
other_keys = set(other_map.keys())
intersection = [ v for v in graph_keys if v in other_keys ]
difference = list(set(other_keys) - set(graph_keys))
scope_time = Util.current_time_in_millis () - start
start = Util.current_time_in_millis ()
for i in intersection:
self.merge_nodes (graph_map[i], other_map[i])
other_graph['nodes'] = [ other_map[i] for i in difference ]
merge_time = Util.current_time_in_millis () - start
start = Util.current_time_in_millis ()
Util.write_object (other_graph, path_2.replace ('kgx', 'merge'))
write_time = Util.current_time_in_millis () - start
log.debug ("merged {:>45} load:{:>5} scope:{:>7} merge:{:>3}".format(
Util.trunc(path_2, 45), load_time, scope_time, merge_time))
total_merge_time += load_time + scope_time + merge_time + write_time
start = Util.current_time_in_millis ()
Util.write_object (graph, new_path)
rewrite_time = Util.current_time_in_millis () - start
log.info (f"{path} rewrite: {rewrite_time}. total merge time: {total_merge_time}")
def format_keys (self, keys, schema_type : SchemaType):
""" Format schema keys. Make source and destination first in edges. Make
id first in nodes. Remove keys for fields we can't yet represent.
:param keys: List of keys.
:param schema_type: Type of schema to conform to.
"""
""" Sort keys. """
k_list = sorted(keys)
if schema_type == SchemaType.PREDICATE:
""" Rename subject and object to src and dest """
k_list.remove ('subject')
k_list.remove ('object')
k_list.insert (0, 'src')
k_list.insert (1, 'dest')
elif schema_type == SchemaType.CATEGORY:
""" Make id the first field. Remove smiles. It causes ast parse errors.
TODO: update bulk loader to ignore AST on selected fields.
"""
k_list.remove ('id')
if 'simple_smiles' in k_list:
k_list.remove ('simple_smiles')
k_list.insert (0, 'id')
return k_list
def load (self):
""" Use KGX to load a data set into Redisgraph """
input_format = "json"
uri = f"redis://{config['redisgraph']['host']}:{config['redisgraph']['ports']['http']}/"
username = config['redisgraph']['username']
password = config['redisgraph']['password']
log.info (f"connecting to redisgraph: {uri}")
for subgraph in glob.glob (f"{kgx_repo}/**.json"):
redisgraph_upload(inputs=[ subgraph ],
input_format=input_format,
input_compression=None,
uri=uri,
username=username,
password=password,
node_filters=[],
edge_filters=[])
class BiolinkModel:
""" Programmatic model of Biolink. """
def to_camel_case(self, snake_str):
""" Convert a snake case string to camel case. """
components = snake_str.split('_')
return ''.join(x.title() for x in components)
def get_class(self, name):
""" Get a Python class from a string name. """
return getattr(sys.modules["biolink.model"], name)
def is_derived (self, a_class_name, classes):
""" Return true if the class derives from any of the provided classes. """
for c in classes:
if isinstance (self.get_class(self.to_camel_case(a_class_name)), c):
return True
return False
def get_leaf_class (self, names):
""" Return the leaf classes in the provided list of names. """
classes = [ self.get_class(self.to_camel_case(n)) for n in names ]
leaves = [ n for n in names if not self.is_derived (n, classes) ]
return leaves [0]
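def _biolink_model_example():
    """Illustrative sketch added for this write-up (not part of the original
    module): shows how BiolinkModel combines to_camel_case and get_leaf_class.
    The category names below are assumptions for demonstration only and must
    resolve to classes in biolink.model for get_leaf_class to work."""
    model = BiolinkModel()
    camel = model.to_camel_case("named_thing")  # 'named_thing' -> 'NamedThing'
    # get_leaf_class is meant to keep only the most specific category of the list.
    leaf = model.get_leaf_class(["named_thing", "chemical_substance"])
    return camel, leaf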
class BulkLoad:
""" Tools for creating a Redisgraph bulk load dataset. """
def __init__(self, biolink):
self.biolink = biolink
def tables_up_to_date (self):
return Util.is_up_to_date (
source=[
Util.schema_path (f"{SchemaType.PREDICATE.value}-schema.json"),
                Util.schema_path (f"{SchemaType.CATEGORY.value}-schema.json")
] + Util.merged_objects (),
targets=glob.glob (Util.bulk_path ("nodes/**.csv")) + \
glob.glob (Util.bulk_path ("edges/**.csv")))
def create (self):
""" Check source times. """
if self.tables_up_to_date ():
log.info ("up to date.")
return
""" Format the data for bulk load. """
predicates_schema = Util.read_schema (SchemaType.PREDICATE)
categories_schema = Util.read_schema (SchemaType.CATEGORY)
bulk_path = Util.bulk_path("")
if os.path.exists(bulk_path):
shutil.rmtree(bulk_path)
state = defaultdict(lambda:None)
for subgraph in Util.merged_objects ():
log.info (f"processing {subgraph}")
graph = Util.read_object (subgraph)
""" Write node data for bulk load. """
categories = defaultdict(lambda: [])
for node in graph['nodes']:
index = self.biolink.get_leaf_class (node['category'])
categories[index].append (node)
self.write_bulk (Util.bulk_path("nodes"), categories, categories_schema,
state=state, f=subgraph)
""" Write predicate data for bulk load. """
predicates = defaultdict(lambda: [])
for edge in graph['edges']:
predicates[edge['edge_label']].append (edge)
edge['src'] = edge.pop ('subject')
edge['dest'] = edge.pop ('object')
self.write_bulk (Util.bulk_path("edges"), predicates, predicates_schema)
def cleanup (self, v):
""" Filter problematic text.
:param v: A value to filter and clean.
"""
if isinstance(v, list):
v = [ self.cleanup(val) for val in v ]
elif isinstance (v, str):
""" Some values contain the CSV separator character. 'fix' that. """
if len(v) > 1 and v[0] == '[' and v[-1] == ']':
v = v.replace ("[", "@").replace ("]", "@") #f" {v}"
v = v.replace ("|","^")
return v
def write_bulk (self, bulk_path, obj_map, schema, state={}, f=None):
""" Write a bulk load group of objects.
:param bulk_path: Path to the bulk loader object to write.
:param obj_map: A map of biolink type to list of objects.
:param schema: The schema (nodes or predicates) containing identifiers.
:param state: Track state of already written objects to avoid duplicates.
"""
os.makedirs (bulk_path, exist_ok=True)
for key, objects in obj_map.items ():
out_file = f"{bulk_path}/{key}.csv"
if len(objects) == 0:
continue
new_file = not os.path.exists (out_file)
all_keys = schema[key]
with open (out_file, "a") as stream:
if new_file:
log.info (f" --creating {out_file}")
stream.write ("|".join (all_keys))
stream.write ("\n")
""" Make all objects conform to the schema. """
for obj in objects:
for akey in all_keys:
if not akey in obj:
obj[akey] = ""
""" Write fields, skipping duplicate objects. """
for obj in objects:
oid = str(obj['id'])
if oid in state:
continue
state[oid] = oid
values = [ self.cleanup(obj[k]) for k in all_keys if not 'smiles' in k ]
clean = list(map(str, values))
s = "|".join (clean)
stream.write (s)
stream.write ("\n")
def insert (self):
redisgraph = config.get('redisgraph', {})
bulk_loader = config.get('bulk_loader', {})
nodes = sorted(glob.glob (Util.bulk_path ("nodes/**.csv")))
edges = sorted(glob.glob (Util.bulk_path ("edges/**.csv")))
graph = redisgraph['graph']
log.info (f"bulk loading \n nodes: {nodes} \n edges: {edges}")
print (f"bulk loading \n nodes: {nodes} \n edges: {edges}")
try:
log.info (f"deleting graph {graph} in preparation for bulk load.")
db = self.get_redisgraph (redisgraph)
db.redis_graph.delete ()
except redis.exceptions.ResponseError:
log.info ("no graph to delete")
log.info (f"bulk loading graph: {graph}")
args = []
if len(nodes) > 0:
args.extend (("-n " + " -n ".join (nodes)).split ())
if len(edges) > 0:
args.extend (("-r " + " -r ".join (edges)).split ())
args.extend ([ "--separator=|" ])
args.extend ([ redisgraph['graph'] ])
""" standalone_mode=False tells click not to sys.exit() """
bulk_insert (args, standalone_mode=False)
def get_redisgraph (self, redisgraph):
return RedisGraph (host=redisgraph['host'],
port=redisgraph['ports']['http'],
graph=redisgraph['graph'])
def validate (self):
redisgraph = config.get('redisgraph', {})
print (f"config:{json.dumps(redisgraph, indent=2)}")
db = self.get_redisgraph (redisgraph)
validation_queries = config.get('validation', {}).get('queries', [])
for key, query in validation_queries.items ():
text = query['query']
name = query['name']
args = query.get('args', [{}])
for arg in args:
start = Util.current_time_in_millis ()
instance = Template (text).safe_substitute (arg)
db.query (instance)
duration = Util.current_time_in_millis () - start
log.info (f"Query {key}:{name} ran in {duration}ms: {instance}")
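def _bulk_cleanup_example():
    """Hedged example added for illustration (not in the original source):
    shows how BulkLoad.cleanup escapes characters that would break the
    pipe-separated bulk files. cleanup() never touches self.biolink, so
    passing biolink=None is safe here."""
    bulk = BulkLoad(biolink=None)
    assert bulk.cleanup("[a|b]") == "@a^b@"                 # brackets and separator escaped
    assert bulk.cleanup(["[x|y]", "plain"]) == ["@x^y@", "plain"]
    return bulk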
class Roger:
""" Consolidate Roger functionality for a cleaner interface. """
def __init__(self, to_string=False):
""" Initialize.
:param to_string: Log messages to a string, available as self.log_stream.getvalue()
after execution completes.
"""
import logging
if to_string:
""" Add a stream handler to enable to_string. """
self.log_stream = StringIO()
self.string_handler = logging.StreamHandler (self.log_stream)
log.addHandler (self.string_handler)
self.biolink = BiolinkModel ()
self.kgx = KGXModel (self.biolink)
self.bulk = BulkLoad (self.biolink)
def __enter__(self):
""" Implement Python's Context Manager interface. """
return self
def __exit__(self, exception_type, exception_value, traceback):
""" Implement Python's Context Manager interface. We use this finalizer
to detach the stream handler appended in the constructor.
:param exception_type: Type of exception, if one occurred.
:param exception_value: The exception, if one occurred.
:param traceback: The stack trace explaining the exception.
"""
if exception_type or exception_value or traceback:
log.error ("{} {} {}".format (exception_type, exception_value, traceback))
log.removeHandler (self.string_handler)
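def _roger_usage_sketch():
    """Hedged usage sketch added for illustration (not part of the original
    module): Roger is a context manager; with to_string=True its log output is
    captured in roger.log_stream, which is how the RogerUtil wrappers below
    hand logs back to callers such as workflow engines."""
    with Roger(to_string=True) as roger:
        roger.kgx.create_schema()
        captured = roger.log_stream.getvalue()
    return captured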
class RogerUtil:
""" An interface abstracting Roger's inner workings to make it easier to
incorporate into external tools like workflow engines. """
@staticmethod
def get_kgx (to_string=False):
output = None
with Roger (to_string) as roger:
roger.kgx.get ()
output = roger.log_stream.getvalue () if to_string else None
return output
@staticmethod
def create_schema (to_string=False):
output = None
with Roger (to_string) as roger:
roger.kgx.create_schema ()
output = roger.log_stream.getvalue () if to_string else None
return output
@staticmethod
def merge_nodes (to_string=False):
output = None
with Roger (to_string) as roger:
roger.kgx.merge ()
output = roger.log_stream.getvalue () if to_string else None
return output
@staticmethod
def create_bulk_load (to_string=False):
output = None
with Roger (to_string) as roger:
roger.bulk.create ()
            output = roger.log_stream.getvalue () if to_string else None
        return output
from __future__ import print_function
import os
import sys
import time
import traceback
from unittest import TestResult, TextTestResult
from unittest.result import failfast
from jinja2 import Template
DEFAULT_TEMPLATE = os.path.join(os.path.dirname(__file__), "template", "report_template.html")
def load_template(template):
    """ Try to read a file from a given path; if the file
    does not exist, load the default one. """
file = None
try:
if template:
with open(template, "r") as f:
file = f.read()
except Exception as err:
print("Error: Your Template wasn't loaded", err,
"Loading Default Template", sep="\n")
finally:
if not file:
with open(DEFAULT_TEMPLATE, "r") as f:
file = f.read()
return file
def render_html(template, **kwargs):
template_file = load_template(template)
if template_file:
template = Template(template_file)
return template.render(**kwargs)
def testcase_name(test_method):
testcase = type(test_method)
module = testcase.__module__ + "."
if module == "__main__.":
module = ""
result = module + testcase.__name__
return result
def strip_module_names(testcase_names):
    """Examine all given test case names and strip them down to the minimal
    names needed to distinguish each. This prevents clashes between test
    cases housed in different files that share the same class name."""
result = list(testcase_names)
for i, testcase in enumerate(testcase_names):
classname = testcase.split(".")[-1]
duplicate_found = False
testcase_names_ = list(testcase_names)
del testcase_names_[i]
for testcase_ in testcase_names_:
classname_ = testcase_.split(".")[-1]
if classname_ == classname:
duplicate_found = True
if not duplicate_found:
result[i] = classname
return result
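def _strip_module_names_example():
    """Illustrative sketch added for this write-up (not in the original
    module): only names whose class part clashes keep their module prefix."""
    unique = strip_module_names(["pkg_a.TestFoo", "pkg_b.TestBar"])
    clashing = strip_module_names(["pkg_a.TestFoo", "pkg_b.TestFoo"])
    # unique   -> ["TestFoo", "TestBar"]
    # clashing -> ["pkg_a.TestFoo", "pkg_b.TestFoo"]
    return unique, clashing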
class _TestInfo(object):
    """ Keeps information about the execution of a test method. """
(SUCCESS, FAILURE, ERROR, SKIP) = range(4)
def __init__(self, test_result, test_method, outcome=SUCCESS,
err=None, subTest=None):
self.test_result = test_result
self.outcome = outcome
self.elapsed_time = 0
self.err = err
self.stdout = test_result._stdout_data
self.stderr = test_result._stderr_data
self.is_subtest = subTest is not None
self.test_description = self.test_result.getDescription(test_method)
self.test_exception_info = (
'' if outcome in (self.SUCCESS, self.SKIP)
else self.test_result._exc_info_to_string(
self.err, test_method))
self.test_name = testcase_name(test_method)
if not self.is_subtest:
self.test_id = test_method.id()
else:
self.test_id = subTest.id()
def id(self):
return self.test_id
def test_finished(self):
self.elapsed_time = self.test_result.stop_time - self.test_result.start_time
def get_description(self):
return self.test_description
def get_error_info(self):
return self.test_exception_info
class _SubTestInfos(object):
# TODO: make better: inherit _TestInfo?
(SUCCESS, FAILURE, ERROR, SKIP) = range(4)
def __init__(self, test_id, subtests):
self.subtests = subtests
self.test_id = test_id
self.outcome = self.check_outcome()
def check_outcome(self):
outcome = _TestInfo.SUCCESS
for subtest in self.subtests:
if subtest.outcome != _TestInfo.SUCCESS:
outcome = _TestInfo.FAILURE
break
return outcome
class HtmlTestResult(TextTestResult):
    """ A test result class that expresses test results in HTML. """
start_time = None
stop_time = None
default_prefix = "TestResults_"
def __init__(self, stream, descriptions, verbosity):
TextTestResult.__init__(self, stream, descriptions, verbosity)
self.buffer = True
self._stdout_data = None
self._stderr_data = None
self.successes = []
self.subtests = {}
self.callback = None
self.infoclass = _TestInfo
self.report_files = []
def _prepare_callback(self, test_info, target_list, verbose_str,
                          short_str):
        """ Appends an 'info class' to the given target list and sets a
        callback method to be called by the stopTest method."""
target_list.append(test_info)
        def callback():
            """ Print the test method outcome and elapsed time to the stream. """
test_info.test_finished()
if self.showAll:
self.stream.writeln(
"{} ({:3f}s)".format(verbose_str, test_info.elapsed_time))
elif self.dots:
self.stream.write(short_str)
self.callback = callback
    def getDescription(self, test):
        """ Return the test description (the string representation of the test). """
return str(test)
    def startTest(self, test):
        """ Called before each test method executes. """
self.start_time = time.time()
TestResult.startTest(self, test)
if self.showAll:
self.stream.write(" " + self.getDescription(test))
self.stream.write(" ... ")
def _save_output_data(self):
try:
self._stdout_data = sys.stdout.getvalue()
self._stderr_data = sys.stderr.getvalue()
except AttributeError:
pass
    def stopTest(self, test):
        """ Called after each test method executes. """
self._save_output_data()
TextTestResult.stopTest(self, test)
self.stop_time = time.time()
if self.callback and callable(self.callback):
self.callback()
self.callback = None
def addSuccess(self, test):
""" Called when a test executes successfully. """
self._save_output_data()
self._prepare_callback(self.infoclass(self, test), self.successes, "OK", ".")
@failfast
def addFailure(self, test, err):
""" Called when a test method fails. """
self._save_output_data()
testinfo = self.infoclass(self, test, self.infoclass.FAILURE, err)
self._prepare_callback(testinfo, self.failures, "FAIL", "F")
@failfast
    def addError(self, test, err):
        """ Called when a test method raises an error. """
self._save_output_data()
testinfo = self.infoclass(self, test, self.infoclass.ERROR, err)
self._prepare_callback(testinfo, self.errors, 'ERROR', 'E')
def addSubTest(self, testcase, test, err):
""" Called when a subTest completes. """
self._save_output_data()
# TODO: should ERROR cases be considered here too?
if err is None:
testinfo = self.infoclass(self, testcase, self.infoclass.SUCCESS, err, subTest=test)
self._prepare_callback(testinfo, self.successes, "OK", ".")
else:
testinfo = self.infoclass(self, testcase, self.infoclass.FAILURE, err, subTest=test)
self._prepare_callback(testinfo, self.failures, "FAIL", "F")
test_id_components = str(testcase).rstrip(')').split(' (')
test_id = test_id_components[1] + '.' + test_id_components[0]
if test_id not in self.subtests:
self.subtests[test_id] = []
self.subtests[test_id].append(testinfo)
    def addSkip(self, test, reason):
        """ Called when a test method was skipped. """
self._save_output_data()
testinfo = self.infoclass(self, test, self.infoclass.SKIP, reason)
self._prepare_callback(testinfo, self.skipped, "SKIP", "S")
def printErrorList(self, flavour, errors):
"""
Writes information about the FAIL or ERROR to the stream.
"""
for test_info in errors:
self.stream.writeln(self.separator1)
self.stream.writeln(
'{} [{:3f}s]: {}'.format(flavour, test_info.elapsed_time,
test_info.test_id)
)
self.stream.writeln(self.separator2)
self.stream.writeln('%s' % test_info.get_error_info())
def _get_info_by_testcase(self):
""" Organize test results by TestCase module. """
tests_by_testcase = {}
subtest_names = set(self.subtests.keys())
for test_name, subtests in self.subtests.items():
subtest_info = _SubTestInfos(test_name, subtests)
testcase_name = ".".join(test_name.split(".")[:-1])
if testcase_name not in tests_by_testcase:
tests_by_testcase[testcase_name] = []
tests_by_testcase[testcase_name].append(subtest_info)
for tests in (self.successes, self.failures, self.errors, self.skipped):
for test_info in tests:
# subtests will be contained by _SubTestInfos objects but there is also the
# case where all subtests pass and the method is added as a success as well
# which must be filtered out
if test_info.is_subtest or test_info.test_id in subtest_names:
continue
if isinstance(test_info, tuple): # TODO: does this ever occur?
test_info = test_info[0]
testcase_name = ".".join(test_info.test_id.split(".")[:-1])
if testcase_name not in tests_by_testcase:
tests_by_testcase[testcase_name] = []
tests_by_testcase[testcase_name].append(test_info)
        # unittest runs tests in alphabetical order based on test name, so re-assert that ordering here
for testcase in tests_by_testcase.values():
testcase.sort(key=lambda x: x.test_id)
return tests_by_testcase
@staticmethod
def _format_duration(elapsed_time):
"""Format the elapsed time in seconds, or milliseconds if the duration is less than 1 second."""
if elapsed_time > 1:
duration = '{:2.2f} s'.format(elapsed_time)
else:
duration = '{:d} ms'.format(int(elapsed_time * 1000))
return duration
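    # Example (illustrative, not part of the original source):
    #   HtmlTestResult._format_duration(2.5)   -> '2.50 s'
    #   HtmlTestResult._format_duration(0.123) -> '123 ms'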
def get_results_summary(self, tests):
"""Create a summary of the outcomes of all given tests."""
failures = errors = skips = successes = 0
for test in tests:
outcome = test.outcome
if outcome == test.ERROR:
errors += 1
elif outcome == test.FAILURE:
failures += 1
elif outcome == test.SKIP:
skips += 1
elif outcome == test.SUCCESS:
successes += 1
elapsed_time = 0
for testinfo in tests:
if not isinstance(testinfo, _SubTestInfos):
elapsed_time += testinfo.elapsed_time
else:
for subtest in testinfo.subtests:
elapsed_time += subtest.elapsed_time
results_summary = {
"total": len(tests),
"error": errors,
"failure": failures,
"skip": skips,
"success": successes,
"duration": self._format_duration(elapsed_time)
}
return results_summary
def _get_header_info(self, tests, start_time):
results_summary = self.get_results_summary(tests)
header_info = {
"start_time": start_time,
"status": results_summary
}
return header_info
def _get_report_summaries(self, all_results, testRunner):
""" Generate headers and summaries for all given test cases."""
summaries = {}
for test_case_class_name, test_case_tests in all_results.items():
summaries[test_case_class_name] = self.get_results_summary(test_case_tests)
return summaries
def generate_reports(self, testRunner):
""" Generate report(s) for all given test cases that have been run. """
status_tags = ('success', 'danger', 'warning', 'info')
all_results = self._get_info_by_testcase()
summaries = self._get_report_summaries(all_results, testRunner)
if not testRunner.combine_reports:
for test_case_class_name, test_case_tests in all_results.items():
header_info = self._get_header_info(test_case_tests, testRunner.start_time)
html_file = render_html(
testRunner.template,
title=testRunner.report_title,
header_info=header_info,
all_results={test_case_class_name: test_case_tests},
status_tags=status_tags,
summaries=summaries,
**testRunner.template_args
)
# append test case name if multiple reports to be generated
if testRunner.report_name is None:
report_name_body = self.default_prefix + test_case_class_name
else:
report_name_body = "{}_{}".format(testRunner.report_name, test_case_class_name)
self.generate_file(testRunner, report_name_body, html_file)
else:
header_info = self._get_header_info(
[item for sublist in all_results.values() for item in sublist],
testRunner.start_time
)
html_file = render_html(
testRunner.template,
title=testRunner.report_title,
header_info=header_info,
all_results=all_results,
status_tags=status_tags,
summaries=summaries,
**testRunner.template_args
)
# if available, use user report name
if testRunner.report_name is not None:
report_name_body = testRunner.report_name
else:
report_name_body = self.default_prefix + "_".join(strip_module_names(list(all_results.keys())))[:128]
self.generate_file(testRunner, report_name_body, html_file)
def generate_file(self, testRunner, report_name, report):
""" Generate the report file in the given path. """
dir_to = testRunner.output
if not os.path.exists(dir_to):
os.makedirs(dir_to)
if testRunner.timestamp:
report_name += "_" + testRunner.timestamp
report_name += ".html"
path_file = os.path.abspath(os.path.join(dir_to, report_name))
self.stream.writeln(os.path.relpath(path_file))
self.report_files.append(path_file)
with open(path_file, 'w') as report_file:
report_file.write(report)
def _exc_info_to_string(self, err, test):
""" Converts a sys.exc_info()-style tuple of values into a string."""
# if six.PY3:
# # It works fine in python 3
# try:
# return super(_HTMLTestResult, self)._exc_info_to_string(
# err, test)
# except AttributeError:
# # We keep going using the legacy python <= 2 way
# pass
# This comes directly from python2 unittest
exctype, value, tb = err
# Skip test runner traceback levels
while tb and self._is_relevant_tb_level(tb):
tb = tb.tb_next
if | |
fid == 3:
if ftype == TType.STRUCT:
self.auth = TimAuth()
self.auth.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('timRemoteUserAuth_args')
if self.tid is not None:
oprot.writeFieldBegin('tid', TType.STRUCT, 1)
self.tid.write(oprot)
oprot.writeFieldEnd()
if self.pwd is not None:
oprot.writeFieldBegin('pwd', TType.STRING, 2)
oprot.writeString(self.pwd)
oprot.writeFieldEnd()
if self.auth is not None:
oprot.writeFieldBegin('auth', TType.STRUCT, 3)
self.auth.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __hash__(self):
value = 17
value = (value * 31) ^ hash(self.tid)
value = (value * 31) ^ hash(self.pwd)
value = (value * 31) ^ hash(self.auth)
return value
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class timRemoteUserAuth_result:
"""
Attributes:
- success
"""
thrift_spec = (
(0, TType.STRUCT, 'success', (TimRemoteUserBean, TimRemoteUserBean.thrift_spec), None, ), # 0
)
def __init__(self, success=None,):
self.success = success
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.STRUCT:
self.success = TimRemoteUserBean()
self.success.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('timRemoteUserAuth_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.STRUCT, 0)
self.success.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __hash__(self):
value = 17
value = (value * 31) ^ hash(self.success)
return value
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class timRemoteUserGet_args:
"""
Attributes:
- tid
- auth
"""
thrift_spec = (
None, # 0
(1, TType.STRUCT, 'tid', (Tid, Tid.thrift_spec), None, ), # 1
(2, TType.STRUCT, 'auth', (TimAuth, TimAuth.thrift_spec), None, ), # 2
)
def __init__(self, tid=None, auth=None,):
self.tid = tid
self.auth = auth
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRUCT:
self.tid = Tid()
self.tid.read(iprot)
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRUCT:
self.auth = TimAuth()
self.auth.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('timRemoteUserGet_args')
if self.tid is not None:
oprot.writeFieldBegin('tid', TType.STRUCT, 1)
self.tid.write(oprot)
oprot.writeFieldEnd()
if self.auth is not None:
oprot.writeFieldBegin('auth', TType.STRUCT, 2)
self.auth.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __hash__(self):
value = 17
value = (value * 31) ^ hash(self.tid)
value = (value * 31) ^ hash(self.auth)
return value
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class timRemoteUserGet_result:
"""
Attributes:
- success
"""
thrift_spec = (
(0, TType.STRUCT, 'success', (TimRemoteUserBean, TimRemoteUserBean.thrift_spec), None, ), # 0
)
def __init__(self, success=None,):
self.success = success
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.STRUCT:
self.success = TimRemoteUserBean()
self.success.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('timRemoteUserGet_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.STRUCT, 0)
self.success.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __hash__(self):
value = 17
value = (value * 31) ^ hash(self.success)
return value
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class timRemoteUserEdit_args:
"""
Attributes:
- tid
- ub
- auth
"""
thrift_spec = (
None, # 0
(1, TType.STRUCT, 'tid', (Tid, Tid.thrift_spec), None, ), # 1
(2, TType.STRUCT, 'ub', (TimUserBean, TimUserBean.thrift_spec), None, ), # 2
(3, TType.STRUCT, 'auth', (TimAuth, TimAuth.thrift_spec), None, ), # 3
)
def __init__(self, tid=None, ub=None, auth=None,):
self.tid = tid
self.ub = ub
self.auth = auth
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRUCT:
self.tid = Tid()
self.tid.read(iprot)
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRUCT:
self.ub = TimUserBean()
self.ub.read(iprot)
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.STRUCT:
self.auth = TimAuth()
self.auth.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('timRemoteUserEdit_args')
if self.tid is not None:
oprot.writeFieldBegin('tid', TType.STRUCT, 1)
self.tid.write(oprot)
oprot.writeFieldEnd()
if self.ub is not None:
oprot.writeFieldBegin('ub', TType.STRUCT, 2)
self.ub.write(oprot)
oprot.writeFieldEnd()
if self.auth is not None:
oprot.writeFieldBegin('auth', TType.STRUCT, 3)
self.auth.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __hash__(self):
value = 17
value = (value * 31) ^ hash(self.tid)
value = (value * 31) ^ hash(self.ub)
value = (value * 31) ^ hash(self.auth)
return value
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class timRemoteUserEdit_result:
"""
Attributes:
- success
"""
thrift_spec = (
(0, TType.STRUCT, 'success', (TimRemoteUserBean, TimRemoteUserBean.thrift_spec), None, ), # 0
)
def __init__(self, success=None,):
self.success = success
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.STRUCT:
self.success = TimRemoteUserBean()
self.success.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('timRemoteUserEdit_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.STRUCT, 0)
self.success.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __hash__(self):
value = 17
value = (value * 31) ^ hash(self.success)
return value
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class timResponsePresence_args:
"""
Attributes:
- pbean
- auth
"""
thrift_spec = (
None, # 0
(1, TType.STRUCT, 'pbean', (TimPBean, TimPBean.thrift_spec), None, ), # 1
(2, TType.STRUCT, 'auth', (TimAuth, TimAuth.thrift_spec), None, ), # 2
)
def __init__(self, pbean=None, auth=None,):
self.pbean = pbean
self.auth = auth
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRUCT:
self.pbean = TimPBean()
self.pbean.read(iprot)
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRUCT:
self.auth = TimAuth()
self.auth.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('timResponsePresence_args')
if self.pbean is not None:
oprot.writeFieldBegin('pbean', TType.STRUCT, 1)
self.pbean.write(oprot)
oprot.writeFieldEnd()
if self.auth is not None:
oprot.writeFieldBegin('auth', TType.STRUCT, | |
``val``.
:param Optional[str] val:
:rtype: LookupError
"""
return cls('malformed_path', val)
def is_malformed_path(self):
"""
Check if the union tag is ``malformed_path``.
:rtype: bool
"""
return self._tag == 'malformed_path'
def is_not_found(self):
"""
Check if the union tag is ``not_found``.
:rtype: bool
"""
return self._tag == 'not_found'
def is_not_file(self):
"""
Check if the union tag is ``not_file``.
:rtype: bool
"""
return self._tag == 'not_file'
def is_not_folder(self):
"""
Check if the union tag is ``not_folder``.
:rtype: bool
"""
return self._tag == 'not_folder'
def is_restricted_content(self):
"""
Check if the union tag is ``restricted_content``.
:rtype: bool
"""
return self._tag == 'restricted_content'
def is_unsupported_content_type(self):
"""
Check if the union tag is ``unsupported_content_type``.
:rtype: bool
"""
return self._tag == 'unsupported_content_type'
def is_other(self):
"""
Check if the union tag is ``other``.
:rtype: bool
"""
return self._tag == 'other'
def get_malformed_path(self):
"""
The given path does not satisfy the required path format. Please refer
to the `Path formats documentation
<https://www.dropbox.com/developers/documentation/http/documentation#path-formats>`_
for more information.
Only call this if :meth:`is_malformed_path` is true.
:rtype: Optional[str]
"""
if not self.is_malformed_path():
raise AttributeError("tag 'malformed_path' not set")
return self._value
def _process_custom_annotations(self, annotation_type, field_path, processor):
super(LookupError, self)._process_custom_annotations(annotation_type, field_path, processor)
def __repr__(self):
return 'LookupError(%r, %r)' % (self._tag, self._value)
LookupError_validator = bv.Union(LookupError)
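def _describe_lookup_error(err):
    """Hedged helper sketch added for illustration (not part of the generated
    SDK): shows the intended way to consume a tagged union such as LookupError,
    probing the ``is_*`` predicates before calling the matching ``get_*``."""
    if err.is_not_found():
        return "path not found"
    if err.is_malformed_path():
        # The associated value is optional, so get_malformed_path() may return None.
        return "malformed path: {!r}".format(err.get_malformed_path())
    if err.is_restricted_content():
        return "content restricted"
    return "other lookup error: {!r}".format(err)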
class MediaInfo(bb.Union):
"""
This class acts as a tagged union. Only one of the ``is_*`` methods will
return true. To get the associated value of a tag (if one exists), use the
corresponding ``get_*`` method.
:ivar files.MediaInfo.pending: Indicate the photo/video is still under
processing and metadata is not available yet.
:ivar MediaMetadata MediaInfo.metadata: The metadata for the photo/video.
"""
_catch_all = None
# Attribute is overwritten below the class definition
pending = None
@classmethod
def metadata(cls, val):
"""
Create an instance of this class set to the ``metadata`` tag with value
``val``.
:param MediaMetadata val:
:rtype: MediaInfo
"""
return cls('metadata', val)
def is_pending(self):
"""
Check if the union tag is ``pending``.
:rtype: bool
"""
return self._tag == 'pending'
def is_metadata(self):
"""
Check if the union tag is ``metadata``.
:rtype: bool
"""
return self._tag == 'metadata'
def get_metadata(self):
"""
The metadata for the photo/video.
Only call this if :meth:`is_metadata` is true.
:rtype: MediaMetadata
"""
if not self.is_metadata():
raise AttributeError("tag 'metadata' not set")
return self._value
def _process_custom_annotations(self, annotation_type, field_path, processor):
super(MediaInfo, self)._process_custom_annotations(annotation_type, field_path, processor)
def __repr__(self):
return 'MediaInfo(%r, %r)' % (self._tag, self._value)
MediaInfo_validator = bv.Union(MediaInfo)
class MediaMetadata(bb.Struct):
"""
Metadata for a photo or video.
:ivar files.MediaMetadata.dimensions: Dimension of the photo/video.
:ivar files.MediaMetadata.location: The GPS coordinate of the photo/video.
:ivar files.MediaMetadata.time_taken: The timestamp when the photo/video is
taken.
"""
__slots__ = [
'_dimensions_value',
'_dimensions_present',
'_location_value',
'_location_present',
'_time_taken_value',
'_time_taken_present',
]
_has_required_fields = False
def __init__(self,
dimensions=None,
location=None,
time_taken=None):
self._dimensions_value = None
self._dimensions_present = False
self._location_value = None
self._location_present = False
self._time_taken_value = None
self._time_taken_present = False
if dimensions is not None:
self.dimensions = dimensions
if location is not None:
self.location = location
if time_taken is not None:
self.time_taken = time_taken
@property
def dimensions(self):
"""
Dimension of the photo/video.
:rtype: Dimensions
"""
if self._dimensions_present:
return self._dimensions_value
else:
return None
@dimensions.setter
def dimensions(self, val):
if val is None:
del self.dimensions
return
self._dimensions_validator.validate_type_only(val)
self._dimensions_value = val
self._dimensions_present = True
@dimensions.deleter
def dimensions(self):
self._dimensions_value = None
self._dimensions_present = False
@property
def location(self):
"""
The GPS coordinate of the photo/video.
:rtype: GpsCoordinates
"""
if self._location_present:
return self._location_value
else:
return None
@location.setter
def location(self, val):
if val is None:
del self.location
return
self._location_validator.validate_type_only(val)
self._location_value = val
self._location_present = True
@location.deleter
def location(self):
self._location_value = None
self._location_present = False
@property
def time_taken(self):
"""
The timestamp when the photo/video is taken.
:rtype: datetime.datetime
"""
if self._time_taken_present:
return self._time_taken_value
else:
return None
@time_taken.setter
def time_taken(self, val):
if val is None:
del self.time_taken
return
val = self._time_taken_validator.validate(val)
self._time_taken_value = val
self._time_taken_present = True
@time_taken.deleter
def time_taken(self):
self._time_taken_value = None
self._time_taken_present = False
def _process_custom_annotations(self, annotation_type, field_path, processor):
super(MediaMetadata, self)._process_custom_annotations(annotation_type, field_path, processor)
def __repr__(self):
return 'MediaMetadata(dimensions={!r}, location={!r}, time_taken={!r})'.format(
self._dimensions_value,
self._location_value,
self._time_taken_value,
)
MediaMetadata_validator = bv.StructTree(MediaMetadata)
class RelocationBatchArgBase(bb.Struct):
"""
:ivar files.RelocationBatchArgBase.entries: List of entries to be moved or
copied. Each entry is :class:`RelocationPath`.
:ivar files.RelocationBatchArgBase.autorename: If there's a conflict with
any file, have the Dropbox server try to autorename that file to avoid
the conflict.
"""
__slots__ = [
'_entries_value',
'_entries_present',
'_autorename_value',
'_autorename_present',
]
_has_required_fields = True
def __init__(self,
entries=None,
autorename=None):
self._entries_value = None
self._entries_present = False
self._autorename_value = None
self._autorename_present = False
if entries is not None:
self.entries = entries
if autorename is not None:
self.autorename = autorename
@property
def entries(self):
"""
List of entries to be moved or copied. Each entry is
:class:`RelocationPath`.
:rtype: list of [RelocationPath]
"""
if self._entries_present:
return self._entries_value
else:
raise AttributeError("missing required field 'entries'")
@entries.setter
def entries(self, val):
val = self._entries_validator.validate(val)
self._entries_value = val
self._entries_present = True
@entries.deleter
def entries(self):
self._entries_value = None
self._entries_present = False
@property
def autorename(self):
"""
If there's a conflict with any file, have the Dropbox server try to
autorename that file to avoid the conflict.
:rtype: bool
"""
if self._autorename_present:
return self._autorename_value
else:
return False
@autorename.setter
def autorename(self, val):
val = self._autorename_validator.validate(val)
self._autorename_value = val
self._autorename_present = True
@autorename.deleter
def autorename(self):
self._autorename_value = None
self._autorename_present = False
def _process_custom_annotations(self, annotation_type, field_path, processor):
super(RelocationBatchArgBase, self)._process_custom_annotations(annotation_type, field_path, processor)
def __repr__(self):
return 'RelocationBatchArgBase(entries={!r}, autorename={!r})'.format(
self._entries_value,
self._autorename_value,
)
RelocationBatchArgBase_validator = bv.Struct(RelocationBatchArgBase)
class MoveBatchArg(RelocationBatchArgBase):
"""
:ivar files.MoveBatchArg.allow_ownership_transfer: Allow moves by owner even
if it would result in an ownership transfer for the content being moved.
This does not apply to copies.
"""
__slots__ = [
'_allow_ownership_transfer_value',
'_allow_ownership_transfer_present',
]
_has_required_fields = True
def __init__(self,
entries=None,
autorename=None,
allow_ownership_transfer=None):
super(MoveBatchArg, self).__init__(entries,
autorename)
self._allow_ownership_transfer_value = None
self._allow_ownership_transfer_present = False
if allow_ownership_transfer is not None:
self.allow_ownership_transfer = allow_ownership_transfer
@property
def allow_ownership_transfer(self):
"""
Allow moves by owner even if it would result in an ownership transfer
for the content being moved. This does not apply to copies.
:rtype: bool
"""
if self._allow_ownership_transfer_present:
return self._allow_ownership_transfer_value
else:
return False
@allow_ownership_transfer.setter
def allow_ownership_transfer(self, val):
val = self._allow_ownership_transfer_validator.validate(val)
self._allow_ownership_transfer_value = val
self._allow_ownership_transfer_present = True
@allow_ownership_transfer.deleter
def allow_ownership_transfer(self):
self._allow_ownership_transfer_value = None
self._allow_ownership_transfer_present = False
def _process_custom_annotations(self, annotation_type, field_path, processor):
super(MoveBatchArg, self)._process_custom_annotations(annotation_type, field_path, processor)
def __repr__(self):
return 'MoveBatchArg(entries={!r}, autorename={!r}, allow_ownership_transfer={!r})'.format(
self._entries_value,
self._autorename_value,
self._allow_ownership_transfer_value,
)
MoveBatchArg_validator = bv.Struct(MoveBatchArg)
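def _move_batch_arg_defaults_example():
    """Hedged sketch added for illustration (not part of the generated SDK):
    optional struct fields fall back to their defaults until set, so a bare
    MoveBatchArg reports autorename=False and allow_ownership_transfer=False,
    while the required ``entries`` field raises AttributeError until assigned."""
    arg = MoveBatchArg()
    assert arg.autorename is False
    assert arg.allow_ownership_transfer is False
    try:
        arg.entries
    except AttributeError:
        pass  # expected: 'entries' is required but not yet set
    return arg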
class PhotoMetadata(MediaMetadata):
"""
Metadata for a photo.
"""
__slots__ = [
]
_has_required_fields = False
def __init__(self,
dimensions=None,
location=None,
time_taken=None):
super(PhotoMetadata, self).__init__(dimensions,
location,
time_taken)
def _process_custom_annotations(self, annotation_type, field_path, processor):
super(PhotoMetadata, self)._process_custom_annotations(annotation_type, field_path, processor)
def __repr__(self):
return 'PhotoMetadata(dimensions={!r}, location={!r}, time_taken={!r})'.format(
self._dimensions_value,
self._location_value,
self._time_taken_value,
)
PhotoMetadata_validator = bv.Struct(PhotoMetadata)
class PreviewArg(bb.Struct):
"""
:ivar files.PreviewArg.path: The path of the file to preview.
:ivar files.PreviewArg.rev: Please specify revision in ``path`` instead.
"""
__slots__ = [
'_path_value',
'_path_present',
'_rev_value',
'_rev_present',
]
_has_required_fields = True
def __init__(self,
path=None,
rev=None):
self._path_value = None
self._path_present = False
self._rev_value = None
self._rev_present = False
if path is not None:
self.path = path
if rev is not None:
self.rev = rev
@property
def path(self):
"""
The path of the file to preview.
:rtype: str
"""
if self._path_present:
return self._path_value
else:
raise AttributeError("missing required field 'path'")
@path.setter
def path(self, val):
val = self._path_validator.validate(val)
self._path_value = val
self._path_present = True
@path.deleter
def path(self):
self._path_value = None
self._path_present = False
@property
def rev(self):
"""
Please specify revision in ``path`` instead.
:rtype: str
"""
if self._rev_present:
return self._rev_value
else:
return None
@rev.setter
def rev(self, val):
if val is None:
del self.rev
return
val = self._rev_validator.validate(val)
self._rev_value = val
self._rev_present = True
@rev.deleter
def rev(self):
self._rev_value = None
self._rev_present = False
def _process_custom_annotations(self, annotation_type, field_path, processor):
super(PreviewArg, self)._process_custom_annotations(annotation_type, field_path, processor)
def __repr__(self):
return 'PreviewArg(path={!r}, rev={!r})'.format(
self._path_value,
self._rev_value,
)
PreviewArg_validator = bv.Struct(PreviewArg)
class PreviewError(bb.Union):
"""
This class acts as a tagged union. Only one of the ``is_*`` methods will
return true. To get the associated value of a tag (if one exists), use the
corresponding ``get_*`` method.
:ivar LookupError PreviewError.path: An error occurs when downloading
metadata for the file.
:ivar files.PreviewError.in_progress: This preview generation is still in
progress and the file is not ready for preview yet.
:ivar files.PreviewError.unsupported_extension: The file extension is not
        supported for preview generation.
:ivar files.PreviewError.unsupported_content: The file content is | |
import os
import sys
import json
import argparse
sys.path.append('../')
import numpy as np
import pandas as pd
from aser.extract.eventuality_extractor import SeedRuleEventualityExtractor
from tqdm import tqdm
from utils.atomic_utils import SUBJ2POSS, PP_SINGLE
from itertools import permutations
from itertools import chain
from utils.utils import *
from multiprocessing import Pool
def prepare_atomic2020():
omcs_relations = ['Causes', 'HasSubEvent', 'xReason',]
atomic_new_relations = ['isAfter', 'isBefore', 'HinderedBy']
event_relations = omcs_relations + atomic_new_relations
atomic_2020 = {
"trn": pd.read_csv("../data/atomic2020_data-feb2021/train.tsv", sep="\t"),
"dev": pd.read_csv("../data/atomic2020_data-feb2021/dev.tsv", sep="\t"),
"tst": pd.read_csv("../data/atomic2020_data-feb2021/test.tsv", sep="\t"),
}
atomic_2020_events = {
"trn":[(atomic_2020["trn"].loc[i][0], atomic_2020["trn"].loc[i][1], atomic_2020["trn"].loc[i][2])\
for i in range(len(atomic_2020["trn"])) \
if atomic_2020["trn"].loc[i][1] in event_relations],
"dev":[(atomic_2020["dev"].loc[i][0], atomic_2020["dev"].loc[i][1], atomic_2020["dev"].loc[i][2])\
for i in range(len(atomic_2020["dev"])) \
if atomic_2020["dev"].loc[i][1] in event_relations],
"tst":[(atomic_2020["tst"].loc[i][0], atomic_2020["tst"].loc[i][1], atomic_2020["tst"].loc[i][2])\
for i in range(len(atomic_2020["tst"])) \
if atomic_2020["tst"].loc[i][1] in event_relations],
}
new_atomic_tuples = {"trn":[], "tst":[], "dev":[]}
for r in ["trn", "dev", "tst"]:
for head, rel, tail in tqdm(atomic_2020_events[r]):
if rel in atomic_new_relations:
new_atomic_tuples[r].append((head, rel, tail))
return new_atomic_tuples
def simple_parse(sent):
"""
Deal with possessive cases only.
"""
strs = sent.split()
new_tokens = []
for token in strs:
if token.endswith("'s") and token != "'s":
new_tokens.append(token[:-2])
new_tokens.append("'s")
else:
new_tokens.append(token)
return new_tokens
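def _simple_parse_example():
    """Illustrative sketch added for this write-up (not in the original
    script): simple_parse only splits trailing possessive markers, so the
    PersonX/PersonY placeholders stay intact as their own tokens."""
    tokens = simple_parse("PersonX takes PersonY's phone")
    # -> ['PersonX', 'takes', 'PersonY', "'s", 'phone']
    return tokens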
def instantiate_ppn(head, tail):
"""
Input: Head and Tail in the original CSKG.
e.g., head = "PersonX takes PersonY's phone",
tail = "PersonY is sad"
Output: list(tuple())
A list of tuples with concrete personal pronouns.
[(he takes her phone, she is sad), (he takes my phone, i am sad), ...]
"""
# 1. Manually process possessive cases, the most common parsing issue in the dataset.
head_strs = simple_parse(head)
tail_strs = simple_parse(tail)
if not (len(head_strs) > 0 and len(tail_strs) > 0):
return []
    # 2. Dictionary that stores the indices of "PersonX", "PersonY", and "PersonZ"
# Personal pronoun dict
pp_dict = {"head":{}, "tail":{}}
for key, strs in {"head":head_strs, "tail":tail_strs}.items():
for i, word in enumerate(strs):
if word in ["PersonX", "PersonY", "PersonZ"]:
pp_dict[key][word] = pp_dict[key].get(word, []) + [i]
# 3. get the PersonX -> he/she permutation
all_cs_pp = set(pp_dict["head"].keys()) | set(pp_dict["tail"].keys())
# commonsense PPs, PersonX/Y
num_special_pp = len(all_cs_pp)
# number of personal pronoun placeholders.
perm_pp = list(permutations(PP_SINGLE, num_special_pp))
# print(perm_pp)
# 4. Replace
result_tuples = []
for perm in perm_pp:
if len(set(perm)) < num_special_pp :
# Make sure the permutation contains distinct PPs
continue
to_pp_single_dict = {cs_pp:perm[i] for i, cs_pp in enumerate(all_cs_pp)}
result_tuple = {"head":"", "tail":""}
for key, strs in {"head":head_strs, "tail":tail_strs}.items():
replacements = [to_pp_single_dict.get(token, token)\
for i, token in enumerate(strs)]
# 4.1 Possessive case processing
if "'s" in strs:
# Convert I 's -> my, he 's -> his
for cs_pp in pp_dict[key]: #PersonX/Y/Z
for idx in pp_dict[key][cs_pp]:
# if this pp need to be substitute
if replacements[idx] in SUBJ2POSS and \
replacements[min(idx+1, len(replacements)-1)] == "'s":
replacements[idx] = SUBJ2POSS[replacements[idx]]
replacements[idx+1] = "\REMOVE"
while "\REMOVE" in replacements:
replacements.remove("\REMOVE")
            # 4.2 Fix subject-verb agreement for "is" (i am / you are)
if "is" in replacements:
for i in range(len(replacements)-1):
if replacements[i] == "i" and replacements[i+1] == "is":
replacements[i+1] = "am"
if replacements[i] == "you" and replacements[i+1] == "is":
replacements[i+1] = "are"
# if no need to deal with possessive case
# simply replace PersonX/Y/Z with he/she.
result_tuple[key] = " ".join(replacements)
result_tuples.append((result_tuple["head"], result_tuple["tail"]))
return result_tuples
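def _instantiate_ppn_example():
    """Hedged example added for illustration (not in the original script):
    reproduces the case from the instantiate_ppn docstring. Each returned pair
    swaps PersonX/PersonY for distinct concrete pronouns from PP_SINGLE; the
    exact surface forms depend on what utils.atomic_utils defines there."""
    pairs = instantiate_ppn("PersonX takes PersonY's phone", "PersonY is sad")
    # e.g. ("he takes her phone", "she is sad") if PP_SINGLE contains he/she
    return pairs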
def fill_sentence(sent, r, has_subject):
if r in ['xEffect']:
# + subject
if has_subject:
return [sent]
else:
return [' '.join(["PersonX", sent])]
elif r in ['oEffect']:
if has_subject:
return [sent]
else:
return [' '.join(["PersonY", sent])]
elif r in ['oReact', 'xReact']:
# + subject / + subject is
if has_subject:
return [sent]
else:
if r == "oReact":
return [' '.join(["PersonY", sent])] + \
[' '.join(["PersonY", 'is', sent])]
else:
return [' '.join(["PersonX", sent])] + \
[' '.join(["PersonX", 'is', sent])]
elif r in ['xAttr']:
# + subject is
if has_subject:
return [sent]
else:
return [' '.join(["PersonX", 'is', sent])]
elif r in ['oWant', 'xWant']:
# + subject want / + subject
if has_subject:
return [sent]
else:
# if start with 'to'
if r == "oWant":
if sent.lower().split()[0] == 'to':
return [' '.join(["PersonY", 'want', sent])] \
+ [' '.join(["PersonY", " ".join(sent.lower().split()[1:]) ]) ]
else:
return [' '.join(["PersonY", 'want to', sent])] \
+ [' '.join(["PersonY", sent]) ]
else:
if sent.lower().split()[0] == 'to':
return [' '.join(["PersonX", 'want', sent])] \
+ [' '.join(["PersonX", " ".join(sent.lower().split()[1:]) ]) ]
else:
return [' '.join(["PersonX", 'want to', sent])] \
+ [' '.join(["PersonX", sent]) ]
elif r in ['xIntent']:
# + subject intent / + subject
if has_subject:
return [sent]
else:
# if start with 'to'
if sent.lower().split()[0] == 'to':
return [' '.join(["PersonX", 'intent', sent]) ] \
+ [' '.join(["PersonX", " ".join(sent.lower().split()[1:]) ]) ]
else:
return [' '.join(["PersonX", 'intent to', sent]) ]\
+ [' '.join(["PersonX", sent]) ]
elif r in ['xNeed']:
# + subject need / + subject
if has_subject:
return [sent]
else:
# if start with 'to'
if sent.lower().split()[0] == 'to':
return [' '.join(["PersonX", 'need', sent]) ]\
+ [' '.join(["PersonX", " ".join(sent.lower().split()[1:]) ]) ]
else:
return [' '.join(["PersonX", 'need to', sent]) ]\
+ [' '.join(["PersonX", sent]) ]
def unfold_parse_results(e):
# return the words of the extractor results
if len(e) == 0:
return ""
if len(e[0]) == 0:
return ""
return " ".join(e[0][0].words)
def contain_subject(dependencies):
return any(dep in [item[1] for item in dependencies] for dep in ['nsubj', 'nsubjpass'])
def process_pp(sent):
"""
Deal with the situation of "person x", "person y", "personx", "persony"
"""
fill_words = {"person x":"PersonX", "person y":"PersonY",
"personx":"PersonX", "persony":"PersonY",}
single_word_filter = {"x":"PersonX", "y": "PersonY"}
for strs in PP_filter_list:
if strs in sent:
sent = sent.replace(strs, fill_words[strs])
sent_split = sent.split()
if "x" in sent_split or "y" in sent_split:
sent = " ".join([single_word_filter.get(item, item) for item in sent_split])
return sent
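def _process_pp_example():
    """Illustrative sketch added for this write-up (not in the original
    script): process_pp maps the lower-cased placeholder variants back to
    PersonX/PersonY. It reads the module-level PP_filter_list defined further
    down, so it is only callable after the module has finished loading."""
    assert process_pp("person x hugs persony") == "PersonX hugs PersonY"
    assert process_pp("x thanks y") == "PersonX thanks PersonY"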
def parse(atomic_data, r, idx):
extracted_event_list = [[] for i in range(len(atomic_data))]
parse_cache = {}
for i in tqdm(range(idx, len(atomic_data), num_thread)):
if i in wc_idx or i in clause_idx:
continue
tmp_node = []
head = atomic_data["event"][i]
for tail_raw in json.loads(atomic_data[r][i]):
if tail_raw == 'none':
continue
# filter the text
tail_raw = tail_raw.lower()
tail_raw = process_pp(tail_raw)
parsed_result = e_extractor.parse_text(tail_raw)[0]
filled_sentences = fill_sentence(tail_raw, r,
tail_raw.startswith("PersonX") or tail_raw.startswith("PersonY"))
# or contain_subject(parsed_result['dependencies']))
candi_tuples = list(chain(*[instantiate_ppn(head, tail) for tail in filled_sentences]))
head_dict = {h:"" for h, _ in candi_tuples}
tail_dict = {t:"" for _, t in candi_tuples}
for h in head_dict:
if not h in parse_cache:
parse_cache[h] = unfold_parse_results(e_extractor.extract_from_text(h))
head_dict[h] = parse_cache[h]
for t in tail_dict:
if not t in parse_cache:
parse_cache[t] = unfold_parse_results(e_extractor.extract_from_text(t))
tail_dict[t] = parse_cache[t]
tmp_node.append([
(
head_dict[h],
tail_dict[t],
) for h, t in candi_tuples])
extracted_event_list[i] = tmp_node
return extracted_event_list
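# Reader note (added): parse() and parse_cn() take a worker index `idx` and stride
# through the rows with step `num_thread`, so worker i handles rows i, i + num_thread,
# i + 2*num_thread, ...  Each worker returns a full-length list with only its own rows
# filled in; the per-worker lists are merged row-wise at the bottom of the script via
#   all_results = [list(chain(*item)) for item in zip(*all_results)]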
def parse_cn(tuples, idx):
extracted_event_list = [[] for i in range(len(tuples))]
parse_cache = {}
for i in tqdm(range(idx, len(tuples), num_thread)):
head, rel, tail = tuples[i]
if not isinstance(head, str) or not isinstance(tail, str):
continue
if dataset == "conceptnet":
head = "PersonX " + head.lower()
tail = "PersonX " + tail.lower()
# filter the text
        tail = process_pp(tail)  # use the filtered text below; the original assigned it to an unused tail_raw
candi_tuples = instantiate_ppn(head, tail)
head_dict = {h:"" for h, _ in candi_tuples}
tail_dict = {t:"" for _, t in candi_tuples}
for h in head_dict:
if not h in parse_cache:
parse_cache[h] = unfold_parse_results(e_extractor.extract_from_text(h))
head_dict[h] = parse_cache[h]
for t in tail_dict:
if not t in parse_cache:
parse_cache[t] = unfold_parse_results(e_extractor.extract_from_text(t))
tail_dict[t] = parse_cache[t]
tmp_node= [
(
head_dict[h],
tail_dict[t],
) for h, t in candi_tuples]
extracted_event_list[i] = [tmp_node, rel]
return extracted_event_list
parser = argparse.ArgumentParser()
parser.add_argument("--relation", default='xWant', type=str, required=False,
choices=['oEffect', 'oReact', 'oWant', 'xAttr',
'xEffect', 'xIntent', 'xNeed', 'xReact', 'xWant'],
help="choose which relation to process")
parser.add_argument("--dataset", default='atomic', type=str, required=False,
choices=['atomic', 'atomic2020', 'conceptnet'],
help="dataset")
parser.add_argument("--port", default=14000, type=int, required=False,
help="port of stanford parser")
args = parser.parse_args()
e_extractor = SeedRuleEventualityExtractor(
corenlp_path = "/data/aser/stanford-corenlp-full-2018-02-27",
corenlp_port= args.port)
PP_filter_list = ["person x", "person y", "personx", "persony"]
dataset = args.dataset
if dataset == "atomic":
atomic_data = pd.read_csv('../data/v4_atomic_all_agg.csv')
clause_idx = np.load('../data/clause_idx.npy', allow_pickle=True)
wc_idx = np.load('../data/wildcard_idx.npy', allow_pickle=True)
num_thread = 5
relation = args.relation
# all_results = parse(atomic_data, args.relation, 0)
num_thread = 5
workers = Pool(num_thread)
all_results = []
for i in range(num_thread):
tmp_result = workers.apply_async(
parse,
args=(atomic_data, relation, i))
all_results.append(tmp_result)
workers.close()
workers.join()
all_results = [tmp_result.get() for tmp_result in all_results]
all_results = [list(chain(*item)) for item in zip(*all_results)]
if not os.path.exists('../data/new_matching'):
os.mkdir('../data/new_matching')
if not os.path.exists('../data/new_matching/ASER-format-words'):
os.mkdir('../data/new_matching/ASER-format-words')
np.save('../data/new_matching/ASER-format-words/ATOMIC_'+relation, all_results)
elif dataset == "conceptnet":
omcs_tuples = np.load("../data/omcs_tuples.npy", allow_pickle=True)[()]
for spl in ["trn", "dev", "tst"]:
num_thread = 5
# -*- coding: utf-8 -*-
import uqra, unittest, warnings, os, sys
from tqdm import tqdm
import numpy as np, scipy as sp
import chaospy as cp  # cp.Normal, cp.orth_ttr, cp.fit_quadrature, ... are used in the tests below
from uqra.solver.PowerSpectrum import PowerSpectrum
from uqra.environment import Kvitebjorn as Kvitebjorn
from sklearn import datasets
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import KFold
import pickle
warnings.filterwarnings(action="ignore", module="scipy", message="^internal gelsd")
sys.stdout = uqra.utilities.classes.Logger()
data_dir = '/Users/jinsongliu/BoxSync/MUSELab/uqra/examples/JupyterNotebook'
class BasicTestSuite(unittest.TestCase):
"""Basic test cases."""
def test_mPCE(self):
foo = lambda x: x**3 + 0.5*x + np.random.randn(*x.shape)
dist = cp.Normal()
x = dist.sample(1000).reshape(1,-1)
print(x.shape)
y = np.squeeze(np.array([foo(x), foo(x)]).T)
print(y.shape)
# basis = cp.orth_ttr(5, dist)
foo_hat = uqra.PCE(5, dist)
foo_hat.fit(x, y, method='OLS')
y_pred = foo_hat.predict(x)
print(y_pred.shape)
foo_hat = uqra.mPCE(5, dist)
foo_hat.fit(x, y, method='OLS')
y_pred = foo_hat.predict(x)
print(y_pred.shape)
def test_moments(self):
# np.set_printoptions(precision=3)
data_dir = '/Volumes/External/MUSE_UQ_DATA/Ishigami/Data'
y = []
for r in range(10):
filename = 'DoE_McsE6R{:d}.npy'.format(r)
data_set = np.load(os.path.join(data_dir, filename))
y.append(data_set[-1,:])
moments = uqra.metrics.moment(np.array(y),moment=[1,2,3,4], axis=1, multioutput='raw_values')
print(np.mean(moments, axis=1))
print(np.std(moments, axis=1))
print('--> GLK')
y = []
for r in range(10):
filename = 'DoE_McsE6R{:d}_PCE6_GLK.npy'.format(r)
data_set = np.load(os.path.join(data_dir, filename))
y.append(data_set)
moments = uqra.metrics.moment(np.array(y),moment=[1,2,3,4], axis=1, multioutput='raw_values')
print(np.mean(moments, axis=1))
print(np.std(moments, axis=1))
print('--> OLS')
y = []
for r in range(10):
filename = 'DoE_McsE6R{:d}_PCE9_OLS.npy'.format(r)
data_set = np.load(os.path.join(data_dir, filename))
y.append(data_set)
moments = uqra.metrics.moment(np.array(y),moment=[1,2,3,4], axis=1, multioutput='raw_values')
print(np.mean(moments, axis=1))
print(np.std(moments, axis=1))
print('--> OLSLARS')
y = []
for r in range(10):
filename = 'DoE_McsE6R{:d}_PCE9_OLSLARS.npy'.format(r)
data_set = np.load(os.path.join(data_dir, filename))
y.append(data_set)
moments = uqra.metrics.moment(np.array(y),moment=[1,2,3,4], axis=1, multioutput='raw_values')
print(np.mean(moments, axis=1))
print(np.std(moments, axis=1))
print('--> LASSOLARS')
y = []
for r in range(10):
filename = 'DoE_McsE6R{:d}_PCE9_LASSOLARS.npy'.format(r)
data_set = np.load(os.path.join(data_dir, filename))
y.append(data_set)
moments = uqra.metrics.moment(np.array(y),moment=[1,2,3,4], axis=1, multioutput='raw_values')
print(np.mean(moments, axis=1))
print(np.std(moments, axis=1))
def test_loo(self):
# Loading some example data
X, y = datasets.load_boston(return_X_y=True)
# X = X[:100,:2]
# y = y[:100]
# X = np.array([[0, 0], [1, 1], [2, 2]])
# y = np.array([0, 1, 2])
# print(X.shape)
print(y[:5])
# Training classifiers
reg1 = LinearRegression()
reg1.fit(X,y)
y1 = reg1.predict(X)
# print(reg1.coef_)
print(y1[:5])
# b = np.linalg.lstsq(X,y)[0]
# # print(b)
# y2 = np.dot(X, np.array(b))
# print(y2[:5])
mse = []
kf = KFold(n_splits=X.shape[0])
residual = []
for train_index, test_index in kf.split(X):
X_train, X_test = X[train_index], X[test_index]
y_train, y_test = y[train_index], y[test_index]
# H1 = np.linalg.inv(np.dot(X_train.T, X_train))
# H2 = np.dot(H1, X_train.T)
# H3 = np.dot(H2, y_train)
# y_hat = np.dot(X_test, H3)
# residual.append(y_test[0]- y_hat[0])
reg1.fit(X_train, y_train)
y_pred = reg1.predict(X_test)
residual.append(y_test[0] - y_pred[0])
# mse.append(uqra.metrics.mean_squared_error(y_test, y_pred))
Q, R = np.linalg.qr(X)
H = np.dot(Q, Q.T)
h = np.diagonal(H)
y_hat = np.dot(H, y)
e = (y-y_hat)/(1-h)
print(y_hat[:5])
print('e:')
print(np.mean(np.array(residual)**2))
print(np.mean(np.array(e)**2))
# print(uqra.metrics.leave_one_out_error(X,y,is_adjusted=False))
# print(np.mean(mse))
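    # Reader note (added): the block above checks the closed-form leave-one-out identity
    # for ordinary least squares.  With hat matrix H = X (X^T X)^{-1} X^T = Q Q^T and
    # leverages h_ii = H[i, i], the LOO residual of sample i is
    #   e_i = (y_i - y_hat_i) / (1 - h_ii),
    # which is why np.mean(e**2) matches the mean squared residual from the explicit
    # KFold(n_splits=n) loop.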
def test_QuadratureDesign(self):
print('>>> 1D quadrature design:')
p = 4
doe = uqra.QuadratureDesign(p, ndim=1, dist_names=['uniform',])
doe.samples()
print(' Legendre:')
print(' {:<15s} : {}'.format('Abscissa', np.around(doe.u, 2)))
print(' {:<15s} : {}'.format('Weights' , np.around(doe.w, 2)))
doe = uqra.QuadratureDesign(p, ndim=1, dist_names=['normal',])
doe.samples()
print(' Hermite:')
print(' {:<15s} : {}'.format('Abscissa', np.around(doe.u, 2)))
print(' {:<15s} : {}'.format('Weights' , np.around(doe.w, 2)))
print('>>> 1D quadrature design: Changing interval ')
a = -np.pi
b = np.pi
loc = a
scale = b - loc
print(' Legendre ({},{})'.format(np.around(a,2), np.around(b,2)))
doe = uqra.QuadratureDesign(p, ndim=1, dist_names=['uniform',])
doe.samples()
        print('     From changing interval after uqra.doe:')
print(' {:<15s} : {}'.format('Abscissa', np.around((b-a)/2*doe.u + (a+b)/2, 2)))
print(' {:<15s} : {}'.format('Weights' , np.around((b-a)/2*doe.w, 2)))
doe = uqra.QuadratureDesign(p, ndim=1, dist_names=['uniform',], dist_theta=[(loc, scale)])
doe.samples()
print(' Directly from uqra.doe:')
print(' {:<15s} : {}'.format('Abscissa', np.around(doe.u, 2)))
print(' {:<15s} : {}'.format('Weights' , np.around(doe.w, 2)))
print('>>> 2D quadrature design:')
p = 4
doe = uqra.QuadratureDesign(p, ndim=2, dist_names=['uniform',])
doe.samples()
print(' Legendre:')
print(' {:<15s} :\n {}'.format('Abscissa', np.around(doe.u, 2)))
print(' {:<15s} :\n {}'.format('Weights' , np.around(doe.w, 2)))
doe = uqra.QuadratureDesign(p, ndim=2, dist_names=['normal',])
doe.samples()
print(' Hermite:')
print(' {:<15s} :\n {}'.format('Abscissa', np.around(doe.u, 2)))
print(' {:<15s} :\n {}'.format('Weights' , np.around(doe.w, 2)))
def test_RandomDesign(self):
doe = uqra.RandomDesign('MCS', n_samples=1e6, ndim=3, dist_names='uniform', dist_theta=[(-np.pi, 2*np.pi),]*3)
doe.samples()
def test_LatinHyperCube(self):
doe = uqra.LHS(distributions=[sp.stats.norm,]*2)
doe_u, doe_x = doe.samples(2000)
print(doe_x.shape)
print(np.mean(doe_x, axis=1))
print(np.std(doe_x, axis=1))
np.save('/Users/jinsongliu/BoxSync/PhD_UT/Working_Papers/AdaptiveSparsePCE_OED/Data/LHS_Normal_2000', doe_x)
# doe = uqra.LHS(n_samples=1e3,dist_names=['uniform', 'norm'],ndim=2,dist_theta=[(-1, 2*2), (2,1)])
# doe.samples()
# print(np.mean(doe.x, axis=1))
# print(np.std(doe.x, axis=1))
def test_OptimalDesign(self):
"""
Optimal Design
"""
### Ishigami function
# data_dir = '/Users/jinsongliu/External/MUSE_UQ_DATA/Ishigami/Data'
### SDOF system
data_dir = '/Users/jinsongliu/External/MUSE_UQ_DATA/linear_oscillator/Data'
# data_dir = 'E:\Run_MUSEUQ'
np.random.seed(100)
# dist_x = cp.Normal()
dist_u= cp.Iid(cp.Normal(),2)
u_samples = dist_u.sample(100)
basis = cp.orth_ttr(10,dist_u)
X = basis(*u_samples).T
doe = uqra.OptimalDesign('D', n_samples=10)
doe_index = doe.samples(X, is_orth=True)
doe_index = doe.adaptive(X, n_samples=10)
print(doe_index)
doe_index = doe.adaptive(X, n_samples=10)
print(doe_index)
### 2D
# quad_orders = range(4,11)
# alpha = [1.0, 1.1, 1.3, 1.5, 2.0,2.5, 3.0,3.5, 5]
# dist_u= cp.Iid(cp.Normal(),2)
# for iquad_orders in quad_orders:
# basis = cp.orth_ttr(iquad_orders-1,dist_u)
# for r in range(10):
# filename = 'DoE_McsE6R{:d}_stats.npy'.format(r)
# data_set = np.load(os.path.join(data_dir, filename))
# samples_y = np.squeeze(data_set[:,4,:]).T
# filename = 'DoE_McsE6R{:d}.npy'.format(r)
# data_set = np.load(os.path.join(data_dir, filename))
# samples_u = data_set[0:2, :]
# samples_x = data_set[2:4, :]
# # samples_y = data_set[6 , :].reshape(1,-1)
# print('Quadrature Order: {:d}'.format(iquad_orders))
# print('Candidate samples filename: {:s}'.format(filename))
# print(' >> Candidate sample set shape: {}'.format(samples_u.shape))
# design_matrix = basis(*samples_u).T
# print(' >> Candidate Design matrix shape: {}'.format(design_matrix.shape))
# for ia in alpha:
# print(' >> Oversampling rate : {:.2f}'.format(ia))
# doe_size = min(int(len(basis)*ia), 10000)
# doe = uqra.OptimalDesign('S', n_samples = doe_size )
# doe.samples(design_matrix, u=samples_u, is_orth=True)
# data = np.concatenate((doe.I.reshape(1,-1),doe.u,samples_x[:,doe.I], samples_y[:,doe.I]), axis=0)
# filename = os.path.join(data_dir, 'DoE_McsE6R{:d}_p{:d}_OptS{:d}'.format(r,iquad_orders,doe_size))
# np.save(filename, data)
# for ia in alpha:
# print(' >> Oversampling rate : {:.2f}'.format(ia))
# doe_size = min(int(len(basis)*ia), 10000)
# doe = uqra.OptimalDesign('D', n_samples = doe_size )
# doe.samples(design_matrix, u=samples_u, is_orth=True)
# data = np.concatenate((doe.I.reshape(1,-1),doe.u,samples_x[:,doe.I], samples_y[:,doe.I]), axis=0)
# filename = os.path.join(data_dir, 'DoE_McsE6R{:d}_p{:d}_OptD{:d}'.format(r,iquad_orders,doe_size))
# np.save(filename, data)
def test_gauss_quadrature(self):
"""
https://keisan.casio.com/exec/system/1329114617
"""
print('========================TESTING: 1D GAUSS QUADRATURE=======================')
dists2test = [cp.Uniform(-1,1), cp.Normal(), cp.Gamma(1,1), cp.Beta(1,1)]
rules2test = ['leg', 'hem', 'lag', 'jacobi']
order2test = [2,3,4,5,6,7,8]
for idist2test, irule2test in zip(dists2test, rules2test):
print('-'*50)
            print('>>> Gauss Quadrature with polynomial: {}'.format(const.DOE_RULE_FULL_NAMES[irule2test.lower()]))
uqra.blockPrint()
quad_doe = uqra.DoE('QUAD', irule2test, order2test, idist2test)
uqra_samples = quad_doe.get_samples()
# quad_doe.disp()
uqra.enablePrint()
if irule2test == 'hem':
for i, iorder in enumerate(order2test):
print('>>> order : {}'.format(iorder))
coord1d_e, weight1d_e = np.polynomial.hermite_e.hermegauss(iorder)
print('{:<15s}: {}'.format('probabilist', np.around(coord1d_e,2)))
coord1d, weight1d = np.polynomial.hermite.hermgauss(iorder)
print('{:<15s}: {}'.format('physicist', np.around(coord1d,2)))
print('{:<15s}: {}'.format('uqra', np.around(np.squeeze(uqra_samples[i][:-1,:]),2)))
elif irule2test == 'leg':
for i, iorder in enumerate(order2test):
print('>>> order : {}'.format(iorder))
coord1d, weight1d = np.polynomial.legendre.leggauss(iorder)
print('{:<15s}: {}'.format('numpy ', np.around(coord1d,2)))
print('{:<15s}: {}'.format('uqra', np.around(np.squeeze(uqra_samples[i][:-1,:]),2)))
elif irule2test == 'lag':
for i, iorder in enumerate(order2test):
print('>>> order : {}'.format(iorder))
coord1d, weight1d = np.polynomial.laguerre.laggauss(iorder)
print('{:<15s}: {}'.format('numpy ', np.around(coord1d,2)))
print('{:<15s}: {}'.format('uqra', np.around(np.squeeze(uqra_samples[i][:-1,:]),2)))
elif irule2test == 'jacobi':
print('NOT TESTED YET')
print('Compared results here: https://keisan.casio.com/exec/system/1329114617')
def test_gpce(self):
print('==================TESTING: Generalized PCE (Not using SurrogateModel) ===================')
gpce_dist_to_test = [cp.Normal(), cp.Normal(2,3), cp.Gamma(1,1), cp.Beta(1,1)]
gpce_opt_dist = [cp.Normal(), cp.Normal(), cp.Gamma(1,1), cp.Beta(1,1)]
gpce_opt_rule = ['hem', 'hem', 'lag', 'jacobi']
npoly_orders = range(2,5)
dist_zeta0 = cp.Normal()
for i, igpce_dist in enumerate(gpce_dist_to_test):
dist_zeta1 = gpce_opt_dist[i]
print('>>> Testing # {:d}: gpce: {}, zeta0: {} , zeta1: {}'.format(i, igpce_dist, dist_zeta0, dist_zeta1 ))
for ipoly_order in npoly_orders:
print(' Polynomial order: {:d}'.format(ipoly_order))
## gPCE with hermite chaos
uqra.blockPrint()
quad_doe = uqra.DoE('QUAD', 'hem', [ipoly_order+1], dist_zeta0)
samples_zeta= quad_doe.get_samples()
zeta_cor, zeta_weight = samples_zeta[0]
zeta_cor = zeta_cor.reshape((len(dist_zeta0),-1))
x_cor = igpce_dist.inv(dist_zeta0.cdf(zeta_cor))
zeta_poly, zeta_norms = cp.orth_ttr(ipoly_order, dist_zeta0, retall=True)
x_hat,coeffs = cp.fit_quadrature(zeta_poly, zeta_cor, zeta_weight,np.squeeze(x_cor),retall=True)
uqra.enablePrint()
print('\t Hermite: {}'.format( np.around(coeffs,4)))
## gPCE with optimal chaos
uqra.blockPrint()
quad_doe = uqra.DoE('QUAD', gpce_opt_rule[i], [ipoly_order+1], dist_zeta1)
samples_zeta= quad_doe.get_samples()
zeta_cor, zeta_weight = samples_zeta[0]
zeta_cor = zeta_cor.reshape((len(dist_zeta1),-1))
x_cor = igpce_dist.inv(dist_zeta1.cdf(zeta_cor))
zeta_poly, zeta_norms = cp.orth_ttr(ipoly_order, dist_zeta1, retall=True)
x_hat,coeffs = cp.fit_quadrature(zeta_poly, zeta_cor, zeta_weight, np.squeeze(x_cor), retall=True)
uqra.enablePrint()
print('\t Optimal: {}'.format( np.around(coeffs,4)))
def test_PowerSpectrum(self):
print('========================TESTING: Power Spectrum =======================')
        powerspectrums2test = ['jonswap']
        powerspectrums_args = [(8, 10)]
        df = 0.00001
        f = np.arange(0, 10, df)
        for psd_name, psd_args in zip(powerspectrums2test, powerspectrums_args):
psd = PowerSpectrum(psd_name, *psd_args)
psd_f, psd_pxx = psd.get_pxx(f)
psd_area = np.sum(psd_pxx * df)
np.save(os.path.join(data_dir,psd_name+'_psd_f'), psd_f)
np.save(os.path.join(data_dir,psd_name+'_psd_pxx'), psd_pxx)
tau, acf = psd.get_acf()
np.save(os.path.join(data_dir,psd_name+'_tau'), tau)
np.save(os.path.join(data_dir,psd_name+'_acf'), acf)
t, eta = psd.gen_process()
np.save(os.path.join(data_dir,psd_name+'_t'), t)
np.save(os.path.join(data_dir,psd_name+'_eta'), eta)
print(t, eta)
# t, eta = psd._gen_process_sum()
print('PSD name: {:s}, args: {}, Area: {:.2f}, 4*std:{}'.format(psd_name, psd_args, psd_area, 4*np.std(eta)))
def test_weighted_exceedance(self):
print('========================TESTING: Weighted Exceedance =======================')
# x = np.random.normal(size=1000).reshape(1,-1)
# res1 = stats.cumfreq(x)
# cdf_x = res1.lowerlimit + np.linspace(0, res1.binsize*res1.cumcount.size, res1.cumcount.size)
# cdf_y = res1.cumcount/x.size
# ecdf_y = 1- cdf_y
# ecdf_x = cdf_x
# print(np.around(ecdf_x,2))
# print(np.around(ecdf_y,2))
# res2 = uqhelpers.get_weighted_exceedance(x)
# print(res2.shape)
# print(np.around(res2[0],2))
# print(np.around(res2[1],2))
# orders = [4] ## mcs
orders = range(3,10) ## quad
registry_secret)
assert mount_path == path
if 'v1' in registry_api_versions:
assert get_plugin(plugins, "postbuild_plugins",
"pulp_push")
assert plugin_value_get(plugins, "postbuild_plugins", "pulp_push",
"args", "pulp_registry_name") == pulp_env
else:
with pytest.raises(NoSuchPluginException):
get_plugin(plugins, "postbuild_plugins",
"pulp_push")
if 'v2' in registry_api_versions:
assert get_plugin(plugins, "postbuild_plugins", "pulp_sync")
env = plugin_value_get(plugins, "postbuild_plugins", "pulp_sync",
"args", "pulp_registry_name")
assert env == pulp_env
pulp_secret = plugin_value_get(plugins, "postbuild_plugins",
"pulp_sync", "args",
"pulp_secret_path")
docker_registry = plugin_value_get(plugins, "postbuild_plugins",
"pulp_sync", "args",
"docker_registry")
# pulp_sync config must have the scheme part to satisfy pulp.
assert docker_registry == 'http://registry2.example.com:5000'
else:
with pytest.raises(NoSuchPluginException):
get_plugin(plugins, "postbuild_plugins", "pulp_sync")
if scratch:
with pytest.raises(NoSuchPluginException):
get_plugin(plugins, "postbuild_plugins", "compress")
with pytest.raises(NoSuchPluginException):
get_plugin(plugins, "postbuild_plugins", "tag_from_config")
with pytest.raises(NoSuchPluginException):
get_plugin(plugins, "postbuild_plugins", "import_image")
with pytest.raises(NoSuchPluginException):
get_plugin(plugins, "postbuild_plugins", "pulp_pull")
with pytest.raises(NoSuchPluginException):
get_plugin(plugins, "exit_plugins", "koji_promote")
with pytest.raises(NoSuchPluginException):
get_plugin(plugins, "exit_plugins", "koji_tag_build")
else:
assert get_plugin(plugins, "postbuild_plugins", "compress")
assert get_plugin(plugins, "postbuild_plugins", "tag_from_config")
assert get_plugin(plugins, "postbuild_plugins", "pulp_pull")
assert get_plugin(plugins, "exit_plugins", "koji_promote")
assert get_plugin(plugins, "exit_plugins", "koji_tag_build")
assert (get_plugin(plugins, "postbuild_plugins", "tag_by_labels")
.get('args', {}).get('unique_tag_only', False) == scratch)
@pytest.mark.parametrize(('extra_kwargs', 'expected_name'), (
({'isolated': True, 'release': '1.1'}, TEST_ISOLATED_BUILD_NAME),
({'scratch': True}, TEST_SCRATCH_BUILD_NAME),
({}, TEST_BUILD_CONFIG),
))
def test_render_build_name(self, tmpdir, extra_kwargs, expected_name):
build_request = BuildRequest(INPUTS_PATH)
kwargs = get_sample_prod_params()
kwargs.update(extra_kwargs)
build_request.set_params(**kwargs)
build_json = build_request.render()
assert fnmatch.fnmatch(build_json['metadata']['name'], expected_name)
def test_render_with_yum_repourls(self):
kwargs = {
'git_uri': TEST_GIT_URI,
'git_ref': TEST_GIT_REF,
'git_branch': TEST_GIT_BRANCH,
'user': "john-foo",
'component': TEST_COMPONENT,
'base_image': 'fedora:latest',
'name_label': 'fedora/resultingimage',
'registry_uri': "registry.example.com",
'openshift_uri': "http://openshift/",
'builder_openshift_url': "http://openshift/",
'koji_target': "koji-target",
'kojiroot': "http://root/",
'kojihub': "http://hub/",
'sources_command': "make",
'vendor': "Foo Vendor",
'authoritative_registry': "registry.example.com",
'distribution_scope': "authoritative-source-only",
'registry_api_versions': ['v1'],
'build_from': 'image:buildroot:latest',
'osbs_api': MockOSBSApi(),
}
build_request = BuildRequest(INPUTS_PATH)
# Test validation for yum_repourls parameter
kwargs['yum_repourls'] = 'should be a list'
with pytest.raises(OsbsValidationException):
build_request.set_params(**kwargs)
# Use a valid yum_repourls parameter and check the result
kwargs['yum_repourls'] = ['http://example.com/repo1.repo', 'http://example.com/repo2.repo']
build_request.set_params(**kwargs)
build_json = build_request.render()
plugins = get_plugins_from_build_json(build_json)
repourls = None
for d in plugins['prebuild_plugins']:
if d['name'] == 'add_yum_repo_by_url':
repourls = d['args']['repourls']
assert repourls is not None
assert len(repourls) == 2
assert 'http://example.com/repo1.repo' in repourls
assert 'http://example.com/repo2.repo' in repourls
with pytest.raises(NoSuchPluginException):
get_plugin(plugins, "prebuild_plugins", "check_and_set_rebuild")
with pytest.raises(NoSuchPluginException):
get_plugin(plugins, "prebuild_plugins",
"stop_autorebuild_if_disabled")
assert plugin_value_get(plugins, "prebuild_plugins", "bump_release",
"args", "hub") == "http://hub/"
with pytest.raises(NoSuchPluginException):
get_plugin(plugins, "prebuild_plugins", "koji")
with pytest.raises(NoSuchPluginException):
get_plugin(plugins, "postbuild_plugins", "pulp_push")
with pytest.raises(NoSuchPluginException):
get_plugin(plugins, "postbuild_plugins", "pulp_sync")
with pytest.raises(NoSuchPluginException):
get_plugin(plugins, "postbuild_plugins", "pulp_pull")
assert get_plugin(plugins, "postbuild_plugins", "import_image")
@pytest.mark.parametrize('odcs_insecure', [False, True, None])
@pytest.mark.parametrize('pdc_insecure', [False, True, None])
@pytest.mark.parametrize('odcs_openidc_secret', [None, "odcs-openidc"])
@pytest.mark.parametrize('compose_ids', (None, [], [42], [42, 2]))
def test_render_prod_flatpak(self, odcs_insecure, pdc_insecure,
odcs_openidc_secret, compose_ids):
build_request = BuildRequest(INPUTS_PATH)
kwargs = {
'git_uri': TEST_GIT_URI,
'git_ref': TEST_GIT_REF,
'git_branch': TEST_GIT_BRANCH,
'flatpak': True,
'compose_ids': compose_ids,
'flatpak_base_image': TEST_FLATPAK_BASE_IMAGE,
'odcs_url': "https://odcs.fedoraproject.org/odcs/1",
'pdc_url': "https://pdc.fedoraproject.org/rest_api/v1",
'user': "john-foo",
'base_image': TEST_FLATPAK_BASE_IMAGE,
'name_label': 'fedora/resultingimage',
'registry_uri': "registry.example.com",
'openshift_uri': "http://openshift/",
'builder_openshift_url': "http://openshift/",
'koji_target': "koji-target",
'kojiroot': "http://root/",
'kojihub': "http://hub/",
'sources_command': "make",
'vendor': "Foo Vendor",
'authoritative_registry': "registry.example.com",
'distribution_scope': "authoritative-source-only",
'registry_api_versions': ['v2'],
'build_from': 'image:buildroot:latest',
'osbs_api': MockOSBSApi(),
}
if odcs_insecure is not None:
kwargs['odcs_insecure'] = odcs_insecure
if pdc_insecure is not None:
kwargs['pdc_insecure'] = pdc_insecure
if odcs_openidc_secret is not None:
kwargs['odcs_openidc_secret'] = odcs_openidc_secret
build_request.set_params(**kwargs)
build_json = build_request.render()
plugins = get_plugins_from_build_json(build_json)
plugin = get_plugin(plugins, "prebuild_plugins", "resolve_module_compose")
assert plugin
args = plugin['args']
if compose_ids:
assert args['compose_ids'] == compose_ids
else:
assert 'compose_ids' not in args
assert args['odcs_url'] == kwargs['odcs_url']
assert args['odcs_insecure'] == (False if odcs_insecure is None else odcs_insecure)
assert args['pdc_url'] == kwargs['pdc_url']
assert args['pdc_insecure'] == (False if pdc_insecure is None else pdc_insecure)
if odcs_openidc_secret:
mount_path = get_secret_mountpath_by_name(build_json, odcs_openidc_secret)
assert args['odcs_openidc_secret_path'] == mount_path
else:
            assert 'odcs_openidc_secret_path' not in args
plugin = get_plugin(plugins, "prebuild_plugins", "flatpak_create_dockerfile")
assert plugin
args = plugin['args']
assert args['base_image'] == TEST_FLATPAK_BASE_IMAGE
plugin = get_plugin(plugins, "prebuild_plugins", "koji")
assert plugin
args = plugin['args']
assert args['target'] == "koji-target"
plugin = get_plugin(plugins, "prebuild_plugins", "bump_release")
assert plugin
args = plugin['args']
assert args['append'] is True
assert get_plugin(plugins, "prepublish_plugins", "flatpak_create_oci")
with pytest.raises(NoSuchPluginException):
assert get_plugin(plugins, "prepublish_plugins", "squash")
with pytest.raises(NoSuchPluginException):
assert get_plugin(plugins, "postbuild_plugins", "import_image")
def test_render_prod_not_flatpak(self):
build_request = BuildRequest(INPUTS_PATH)
kwargs = {
'git_uri': TEST_GIT_URI,
'git_ref': TEST_GIT_REF,
'git_branch': TEST_GIT_BRANCH,
'flatpak': False,
'user': "john-foo",
'base_image': 'fedora:latest',
'name_label': 'fedora/resultingimage',
'registry_uri': "registry.example.com",
'openshift_uri': "http://openshift/",
'builder_openshift_url': "http://openshift/",
'koji_target': "koji-target",
'kojiroot': "http://root/",
'kojihub': "http://hub/",
'sources_command': "make",
'vendor': "Foo Vendor",
'authoritative_registry': "registry.example.com",
'distribution_scope': "authoritative-source-only",
'registry_api_versions': ['v2'],
'build_from': 'image:buildroot:latest',
'osbs_api': MockOSBSApi(),
}
build_request.set_params(**kwargs)
build_json = build_request.render()
plugins = get_plugins_from_build_json(build_json)
with pytest.raises(NoSuchPluginException):
get_plugin(plugins, "prebuild_plugins", "resolve_module_compose")
with pytest.raises(NoSuchPluginException):
get_plugin(plugins, "prebuild_plugins", "flatpak_create_dockerfile")
with pytest.raises(NoSuchPluginException):
get_plugin(plugins, "prepublish_plugins", "flatpak_create_oci")
assert get_plugin(plugins, "prepublish_plugins", "squash")
assert get_plugin(plugins, "postbuild_plugins", "import_image")
@pytest.mark.parametrize(('hub', 'disabled', 'release'), (
('http://hub/', False, None),
('http://hub/', True, '1.2.1'),
(None, True, None),
(None, True, '1.2.1'),
))
@pytest.mark.parametrize('flatpak', (True, False))
def test_render_bump_release(self, hub, disabled, release, flatpak):
kwargs = {
'git_uri': TEST_GIT_URI,
'git_ref': TEST_GIT_REF,
'user': "john-foo",
'component': TEST_COMPONENT,
'base_image': 'fedora:latest',
'name_label': 'fedora/resultingimage',
'registry_uri': "registry.example.com",
'openshift_uri': "http://openshift/",
'sources_command': "make",
'vendor': "Foo Vendor",
'authoritative_registry': "registry.example.com",
'distribution_scope': "authoritative-source-only",
'registry_api_versions': ['v1'],
'build_from': 'image:buildroot:latest',
'osbs_api': MockOSBSApi(),
}
if hub:
kwargs['kojihub'] = hub
if release:
kwargs['release'] = release
if flatpak:
kwargs['flatpak'] = flatpak
build_request = BuildRequest(INPUTS_PATH)
build_request.set_params(**kwargs)
build_json = build_request.render()
plugins = get_plugins_from_build_json(build_json)
labels = plugin_value_get(plugins, "prebuild_plugins", "add_labels_in_dockerfile",
"args", "labels")
assert labels.get('release') == release
if disabled:
with pytest.raises(NoSuchPluginException):
get_plugin(plugins, "prebuild_plugins", "bump_release")
return
plugin_args = plugin_value_get(plugins, "prebuild_plugins", "bump_release", "args")
assert plugin_args['hub'] == hub
assert plugin_args.get('append', False) == flatpak
@pytest.mark.parametrize(('hub', 'root', 'disabled'), [
('http://hub/', 'http://root/', False),
(None, None, True),
])
@pytest.mark.parametrize(('allowed_domains'), [
[],
['spam.com'],
['spam', 'bacon.com'],
])
def test_render_fetch_maven_artifacts(self, hub, root, disabled, allowed_domains):
kwargs = {
'git_uri': TEST_GIT_URI,
'git_ref': TEST_GIT_REF,
'user': "john-foo",
'component': TEST_COMPONENT,
'base_image': 'fedora:latest',
'name_label': 'fedora/resultingimage',
'registry_uri': "registry.example.com",
'openshift_uri': "http://openshift/",
'sources_command': "make",
'vendor': "Foo Vendor",
'authoritative_registry': "registry.example.com",
'distribution_scope': "authoritative-source-only",
'registry_api_versions': ['v1'],
'build_from': 'image:buildroot:latest',
'osbs_api': MockOSBSApi(),
}
if hub:
kwargs['kojihub'] = hub
if root:
kwargs['kojiroot'] = root
if allowed_domains:
kwargs['artifacts_allowed_domains'] = allowed_domains
build_request = BuildRequest(INPUTS_PATH)
build_request.set_params(**kwargs)
build_json = build_request.render()
plugins = get_plugins_from_build_json(build_json)
if disabled:
with pytest.raises(NoSuchPluginException):
get_plugin(plugins, "prebuild_plugins", "fetch_maven_artifacts")
else:
assert plugin_value_get(plugins, "prebuild_plugins", "fetch_maven_artifacts",
"args", "koji_hub") == hub
assert plugin_value_get(plugins, "prebuild_plugins", "fetch_maven_artifacts",
"args", "koji_root") == root
if allowed_domains:
assert plugin_value_get(plugins, "prebuild_plugins", "fetch_maven_artifacts",
"args", "allowed_domains") == allowed_domains
else:
with pytest.raises(KeyError):
plugin_value_get(plugins, "prebuild_plugins", "fetch_maven_artifacts",
"args", "allowed_domains")
@pytest.mark.parametrize(('extra_kwargs', 'has_platform_tag', 'extra_tags', 'primary_tags'), (
# Worker build cases
({'build_type': BUILD_TYPE_WORKER, 'platform': 'x86_64'},
True, (), ()),
({'build_type': BUILD_TYPE_WORKER, 'platform': 'x86_64'},
True, ('tag1', 'tag2'), ()),
({'build_type': BUILD_TYPE_WORKER, 'platform': 'x86_64', 'scratch': True},
True, (), ()),
({'build_type': BUILD_TYPE_WORKER, 'platform': 'x86_64', 'isolated': True,
'release': '1.1'},
True, (), ()),
# Orchestrator build cases
({'build_type': BUILD_TYPE_ORCHESTRATOR, 'platforms': ['x86_64']},
False, ('tag1', 'tag2'), ('latest', '{version}', '{version}-{release}', 'tag1', 'tag2')),
({'build_type': BUILD_TYPE_ORCHESTRATOR, 'platforms': ['x86_64']},
False, (), ('latest', '{version}', '{version}-{release}')),
({'build_type': BUILD_TYPE_ORCHESTRATOR, 'platforms': ['x86_64'], 'scratch': True},
False, ('tag1', 'tag2'), ()),
({'build_type': BUILD_TYPE_ORCHESTRATOR, 'platforms': ['x86_64'], 'isolated': True,
'release': '1.1'},
False, ('tag1', 'tag2'), ('{version}-{release}',)),
# When build_type is not specified, no primary tags are set
({}, False, (), ()),
({}, False, ('tag1', 'tag2'), ()),
({'scratch': True}, False, (), ()),
({'isolated': True, 'release': '1.1'}, False, (), ()),
))
def test_render_tag_from_config(self, tmpdir, extra_kwargs, has_platform_tag, extra_tags,
primary_tags):
kwargs = get_sample_prod_params()
kwargs.pop('platforms', None)
kwargs.pop('platform', None)
kwargs.update(extra_kwargs)
kwargs['arrangement_version'] = 4
expected_primary = set(primary_tags)
if extra_tags:
self._mock_addional_tags_config(str(tmpdir), extra_tags)
repo_info = RepoInfo(additional_tags=AdditionalTagsConfig(dir_path=str(tmpdir)))
build_json = self._render_tag_from_config_build_request(kwargs, repo_info)
plugins = get_plugins_from_build_json(build_json)
assert get_plugin(plugins, 'postbuild_plugins', 'tag_from_config')
tag_suffixes = plugin_value_get(plugins, 'postbuild_plugins', 'tag_from_config',
'args', 'tag_suffixes')
assert len(tag_suffixes['unique']) == 1
if has_platform_tag:
unique_tag_suffix = tag_suffixes['unique'][0]
assert unique_tag_suffix.endswith('-' + kwargs.get('platform', '')) == has_platform_tag
assert len(tag_suffixes['primary']) == len(expected_primary)
assert set(tag_suffixes['primary']) == expected_primary
def test_render_tag_from_container_yaml(self):
kwargs = get_sample_prod_params()
kwargs.pop('platform', None)
kwargs['platforms'] = ['x86_64', 'ppc64le']
kwargs['build_type'] = BUILD_TYPE_ORCHESTRATOR
kwargs['arrangement_version'] = 3
tags = set(['spam', 'bacon', 'eggs'])
expected_primary = set(['{version}-{release}', 'spam', 'bacon', 'eggs'])
repo_info = RepoInfo(additional_tags=AdditionalTagsConfig(tags=tags))
build_json = self._render_tag_from_config_build_request(kwargs, repo_info=repo_info)
plugins = get_plugins_from_build_json(build_json)
tag_suffixes = plugin_value_get(plugins, 'postbuild_plugins', 'tag_from_config',
'args', 'tag_suffixes')
assert len(tag_suffixes['primary']) == len(expected_primary)
assert set(tag_suffixes['primary']) == expected_primary
def test_render_tag_from_container_yaml_contains_bad_tag(self):
kwargs = get_sample_prod_params()
kwargs.pop('platform', None)
kwargs['platforms'] = ['x86_64', 'ppc64le']
kwargs['build_type'] = BUILD_TYPE_ORCHESTRATOR
kwargs['arrangement_version'] = 3
expected_primary = set(['{version}-{release}', 'bacon', 'eggs'])
repo_info = RepoInfo(additional_tags=AdditionalTagsConfig(tags=expected_primary))
build_json = self._render_tag_from_config_build_request(kwargs, repo_info=repo_info)
plugins = get_plugins_from_build_json(build_json)
tag_suffixes = plugin_value_get(plugins, 'postbuild_plugins', 'tag_from_config',
'args', 'tag_suffixes')
assert len(tag_suffixes['primary']) == len(expected_primary)
assert set(tag_suffixes['primary']) == expected_primary
def test_render_tag_from_config_unmodified(self):
kwargs = get_sample_prod_params()
kwargs.pop('platform', None)
kwargs['platforms'] = ['x86_64', 'ppc64le']
kwargs['build_type'] = BUILD_TYPE_ORCHESTRATOR
kwargs['arrangement_version'] = 3
expected_primary = set(['spam', 'bacon', 'eggs'])
tag_suffixes = {'primary': ['spam', 'bacon', 'eggs']}
build_json = self._render_tag_from_config_build_request(kwargs, tag_suffixes=tag_suffixes)
plugins = get_plugins_from_build_json(build_json)
assert get_plugin(plugins, 'postbuild_plugins', 'tag_from_config')
tag_suffixes = plugin_value_get(plugins, 'postbuild_plugins', 'tag_from_config',
'args', 'tag_suffixes')
assert len(tag_suffixes['primary']) == len(expected_primary)
assert set(tag_suffixes['primary']) == expected_primary
def _render_tag_from_config_build_request(self, kwargs, repo_info=None,
tag_suffixes='{{TAG_SUFFIXES}}'):
build_request = BuildRequest(INPUTS_PATH)
build_request.set_params(**kwargs)
repo_info = repo_info or RepoInfo()
build_request.set_repo_info(repo_info)
build_request.customize_conf['enable_plugins'].append(
{
"plugin_type": 'postbuild_plugins',
"plugin_name": 'tag_from_config',
"plugin_args": {
'tag_suffixes': tag_suffixes,
},
}
)
return build_request.render()
    def _mock_addional_tags_config(self, dir_path, tags):
        # Write the additional-tags file that AdditionalTagsConfig(dir_path=...) reads.
        # The tail of this helper was truncated in the source; the body below is a
        # reconstruction based on how the tests above use it.
        with open(os.path.join(dir_path, ADDITIONAL_TAGS_FILE), 'w') as f:
            f.write('\n'.join(tags))
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import unittest
import itertools
import numpy as np
import math
from op_test import OpTest
# numpy.round uses round-half-to-even (banker's rounding), which differs from the C++
# round function, so we use round_c instead of numpy.round to align the output data.
def round_c_single_element(val):
dtype = type(val)
if val >= 0:
return dtype(np.floor(val + 0.5))
return dtype(np.ceil(val - 0.5))
round_c = np.vectorize(round_c_single_element)
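# Worked example (added for clarity): numpy rounds ties to the nearest even value,
# while round_c rounds ties away from zero, matching the C++ behaviour.
#   np.round(0.5)  -> 0.0     round_c(np.float32(0.5))  -> 1.0
#   np.round(2.5)  -> 2.0     round_c(np.float32(2.5))  -> 3.0
#   np.round(-2.5) -> -2.0    round_c(np.float32(-2.5)) -> -3.0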
def get_compute_type(dtype):
assert dtype in [np.float16, np.float32, np.float64]
if dtype == np.float16:
return np.float32
return dtype
class TestFakeQuantizeAbsMaxOp(OpTest):
def setUp(self):
self.op_type = 'fake_quantize_abs_max'
self.attrs = {'bit_length': 8}
def _fake_quantize_abs_max(self, dtype, input_shape, distribution):
input_data = distribution(input_shape).astype(dtype)
compute_type = get_compute_type(dtype)
scale = np.max(np.abs(input_data))
bnt = (1 << (self.attrs['bit_length'] - 1)) - 1
inv_scale = 1.0 / (scale + 1e-6) if scale < 1e-30 else 1.0 / scale
output_data = round_c(input_data.astype(compute_type) * inv_scale * bnt)
self.inputs = {'X': input_data}
self.outputs = {'Out': output_data, 'OutScale': scale}
self.dtype = dtype
self.check_output()
def test_fake_quantize_abs_max(self):
self._fake_quantize_abs_max(np.float32, (124, 240), np.random.random)
def test_fake_quantize_abs_max_float16(self):
self._fake_quantize_abs_max(np.float16, (124, 240), np.random.random)
def test_fake_quantize_abs_max_underflow(self):
self._fake_quantize_abs_max(np.float32, (10, 10), np.zeros)
def test_fake_quantize_abs_max_underflow2(self):
self._fake_quantize_abs_max(np.float32, (10, 10),
lambda shape: np.full(shape, 1e-40))
class TestFakeChannelWiseQuantizeAbsMaxOp(OpTest):
def setUp(self):
self.op_type = 'fake_channel_wise_quantize_abs_max'
self.attrs = {'bit_length': 8}
def _fake_channel_wise_quantize_abs_max(self, dtype, input_shape,
quant_axis, distribution):
assert quant_axis in [0, 1], 'quant_axis should be 0 or 1.'
input_data = distribution(input_shape).astype(dtype)
compute_type = get_compute_type(dtype)
bnt = (1 << (self.attrs['bit_length'] - 1)) - 1
compute_axis = tuple(i for i in range(len(input_shape))
if i != quant_axis)
scale_broadcast = np.amax(input_data, axis=compute_axis, keepdims=True)
output_data = round_c(bnt * input_data.astype(compute_type) /
scale_broadcast)
if quant_axis == 1:
scale_broadcast = np.transpose(scale_broadcast,
(1, ) + compute_axis)
scale = scale_broadcast.reshape(input_shape[quant_axis], -1)[:, 0]
self.inputs = {'X': input_data}
self.outputs = {'Out': output_data, 'OutScale': scale}
self.dtype = dtype
self.attrs['quant_axis'] = quant_axis
self.check_output()
def test_fake_channel_wise_quantize_abs_max(self):
dtype_options = [np.float32, np.float16]
input_shape_quant_axis_options = [[(20, 15, 6, 6), 0],
[(15, 20, 5, 5), 1], [(30, 15), 0],
[(30, 15), 1]]
for dtype, input_shape_quant_axis in itertools.product(
dtype_options, input_shape_quant_axis_options):
input_shape, quant_axis = input_shape_quant_axis
with self.subTest(dtype=dtype,
input_shape=input_shape,
quant_axis=quant_axis):
self._fake_channel_wise_quantize_abs_max(
dtype, input_shape, quant_axis, np.random.random)
class TestFakeQuantizeRangeAbsMaxOp(OpTest):
def setUp(self):
self.op_type = 'fake_quantize_range_abs_max'
self.attrs = {'bit_length': 5, 'window_size': 1}
def _fake_quantize_range_abs_max(self,
dtype,
input_shape,
distribution,
is_test=False):
input_data = distribution(input_shape).astype(dtype)
compute_type = get_compute_type(dtype)
bnt = (1 << (self.attrs['bit_length'] - 1)) - 1
in_scale = np.zeros(1).astype(dtype)
out_scale = np.zeros(self.attrs['window_size']).astype(dtype)
out_scale[0] = np.max(np.abs(input_data))
if is_test:
out_scale[0] = in_scale[0] = out_scale[0] - 1.0
clip_data = np.clip(input_data, -in_scale, in_scale)
else:
clip_data = input_data
output_data = round_c(
clip_data.astype(compute_type) / out_scale[0] * bnt)
self.inputs = {
'X': input_data,
'Iter': np.zeros(1).astype(np.int64),
'InScale': in_scale
}
self.outputs = {
'Out': output_data,
'OutScale': out_scale[0],
'OutScales': out_scale
}
self.dtype = dtype
self.attrs['is_test'] = is_test
self.check_output()
def test_fake_quantize_range_abs_max(self):
dtype_options = [np.float32, np.float16]
is_test_options = [False, True]
for dtype, is_test in itertools.product(dtype_options, is_test_options):
self.attrs['bit_length'] = 8 if is_test else 5
with self.subTest(dtype=dtype, is_test=is_test):
self._fake_quantize_range_abs_max(
dtype, (8, 16, 7, 7),
lambda shape: (np.random.random(shape) - 0.5) * 10,
is_test=is_test)
class TestMovingAverageAbsMaxScaleOp(OpTest):
def setUp(self):
self.op_type = 'moving_average_abs_max_scale'
self.attrs = {'moving_rate': float(0.9), 'is_test': False}
def _moving_average_abs_max_scale(self, dtype, input_shape, distribution):
input_data = distribution(input_shape).astype(dtype)
in_accum = np.ones(1).astype(dtype)
in_state = np.ones(1).astype(dtype)
out_accum = self.attrs['moving_rate'] * in_accum[0] + np.max(
np.abs(input_data))
out_state = self.attrs['moving_rate'] * in_state[0] + 1.0
out_scale = out_accum / out_state
self.inputs = {
'X': input_data,
'InAccum': in_accum,
'InState': in_state
}
self.outputs = {
'Out': input_data,
'OutAccum': out_accum,
'OutState': out_state,
'OutScale': out_scale
}
self.dtype = dtype
self.check_output()
def test_moving_average_abs_max(self):
self._moving_average_abs_max_scale(np.float32, (8, 16, 7, 7),
np.random.random)
class TestFakeQuantizeMovingAverageAbsMaxOp(OpTest):
def setUp(self):
self.op_type = 'fake_quantize_moving_average_abs_max'
self.attrs = {'bit_length': 5, 'moving_rate': 0.9, 'is_test': False}
def _fake_quantize_moving_average_abs_max(self,
dtype,
input_shape,
distribution,
dequantize=False,
with_gradient=False):
input_data = distribution(input_shape).astype(dtype)
compute_type = get_compute_type(dtype)
bnt = (1 << (self.attrs['bit_length'] - 1)) - 1
in_accum = np.ones(1).astype(dtype)
in_state = np.ones(1).astype(dtype)
in_scale = np.array([0.001]).astype(dtype)
out_accum = np.zeros(1).astype(dtype)
out_state = np.zeros(1).astype(dtype)
out_scale = np.zeros(1).astype(dtype)
out_accum[0] = self.attrs['moving_rate'] * in_accum[0] + np.max(
np.abs(input_data))
out_state[0] = self.attrs['moving_rate'] * in_state[0] + 1.0
out_scale = out_accum / out_state
round_data = round_c(input_data.astype(compute_type) / out_scale * bnt)
if dequantize:
output_data = (round_data * out_scale / bnt).astype(dtype)
self.op_type = 'fake_quantize_dequantize_moving_average_abs_max'
else:
output_data = round_data.astype(dtype)
self.inputs = {
'X': input_data,
'InScale': in_scale,
'InAccum': in_accum,
'InState': in_state
}
self.outputs = {
'Out': output_data,
'OutAccum': out_accum,
'OutState': out_state,
'OutScale': out_scale
}
self.dtype = dtype
self.check_output()
if with_gradient:
gradient = [
np.ones(input_data.shape) / np.product(input_data.shape)
]
self.check_grad(['X'], 'Out', user_defined_grads=gradient)
def test_fake_quantize_moving_average_abs_max(self):
self._fake_quantize_moving_average_abs_max(np.float32, (8, 16, 7, 7),
np.random.random)
def test_fake_quantize_moving_average_abs_max_float16(self):
self._fake_quantize_moving_average_abs_max(np.float16, (8, 16, 7, 7),
np.random.random)
def test_fake_quantize_dequantize_moving_average_abs_max(self):
self._fake_quantize_moving_average_abs_max(np.float32, (8, 16, 7, 7),
np.random.random,
dequantize=True,
with_gradient=True)
class TestFakeQuantizeDequantizeAbsMaxOp(OpTest):
def setUp(self):
self.op_type = 'fake_quantize_dequantize_abs_max'
self.attrs = {'bit_length': 8}
def _fake_quantize_dequantize_abs_max(self, dtype, input_shape,
distribution):
input_data = distribution(input_shape).astype(dtype)
scale = np.max(np.abs(input_data)).astype(dtype)
bnt = (1 << (self.attrs['bit_length'] - 1)) - 1
output_data = round_c(input_data / scale * bnt) * scale / bnt
self.inputs = {'X': input_data}
self.outputs = {
'Out': output_data,
'OutScale': np.array(scale).astype(dtype)
}
self.dtype = dtype
self.check_output()
gradient = [np.ones(input_data.shape) / np.product(input_data.shape)]
self.check_grad(['X'], 'Out', user_defined_grads=gradient)
def test_fake_quantize_dequantize_abs_max(self):
self._fake_quantize_dequantize_abs_max(np.float32, (124, 240),
np.random.random)
class TestChannelWiseFakeQuantizeDequantizeAbsMaxOp(OpTest):
def setUp(self):
self.op_type = 'fake_channel_wise_quantize_dequantize_abs_max'
self.attrs = {'bit_length': 8}
def _fake_channel_wise_quantize_dequantize_abs_max(self, dtype, input_shape,
quant_axis,
distribution):
assert quant_axis in [0, 1], 'quant_axis should be 0 or 1.'
input_data = distribution(input_shape).astype(dtype)
compute_type = get_compute_type(dtype)
bnt = (1 << (self.attrs['bit_length'] - 1)) - 1
output_data = input_data.copy().astype(compute_type)
compute_axis = tuple(i for i in range(len(input_shape))
if i != quant_axis)
scale_broadcast = np.amax(input_data, axis=compute_axis, keepdims=True)
output_data = round_c(
bnt * output_data / scale_broadcast) * scale_broadcast / bnt
if quant_axis == 1:
scale_broadcast = np.transpose(scale_broadcast,
(1, ) + compute_axis)
scale = scale_broadcast.reshape(input_shape[quant_axis], -1)[:, 0]
self.inputs = {'X': input_data}
self.outputs = {'Out': output_data, 'OutScale': scale}
self.dtype = dtype
self.attrs['quant_axis'] = quant_axis
self.check_output()
gradient = [np.ones(input_data.shape) / np.product(input_data.shape)]
self.check_grad(['X'], 'Out', user_defined_grads=gradient)
def test_channel_wise_fake_quant_dequant_abs_max(self):
input_shape_quant_axis_options = [[(3, 4, 64, 64), 0],
[(15, 20, 5, 5), 1], [(30, 15), 0],
[(30, 15), 1]]
for input_shape, quant_axis in input_shape_quant_axis_options:
with self.subTest(input_shape=input_shape, quant_axis=quant_axis):
self._fake_channel_wise_quantize_dequantize_abs_max(
np.float32, input_shape, quant_axis, np.random.random)
def quantize_max_abs(x, max_range):
scale = np.max(np.abs(x).flatten())
y = np.round(x / scale * max_range)
return y, scale
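# Small worked example (added): with an 8-bit signed range, max_range = 127,
#   x = np.array([-0.5, 0.25, 1.0])
#   quantize_max_abs(x, 127) -> (array([-64.,  32., 127.]), 1.0)
# i.e. the scale is max(|x|) and values are rounded onto [-max_range, max_range].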
def channel_wise_quantize_max_abs(x, quant_bit=8, quant_axis=0):
assert quant_axis in [0, 1], "The quant_axis should be 0 or 1."
scales = []
y = x.copy()
max_range = math.pow(2, quant_bit - 1) - 1
if quant_axis == 0:
for i in range(x.shape[0]):
scale = np.max(np.abs(x[i])).astype("float32")
scales.append(scale)
y[i] = np.round(x[i] * max_range / scale)
elif quant_axis == 1:
for i in range(x.shape[1]):
scale = np.max(np.abs(x[:, i])).astype("float32")
scales.append(scale)
y[:, i] = np.round(x[:, i] * max_range / scale)
return y, scales
class TestChannelWiseQuantizeOp(OpTest):
def set_args(self):
self.bit_length = 8
self.data_type = "float32"
self.quant_axis = 0
def setUp(self):
self.set_args()
self.op_type = "quantize_linear"
x = np.random.randn(4, 3, 64, 64).astype(self.data_type)
yq, scale = channel_wise_quantize_max_abs(x, self.bit_length,
self.quant_axis)
scale = np.array(scale).astype(self.data_type)
zero_point = np.zeros(scale.shape, dtype="int32")
self.inputs = {'X': x, 'Scale': scale, 'ZeroPoint': zero_point}
self.attrs = {
'bit_length': self.bit_length,
'quant_axis': self.quant_axis
}
self.outputs = {'Y': yq}
def test_check_output(self):
self.check_output()
class TestChannelWiseQuantizeOp1(TestChannelWiseQuantizeOp):
def set_args(self):
self.bit_length = 8
self.data_type = "float32"
self.quant_axis = 1
class TestChannelWiseQuantizeOpTrain(OpTest):
def set_args(self):
self.bit_length = 8
self.data_type = "float32"
self.quant_axis = 0
self.is_test = False
def setUp(self):
self.set_args()
self.op_type = "quantize_linear"
x = np.random.randn(4, 3, 64, 64).astype(self.data_type)
yq, scale = channel_wise_quantize_max_abs(x, self.bit_length,
self.quant_axis)
scale = np.array(scale).astype(self.data_type)
zero_point = np.zeros(scale.shape, dtype="int32")
self.inputs = {'X': x, 'Scale': scale, 'ZeroPoint': zero_point}
self.attrs = {
'bit_length': self.bit_length,
'quant_axis': self.quant_axis,
'is_test': self.is_test
}
self.outputs = {'Y': yq, 'OutScale': scale}
def test_check_output(self):
self.check_output()
class TestquantizeOp(OpTest):
def set_args(self):
self.bit_length = 8
self.quant_axis = -1
self.max_range = math.pow(2, self.bit_length - 1) - 1
self.data_type = "float32"
def setUp(self):
self.set_args()
self.op_type = "quantize_linear"
x = np.random.randn(31, 65).astype(self.data_type)
yq, scale = quantize_max_abs(x, self.max_range)
scale = np.array(scale).astype(self.data_type)
zero_point = np.zeros(scale.shape, dtype="int32")
self.inputs = {'X': x, 'Scale': scale, 'ZeroPoint': zero_point}
self.attrs = {
'bit_length': self.bit_length,
'quant_axis': self.quant_axis,
}
self.outputs = {'Y': yq}
def test_check_output(self):
self.check_output()
import titration_class as ts
"""pK values and names taken from http://www.periodensystem-online.de/index.php (tyty)"""
acidic_water = ts.Compound(name="Water", acidic=True, pKas=[14.0])
basic_water = ts.Compound(name="Water", acidic=False, pKas=[14.0])
# Acids
Hydrochloric = ts.Compound(name="HCl", acidic=True, pKas=[-6.5])
Hydroiodic = ts.Compound(name="HI", acidic=True, pKas=[-10])
Nitric = ts.Compound(name="HNO3", acidic=True, pKas=[-1.4])
Hydrobromic = ts.Compound(name="HBr", acidic=True, pKas=[-8.7])
Hydroperchloric = ts.Compound(name="Perchloric Acid", acidic=True, pKas=[-8])
P_Toluenesulfonic = ts.Compound(name="p-Toluenesolfonic Acid", acidic=True, pKas=[-2.8])
Propane1sulfonic = ts.Compound(name="1-propanesulfonic Acid", acidic=True, pKas=[-1.49])
Propane2sulfonic = ts.Compound(name="2-propanesulfonic Acid", acidic=True, pKas=[-1.79])
Amidosulfonic = ts.Compound(name="Amidosulfonic", acidic=True, pKas=[0.99])
Benzenesulfonic = ts.Compound(name="Benzenesulfonic", acidic=True, pKas=[-2.5])
Bromic = ts.Compound(name="Bromic", acidic=True, pKas=[0.0])
Butanesulfonic = ts.Compound(name="Butanesulfonic", acidic=True, pKas=[-1.68])
Chlorous = ts.Compound(name="Chlorous", acidic=True, pKas=[1.97])
Chlorosulfonic = ts.Compound(name="Chlorosulfonic", acidic=True, pKas=[-10.43])
Chloric = ts.Compound(name="Chloric", acidic=True, pKas=[-2.7])
Chromic = ts.Compound(name="Chromic", acidic=True, pKas=[-0.61])
Dibromoacetic = ts.Compound(name="Dibromoacetic", acidic=True, pKas=[1.39])
Dichloroacetic = ts.Compound(name="Dichloroacetic", acidic=True, pKas=[1.29])
Dichromic = ts.Compound(name="Dichromic", acidic=True, pKas=[-4.5])
Difluoroacetic = ts.Compound(name="Difluoroacetic", acidic=True, pKas=[1.24])
Difluorophosphoric = ts.Compound(name="Difluorophosphoric", acidic=True, pKas=[-1.5])
Diiodoacetic = ts.Compound(name="Diiodoacetic", acidic=True, pKas=[1.49])
Dimethylphosphonic = ts.Compound(name="Dimethylphosphonic", acidic=True, pKas=[1.29])
Disulfuric = ts.Compound(name="Disulfuric", acidic=True, pKas=[-12.0])
Dithionic = ts.Compound(name="Dithionic", acidic=True, pKas=[-3.4, 0.35])
Ethanesulfonic = ts.Compound(name="Ethanesulfonic", acidic=True, pKas=[-1.68])
Fluorosulfonic = ts.Compound(name="Fluorosulfonic", acidic=True, pKas=[-14.0])
Heptafluoropropanesulfonic = ts.Compound(name="Heptafluoropropanesulfonic", acidic=True, pKas=[-5.0])
EDTA = ts.Compound(name="EDTA", acidic=True, pKas=[0.0, 1.5, 2.00, 2.69, 6.13, 10.37]) # Pg 268 QCA
Citric = ts.Compound(name="Citric Acid", acidic=True, pKas=[3.13, 4.76, 6.40])
Carbonic = ts.Compound(name="Carbonic Acid", acidic=True, pKas=[6.37, 10.25])
Acetic = ts.Compound(name="Acetic Acid", acidic=True, pKas=[4.75])
Ammonium = ts.Compound(name="Ammonium", acidic=True, pKas=[9.2])
Malic = ts.Compound(name="Malic", acidic=True, pKas=[3.46])
Enanthic = ts.Compound(name="Enanthic", acidic=True, pKas=[4.89])
Acrylic = ts.Compound(name="Acrylic", acidic=True, pKas=[4.23])
Adipic = ts.Compound(name="Adipic", acidic=True, pKas=[4.43])
Alanine = ts.Compound(name="Alanine", acidic=True, pKas=[9.87])
Formic = ts.Compound(name="Formic", acidic=True, pKas=[3.75])
Amidophosphonic = ts.Compound(name="Amidophosphonic", acidic=True, pKas=[2.74])
Arsenic = ts.Compound(name="Arsenic", acidic=True, pKas=[2.25])
Azelaic = ts.Compound(name="Azelaic", acidic=True, pKas=[4.53])
Benzoic = ts.Compound(name="Benzoic", acidic=True, pKas=[4.21])
Succinic = ts.Compound(name="Succinic", acidic=True, pKas=[4.16])
Hydrocyanic = ts.Compound(name="Hydrocyanic", acidic=True, pKas=[9.21])
Bromoacetic = ts.Compound(name="Bromoacetic", acidic=True, pKas=[2.87])
Brominous = ts.Compound(name="Brominous", acidic=True, pKas=[2.85])
Butanethiol = ts.Compound(name="Butanethiol", acidic=True, pKas=[10.66])
Butyric = ts.Compound(name="Butyric", acidic=True, pKas=[4.82])
Butylarsonic = ts.Compound(name="Butylarsonic", acidic=True, pKas=[4.23])
Butylphosphinic = ts.Compound(name="Butylphosphinic", acidic=True, pKas=[3.41])
Butylphosphonic = ts.Compound(name="Butylphosphonic", acidic=True, pKas=[2.79])
Capric = ts.Compound(name="Capric", acidic=True, pKas=[4.9])
Caproic = ts.Compound(name="Caproic", acidic=True, pKas=[4.88])
Caprylic = ts.Compound(name="Caprylic", acidic=True, pKas=[4.89])
Chloroacetic = ts.Compound(name="Chloroacetic", acidic=True, pKas=[2.83])
Crotonic = ts.Compound(name="Crotonic", acidic=True, pKas=[4.69])
Cyanic = ts.Compound(name="Cyanic", acidic=True, pKas=[3.46])
Diamidophosphonic = ts.Compound(name="Diamidophosphonic", acidic=True, pKas=[4.83])
Dihydrogenperoxodiphosphate = ts.Compound(name="Dihydrogenperoxodiphosphate", acidic=True, pKas=[5.18])
Disulfane = ts.Compound(name="Disulfane", acidic=True, pKas=[5.0])
Dithioarsenic = ts.Compound(name="Dithioarsenic", acidic=True, pKas=[2.4])
Iron_VI = ts.Compound(name="Iron(VI)", acidic=True, pKas=[3.5])
Ethanethiol = ts.Compound(name="Ethanethiol", acidic=True, pKas=[10.5])
Ethylarsonic = ts.Compound(name="Ethylarsonic", acidic=True, pKas=[3.89])
Ethylhydroperoxide = ts.Compound(name="Ethylhydroperoxide", acidic=True, pKas=[11.8])
Ethylphosphinic = ts.Compound(name="Ethylphosphinic", acidic=True, pKas=[3.29])
Ethylphosphonic = ts.Compound(name="Ethylphosphonic", acidic=True, pKas=[2.43])
Fluoroacetic = ts.Compound(name="Fluoroacetic", acidic=True, pKas=[2.57])
Hydrofluoric = ts.Compound(name="Hydrofluoric", acidic=True, pKas=[3.14])
Glycine = ts.Compound(name="Glycine", acidic=True, pKas=[9.6])
Glycolic = ts.Compound(name="Glycolic", acidic=True, pKas=[3.83])
Hexafluoroantimonic = ts.Compound(name="Hexafluoroantimonic", acidic=True, pKas=[-17.0])
Hexafluoroarsenic = ts.Compound(name="Hexafluoroarsenic", acidic=True, pKas=[-13.0])
Hexafluorophosphoric = ts.Compound(name="Hexafluorophosphoric", acidic=True, pKas=[-10.0])
Hexafluorotitan_IV = ts.Compound(name="Hexafluorotitan(IV)", acidic=True, pKas=[2.14])
Hexafluorosilicic = ts.Compound(name="Hexafluorosilicic", acidic=True, pKas=[-3.0])
Hexasulfane = ts.Compound(name="Hexasulfane", acidic=True, pKas=[3.2])
Hydrazidosulfonic = ts.Compound(name="Hydrazidosulfonic", acidic=True, pKas=[3.85])
Hydrogenperoxodiphosphate = ts.Compound(name="Hydrogenperoxodiphosphate", acidic=True, pKas=[7.67])
Hypobromous = ts.Compound(name="Hypobromous", acidic=True, pKas=[8.68])
Hypochlorous = ts.Compound(name="Hypochlorous", acidic=True, pKas=[7.54])
Hypodiphosphoric = ts.Compound(name="Hypodiphosphoric", acidic=True, pKas=[2.22])
Hypoiodous = ts.Compound(name="Hypoiodous", acidic=True, pKas=[10.64])
Hypophosphorous = ts.Compound(name="Hypophosphorous", acidic=True, pKas=[2.23])
Hypo_nitrous = ts.Compound(name="Hypo-nitrous acid", acidic=True, pKas=[7.21])
Hypothiocyanitic = ts.Compound(name="Hypothiocyanitic", acidic=True, pKas=[5.3])
Imidodiphosphoric = ts.Compound(name="Imidodiphosphoric", acidic=True, pKas=[2.0])
Iodic = ts.Compound(name="Iodic", acidic=True, pKas=[0.804])
Iodoacetic = ts.Compound(name="Iodoacetic", acidic=True, pKas=[3.13])
Isocyanic = ts.Compound(name="Isocyanic", acidic=True, pKas=[3.92])
Isopropylhydroperoxide = ts.Compound(name="Isopropylhydroperoxide", acidic=True, pKas=[12.1])
Isopropyloxonium = ts.Compound(name="Isopropyloxonium", acidic=True, pKas=[-3.2])
Isothiocyanic = ts.Compound(name="Isothiocyanic", acidic=True, pKas=[-1.28])
Suberic = ts.Compound(name="Suberic", acidic=True, pKas=[4.51])
Malonic = ts.Compound(name="Malonic", acidic=True, pKas=[2.83])
Manganese_VI = ts.Compound(name="Manganese(VI)", acidic=True, pKas=[5.0])
Mellitic = ts.Compound(name="Mellitic", acidic=True, pKas=[1.4])
Metaarsenous = ts.Compound(name="Metaarsenous", acidic=True, pKas=[9.28])
Metaboric = ts.Compound(name="Metaboric", acidic=True, pKas=[9.12])
Metagermanic = ts.Compound(name="Metagermanic", acidic=True, pKas=[8.59])
Metasilicic = ts.Compound(name="Metasilicic", acidic=True, pKas=[9.51])
Metaniobic = ts.Compound(name="Metaniobic", acidic=True, pKas=[7.4])
Metaperiodic = ts.Compound(name="Metaperiodic", acidic=True, pKas=[1.64])
Metatantalic = ts.Compound(name="Metatantalic", acidic=True, pKas=[9.6])
Metavanadium_V = ts.Compound(name="Metavanadium(V)", acidic=True, pKas=[3.8])
Methanal = ts.Compound(name="Methanal", acidic=True, pKas=[13.3])
Methaneselenol = ts.Compound(name="Methaneselenol", acidic=True, pKas=[5.2])
Methanesulfonic = ts.Compound(name="Methanesulfonic", acidic=True, pKas=[-1.92])
Methanethiol = ts.Compound(name="Methanethiol", acidic=True, pKas=[10.4])
Methylarsonic = ts.Compound(name="Methylarsonic", acidic=True, pKas=[3.41])
Methylhydroperoxide = ts.Compound(name="Methylhydroperoxide", acidic=True, pKas=[11.5])
Methylphosphinic = ts.Compound(name="Methylphosphinic", acidic=True, pKas=[3.08])
Methylphosphonic = ts.Compound(name="Methylphosphonic", acidic=True, pKas=[2.38])
Methylsulfinic = ts.Compound(name="Methylsulfinic", acidic=True, pKas=[2.28])
Lactic = ts.Compound(name="Lactic", acidic=True, pKas=[3.9])
Molybdic = ts.Compound(name="Molybdic", acidic=True, pKas=[3.7])
Monofluorophosphoric = ts.Compound(name="Monofluorophosphoric", acidic=True, pKas=[0.5])
Nitramine = ts.Compound(name="Nitramine", acidic=True, pKas=[6.6])
Nitromethane = ts.Compound(name="Nitromethane", acidic=True, pKas=[10.2])
Octanesulfonic = ts.Compound(name="Octanesulfonic", acidic=True, pKas=[-1.41])
Ortho_periodic = ts.Compound(name="Ortho-periodicacid", acidic=True, pKas=[3.29])
Orthoantimonic = ts.Compound(name="Orthoantimonic", acidic=True, pKas=[2.55])
Orthoboric = ts.Compound(name="Orthoboric", acidic=True, pKas=[9.25])
Orthogermanic = ts.Compound(name="Orthogermanic", acidic=True, pKas=[8.68])
Orthosilicic = ts.Compound(name="Orthosilicic", acidic=True, pKas=[9.66])
Orthotelluric = ts.Compound(name="Orthotelluric", acidic=True, pKas=[7.7])
Orthovanadic = ts.Compound(name="Orthovanadic", acidic=True, pKas=[2.6])
Oxalic = ts.Compound(name="Oxalic", acidic=True, pKas=[1.23])
Pelargonic = ts.Compound(name="Pelargonic", acidic=True, pKas=[4.95])
Pentacarbonylmanganese = ts.Compound(name="Pentacarbonylmanganese", acidic=True, pKas=[7.1])
Pentafluoroethanesulfonic = ts.Compound(name="Pentafluoroethanesulfonic", acidic=True, pKas=[-5.1])
Pentasulfane = ts.Compound(name="Pentasulfane", acidic=True, pKas=[3.5])
Perchlorylamide = ts.Compound(name="Perchlorylamide", acidic=True, pKas=[8.6])
Perchlorylamine = ts.Compound(name="Perchlorylamine", acidic=True, pKas=[3.7])
Perchromic = ts.Compound(name="Perchromic", acidic=True, pKas=[4.95])
Permanganic = ts.Compound(name="Permanganic", acidic=True, pKas=[-2.25])
Perosmic = ts.Compound(name="Perosmic", acidic=True, pKas=[7.2])
Peroxoformic = ts.Compound(name="Peroxoformic", acidic=True, pKas=[7.1])
Peroxobutyric = ts.Compound(name="Peroxobutyric", acidic=True, pKas=[8.2])
Peroxodiphosphoric = ts.Compound(name="Peroxodiphosphoric", acidic=True, pKas=[-3.0])
Peroxodisulfuric = ts.Compound(name="Peroxodisulfuric", acidic=True, pKas=[-3.5])
Peroxyacetic = ts.Compound(name="Peroxyacetic", acidic=True, pKas=[8.2])
Peroxohypositrous = ts.Compound(name="Peroxohypositrous", acidic=True, pKas=[2.51])
Peroxopropionic = ts.Compound(name="Peroxopropionic", acidic=True, pKas=[8.1])
Peroxonitric = ts.Compound(name="Peroxonitric", acidic=True, pKas=[-5.0])
Peroxo_nitrous = ts.Compound(name="Peroxo-nitrousacid", acidic=True, pKas=[6.8])
Peroxosulphuric = ts.Compound(name="Peroxosulphuric", acidic=True, pKas=[0.8])
Perrhenic = ts.Compound(name="Perrhenic", acidic=True, pKas=[-1.25])
Perruthenic = ts.Compound(name="Perruthenic", acidic=True, pKas=[11.2])
Pertechnetic = ts.Compound(name="Pertechnetic", acidic=True, pKas=[0.3])
Phenol = ts.Compound(name="Phenol", acidic=True, pKas=[9.99])
Phenylphosphonic = ts.Compound(name="Phenylphosphonic", acidic=True, pKas=[1.83])
Phenylsulfinic = ts.Compound(name="Phenylsulfinic", acidic=True, pKas=[1.84])
Phosphorous = ts.Compound(name="Phosphorous", acidic=True, pKas=[1.92])
Phosphoric = ts.Compound(name="Phosphoric", acidic=True, pKas=[2.13])
Pimelic = ts.Compound(name="Pimelic", acidic=True, pKas=[4.47])
Poly = ts.Compound(name="Poly", acidic=True, pKas=[4.09])
Propanethiol = ts.Compound(name="Propanethiol", acidic=True, pKas=[10.65])
Propionic = ts.Compound(name="Propionic", acidic=True, pKas=[4.87])
Propylarsonic = ts.Compound(name="Propylarsonic", acidic=True, pKas=[4.21])
Propylphosphinic = ts.Compound(name="Propylphosphinic", acidic=True, pKas=[3.46])
Propylphosphonic = ts.Compound(name="Propylphosphonic", acidic=True, pKas=[2.49])
Salicylic = ts.Compound(name="Salicylic", acidic=True, pKas=[2.75])
Nitrous = ts.Compound(name="Nitrous", acidic=True, pKas=[3.35])
Sulfuric = ts.Compound(name="Sulfuric", acidic=True, pKas=[-3.0])
Hydrogensulfide = ts.Compound(name="Hydrogensulfide", acidic=True, pKas=[7.06])
Sulphurous = ts.Compound(name="Sulphurous", acidic=True, pKas=[1.92])
Seaborgium_VI = ts.Compound(name="Seaborgium_VI_", acidic=True, pKas=[3.75])
Sebacic = ts.Compound(name="Sebacic", acidic=True, pKas=[4.72])
Selenous = ts.Compound(name="Selenous", acidic=True, pKas=[2.62])
Selenophenol = ts.Compound(name="Selenophenol", acidic=True, pKas=[5.9])
Selenophosphoric = ts.Compound(name="Selenophosphoric", acidic=True, pKas=[0.02])
Selenic = ts.Compound(name="Selenic", acidic=True, pKas=[-3.0])
Hydroselenic = ts.Compound(name="Hydroselenic", acidic=True, pKas=[3.73])
Hydrazoic = ts.Compound(name="Hydrazoic", acidic=True, pKas=[4.76])
Tartronic = ts.Compound(name="Tartronic", acidic=True, pKas=[2.3])
Telluric = ts.Compound(name="Telluric", acidic=True, pKas=[2.64, 2.7])
Tetracarbonyliron = ts.Compound(name="Tetracarbonyliron", acidic=True, pKas=[4.4])
Tetrafluoroboric = ts.Compound(name="Tetrafluoroboric", acidic=True, pKas=[-0.4])
Tetraphosphoric = ts.Compound(name="Tetraphosphoric", acidic=True, pKas=[0.5])
Tetrasulfane = ts.Compound(name="Tetrasulfane", acidic=True, pKas=[3.8])
Tetrathiophosphoric = ts.Compound(name="Tetrathiophosphoric", acidic=True, pKas=[1.5])
Thioarsenic = ts.Compound(name="Thioarsenic", acidic=True, pKas=[3.3])
Thiocyanic = ts.Compound(name="Thiocyanic", acidic=True, pKas=[-1.85])
Thiophenol = ts.Compound(name="Thiophenol", acidic=True, pKas=[6.52])
Thiophosphoric = ts.Compound(name="Thiophosphoric", acidic=True, pKas=[1.79])
Thiosulfuric = ts.Compound(name="Thiosulfuric", acidic=True, pKas=[0.6])
Thioselenic = ts.Compound(name="Thioselenic", acidic=True, pKas=[0.99])
Tribromoacetic = ts.Compound(name="Tribromoacetic", acidic=True, pKas=[0.72])
Trichloroacetic = ts.Compound(name="Trichloroacetic", acidic=True, pKas=[0.65])
Trifluoroacetic = ts.Compound(name="Trifluoroacetic", acidic=True, pKas=[0.23])
Trifluoromethanesulfonic = ts.Compound(name="Trifluoromethanesulfonic", acidic=True, pKas=[-5.21])
Trihydrogenperoxodiphosphate = ts.Compound(name="Trihydrogenperoxodiphosphate", acidic=True, pKas=[0.5])
Triiodoacetic = ts.Compound(name="Triiodoacetic", acidic=True, pKas=[0.9])
Trioxide = ts.Compound(name="Trioxide", acidic=True, pKas=[9.5])
Triphosphoric = ts.Compound(name="Triphosphoric", acidic=True, pKas=[1.0])
Triselenocarbonic = ts.Compound(name="Triselenocarbonic", acidic=True, pKas=[1.16])
Trisulfane = ts.Compound(name="Trisulfane", acidic=True, pKas=[4.2])
Trithiocarbonic = ts.Compound(name="Trithiocarbonic", acidic=True, pKas=[2.68])
Valeric = ts.Compound(name="Valeric", acidic=True, pKas=[4.84])
HydrogenHyperoxide = ts.Compound(name="Hydrogenhyperoxide", acidic=True, pKas=[4.7])
HydrogenOzonide = ts.Compound(name="Hydrogenozonide", acidic=True, pKas=[8.2])
HydrogenPeroxide = ts.Compound(name="Hydrogenperoxide", acidic=True, pKas=[11.62])
Tartaric = ts.Compound(name="Tartaric", acidic=True, pKas=[2.98])
Tungstic = ts.Compound(name="Tungstic", acidic=True, pKas=[3.8])
Xenon_VI = ts.Compound(name="Xenon_VI_", acidic=True, pKas=[10.5])
Xenon_VIII = ts.Compound(name="Xenon_VIII_", acidic=True, pKas=[2.0])
# Bases
CesiumHydroxide = ts.Compound(name="CsOH", acidic=False, pKas=[15.76])
FranciumHydroxide = ts.Compound(name="FrOH", acidic=False, pKas=[15.7])
RubidiumHydroxide = ts.Compound(name="RbOH", acidic=False, pKas=[15.4])
LithiumHydroxide = ts.Compound(name="LiOH", acidic=False, pKas=[14.36])
CalciumHydroxide = ts.Compound(name="calcium_hydroxide", acidic=False, pKas=[12.6, 11.57])
RadiumHydroxide = ts.Compound(name="Radiumhydroxide", acidic=False, pKas=[14.0])
BariumHydroxide = ts.Compound(name="Bariumhydroxide", acidic=False, pKas=[13.85])
SodiumHydroxide = ts.Compound(name="NaOH", acidic=False, pKas=[13.8])
StrontiumHydroxide = ts.Compound(name="Strontiumhydroxide", acidic=False, pKas=[13.7])
PotassiumHydroxide = ts.Compound(name="KOH", acidic=False, pKas=[13.5])
Thallium_I_Hydroxide = ts.Compound(name="Thallium_I_hydroxide", acidic=False, pKas=[13.36])
MagnesiumHydroxide = ts.Compound(name="Magnesiumhydroxide", acidic=False, pKas=[12.2])
Manganese_II_Hydroxide = ts.Compound(name="Manganese_II_hydroxide", acidic=False, pKas=[12.0])
Indium_I_Hydroxide = ts.Compound(name="Indium_I_hydroxide_InOH", acidic=False, pKas=[11.83])
SilverHydroxide = ts.Compound(name="Silverhydroxide", acidic=False, pKas=[11.5])
Neptunyl_V_Hydroxide = ts.Compound(name="Neptunyl_V_hydroxide", acidic=False, pKas=[11.3])
Protactinyl_V_Hydroxide = ts.Compound(name="Protactinyl_V_hydroxide", acidic=False, pKas=[11.19])
ActiniumHydroxide = ts.Compound(name="Actiniumhydroxide", acidic=False, pKas=[11.1])
Americium_III_Hydroxide = ts.Compound(name="Americium_III_hydroxide", acidic=False, pKas=[11.1])
Curium_III_Hydroxide = ts.Compound(name="Curium_III_hydroxide", acidic=False, pKas=[11.1])
Plutonium_III_Hydroxide = ts.Compound(name="Plutonium_III_hydroxide", acidic=False, pKas=[11.1])
Iron_II_Hydroxide = ts.Compound(name="Iron_II_hydroxide", acidic=False, pKas=[11.07])
Triethylamine = ts.Compound(name="Triethylamine", acidic=False, pKas=[11.01])
Dipropylamine = ts.Compound(name="Dipropylamine", acidic=False, pKas=[10.91])
ethylamine = ts.Compound(name="ethylamine", acidic=False, pKas=[10.75])
Dimethylamine = ts.Compound(name="Dimethylamine", acidic=False, pKas=[10.73])
Methylamine = ts.Compound(name="Methylamine", acidic=False, pKas=[10.66])
Tripropylamine = ts.Compound(name="Tripropylamine", acidic=False, pKas=[10.66])
Propylamine = ts.Compound(name="Propylamine", acidic=False, pKas=[10.57])
Hydrogenxenonate_VIII_ = ts.Compound(name="Hydrogenxenonate_VIII_", acidic=False, pKas=[10.5])
Diethylamine = ts.Compound(name="Diethylamine", acidic=False, pKas=[10.49])
CadmiumHydroxide = ts.Compound(name="Cadmiumhydroxide", acidic=False, pKas=[10.4])
Nickel_II_Hydroxide = ts.Compound(name="Nickel_II_hydroxide", acidic=False, pKas=[10.22])
Trimethylamine = ts.Compound(name="Trimethylamine", acidic=False, pKas=[9.81])
LanthanumHydroxide = ts.Compound(name="Lanthanumhydroxide", acidic=False, pKas=[9.8])
Plutonyl_V_Hydroxide = ts.Compound(name="Plutonyl_V_hydroxide", acidic=False, pKas=[9.7])
Cobalt_II_Hydroxide = ts.Compound(name="Cobalt_II_hydroxide", acidic=False, pKas=[9.63])
Lead_II_Hydroxide = ts.Compound(name="Lead_II_hydroxide", acidic=False, pKas=[9.62])
Cerium_III_Hydroxide = ts.Compound(name="Cerium_III_hydroxide", acidic=False, pKas=[9.6])
YttriumHydroxide = ts.Compound(name="Yttriumhydroxide", acidic=False, pKas=[9.6])
Neodymium_III_Hydroxide = ts.Compound(name="Neodymium_III_hydroxide", acidic=False, pKas=[9.5])
Praseodymium_III_Hydroxide = ts.Compound(name="Praseodymium_III_hydroxide", acidic=False, pKas=[9.4])
Promethium_III_Hydroxide = ts.Compound(name="Promethium_III_hydroxide", acidic=False, pKas=[9.4])
Samarium_III_Hydroxide = ts.Compound(name="Samarium_III_hydroxide", acidic=False, pKas=[9.4])
ammonia = ts.Compound(name="ammonia", acidic=False, pKas=[9.25])
Platinum_II_Hydroxide = ts.Compound(name="Platinum_II_hydroxide", acidic=False, pKas=[9.2])
ZincHydroxide = ts.Compound(name="Zinchydroxide", acidic=False, pKas=[8.99])
Europium_III_Hydroxide = ts.Compound(name="Europium_III_hydroxide", acidic=False, pKas=[8.8])
Terbium_III_Hydroxide = ts.Compound(name="Terbium_III_hydroxide", acidic=False, pKas=[8.8])
Trimethylphosphine = ts.Compound(name="Trimethylphosphine", acidic=False, pKas=[8.65])
BerylliumHydroxide = ts.Compound(name="Berylliumhydroxide", acidic=False, pKas=[8.6])
Perchlorylimide = ts.Compound(name="Perchlorylimide", acidic=False, pKas=[8.6])
Dysprosium_III_Hydroxide = ts.Compound(name="Dysprosium_III_hydroxide", acidic=False, pKas=[8.5])
Copper_II_Hydroxide = ts.Compound(name="Copper_II_hydroxide", acidic=False, pKas=[8.5])
Holmium_III_Hydroxide = ts.Compound(name="Holmium_III_hydroxide", acidic=False, pKas=[8.4])
Erbium_III_Hydroxide = ts.Compound(name="Erbium_III_hydroxide", acidic=False, pKas=[8.3])
GadoliniumHydroxide = ts.Compound(name="Gadoliniumhydroxide", acidic=False, pKas=[8.3])
Thulium_III_Hydroxide = ts.Compound(name="Thulium_III_hydroxide", acidic=False, pKas=[8.3])
Ytterbium_III_Hydroxide = ts.Compound(name="Ytterbium_III_hydroxide", acidic=False, pKas=[8.3])
Hydroxylamine = ts.Compound(name="Hydroxylamine", acidic=False, pKas=[8.2])
LutetiumHydroxide = ts.Compound(name="Lutetiumhydroxide", acidic=False, pKas=[8.2])
Ethylhydrazine = ts.Compound(name="Ethylhydrazine", acidic=False, pKas=[7.99])
Hydrazine = ts.Compound(name="Hydrazine", acidic=False, pKas=[7.93])
Methylhydrazine = ts.Compound(name="Methylhydrazine", acidic=False, pKas=[7.87])
Peroxodiphosphate = ts.Compound(name="Peroxodiphosphate", acidic=False, pKas=[7.67])
Iron_III_Hydroxide = ts.Compound(name="Iron_III_hydroxide", acidic=False, pKas=[6.89])
Uranyl_VI_Hydroxide = ts.Compound(name="Uranyl_VI_hydroxide", acidic=False, pKas=[6.8])
Plutonium_IV_Hydroxide = ts.Compound(name="Plutonium_IV_hydroxide", acidic=False, pKas=[6.7])
ScandiumHydroxide = ts.Compound(name="Scandiumhydroxide", acidic=False, pKas=[6.4])
Bismuth_III_Hydroxide = ts.Compound(name="Bismuth_III_hydroxide", acidic=False, pKas=[6.38])
Vanadyl_IV_hydroxide_VO = ts.Compound(name="Vanadyl_IV_hydroxide_VO", acidic=False, pKas=[6.34])
AluminumHydroxide = ts.Compound(name="Aluminumhydroxide", acidic=False, pKas=[5.86])
Thorium_IV_Hydroxide = ts.Compound(name="Thorium_IV_hydroxide", acidic=False, pKas=[5.8])
Chromium_III_Hydroxide = ts.Compound(name="Chromium_III_hydroxide", acidic=False, pKas=[5.7])
Uranium_IV_Hydroxide = ts.Compound(name="Uranium_IV_hydroxide", acidic=False, pKas=[5.65])
Neptunium_IV_Hydroxide = ts.Compound(name="Neptunium_IV_hydroxide", acidic=False, pKas=[5.3])
Rutherfordium_IV_Hydroxide = ts.Compound(name="Rutherfordium_IV_hydroxide", acidic=False, pKas=[5.3])
Phenylhydrazine = ts.Compound(name="Phenylhydrazine", acidic=False, pKas=[5.21])
Indium_III_hydroxide_In = ts.Compound(name="Indium_III_hydroxide_In", acidic=False, pKas=[5.16])
GalliumHydroxide = ts.Compound(name="Galliumhydroxide", acidic=False, pKas=[4.75])
aniline = ts.Compound(name="aniline", acidic=False, pKas=[4.6])
Vanadium_III_hydroxide_V = ts.Compound(name="Vanadium_III_hydroxide_V", acidic=False, pKas=[4.1])
Plutonyl_VI_Hydroxide = ts.Compound(name="Plutonyl_VI_hydroxide", acidic=False, pKas=[4.05])
Gold_I_Hydroxide = ts.Compound(name="Gold_I_hydroxide", acidic=False, pKas=[3.8])
Tin_II_Hydroxide = ts.Compound(name="Tin_II_hydroxide", acidic=False, pKas=[3.66])
Tin_IV_Hydroxide = ts.Compound(name="Tin_IV_hydroxide", acidic=False, pKas=[3.32])
Triphenylphosphine = ts.Compound(name="Triphenylphosphine", acidic=False, pKas=[2.73])
Methylphosphine = ts.Compound(name="Methylphosphine", acidic=False, pKas=[2.7])
Mercury_II_Hydroxide = ts.Compound(name="Mercury_II_hydroxide", acidic=False, pKas=[2.5])
Palladium_II_Hydroxide = ts.Compound(name="Palladium_II_hydroxide", acidic=False, pKas=[2.46])
Technetyl_IV_Hydroxide = ts.Compound(name="Technetyl_IV_hydroxide", acidic=False, pKas=[2.43])
Titanyl_IV_dihydroxide = ts.Compound(name="Titanyl_IV_dihydroxide", acidic=False, pKas=[2.4])
Cerium_IV_Hydroxide = ts.Compound(name="Cerium_IV_hydroxide", acidic=False, pKas=[2.29])
Thallium_III_Hydroxide = ts.Compound(name="Thallium_III_hydroxide", acidic=False, pKas=[1.9])
Astatine_I_Hydroxide = ts.Compound(name="Astatine_I_hydroxide", acidic=False, pKas=[1.5])
Antimony_III_Hydroxide = ts.Compound(name="Antimony_III_hydroxide", acidic=False, pKas=[1.42])
Protactinium_IV_Hydroxide = ts.Compound(name="Protactinium_IV_hydroxide", acidic=False, pKas=[1.25])
Diphenylamine = ts.Compound(name="Diphenylamine", acidic=False, pKas=[0.78])
Hafnium_IV_Hydroxide = ts.Compound(name="Hafnium_IV_hydroxide", acidic=False, pKas=[0.52])
Zirconium_IV_Hydroxide = ts.Compound(name="Zirconium_IV_hydroxide", acidic=False, pKas=[0.5])
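# --- Example (editor's sketch, not part of the original data) ---
# A minimal illustration of how these Compound entries could be used: the
# fraction of a monoprotic acid present in its deprotonated form at a given pH
# follows from the Henderson-Hasselbalch relation
#     ratio = [A-]/[HA] = 10 ** (pH - pKa),  fraction = ratio / (1 + ratio).
# It assumes the ts.Compound object exposes the pKas list passed to its
# constructor as a `pKas` attribute; adjust to the real API if it differs.
def fraction_deprotonated(compound, pH):
    """Approximate deprotonated fraction for the first pKa of an acid."""
    pKa = compound.pKas[0]
    ratio = 10 ** (pH - pKa)  # [A-]/[HA]
    return ratio / (1.0 + ratio)
# e.g. lactic acid (pKa 3.9) is roughly 93% deprotonated at pH 5:
# print(fraction_deprotonated(Lactic, 5.0))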
# Lists of compounds
acids = [
| |
str(d.mouseX) + "," + str(d.mouseY) + ")"
i.dl().text(txt, (10,i.height / 2), color=col)
txt = "color: " + str(i.getPixel(d.mouseX,d.mouseY))
i.dl().text(txt, (10,(i.height / 2) + 10), color=col)
print "coord: (" + str(d.mouseX) + "," + str(d.mouseY) + "), color: " + str(i.getPixel(d.mouseX,d.mouseY))
if elapsed_time > 0 and elapsed_time < 5:
i.dl().text("In live mode", (10,10), color=col)
i.dl().text("Left click will show mouse coordinates and color", (10,20), color=col)
i.dl().text("Right click will kill the live image", (10,30), color=col)
i.save(d)
if d.mouseRight:
print "Closing Window"
d.done = True
pg.quit()
class Camera(FrameSource):
"""
**SUMMARY**
The Camera class manages input from a basic camera. Note that once the
camera is initialized, it will be locked from being used by other
processes. On Linux you can check manually for compatible devices by
looking for /dev/video* entries.
This class wraps OpenCV's cvCapture object and its associated methods.
Read up on OpenCV's CaptureFromCAM method for more details if you need
finer control than basic frame retrieval.
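**EXAMPLE**
A minimal usage sketch (assumes at least one supported camera is attached;
the default index connects to the first available device):
>>> cam = Camera()
>>> img = cam.getImage()
>>> img.show()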
"""
capture = "" #cvCapture object
thread = ""
pygame_camera = False
pygame_buffer = ""
prop_map = {"width": cv.CV_CAP_PROP_FRAME_WIDTH,
"height": cv.CV_CAP_PROP_FRAME_HEIGHT,
"brightness": cv.CV_CAP_PROP_BRIGHTNESS,
"contrast": cv.CV_CAP_PROP_CONTRAST,
"saturation": cv.CV_CAP_PROP_SATURATION,
"hue": cv.CV_CAP_PROP_HUE,
"gain": cv.CV_CAP_PROP_GAIN,
"exposure": cv.CV_CAP_PROP_EXPOSURE}
#human readable to CV constant property mapping
def __init__(self, camera_index = -1, prop_set = {}, threaded = True, calibrationfile = ''):
global _cameras
global _camera_polling_thread
"""
**SUMMARY**
In the camera constructor, camera_index indicates which camera to connect to
and props is a dictionary which can be used to set any camera attributes
Supported props are currently: height, width, brightness, contrast,
saturation, hue, gain, and exposure.
You can also specify whether you want the FrameBufferThread to continuously
debuffer the camera. If you specify True, the camera is essentially 'on' at
all times. If you specify False, you will have to manage the camera buffers yourself.
**PARAMETERS**
* *camera_index* - The index of the camera, these go from 0 upward, and are system specific.
* *prop_set* - The property set for the camera (i.e. a dict of camera properties).
.. Warning::
For most web cameras only the width and height properties are supported. Support
for all of the other parameters varies by camera and operating system.
* *threaded* - If True we constantly debuffer the camera, otherwise the user
must do this manually.
* *calibrationfile* - A calibration file to load.
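**EXAMPLE**
A hedged sketch of a constructor call with a property set; per the warning
above, only width and height are reliably honoured across cameras:
>>> cam = Camera(0, prop_set={"width": 640, "height": 480}, threaded=False)
>>> cam.getImage().show()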
"""
#This is to add support for XIMEA cameras.
if isinstance(camera_index, str):
if camera_index.lower() == 'ximea':
camera_index = 1100
self.capture = cv.CaptureFromCAM(camera_index) #This fixes bug with opencv not being able to grab frames from webcams on linux
if "delay" in prop_set:
time.sleep(prop_set['delay'])
if platform.system() == "Linux" and (prop_set.has_key("height") or cv.GrabFrame(self.capture) == False):
import pygame.camera
pygame.camera.init()
threaded = True #pygame must be threaded
if camera_index == -1:
camera_index = 0
if(prop_set.has_key("height") and prop_set.has_key("width")):
self.capture = pygame.camera.Camera("/dev/video" + str(camera_index), (prop_set['width'], prop_set['height']))
else:
self.capture = pygame.camera.Camera("/dev/video" + str(camera_index))
try:
self.capture.start()
except:
logger.warning("SimpleCV can't seem to find a camera on your system, or the drivers do not work with SimpleCV.")
return
time.sleep(0)
self.pygame_buffer = self.capture.get_image()
self.pygame_camera = True
else:
self.threaded = False
if (platform.system() == "Windows"):
threaded = False
if (not self.capture):
return None
#set any properties in the constructor
for p in prop_set.keys():
if p in self.prop_map:
cv.SetCaptureProperty(self.capture, self.prop_map[p], prop_set[p])
if (threaded):
self.threaded = True
_cameras.append(self)
if (not _camera_polling_thread):
_camera_polling_thread = FrameBufferThread()
_camera_polling_thread.daemon = True
_camera_polling_thread.start()
time.sleep(0) #yield to thread
if calibrationfile:
self.loadCalibration(calibrationfile)
#todo -- make these dynamic attributes of the Camera class
def getProperty(self, prop):
"""
**SUMMARY**
Retrieve the value of a given property, wrapper for cv.GetCaptureProperty
.. Warning::
For most web cameras only the width and height properties are supported. Support
for all of the other parameters varies by camera and operating system.
**PARAMETERS**
* *prop* - The property to retrieve.
**RETURNS**
The specified property. If it can't be found the method returns False.
**EXAMPLE**
>>> cam = Camera()
>>> prop = cam.getProperty("width")
"""
if self.pygame_camera:
if prop.lower() == 'width':
return self.capture.get_size()[0]
elif prop.lower() == 'height':
return self.capture.get_size()[1]
else:
return False
if prop in self.prop_map:
return cv.GetCaptureProperty(self.capture, self.prop_map[prop])
return False
def getAllProperties(self):
"""
**SUMMARY**
Return all properties from the camera.
**RETURNS**
A dict of all the camera properties.
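**EXAMPLE**
A quick sketch (assumes a working camera):
>>> cam = Camera()
>>> cam.getAllProperties()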
"""
if self.pygame_camera:
return False
props = {}
for p in self.prop_map:
props[p] = self.getProperty(p)
return props
def getImage(self):
"""
**SUMMARY**
Retrieve an Image-object from the camera. If you experience problems
with stale frames from the camera's hardware buffer, increase the flushcache
number to dequeue multiple frames before retrieval.
We're working on how to solve this problem.
**RETURNS**
A SimpleCV Image from the camera.
**EXAMPLES**
>>> cam = Camera()
>>> while True:
>>> cam.getImage().show()
"""
if self.pygame_camera:
return Image(self.pygame_buffer.copy())
if (not self.threaded):
cv.GrabFrame(self.capture)
self.capturetime = time.time()
else:
self.capturetime = self._threadcapturetime
frame = cv.RetrieveFrame(self.capture)
newimg = cv.CreateImage(cv.GetSize(frame), cv.IPL_DEPTH_8U, 3)
cv.Copy(frame, newimg)
return Image(newimg, self)
class VirtualCamera(FrameSource):
"""
**SUMMARY**
The virtual camera lets you test algorithms or functions by providing
a Camera object which is not a physically connected device.
Currently, VirtualCamera supports "image", "imageset" and "video" source types.
**USAGE**
* For image, pass the filename or URL to the image
* For the video, the filename
* For imageset, you can pass either a path or a list of [path, extension]
"""
source = ""
sourcetype = ""
def __init__(self, s, st, start=1):
"""
**SUMMARY**
The constructor takes a source, and source type.
**PARAMETERS**
* *s* - the source of the imagery.
* *st* - the type of the virtual camera. Valid strings include:
* "image" - a single still image.
* "video" - a video file.
* "imageset" - a SimpleCV image set.
* *start* - the number of the frame that you want to start with.
**EXAMPLE**
>>> vc = VirtualCamera("img.jpg", "image")
>>> vc = VirtualCamera("video.mpg", "video")
>>> vc = VirtualCamera("./path_to_images/", "imageset")
>>> vc = VirtualCamera("video.mpg", "video", 300)
"""
self.source = s
self.sourcetype = st
self.counter = 0
if start==0:
start=1
self.start = start
if not (self.sourcetype == "video" or self.sourcetype == "image" or self.sourcetype == "imageset"):
print 'Error: In VirtualCamera(), Incorrect Source option. "%s" \nUsage:' % self.sourcetype
print '\tVirtualCamera("filename","video")'
print '\tVirtualCamera("filename","image")'
print '\tVirtualCamera("./path_to_images","imageset")'
return None
if (type(self.source) == list):
for source_file in self.source:
if not os.path.exists(source_file):
print 'Error: In VirtualCamera()\n\t"%s" was not found.' % source_file
return None
else:
if not os.path.exists(self.source):
print 'Error: In VirtualCamera()\n\t"%s" was not found.' % self.source
return None
if (self.sourcetype == "imageset"):
self.source = ImageSet()
if (type(s) == list):
self.source.load(*s)
else:
self.source.load(s)
if (self.sourcetype == 'video'):
self.capture = cv.CaptureFromFile(self.source)
cv.SetCaptureProperty(self.capture, cv.CV_CAP_PROP_POS_FRAMES, self.start-1)
def getImage(self):
"""
**SUMMARY**
Retrieve an Image-object from the virtual camera.
**RETURNS**
A SimpleCV Image from the camera.
**EXAMPLES**
>>> cam = VirtualCamera()
>>> while True:
>>> cam.getImage().show()
"""
if (self.sourcetype == 'image'):
return Image(self.source, self)
if (self.sourcetype == 'imageset'):
img = self.source[self.counter % len(self.source)]
self.counter = self.counter + 1
return img
if (self.sourcetype == 'video'):
return Image(cv.QueryFrame(self.capture), self)
def rewind(self, start=None):
"""
**SUMMARY**
Rewind the Video source back to the given frame.
Available for only video sources.
**PARAMETERS**
start - the number of the frame that you want to rewind to.
If not provided, the video source is rewound to the start
frame given in the constructor, or to the beginning.
**RETURNS**
None
**EXAMPLES**
>>> cam = VirtualCamera("filename.avi", "video", 120)
>>> i=0
>>> while i<60:
... cam.getImage().show()
... i+=1
>>> cam.rewind()
"""
if (self.sourcetype == 'video'):
if not start:
cv.SetCaptureProperty(self.capture, cv.CV_CAP_PROP_POS_FRAMES, self.start-1)
else:
if start==0:
start=1
cv.SetCaptureProperty(self.capture, cv.CV_CAP_PROP_POS_FRAMES, start-1)
def getFrame(self, frame):
"""
**SUMMARY**
Get the provided numbered | |
# Calculate overfill
start_mismatch[j].append((time_list[tseg_start] - time_list[pseg_start]).total_seconds())
for ts, (tseg_start, tseg_stop) in enumerate(truth_segs):
ps = _find_overlap_seg(pred_segs, tseg_stop)
if ps == -1:
# potential underfill or deletion
ps = _find_seg_end_within(pred_segs, tseg_start, tseg_stop)
if ps != -1:
pseg_stop = pred_segs[ps][1]
offset = (time_list[tseg_stop] - time_list[pseg_stop]).total_seconds()
if tseg_stop != pseg_stop and abs(offset) < 18000:
stop_mismatch[j].append(offset)
else:
pseg_stop = pred_segs[ps][1]
# Check the end of previous truth
if ts < len(truth_segs) - 1 and truth_segs[ts-1][0] <= pseg_stop:
continue
else:
offset = (time_list[tseg_stop] - time_list[pseg_stop]).total_seconds()
if abs(offset) < 18000:
# Calculate overfill
stop_mismatch[j].append(offset)
# print("class: %d" % j)
# print("pred_segs: %d %s" % (len(pred_segs), str(pred_segs)))
# print("truth_segs: %d %s" % (len(truth_segs), str(truth_segs)))
# print("start_mismatch: %s" % start_mismatch)
# print("stop_mismatch: %s" % stop_mismatch)
return start_mismatch, stop_mismatch
def _get_timeliness_measures(classes, truth, prediction, time_list):
num_classes = len(classes)
start_mismatch = [list([]) for i in range(num_classes)]
stop_mismatch = [list([]) for i in range(num_classes)]
# Processing segmentation first!
for j in range(num_classes):
pred_segs = []
truth_segs = []
prev_pred = False
prev_truth = False
tseg_start = 0
tseg_stop = 0
pseg_start = 0
pseg_stop = 0
for i in range(truth.shape[0]):
cur_truth = (int(truth[i]) == j)
cur_pred = (int(prediction[i]) == j)
# Truth segments
if cur_truth != prev_truth:
if cur_truth:
tseg_start = i
elif tseg_stop != 0:
truth_segs.append((tseg_start, tseg_stop))
tseg_stop = i
# Prediction segments
if cur_pred != prev_pred:
if cur_pred:
pseg_start = i
elif pseg_stop != 0:
pred_segs.append((pseg_start, pseg_stop))
pseg_stop = i
prev_truth = cur_truth
prev_pred = cur_pred
# Add compensated segments to prediction segments
for ts, (tseg_start, tseg_stop) in enumerate(truth_segs):
ps = _find_overlap_seg(pred_segs, tseg_start)
if ps == -1:
# potential underfill or deletion
ps = _find_seg_start_within(pred_segs, tseg_start, tseg_stop)
if ps != -1:
pseg_start = pred_segs[ps][0]
offset = (time_list[tseg_start] - time_list[pseg_start]).total_seconds()
if tseg_start != pseg_start and abs(offset) < 18000:
start_mismatch[j].append(offset)
else:
pseg_start = pred_segs[ps][0]
# Check the end of previous truth
if ts > 1 and truth_segs[ts-1][1] >= pseg_start:
continue
else:
offset = (time_list[tseg_start] - time_list[pseg_start]).total_seconds()
if tseg_start != pseg_start and abs(offset) < 18000:
# Calculate overfill
start_mismatch[j].append((time_list[tseg_start] - time_list[pseg_start]).total_seconds())
for ts, (tseg_start, tseg_stop) in enumerate(truth_segs):
ps = _find_overlap_seg(pred_segs, tseg_stop)
if ps == -1:
# potential underfill or deletion
ps = _find_seg_end_within(pred_segs, tseg_start, tseg_stop)
if ps != -1:
pseg_stop = pred_segs[ps][1]
offset = (time_list[tseg_stop] - time_list[pseg_stop]).total_seconds()
if tseg_stop != pseg_stop and abs(offset) < 18000:
stop_mismatch[j].append(offset)
else:
pseg_stop = pred_segs[ps][1]
# Check the end of previous truth
if ts < len(truth_segs) - 1 and truth_segs[ts-1][0] <= pseg_stop:
continue
else:
offset = (time_list[tseg_stop] - time_list[pseg_stop]).total_seconds()
if tseg_stop != pseg_stop and abs(offset) < 18000:
# Calculate overfill
stop_mismatch[j].append(offset)
# print("class: %d" % j)
# print("pred_segs: %d %s" % (len(pred_segs), str(pred_segs)))
# print("truth_segs: %d %s" % (len(truth_segs), str(truth_segs)))
# print("start_mismatch: %s" % start_mismatch)
# print("stop_mismatch: %s" % stop_mismatch)
return start_mismatch, stop_mismatch
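# --- Usage sketch (editor's addition; not part of the original module) ---
# _get_timeliness_measures expects:
#   * classes:    list of class names; class j corresponds to label value j
#   * truth:      1-D numpy array of integer class labels, one per sample
#   * prediction: 1-D numpy array of the same length
#   * time_list:  list of datetime objects, one per sample
# and returns, per class, the start/stop boundary offsets (in seconds) between
# each truth segment and the overlapping predicted segment. The call below is
# illustrative only; it assumes the helper functions _find_overlap_seg,
# _find_seg_start_within and _find_seg_end_within from this module are in scope.
#
#   import numpy as np
#   from datetime import datetime, timedelta
#
#   classes = ['idle', 'walk']
#   t0 = datetime(2020, 1, 1)
#   time_list = [t0 + timedelta(seconds=i) for i in range(10)]
#   truth = np.array([0, 0, 1, 1, 1, 1, 0, 0, 0, 0])
#   prediction = np.array([0, 1, 1, 1, 1, 0, 0, 0, 0, 0])  # 'walk' predicted 1 s early at both ends
#   start_off, stop_off = _get_timeliness_measures(classes, truth, prediction, time_list)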
def _get_timeliness_measures_depricated(classes, truth, prediction, truth_scoring, prediction_scoring, time_list):
num_classes = len(classes)
start_mismatch = [list([]) for i in range(num_classes)]
stop_mismatch = [list([]) for i in range(num_classes)]
# For each Underfill, Overfill
prev_truth = -1
for i in range(truth.shape[0]):
cur_truth = int(truth[i])
# Overfill/Underfill only occur at the boundary of any activity event, so look for the boundary first
if cur_truth != prev_truth:
truth_time = time_list[i]
# Check the start boundary
if truth[i] == prediction[i]:
# If current prediction is correct, then it can only be overfill of current truth label.
j = i - 1
while j >= 0 and prediction_scoring[j] == 'O':
j -= 1
# If there is no overfill for cur_truth, and the current truth and prediction are the same,
# then there is no start_boundary mismatch.
start_mismatch[cur_truth].append((time_list[j + 1] - truth_time).total_seconds())
else:
# If current prediction is incorrect, then it can only be underfill of current truth label at start
# boundary.
j = i
while j < truth.shape[0] and truth_scoring[j] == 'U':
j += 1
if j != i and j < truth.shape[0]:
start_mismatch[cur_truth].append((time_list[j-1] - truth_time).total_seconds())
# Check the stop boundary
if i > 0:
if prediction[i-1] == truth[i-1]:
# Previous prediction is correct, then it can only be overfill of previous truth.
# If there is no overfill, the stop boundary is accurate
j = i
while prediction_scoring[j] == 'o':
j += 1
stop_mismatch[prev_truth].append((time_list[j-1] - truth_time).total_seconds())
else:
# Check Underfill for prev_truth (at the stop boundary)
j = i - 1
while j >= 0 and truth_scoring[j] == 'u':
j -= 1
if j != i - 1:
stop_mismatch[prev_truth].append((time_list[j + 1] - truth_time).total_seconds())
if prev_truth != -1:
if len(stop_mismatch[prev_truth]) > 0 and abs(stop_mismatch[prev_truth][-1]) > 1800:
logger.warning('Stop mismatch is over half an hour: %s at %d (%s) - %f' %
(classes[prev_truth], i, time_list[i],
stop_mismatch[prev_truth][-1]))
if len(start_mismatch[cur_truth]) > 0 and abs(start_mismatch[cur_truth][-1]) > 1800:
logger.warning('Start mismatch is over half an hour: %s at %d (%s) - %f' %
(classes[cur_truth], i, time_list[i],
start_mismatch[cur_truth][-1]))
# Update prev truth
prev_truth = cur_truth
# Sort all arrays
for i in range(num_classes):
start_mismatch[i].sort()
stop_mismatch[i].sort()
# Return
return start_mismatch, stop_mismatch
def generate_latex_table(methods, classes, recall_metrics, precision_matrics,
background_class=None, filename=None,
as_percent=True, metric_name='recall'):
bg_class_id = _get_bg_class_id(classes, background_class)
metric_labels, metric_indices = _get_metric_label_dict(metric_name='recall')
rmp = _gether_per_class_metrics(methods, classes, recall_metrics, True,
metric_labels, metric_indices)
rmr = _gether_per_class_metrics(methods, classes, recall_metrics, False,
metric_labels, metric_indices)
metric_labels, metric_indices = _get_metric_label_dict(metric_name='precision')
pmp = _gether_per_class_metrics(methods, classes, precision_matrics, True,
metric_labels, metric_indices)
pmr = _gether_per_class_metrics(methods, classes, precision_matrics, False,
metric_labels, metric_indices)
if filename is None:
f = sys.stdout
else:
f = open(filename, 'w')
f.write('\\multirow{2}{*}{Models} & \\multirow{2}{*}{Activities} & '
'\\multirow{2}{*}{Total Truth} & \\multicolumn{2}{|c|}{Recall} & '
'\\multirow{2}{*}{Total Prediction} & \\multicolumn{2}{|c|}{Precision} \\\\ \\hline\n')
f.write('& & & C only & U included & & C only & O included \\\\ \\hline \n')
for i, method in enumerate(methods):
f.write('\\multirow{%d}{*}{%s} & ' % (len(classes), method.replace('_', '\_')))
for j, target in enumerate(classes):
if j != 0:
f.write('& ')
f.write('%s & '
'%d & %d (%.2f) & %d (%.2f) & '
'%d & %d (%.2f) & %d (%.2f) \\\\ \n' %
(target.replace('_', '\_'),
rmr[i][j,:].sum(), rmr[i][j,0], rmp[i][j,0],
rmr[i][j,0]+rmr[i][j,1]+rmr[i][j,2], rmp[i][j,0]+rmp[i][j,1]+rmp[i][j,2],
pmr[i][j,:].sum(), pmr[i][j,0], pmp[i][j,0],
pmr[i][j,0]+pmr[i][j,1]+pmr[i][j,2], pmp[i][j,0]+pmp[i][j,1]+pmp[i][j,2],
)
)
f.write('\\hline\n')
f.close()
def generate_seg_latex_table(methods, classes, recall_metrics, precision_matrics,
background_class=None, filename=None):
bg_class_id = _get_bg_class_id(classes, background_class)
metric_labels, metric_indices = _get_metric_label_dict(metric_name='recall')
rmp = _gether_per_class_metrics(methods, classes, recall_metrics, True,
metric_labels, metric_indices)
rmr = _gether_per_class_metrics(methods, classes, recall_metrics, False,
metric_labels, metric_indices)
metric_labels, metric_indices = _get_metric_label_dict(metric_name='precision')
pmp = _gether_per_class_metrics(methods, classes, precision_matrics, True,
metric_labels, metric_indices)
pmr = _gether_per_class_metrics(methods, classes, precision_matrics, False,
metric_labels, metric_indices)
if filename is None:
f = sys.stdout
else:
f = open(filename, 'w')
f.write('Metric & Activities')
for method in methods:
f.write('& %s' % method.replace('_', '\_'))
f.write('\\\\ \\hline \n')
for i, activity in enumerate(classes):
if i != bg_class_id:
if i == 0:
f.write('\multirow{%d}{*}{Recall} & ' % (len(classes) - 1))
else:
f.write(' & ')
f.write('%s ' % activity.replace('_', '\_'))
# Find maximum and store index
temp_array = np.array([rmp[j][i,0] for j in range(len(methods))])
max_index = temp_array.argpartition(-2)[-2:]
for j, method in enumerate(methods):
if j in max_index:
f.write('& \\textbf{%d/%.2f\\%%} ' % (rmr[j][i,0], rmp[j][i,0]* 100))
else:
f.write('& %d/%.2f\\%% ' % (rmr[j][i,0], rmp[j][i,0]* 100))
f.write('\\\\ \n')
f.write('\\hline \n')
for i, activity in enumerate(classes):
if i != bg_class_id:
if i == 0:
f.write('\multirow{%d}{*}{Precision} & ' % (len(classes) - 1))
else:
f.write(' & ')
f.write('%s ' % activity.replace('_', '\_'))
# Find maximum and store index
temp_array = np.array([pmp[j][i,0] for j in range(len(methods))])
max_index = temp_array.argpartition(-2)[-2:]
for j, method in enumerate(methods):
if j in max_index:
f.write('& \\textbf{%d/%.2f\\%%} ' % (pmr[j][i,0], pmp[j][i,0]* 100))
else:
f.write('& %d/%.2f\\%% ' % (pmr[j][i,0], pmp[j][i,0]* 100))
f.write('\\\\ \n')
f.write('\\hline \n')
def generate_event_recall_table(methods, classes, recall_metrics,
background_class=None, filename=None):
bg_class_id = _get_bg_class_id(classes, background_class)
metric_labels, metric_indices = _get_metric_label_dict(metric_name='recall')
rmp = _gether_per_class_metrics(methods, classes, recall_metrics, True,
metric_labels, metric_indices)
rmr = _gether_per_class_metrics(methods, classes, recall_metrics, False,
metric_labels, metric_indices)
if filename is None:
f = sys.stdout
else:
f = open(filename, 'w')
f.write('Activities')
for method in methods:
f.write('& %s' % method.replace('_', '\_'))
f.write('\\\\ \\hline \n')
for i, activity in enumerate(classes):
if i != bg_class_id:
f.write(' & ')
f.write('%s ' % activity.replace('_', '\_'))
# Find maximum and store index
temp_array = np.array([rmp[j][i, 0] for j in range(len(methods))])
max_index = temp_array.argpartition(-2)[-2:]
for j, method in enumerate(methods):
if j in max_index:
f.write('& \\textbf{%.2f\\%%} ' % (rmp[j][i,0]* 100))
else:
f.write('& %.2f\\%% ' % (rmp[j][i,0]* 100))
f.write('\\\\ | |
cur.execute('SELECT COUNT(*) AS count, XImage.id, path FROM XImage, XBelonging WHERE XImage.id=XBelonging.ximage_id GROUP BY XImage.id ORDER BY count DESC LIMIT 1;').fetchone()
print('Items per image:\n Minimum: %d (%s)\n Maximum: %d (%s)\n Average: %.1f' % (min_items_per_image, min_items_per_image_path, max_items_per_image, max_items_per_image_path, round(avg_items_per_image, 1)))
except ValueError:
print('No items found')
try:
(avg_blobs_per_item,) = cur.execute('SELECT AVG(count) FROM (SELECT COUNT(*) AS count, XBelonging.xitem_id FROM XBlob, XBelonging, XImage WHERE XImage.id=XBelonging.ximage_id AND XBelonging.id=XBlob.xbelonging_id GROUP BY XBelonging.xitem_id);').fetchone()
(min_blobs_per_item, min_blobs_per_item_uuid) = cur.execute('SELECT COUNT(*) AS count, XBelonging.xitem_id FROM XBlob, XBelonging, XImage WHERE XImage.id=XBelonging.ximage_id AND XBelonging.id=XBlob.xbelonging_id GROUP BY XBelonging.xitem_id ORDER BY count ASC LIMIT 1;').fetchone()
(max_blobs_per_item, max_blobs_per_item_uuid) = cur.execute('SELECT COUNT(*) AS count, XBelonging.xitem_id FROM XBlob, XBelonging, XImage WHERE XImage.id=XBelonging.ximage_id AND XBelonging.id=XBlob.xbelonging_id GROUP BY XBelonging.xitem_id ORDER BY count DESC LIMIT 1;').fetchone()
(avg_blobs_per_image,) = cur.execute('SELECT AVG(count) FROM (SELECT COUNT(*) AS count, XImage.id FROM XBlob, XBelonging, XImage WHERE XImage.id=XBelonging.ximage_id AND XBelonging.id=XBlob.xbelonging_id GROUP BY XImage.id);').fetchone()
(min_blobs_per_image, _, min_blobs_per_image_path) = cur.execute('SELECT COUNT(*) AS count, XImage.id, path FROM XBlob, XBelonging, XImage WHERE XImage.id=XBelonging.ximage_id AND XBelonging.id=XBlob.xbelonging_id GROUP BY XImage.id ORDER BY count ASC LIMIT 1;').fetchone()
(max_blobs_per_image, _, max_blobs_per_image_path) = cur.execute('SELECT COUNT(*) AS count, XImage.id, path FROM XBlob, XBelonging, XImage WHERE XImage.id=XBelonging.ximage_id AND XBelonging.id=XBlob.xbelonging_id GROUP BY XImage.id ORDER BY count DESC LIMIT 1;').fetchone()
(xblob_minarea, xblob_minarea_path) = cur.execute('SELECT area, path FROM XBlob, XBelonging, XImage WHERE XImage.id=XBelonging.ximage_id AND XBelonging.id=XBlob.xbelonging_id ORDER BY area ASC LIMIT 1;').fetchone()
(xblob_maxarea, xblob_maxarea_path) = cur.execute('SELECT area, path FROM XBlob, XBelonging, XImage WHERE XImage.id=XBelonging.ximage_id AND XBelonging.id=XBlob.xbelonging_id ORDER BY area DESC LIMIT 1;').fetchone()
(xblob_avgarea,) = cur.execute('SELECT AVG(area) FROM XBlob, XBelonging, XImage WHERE XImage.id=XBelonging.ximage_id AND XBelonging.id=XBlob.xbelonging_id;').fetchone()
print('Blobs per item:\n Minimum: %d (%s)\n Maximum: %d (%s)\n Average: %.1f' % (min_blobs_per_item, str(min_blobs_per_item_uuid), max_blobs_per_item, str(max_blobs_per_item_uuid), round(avg_blobs_per_item, 1)))
print('Blobs per image:\n Minimum: %d (%s)\n Maximum: %d (%s)\n Average: %.1f' % (min_blobs_per_image, min_blobs_per_image_path, max_blobs_per_image, max_blobs_per_image_path, round(avg_blobs_per_image, 1)))
print('Blobs areas:\n Minimum: %dpx (%s)\n Maximum: %dpx (%s)\n Average: %dpx' % (xblob_minarea, xblob_minarea_path, xblob_maxarea, xblob_maxarea_path, xblob_avgarea))
except ValueError:
print('No blobs found')
def ximage_query(args):
class XEvalContext(object):
def __init__(self, cur):
self.cur = cur
self.cur.execute('SELECT path FROM XImage;')
self.all_paths = self._fetch_all()
self.reset()
def push_param(self, p):
n = 'x%d' % (len(self.params),)
self.params[n] = p
return ':%s' % (n,)
def execute_query(self):
from_clause = ', '.join(self.from_tables)
where_clause = ' AND '.join(self.where_conjs)
groupby_clause = '' if len(self.having_conjs) == 0 else ' GROUP BY path HAVING %s' % (' AND '.join(self.having_conjs),)
query = 'SELECT path FROM %s WHERE %s%s;' % (from_clause, where_clause, groupby_clause)
#print query, self.params
self.cur.execute(query, self.params)
return self._fetch_all()
def reset(self):
self.params = {}
self.where_conjs = set()
self.from_tables = set()
self.having_conjs = set()
def _fetch_all(self):
return set([ r[0] for r in self.cur.fetchall() ])
def xeval_num(node):
return node.n
def xeval_str(node):
return node.s
def xeval_attribute(node, ctx):
assert type(node.value) == ast.Name
t = node.value.id.capitalize()
if t in [ 'Acquisition', 'Setup' ]:
ctx.from_tables.update([ 'XImage', 'XImageParam AS Acquisition', 'XImageParam AS Setup' ])
ctx.where_conjs.update([ 'Acquisition.param_type=0', 'Acquisition.ximage_id=XImage.id', 'Setup.param_type=1', 'Setup.ximage_id=XImage.id' ])
ctx.where_conjs.add('%s.name=%s' % (t, ctx.push_param(node.attr)))
return 'xvalue_parse(%s.val)' % (t,)
elif t == 'Item':
ctx.from_tables.update([ 'XImage', 'XBelonging', 'XBlob', 'XClass' ])
ctx.where_conjs.update([ 'XImage.id=XBelonging.ximage_id', 'XBlob.xbelonging_id=XBelonging.id', 'XBlob.xclass_id=XClass.id' ])
ctx.where_conjs.add('XClass.name=%s' % (ctx.push_param(node.attr),))
return '*'
else:
pass # raise
def xeval_call(node, ctx):
fn = node.func.id.lower()
if fn == 'count':
assert len(node.args) == 1 and type(node.args[0]) == ast.Attribute
return True, 'COUNT(%s)' % (xeval_attribute(node.args[0], ctx),)
elif fn == 'area':
assert len(node.args) == 1 and type(node.args[0]) == ast.Attribute
xeval_attribute(node.args[0], ctx)
return True, 'XBlob.area'
elif fn == 'areas':
assert len(node.args) == 1 and type(node.args[0]) == ast.Attribute
xeval_attribute(node.args[0], ctx)
return True, 'SUM(XBlob.area)'
else:
pass # Raise
def xeval_unaryop(node, ctx):
if type(node.op) == ast.Not:
return ctx.all_paths - xeval(node.operand, ctx)
else:
pass # Raise
def xeval_boolop(node, ctx):
values = [ xeval(v, ctx) for v in node.values ]
if type(node.op) == ast.And:
return reduce(set.intersection, values, ctx.all_paths)
elif type(node.op) == ast.Or:
return reduce(set.union, values, set())
else:
pass # Raise
def xeval_compare(node, ctx):
comparators = [ node.left ] + node.comparators
paths = ctx.all_paths
for op, x, y in zip(map(type, node.ops), comparators[:-1], comparators[1:]):
if op == ast.Lt:
op_str = '<'
elif op == ast.LtE:
op_str = '<='
elif op == ast.Gt:
op_str = '>'
elif op == ast.GtE:
op_str = '>='
elif op == ast.Eq:
op_str = '='
elif op == ast.NotEq:
op_str = '<>'
else:
pass # raise
comps = [ '', '' ]
conjs = ctx.where_conjs
for i, z in enumerate([ x, y ]):
if type(z) == ast.Call:
h, comps[i] = xeval_call(z, ctx)
if h:
conjs = ctx.having_conjs
elif type(z) == ast.Attribute:
comps[i] = xeval_attribute(z, ctx)
elif type(z) == ast.Str:
comps[i] = ctx.push_param(xeval_str(z))
elif type(z) == ast.Num:
comps[i] = ctx.push_param(xeval_num(z))
else:
pass # raise
conjs.add('%s%s%s' % (comps[0], op_str, comps[1]))
#
paths = paths.intersection(ctx.execute_query())
ctx.reset()
if len(paths) == 0:
break
return paths
def xeval(node, ctx):
if type(node) == ast.UnaryOp:
return xeval_unaryop(node, ctx)
elif type(node) == ast.BoolOp:
return xeval_boolop(node, ctx)
elif type(node) == ast.Compare:
return xeval_compare(node, ctx)
else:
pass # raise
try:
conn = _ximage_index_connect(args)
except IOError as e:
sys.stderr.write('Error: cannot open index: %s\n' % (str(e),))
return 1
except ImportError:
sys.stderr.write('Error: cannot import sqlite3 module\n')
return -1
query = ' '.join(args.query)
if query is None or len(query.strip()) == 0:
paths = XEvalContext(conn.cursor()).all_paths
else:
root = ast.parse(query, '<query>', 'eval')
paths = xeval(root.body, XEvalContext(conn.cursor()))
print('\n'.join(sorted(paths)))
return 0
def ximage_main(prog_name='ximage'):
parser = argparse.ArgumentParser(prog=prog_name, description='Manipulate images along with its metadata')
subparsers = parser.add_subparsers(help='sub-commands help')
parser_import = subparsers.add_parser('import', help='Add blobs and metadata to an image, importing index mask')
parser_import.add_argument('-K', '--classes', type=str, required=False, nargs='+', default=[], help='List of classes, 0-indexed')
parser_import.add_argument('-U', '--uuids', type=str, required=False, nargs='+', default=[], help='List of UUIDs (0 to generate)')
parser_import.add_argument('-C', '--colors', type=str, required=False, nargs='+', default=[], help='List of classes\' colors')
parser_import.add_argument('mask', type=str, help='Index mask path')
parser_import.add_argument('path', type=str, help='Image path')
parser_import.set_defaults(func=ximage_import)
parser_export = subparsers.add_parser('export', help='Export index mask from an image')
parser_export.add_argument('path', type=str, help='Image path')
parser_export.add_argument('mask', type=str, help='Index mask path')
parser_export.set_defaults(func=ximage_export)
parser_inject = subparsers.add_parser('inject', help='Add blobs and metadata to an image')
parser_inject.add_argument('metadata', type=str, help='XML')
parser_inject.add_argument('path', type=str, help='Image path')
parser_inject.set_defaults(func=ximage_inject)
parser_extract = subparsers.add_parser('extract', help='Extract blobs and metadata from an image')
parser_extract.add_argument('path', type=str, help='Image path')
parser_extract.set_defaults(func=ximage_extract)
parser_update = subparsers.add_parser('update', help='Update image metadata with XML')
parser_update.add_argument('-f', '--overwrite', action='store_true', required=False, default=False, help='Overwrite present values (default: no)')
parser_update.add_argument('-K', '--replace-classes', action='store_true', required=False, default=False, help='Overwrite all defined classes (default: no)')
parser_update.add_argument('metadata', type=str, help='Metadata to update with')
parser_update.add_argument('path', type=str, help='Image path')
parser_update.add_argument('mapping', nargs=argparse.REMAINDER)
parser_update.set_defaults(func=ximage_update)
parser_uuid = subparsers.add_parser('uuid', help='Get/set items UUIDs (left to right, top to bottom)')
parser_uuid.add_argument('-U', '--uuids', type=str, required=False, nargs='+', default=[], help='List of new UUIDs (0 to skip)')
parser_uuid.add_argument('path', type=str, help='Image path')
parser_uuid.set_defaults(func=ximage_uuid)
parser_view = subparsers.add_parser('view', help='View images, blobs and other metadata')
parser_view.add_argument('-m', '--metadata', type=str, required=False, default=None, help='Use this XML instead of image\'s XMP')
parser_view.add_argument('-o', '--output_path', type=str, required=False, default=None, help='Output image path')
parser_view.add_argument('path', type=str, help='Image path')
parser_view.set_defaults(func=ximage_view)
parser_index = subparsers.add_parser('index', help='Index a directory (recursively) of XImages')
parser_index.add_argument('root', type=str, help='Root directory path')
parser_index.set_defaults(func=ximage_index)
parser_query = subparsers.add_parser('query', help='Query on indexed directory of XImages')
parser_query.add_argument('-D', '--root', type=str, required=False, default=os.getcwd(), help='Root directory path (default: cwd)')
parser_query.add_argument('query', nargs=argparse.REMAINDER)
parser_query.set_defaults(func=ximage_query)
parser_stats = subparsers.add_parser('stats', help='Show some indexed directory statistics')
parser_stats.add_argument('-D', '--root', type=str, required=False, default=os.getcwd(), help='Root directory path (default: cwd)')
parser_stats.set_defaults(func=ximage_stats)
args = parser.parse_args()
sys.exit(args.func(args))
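# --- Example invocations (editor's sketch) ---
# File names, class names and the query expression below are hypothetical;
# the flags follow the argparse setup above, and the query grammar follows
# the xeval_* helpers in ximage_query (count()/area()/areas(), item.<class>,
# acquisition.<param>, setup.<param>, and/or/not, comparisons).
#
#   ximage import -K scratch dent -C red blue mask.png photo.jpg
#   ximage view -o annotated.jpg photo.jpg
#   ximage index ./dataset
#   ximage query -D ./dataset "count(item.scratch) > 1 and acquisition.exposure < 10"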
_COLORS = dict(
maroon=(0x00, 0x00, 0x80),
darkred=(0x00, 0x00, 0x8b),
red=(0x00, 0x00, 0xff),
lightpink=(0xc1, 0xb6, 0xff),
crimson=(0x3c, 0x14, 0xdc),
palevioletred=(0x93, 0x70, 0xdb),
hotpink=(0xb4, 0x69, 0xff),
deeppink=(0x93, 0x14, 0xff),
mediumvioletred=(0x85, 0x15, 0xc7),
purple=(0x80, 0x00, 0x80),
darkmagenta=(0x8b, 0x00, 0x8b),
orchid=(0xd6, 0x70, 0xda),
thistle=(0xd8, 0xbf, 0xd8),
plum=(0xdd, 0xa0, 0xdd),
violet=(0xee, 0x82, 0xee),
fuchsia=(0xff, 0x00, 0xff),
magenta=(0xff, 0x00, 0xff),
mediumorchid=(0xd3, 0x55, 0xba),
darkviolet=(0xd3, 0x00, 0x94),
darkorchid=(0xcc, 0x32, 0x99),
blueviolet=(0xe2, 0x2b, 0x8a),
indigo=(0x82, 0x00, 0x4b),
mediumpurple=(0xdb, 0x70, 0x93),
slateblue=(0xcd, 0x5a, 0x6a),
mediumslateblue=(0xee, 0x68, 0x7b),
darkblue=(0x8b, 0x00, 0x00),
mediumblue=(0xcd, 0x00, 0x00),
blue=(0xff, 0x00, 0x00),
navy=(0x80, 0x00, 0x00),
midnightblue=(0x70, 0x19, 0x19),
darkslateblue=(0x8b, 0x3d, 0x48),
royalblue=(0xe1, 0x69, 0x41),
cornflowerblue=(0xed, 0x95, 0x64),
lightsteelblue=(0xde, 0xc4, 0xb0),
aliceblue=(0xff, 0xf8, 0xf0),
ghostwhite=(0xff, 0xf8, 0xf8),
lavender=(0xfa, 0xe6, 0xe6),
dodgerblue=(0xff, 0x90, 0x1e),
steelblue=(0xb4, 0x82, 0x46),
deepskyblue=(0xff, 0xbf, 0x00),
slategray=(0x90, 0x80, 0x70),
lightslategray=(0x99, 0x88, 0x77),
lightskyblue=(0xfa, 0xce, 0x87),
skyblue=(0xeb, 0xce, 0x87),
lightblue=(0xe6, 0xd8, 0xad),
teal=(0x80, 0x80, 0x00),
darkcyan=(0x8b, 0x8b, 0x00),
darkturquoise=(0xd1, 0xce, 0x00),
cyan=(0xff, 0xff, 0x00),
mediumturquoise=(0xcc, 0xd1, 0x48),
cadetblue=(0xa0, 0x9e, 0x5f),
paleturquoise=(0xee, 0xee, 0xaf),
lightcyan=(0xff, 0xff, 0xe0),
azure=(0xff, 0xff, 0xf0),
lightseagreen=(0xaa, 0xb2, 0x20),
turquoise=(0xd0, 0xe0, 0x40),
powderblue=(0xe6, 0xe0, 0xb0),
darkslategray=(0x4f, 0x4f, 0x2f),
aquamarine=(0xd4, 0xff, 0x7f),
mediumspringgreen=(0x9a, 0xfa, 0x00),
mediumaquamarine=(0xaa, | |
# QUANTAXIS/QAFetch/QAQuery.py
# coding: utf-8
#
# The MIT License (MIT)
#
# Copyright (c) 2016-2018 yutiansut/QUANTAXIS
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import datetime
import numpy
import pandas as pd
from pandas import DataFrame
from QUANTAXIS.QAUtil import (DATABASE, QA_Setting, QA_util_date_stamp,
QA_util_date_valid, QA_util_dict_remove_key,
QA_util_log_info, QA_util_code_tolist, QA_util_date_str2int, QA_util_date_int2str,
QA_util_sql_mongo_sort_DESCENDING,
QA_util_time_stamp, QA_util_to_json_from_pandas,
trade_date_sse)
from QUANTAXIS.QAData.financial_mean import financial_dict
"""
Fetch data from the database on demand and convert it to numpy structures.
2018-07-30: added batch_size, which achieves transfer speeds of 8 MB/s - 30 MB/s.
"""
def QA_fetch_stock_day(code, start, end, format='numpy', frequence='day', collections=DATABASE.stock_day):
'Fetch stock daily bars'
start = str(start)[0:10]
end = str(end)[0:10]
#code= [code] if isinstance(code,str) else code
# code checking
code = QA_util_code_tolist(code)
if QA_util_date_valid(end):
__data = []
cursor = collections.find({
'code': {'$in': code}, "date_stamp": {
"$lte": QA_util_date_stamp(end),
"$gte": QA_util_date_stamp(start)}}, batch_size=10000)
#res=[QA_util_dict_remove_key(data, '_id') for data in cursor]
res = pd.DataFrame([item for item in cursor])
try:
res = res.drop('_id', axis=1).assign(volume=res.vol, date=pd.to_datetime(
res.date)).drop_duplicates((['date', 'code'])).query('volume>1').set_index('date', drop=False)
res = res.ix[:, ['code', 'open', 'high', 'low',
'close', 'volume', 'amount', 'date']]
except:
res = None
if format in ['P', 'p', 'pandas', 'pd']:
return res
elif format in ['json', 'dict']:
return QA_util_to_json_from_pandas(res)
# Multiple output formats
elif format in ['n', 'N', 'numpy']:
return numpy.asarray(res)
elif format in ['list', 'l', 'L']:
return numpy.asarray(res).tolist()
else:
print("QA Error QA_fetch_stock_day format parameter %s is none of \"P, p, pandas, pd , json, dict , n, N, numpy, list, l, L, !\" " % format)
return None
else:
QA_util_log_info(
'QA Error QA_fetch_stock_day data parameter start=%s end=%s is not right' % (start, end))
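# --- Usage sketch (editor's addition; not part of the original module) ---
# Assumes a local MongoDB populated by the QUANTAXIS save/update scripts;
# the codes and date range below are illustrative only.
#
#   df = QA_fetch_stock_day('000001', '2018-01-01', '2018-06-30', format='pd')
#   arr = QA_fetch_stock_day(['000001', '600000'], '2018-01-01', '2018-06-30')  # numpy by default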
def QA_fetch_stock_min(code, start, end, format='numpy', frequence='1min', collections=DATABASE.stock_min):
'Fetch stock minute bars'
if frequence in ['1min', '1m']:
frequence = '1min'
elif frequence in ['5min', '5m']:
frequence = '5min'
elif frequence in ['15min', '15m']:
frequence = '15min'
elif frequence in ['30min', '30m']:
frequence = '30min'
elif frequence in ['60min', '60m']:
frequence = '60min'
else:
print("QA Error QA_fetch_stock_min parameter frequence=%s is none of 1min 1m 5min 5m 15min 15m 30min 30m 60min 60m" % frequence)
__data = []
# code checking
code = QA_util_code_tolist(code)
cursor = collections.find({
'code': {'$in': code}, "time_stamp": {
"$gte": QA_util_time_stamp(start),
"$lte": QA_util_time_stamp(end)
}, 'type': frequence
}, batch_size=10000)
res = pd.DataFrame([item for item in cursor])
try:
res = res.drop('_id', axis=1).assign(volume=res.vol, datetime=pd.to_datetime(
res.datetime)).query('volume>1').drop_duplicates(['datetime', 'code']).set_index('datetime', drop=False)
# return res
except:
res = None
if format in ['P', 'p', 'pandas', 'pd']:
return res
elif format in ['json', 'dict']:
return QA_util_to_json_from_pandas(res)
# Multiple output formats
elif format in ['n', 'N', 'numpy']:
return numpy.asarray(res)
elif format in ['list', 'l', 'L']:
return numpy.asarray(res).tolist()
else:
print("QA Error QA_fetch_stock_min format parameter %s is none of \"P, p, pandas, pd , json, dict , n, N, numpy, list, l, L, !\" " % format)
return None
def QA_fetch_trade_date():
'Fetch trading dates'
return trade_date_sse
def QA_fetch_stock_list(collections=DATABASE.stock_list):
'Fetch the stock list'
return [item for item in collections.find()]
def QA_fetch_index_list(collections=DATABASE.index_list):
'Fetch the index list'
return [item for item in collections.find()]
def QA_fetch_stock_terminated(collections=DATABASE.stock_terminated):
'Fetch basic stock info: the list of delisted (terminated) stocks'
items = [item for item in collections.find()]
# 🛠todo convert to a DataFrame
return items
def QA_fetch_stock_basic_info_tushare(collections=DATABASE.stock_info_tushare):
'''
purpose:
tushare stock list database
code, ticker code
name, name
industry, industry
area, region
pe, price/earnings ratio
outstanding, floating share capital (100 million shares)
totals, total share capital (100 million shares)
totalAssets, total assets (10 thousand)
liquidAssets, liquid assets
fixedAssets, fixed assets
reserved, capital reserve
reservedPerShare, capital reserve per share
esp, earnings per share
bvps, net assets per share
pb, price/book ratio
timeToMarket, listing date
undp, undistributed profit
perundp, undistributed profit per share
rev, revenue growth YoY (%)
profit, profit growth YoY (%)
gpr, gross profit margin (%)
npr, net profit margin (%)
holders, number of shareholders
added by tauruswang
:param collections: the stock_info_tushare collection
:return:
'''
'Fetch basic stock info'
items = [item for item in collections.find()]
# 🛠todo convert to a DataFrame
return items
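# --- Usage sketch (editor's addition; not part of the original module) ---
# Each element returned above is a dict carrying the tushare fields documented
# in the docstring; stored values follow tushare's conventions (industry names,
# for instance, are Chinese labels). A hypothetical filter:
#
#   small_float = [row['code'] for row in QA_fetch_stock_basic_info_tushare()
#                  if row.get('outstanding', 0) < 1]  # float below 100 million shares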
def QA_fetch_stock_to_market_date(stock_code):
'''
Look up the listing date in the tushare database
:param stock_code: '600001'
:return: string listing date, e.g. '2018-05-15'
'''
items = QA_fetch_stock_basic_info_tushare()
for row in items:
if row['code'] == stock_code:
return row['timeToMarket']
def QA_fetch_stock_full(date, format='numpy', collections=DATABASE.stock_day):
'Fetch full-market data for a single day'
Date = str(date)[0:10]
if QA_util_date_valid(Date) is True:
__data = []
for item in collections.find({
"date_stamp": QA_util_date_stamp(Date)}, batch_size=10000):
__data.append([str(item['code']), float(item['open']), float(item['high']), float(
item['low']), float(item['close']), float(item['vol']), item['date']])
# Multiple output formats
if format in ['n', 'N', 'numpy']:
__data = numpy.asarray(__data)
elif format in ['list', 'l', 'L']:
__data = __data
elif format in ['P', 'p', 'pandas', 'pd']:
__data = DataFrame(__data, columns=[
'code', 'open', 'high', 'low', 'close', 'volume', 'date'])
__data['date'] = pd.to_datetime(__data['date'])
__data = __data.set_index('date', drop=False)
else:
print("QA Error QA_fetch_stock_full format parameter %s is none of \"P, p, pandas, pd , json, dict , n, N, numpy, list, l, L, !\" " % format)
return __data
else:
QA_util_log_info(
'QA Error QA_fetch_stock_full data parameter date=%s not right' % date)
def QA_fetch_index_day(code, start, end, format='numpy', collections=DATABASE.index_day):
'Fetch index daily bars'
start = str(start)[0:10]
end = str(end)[0:10]
code = QA_util_code_tolist(code)
if QA_util_date_valid(end) == True:
__data = []
cursor = collections.find({
'code': {'$in': code}, "date_stamp": {
"$lte": QA_util_date_stamp(end),
"$gte": QA_util_date_stamp(start)}}, batch_size=10000)
if format in ['dict', 'json']:
return [data for data in cursor]
for item in cursor:
__data.append([str(item['code']), float(item['open']), float(item['high']), float(
item['low']), float(item['close']), int(item['up_count']), int(item['down_count']), float(item['vol']), float(item['amount']), item['date']])
        # multiple output formats
if format in ['n', 'N', 'numpy']:
__data = numpy.asarray(__data)
elif format in ['list', 'l', 'L']:
__data = __data
elif format in ['P', 'p', 'pandas', 'pd']:
__data = DataFrame(
__data, columns=['code', 'open', 'high', 'low', 'close', 'up_count', 'down_count', 'volume', 'amount', 'date'])
__data['date'] = pd.to_datetime(__data['date'])
__data = __data.set_index('date', drop=False)
else:
print("QA Error QA_fetch_index_day format parameter %s is none of \"P, p, pandas, pd , n, N, numpy !\" " % format)
return __data
else:
        QA_util_log_info('QA Error QA_fetch_index_day: invalid date range %s - %s' % (start, end))
def QA_fetch_index_min(
code,
start, end,
format='numpy',
frequence='1min',
collections=DATABASE.index_min):
    'Fetch index minute bars'
if frequence in ['1min', '1m']:
frequence = '1min'
elif frequence in ['5min', '5m']:
frequence = '5min'
elif frequence in ['15min', '15m']:
frequence = '15min'
elif frequence in ['30min', '30m']:
frequence = '30min'
elif frequence in ['60min', '60m']:
frequence = '60min'
__data = []
code = QA_util_code_tolist(code)
cursor = collections.find({
'code': {'$in': code}, "time_stamp": {
"$gte": QA_util_time_stamp(start),
"$lte": QA_util_time_stamp(end)
}, 'type': frequence
}, batch_size=10000)
if format in ['dict', 'json']:
return [data for data in cursor]
for item in cursor:
__data.append([str(item['code']), float(item['open']), float(item['high']), float(
item['low']), float(item['close']), int(item['up_count']), int(item['down_count']), float(item['vol']), float(item['amount']), item['datetime'], item['time_stamp'], item['date']])
__data = DataFrame(__data, columns=[
'code', 'open', 'high', 'low', 'close', 'up_count', 'down_count', 'volume', 'amount', 'datetime', 'time_stamp', 'date'])
__data['datetime'] = pd.to_datetime(__data['datetime'])
__data = __data.set_index('datetime', drop=False)
if format in ['numpy', 'np', 'n']:
return numpy.asarray(__data)
elif format in ['list', 'l', 'L']:
return numpy.asarray(__data).tolist()
elif format in ['P', 'p', 'pandas', 'pd']:
return __data
def QA_fetch_future_day(code, start, end, format='numpy', collections=DATABASE.future_day):
start = str(start)[0:10]
end = str(end)[0:10]
code = QA_util_code_tolist(code, auto_fill=False)
if QA_util_date_valid(end) == True:
__data = []
cursor = collections.find({
'code': {'$in': code}, "date_stamp": {
"$lte": QA_util_date_stamp(end),
"$gte": QA_util_date_stamp(start)}}, batch_size=10000)
if format in ['dict', 'json']:
return [data for data in cursor]
for item in cursor:
__data.append([str(item['code']), float(item['open']), float(item['high']), float(
item['low']), float(item['close']), float(item['position']), float(item['price']), float(item['trade']), item['date']])
        # multiple output formats
if format in ['n', 'N', 'numpy']:
__data = numpy.asarray(__data)
elif format in ['list', 'l', 'L']:
__data = __data
elif format in ['P', 'p', 'pandas', 'pd']:
__data = DataFrame(
__data, columns=['code', 'open', 'high', 'low', 'close', 'position', 'price', 'trade', 'date'])
__data['date'] = pd.to_datetime(__data['date'])
__data = __data.set_index('date', drop=False)
else:
print("QA Error QA_fetch_future_day format parameter %s is none of \"P, p, pandas, pd , n, N, numpy !\" " % format)
return __data
else:
        QA_util_log_info('QA Error QA_fetch_future_day: invalid date range %s - %s' % (start, end))
def QA_fetch_future_min(
code,
start, end,
format='numpy',
frequence='1min',
collections=DATABASE.future_min):
    'Fetch futures minute bars'
if frequence in ['1min', '1m']:
frequence = '1min'
elif frequence in ['5min', '5m']:
frequence = '5min'
elif frequence in ['15min', '15m']:
frequence = '15min'
elif frequence in ['30min', '30m']:
frequence = '30min'
elif frequence in ['60min', '60m']:
frequence = '60min'
__data = []
code = QA_util_code_tolist(code,auto_fill=False)
cursor = collections.find({
'code': {'$in': code}, "time_stamp": {
"$gte": QA_util_time_stamp(start),
"$lte": QA_util_time_stamp(end)
}, 'type': frequence
}, batch_size=10000)
if format in ['dict', 'json']:
return [data for data in cursor]
# -*- coding: utf-8 -*-
"""EDA_Loan Approval System.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1Sv6QW4TJX0C9sCKhxevV5ocqAyY1sxbx
# Problem Statement
## Business Use-case
- A home loan company/bank receives loan applications from various customers (loan applicants) and, based on certain inputs, validates each customer's eligibility for a loan.
- We want to automate the loan eligibility process (in real time) based on customer details obtained during the loan application. These details are Gender, Marital Status, Education, Number of Dependents, Income, Loan Amount, Credit History and others.
## Business goal
- If the loan approval process is automated, it can save a lot of man hours and improve the speed of service to the customers.
- The increase in customer satisfaction and savings in operational costs are significant.
- However, the benefits can only be reaped if the bank has a robust model to accurately predict which application it should approve and which to reject, in order to minimize the risk of loan default.
## Translating Problem into Data Science / Machine Learning use case
- This is a classification problem where we have to predict whether a loan will be approved or not.
- Specifically, it is a binary classification problem where we have to predict either one of the two classes given i.e. approved (Y) or not approved (N).
- Another way to frame the problem is to predict whether the loan is likely to default or not.
- The dependent variable is the Loan_Status, while the rest are independent variable or features.
- We need to develop a model using the features to predict the target variable.
"""
# Commented out IPython magic to ensure Python compatibility.
# import libraries
# %matplotlib inline
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
pd.set_option('display.max_rows', 500)
pd.set_option('display.max_columns', 500)
pd.set_option('display.width', 150)
import warnings
warnings.filterwarnings("ignore")
from google.colab import files
uploaded = files.upload()
"""## Exploratory Data Analysis (EDA)"""
# Import data
df = pd.read_csv("loan_train.csv")
print('Information on dataset:')
df.info()
print('Head:'); print(df.head()); print(); print('Tail:'); print(df.tail());
"""
## Hypotheses
Through this process, we list all the possible factors that can affect the outcome, i.e. which of the features will have an impact on whether a loan will be approved or not.
Some of the hypotheses are:
- Education - Applicants with higher education level i.e. graduate level should have higher chances of loan approval
- Income: Applicants with higher income should have more chances of loan approval
- Loan amount: If the loan amount is less, the chances of loan approval should be high
- Loan term: Loans with shorter time period should have higher chances of approval
- Previous credit history: Applicants who have repaid their previous debts should have higher chances of loan approval
- Monthly installment amount: If the monthly installment amount is low, the chances of loan approval should be high; and so on
Some of the hypotheses are intuitive while others may not be. We will try to validate each of these hypotheses against the dataset."""
df.describe()
"""# Exploratory data analysis
## Univariate analysis
###Target Variable (Categorical)
"""
plt.style.use('ggplot')
from pylab import rcParams
import matplotlib.ticker as mtick # For specifying the axes tick format
rcParams['figure.figsize']=10,6
# bar plot to visualize the frequency
ax = (df['Loan_Status'].value_counts()*100.0 /len(df)).plot(kind='bar', stacked = True, rot = 0)
ax.yaxis.set_major_formatter(mtick.PercentFormatter())
ax.set_ylabel('% Loan applications')
ax.set_xlabel('Status')
ax.set_title('Application Distribution')
# annotate each bar with its percentage (the bar heights are already percentages)
for i in ax.patches:
    # get_x() shifts the label to the right; get_height() places it just below the bar top
    ax.text(i.get_x()+.15, i.get_height()-3.5,
            str(round(i.get_height(), 1))+'%', color='white', weight='bold')
"""The loan of around 68.7% was approved. There is no imbalanced classes issue in this dataset, thus accuracy as an evaluation metric should be appropriate.
### Independent Variable (Categorical)
There are 5 features that are categorical or binary (Gender, Married, Self_Employed, Credit_History, Education)
"""
# Visualizing categorical features
# plt.figure(1)
plt.subplot(231)
df['Gender'].value_counts(normalize=True).plot.bar(figsize=(20,10), title= 'Gender')
plt.subplot(232)
df['Married'].value_counts(normalize=True).plot.bar(title= 'Married')
plt.subplot(233)
df['Self_Employed'].value_counts(normalize=True).plot.bar(title= 'Self_Employed')
plt.subplot(234)
df['Credit_History'].value_counts(normalize=True).plot.bar(title= 'Credit_History')
plt.subplot(235)
df['Education'].value_counts(normalize=True).plot.bar(title= 'Education')
plt.show()
"""- 80% applicants in the dataset are male.
- 65% of the applicants in the dataset are married.
- 15% applicants in the dataset are self employed.
- 85% applicants have credit history (repaid their debts).
- 80% of the applicants are Graduate.
### Independent Variable (Ordinal)
There are 2 features that are Ordinal: Variables in categorical features having some order involved (Dependents, Property_Area)
"""
# Remaining categorical features
plt.subplot(121)
df['Dependents'].value_counts(normalize=True).plot.bar(figsize=(10,4), title= 'Dependents')
plt.subplot(122)
df['Property_Area'].value_counts(normalize=True).plot.bar(title= 'Property_Area')
plt.show()
"""- More than half of the applicants don’t have any dependents.
- Most of the applicants are from Semiurban area.
### Independent Variable
There are 4 features that are Numerical:
- ApplicantIncome,
- CoapplicantIncome,
- LoanAmount,
- Loan_Amount_Term
"""
numerical_columns = ['ApplicantIncome', 'CoapplicantIncome', 'LoanAmount']
print(numerical_columns)
fig,axes = plt.subplots(1,3,figsize=(17,5))
for idx,cat_col in enumerate(numerical_columns):
sns.boxplot(y=cat_col,data=df,x='Loan_Status',ax=axes[idx])
print(df[numerical_columns].describe())
plt.subplots_adjust(hspace=1)
plt.tight_layout()
"""### Applicant income distribution"""
plt.subplot(121)
sns.distplot(df['ApplicantIncome']);
plt.subplot(122)
df['ApplicantIncome'].plot.box(figsize=(10,5))
plt.show()
"""- Most of the data in the distribution of applicant income is towards left which means it is not normally distributed.
- Distribution is right-skewed.
- We need to make it normal.
The boxplot confirms the presence of a lot of outliers/extreme values.
"""
# Income vs Education
df.boxplot(column='ApplicantIncome', by = 'Education')
plt.suptitle(""); plt.show()
"""higher number of graduates with very high incomes, which are appearing to be the outliers."""
# co-applicant income distribution
plt.subplot(121)
sns.distplot(df['CoapplicantIncome']);
plt.subplot(122)
df['CoapplicantIncome'].plot.box(figsize=(10,5))
plt.show()
"""Majority of coapplicant’s income ranges from 0 to 5000. We also see a lot of outliers in the coapplicant income and it is not normally distributed."""
# distribution of LoanAmount
plt.subplot(121)
sns.distplot(df['LoanAmount']);
plt.subplot(122)
df['LoanAmount'].plot.box(figsize=(10,5))
plt.show()
"""normal distribution but still slightly right-skewed for LoanAmount but there are lot of outliers in this variable. """
# distribution of Loan_Amount_Term
df['Loan_Amount_Term'].value_counts(normalize=True).plot.bar(title= 'Loan_Amount_Term')
plt.show()
"""- 85% of the loans are 360 months term/30 years period
## Bivariate Analysis
### Categorical Independent Variable vs Target Variable
"""
print(pd.crosstab(df['Gender'],df['Loan_Status']))
Gender = pd.crosstab(df['Gender'],df['Loan_Status'])
Gender.div(Gender.sum(1).astype(float), axis = 0).plot(kind="bar", stacked=True, figsize=(6,4))
plt.xlabel('Gender')
p = plt.ylabel('Percentage')
plt.show()
print(pd.crosstab(df['Married'],df['Loan_Status']))
Married = pd.crosstab(df['Married'],df['Loan_Status'])
Married.div(Married.sum(1).astype(float), axis=0).plot(kind="bar", stacked=True, figsize=(6,4))
plt.xlabel('Married')
p = plt.ylabel('Percentage')
print(pd.crosstab(df['Dependents'],df['Loan_Status']))
Dependents=pd.crosstab(df['Dependents'],df['Loan_Status'])
Dependents.div(Dependents.sum(1).astype(float), axis=0).plot(kind="bar", stacked=True)
plt.xlabel('Dependents')
p = plt.ylabel('Percentage')
print(pd.crosstab(df['Education'],df['Loan_Status']))
Education=pd.crosstab(df['Education'],df['Loan_Status'])
Education.div(Education.sum(1).astype(float), axis=0).plot(kind="bar", stacked=True, figsize=(6,4))
plt.xlabel('Education')
p = plt.ylabel('Percentage')
print(pd.crosstab(df['Self_Employed'], df['Loan_Status']))
Self_Employed=pd.crosstab(df['Self_Employed'],df['Loan_Status'])
Self_Employed.div(Self_Employed.sum(1).astype(float), axis=0).plot(kind="bar", stacked=True, figsize=(6,4))
plt.xlabel('Self_Employed')
p = plt.ylabel('Percentage')
print(pd.crosstab(df['Credit_History'], df['Loan_Status']))
Credit_History=pd.crosstab(df['Credit_History'], df['Loan_Status'])
Credit_History.div(Credit_History.sum(1).astype(float), axis=0).plot(kind="bar", stacked=True, figsize=(6,4))
plt.xlabel('Credit_History')
p = plt.ylabel('Percentage')
print(pd.crosstab(df['Property_Area'],df['Loan_Status']))
Property_Area=pd.crosstab(df['Property_Area'],df['Loan_Status'])
Property_Area.div(Property_Area.sum(1).astype(float), axis=0).plot(kind="bar", stacked=True)
plt.xlabel('Property_Area')
P = plt.ylabel('Percentage')
"""- proportion of male and female applicants is more or less same for both approved and unapproved loans
- proportion of married applicants is higher for the approved loans
distribution of applicants with 1 or 3+ dependents is similar across both the categories of Loan_Status there is nothing significant we can infer from Self_Employed vs Loan_Status plot.
- proportion of loans getting approved for graduates is higher compared to non-graduates it seems people with credit history as 1 are more likely to get their loans approved
- proportion of loans getting approved in semiurban area is higher as compared to that in rural or urban areas.
### Numerical Independent Variable vs Target Variable
"""
df[df['ApplicantIncome'] > 20000].sort_values(by = 'ApplicantIncome')
"""All applicants with an income greater than 20,000 have higher level of education, it seems reasonable that these applicants would have higher income. However, since most of the dataset is comprised of applicants with higher education this alone would not explain the difference. However, for most of these points there is nothing indicating we should remove these points.
If we look at row 409, we see that this applicant's income is the largest in our dataset, and suspiciously ends in three 0's. Futhermore, the property area is rural, the credit history is marked 0, and the loan status is marked as having been declined. Given this information it is most likely that the applicant income was entered incorrectly.
**We should drop this point**
"""
df = df.drop(409)
df[df['LoanAmount'] > 400 ].sort_values(by = 'ApplicantIncome')
"""Only 4 out of 15 of the loans were denied, but since many of the incomes are fairly high this doesn't seem completely unreasonable.
While some of these points could be questioned, we lack significant evidence that any of these points should be removed.
"""
print(df.groupby('Loan_Status')['ApplicantIncome'].mean())
df.groupby('Loan_Status')['ApplicantIncome'].mean().plot.bar()
P = plt.ylabel('mean applicant income')
plt.show()
# making bins for applicant income variable
bins = [0,2500,4000,6000,81000]
group = ['Low','Average','High', 'Very high']
df['Income_bin'] = pd.cut(df['ApplicantIncome'],bins,labels=group)
df.head()
print(pd.crosstab(df['Income_bin'],df['Loan_Status']))
Income_bin = pd.crosstab(df['Income_bin'],df['Loan_Status'])
Income_bin.div(Income_bin.sum(1).astype(float), axis=0).plot(kind="bar", stacked=True)
plt.xlabel('ApplicantIncome')
P = plt.ylabel('Percentage')
plt.show()
"""Applicant income does not affect the chances of loan approval which contradicts our hypothesis in which we assumed that if the applicant income is high the chances of loan approval will also be high"""
# making bins for Coapplicant income variable
bins = [0,1000,3000,42000]
group = ['Low','Average','High']
df['Coapplicant_Income_bin'] = pd.cut(df['CoapplicantIncome'],bins,labels=group)
# plot the chart
Coapplicant_Income_bin = pd.crosstab(df['Coapplicant_Income_bin'],df['Loan_Status'])
Coapplicant_Income_bin.div(Coapplicant_Income_bin.sum(1).astype(float), axis=0).plot(kind="bar", stacked=True)
plt.xlabel('CoapplicantIncome')
P = plt.ylabel('Percentage')
plt.show()
print(len(df[df["CoapplicantIncome"] == 0]))
"Percentage of CoapplicantIncome = 0 is:", len(df[df["CoapplicantIncome"] == 0])/len(df["CoapplicantIncome"])
"""- Historically if coapplicant’s income is less the chances of loan approval are high.
- However, most coapplicants in this dataset have zero income (as the percentage computed above shows), so this binned view should be interpreted with care."""
import h5py
import numpy as np
import pyxrf
from pyxrf.model.scan_metadata import *
from pyxrf.core.utils import *
from pyxrf.model.load_data_from_db import _get_fpath_not_existing, helper_encode_list
try:
from databroker.v0 import Broker
except ModuleNotFoundError:
from databroker import Broker
try:
from pyxrf.api_dev import db
except ImportError:
db = None
print("Error importing pyXRF. Continuing without import.")
if not db:
# Register the data broker
try:
db = Broker.named("srx")
except AttributeError:
db = Broker.named("temp")
print("Using temporary databroker.")
pyxrf_version = pyxrf.__version__
def _extract_metadata_from_header(hdr):
"""
    Extract metadata from the start and stop documents. Metadata extracted from other documents
    in the scan is beamline specific and is added to the dictionary at a later time.
"""
start_document = hdr.start
mdata = ScanMetadataXRF()
data_locations = {
"scan_id": ["scan_id"],
"scan_uid": ["uid"],
"scan_instrument_id": ["beamline_id"],
"scan_instrument_name": [],
"scan_time_start": ["time"],
"scan_time_start_utc": ["time"],
"instrument_mono_incident_energy": ["scan/energy"],
"instrument_beam_current": [],
"instrument_detectors": ["detectors"],
"sample_name": ["scan/sample_name"],
"experiment_plan_name": ["plan_name"],
"experiment_plan_type": ["plan_type"],
"experiment_fast_axis": ["scan/fast_axis/motor_name"],
"experiment_slow_axis": ["scan/slow_axis/motor_name"],
"proposal_num": ["proposal/proposal_num"],
"proposal_title": ["proposal/proposal_title"],
"proposal_PI_lastname": ["proposal/PI_lastname"],
"proposal_saf_num": ["proposal/saf_num"],
"proposal_cycle": ["proposal/cycle"]
}
for key, locations in data_locations.items():
# Go to the next key if no location is defined for the current key.
# No locations means that the data is not yet defined in start document on any beamline
# Multiple locations point to locations at different beamlines
if not locations:
continue
# For each metadata key there could be none, one or multiple locations in the start document
for loc in locations:
path = loc.split('/') #
ref = start_document
for n, p in enumerate(path):
if n >= len(path) - 1:
break
# 'ref' must always point to dictionary
if not isinstance(ref, dict):
ref = None
break
if p in ref:
ref = ref[p]
else:
ref = None
break
# At this point 'ref' must be a dictionary
value = None
if ref is not None and isinstance(ref, dict):
if path[-1] in ref:
value = ref[path[-1]]
# Now we finally arrived to the end of the path: the 'value' must be a scalar or a list
if value is not None and not isinstance(value, dict):
if path[-1] == 'time':
if key.endswith("_utc"):
value = convert_time_to_nexus_string(ttime.gmtime(value))
else:
value = convert_time_to_nexus_string(ttime.localtime(value))
mdata[key] = value
break
stop_document = hdr.stop
if stop_document:
if "time" in stop_document:
t = stop_document["time"]
mdata["scan_time_stop"] = convert_time_to_nexus_string(ttime.localtime(t))
mdata["scan_time_stop_utc"] = convert_time_to_nexus_string(ttime.gmtime(t))
if "exit_status" in stop_document:
mdata["scan_exit_status"] = stop_document["exit_status"]
else:
mdata["scan_exit_status"] = "incomplete"
# Add full beamline name (if available, otherwise don't create the entry).
# Also, don't overwrite the existing name if it was read from the start document
if "scan_instrument_id" in mdata and "scan_instrument_name" not in mdata:
instruments = {
"srx": "Submicron Resolution X-ray Spectroscopy",
"hxn": "Hard X-ray Nanoprobe",
"tes": "Tender Energy X-ray Absorption Spectroscopy",
"xfm": "X-ray Fluorescence Microprobe"
}
iname = instruments.get(mdata["scan_instrument_id"].lower(), "")
if iname:
mdata["scan_instrument_name"] = iname
return mdata
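# Self-contained illustration (not part of pyxrf) of the 'a/b/c' path lookup the
# loop above performs on the nested start document:
def _lookup_path_demo(document, dotted_path):
    ref = document
    for part in dotted_path.split('/'):
        if not isinstance(ref, dict) or part not in ref:
            return None
        ref = ref[part]
    return ref
# _lookup_path_demo({'scan': {'energy': 12.0}}, 'scan/energy') returns 12.0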
def new_makehdf(scanid=-1, create_each_det=False):
# Get scan header
h = db[int(scanid)]
scanid = int(h.start['scan_id'])
start_doc = h.start
scan_doc = h.start['scan']
stop_doc = h.stop
# Check if new type of metadata
if 'md_version' not in h.start:
print('Please use old make_hdf.')
return
# Check for detectors
dets = []
try:
if 'xs' in h.start['scan']['detectors']:
dets.append('xs')
elif 'xs2' in h.start['scan']['detectors']:
dets.append('xs2')
except KeyError:
# AMK forgot to add detectors to step scans
# This is fixed, but left in for those scans
if scan_doc['type'] == 'XRF_STEP':
dets.append('xs')
if dets == []:
print('No detectors found!')
return
# Get metadata
mdata = _extract_metadata_from_header(h)
# Get position data from scan
c, r = h.start['scan']['shape']
if scan_doc['type'] == 'XRF_FLY':
fast_motor = scan_doc['fast_axis']['motor_name']
if (fast_motor == 'nano_stage_sx'):
fast_key = 'enc1'
elif (fast_motor == 'nano_stage_x'):
fast_key = 'enc1'
elif (fast_motor == 'nano_stage_sy'):
fast_key = 'enc2'
elif (fast_motor == 'nano_stage_y'):
fast_key = 'enc2'
elif (fast_motor == 'nano_stage_sz'):
fast_key = 'enc3'
else:
print(f'{fast_motor} not found!')
return
slow_motor = scan_doc['slow_axis']['motor_name']
if (slow_motor == 'nano_stage_sx'):
slow_key = 'enc1'
elif (slow_motor == 'nano_stage_x'):
slow_key = 'enc1'
elif (slow_motor == 'nano_stage_sy'):
slow_key = 'enc2'
elif (slow_motor == 'nano_stage_y'):
slow_key = 'enc2'
elif (slow_motor == 'nano_stage_sz'):
slow_key = 'enc3'
else:
slow_key = slow_motor
fast_pos = h.data(fast_key, stream_name='stream0', fill=True)
fast_pos = np.array(list(fast_pos))
if 'enc' in slow_key:
slow_pos = h.data(slow_key, stream_name='stream0', fill=True)
slow_pos = np.array(list(slow_pos))
else:
slow_pos = h.data(slow_key, stream_name='primary', fill=True)
slow_pos = np.array(list(slow_pos))
slow_pos = np.array([slow_pos,]*c).T
num_events = stop_doc['num_events']['stream0']
# pos_pos = np.zeros((2, r, c))
pos_pos = np.zeros((2, num_events, c))
if 'x' in slow_key:
pos_pos[1, :, :] = fast_pos
pos_pos[0, :, :] = slow_pos
else:
pos_pos[0, :, :] = fast_pos
pos_pos[1, :, :] = slow_pos
pos_name = ['x_pos', 'y_pos']
# Get detector data
if 'xs' in dets:
d_xs = np.array(list(h.data('fluor', stream_name='stream0', fill=True)))
N_xs = d_xs.shape[2]
d_xs_sum = np.squeeze(np.sum(d_xs, axis=2))
if 'xs2' in dets:
d_xs2 = np.array(list(h.data('fluor_xs2', stream_name='stream0', fill=True)))
N_xs2 = d_xs2.shape[2]
d_xs2_sum = np.squeeze(np.sum(d_xs2, axis=2))
# Scaler list
sclr_list = ['i0', 'i0_time', 'time', 'im', 'it']
sclr = []
sclr_name = []
for s in sclr_list:
if s in h.table('stream0').keys():
tmp = np.array(list(h.data(s, stream_name='stream0', fill=True)))
sclr.append(tmp)
sclr_name.append(s)
sclr = np.array(sclr)
sclr = np.moveaxis(sclr, 0, -1)
if scan_doc['type'] == 'XRF_STEP':
# Define keys for motor data
fast_motor = scan_doc['fast_axis']['motor_name']
fast_key = fast_motor + '_user_setpoint'
slow_motor = scan_doc['slow_axis']['motor_name']
slow_key = slow_motor + '_user_setpoint'
# Collect motor positions
fast_pos = h.data(fast_key, stream_name='primary', fill=True)
fast_pos = np.array(list(fast_pos))
slow_pos = h.data(slow_key, stream_name='primary', fill=True)
slow_pos = np.array(list(slow_pos))
# Reshape motor positions
num_events = stop_doc['num_events']['primary']
r, c = scan_doc['shape']
        if num_events != (r * c):
            num_rows = num_events // c + 1  # number of rows (the last row may be partial)
            fast_flat, slow_flat = fast_pos, slow_pos
            fast_pos = np.zeros((num_rows, c))
            slow_pos = np.zeros((num_rows, c))
            for i in range(num_rows):
                for j in range(c):
                    if i * c + j < num_events:
                        fast_pos[i, j] = fast_flat[i * c + j]
                        slow_pos[i, j] = slow_flat[i * c + j]
else:
num_rows = r
fast_pos = np.reshape(fast_pos, (r, c))
slow_pos = np.reshape(slow_pos, (r, c))
# Put into one array for h5 file
pos_pos = np.zeros((2, num_rows, c))
if 'x' in slow_key:
pos_pos[1, :, :] = fast_pos
pos_pos[0, :, :] = slow_pos
else:
pos_pos[0, :, :] = fast_pos
pos_pos[1, :, :] = slow_pos
pos_name = ['x_pos', 'y_pos']
# Get detector data
keys = h.table().keys()
MAX_DET_ELEMENTS = 7
for i in np.arange(1, MAX_DET_ELEMENTS+1):
if f'xs_channel{i}' in keys:
N_xs = i
else:
break
N_pts = num_events
N_bins= 4096
if 'xs' in dets:
d_xs = np.empty((N_xs, N_pts, N_bins))
for i in np.arange(0, N_xs):
d = h.data(f'xs_channel{i+1}', fill=True)
d = np.array(list(d))
d_xs[i, :, :] = np.copy(d)
del d
# Reshape data
            if num_events != (r * c):
                tmp = np.zeros((N_xs, num_rows, c, N_bins))
                for i in range(num_rows):
                    for j in range(c):
                        if i * c + j < num_events:
                            tmp[:, i, j, :] = d_xs[:, i * c + j, :]
                d_xs = np.copy(tmp)
                del tmp
            else:
                d_xs = np.reshape(d_xs, (N_xs, r, c, N_bins))
# Sum data
d_xs_sum = np.squeeze(np.sum(d_xs, axis=0))
# Scaler list
sclr_list = ['sclr_i0', 'sclr_im', 'sclr_it']
sclr_name = []
for s in sclr_list:
if s in keys:
sclr_name.append(s)
sclr = np.array(h.table()[sclr_name].values)
# Reshape data
        if num_events != (r * c):
            tmp = np.zeros((num_rows, c, len(sclr_name)))
            for i in range(num_rows):
                for j in range(c):
                    if i * c + j < num_events:
                        tmp[i, j, :] = sclr[i * c + j, :]
            sclr = np.copy(tmp)
            del tmp
        else:
            sclr = np.reshape(sclr, (r, c, len(sclr_name)))
# Consider snake
# pos_pos, d_xs, d_xs_sum, sclr
if scan_doc['snake'] == 1:
        pos_pos[:, 1::2, :] = pos_pos[:, 1::2, ::-1]
        d_xs[:, 1::2, :, :] = d_xs[:, 1::2, ::-1, :]
d_xs_sum[1::2, :, :] = d_xs_sum[1::2, ::-1, :]
sclr[1::2, :, :] = sclr[1::2, ::-1, :]
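    # (Added note, not in the original script:) in a serpentine/snake raster every
    # other row is acquired in the reverse direction, so rows 1, 3, 5, ... of the
    # positions, detector data and scalers are flipped left-to-right above to put
    # every row back into the same x ordering.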
# Transpose map for y scans
if scan_doc['type'] == 'XRF_FLY':
if (fast_motor == 'nano_stage_sy' or
fast_motor == 'nano_stage_y'):
# Need to swapaxes on pos_pos, d_xs, d_xs_sum, sclr
pos_name = pos_name[::-1]
pos_pos = np.swapaxes(pos_pos, 1, 2)
d_xs = np.swapaxes(d_xs, 1, 2)
d_xs_sum = np.swapaxes(d_xs_sum, 0, 1)
sclr = np.swapaxes(sclr, 0, 1)
# Write file
interpath = 'xrfmap'
for d in dets:
if d == 'xs':
tmp_data = d_xs
tmp_data_sum = d_xs_sum
N = N_xs
elif d == 'xs2':
tmp_data = d_xs2
tmp_data_sum = d_xs2_sum
N = N_xs2
if (create_each_det):
fn = f'scan2D_{scanid}_{d}_{N}ch.h5'
else:
fn = f'scan2D_{scanid}_{d}_sum{N}ch.h5'
file_open_mode = 'a'
fname_add_version = True
file_overwrite_existing = False
        if fname_add_version:
            fn = _get_fpath_not_existing(fn)
else:
if file_overwrite_existing:
file_open_mode = 'w'
else:
print('File already exists!')
return
with h5py.File(fn, file_open_mode) as f:
# Create metadata group
metadata_grp = f.create_group(f"{interpath}/scan_metadata")
| |
                    # the symlink is a directory, so change perms on the directory's contents
changed |= recursive_set_attributes(b_fsname, follow, file_args, mtime, atime)
# Change perms on the file pointed to by the link
tmp_file_args = file_args.copy()
tmp_file_args['path'] = to_native(b_fsname, errors='surrogate_or_strict')
changed |= module.set_fs_attributes_if_different(tmp_file_args, changed, expand=False)
changed |= update_timestamp_for_file(tmp_file_args['path'], mtime, atime)
return changed
def initial_diff(path, state, prev_state):
diff = {'before': {'path': path},
'after': {'path': path},
}
if prev_state != state:
diff['before']['state'] = prev_state
diff['after']['state'] = state
return diff
#
# States
#
def get_timestamp_for_time(formatted_time, time_format):
if formatted_time == 'preserve':
return None
elif formatted_time == 'now':
return Sentinel
else:
try:
struct = time.strptime(formatted_time, time_format)
struct_time = time.mktime(struct)
except (ValueError, OverflowError) as e:
raise AnsibleModuleError(results={'msg': 'Error while obtaining timestamp for time %s using format %s: %s'
% (formatted_time, time_format, to_native(e, nonstring='simplerepr'))})
return struct_time
def update_timestamp_for_file(path, mtime, atime, diff=None):
try:
# When mtime and atime are set to 'now', rely on utime(path, None) which does not require ownership of the file
# https://github.com/ansible/ansible/issues/50943
if mtime is Sentinel and atime is Sentinel:
# It's not exact but we can't rely on os.stat(path).st_mtime after setting os.utime(path, None) as it may
# not be updated. Just use the current time for the diff values
mtime = atime = time.time()
previous_mtime = os.stat(path).st_mtime
previous_atime = os.stat(path).st_atime
set_time = None
else:
# If both parameters are None 'preserve', nothing to do
if mtime is None and atime is None:
return False
previous_mtime = os.stat(path).st_mtime
previous_atime = os.stat(path).st_atime
if mtime is None:
mtime = previous_mtime
elif mtime is Sentinel:
mtime = time.time()
if atime is None:
atime = previous_atime
elif atime is Sentinel:
atime = time.time()
# If both timestamps are already ok, nothing to do
if mtime == previous_mtime and atime == previous_atime:
return False
set_time = (atime, mtime)
os.utime(path, set_time)
if diff is not None:
if 'before' not in diff:
diff['before'] = {}
if 'after' not in diff:
diff['after'] = {}
if mtime != previous_mtime:
diff['before']['mtime'] = previous_mtime
diff['after']['mtime'] = mtime
if atime != previous_atime:
diff['before']['atime'] = previous_atime
diff['after']['atime'] = atime
except OSError as e:
raise AnsibleModuleError(results={'msg': 'Error while updating modification or access time: %s'
% to_native(e, nonstring='simplerepr'), 'path': path})
return True
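# Stand-alone illustration (not part of this module) of the os.utime call used
# above: the tuple argument is ordered (atime, mtime), and utime(path, None)
# sets both timestamps to "now" without requiring ownership of the file.
def _utime_demo():
    import os, tempfile, time
    with tempfile.NamedTemporaryFile(delete=False) as handle:
        demo_path = handle.name
    os.utime(demo_path, (time.time(), time.time() - 60))  # (atime, mtime)
    os.unlink(demo_path)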
def keep_backward_compatibility_on_timestamps(parameter, state):
if state in ['file', 'hard', 'directory', 'link'] and parameter is None:
return 'preserve'
elif state == 'touch' and parameter is None:
return 'now'
else:
return parameter
def execute_diff_peek(path):
"""Take a guess as to whether a file is a binary file"""
b_path = to_bytes(path, errors='surrogate_or_strict')
appears_binary = False
try:
with open(b_path, 'rb') as f:
head = f.read(8192)
except Exception:
# If we can't read the file, we're okay assuming it's text
pass
else:
if b"\x00" in head:
appears_binary = True
return appears_binary
def ensure_absent(path):
b_path = to_bytes(path, errors='surrogate_or_strict')
prev_state = get_state(b_path)
result = {}
if prev_state != 'absent':
if not module.check_mode:
if prev_state == 'directory':
try:
shutil.rmtree(b_path, ignore_errors=False)
except Exception as e:
raise AnsibleModuleError(results={'msg': "rmtree failed: %s" % to_native(e)})
else:
try:
os.unlink(b_path)
except OSError as e:
if e.errno != errno.ENOENT: # It may already have been removed
raise AnsibleModuleError(results={'msg': "unlinking failed: %s " % to_native(e),
'path': path})
diff = initial_diff(path, 'absent', prev_state)
result.update({'path': path, 'changed': True, 'diff': diff})
else:
result.update({'path': path, 'changed': False})
return result
def execute_touch(path, follow, timestamps):
b_path = to_bytes(path, errors='surrogate_or_strict')
prev_state = get_state(b_path)
changed = False
result = {'dest': path}
mtime = get_timestamp_for_time(timestamps['modification_time'], timestamps['modification_time_format'])
atime = get_timestamp_for_time(timestamps['access_time'], timestamps['access_time_format'])
if not module.check_mode:
if prev_state == 'absent':
# Create an empty file if the filename did not already exist
try:
open(b_path, 'wb').close()
changed = True
except (OSError, IOError) as e:
raise AnsibleModuleError(results={'msg': 'Error, could not touch target: %s'
% to_native(e, nonstring='simplerepr'),
'path': path})
# Update the attributes on the file
diff = initial_diff(path, 'touch', prev_state)
file_args = module.load_file_common_arguments(module.params)
try:
changed = module.set_fs_attributes_if_different(file_args, changed, diff, expand=False)
changed |= update_timestamp_for_file(file_args['path'], mtime, atime, diff)
except SystemExit as e:
if e.code:
# We take this to mean that fail_json() was called from
# somewhere in basic.py
if prev_state == 'absent':
# If we just created the file we can safely remove it
os.remove(b_path)
raise
result['changed'] = changed
result['diff'] = diff
return result
def ensure_file_attributes(path, follow, timestamps):
b_path = to_bytes(path, errors='surrogate_or_strict')
prev_state = get_state(b_path)
file_args = module.load_file_common_arguments(module.params)
mtime = get_timestamp_for_time(timestamps['modification_time'], timestamps['modification_time_format'])
atime = get_timestamp_for_time(timestamps['access_time'], timestamps['access_time_format'])
if prev_state != 'file':
if follow and prev_state == 'link':
# follow symlink and operate on original
b_path = os.path.realpath(b_path)
path = to_native(b_path, errors='strict')
prev_state = get_state(b_path)
file_args['path'] = path
if prev_state not in ('file', 'hard'):
# file is not absent and any other state is a conflict
raise AnsibleModuleError(results={'msg': 'file (%s) is %s, cannot continue' % (path, prev_state),
'path': path})
diff = initial_diff(path, 'file', prev_state)
changed = module.set_fs_attributes_if_different(file_args, False, diff, expand=False)
changed |= update_timestamp_for_file(file_args['path'], mtime, atime, diff)
return {'path': path, 'changed': changed, 'diff': diff}
def ensure_directory(path, follow, recurse, timestamps):
b_path = to_bytes(path, errors='surrogate_or_strict')
prev_state = get_state(b_path)
file_args = module.load_file_common_arguments(module.params)
mtime = get_timestamp_for_time(timestamps['modification_time'], timestamps['modification_time_format'])
atime = get_timestamp_for_time(timestamps['access_time'], timestamps['access_time_format'])
# For followed symlinks, we need to operate on the target of the link
if follow and prev_state == 'link':
b_path = os.path.realpath(b_path)
path = to_native(b_path, errors='strict')
file_args['path'] = path
prev_state = get_state(b_path)
changed = False
diff = initial_diff(path, 'directory', prev_state)
if prev_state == 'absent':
# Create directory and assign permissions to it
if module.check_mode:
return {'changed': True, 'diff': diff}
curpath = ''
try:
# Split the path so we can apply filesystem attributes recursively
# from the root (/) directory for absolute paths or the base path
# of a relative path. We can then walk the appropriate directory
# path to apply attributes.
# Something like mkdir -p with mode applied to all of the newly created directories
for dirname in path.strip('/').split('/'):
curpath = '/'.join([curpath, dirname])
# Remove leading slash if we're creating a relative path
if not os.path.isabs(path):
curpath = curpath.lstrip('/')
b_curpath = to_bytes(curpath, errors='surrogate_or_strict')
if not os.path.exists(b_curpath):
try:
os.mkdir(b_curpath)
changed = True
except OSError as ex:
# Possibly something else created the dir since the os.path.exists
# check above. As long as it's a dir, we don't need to error out.
if not (ex.errno == errno.EEXIST and os.path.isdir(b_curpath)):
raise
tmp_file_args = file_args.copy()
tmp_file_args['path'] = curpath
changed = module.set_fs_attributes_if_different(tmp_file_args, changed, diff, expand=False)
changed |= update_timestamp_for_file(file_args['path'], mtime, atime, diff)
except Exception as e:
raise AnsibleModuleError(results={'msg': 'There was an issue creating %s as requested:'
' %s' % (curpath, to_native(e)),
'path': path})
return {'path': path, 'changed': changed, 'diff': diff}
elif prev_state != 'directory':
# We already know prev_state is not 'absent', therefore it exists in some form.
raise AnsibleModuleError(results={'msg': '%s already exists as a %s' % (path, prev_state),
'path': path})
#
# previous state == directory
#
changed = module.set_fs_attributes_if_different(file_args, changed, diff, expand=False)
changed |= update_timestamp_for_file(file_args['path'], mtime, atime, diff)
if recurse:
changed |= recursive_set_attributes(b_path, follow, file_args, mtime, atime)
return {'path': path, 'changed': changed, 'diff': diff}
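# Stand-alone sketch (not module code) of the incremental, mkdir -p style walk
# performed above, stripped of the attribute/permission handling:
def _makedirs_demo(path):
    import os
    current = ''
    for name in path.strip('/').split('/'):
        current = '/'.join([current, name])
        if not os.path.isabs(path):
            current = current.lstrip('/')
        if not os.path.exists(current):
            os.mkdir(current)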
def ensure_symlink(path, src, follow, force, timestamps):
b_path = to_bytes(path, errors='surrogate_or_strict')
b_src = to_bytes(src, errors='surrogate_or_strict')
prev_state = get_state(b_path)
file_args = module.load_file_common_arguments(module.params)
mtime = get_timestamp_for_time(timestamps['modification_time'], timestamps['modification_time_format'])
atime = get_timestamp_for_time(timestamps['access_time'], timestamps['access_time_format'])
# source is both the source of a symlink or an informational passing of the src for a template module
# or copy module, even if this module never uses it, it is needed to key off some things
if src is None:
if follow:
# use the current target of the link as the source
src = to_native(os.path.realpath(b_path), errors='strict')
b_src = to_bytes(src, errors='surrogate_or_strict')
if not os.path.islink(b_path) and os.path.isdir(b_path):
relpath = path
else:
b_relpath = os.path.dirname(b_path)
relpath = to_native(b_relpath, errors='strict')
absrc = os.path.join(relpath, src)
b_absrc = to_bytes(absrc, errors='surrogate_or_strict')
if not force and not os.path.exists(b_absrc):
raise AnsibleModuleError(results={'msg': 'src file does not exist, use "force=yes" if you'
' really want to create the link: %s' % absrc,
'path': path, 'src': src})
if prev_state == 'directory':
if not force:
raise AnsibleModuleError(results={'msg': 'refusing to convert from %s to symlink for %s'
% (prev_state, path),
'path': path})
elif os.listdir(b_path):
# refuse to replace a directory that has files in it
raise AnsibleModuleError(results={'msg': 'the directory %s is not empty, refusing to'
' convert it' % path,
'path': path})
"""
The various classes that launch external programs to view files
"""
import plugins, os
from .. import guiplugins
from string import Template
from copy import copy
import subprocess
class FileViewAction(guiplugins.ActionGUI):
def __init__(self, *args, **kw):
self.performArgs = []
guiplugins.ActionGUI.__init__(self, *args, **kw)
def singleTestOnly(self):
return True
def isActiveOnCurrent(self, *args):
if not guiplugins.ActionGUI.isActiveOnCurrent(self):
return False
for fileName, obj in self.currFileSelection:
if self.isActiveForFile(fileName, obj):
return True
return False
def isActiveForFile(self, fileName, *args):
return fileName and not os.path.isdir(fileName)
def useFiltered(self):
return False
def getLargestFileSize(self, f, *args):
try:
return os.path.getsize(f)
except EnvironmentError:
return 0
def getConfirmationMessage(self):
self.performArgs = []
message = ""
for fileName, associatedObject in self.currFileSelection:
if self.isActiveForFile(fileName, associatedObject):
message, args = self.getConfMessageForFile(fileName, associatedObject)
self.performArgs.append(args)
return message
def performOnCurrent(self):
for args in self.performArgs:
self.performOnFile(*args)
def performOnFile(self, viewTool, *args):
try:
self._performOnFile(viewTool, *args)
except OSError:
self.showErrorDialog("Cannot find " + self.getToolDescription() + " '" + viewTool + \
"'.\nPlease install it somewhere on your PATH or\n"
"change the configuration entry '" + self.getToolConfigEntry() + "'.")
def getConfMessageForFile(self, fileName, associatedObject):
fileToView = self.getFileToView(fileName, associatedObject)
if os.path.isfile(fileToView) or os.path.islink(fileToView):
viewTool = self.getViewToolName(fileToView)
if viewTool:
args = (viewTool, fileToView, associatedObject)
maxFileSize = plugins.parseBytes(self.getConfigValue("max_file_size", viewTool))
if maxFileSize >= 0:
largestFileSize = self.getLargestFileSize(fileToView, associatedObject)
if largestFileSize > maxFileSize:
message = "You are trying to view a file of size " + str(largestFileSize) + " bytes, while a limit of " + \
str(maxFileSize) + " bytes is set for the tool '" + viewTool + "'. Are you sure you wish to continue?"
return message, args
return "", args
else:
raise plugins.TextTestError, "No " + self.getToolDescription() + " is defined for files of type '" + \
os.path.basename(fileToView).split(".")[0] + \
"'.\nPlease point the configuration entry '" + self.getToolConfigEntry() + \
"' at a valid program to view the file."
else:
raise plugins.TextTestError, "File '" + os.path.basename(fileName) + \
"' cannot be viewed as it has been removed in the file system." + self.noFileAdvice()
def isDefaultViewer(self, *args):
return False
def extraPostfix(self):
return ""
def notifyViewFile(self, fileName, comp, newFile):
if self.isDefaultViewer(comp, newFile):
allArgs = (fileName, comp)
self.currFileSelection = [ allArgs ]
self.runInteractive()
def getFileToView(self, fileName, associatedObject):
try:
# associatedObject might be a comparison object, but it might not
# Use the comparison if it's there
return associatedObject.existingFile(self.useFiltered(), self.extraPostfix())
except AttributeError:
return fileName
def noFileAdvice(self):
if len(self.currAppSelection) > 0:
return "\n" + self.currAppSelection[0].noFileAdvice()
else:
return ""
def testDescription(self):
if len(self.currTestSelection) > 0:
return " (from test " + self.currTestSelection[0].uniqueName + ")"
else:
return ""
def getRemoteHost(self):
if len(self.currTestSelection) > 0:
state = self.currTestSelection[0].stateInGui
if hasattr(state, "executionHosts") and len(state.executionHosts) > 0:
return plugins.interpretHostname(state.executionHosts[0])
return "localhost"
def getRemoteArgs(self, cmdArgs):
remoteHost = self.getRemoteHost()
return self.currTestSelection[0].app.getCommandArgsOn(remoteHost, cmdArgs, graphical=True)
def getSignalsSent(self):
return [ "ViewerStarted" ]
def startViewer(self, cmdArgs, description, checkOutput=False, **kwargs):
testDesc = self.testDescription()
fullDesc = description + testDesc
nullFile = open(os.devnull, "w")
stdout = subprocess.PIPE if checkOutput else nullFile
self.notify("Status", 'Started "' + description + '" in background' + testDesc + '.')
guiplugins.processMonitor.startProcess(cmdArgs, fullDesc, stdout=stdout, stderr=nullFile, **kwargs)
self.notifyThreaded("ViewerStarted") # Don't call application events directly in the GUI thread
def getStem(self, fileName):
return os.path.basename(fileName).split(".")[0]
def testRunning(self):
if len(self.currTestSelection) > 0:
return self.currTestSelection[0].stateInGui.hasStarted() and \
not self.currTestSelection[0].stateInGui.isComplete()
else:
return False
def getViewToolName(self, fileName):
stem = self.getStem(fileName)
return self.getConfigValue(self.getToolConfigEntry(), stem)
def getConfigValue(self, *args):
if len(self.currTestSelection) > 0:
return self.currTestSelection[0].getCompositeConfigValue(*args)
else:
return guiplugins.guiConfig.getCompositeValue(*args)
def differencesActive(self, comparison):
if not comparison or comparison.newResult() or comparison.missingResult():
return False
return comparison.hasDifferences()
def messageAfterPerform(self):
pass # provided by starting viewer, with message
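# Minimal stand-alone sketch (not TextTest code) of what startViewer above boils
# down to: launch the external viewer detached, silencing its output unless the
# caller asked to capture it.
def _launch_viewer_demo(cmd_args, capture_stdout=False):
    import os, subprocess
    null_file = open(os.devnull, "w")
    stdout = subprocess.PIPE if capture_stdout else null_file
    return subprocess.Popen(cmd_args, stdout=stdout, stderr=null_file)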
class ViewInEditor(FileViewAction):
def __init__(self, allApps, dynamic, *args):
FileViewAction.__init__(self, allApps)
self.dynamic = dynamic
def _getStockId(self):
return "open"
def getToolConfigEntry(self):
return "view_program"
def getToolDescription(self):
return "file viewing program"
def viewFile(self, fileName, viewTool, exitHandler, exitHandlerArgs):
cmdArgs, descriptor, env = self.getViewCommand(fileName, viewTool)
description = descriptor + " " + os.path.basename(fileName)
checkOutput = cmdArgs[0] == "storytext_editor"
self.startViewer(cmdArgs, description=description, checkOutput=checkOutput, env=env,
exitHandler=exitHandler, exitHandlerArgs=exitHandlerArgs)
def getViewerEnvironment(self, cmdArgs):
# An absolute path to the viewer may indicate a custom tool, send the test environment along too
# Doing this is unlikely to cause harm in any case
if len(self.currTestSelection) > 0:
if cmdArgs[0] == "storytext_editor":
storytextHome = self.currTestSelection[0].getEnvironment("STORYTEXT_HOME")
return plugins.copyEnvironment({ "STORYTEXT_HOME" : storytextHome }, ignoreVars=[ "USECASE_RECORD_SCRIPT", "USECASE_REPLAY_SCRIPT" ])
elif os.path.isabs(cmdArgs[0]):
return self.currTestSelection[0].getRunEnvironment()
def getViewCommand(self, fileName, viewProgram):
# viewProgram might have arguments baked into it...
cmdArgs = plugins.splitcmd(viewProgram) + [ fileName ]
program = cmdArgs[0]
descriptor = " ".join([ os.path.basename(program) ] + cmdArgs[1:-1])
env = self.getViewerEnvironment(cmdArgs)
interpreter = plugins.getInterpreter(program)
if interpreter:
cmdArgs = [ interpreter ] + cmdArgs
if guiplugins.guiConfig.getCompositeValue("view_file_on_remote_machine", self.getStem(fileName)):
cmdArgs = self.getRemoteArgs(cmdArgs)
return cmdArgs, descriptor, env
def _performOnFile(self, viewTool, fileName, *args):
exitHandler, exitHandlerArgs = self.findExitHandlerInfo(fileName, *args)
return self.viewFile(fileName, viewTool, exitHandler, exitHandlerArgs)
def editingComplete(self):
self.applicationEvent("file editing operations to complete")
class ViewConfigFileInEditor(ViewInEditor):
def __init__(self, *args):
ViewInEditor.__init__(self, *args)
self.rootTestSuites = []
def _getTitle(self):
return "View In Editor"
def addSuites(self, suites):
self.rootTestSuites += suites
def isActiveOnCurrent(self, *args):
return len(self.currFileSelection) > 0
def notifyViewApplicationFile(self, fileName, apps, *args):
self.currFileSelection = [ (fileName, apps) ]
self.runInteractive()
def notifyViewReadonlyFile(self, fileName):
viewTool = self.getConfigValue("view_program", "default")
return self.viewFile(fileName, viewTool, self.applicationEvent, ("the readonly file viewer to be closed",))
def findExitHandlerInfo(self, dummy, apps):
return self.configFileChanged, (apps,)
def getSignalsSent(self):
return [ "ReloadConfig" ]
def configFileChanged(self, apps):
for app in apps:
app.setUpConfiguration()
suite = self.findSuite(app)
suite.refreshFilesRecursively()
# May have affected e.g. imported files, script references etc
self.notify("ReloadConfig")
self.editingComplete()
def findSuite(self, app):
for suite in self.rootTestSuites:
if suite.app is app:
return suite
class ViewTestFileInEditor(ViewInEditor):
def _getTitle(self):
return "View File"
def isDefaultViewer(self, comparison, isNewFile):
return not isNewFile and not self.differencesActive(comparison) and \
(not self.testRunning() or not guiplugins.guiConfig.getValue("follow_file_by_default"))
def findExitHandlerInfo(self, fileName, *args):
if self.dynamic:
return self.editingComplete, ()
# options file can change appearance of test (environment refs etc.)
baseName = os.path.basename(fileName)
for stem in [ "options", "testsuite", "config" ]:
if baseName.startswith(stem):
tests = self.getTestsForFile(stem, fileName)
if len(tests) > 0:
methodName = "handle" + stem.capitalize() + "Edit"
return getattr(self, methodName), (tests,)
return self.staticGUIEditingComplete, (copy(self.currTestSelection), fileName)
def getTestsForFile(self, stem, fileName):
tests = []
for test in self.currTestSelection:
defFile = test.getFileName(stem)
if defFile and plugins.samefile(fileName, defFile):
tests.append(test)
return tests
def handleTestsuiteEdit(self, suites):
for suite in suites:
suite.refresh(suite.app.getFilterList(suites))
self.editingComplete()
def handleOptionsEdit(self, tests):
for test in tests:
test.filesChanged()
self.editingComplete()
def handleConfigEdit(self, tests):
for test in tests:
test.reloadTestConfigurations()
self.editingComplete()
def getSignalsSent(self):
return [ "RefreshFilePreviews" ] + ViewInEditor.getSignalsSent(self)
def staticGUIEditingComplete(self, tests, fileName):
for test in tests:
self.notify("RefreshFilePreviews", test, fileName)
self.editingComplete()
class EditTestFileInEditor(ViewTestFileInEditor):
def _getTitle(self):
return "Edit File"
def getViewToolName(self, *args):
return self.getConfigValue(self.getToolConfigEntry(), "default")
def isDefaultViewer(self, comp, isNewFile):
return isNewFile
def _getStockId(self):
pass # don't use same stock for both
class ViewFilteredTestFileInEditor(ViewTestFileInEditor):
def _getStockId(self):
pass # don't use same stock for both
def useFiltered(self):
return True
def _getTitle(self):
return "View Filtered File"
def isActiveForFile(self, fileName, comparison):
return bool(comparison)
def isDefaultViewer(self, *args):
return False
class ContentFilterViewer:
def extraPostfix(self):
return ".normal"
def unorderedFiltersActive(self, comparison):
return len(self.currAppSelection[0].getCompositeConfigValue("unordered_text", comparison.stem)) > 0
class ViewContentFilteredTestFileInEditor(ContentFilterViewer, ViewFilteredTestFileInEditor):
def _getTitle(self):
return "View Content-Filtered File"
def isActiveForFile(self, fileName, comparison):
return ViewFilteredTestFileInEditor.isActiveForFile(self, fileName, comparison) and \
self.unorderedFiltersActive(comparison)
class ViewFilteredOrigFileInEditor(ViewFilteredTestFileInEditor):
def _getTitle(self):
return "View Filtered Original File"
def isActiveForFile(self, fileName, comparison):
return comparison and not comparison.newResult()
def getFileToView(self, fileName, comparison):
return comparison.getStdFile(self.useFiltered(), self.extraPostfix())
class ViewOrigFileInEditor(ViewFilteredOrigFileInEditor):
def _getTitle(self):
return "View Original File"
def useFiltered(self):
return False
class EditOrigFileInEditor(ViewOrigFileInEditor):
def _getTitle(self):
return "Edit Original File"
def getViewToolName(self, *args):
return self.getConfigValue(self.getToolConfigEntry(), "default")
class ViewContentFilteredOrigFileInEditor(ContentFilterViewer, ViewFilteredOrigFileInEditor):
def _getTitle(self):
return "View Content-Filtered Original File"
def isActiveForFile(self, fileName, comparison):
return ViewFilteredOrigFileInEditor.isActiveForFile(self, fileName, comparison) and \
self.unorderedFiltersActive(comparison)
class ViewFileDifferences(FileViewAction):
def _getTitle(self):
return "View Raw Differences"
def getToolConfigEntry(self):
return "diff_program"
def getToolDescription(self):
return "graphical difference program"
def isActiveForFile(self, fileName, comparison):
if bool(comparison):
if not (comparison.newResult() or comparison.missingResult()):
return True
return False
def getLargestFileSize(self, tmpFile, comparison):
stdFile = comparison.getStdFile(self.useFiltered(), self.extraPostfix())
return max(os.path.getsize(stdFile), os.path.getsize(tmpFile))
def _performOnFile(self, diffProgram, tmpFile, comparison):
stdFile = comparison.getStdFile(self.useFiltered(), self.extraPostfix())
self.runDiff(diffProgram, stdFile, tmpFile)
def runDiff(self, diffProgram, stdFile, tmpFile):
description = diffProgram + " " + os.path.basename(stdFile) + " " + os.path.basename(tmpFile)
cmdArgs = plugins.splitcmd(diffProgram) + [ stdFile, tmpFile ]
self.startViewer(cmdArgs, description=description, exitHandler=self.diffingComplete)
def diffingComplete(self, *args):
self.applicationEvent("the " + self.getToolDescription() + " to terminate")
class PairwiseFileViewer:
def singleTestOnly(self):
return False
def isActiveOnCurrent(self, *args):
        return len(self.currTestSelection) == 2 and
    def __init__(self, detector: 'User', argument: Optional['Argument'] = None,
                 statement: Optional['Statement'] = None, is_executed: bool = False, is_revoked: bool = False):
"""
Inits a row in current review optimization table
:param detector: User who created ReviewOptimization
        :param argument: Argument this review refers to (if any)
        :param statement: Statement this review refers to (if any)
:param is_executed: Boolean
:param is_revoked: Boolean
"""
self.detector: 'User' = detector
self.argument: Optional['Argument'] = argument
self.statement: Optional['Statement'] = statement
self.timestamp: ArrowType = get_now()
self.is_executed: bool = is_executed
self.is_revoked: bool = is_revoked
def set_executed(self, is_executed):
"""
Sets current review as executed
:param is_executed: Boolean
:return: None
"""
self.is_executed = is_executed
def set_revoked(self, is_revoked):
"""
Sets current review as revoked
:param is_revoked: Boolean
:return: None
"""
self.is_revoked = is_revoked
def update_timestamp(self):
"""
Update timestamp
:return: None
"""
self.timestamp = get_now()
def get_issues(self) -> [Issue]:
if self.argument:
return [self.argument.issue]
return self.statement.issues
def is_locked(self) -> bool:
lock = DBDiscussionSession.query(OptimizationReviewLocks).filter(
OptimizationReviewLocks.review_optimization_uid == self.uid).one_or_none()
return lock is not None
class ReviewDuplicate(AbstractReviewCase):
"""
ReviewDuplicate-table with several columns.
"""
__tablename__ = 'review_duplicates'
uid: int = Column(Integer, primary_key=True)
detector_uid: int = Column(Integer, ForeignKey('users.uid'))
duplicate_statement_uid: int = Column(Integer, ForeignKey('statements.uid'))
original_statement_uid: int = Column(Integer, ForeignKey('statements.uid'))
timestamp = Column(ArrowType, default=get_now())
is_executed: bool = Column(Boolean, nullable=False, default=False)
is_revoked: bool = Column(Boolean, nullable=False, default=False)
detector: User = relationship('User', foreign_keys=[detector_uid])
duplicate_statement: Statement = relationship('Statement', foreign_keys=[duplicate_statement_uid])
original_statement: Statement = relationship('Statement', foreign_keys=[original_statement_uid])
def __init__(self, detector: User, duplicate_statement: Statement = None, original_statement: Statement = None,
is_executed: bool = False, is_revoked: bool = False):
"""
Inits a row in current review duplicate table
        :param detector: User who reported the duplicate
        :param duplicate_statement: Statement flagged as a duplicate
        :param original_statement: Statement considered the original
:param is_executed: Boolean
:param is_revoked: Boolean
"""
self.detector: User = detector
self.duplicate_statement: Statement = duplicate_statement
self.original_statement: Statement = original_statement
self.timestamp: ArrowType = get_now()
self.is_executed: bool = is_executed
self.is_revoked: bool = is_revoked
def set_executed(self, is_executed: bool):
"""
Sets current review as executed
:param is_executed: Boolean
:return: None
"""
self.is_executed = is_executed
def set_revoked(self, is_revoked: bool):
"""
Sets current review as revoked
:param is_revoked: Boolean
:return: None
"""
self.is_revoked = is_revoked
def update_timestamp(self):
"""
Update timestamp
:return: None
"""
self.timestamp = get_now()
def get_issues(self) -> [Issue]:
return self.duplicate_statement.issues
class ReviewMerge(AbstractReviewCase):
"""
Review-table with several columns.
"""
__tablename__ = 'review_merge'
uid: int = Column(Integer, primary_key=True)
detector_uid: int = Column(Integer, ForeignKey('users.uid'))
premisegroup_uid: int = Column(Integer, ForeignKey('premisegroups.uid'))
timestamp = Column(ArrowType, default=get_now())
is_executed: bool = Column(Boolean, nullable=False, default=False)
is_revoked: bool = Column(Boolean, nullable=False, default=False)
detector: User = relationship('User', foreign_keys=[detector_uid])
premisegroup: PremiseGroup = relationship('PremiseGroup', foreign_keys=[premisegroup_uid])
def __init__(self, detector: User, premisegroup: PremiseGroup, is_executed: bool = False, is_revoked: bool = False):
"""
Inits a row in current review merge table
:param detector: User
:param premisegroup: PremiseGroup
:param is_executed: Boolean
:param is_revoked: Boolean
"""
self.detector = detector
self.premisegroup = premisegroup
self.timestamp = get_now()
self.is_executed = is_executed
self.is_revoked = is_revoked
def set_executed(self, is_executed: bool):
"""
Sets current review as executed
:param is_executed: Boolean
:return: None
"""
self.is_executed = is_executed
def set_revoked(self, is_revoked: bool):
"""
Sets current review as revoked
:param is_revoked: Boolean
:return: None
"""
self.is_revoked = is_revoked
def update_timestamp(self):
"""
Update timestamp
:return: None
"""
self.timestamp = get_now()
def get_issues(self) -> [Issue]:
return [self.premisegroup.premises[0].issue]
class ReviewSplit(AbstractReviewCase):
"""
Review-table with several columns.
"""
__tablename__ = 'review_split'
uid: int = Column(Integer, primary_key=True)
detector_uid: int = Column(Integer, ForeignKey('users.uid'))
premisegroup_uid: int = Column(Integer, ForeignKey('premisegroups.uid'))
timestamp = Column(ArrowType, default=get_now())
is_executed: bool = Column(Boolean, nullable=False, default=False)
is_revoked: bool = Column(Boolean, nullable=False, default=False)
detector: User = relationship('User', foreign_keys=[detector_uid])
premisegroup: PremiseGroup = relationship('PremiseGroup', foreign_keys=[premisegroup_uid])
def __init__(self, detector: User, premisegroup: PremiseGroup, is_executed: bool = False, is_revoked: bool = False):
"""
Inits a row in current review split table
:param detector: User
:param premisegroup: PremiseGroup
:param is_executed: Boolean
:param is_revoked: Boolean
"""
self.detector = detector
self.premisegroup = premisegroup
self.timestamp = get_now()
self.is_executed = is_executed
self.is_revoked = is_revoked
def set_executed(self, is_executed: bool):
"""
Sets current review as executed
:param is_executed: Boolean
:return: None
"""
self.is_executed = is_executed
def set_revoked(self, is_revoked: bool):
"""
Sets current review as revoked
:param is_revoked: Boolean
:return: None
"""
self.is_revoked = is_revoked
def update_timestamp(self):
"""
Update timestamp
:return: None
"""
self.timestamp = get_now()
def get_issues(self) -> [Issue]:
return [self.premisegroup.premises[0].issue]
class ReviewSplitValues(DiscussionBase):
"""
Review-table with several columns.
"""
__tablename__ = 'review_split_values'
uid: int = Column(Integer, primary_key=True)
review_uid: int = Column(Integer, ForeignKey('review_split.uid'))
content: str = Column(Text, nullable=False)
review: ReviewSplit = relationship('ReviewSplit', foreign_keys=[review_uid])
def __init__(self, review: ReviewSplit, content: str):
"""
Inits a row in current review split value table
:param review: ReviewSplit
:param content: String
"""
self.review = review
self.content = content
class ReviewMergeValues(DiscussionBase):
"""
Review-table with several columns.
"""
__tablename__ = 'review_merge_values'
uid: int = Column(Integer, primary_key=True)
review_uid: int = Column(Integer, ForeignKey('review_merge.uid'))
content: str = Column(Text, nullable=False)
review: ReviewMerge = relationship('ReviewMerge', foreign_keys=[review_uid])
def __init__(self, review: ReviewMerge, content: str):
"""
Inits a row in current review merge value table
:param review: ReviewMerge
:param content: String
"""
self.review = review
self.content = content
class ReviewDeleteReason(DiscussionBase):
"""
ReviewDeleteReason-table with several columns.
"""
__tablename__ = 'review_delete_reasons'
uid: int = Column(Integer, primary_key=True)
reason: str = Column(Text, nullable=False, unique=True)
def __init__(self, reason: str):
"""
Inits a row in current review delete reason table
:param reason: String
"""
self.reason = reason
class AbstractLastReviewerCase(DiscussionBase):
__abstract__ = True # Needed for SQLAlchemy
uid = NotImplemented
reviewer_uid = NotImplemented
review_uid = NotImplemented
timestamp = NotImplemented
@abstractmethod
def __eq__(self, other):
pass
def __init_subclass__(cls, **kwargs):
super().__init_subclass__(**kwargs)
if cls.uid is NotImplemented or \
cls.review_uid is NotImplemented or \
cls.reviewer_uid is NotImplemented or \
cls.timestamp is NotImplemented:
raise NotImplementedError("Your subclass of AbstractLastReviewerCase did not define all columns")
class LastReviewerDelete(AbstractLastReviewerCase):
"""
LastReviewerDelete-table with several columns.
"""
__tablename__ = 'last_reviewers_delete'
uid: int = Column(Integer, primary_key=True)
reviewer_uid: int = Column(Integer, ForeignKey('users.uid'))
review_uid: int = Column(Integer, ForeignKey('review_deletes.uid'))
is_okay: bool = Column(Boolean, nullable=False, default=False)
timestamp = Column(ArrowType, default=get_now())
reviewer: User = relationship('User', foreign_keys=[reviewer_uid])
review: ReviewDelete = relationship('ReviewDelete', foreign_keys=[review_uid])
def __init__(self, reviewer: User, review: ReviewDelete, is_okay: bool):
"""
Inits a row in current last reviewer delete table
:param reviewer: User
:param review: ReviewDelete
:param is_okay: Boolean
"""
self.reviewer = reviewer
self.review = review
self.is_okay = is_okay
self.timestamp = get_now()
def __eq__(self, other):
return self.uid == other.uid
class LastReviewerDuplicate(AbstractLastReviewerCase):
"""
LastReviewerDuplicate-table with several columns.
"""
__tablename__ = 'last_reviewers_duplicates'
uid: int = Column(Integer, primary_key=True)
reviewer_uid: int = Column(Integer, ForeignKey('users.uid'))
review_uid: int = Column(Integer, ForeignKey('review_duplicates.uid'))
is_okay: bool = Column(Boolean, nullable=False, default=False)
timestamp = Column(ArrowType, default=get_now())
reviewer: User = relationship('User', foreign_keys=[reviewer_uid])
review: ReviewDuplicate = relationship('ReviewDuplicate', foreign_keys=[review_uid])
def __init__(self, reviewer: User, review: ReviewDuplicate, is_okay: bool):
"""
Inits a row in current last reviewer duplicate table
:param reviewer: User
:param review: ReviewDuplicate
:param is_okay: Boolean
"""
self.reviewer = reviewer
self.review = review
self.is_okay = is_okay
self.timestamp = get_now()
def __eq__(self, other):
return self.uid == other.uid
class LastReviewerEdit(AbstractLastReviewerCase):
"""
LastReviewerEdit-table with several columns.
"""
__tablename__ = 'last_reviewers_edit'
uid: int = Column(Integer, primary_key=True)
reviewer_uid: int = Column(Integer, ForeignKey('users.uid'))
review_uid: int = Column(Integer, ForeignKey('review_edits.uid'))
is_okay: bool = Column(Boolean, nullable=False, default=False)
timestamp = Column(ArrowType, default=get_now())
reviewer: User = relationship('User', foreign_keys=[reviewer_uid])
review: ReviewEdit = relationship('ReviewEdit', foreign_keys=[review_uid])
def __init__(self, reviewer: User, review: ReviewEdit, is_okay: bool):
"""
:param reviewer: User
:param review: ReviewEdit
:param is_okay: Boolean
"""
self.reviewer = reviewer
self.review = review
self.is_okay = is_okay
self.timestamp = get_now()
def __eq__(self, other):
return self.uid == other.uid
class LastReviewerOptimization(AbstractLastReviewerCase):
"""
LastReviewerOptimization-table with several columns.
"""
__tablename__ = 'last_reviewers_optimization'
uid: int = Column(Integer, primary_key=True)
reviewer_uid: int = Column(Integer, ForeignKey('users.uid'))
review_uid: int = Column(Integer, ForeignKey('review_optimizations.uid'))
is_okay: bool = Column(Boolean, nullable=False, default=False)
timestamp = Column(ArrowType, default=get_now())
reviewer: User = relationship('User', foreign_keys=[reviewer_uid])
review: ReviewOptimization = relationship('ReviewOptimization', foreign_keys=[review_uid])
def __init__(self, reviewer: User, review: ReviewOptimization, is_okay: bool):
"""
Inits a row in current last reviewer optimization table
:param reviewer: User
:param review: ReviewOptimization
:param is_okay: Boolean
"""
self.reviewer = reviewer
self.review = review
self.is_okay = is_okay
self.timestamp = get_now()
def __eq__(self, other):
return self.uid == other.uid
class LastReviewerSplit(AbstractLastReviewerCase):
"""
LastReviewerSplit-table with several columns.
"""
__tablename__ = 'last_reviewers_split'
uid: int = Column(Integer, primary_key=True)
reviewer_uid: int = Column(Integer, ForeignKey('users.uid'))
review_uid: int = Column(Integer, ForeignKey('review_split.uid'))
should_split: bool = Column(Boolean, nullable=False, default=False)
timestamp = Column(ArrowType, default=get_now())
reviewer: User = relationship('User', foreign_keys=[reviewer_uid])
review: ReviewSplit = relationship('ReviewSplit', foreign_keys=[review_uid])
def __init__(self, reviewer: User, review: ReviewSplit, should_split: bool):
"""
Inits a row in current last reviewer split table
:param reviewer: User
:param review: ReviewSplit
logger.info("Removing server access to NSD {0} from node "
"{1}".format(nsd_to_delete,
node_to_delete.get_admin_node_name()))
SpectrumScaleNSD.remove_server_access_to_nsd(nsd_to_delete,
node_to_delete.get_admin_node_name(),
nsd_attached_to_nodes)
# All "mmchnsd" calls are asynchronous. Therefore wait here till all
# modifications are committed before proceeding further. For now just
# sleep but we need to enhance this to ensure the async op has completed
time.sleep(10)
logger.debug("Function Exit: remove_multi_attach_nsd(). ")
#
# This function performs removal / termination of nodes from the IBM Spectrum
# Scale cluster. If the node is a server node that has access to NSD(s), then
# we attempt to remove access to this NSD (if the NSD is a shared NSD) or
# delete access to it (if its a dedicated NSD).
#
# Args:
# node_names_to_delete: Nodes to be deleted from the cluster
#
# Return:
# rc: Return code
# msg: Output message
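#
# Hedged usage sketch (the logger name and host names below are assumptions
# used only for illustration):
#
#   import logging
#   logger = logging.getLogger("spectrumscale")
#   rc, msg, result_json = remove_nodes(logger,
#                                       ["node1.example.com",
#                                        "node2.example.com"])
#   if rc == RC_SUCCESS:
#       logger.info(msg)
#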
def remove_nodes(logger, node_names_to_delete):
logger.debug("Function Entry: remove_nodes(). "
"Args: node_list={0}".format(node_names_to_delete))
rc = RC_SUCCESS
msg = result_json = ""
removed_node_list = []
logger.info("Attempting to remove node(s) {0} from the "
"cluster".format(' '.join(map(str, node_names_to_delete))))
# TODO: The cluster health check should only fail if we are attempting
# to remove NSD servers while other NSD servers are down. The
# removal of compute nodes should be permitted even if NSD
# servers are down. For now disable check until correct algorithm
# can be implemented
# Ensure all nodes in the cluster are healthy
#check_cluster_health(logger)
# Check that the list of nodes to delete already exist. If not,
# simply ignore
nodes_to_delete = check_nodes_exist(logger, node_names_to_delete)
if len(nodes_to_delete) == 0:
msg = str("All node(s) marked for removal ({0}) are already not part "
"of the cluster".format(' '.join(map(str,
node_names_to_delete))))
logger.info(msg)
return rc, msg, result_json
# Precheck nodes to make sure they do not have any roles that should
# not be deleted
check_roles_before_delete(logger, nodes_to_delete)
# For each Filesystem, Get the Filesystem to NSD (disk) mapping
fs_nsd_map = get_filesystem_to_nsd_mapping(logger)
# TODO: The disk health check should only fail if we are attempting
# to remove NSD servers when any disks are down. The removal
# of compute nodes should be permitted even if disks are down.
# For now disable check until correct algorithm can be implemented
#check_disk_health(logger, fs_nsd_map)
# An NSD node can have access to a multi attach NSD (shared NSD) or
# dedicated access to the NSD (FPO model) or a combination of both.
# First modify the Shared NSDs and remove access to all NSD Nodes
# that are to be deleted. Note: As long as these are Shared NSD's
# another NSD server will continue to have access to the NSD (and
# therefore Data)
remove_multi_attach_nsd(logger, nodes_to_delete)
# Finally delete any dedicated NSDs (this will force the data to be
# copied to another NSD in the same Filesystem). Finally delete the
# node from the cluster
logger.debug("Identified all filesystem to disk mapping: "
"{0}".format(fs_nsd_map))
for node_to_del_obj in nodes_to_delete:
node_to_del = node_to_del_obj.get_admin_node_name()
logger.debug("Operating on server: {0}".format(node_to_del))
# For each node to be deleted, retrieve the NSDs (disks) on the node
all_node_disks = get_all_nsds_of_node(logger, node_to_del)
logger.debug("Identified disks for server ({0}): "
"{1}".format(node_to_del, all_node_disks))
# The Node does not have any disks on it (compute node). Delete the
# node without any more processing
if len(all_node_disks) == 0:
logger.info("Unmounting filesystem(s) on {0}".format(node_to_del))
SpectrumScaleFS.unmount_filesystems(node_to_del, wait=True)
logger.info("Shutting down node {0}".format(node_to_del))
SpectrumScaleNode.shutdown_node(node_to_del, wait=True)
logger.info("Deleting compute node {0}".format(node_to_del))
SpectrumScaleCluster.delete_node(node_to_del)
removed_node_list.append(node_to_del)
continue
# Generate a list of NSD (disks) on the host to be deleted for
# each filesystem
#
# fs_disk_map{} contains the following:
# Filesystem Name -> NSDs on the host to be deleted
fs_disk_map = {}
for fs_name, disks in list(fs_nsd_map.items()):
node_specific_disks = []
for disk_instance in disks:
if disk_instance.get_nsd_name() in all_node_disks:
node_specific_disks.append(disk_instance.get_nsd_name())
fs_disk_map[fs_name] = node_specific_disks
logger.debug("Identified filesystem to disk map for server "
"({0}): {1}".format(node_to_del, fs_disk_map))
for fs in fs_disk_map:
disk_cap = gpfs_df_disk(logger, fs)
logger.debug("Identified disk capacity for filesystem "
"({0}): {1}".format(fs, disk_cap))
# Algorithm used to check that at least 20% free space remains while
# "mmdeldisk" is in progress:
# - Identify the amount of data stored on the disks that are going to
#   be deleted.
# - Identify the free space on the remaining disks of the filesystem
#   (excluding the disks that are going to be deleted).
# - Allow the disk deletion only if at least 20% of that free space is
#   still available after the data on the deleted disks is migrated.
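# Worked example with illustrative numbers: if the disks to be deleted
# hold 2 TB of data and the remaining disks have 10 TB free in total, then
# size_avail_after_migration = 10 TB - 2 TB = 8 TB and
# percent = 8 * 100 / 10 = 80, which passes the >= 20% check below.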
size_to_be_del = 0
for disk in fs_disk_map[fs]:
size_to_be_del += disk_cap[disk]['used_size']
logger.debug("Identified data size going to be deleted from "
"filesystem ({0}): {1}".format(fs, size_to_be_del))
other_disks = []
for disk_name in disk_cap:
if disk_name not in fs_disk_map[fs]:
other_disks.append(disk_name)
logger.debug("Identified other disks of the filesystem "
"({0}): {1}".format(fs, other_disks))
if not other_disks:
msg = str("No free disks available to restripe data "
"for the filesystem {0}".format(fs))
logger.error(msg)
raise SpectrumScaleException(msg=msg, mmcmd="", cmdargs=[],
rc=-1, stdout="", stderr="")
size_avail_after_migration, total_free = 0, 0
for disk in other_disks:
# Accumulate free size on all disks.
total_free += disk_cap[disk]['free_size']
logger.debug("Identified free size in other disks of the "
"filesystem ({0}): {1}".format(fs, total_free))
size_avail_after_migration = total_free - size_to_be_del
logger.debug("Expected size after restriping of the filesystem "
"({0}): {1}".format(fs, size_avail_after_migration))
percent = int(size_avail_after_migration*100/total_free)
logger.debug("Expected percentage of size left after restriping "
"of the filesystem ({0}): {1}".format(fs, percent))
if percent < 20:
msg = ("Not enough space left for restriping data for "
"filesystem {0}".format(fs))
logger.error(msg)
raise SpectrumScaleException(msg=msg, mmcmd="", cmdargs=[],
rc=-1, stdout="", stderr="")
if fs_disk_map[fs]:
# mmdeldisk will not be hit if there are no disks to delete.
logger.info("Deleting disk(s) {0} from node "
"{1}".format(' '.join(map(str, fs_disk_map[fs])),
node_to_del))
SpectrumScaleDisk.delete_disk(node_to_del, fs, fs_disk_map[fs])
if all_node_disks:
# mmdelnsd will not be hit if there are no disks to delete.
logger.info("Deleting all NSD(s) {0} attached to node "
"{1}".format(' '.join(map(str, all_node_disks)),
node_to_del))
SpectrumScaleNSD.delete_nsd(all_node_disks)
logger.info("Unmounting filesystem(s) on {0}".format(node_to_del))
SpectrumScaleFS.unmount_filesystems(node_to_del, wait=True)
logger.info("Shutting down node {0}".format(node_to_del))
SpectrumScaleNode.shutdown_node(node_to_del, wait=True)
logger.info("Deleting storage node {0}".format(node_to_del))
SpectrumScaleCluster.delete_node(node_to_del)
removed_node_list.append(node_to_del)
msg = str("Successfully removed node(s) {0} from the "
"cluster".format(' '.join(map(str, removed_node_list))))
logger.info(msg)
logger.debug("Function Exit: remove_nodes(). "
"Return Params: rc={0} msg={1}".format(rc, msg))
return rc, msg, result_json
###############################################################################
## ##
## Functions to retrieve Node information ##
## ##
###############################################################################
def get_node_info_as_json(logger, node_names=[]):
logger.debug("Function Entry: get_node_info_as_json(). "
"Args: node_names={0}".format(node_names))
rc = 0
msg = result_json = ""
node_info_dict = {}
node_info_list = []
cluster = SpectrumScaleCluster()
node_instance_list = cluster.get_nodes()
for node_instance in node_instance_list:
if len(node_names) == 0:
node_info_list.append(node_instance.get_node_dict())
else:
if (node_instance.get_ip_address() in node_names or
node_instance.get_admin_node_name() in node_names or
node_instance.get_daemon_node_name() in node_names):
node_info_list.append(node_instance.get_node_dict())
node_info_dict["clusterNodes"] = node_info_list
result_json = json.dumps(node_info_dict)
msg = "List cluster successfully executed"
logger.debug("Function Exit: get_node_info_as_json(). "
"Return Params: rc={0} msg={1} "
"result_json={2}".format(rc, msg, result_json))
return rc, msg, result_json
def get_node_status_as_json(logger, node_names=[]):
logger.debug("Function Entry: get_node_status_as_json(). "
"Args: node_names={0}".format(node_names))
rc = 0
msg = result_json = ""
node_status = {}
node_state = SpectrumScaleNode.get_state(node_names)
result_json = json.dumps(node_state)
msg = "Cluster status successfully executed"
logger.debug("Function Exit: get_node_status_as_json(). "
"Return Params: rc={0} msg={1} "
"result_json={2}".format(rc, msg, result_json))
return rc, msg, result_json
###############################################################################
## ##
## Functions to Stop/Start Node(s) in the Cluster ##
## ##
###############################################################################
def start_nodes(logger, node_names):
logger.debug("Function Entry: start_nodes(). "
"Args: node_names={0}".format(node_names))
rc = RC_SUCCESS
msg = stdout = result_json = ""
for node in node_names:
logger.info("Attempting to start node {0}".format(node))
rc, stdout = SpectrumScaleNode.start_node(node, wait=True)
msg = str("Successfully started node(s) "
"{0}".format(' '.join(map(str, node_names))))
logger.info(msg)
logger.debug("Function Exit: start_nodes(). "
"Return Params: rc={0} msg={1} "
"result_json={2}".format(rc, msg, result_json))
return rc, msg, result_json
def stop_nodes(logger, node_names):
logger.debug("Function Entry: stop_nodes(). "
"Args: node_names={0}".format(node_names))
rc = RC_SUCCESS
msg = stdout = result_json = ""
for node in node_names:
logger.info("Attempting to stop node {0}".format(node))
rc, stdout = SpectrumScaleNode.shutdown_node(node, wait=True)
msg = str("Successfully stopped node(s) "
"{0}".format(' '.join(map(str, node_names))))
logger.info(msg)
logger.debug("Function Exit: stop_nodes(). "
"Return Params: rc={0} msg={1} "
"result_json={2}".format(rc, msg, result_json))
return rc, msg, result_json
###############################################################################
## ##
## Functions to add Node(s) to the Cluster ##
## ##
###############################################################################
def add_nodes(logger, node_names, stanza, license):
logger.debug("Function Entry: add_nodes(). "
"Args: node_names={0}".format(node_names))
rc = RC_SUCCESS
msg = stdout = result_json = ""
logger.info("Attempting to add node(s) {0} to the "
"cluster".format(' '.join(map(str, node_names))))
rc, stdout, stderr = SpectrumScaleCluster.add_node(node_names, stanza)
logger.info("Attempting to | |
import tensorflow as tf
def build_VGG19_feature_extractor(input_shape):
x = tf.keras.Input(shape=input_shape)
base_model = VGG19(weights='imagenet',
include_top=False)
outputs = []
out = x
for layer in base_model.layers:
out = layer(out)
if layer.name in ['block1_conv1', 'block2_conv1', 'block3_conv1',
'block4_conv1', 'block5_conv1']:
outputs.append(out)
if layer.name == 'block5_conv1':
break
return base_model, tf.keras.Model(inputs=x,
outputs=outputs)
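# Hedged usage sketch (the input size and random tensor are assumptions for
# illustration only):
#
#   base, extractor = build_VGG19_feature_extractor((224, 224, 3))
#   feats = extractor(preprocess_for_vgg(tf.random.uniform((1, 224, 224, 3))))
#   # feats is a list of five feature maps, one per block*_conv1 layer,
#   # e.g. for use in a perceptual/style loss.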
from tensorflow.python.keras.applications import imagenet_utils
from tensorflow.python.keras.utils import layer_utils
from tensorflow.python.lib.io import file_io
WEIGHTS_PATH = ('https://storage.googleapis.com/tensorflow/keras-applications/'
'vgg19/vgg19_weights_tf_dim_ordering_tf_kernels.h5')
WEIGHTS_PATH_NO_TOP = ('https://storage.googleapis.com/tensorflow/'
'keras-applications/vgg19/'
'vgg19_weights_tf_dim_ordering_tf_kernels_notop.h5')
def preprocess_for_vgg(x):
return imagenet_utils.preprocess_input(x * 255., data_format=tf.keras.backend.image_data_format(), mode='caffe')
def VGG19(
include_top=True,
weights='imagenet',
input_tensor=None,
input_shape=None,
pooling=None,
classes=1000,
classifier_activation='softmax'):
"""Instantiates the VGG19 architecture.
Reference:
- [Very Deep Convolutional Networks for Large-Scale Image Recognition](
https://arxiv.org/abs/1409.1556) (ICLR 2015)
By default, it loads weights pre-trained on ImageNet. Check 'weights' for
other options.
This model can be built both with 'channels_first' data format
(channels, height, width) or 'channels_last' data format
(height, width, channels).
The default input size for this model is 224x224.
Note: each Keras Application expects a specific kind of input preprocessing.
For VGG19, call `tf.keras.applications.vgg19.preprocess_input` on your
inputs before passing them to the model.
Arguments:
include_top: whether to include the 3 fully-connected
layers at the top of the network.
weights: one of `None` (random initialization),
'imagenet' (pre-training on ImageNet),
or the path to the weights file to be loaded.
input_tensor: optional Keras tensor
(i.e. output of `layers.Input()`)
to use as image input for the model.
input_shape: optional shape tuple, only to be specified
if `include_top` is False (otherwise the input shape
has to be `(224, 224, 3)` (with `channels_last` data format)
or `(3, 224, 224)` (with `channels_first` data format)).
It should have exactly 3 input channels,
and width and height should be no smaller than 32.
E.g. `(200, 200, 3)` would be one valid value.
pooling: Optional pooling mode for feature extraction
when `include_top` is `False`.
- `None` means that the output of the model will be
the 4D tensor output of the
last convolutional block.
- `avg` means that global average pooling
will be applied to the output of the
last convolutional block, and thus
the output of the model will be a 2D tensor.
- `max` means that global max pooling will
be applied.
classes: optional number of classes to classify images
into, only to be specified if `include_top` is True, and
if no `weights` argument is specified.
classifier_activation: A `str` or callable. The activation function to use
on the "top" layer. Ignored unless `include_top=True`. Set
`classifier_activation=None` to return the logits of the "top" layer.
Returns:
A `keras.Model` instance.
Raises:
ValueError: in case of invalid argument for `weights`,
or invalid input shape.
ValueError: if `classifier_activation` is not `softmax` or `None` when
using a pretrained top layer.
"""
def relu(x):
return tf.where(x <= 0., 0., x)
# Determine proper input shape
input_shape = imagenet_utils.obtain_input_shape(
input_shape,
default_size=224,
min_size=32,
data_format=tf.keras.backend.image_data_format(),
require_flatten=include_top,
weights=weights)
if input_tensor is None:
img_input = tf.keras.layers.Input(shape=input_shape)
else:
if not tf.keras.backend.is_keras_tensor(input_tensor):
img_input = tf.keras.layers.Input(tensor=input_tensor, shape=input_shape)
else:
img_input = input_tensor
# Block 1
x = tf.keras.layers.Conv2D(
64, (3, 3), activation=relu, padding='same', name='block1_conv1')(
img_input)
x = tf.keras.layers.Conv2D(
64, (3, 3), activation=relu, padding='same', name='block1_conv2')(x)
x = tf.keras.layers.MaxPooling2D((2, 2), strides=(2, 2), name='block1_pool')(x)
# Block 2
x = tf.keras.layers.Conv2D(
128, (3, 3), activation=relu, padding='same', name='block2_conv1')(x)
x = tf.keras.layers.Conv2D(
128, (3, 3), activation=relu, padding='same', name='block2_conv2')(x)
x = tf.keras.layers.MaxPooling2D((2, 2), strides=(2, 2), name='block2_pool')(x)
# Block 3
x = tf.keras.layers.Conv2D(
256, (3, 3), activation=relu, padding='same', name='block3_conv1')(x)
x = tf.keras.layers.Conv2D(
256, (3, 3), activation=relu, padding='same', name='block3_conv2')(x)
x = tf.keras.layers.Conv2D(
256, (3, 3), activation=relu, padding='same', name='block3_conv3')(x)
x = tf.keras.layers.Conv2D(
256, (3, 3), activation=relu, padding='same', name='block3_conv4')(x)
x = tf.keras.layers.MaxPooling2D((2, 2), strides=(2, 2), name='block3_pool')(x)
# Block 4
x = tf.keras.layers.Conv2D(
512, (3, 3), activation=relu, padding='same', name='block4_conv1')(x)
x = tf.keras.layers.Conv2D(
512, (3, 3), activation=relu, padding='same', name='block4_conv2')(x)
x = tf.keras.layers.Conv2D(
512, (3, 3), activation=relu, padding='same', name='block4_conv3')(x)
x = tf.keras.layers.Conv2D(
512, (3, 3), activation=relu, padding='same', name='block4_conv4')(x)
x = tf.keras.layers.MaxPooling2D((2, 2), strides=(2, 2), name='block4_pool')(x)
# Block 5
x = tf.keras.layers.Conv2D(
512, (3, 3), activation=relu, padding='same', name='block5_conv1')(x)
x = tf.keras.layers.Conv2D(
512, (3, 3), activation=relu, padding='same', name='block5_conv2')(x)
x = tf.keras.layers.Conv2D(
512, (3, 3), activation=relu, padding='same', name='block5_conv3')(x)
x = tf.keras.layers.Conv2D(
512, (3, 3), activation=relu, padding='same', name='block5_conv4')(x)
x = tf.keras.layers.MaxPooling2D((2, 2), strides=(2, 2), name='block5_pool')(x)
if include_top:
# Classification block
x = tf.keras.layers.Flatten(name='flatten')(x)
x = tf.keras.layers.Dense(4096, activation=relu, name='fc1')(x)
x = tf.keras.layers.Dense(4096, activation=relu, name='fc2')(x)
imagenet_utils.validate_activation(classifier_activation, weights)
x = tf.keras.layers.Dense(classes, activation=classifier_activation,
name='predictions')(x)
else:
if pooling == 'avg':
x = tf.keras.layers.GlobalAveragePooling2D()(x)
elif pooling == 'max':
x = tf.keras.layers.GlobalMaxPooling2D()(x)
# Ensure that the model takes into account
# any potential predecessors of `input_tensor`.
if input_tensor is not None:
inputs = layer_utils.get_source_inputs(input_tensor)
else:
inputs = img_input
# Create model.
model = tf.keras.Model(inputs, x, name='vgg19')
# Load weights.
if weights == 'imagenet':
if include_top:
weights_path = tf.keras.utils.get_file(
'vgg19_weights_tf_dim_ordering_tf_kernels.h5',
WEIGHTS_PATH,
cache_subdir='models',
file_hash='cbe5617147190e668d6c5d5026f83318')
else:
weights_path = tf.keras.utils.get_file(
'vgg19_weights_tf_dim_ordering_tf_kernels_notop.h5',
WEIGHTS_PATH_NO_TOP,
cache_subdir='models',
file_hash='253f8cb515780f3b799900260a226db6')
model.load_weights(weights_path)
elif weights is not None:
model.load_weights(weights)
return model
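# Hedged usage sketch (the shape and weights choice are assumptions for
# illustration only):
#
#   model = VGG19(include_top=False, weights='imagenet',
#                 input_shape=(224, 224, 3))
#   feature_map = model(tf.zeros((1, 224, 224, 3)))  # -> (1, 7, 7, 512)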
BASE_WEIGHTS_PATH = (
'https://storage.googleapis.com/tensorflow/keras-applications/resnet/')
WEIGHTS_HASHES = {
'resnet50': ('2cb95161c43110f7111970584f804107',
'4d473c1dd8becc155b73f8504c6f6626'),
'resnet101': ('f1aeb4b969a6efcfb50fad2f0c20cfc5',
'88cf7a10940856eca736dc7b7e228a21'),
'resnet152': ('100835be76be38e30d865e96f2aaae62',
'ee4c566cf9a93f14d82f913c2dc6dd0c'),
'resnet50v2': ('3ef43a0b657b3be2300d5770ece849e0',
'fac2f116257151a9d068a22e544a4917'),
'resnet101v2': ('6343647c601c52e1368623803854d971',
'c0ed64b8031c3730f411d2eb4eea35b5'),
'resnet152v2': ('a49b44d1979771252814e80f8ec446f9',
'ed17cf2e0169df9d443503ef94b23b33'),
'resnext50': ('67a5b30d522ed92f75a1f16eef299d1a',
'62527c363bdd9ec598bed41947b379fc'),
'resnext101':
('34fb605428fcc7aa4d62f44404c11509', '0f678c91647380debd923963594981b3')
}
layers = None
def ResNet(stack_fn,
preact,
use_bias,
model_name='resnet',
include_top=True,
weights='imagenet',
input_tensor=None,
input_shape=None,
pooling=None,
classes=1000,
classifier_activation='softmax',
conv_fun=tf.keras.layers.Conv2D,
**kwargs):
if kwargs:
raise ValueError('Unknown argument(s): %s' % (kwargs,))
if not (weights in {'imagenet', None} or file_io.file_exists_v2(weights)):
raise ValueError('The `weights` argument should be either '
'`None` (random initialization), `imagenet` '
'(pre-training on ImageNet), '
'or the path to the weights file to be loaded.')
if weights == 'imagenet' and include_top and classes != 1000:
raise ValueError('If using `weights` as `"imagenet"` with `include_top`'
' as true, `classes` should be 1000')
# Determine proper input shape
input_shape = imagenet_utils.obtain_input_shape(
input_shape,
default_size=224,
min_size=32,
data_format=tf.keras.backend.image_data_format(),
require_flatten=include_top,
weights=weights)
if input_tensor is None:
img_input = tf.keras.layers.Input(shape=input_shape)
else:
if not tf.keras.backend.is_keras_tensor(input_tensor):
img_input = tf.keras.layers.Input(tensor=input_tensor, shape=input_shape)
else:
img_input = input_tensor
bn_axis = 3 if tf.keras.backend.image_data_format() == 'channels_last' else 1
x = tf.keras.layers.ZeroPadding2D(
padding=((3, 3), (3, 3)), name='conv1_pad')(img_input)
x = conv_fun(64, 7, strides=2, use_bias=use_bias, name='conv1_conv')(x)
if not preact:
x = tf.keras.layers.BatchNormalization(
axis=bn_axis, epsilon=1.001e-5, name='conv1_bn')(x)
x = tf.keras.layers.Activation('relu', name='conv1_relu')(x)
x = tf.keras.layers.ZeroPadding2D(padding=((1, 1), (1, 1)), name='pool1_pad')(x)
x = tf.keras.layers.MaxPooling2D(3, strides=2, name='pool1_pool')(x)
x = stack_fn(x)
if preact:
x = tf.keras.layers.BatchNormalization(
axis=bn_axis, epsilon=1.001e-5, name='post_bn')(x)
x = tf.keras.layers.Activation('relu', name='post_relu')(x)
if include_top:
x = tf.keras.layers.GlobalAveragePooling2D(name='avg_pool')(x)
imagenet_utils.validate_activation(classifier_activation, weights)
x = tf.keras.layers.Dense(classes, activation=classifier_activation,
name='predictions')(x)
else:
if pooling == 'avg':
x = tf.keras.layers.GlobalAveragePooling2D(name='avg_pool')(x)
elif pooling == 'max':
x = tf.keras.layers.GlobalMaxPooling2D(name='max_pool')(x)
# Ensure that the model takes into account
# any potential predecessors of `input_tensor`.
if input_tensor is not None:
inputs = layer_utils.get_source_inputs(input_tensor)
else:
inputs = img_input
# Create model.
model = tf.keras.Model(inputs, x, name=model_name)
# Load weights.
if (weights == 'imagenet') and (model_name in WEIGHTS_HASHES):
if include_top:
file_name = model_name + '_weights_tf_dim_ordering_tf_kernels.h5'
file_hash = WEIGHTS_HASHES[model_name][0]
else:
file_name = model_name + '_weights_tf_dim_ordering_tf_kernels_notop.h5'
file_hash = WEIGHTS_HASHES[model_name][1]
weights_path = tf.keras.utils.get_file(
file_name,
BASE_WEIGHTS_PATH + file_name,
cache_subdir='models',
file_hash=file_hash)
model.load_weights(weights_path)
elif weights is not None:
model.load_weights(weights)
return model
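# Hedged usage sketch: ResNet() expects a stack_fn that wires up the residual
# stacks (see block1/stack1 below). A ResNet-50-style configuration uses the
# usual (3, 4, 6, 3) stack layout; it is shown here only as an illustration.
#
#   def resnet50_stack_fn(x):
#       x = stack1(x, 64, 3, stride1=1, name='conv2')
#       x = stack1(x, 128, 4, name='conv3')
#       x = stack1(x, 256, 6, name='conv4')
#       return stack1(x, 512, 3, name='conv5')
#
#   model = ResNet(resnet50_stack_fn, preact=False, use_bias=True,
#                  model_name='resnet50', include_top=False)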
def block1(x, filters, kernel_size=3, stride=1, conv_shortcut=True, name=None, conv_fun=tf.keras.layers.Conv2D):
"""A residual block.
Args:
x: input tensor.
filters: integer, filters of the bottleneck layer.
kernel_size: default 3, kernel size of the bottleneck layer.
stride: default 1, stride of the first layer.
conv_shortcut: default True, use convolution shortcut if True,
otherwise identity shortcut.
name: string, block label.
Returns:
Output tensor for the residual block.
"""
bn_axis = 3 if tf.keras.backend.image_data_format() == 'channels_last' else 1
if conv_shortcut:
shortcut = conv_fun(
4 * filters, 1, strides=stride, name=name + '_0_conv')(x)
shortcut = tf.keras.layers.BatchNormalization(
axis=bn_axis, epsilon=1.001e-5, name=name + '_0_bn')(shortcut)
else:
shortcut = x
x = conv_fun(filters, 1, strides=stride, name=name + '_1_conv')(x)
x = tf.keras.layers.BatchNormalization(
axis=bn_axis, epsilon=1.001e-5, name=name + '_1_bn')(x)
x = tf.keras.layers.Activation('relu', name=name + '_1_relu')(x)
x = conv_fun(
filters, kernel_size, padding='SAME', name=name + '_2_conv')(x)
x = tf.keras.layers.BatchNormalization(
axis=bn_axis, epsilon=1.001e-5, name=name + '_2_bn')(x)
x = tf.keras.layers.Activation('relu', name=name + '_2_relu')(x)
x = conv_fun(4 * filters, 1, name=name + '_3_conv')(x)
x = tf.keras.layers.BatchNormalization(
axis=bn_axis, epsilon=1.001e-5, name=name + '_3_bn')(x)
x = tf.keras.layers.Add(name=name + '_add')([shortcut, x])
x = tf.keras.layers.Activation('relu', name=name + '_out')(x)
return x
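# Note on the shortcut: with conv_shortcut=False the input is added back
# unchanged (identity shortcut), so the block assumes the input already has
# 4 * filters channels; in the standard Keras stacks the convolutional
# shortcut is therefore used only for the first block of each stack.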
def stack1(x, filters, blocks, stride1=2, conv_fun=tf.keras.layers.Conv2D, name=None):
"""A set of stacked residual blocks.
Args:
x: input tensor.
filters: integer, filters of the bottleneck layer in a block.
blocks: integer, blocks in the stacked
or a list of strings with the technology names to run.")
# Based on the wealth index, the minimum wage and a lower and upper range for the cost of opportunity
print(f'[{self.specs["Country_name"]}] Getting value of time')
self.get_value_of_time()
# Loop through each technology and calculate all benefits and costs
for tech in techs:
print(f'Calculating health benefits for {tech.name}...')
tech.morbidity(self)
tech.mortality(self)
print(f'Calculating carbon emissions benefits for {tech.name}...')
tech.carbon_emissions(self)
print(f'Calculating time saved benefits for {tech.name}...')
tech.time_saved(self)
print(f'Calculating costs for {tech.name}...')
tech.required_energy(self)
tech.discounted_om(self)
tech.discounted_inv(self)
tech.discount_fuel_cost(self)
tech.salvage(self)
print(f'Calculating net benefit for {tech.name}...\n')
if 'w_costs' not in self.specs.keys():
w_health = 1
w_environment = 1
w_social = 1
w_costs = 1
else:
w_health = self.specs['w_health']
w_environment = self.specs['w_environment']
w_social = self.specs['w_social']
w_costs = self.specs['w_costs']
tech.net_benefit(self, w_health, w_environment, w_social, w_costs)
print('Getting maximum net benefit technologies...')
self.maximum_net_benefit(techs)
print('Extracting indicators...')
print(' - Lives saved')
self.extract_lives_saved()
print(' - Health costs')
self.extract_health_costs_saved()
print(' - Time saved')
self.extract_time_saved()
print(' - Opportunity cost')
self.extract_opportunity_cost()
print(' - Avoided emissions')
self.extract_reduced_emissions()
print(' - Avoided emissions costs')
self.extract_emissions_costs_saved()
print(' - Investment costs')
self.extract_investment_costs()
print(' - Fuel costs')
self.extract_fuel_costs()
print(' - OM costs')
self.extract_om_costs()
print(' - Salvage value')
self.extract_salvage()
print('Done')
def _get_column_functs(self):
columns_dict = {column: 'first' for column in self.gdf.columns}
for column in self.gdf.columns[self.gdf.columns.str.contains('cost|benefit|pop|Pop|Households')]:
columns_dict[column] = 'sum'
columns_dict['max_benefit_tech'] = 'first'
return columns_dict
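# maximum_net_benefit (below) picks, for every settlement, the technology
# whose net benefit is highest among those with non-negative benefits. If the
# winning technology cannot cover all households in a settlement
# (tech.households < Households), the uncovered share is re-assigned to the
# second-best option and the population/household counts are split between
# the two using tech.factor.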
def maximum_net_benefit(self, techs):
net_benefit_cols = [col for col in self.gdf if 'net_benefit_' in col]
benefits_cols = [col for col in self.gdf if 'benefits_' in col]
for benefit, net in zip(benefits_cols, net_benefit_cols):
self.gdf[net + '_temp'] = self.gdf[net]
self.gdf.loc[self.gdf[benefit] < 0, net + '_temp'] = np.nan
temps = [col for col in self.gdf if '_temp' in col]
self.gdf["max_benefit_tech"] = self.gdf[temps].idxmax(axis=1)
self.gdf['max_benefit_tech'] = self.gdf['max_benefit_tech'].str.replace("net_benefit_", "")
self.gdf['max_benefit_tech'] = self.gdf['max_benefit_tech'].str.replace("_temp", "")
self.gdf["maximum_net_benefit"] = self.gdf[temps].max(axis=1)
gdf = gpd.GeoDataFrame()
gdf_copy = self.gdf.copy()
for tech in techs:
current = (tech.households < gdf_copy['Households']) & \
(gdf_copy["max_benefit_tech"] == tech.name)
dff = gdf_copy.loc[current].copy()
if current.sum() > 0:
dff.loc[current, "maximum_net_benefit"] *= tech.factor.loc[current]
dff.loc[current, f'net_benefit_{tech.name}_temp'] = np.nan
second_benefit_cols = temps.copy()
second_benefit_cols.remove(f'net_benefit_{tech.name}_temp')
second_best = dff.loc[current, second_benefit_cols].idxmax(axis=1)
second_best.replace(np.nan, 'NaN', inplace=True)
second_best = second_best.str.replace("net_benefit_", "")
second_best = second_best.str.replace("_temp", "")
second_best.replace('NaN', np.nan, inplace=True)
second_tech_net_benefit = dff.loc[current, second_benefit_cols].max(axis=1) * (1 - tech.factor.loc[current])
dff['max_benefit_tech'] = second_best
dff['maximum_net_benefit'] = second_tech_net_benefit
dff['Calibrated_pop'] *= (1 - tech.factor.loc[current])
dff['Households'] *= (1 - tech.factor.loc[current])
if tech.name == 'Electricity':
dff['Elec_pop_calib'] *= 0
self.gdf.loc[current, 'Elec_pop_calib'] *= tech.factor.loc[current]
self.gdf.loc[current, 'Calibrated_pop'] *= tech.factor.loc[current]
self.gdf.loc[current, 'Households'] *= tech.factor.loc[current]
gdf = gdf.append(dff)
self.gdf = self.gdf.append(gdf)
for net in net_benefit_cols:
self.gdf[net + '_temp'] = self.gdf[net]
temps = [col for col in self.gdf if 'temp' in col]
for tech in self.gdf["max_benefit_tech"].unique():
index = self.gdf.loc[self.gdf['max_benefit_tech'] == tech].index
self.gdf.loc[index, f'net_benefit_{tech}_temp'] = np.nan
isna = self.gdf["max_benefit_tech"].isna()
self.gdf.loc[isna, 'max_benefit_tech'] = self.gdf.loc[isna, temps].idxmax(axis=1)
self.gdf['max_benefit_tech'] = self.gdf['max_benefit_tech'].str.replace("net_benefit_", "")
self.gdf['max_benefit_tech'] = self.gdf['max_benefit_tech'].str.replace("_temp", "")
self.gdf.loc[isna, "maximum_net_benefit"] = self.gdf.loc[isna, temps].max(axis=1)
def add_admin_names(self, admin, column_name):
if isinstance(admin, str):
admin = gpd.read_file(admin)
admin.to_crs(self.gdf.crs, inplace=True)
self.gdf = gpd.sjoin(self.gdf, admin[[column_name, 'geometry']], how="inner", op='intersects')
self.gdf.drop('index_right', axis=1, inplace=True)
self.gdf.sort_index(inplace=True)
def extract_lives_saved(self):
self.gdf["deaths_avoided"] = self.gdf.apply(
lambda row: self.techs[row['max_benefit_tech']].deaths_avoided[row.name], axis=1) * self.gdf["Households"]
def extract_health_costs_saved(self):
self.gdf["health_costs_avoided"] = self.gdf.apply(
lambda row: self.techs[row['max_benefit_tech']].distributed_morbidity[row.name] +
self.techs[row['max_benefit_tech']].distributed_mortality[row.name], axis=1) * self.gdf[
"Households"]
def extract_time_saved(self):
self.gdf["time_saved"] = self.gdf.apply(
lambda row: self.techs[row['max_benefit_tech']].total_time_saved[row.name], axis=1) * \
self.gdf["Households"]
def extract_opportunity_cost(self):
self.gdf["opportunity_cost_gained"] = self.gdf.apply(
lambda row: self.techs[row['max_benefit_tech']].time_value[row.name], axis=1) * \
self.gdf["Households"]
def extract_reduced_emissions(self):
# TODO: Fix this
self.gdf["reduced_emissions"] = self.gdf.apply(
lambda row: self.techs[row['max_benefit_tech']].decreased_carbon_emissions[row.name], axis=1) * self.gdf["Households"]
# except:
# self.gdf["reduced_emissions"] = self.gdf.apply(
# lambda row: self.techs[row['max_benefit_tech']].decreased_carbon_emissions, axis=1) * self.gdf["Households"]
def extract_investment_costs(self):
self.gdf["investment_costs"] = self.gdf.apply(
lambda row: self.techs[row['max_benefit_tech']].discounted_investments[row.name], axis=1) * self.gdf["Households"]
def extract_om_costs(self):
self.gdf["om_costs"] = self.gdf.apply(
lambda row: self.techs[row['max_benefit_tech']].discounted_om_costs, axis=1) * self.gdf["Households"]
def extract_fuel_costs(self):
self.gdf["fuel_costs"] = self.gdf.apply(
lambda row: self.techs[row['max_benefit_tech']].discounted_fuel_cost[row.name], axis=1) * self.gdf[
"Households"]
def extract_salvage(self):
self.gdf["salvage_value"] = self.gdf.apply(
lambda row: self.techs[row['max_benefit_tech']].discounted_salvage_cost[row.name], axis=1) * self.gdf["Households"]
def extract_emissions_costs_saved(self):
# TODO: Fix this
self.gdf["emissions_costs_saved"] = self.gdf.apply(
lambda row: self.techs[row['max_benefit_tech']].decreased_carbon_costs[row.name], axis=1) * self.gdf["Households"]
# except:
# self.gdf["emissions_costs_saved"] = self.gdf.apply(
# lambda row: self.techs[row['max_benefit_tech']].decreased_carbon_costs, axis=1) * self.gdf["Households"]
def gdf_to_csv(self, scenario_name):
name = os.path.join(self.output_directory, scenario_name)
pt = self.gdf.to_crs({'init': 'EPSG:3395'})
pt["X"] = pt["geometry"].x
pt["Y"] = pt["geometry"].y
df = pd.DataFrame(pt.drop(columns='geometry'))
df.to_csv(name)
def extract_wealth_index(self, wealth_index, file_type="csv", x_column="longitude", y_column="latitude",
wealth_column="rwi"):
if file_type == "csv":
df = pd.read_csv(wealth_index)
gdf = gpd.GeoDataFrame(df, geometry=gpd.points_from_xy(df[x_column], df[y_column]))
gdf.crs = 4326
gdf.to_crs(self.gdf.crs, inplace=True)
s1_arr = np.column_stack((self.gdf.centroid.x, self.gdf.centroid.y))
s2_arr = np.column_stack((gdf.centroid.x, gdf.centroid.y))
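# Nearest-neighbour matching: build a KD-tree over the wealth-index points
# and query it with the settlement centroids, then copy the wealth value of
# each nearest point onto the corresponding settlement row.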
def do_kdtree(combined_x_y_arrays, points):
mytree = scipy.spatial.cKDTree(combined_x_y_arrays)
dist, indexes = mytree.query(points)
return dist, indexes
results1, results2 = do_kdtree(s2_arr, s1_arr)
self.gdf["relative_wealth"] = gdf.loc[results2][wealth_column].values
elif file_type == "point":
gdf = gpd.read_file(wealth_index)
gdf.to_crs(self.gdf.crs, inplace=True)
s1_arr = np.column_stack((self.gdf.centroid.x, self.gdf.centroid.y))
s2_arr = np.column_stack((gdf.centroid.x, gdf.centroid.y))
def do_kdtree(combined_x_y_arrays, points):
mytree = scipy.spatial.cKDTree(combined_x_y_arrays)
dist, indexes = mytree.query(points)
return dist, indexes
results1, results2 = do_kdtree(s2_arr, s1_arr)
self.gdf["relative_wealth"] = gdf.loc[results2].reset_index()[wealth_column]
elif file_type == "polygon":
gdf = gpd.read_file(wealth_index)
gdf.to_crs(self.gdf.crs, inplace=True)
gdf.rename(columns={wealth_column: "relative_wealth"}, inplace = True)
self.gdf = gpd.sjoin(self.gdf, gdf[["relative_wealth", "geometry"]], how="left")
elif file_type == "raster":
layer = RasterLayer('Demographics', 'Wealth', layer_path=wealth_index, resample='average')
layer.align(self.base_layer.path)
self.raster_to_dataframe(layer.layer, name="relative_wealth", method='read',
nodata=layer.meta['nodata'], fill_nodata='interpolate')
else:
raise ValueError("file_type needs to be either csv, raster, polygon or point.")
def _create_layer(self, variable, labels=None, cmap=None):
layer = np.empty(self.base_layer.layer.shape)
layer[:] = np.nan
codes = None
if isinstance(self.gdf[variable].iloc[0], str):
dff = self.gdf.copy().reset_index(drop=False)
if isinstance(labels, dict):
for value, label in labels.items():
dff.loc[dff[variable] == value.replace('_', ' '), variable] = label
dff[variable] += ' and '
dff = dff.groupby('index').agg({variable: 'sum'})
dff[variable] = [s[0:len(s) - 5].replace('_', ' ') for s in dff[variable]]
if isinstance(labels, dict):
for value, label in labels.items():
dff.loc[dff[variable] == value.replace('_', ' '), variable] = label
codes = {tech: i for i, tech in enumerate(dff[variable].unique())}
if isinstance(cmap, dict):
cmap = {i: cmap[tech] for i, tech in enumerate(dff[variable].unique())}
layer[self.rows, self.cols] = [codes[tech] for tech in dff[variable]]
else:
dff = self.gdf.copy().reset_index(drop=False)
dff = dff.groupby('index').agg({variable: 'sum'})
layer[self.rows, self.cols] = dff[variable]
raster = RasterLayer('Output', variable)
raster.layer = layer
raster.meta = self.base_layer.meta
raster.meta.update(nodata=np.nan, dtype='float32')
raster.bounds = self.base_layer.bounds
return raster, codes, cmap
def to_raster(self, variable, labels=None, cmap=None):
raster, codes, cmap = self._create_layer(variable, labels=labels, cmap=cmap)
raster.save(os.path.join(self.output_directory, 'Output'))
print(f'Layer saved in {os.path.join(self.output_directory, "Output", variable + ".tif")}\n')
if codes and cmap:
with open(os.path.join(self.output_directory, 'ColorMap.clr'), 'w') as f:
for label, code in codes.items():
r = int(to_rgb(cmap[code])[0] * 255)
g = int(to_rgb(cmap[code])[1] * 255)
b = int(to_rgb(cmap[code])[2] * 255)
f.write(f'{code} {r} {g} {b} 255 {label}\n')
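# Hedged usage sketch (the technology labels and colours are assumptions for
# illustration only, and `model` stands for an instance of this class; the
# cmap must provide one colour for every technology present):
#
#   model.to_raster('max_benefit_tech',
#                   labels={'Collected_Traditional_Biomass': 'Biomass'},
#                   cmap={'Biomass': '#1b9e77', 'LPG': '#d95f02'})
#   # writes Output/max_benefit_tech.tif plus a ColorMap.clr legend file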
def plot(self, variable, cmap='viridis', cumulative_count=None, legend_position=(1.05, 1), dpi=150,
admin_layer=None, title=None, labels=None, legend=True, legend_title='', legend_cols=1, rasterized=True,
stats=False, stats_position=(1.05, 0.5)):
raster, codes, cmap = self._create_layer(variable, labels=labels, cmap=cmap)
if isinstance(admin_layer, gpd.GeoDataFrame):
admin_layer = admin_layer
elif not admin_layer:
admin_layer = self.mask_layer.layer
if stats:
fig, ax = plt.subplots(1, 1, figsize=(16, 9), dpi=dpi)
self.add_statistics(ax, stats_position)
else:
ax = None
raster.plot(cmap=cmap, cumulative_count=cumulative_count,
categories=codes, legend_position=legend_position,
admin_layer=admin_layer, title=title, legend=legend,
legend_title=legend_title, legend_cols=legend_cols, rasterized=rasterized,
ax=ax)
def add_statistics(self, ax, stats_position):
summary = self.summary(total=True)
deaths = TextArea("Deaths avoided", textprops=dict(fontsize=12, color='black'))
health = TextArea("Health costs avoided", textprops=dict(fontsize=12, color='black'))
emissions = TextArea("Emissions avoided", textprops=dict(fontsize=12, color='black'))
time = TextArea("Time saved", textprops=dict(fontsize=12, color='black'))
texts_vbox = VPacker(children=[deaths, health, emissions, time], pad=0, sep=6)
deaths_avoided = summary.loc['total', 'deaths_avoided']
health_costs_avoided = summary.loc['total', 'health_costs_avoided']
reduced_emissions = summary.loc['total', 'reduced_emissions']
time_saved = summary.loc['total', 'time_saved']
deaths = TextArea(f"{deaths_avoided:.0f} pp/yr", textprops=dict(fontsize=12, color='black'))
health = TextArea(f"{health_costs_avoided:.2f} b.USD", textprops=dict(fontsize=12, color='black'))
emissions = TextArea(f"{reduced_emissions:.2f} Mton", textprops=dict(fontsize=12, color='black'))
time = TextArea(f"{time_saved:.2f} h/pp.day", textprops=dict(fontsize=12, color='black'))
values_vbox = VPacker(children=[deaths, health, emissions, time], pad=0, sep=6, align='right')
hvox = HPacker(children=[texts_vbox, values_vbox], pad=0, sep=6)
ab = AnnotationBbox(hvox, stats_position,
xycoords='axes fraction',
box_alignment=(0, 0),
pad=0.0,
bboxprops=dict(boxstyle='round',
facecolor='#f1f1f1ff',
edgecolor='lightgray'))
ax.add_artist(ab)
def to_image(self, variable, type='png', cmap='viridis', cumulative_count=None, legend_position=(1.05, 1),
admin_layer=None, title=None, dpi=300, labels=None, legend=True, legend_title='', legend_cols=1,
rasterized=True, stats=False, stats_position=(1.05, 0.5)):
raster, codes, cmap = self._create_layer(variable, labels=labels, cmap=cmap)
raster.bounds = self.base_layer.bounds
if isinstance(admin_layer, gpd.GeoDataFrame):
admin_layer = admin_layer
elif not admin_layer:
admin_layer = self.mask_layer.layer
if stats:
fig, ax = plt.subplots(1, 1, figsize=(16, 9), dpi=dpi)
self.add_statistics(ax, stats_position)
else:
ax = None
raster.save_image(self.output_directory, type=type, cmap=cmap, cumulative_count=cumulative_count,
categories=codes, legend_position=legend_position,
admin_layer=admin_layer, title=title, ax=ax, dpi=dpi,
legend=legend, legend_title=legend_title, legend_cols=legend_cols, rasterized=rasterized)
def to_json(self, name):
self.gdf.to_file(os.path.join(self.output_directory, name), driver='GeoJSON')
def read_data(self, path):
self.gdf = gpd.read_file(path)
def summary(self, total=False):
summary = self.gdf.groupby(['max_benefit_tech']).agg({'Calibrated_pop': lambda row: np.nansum(row) / 1000000,
'maximum_net_benefit': lambda row: np.nansum(
row) / 1000000,
'deaths_avoided': 'sum',
'health_costs_avoided': lambda row: np.nansum(
row) / 1000000,
'time_saved': 'sum',
'opportunity_cost_gained': lambda row: np.nansum(
row) / 1000000,
'reduced_emissions': lambda row: np.nansum(
row) / 1000000000,
'emissions_costs_saved': lambda row: np.nansum(
row) / 1000000,
'investment_costs': lambda row: np.nansum(row) / 1000000,
'fuel_costs': lambda row: np.nansum(row) / 1000000,
'om_costs': lambda row: np.nansum(row) / 1000000,
'salvage_value': lambda row: np.nansum(row)
import functools
import json
import typing as t
import warnings
from datetime import datetime
from io import BytesIO
from .._internal import _to_str
from .._internal import _wsgi_decoding_dance
from ..datastructures import Accept
from ..datastructures import Authorization
from ..datastructures import CharsetAccept
from ..datastructures import CombinedMultiDict
from ..datastructures import EnvironHeaders
from ..datastructures import ETags
from ..datastructures import FileStorage
from ..datastructures import HeaderSet
from ..datastructures import IfRange
from ..datastructures import ImmutableList
from ..datastructures import ImmutableMultiDict
from ..datastructures import iter_multi_items
from ..datastructures import LanguageAccept
from ..datastructures import MIMEAccept
from ..datastructures import MultiDict
from ..datastructures import Range
from ..datastructures import RequestCacheControl
from ..formparser import default_stream_factory
from ..formparser import FormDataParser
from ..http import parse_accept_header
from ..http import parse_authorization_header
from ..http import parse_cache_control_header
from ..http import parse_cookie
from ..http import parse_date
from ..http import parse_etags
from ..http import parse_if_range_header
from ..http import parse_list_header
from ..http import parse_options_header
from ..http import parse_range_header
from ..http import parse_set_header
from ..urls import url_decode
from ..useragents import UserAgent
from ..utils import cached_property
from ..utils import environ_property
from ..utils import header_property
from ..wsgi import get_content_length
from ..wsgi import get_current_url
from ..wsgi import get_host
from ..wsgi import get_input_stream
from werkzeug.exceptions import BadRequest
if t.TYPE_CHECKING:
from wsgiref.types import WSGIApplication
from wsgiref.types import WSGIEnvironment
class Request:
"""Represents an incoming HTTP request, with headers and body taken
from the WSGI environment. Has properties and methods for using the
functionality defined by various HTTP specs. The data in request
objects is read-only.
Text data is assumed to use UTF-8 encoding, which should be true for
the vast majority of modern clients. Using an encoding set by the
client is unsafe in Python due to extra encodings it provides, such
as ``zip``. To change the assumed encoding, subclass and replace
:attr:`charset`.
:param environ: The WSGI environ is generated by the WSGI server and
contains information about the server configuration and client
request.
:param populate_request: Add this request object to the WSGI environ
as ``environ['werkzeug.request']``. Can be useful when
debugging.
:param shallow: Makes reading from :attr:`stream` (and any method
that would read from it) raise a :exc:`RuntimeError`. Useful to
prevent consuming the form data in middleware, which would make
it unavailable to the final application.
.. versionchanged:: 2.0
Combine ``BaseRequest`` and mixins into a single ``Request``
class. Using the old classes is deprecated and will be removed
in version 2.1.
.. versionchanged:: 0.5
Read-only mode is enforced with immutable classes for all data.
"""
#: the charset for the request, defaults to utf-8
charset = "utf-8"
#: the error handling procedure for errors, defaults to 'replace'
encoding_errors = "replace"
#: the maximum content length. This is forwarded to the form data
#: parsing function (:func:`parse_form_data`). When set and the
#: :attr:`form` or :attr:`files` attribute is accessed and the
#: parsing fails because more than the specified value is transmitted
#: a :exc:`~werkzeug.exceptions.RequestEntityTooLarge` exception is raised.
#:
#: Have a look at :doc:`/request_data` for more details.
#:
#: .. versionadded:: 0.5
max_content_length: t.Optional[int] = None
#: the maximum form field size. This is forwarded to the form data
#: parsing function (:func:`parse_form_data`). When set and the
#: :attr:`form` or :attr:`files` attribute is accessed and the
#: data in memory for post data is longer than the specified value a
#: :exc:`~werkzeug.exceptions.RequestEntityTooLarge` exception is raised.
#:
#: Have a look at :doc:`/request_data` for more details.
#:
#: .. versionadded:: 0.5
max_form_memory_size: t.Optional[int] = None
#: the class to use for `args` and `form`. The default is an
#: :class:`~werkzeug.datastructures.ImmutableMultiDict` which supports
#: multiple values per key. Alternatively, it makes sense to use an
#: :class:`~werkzeug.datastructures.ImmutableOrderedMultiDict` which
#: preserves order or a :class:`~werkzeug.datastructures.ImmutableDict`
#: which is the fastest but only remembers the last key. It is also
#: possible to use mutable structures, but this is not recommended.
#:
#: .. versionadded:: 0.6
parameter_storage_class: t.Type[MultiDict] = ImmutableMultiDict
#: the type to be used for list values from the incoming WSGI environment.
#: By default an :class:`~werkzeug.datastructures.ImmutableList` is used
#: (for example for :attr:`access_list`).
#:
#: .. versionadded:: 0.6
list_storage_class: t.Type[t.List] = ImmutableList
#: The type to be used for dict values from the incoming WSGI
#: environment. (For example for :attr:`cookies`.) By default an
#: :class:`~werkzeug.datastructures.ImmutableMultiDict` is used.
#:
#: .. versionchanged:: 1.0.0
#: Changed to ``ImmutableMultiDict`` to support multiple values.
#:
#: .. versionadded:: 0.6
dict_storage_class: t.Type[MultiDict] = ImmutableMultiDict
#: The form data parser that should be used. Can be replaced to customize
#: the form data parsing.
form_data_parser_class: t.Type[FormDataParser] = FormDataParser
#: Optionally a list of hosts that is trusted by this request. By default
#: all hosts are trusted which means that whatever the client sends the
#: host is will be accepted.
#:
#: Because `Host` and `X-Forwarded-Host` headers can be set to any value by
#: a malicious client, it is recommended to either set this property or
#: implement similar validation in the proxy (if application is being run
#: behind one).
#:
#: .. versionadded:: 0.9
trusted_hosts: t.Optional[t.List[str]] = None
#: Indicates whether the data descriptor should be allowed to read and
#: buffer up the input stream. By default it's enabled.
#:
#: .. versionadded:: 0.9
disable_data_descriptor: bool = False
#: The WSGI environment containing HTTP headers and information from
#: the WSGI server.
environ: "WSGIEnvironment"
#: Set when creating the request object. If ``True``, reading from
#: the request body will cause a ``RuntimeError``. Useful to
#: prevent modifying the stream from middleware.
shallow: bool
def __init__(
self,
environ: "WSGIEnvironment",
populate_request: bool = True,
shallow: bool = False,
) -> None:
self.environ = environ
if populate_request and not shallow:
self.environ["werkzeug.request"] = self
self.shallow = shallow
def __repr__(self) -> str:
# make sure the __repr__ even works if the request was created
# from an invalid WSGI environment. If we display the request
# in a debug session we don't want the repr to blow up.
args = []
try:
args.append(f"'{self.url}'")
args.append(f"[{self.method}]")
except Exception:
args.append("(invalid WSGI environ)")
return f"<{type(self).__name__} {' '.join(args)}>"
@property
def url_charset(self) -> str:
"""The charset that is assumed for URLs. Defaults to the value
of :attr:`charset`.
.. versionadded:: 0.6
"""
return self.charset
@classmethod
def from_values(cls, *args, **kwargs) -> "Request":
"""Create a new request object based on the values provided. If
environ is given missing values are filled from there. This method is
useful for small scripts when you need to simulate a request from an URL.
Do not use this method for unittesting, there is a full featured client
object (:class:`Client`) that allows to create multipart requests,
support for cookies etc.
This accepts the same options as the
:class:`~werkzeug.test.EnvironBuilder`.
.. versionchanged:: 0.5
This method now accepts the same arguments as
:class:`~werkzeug.test.EnvironBuilder`. Because of this the
`environ` parameter is now called `environ_overrides`.
:return: request object
"""
from ..test import EnvironBuilder
charset = kwargs.pop("charset", cls.charset)
kwargs["charset"] = charset
builder = EnvironBuilder(*args, **kwargs)
try:
return builder.get_request(cls)
finally:
builder.close()
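# Hedged usage sketch (path, method and form payload are assumptions for
# illustration only):
#
#   req = Request.from_values(
#       "/search?q=werkzeug", method="POST", data={"page": "2"}
#   )
#   assert req.method == "POST"
#   assert req.args["q"] == "werkzeug" and req.form["page"] == "2"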
@classmethod
def application(
cls, f: t.Callable[["Request"], "WSGIApplication"]
) -> "WSGIApplication":
"""Decorate a function as responder that accepts the request as
the last argument. This works like the :func:`responder`
decorator but the function is passed the request object as the
last argument and the request object will be closed
automatically::
@Request.application
def my_wsgi_app(request):
return Response('Hello World!')
As of Werkzeug 0.14 HTTP exceptions are automatically caught and
converted to responses instead of failing.
:param f: the WSGI callable to decorate
:return: a new WSGI callable
"""
#: return a callable that wraps the -2nd argument with the request
#: and calls the function with all the arguments up to that one and
        #: the request. The return value is then called with the last
#: two arguments. This makes it possible to use this decorator for
#: both standalone WSGI functions as well as bound methods and
#: partially applied functions.
from ..exceptions import HTTPException
@functools.wraps(f)
def application(*args):
request = cls(args[-2])
with request:
try:
resp = f(*args[:-2] + (request,))
except HTTPException as e:
resp = e.get_response(args[-2])
return resp(*args[-2:])
return application
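    # Example (hedged sketch): the args[-2] trick above makes the same
    # decorator work for a plain WSGI callable and for a bound method.
    #
    #   @Request.application
    #   def app(request):                       # called as app(environ, start_response)
    #       return Response("plain function")   # args[-2] is environ
    #
    #   class API:
    #       @Request.application
    #       def app(self, request):             # called as api.app(environ, start_response)
    #           return Response("bound method")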
def _get_file_stream(
self,
total_content_length: int,
content_type: t.Optional[str],
filename: t.Optional[str] = None,
content_length: t.Optional[int] = None,
):
"""Called to get a stream for the file upload.
This must provide a file-like | |
x, y, w, h):
"""
Set the dimensions for this view.
:param int x: The horizontal position.
:param int y: The vertical position.
:param int w: The width.
:param int h: The height.
:returns: Self, to allow chaining.
:rtype: VitessceConfigView
"""
self.view["x"] = x
self.view["y"] = y
self.view["w"] = w
self.view["h"] = h
return self
def set_props(self, **kwargs):
"""
Set the props for this view.
:param \*\*kwargs: A variable number of named props.
:returns: Self, to allow chaining.
:rtype: VitessceConfigView
"""
if "props" in self.view:
self.view["props"].update(kwargs)
else:
self.view["props"] = kwargs
return self
def get_props(self):
"""
Get the props for this view.
:returns: The props.
:rtype: dict or None
"""
return self.view.get("props")
def to_dict(self):
return self.view
def __or__(self, other):
return hconcat(self, other)
def __truediv__(self, other):
return vconcat(self, other)
class VitessceConfigCoordinationScope:
"""
A class to represent a coordination scope in the Vitessce view config coordination space.
"""
def __init__(self, c_type, c_scope, c_value=None):
"""
Not meant to be instantiated directly, but instead created and returned by the ``VitessceConfig.add_coordination()`` method.
:param str c_type: The coordination type for this coordination scope.
:param str c_scope: The coordination scope name.
:param c_value: The value for the coordination scope. Optional.
"""
self.c_type = c_type
self.c_scope = c_scope
self.c_value = c_value
def _to_py_params(self):
return {
"c_type": self.c_type,
"c_scope": self.c_scope,
"c_value": self.c_value,
}
def set_value(self, c_value):
"""
Set the value of the coordination scope.
:param any c_value: The coordination value to be set. Can be any value that is valid for the coordination type. Must be serializable to JSON.
:returns: Self, to allow chaining.
:rtype: VitessceConfigCoordinationScope
.. code-block:: python
:emphasize-lines: 14-16
from vitessce import VitessceConfig, Component as cm, CoordinationType as ct
vc = VitessceConfig()
my_dataset = vc.add_dataset(name='My Dataset')
v1 = vc.add_view(cm.SPATIAL, dataset=my_dataset)
v2 = vc.add_view(cm.SPATIAL, dataset=my_dataset)
zoom_scope, x_scope, y_scope = vc.add_coordination(
ct.SPATIAL_ZOOM,
ct.SPATIAL_TARGET_X,
ct.SPATIAL_TARGET_Y,
)
v1.use_coordination(zoom_scope, x_scope, y_scope)
v2.use_coordination(zoom_scope, x_scope, y_scope)
zoom_scope.set_value(2)
x_scope.set_value(0)
y_scope.set_value(0)
"""
self.c_value = c_value
return self
class VitessceConfig:
"""
A class to represent a Vitessce view config.
"""
def __init__(self, name=None, description=None, schema_version="1.0.7"):
"""
Construct a Vitessce view config object.
:param str name: A name for the view config. Optional.
:param str description: A description for the view config. Optional.
:param str schema_version: The view config schema version.
.. code-block:: python
:emphasize-lines: 3
from vitessce import VitessceConfig
vc = VitessceConfig(name='My Config')
"""
self.config = {
"version": schema_version,
"name": name,
"description": description,
"datasets": [],
"coordinationSpace": {},
"layout": [],
"initStrategy": "auto"
}
if name is None:
self.config["name"] = ""
else:
self.config["name"] = name
if description is None:
self.config["description"] = ""
else:
self.config["description"] = description
def _to_py_params(self):
return {
"name": self.config["name"],
"description": self.config["description"],
"schema_version": self.config["version"],
}
def add_dataset(self, name="", uid=None, files=None, objs=None):
"""
Add a dataset to the config.
:param str name: A name for this dataset.
:param str uid: A unique identifier for this dataset. Optional. If None, one will be automatically generated.
        :param files: A list of VitessceConfigDatasetFile instances. Optional.
:type files: list or None
:param objs: A list of AbstractWrapper instances. Optional.
:type objs: list or None
:returns: The instance for the new dataset.
:rtype: VitessceConfigDataset
.. code-block:: python
:emphasize-lines: 5
from vitessce import VitessceConfig, DataType as dt, FileType as ft
vc = VitessceConfig(name='My Config')
my_dataset = (
vc.add_dataset(name='My Dataset')
.add_file(
url="http://example.com/cells.json",
data_type=dt.CELLS,
file_type=ft.CELLS_JSON,
)
)
"""
uid = uid if uid is not None else _get_next_scope(
[d.dataset['uid'] for d in self.config["datasets"]])
assert type(uid) == str
vcd = VitessceConfigDataset(uid, name)
self.config["datasets"].append(vcd)
[d_scope] = self.add_coordination(ct.DATASET)
d_scope.set_value(uid)
if isinstance(files, list):
for obj in files:
vcd._add_file(obj)
if isinstance(objs, list):
for obj in objs:
vcd.add_object(obj)
return vcd
def get_dataset_by_uid(self, uid):
"""
Get a dataset associated with this configuration based on its uid.
:param str uid: The unique identifier for the dataset of interest.
:returns: The dataset object.
:rtype: VitessceConfigDataset or None
"""
for dataset in self.config["datasets"]:
if dataset.get_uid() == uid:
return dataset
return None
def get_dataset_by_coordination_scope_name(self, query_scope_name):
"""
Get a dataset associated with this configuration based on a coordination scope.
:param str query_scope_name: The unique identifier for the dataset coordination scope of interest.
:returns: The dataset object.
:rtype: VitessceConfigDataset or None
"""
if ct.DATASET.value in self.config["coordinationSpace"]:
for scope_name, dataset_scope in self.config["coordinationSpace"][ct.DATASET.value].items():
if scope_name == query_scope_name:
return self.get_dataset_by_uid(dataset_scope.c_value)
return None
def get_datasets(self):
"""
Get the datasets associated with this configuration.
:returns: The list of dataset objects.
:rtype: list of VitessceConfigDataset
"""
return self.config["datasets"]
def add_view(self, component, dataset=None, dataset_uid=None, x=0, y=0, w=1, h=1, mapping=None, coordination_scopes=None, props=None):
"""
Add a view to the config.
:param component: A component name, either as a string or using the Component enum values.
:type component: str or vitessce.constants.Component
:param dataset: A dataset instance to be used for the data visualized in this view. Must provide dataset or dataset_uid, but not both.
:type dataset: VitessceConfigDataset or None
:param dataset_uid: A unique ID for a dataset to be used for the data visualized in this view. Must provide dataset or dataset_uid, but not both.
:type dataset_uid: str or None
:param str mapping: An optional convenience parameter for setting the EMBEDDING_TYPE coordination scope value. This parameter is only applicable to the SCATTERPLOT component.
:param int x: The horizontal position of the view. Must be an integer between 0 and 11. Optional. This will be ignored if you call the `layout` method of this class using `VitessceConfigViewHConcat` and `VitessceConfigViewVConcat` objects.
:param int y: The vertical position of the view. Must be an integer between 0 and 11. Optional. This will be ignored if you call the `layout` method of this class using `VitessceConfigViewHConcat` and `VitessceConfigViewVConcat` objects.
:param int w: The width of the view. Must be an integer between 1 and 12. Optional. This will be ignored if you call the `layout` method of this class using `VitessceConfigViewHConcat` and `VitessceConfigViewVConcat` objects.
:param int h: The height of the view. Must be an integer between 1 and 12. Optional. This will be ignored if you call the `layout` method of this class using `VitessceConfigViewHConcat` and `VitessceConfigViewVConcat` objects.
:param coordination_scopes: A mapping from coordination types to coordination scope names for this view.
:type coordination_scopes: dict or None
:param props: Props to set for the view using the VitessceConfigView.set_props method.
:type props: dict or None
:returns: The instance for the new view.
:rtype: VitessceConfigView
.. code-block:: python
:emphasize-lines: 5-6
from vitessce import VitessceConfig, Component as cm
vc = VitessceConfig()
my_dataset = vc.add_dataset(name='My Dataset')
v1 = vc.add_view(cm.SPATIAL, dataset=my_dataset)
v2 = vc.add_view(cm.SCATTERPLOT, dataset=my_dataset, mapping="X_umap")
"""
# User should only provide dataset or dataset_uid, but not both.
assert isinstance(dataset, VitessceConfigDataset) or isinstance(
dataset_uid, str)
assert dataset is None or dataset_uid is None
assert type(component) in [str, cm]
if dataset is None:
dataset = self.get_dataset_by_uid(dataset_uid)
if dataset is None:
raise ValueError(
"A dataset with the provided dataset_uid could not be found.")
if type(component) == str:
component_str = component
else:
component_str = component.value
# Find the coordination scope name associated with the dataset
dataset_matches = [
scope_name
for scope_name, dataset_scope in self.config["coordinationSpace"][ct.DATASET.value].items()
if dataset_scope.c_value == dataset.dataset["uid"]
] if ct.DATASET.value in self.config["coordinationSpace"].keys() else []
if len(dataset_matches) == 1:
dataset_scope = dataset_matches[0]
else:
raise ValueError(
"No coordination scope matching the dataset parameter could be found in the coordination space.")
# Set up the view's dataset coordination scope based on the dataset parameter.
internal_coordination_scopes = {
ct.DATASET.value: dataset_scope
}
if coordination_scopes is not None:
internal_coordination_scopes.update(coordination_scopes)
vcv = VitessceConfigView(
component_str, internal_coordination_scopes, x, y, w, h)
# Use the mapping parameter if component is scatterplot and the mapping is not None
if mapping is not None:
[et_scope] = self.add_coordination(ct.EMBEDDING_TYPE)
et_scope.set_value(mapping)
vcv.use_coordination(et_scope)
if isinstance(props, dict):
vcv.set_props(**props)
self.config["layout"].append(vcv)
return vcv
def add_coordination(self, *c_types):
"""
Add scope(s) for new coordination type(s) to the config.
:param \*c_types: A variable number of coordination types.
:type \*c_types: str or vitessce.constants.CoordinationType
:returns: The instances for the new scope objects corresponding to each coordination type. These can be linked to views via the ``VitessceConfigView.use_coordination()`` method.
:rtype: list[VitessceConfigCoordinationScope]
.. code-block:: python
:emphasize-lines: 7-11
from vitessce | |
# Copyright (c) 2020 The Khronos Group Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import division, print_function, absolute_import
try:
    from collections.abc import Sequence
except ImportError:  # Python 2 fallback
    from collections import Sequence
from functools import reduce
import six
import typing
import numpy as np
# noinspection PyProtectedMember
class Tensor:
def __init__(self,
graph, # type: Graph,
name=None, # type: typing.Optional[str]
shape=None, # type: typing.Optional[typing.Tuple[int, ...]]
dtype=None, # type: typing.Optional[type]
data=None, # type: typing.Union[None, np.ndarray, typing.Any]
quant=None # type: typing.Optional[typing.Dict[str, typing.Any]]
):
# type: (...)->None
self._graph = graph
self._producers = []
self._consumers = []
self.name = name # type: typing.Optional[str]
self.shape = shape # type: typing.Optional[typing.Tuple[int, ...]]
self.dtype = dtype # type: typing.Optional[type]
self.data = data # type: typing.Union[None, np.ndarray, typing.Any]
self.quant = quant or {} # type: typing.Optional[typing.Dict[str, typing.Any]]
assert isinstance(graph, Graph)
graph._tensors.append(self)
def copy_with(self, graph=None, name=None, dtype=None, shape=None, data=None, quant=None):
return Tensor(graph=graph if graph is not None else self.graph,
name=name if name is not None else self.name,
dtype=dtype if dtype is not None else self.dtype,
shape=shape if shape is not None else self.shape,
data=data if data is not None else self.data,
quant=quant if quant is not None else self.quant)
@property
def graph(self):
# type: ()->typing.Optional[Graph]
return self._graph
@property
def has_producer(self):
return len(self._producers) != 0
@property
def producers(self):
# type: ()->typing.List[Operation]
return self._producers
@property
def producer(self):
# type: ()->typing.Optional[Operation]
assert len(self._producers) <= 1
return self._producers[0] if len(self._producers) == 1 else None
@property
def has_consumer(self):
return len(self._consumers) != 0
@property
def consumers(self):
# type: ()->typing.List[Operation]
return self._consumers
@property
def consumer(self):
# type: ()->typing.Optional[Operation]
return self._consumers[0] if len(self._consumers) == 1 else None
@property
def name(self):
return self._name
@name.setter
def name(self, name):
assert name is None or isinstance(name, str)
self._name = name
@property
def shape(self):
return self._shape
@shape.setter
def shape(self, shape):
assert shape is None or isinstance(shape, (list, tuple))
assert shape is None or all(s is None or isinstance(s, int) for s in shape)
self._shape = tuple(shape) if shape is not None else None
@property
def dtype(self):
return self._dtype
@dtype.setter
def dtype(self, dtype):
assert dtype is None or isinstance(dtype, type)
self._dtype = dtype
@property
def rank(self):
# type: ()->typing.Optional[int]
return len(self.shape) if self.shape is not None else None
@property
def volume(self):
# type: ()->typing.Optional[int]
return reduce((lambda x, y: x * y), self.shape) if self.shape is not None and \
all(s is not None for s in self.shape) else None
@property
def is_constant(self):
# type: ()->bool
return self.data is not None
def __repr__(self):
return self.name if self.name is not None else _hex_id(self)
def __str__(self):
return '{name}: {dtype}[{shape}]'.format(
name=self.name if self.name is not None else _hex_id(self),
dtype=self.dtype.__name__,
shape=', '.join(str(s) for s in self.shape))
_TensorListOrTupleT = typing.Union[typing.List[Tensor], typing.Tuple[Tensor, ...]]
# noinspection PyProtectedMember
class Operation:
def __init__(self,
graph, # type: Graph
type=None, # type: typing.Optional[str]
name=None, # type: typing.Optional[str]
attribs=None, # type: typing.Dict[str, typing.Any]
inputs=None, # type: typing.Union[None, Tensor, _TensorListOrTuple]
outputs=None, # type: typing.Union[None, Tensor, _TensorListOrTuple]
custom=False, # type: bool
):
# type:(...)->None
self._graph = graph
self._inputs = tuple()
self._outputs = tuple()
assert name is None or isinstance(name, str)
if attribs is not None:
assert isinstance(attribs, dict)
assert all(isinstance(key, str) for key in six.iterkeys(attribs))
assert all(not isinstance(value, Tensor) for value in six.itervalues(attribs))
self.type = type # type: typing.Optional[str]
self.name = name # type: typing.Optional[str]
self.attribs = attribs or {} # type: typing.Dict[str, typing.Any]
self.custom = custom # type: bool
assert isinstance(graph, Graph)
graph._operations.append(self)
if inputs is not None:
self.inputs = inputs
if outputs is not None:
self.outputs = outputs
def copy_with(self, graph=None, type=None, name=None, attribs=None, inputs=None, outputs=None, custom=None):
return Operation(graph=graph if graph is not None else self.graph,
type=type if type is not None else self.type,
name=name if name is not None else self.name,
attribs=attribs if attribs is not None else self.attribs,
inputs=inputs if inputs is not None else self.inputs,
outputs=outputs if outputs is not None else self.outputs,
custom=custom if custom is not None else self.custom)
@property
def graph(self):
# type: ()->typing.Optional[Graph]
return self._graph
@property
def inputs(self):
# type: ()->_TensorListOrTupleT
return self._inputs
@property
def input(self):
# type: ()->Tensor
assert len(self._inputs) == 1
return self._inputs[0]
@inputs.setter
def inputs(self, tensors):
# type: (typing.Union[Tensor, _TensorListOrTupleT])->None
if isinstance(tensors, Tensor):
tensors = (tensors,)
for tensor in self._inputs:
assert self in tensor._consumers
for tensor in self._inputs:
if self in tensor._consumers:
tensor._consumers.remove(self)
self._inputs = _ListView(tensors) if isinstance(tensors, list) else tensors
for tensor in tensors:
assert isinstance(tensor, Tensor), "got {}".format(type(tensor))
assert tensor.graph is self.graph
if self not in tensor._consumers:
tensor._consumers.append(self)
@property
def outputs(self):
# type: ()->_TensorListOrTupleT
return self._outputs
@property
def output(self):
# type: ()->Tensor
assert len(self._outputs) == 1
return self._outputs[0]
@outputs.setter
def outputs(self, tensors):
# type: (typing.Union[Tensor, _TensorListOrTupleT])->None
if isinstance(tensors, Tensor):
tensors = (tensors,)
for tensor in self._outputs:
assert self in tensor._producers
tensor._producers.remove(self)
self._outputs = _ListView(tensors) if isinstance(tensors, list) else tensors
for tensor in tensors:
assert isinstance(tensor, Tensor), "got {}".format(type(tensor))
assert tensor.graph is self.graph
assert self not in tensor._producers
tensor._producers.append(self)
@property
def type(self):
return self._type
@type.setter
def type(self, type):
assert type is None or isinstance(type, str), "got '{}'".format(type)
self._type = type
@property
def name(self):
return self._name
@name.setter
def name(self, name):
assert name is None or isinstance(name, str), "got '{}'".format(name)
self._name = name
def __repr__(self):
return self.type if self.type is not None else _hex_id(self)
def __str__(self):
return '{outputs} = {op}{{{attribs}}}({inputs})'.format(
op=self.type if self.type is not None else _hex_id(self),
inputs=', '.join(repr(tensor) for tensor in self._inputs),
outputs=', '.join(str(tensor) for tensor in self._outputs),
attribs=', '.join('{}={}'.format(key, value) for key, value in self.attribs.items()))
# noinspection PyProtectedMember
class Graph:
def __init__(self, name=None):
# type:(typing.Optional[str])->None
self._operations = []
self._tensors = []
self._inputs = []
self._outputs = []
self._name = name
@property
def name(self):
return self._name
@name.setter
def name(self, name):
assert name is None or isinstance(name, str)
self._name = name
@property
def operations(self):
# type: ()->typing.Sequence[Operation]
return _ListView(self._operations)
@property
def tensors(self):
# type: ()->typing.Sequence[Tensor]
return _ListView(self._tensors)
@property
def inputs(self):
# type: ()->typing.Sequence[Tensor]
return _ListView(self._inputs)
@inputs.setter
def inputs(self, tensors):
# type: (_TensorListOrTupleT)->None
assert isinstance(tensors, (list, tuple))
self._inputs = tensors
for tensor in self._inputs:
assert isinstance(tensor, Tensor)
assert tensor.graph is self
@property
def outputs(self):
# type: ()->typing.Sequence[Tensor]
return _ListView(self._outputs)
@outputs.setter
def outputs(self, tensors):
# type: (_TensorListOrTupleT)->None
assert isinstance(tensors, (list, tuple))
self._outputs = tensors
for tensor in self._outputs:
assert isinstance(tensor, Tensor)
assert tensor.graph is self
def remove_tensor(self, tensor):
# type: (Tensor)->None
assert len(tensor.producers) == 0
assert len(tensor.consumers) == 0
assert tensor not in self._inputs
assert tensor not in self._outputs
self._tensors.remove(tensor)
tensor._graph = None
def remove_tensors(self, tensors):
# type: (typing.Iterable[Tensor])->None
for tensor in tensors:
assert len(tensor.producers) == 0
assert len(tensor.consumers) == 0
assert tensor not in self._inputs
assert tensor not in self._outputs
self._tensors = [tensor for tensor in self._tensors if tensor not in tensors]
for tensor in tensors:
tensor._graph = None
def remove_operation(self, operation, unlink=False):
# type: (Operation, bool)->None
if unlink:
operation.inputs = []
operation.outputs = []
else:
assert len(operation.inputs) == 0
assert len(operation.outputs) == 0
self._operations.remove(operation)
operation._graph = None
def remove_operations(self, operations, unlink=False):
# type: (typing.Iterable[Operation], bool)->None
operations = operations if isinstance(operations, set) else set(operations)
for operation in operations:
if unlink:
operation.inputs = []
operation.outputs = []
else:
assert len(operation.inputs) == 0
assert len(operation.outputs) == 0
self._operations = [op for op in self._operations if op not in operations]
for operation in operations:
operation._graph = None
def is_unique(self):
return all(len(t.producers) <= 1 for t in self.tensors)
def is_sorted(self):
seen = set()
for op in self.operations:
for tensor in op.inputs:
for producer in tensor.producers:
if producer not in seen:
return False
seen.add(op)
return True
def sort(self, offset=0):
count = len(self._operations)
sorted = {op: False for op in self._operations[offset:]}
for idx in range(offset, count):
i = idx
while i < count and not all(sorted.get(tensor.producer, True) for tensor in self._operations[i].inputs):
i += | |
shapes, they're completely
# useless. the previous step may also have created
# an empty geometry if there weren't any items of
# the type we're looking for.
if shape.is_empty:
return
# add the shape as-is unless we're trying to keep
# the geometry type or the geometry dimension is
# identical.
self.new_features.append((shape, props, fid))
# intersects the shape with the cutting shape and
# handles attribute projection. anything "inside" is
# kept as it must have intersected the highest
# priority cutting shape already. the remainder is
# returned.
def _intersect(self, shape, props, fid, cutting_shape,
cutting_attr, original_geom_dim):
inside, outside = \
self.intersect_func(shape, cutting_shape)
# intersections are tricky, and it seems that the geos
# library (perhaps only certain versions of it) don't
# handle intersection of a polygon with its boundary
# very well. for example:
#
# >>> import shapely.geometry as g
# >>> p = g.Point(0,0).buffer(1.0, resolution=2)
# >>> b = p.boundary
# >>> b.intersection(p).wkt
# 'MULTILINESTRING ((1 0, 0.7071067811865481 -0.7071067811865469), (0.7071067811865481 -0.7071067811865469, 1.615544574432587e-15 -1), (1.615544574432587e-15 -1, -0.7071067811865459 -0.7071067811865491), (-0.7071067811865459 -0.7071067811865491, -1 -3.231089148865173e-15), (-1 -3.231089148865173e-15, -0.7071067811865505 0.7071067811865446), (-0.7071067811865505 0.7071067811865446, -4.624589118372729e-15 1), (-4.624589118372729e-15 1, 0.7071067811865436 0.7071067811865515), (0.7071067811865436 0.7071067811865515, 1 0))' # noqa
#
# the result multilinestring could be joined back into
# the original object. but because it has separate parts,
# each requires duplicating the start and end point, and
# each separate segment gets a different polygon buffer
# in Tangram - basically, it's a problem all round.
#
# two solutions to this: given that we're cutting, then
# the inside and outside should union back to the
# original shape - if either is empty then the whole
# object ought to be in the other.
#
# the second solution, for when there is actually some
# part cut, is that we can attempt to merge lines back
# together.
if outside.is_empty and not inside.is_empty:
inside = shape
elif inside.is_empty and not outside.is_empty:
outside = shape
elif original_geom_dim == _LINE_DIMENSION:
inside = _linemerge(inside)
outside = _linemerge(outside)
if cutting_attr is not None:
inside_props = props.copy()
inside_props[self.target_attribute] = cutting_attr
else:
inside_props = props
self._add(inside, inside_props, fid,
original_geom_dim)
return outside
def _intersect_cut(shape, cutting_shape):
"""
intersect by cutting, so that the cutting shape defines
a part of the shape which is inside and a part which is
outside as two separate shapes.
"""
inside = shape.intersection(cutting_shape)
outside = shape.difference(cutting_shape)
return inside, outside
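# Example (a hedged sketch, assuming Shapely geometries): cutting a line with
# a box splits it into the part inside and the part outside.
#
#   from shapely.geometry import LineString, box
#   inside, outside = _intersect_cut(LineString([(0, 0), (2, 0)]), box(0, -1, 1, 1))
#   # inside  -> LINESTRING (0 0, 1 0)
#   # outside -> LINESTRING (1 0, 2 0)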
# intersect by looking at the overlap size. we can define
# a cut-off fraction and if that fraction or more of the
# area of the shape is within the cutting shape, it's
# inside, else outside.
#
# this is done using a closure so that we can curry away
# the fraction parameter.
def _intersect_overlap(min_fraction):
# the inner function is what will actually get
# called, but closing over min_fraction means it
# will have access to that.
def _f(shape, cutting_shape):
overlap = shape.intersection(cutting_shape).area
area = shape.area
# need an empty shape of the same type as the
# original shape, which should be possible, as
# it seems shapely geometries all have a default
# constructor to empty.
empty = type(shape)()
if ((area > 0) and (overlap / area) >= min_fraction):
return shape, empty
else:
return empty, shape
return _f
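# Example (hedged sketch): currying away the fraction gives a drop-in
# intersect function with the same (shape, cutting_shape) signature.
#
#   intersect_80pct = _intersect_overlap(0.8)
#   inside, outside = intersect_80pct(shape, cutting_shape)
#   # `inside` is the whole shape when >= 80% of its area overlaps the
#   # cutting shape, otherwise it is an empty geometry of the same type.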
# intersect by looking at the overlap length. if more than a minimum fraction
# of the shape's length is within the cutting area, then we will consider it
# totally "cut".
def _intersect_linear_overlap(min_fraction):
# the inner function is what will actually get
# called, but closing over min_fraction means it
# will have access to that.
def _f(shape, cutting_shape):
overlap = shape.intersection(cutting_shape).length
total = shape.length
empty = type(shape)()
if ((total > 0) and (overlap / total) >= min_fraction):
return shape, empty
else:
return empty, shape
return _f
# find a layer by iterating through all the layers. this
# would be easier if the layers were in a dict(), but
# that's a pretty invasive change.
#
# returns None if the layer can't be found.
def _find_layer(feature_layers, name):
for feature_layer in feature_layers:
layer_datum = feature_layer['layer_datum']
layer_name = layer_datum['name']
if layer_name == name:
return feature_layer
return None
# shared implementation of the intercut algorithm, used both when cutting
# shapes and using overlap to determine inside / outsideness.
#
# the filter_fn are used to filter which features from the base layer are cut
# with which features from the cutting layer. cutting layer features which do
# not match the filter are ignored, base layer features are left in the layer
# unchanged.
def _intercut_impl(intersect_func, feature_layers, base_layer, cutting_layer,
attribute, target_attribute, cutting_attrs, keep_geom_type,
cutting_filter_fn=None, base_filter_fn=None):
# the target attribute can default to the attribute if
# they are distinct. but often they aren't, and that's
# why target_attribute is a separate parameter.
if target_attribute is None:
target_attribute = attribute
# search through all the layers and extract the ones
# which have the names of the base and cutting layer.
# it would seem to be better to use a dict() for
# layers, and this will give odd results if names are
# allowed to be duplicated.
base = _find_layer(feature_layers, base_layer)
cutting = _find_layer(feature_layers, cutting_layer)
# base or cutting layer not available. this could happen
# because of a config problem, in which case you'd want
# it to be reported. but also can happen when the client
# selects a subset of layers which don't include either
# the base or the cutting layer. then it's not an error.
# the interesting case is when they select the base but
# not the cutting layer...
if base is None or cutting is None:
return None
base_features = base['features']
cutting_features = cutting['features']
# filter out any features that we don't want to cut with
if cutting_filter_fn is not None:
        # materialize the filter so the emptiness short-cut below also works on
        # Python 3, where filter() returns a lazy (always-truthy) iterator.
        cutting_features = list(filter(cutting_filter_fn, cutting_features))
# short-cut return if there are no cutting features => there's nothing
# to do.
if not cutting_features:
return base
# make a cutter object to help out
cutter = _Cutter(cutting_features, cutting_attrs,
attribute, target_attribute,
keep_geom_type, intersect_func)
skipped_features = []
for base_feature in base_features:
if base_filter_fn is None or base_filter_fn(base_feature):
# we use shape to track the current remainder of the
# shape after subtracting bits which are inside cuts.
shape, props, fid = base_feature
cutter.cut(shape, props, fid)
else:
skipped_features.append(base_feature)
base['features'] = cutter.new_features + skipped_features
return base
class Where(object):
"""
A "where" clause for filtering features based on their properties.
This is commonly used in post-processing steps to configure which features
in the layer we want to operate on, allowing us to write simple Python
expressions in the YAML.
"""
def __init__(self, where):
self.fn = compile(where, 'queries.yaml', 'eval')
def __call__(self, feature):
shape, props, fid = feature
local = defaultdict(lambda: None)
local.update(props)
return eval(self.fn, {}, local)
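# Example (hedged sketch): a Where clause evaluates a Python expression
# against a feature's properties; names missing from the properties
# resolve to None via the defaultdict above.
#
#   is_big_park = Where("kind == 'park' and (area or 0) > 10000")
#   feature = (None, {'kind': 'park', 'area': 50000}, 123)
#   assert is_big_park(feature)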
# intercut takes features from a base layer and cuts each
# of them against a cutting layer, splitting any base
# feature which intersects into separate inside and outside
# parts.
#
# the parts of each base feature which are outside any
# cutting feature are left unchanged. the parts which are
# inside have their property with the key given by the
# 'target_attribute' parameter set to the same value as the
# property from the cutting feature with the key given by
# the 'attribute' parameter.
#
# the intended use of this is to project attributes from one
# layer to another so that they can be styled appropriately.
#
# - feature_layers: list of layers containing both the base
# and cutting layer.
# - base_layer: str name of the base layer.
# - cutting_layer: str name of the cutting layer.
# - attribute: optional str name of the property / attribute
# to take from the cutting layer.
# - target_attribute: optional str name of the property /
# attribute to assign on the base layer. defaults to the
# same as the 'attribute' parameter.
# - cutting_attrs: list of str, the priority of the values
# to be used in the cutting operation. this ensures that
# items | |
self.x_tree[self.act_node['ix']] # get X data of the node
self.y_node = self.y_tree[self.act_node['ix']] # get Y data of the node
self.p_val = self.act_node['state']['p']
self.n_reduce_sigma = self.act_node['state']['Cr']
self.n_fail = self.act_node['state']['Cf']
self.gamma = self.act_node['state']['gamma']
        self.gSRS_pct = np.floor(10*self.p_val)/10. # percentage of global SRS (= percentage of Type I candidates)
self.sigma = self._init_sigma*0.5**self.n_reduce_sigma
########### Build RBF surrogate model ##############
if verbose:
sys.stdout.write('Building RBF regression model '+'.'*self._verbose_dot_len)
t1 = default_timer()
self.rbf_mod, _, _, _ = RBF_reg(self.x_node, self.y_node, self._lambda_range,
normalize_data=self._normalize_data, wgt_expon=self.gamma,
n_fold=self._n_fold, kernel=self._rbf_kernel,
poly_deg=self._rbf_poly_deg, pool=self._pool_rbf)
t2 = default_timer()
self.t_build = t2-t1
if verbose:
sys.stdout.write(' Done (time took: %.2e sec).\n' % self.t_build)
########### Propose new points using SRS method ##############
if verbose:
sys.stdout.write('Proposing new points '+'.'*self._verbose_dot_len)
t1 = default_timer()
new_pt = self.SRS()
t2 = default_timer()
self.t_srs = t2-t1
if verbose:
sys.stdout.write(' Done (time took: %.2e sec).\n' % self.t_srs)
tt2 = default_timer()
self.t_prop = tt2-tt1
return new_pt
def SRS(self):
"""
Propose new points using SRS method.
Returns:
new_pt (2d array): Proposed points.
"""
# generate candidate points
if self.gSRS_pct == 1:
# generate candidate points uniformly (global SRS)
cand_pt = np.zeros((self._n_cand, self._dim))
for d, bd in enumerate(self.act_node['domain']):
cand_pt[:, d] = np.random.uniform(low=bd[0], high=bd[1], size=self._n_cand)
else:
n_cand_gSRS = int(np.round(self._n_cand*self.gSRS_pct)) # number of candidate points for global SRS
n_cand_lSRS = self._n_cand-n_cand_gSRS # number of candidate points for local SRS
assert(n_cand_lSRS > 0) # sanity check
# generate candidate points uniformly (global SRS)
cand_pt_gSRS = np.zeros((n_cand_gSRS, self._dim))
if n_cand_gSRS > 0:
for d, bd in enumerate(self.act_node['domain']):
cand_pt_gSRS[:, d] = np.random.uniform(low=bd[0], high=bd[1], size=n_cand_gSRS)
# find x_star
Y_fit = self.rbf_mod(self.x_node)
min_ix = np.argmin(Y_fit)
x_star = self.x_node[min_ix]
assert(np.all([bd[0] <= x_star[j] <= bd[1] for j,bd in enumerate(self.act_node['domain'])])) # sanity check
# find step size (i.e. std) for each coordinate of `x_star`
step_size_arr = np.array([self.sigma*(bd[1]-bd[0]) for bd in self.act_node['domain']])
assert(np.min(step_size_arr) > 0) # sanity check
# generate candidate points (Gaussian about x_star, local SRS)
cand_pt_lSRS = np.random.multivariate_normal(x_star, np.diag(step_size_arr**2), n_cand_lSRS)
# combine two types of candidate points
comb_cand_pt = np.vstack((cand_pt_gSRS, cand_pt_lSRS))
# put candidate points back to the domain, if there's any outside
uniq_cand_pt, raw_cand_pt = put_back_box(comb_cand_pt, self.act_node['domain'])
        # get candidate points (``len(uniq_cand_pt) < n_worker`` is a pathological case, almost never encountered in practice)
cand_pt = uniq_cand_pt if len(uniq_cand_pt) >= self._n_worker else raw_cand_pt
# select new points from candidate points
n_cand = len(cand_pt)
assert(n_cand >= self._n_worker)
resp_cand = self.rbf_mod(cand_pt)
resp_score = scale_zero_one(resp_cand) # response score
# initializations
new_pt = np.zeros((self._n_worker, self._dim))
refer_pt = self.x_node.copy() # reference points based on which we compute distance scores
# select points sequentially
for j in range(self._n_worker):
wt = self.srs_wgt_pat[j]
if len(refer_pt) > 0:
if j == 0:
# distance matrix for `refer_pt` and `cand_pt`
dist_mat = cdist(cand_pt, refer_pt)
dist_cand = np.amin(dist_mat, axis=1)
else:
# distance to the previously proposed point
dist_prop_pt = cdist(cand_pt, new_pt[j-1].reshape((1, -1))).flatten()
dist_cand = np.minimum(dist_cand, dist_prop_pt)
dist_score = scale_one_zero(dist_cand) # distance score
else:
# pathological case
dist_score = np.zeros(n_cand) # distance score
cand_score = resp_score*wt+(1-wt)*dist_score # candidate score
assert (np.amax(cand_score)<=1 and np.amin(cand_score)>=0) # sanity check
# select the best one based on the score
min_ix = np.argmin(cand_score)
new_pt[j] = cand_pt[min_ix]
# update variables
refer_pt = np.vstack((refer_pt, new_pt[j].reshape((1, -1))))
dist_cand = np.delete(dist_cand, min_ix)
resp_score = np.delete(resp_score, min_ix)
cand_pt = np.delete(cand_pt, min_ix, axis=0)
n_cand -= 1
return new_pt
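    # Worked example of the candidate score used above (a hedged sketch,
    # assuming scale_zero_one/scale_one_zero map values onto [0, 1] so that a
    # low predicted response and a large distance are both preferred):
    #
    #   wt = 0.7, resp_score = 0.2, dist_score = 0.9
    #   cand_score = 0.7*0.2 + (1 - 0.7)*0.9 = 0.14 + 0.27 = 0.41
    #
    # The candidate with the smallest cand_score is proposed, so a larger wt
    # favors exploitation (low predicted value) over exploration (distance).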
def eval_pt(self, x, verbose=True):
"""
Evaluate proposed points.
Args:
x (2d array): Points to be evaluated. Each row is one point.
verbose (bool, optional): Whether to verbose about the evaluation.
Returns:
y (1d array): Evaluations of points in `x`.
"""
if verbose:
sys.stdout.write('Evaluating proposed points '+'.'*self._verbose_dot_len)
t1 = default_timer()
assert(callable(self._prob.f)), 'Error! Unable to perform evaluations. Please first define the (noisy) optimization function ``f`` in the ``Problem`` object.'
y = eval_func(self._prob.f, x, n_proc=self._n_worker, seeds=self.eval_seeds.tolist(),
seed_func=self._seed_func)
t2 = default_timer()
self.t_eval = t2-t1
if verbose:
sys.stdout.write(' Done (time took: %.2e sec).\n' % self.t_eval)
return y
def update(self, new_x, new_y, verbose=True):
"""
Update the state of the optimizer.
Args:
new_x (2d array): Proposed new points. Each row is one point.
new_y (1d array): (Noisy) values of the points in `new_x`.
verbose (bool, optional): Whether to verbose about updating the state of the optimizer.
"""
if verbose:
sys.stdout.write('Updating optimizer state '+'.'*self._verbose_dot_len)
specific_msg = '' # specific message indicating the action
t1 = default_timer()
self.i_iter += 1
self.eval_seeds = self._seed+1+np.arange(self.i_iter*self._n_worker, (self.i_iter+1)*self._n_worker, dtype=int)
self.x_tree = np.vstack((self.x_tree, new_x))
self.y_tree = np.append(self.y_tree, new_y)
self.x_all = np.vstack((self.x_all, new_x))
self.y_all = np.append(self.y_all, new_y)
self.seed_all = np.append(self.seed_all, self.eval_seeds)
min_ix = np.argmin(self.y_all)
self.best_x = self.x_all[min_ix]
self.best_y = self.y_all[min_ix]
self.t_build_arr = np.append(self.t_build_arr, self.t_build)
self.t_srs_arr = np.append(self.t_srs_arr, self.t_srs)
self.t_prop_arr = np.append(self.t_prop_arr, self.t_prop)
try:
self.t_eval_arr = np.append(self.t_eval_arr, self.t_eval)
        except AttributeError:
            # i.e., self.t_eval is not defined. This could happen when one uses a customized evaluation function.
self.t_eval_arr = np.append(self.t_eval_arr, np.nan)
self.gSRS_pct_arr = np.append(self.gSRS_pct_arr, self.gSRS_pct)
self.zoom_lv_arr = np.append(self.zoom_lv_arr, self.zoom_lv)
if self.i_iter_doe < self._n_iter_doe: # i.e., current iteration is in DOE phase
self.i_iter_doe += 1
else:
# update weight pattern in SRS method
if self._n_worker == 1:
self.srs_wgt_pat = np.array([self._wgt_pat_bd[0]]) if self.srs_wgt_pat[0] == self._wgt_pat_bd[1] \
else np.array([self._wgt_pat_bd[1]]) # alternating weights
# update tree node
npt = len(self.x_tree)
self.act_node['ix'] = np.append(self.act_node['ix'], np.arange(npt-self._n_worker, npt, dtype=int))
if self._n_worker > 1 or (self._n_worker == 1 and self.srs_wgt_pat[0] == self._wgt_pat_bd[0]):
if self.p_val >= 0.1:
# compute p_val
if self._use_eff_npt:
eff_n = eff_npt(self.x_tree[self.act_node['ix']], self.act_node['domain'])
else:
eff_n = len(self.x_tree[self.act_node['ix']])
self.p_val = self.p_val*eff_n**(-self._alpha/float(self._dim))
if self.gSRS_pct == 0: # i.e. pure local SRS
best_Y_prev = np.min(self.y_node)
best_Y_new = np.min(new_y) # minimum of Y values of newly proposed points
if best_Y_prev <= best_Y_new: # failure
self.n_fail += 1 # count failure
else:
self.n_fail = 0
if self.n_fail == self._max_C_fail:
self.n_fail = 0
self.gamma -= self._delta_gamma
self.n_reduce_sigma += 1 # update counter
self.act_node['state']['p'] = self.p_val
self.act_node['state']['Cr'] = self.n_reduce_sigma
self.act_node['state']['Cf'] = self.n_fail
self.act_node['state']['gamma'] = self.gamma
if self.n_reduce_sigma > self._max_n_reduce_sigma:
# then we either restart or zoom-in (i.e., critical state is reached)
Y_fit = self.rbf_mod(self.x_tree[self.act_node['ix']])
min_ix = np.argmin(Y_fit)
x_star = self.x_tree[self.act_node['ix']][min_ix]
# suppose we're going to zoom in
child_node_ix = self.get_child_node(x_star)
if child_node_ix is None:
# then we create a new child (if zoom in)
domain_lb, domain_ub = zip(*self.act_node['domain'])
blen = np.array(domain_ub)-np.array(domain_lb) # bound length for each dimension
assert(np.min(blen)>0)
domain_lb = np.maximum(x_star-self._rho/2.*blen, domain_lb)
domain_ub = np.minimum(x_star+self._rho/2.*blen, domain_ub)
domain = list(zip(domain_lb, domain_ub)) # the list function is used to ensure compatibility of python3
child_node = {'ix': np.nonzero(boxify(self.x_tree, domain)[0])[0],
'domain': domain,
'parent_ix': self.act_node_ix,
'beta': self._init_beta,
'state': self.init_node_state()}
else:
# then we activate an existing child node (if zoom in)
child_node = self.tree[self.zoom_lv+1][child_node_ix]
child_node['ix'] = np.nonzero(boxify(self.x_tree, child_node['domain'])[0])[0]
child_npt = len(child_node['ix'])
domain_lb, domain_ub = zip(*child_node['domain'])
blen = np.array(domain_ub)-np.array(domain_lb) # bound length for each dimension
assert(np.min(blen)>0)
if np.all(blen*child_npt**(-1./self._dim) < (self._prob.domain_ub-self._prob.domain_lb)*self._resol): # resolution condition
# then we restart
if verbose:
specific_msg += 'Restart for the next iteration!\n'
self.i_iter_doe = 0
self.doe_samp = self.doe()
self.i_cycle += 1
self.zoom_lv = 0
self.act_node_ix = 0
self.x_tree = np.zeros((0, self._dim))
self.y_tree = np.zeros(0)
self.tree = self.init_tree()
else:
# then we zoom in
self.act_node['state'] = self.init_node_state() # reset the state of the current node
self.zoom_lv += 1
if child_node_ix is None:
# then we create a new child
if self.zoom_lv not in self.tree.keys():
self.act_node_ix = 0
self.tree[self.zoom_lv] = [child_node]
else:
self.act_node_ix = len(self.tree[self.zoom_lv])
self.tree[self.zoom_lv].append(child_node)
if verbose:
specific_msg += 'Zoom in (created a new child node)!\n'
else:
# then activate existing child node
self.act_node_ix = child_node_ix
# reduce zoom-out probability
child_node['beta'] = max(self._min_beta, child_node['beta']/2.)
if verbose:
specific_msg += 'Zoom in (activated an existing child node)!\n'
if self._n_worker > 1 or (self._n_worker == 1 and self.srs_wgt_pat[0] == self._wgt_pat_bd[0]):
if np.random.uniform() < self.tree[self.zoom_lv][self.act_node_ix]['beta'] and self.zoom_lv > 0 and self.i_iter_doe >= self._n_iter_doe:
# then we zoom out
child_node = self.tree[self.zoom_lv][self.act_node_ix]
self.act_node_ix = child_node['parent_ix']
self.zoom_lv -= 1
assert(self.act_node_ix is not None)
# | |
self.translation.write(f)
self.rotation.write(f)
self.scale.write(f)
vec3D.write(f, self.pivot)
return self
def build_relations(self, bones):
if self.parent_bone >= 0:
parent = bones[self.parent_bone]
self.parent = parent
parent.children.append(self)
def load_bone_name(self, bone_type_dict):
self.name = M2KeyBones.get_bone_name(self.key_bone_id, self.index)
b_type = bone_type_dict.get(self.index)
if b_type and self.key_bone_id < 0:
prefix, i, item = b_type
if prefix in ('AT', 'ET'):
self.name = "{}_{}_{}".format(prefix, item, i)
elif prefix in ('LT', 'RB', 'PT'):
self.name = "{}_{}".format(prefix, i)
def get_depth(self):
if not self.children:
return 0
return sum(map(lambda x: x.get_depth(), self.children)) + len(self.children)
@staticmethod
def size():
return 86 if M2VersionsManager().m2_version >= M2Versions.WOTLK else 110
#############################################################
###### Geometry and rendering ######
#############################################################
###### Vertices ######
class M2Vertex:
__slots__ = ('pos', 'bone_weights', 'bone_indices',
'normal', 'tex_coords', 'tex_coords2')
def __init__(self):
self.pos = (0.0, 0.0, 0.0)
self.bone_weights = (0, 0, 0, 0)
self.bone_indices = (0, 0, 0, 0)
self.normal = (0.0, 0.0, 0.0)
self.tex_coords = (0.0, 0.0)
self.tex_coords2 = (0.0, 0.0)
def read(self, f):
self.pos = vec3D.read(f)
self.bone_weights = uint8.read(f, 4)
self.bone_indices = uint8.read(f, 4)
self.normal = vec3D.read(f)
self.tex_coords = vec2D.read(f)
self.tex_coords2 = vec2D.read(f)
return self
def write(self, f):
vec3D.write(f, self.pos)
uint8.write(f, self.bone_weights, 4)
uint8.write(f, self.bone_indices, 4)
vec3D.write(f, self.normal)
vec2D.write(f, self.tex_coords)
vec2D.write(f, self.tex_coords2)
return self
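    # On-disk size of one vertex (a hedged back-of-envelope from the fields
    # above, assuming vec3D = 3 floats and vec2D = 2 floats):
    #   12 (pos) + 4 (bone_weights) + 4 (bone_indices)
    #   + 12 (normal) + 8 (tex_coords) + 8 (tex_coords2) = 48 bytes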
###### Render flags ######
class M2Material:
def __init__(self):
self.flags = 0
self.blending_mode = 0 # apparently a bitfield
def read(self, f):
self.flags = uint16.read(f)
self.blending_mode = uint16.read(f)
return self
def write(self, f):
uint16.write(f, self.flags)
uint16.write(f, self.blending_mode)
return self
###### Colors and transparency ######
class M2Color:
def __init__(self):
self.color = M2Track(vec3D, M2Color)
self.alpha = M2Track(fixed16, M2Color)
def read(self, f):
self.color.read(f)
self.alpha.read(f)
return self
def write(self, f):
self.color.write(f)
self.alpha.write(f)
return self
@staticmethod
def size():
return 40 if M2VersionsManager().m2_version >= M2Versions.WOTLK else 56
class M2Texture:
def __init__(self):
self.type = 0
self.flags = 0
self.filename = M2String()
# BFA+ (internal use only)
self.fdid = 0
def read(self, f):
self.type = uint32.read(f)
self.flags = uint32.read(f)
self.filename.read(f)
return self
def write(self, f):
uint32.write(f, self.type)
uint32.write(f, self.flags)
self.filename.write(f)
return self
@staticmethod
def size():
return 16
#############################################################
###### Effects ######
#############################################################
class M2TextureTransform:
def __init__(self):
self.translation = M2Track(vec3D, M2TextureTransform)
self.rotation = M2Track(quat, M2TextureTransform) # rotation center is texture center (0.5, 0.5, 0.5)
self.scaling = M2Track(vec3D, M2TextureTransform)
def read(self, f):
self.translation.read(f)
self.rotation.read(f)
self.scaling.read(f)
return self
def write(self, f):
self.translation.write(f)
self.rotation.write(f)
self.scaling.write(f)
return self
@staticmethod
def size():
return 60 if M2VersionsManager().m2_version >= M2Versions.WOTLK else 84
class M2Ribbon:
def __init__(self):
self.m2_version = M2VersionsManager().m2_version
self.ribbon_id = -1 # Always (as I have seen): -1.
self.bone_index = 0 # A bone to attach to.
self.position = (0.0, 0.0, 0.0) # And a position, relative to that bone.
self.texture_indices = M2Array(uint16) # into textures
self.material_indices = M2Array(uint16) # into materials
self.color_track = M2Track(vec3D, M2Ribbon)
self.alpha_track = M2Track(fixed16, M2Ribbon) # And an alpha value in a short, where: 0 - transparent, 0x7FFF - opaque.
self.height_above_track = M2Track(float32, M2Ribbon)
self.height_below_track = M2Track(float32, M2Ribbon) # do not set to same!
self.edges_per_second = 0.0 # this defines how smooth the ribbon is. A low value may produce a lot of edges.
self.edge_lifetime = 0.0 # the length aka Lifespan. in seconds
self.gravity = 0.0 # use arcsin(val) to get the emission angle in degree
self.texture_rows = 0 # tiles in texture
self.texture_cols = 0
self.tex_slot_track = M2Track(uint16, M2Ribbon)
self.visibility_track = M2Track(uint8, M2Ribbon)
if self.m2_version >= M2Versions.WOTLK: # TODO: verify version
self.priority_plane = 0
self.padding = 0
def read(self, f):
self.ribbon_id = int32.read(f)
self.bone_index = uint32.read(f)
self.position = vec3D.read(f)
self.texture_indices.read(f)
self.material_indices.read(f)
self.color_track.read(f)
self.alpha_track.read(f)
self.height_above_track.read(f)
self.height_below_track.read(f)
self.edges_per_second = float32.read(f)
self.edge_lifetime = float32.read(f)
self.gravity = float32.read(f)
self.texture_rows = uint16.read(f)
self.texture_cols = uint16.read(f)
self.tex_slot_track.read(f)
self.visibility_track.read(f)
if self.m2_version >= M2Versions.WOTLK:
self.priority_plane = int16.read(f)
self.padding = uint16.read(f)
return self
def write(self, f):
int32.write(f, self.ribbon_id)
uint32.write(f, self.bone_index)
vec3D.write(f, self.position)
self.texture_indices.write(f)
self.material_indices.write(f)
self.color_track.write(f)
self.alpha_track.write(f)
self.height_above_track.write(f)
self.height_below_track.write(f)
float32.write(f, self.edges_per_second)
float32.write(f, self.edge_lifetime)
float32.write(f, self.gravity)
uint16.write(f, self.texture_rows)
uint16.write(f, self.texture_cols)
self.tex_slot_track.write(f)
self.visibility_track.write(f)
if self.m2_version >= M2Versions.WOTLK:
int16.write(f, self.priority_plane)
uint16.write(f, self.padding)
return self
@staticmethod
def size():
return 176 if M2VersionsManager().m2_version >= M2Versions.WOTLK else 220
class M2Particle:
def __init__(self):
self.m2_version = M2VersionsManager().m2_version
self.particle_id = 0 # Always (as I have seen): -1.
self.flags = 0 # See Below
self.position = (0.0, 0.0, 0.0) # The position. Relative to the following bone.
self.bone = 0 # The bone its attached to.
self.texture = 0 # And the textures that are used. For multi-textured particles actually three ids
self.geometry_model_filename = M2Array(int8) # if given, this emitter spawns models
self.recursion_model_filename = M2Array(int8) # if given, this emitter is an alias for the (maximum 4) emitters of the given model
if self.m2_version >= M2Versions.TBC:
self.blending_type = 0 # A blending type for the particle. See Below
self.emitter_type = 0 # 1 - Plane (rectangle), 2 - Sphere, 3 - Spline, 4 - Bone
self.particle_color_index = 0 # This one is used for ParticleColor.dbc. See below.
else:
self.blending_type = 0 # A blending type for the particle. See Below
self.emitter_type = 0 # 1 - Plane (rectangle), 2 - Sphere, 3 - Spline, 4 - Bone
if self.m2_version >= M2Versions.CATA:
self.multi_tex_param_x = fixed_point(uint8, 2, 5)
self.multi_tex_param_y = fixed_point(uint8, 2, 5)
else:
self.particle_type = 0 # Found below.
self.head_or_tail = 0 # 0 - Head, 1 - Tail, 2 - Both
self.texture_tile_rotation = 0 # Rotation for the texture tile. (Values: -1,0,1) -- priorityPlane
self.texture_dimensions_rows = 0 # for tiled textures
self.texture_dimension_columns = 0
self.emission_speed = M2Track(float32, M2Particle) # Base velocity at which particles are emitted.
self.speed_variation = M2Track(float32, M2Particle) # Random variation in particle emission speed. (range: 0 to 1)
self.vertical_range = M2Track(float32, M2Particle) # Drifting away vertically. (range: 0 to pi) For plane generators, this is the maximum polar angle of the initial velocity;
self.horizontal_range = M2Track(float32, M2Particle) # They can do it horizontally too! (range: 0 to 2*pi) For plane generators, this is the maximum azimuth angle of the initial velocity;
# 0 makes the velocity have no sideways (y-axis) component. For sphere generators, this is the maximum azimuth angle of the initial position.
self.gravity = M2Track(float32, M2Particle) # Not necessarily a float; see below.
        self.lifespan = M2Track(float32, M2Particle) # How long each particle lives, in seconds.
if self.m2_version >= M2Versions.WOTLK:
self.life_span_vary = 0.0 # An individual particle's lifespan is added to by lifespanVary * random(-1, 1)
self.emission_rate_vary = 0.0 # This adds to the base emissionRate value the same way as lifespanVary. The random value is different every update.
self.emission_rate = M2Track(float32, M2Particle)
self.emission_area_length = M2Track(float32, M2Particle) # For plane generators, this is the width of the plane in the x-axis. For sphere generators, this is the minimum radius.
self.emission_area_width = M2Track(float32, M2Particle) # For plane generators, this is the width of the plane in the y-axis. For sphere generators, this is the maximum radius.
self.z_source = M2Track(float32, M2Particle) # When greater than 0, the initial velocity of the particle is (particle.position - C3Vector(0, 0, zSource)).Normalize()
if self.m2_version >= M2Versions.WOTLK:
self.color_track = FBlock(vec3D) # Most likely they all have 3 timestamps for {start, middle, end}.
self.alpha_track = FBlock(fixed16)
self.scale_track = FBlock(vec2D)
self.scale_vary = (0.0, 0.0) # A percentage amount to randomly vary the scale of each particle
self.head_cell_track = FBlock(uint16) # Some kind of intensity values seen: 0,16,17,32 (if set to different it will have high intensity)
self.tail_cell_track = FBlock(uint16)
else:
self.mid_point = 0.0 # Middle point in lifespan (0 to 1).
self.color_values = Array((Array << uint8, 4), 3)
self.scale_values = Array(float32, 4)
self.head_cell_begin = Array(uint16, 2)
self.head_cell_end = Array(uint16, 2)
self.tiles = Array(int16, 4) # Indices into the tiles on the texture? Or tailCell maybe?
self.tail_length = 0.0 # TailCellTime?
self.twinkle_speed = 0.0 # has something to do with the spread
self.twinkle_percent = 0.0 # has something to do with the spread
self.twinkle_scale = CRange()
self.burst_multiplier = 0.0 # ivelScale
        self.drag = 0.0 # For non-zero values, instead of travelling linearly, the particles slow down sooner. Speed is multiplied by exp( -drag * t ).
if self.m2_version >= M2Versions.WOTLK:
self.basespin = 0.0 # Initial rotation of | |
"""
Copyright 2019 Google LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Parse the regression testlist in YAML format
"""
import os
import random
import sys
import re
import subprocess
import time
import yaml
import logging
from datetime import date
RET_SUCCESS = 0
RET_FAIL = 1
RET_FATAL = -1
def setup_logging(verbose):
"""Setup the root logger.
Args:
verbose: Verbose logging
"""
if verbose:
logging.basicConfig(format="%(asctime)s %(filename)s:%(lineno)-5s %(levelname)-8s %(message)s",
datefmt='%a, %d %b %Y %H:%M:%S',
level=logging.DEBUG)
else:
logging.basicConfig(format="%(asctime)s %(levelname)-8s %(message)s",
datefmt='%a, %d %b %Y %H:%M:%S',
level=logging.INFO)
def read_yaml(yaml_file):
""" Read YAML file to a dictionary
Args:
yaml_file : YAML file
Returns:
yaml_data : data read from YAML in dictionary format
"""
with open(yaml_file, "r") as f:
try:
yaml_data = yaml.safe_load(f)
except yaml.YAMLError as exc:
logging.error(exc)
sys.exit(RET_FAIL)
return yaml_data
def get_env_var(var, debug_cmd = None):
"""Get the value of environment variable
Args:
var : Name of the environment variable
Returns:
val : Value of the environment variable
"""
try:
val = os.environ[var]
except KeyError:
if debug_cmd:
return var
else:
logging.warning("Please set the environment variable %0s" % var)
sys.exit(RET_FAIL)
return val
def get_seed(seed):
"""Get the seed to run the generator
Args:
seed : input seed
Returns:
seed to run instruction generator
"""
if seed >= 0:
return seed
return random.getrandbits(32)
def run_cmd(cmd, timeout_s = 999, exit_on_error = 1, check_return_code = True, debug_cmd = None):
"""Run a command and return output
Args:
cmd : shell command to run
Returns:
command output
"""
logging.debug(cmd)
if debug_cmd:
debug_cmd.write(cmd)
debug_cmd.write("\n\n")
return
try:
ps = subprocess.Popen("exec " + cmd,
shell=True,
executable='/bin/bash',
universal_newlines=True,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
except subprocess.CalledProcessError:
logging.error(ps.communicate()[0])
sys.exit(RET_FAIL)
except KeyboardInterrupt:
logging.info("\nExited Ctrl-C from user request.")
sys.exit(130)
try:
output = ps.communicate(timeout = timeout_s)[0]
except subprocess.TimeoutExpired:
logging.error("Timeout[%ds]: %s" % (timeout_s, cmd))
output = ""
ps.kill()
rc = ps.returncode
if rc and check_return_code and rc > 0:
logging.info(output)
logging.error("ERROR return code: %d/%d, cmd:%s" % (check_return_code, rc, cmd))
if exit_on_error:
sys.exit(RET_FAIL)
logging.debug(output)
return output
def run_parallel_cmd(cmd_list, timeout_s = 999, exit_on_error = 0,
check_return_code = True, debug_cmd = None):
"""Run a list of commands in parallel
Args:
cmd_list: command list
Returns:
command output
"""
if debug_cmd:
for cmd in cmd_list:
debug_cmd.write(cmd)
debug_cmd.write("\n\n")
return
children = []
for cmd in cmd_list:
ps = subprocess.Popen("exec " + cmd,
shell=True,
executable='/bin/bash',
universal_newlines=True,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
children.append(ps)
for i in range(len(children)):
logging.info("Command progress: %d/%d" % (i, len(children)))
logging.debug("Waiting for command: %s" % cmd_list[i])
try:
output = children[i].communicate(timeout = timeout_s)[0]
except KeyboardInterrupt:
logging.info("\nExited Ctrl-C from user request.")
sys.exit(130)
except subprocess.TimeoutExpired:
logging.error("Timeout[%ds]: %s" % (timeout_s, cmd))
children[i].kill()
rc = children[i].returncode
if rc and check_return_code and rc > 0:
logging.info(output)
logging.error("ERROR return code: %d, cmd:%s" % (rc, cmd))
if exit_on_error:
sys.exit(RET_FAIL)
# Restore stty setting otherwise the terminal may go crazy
os.system("stty sane")
logging.debug(output)
def run_cmd_output(cmd, debug_cmd = None):
"""Run a command and return output
Args:
cmd : Command line to execute
"""
logging.debug(" ".join(cmd))
if debug_cmd:
debug_cmd.write(" ".join(cmd))
debug_cmd.write("\n\n")
return
try:
output = subprocess.check_output(cmd)
    except subprocess.CalledProcessError as exc:
        logging.debug(exc.output)
        raise exc
if output:
logging.debug(output)
def process_regression_list(testlist, test, iterations, matched_list, riscv_dv_root):
""" Get the matched tests from the regression test list
Args:
testlist : Regression test list
test : Test to run, "all" means all tests in the list
iterations : Number of iterations for each test
riscv_dv_root : Root directory of RISCV-DV
Returns:
matched_list : A list of matched tests
"""
logging.info("Processing regression test list : %s, test: %s" % (testlist, test))
yaml_data = read_yaml(testlist)
mult_test = test.split(',')
for entry in yaml_data:
if 'import' in entry:
sub_list = re.sub('<riscv_dv_root>', riscv_dv_root, entry['import'])
process_regression_list(sub_list, test, iterations, matched_list, riscv_dv_root)
else:
if (entry['test'] in mult_test) or (test == "all"):
if (iterations > 0 and entry['iterations'] > 0):
entry['iterations'] = iterations
if entry['iterations'] > 0:
logging.info("Found matched tests: %s, iterations:%0d" %
(entry['test'], entry['iterations']))
matched_list.append(entry)
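# Illustrative testlist structure implied by the code above (the concrete test
# name and paths are made up):
#
#   - test: riscv_arithmetic_basic_test
#     iterations: 2
#   - import: <riscv_dv_root>/target/extra_testlist.yaml
#
#   matched = []
#   process_regression_list("testlist.yaml", "all", 0, matched, "/path/to/riscv-dv")
#
# Passing iterations=0 keeps each entry's own iteration count; a positive value
# overrides entries whose own count is non-zero.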
def create_output(output, noclean, prefix = "out_"):
""" Create output directory
Args:
output : Name of specified output directory
noclean: Do not clean the output of the previous runs
Returns:
Output directory
"""
# Create output directory
if output is None:
output = prefix + str(date.today())
if noclean is False:
os.system("rm -rf %s" % output)
logging.info("Creating output directory: %s" % output)
subprocess.run(["mkdir", "-p", output])
return output
def gpr_to_abi(gpr):
"""Convert a general purpose register to its corresponding abi name"""
switcher = {
"x0" : "zero",
"x1" : "ra",
"x2" : "sp",
"x3" : "gp",
"x4" : "tp",
"x5" : "t0",
"x6" : "t1",
"x7" : "t2",
"x8" : "s0",
"x9" : "s1",
"x10" : "a0",
"x11" : "a1",
"x12" : "a2",
"x13" : "a3",
"x14" : "a4",
"x15" : "a5",
"x16" : "a6",
"x17" : "a7",
"x18" : "s2",
"x19" : "s3",
"x20" : "s4",
"x21" : "s5",
"x22" : "s6",
"x23" : "s7",
"x24" : "s8",
"x25" : "s9",
"x26" : "s10",
"x27" : "s11",
"x28" : "t3",
"x29" : "t4",
"x30" : "t5",
"x31" : "t6",
"f0" : "ft0",
"f1" : "ft1",
"f2" : "ft2",
"f3" : "ft3",
"f4" : "ft4",
"f5" : "ft5",
"f6" : "ft6",
"f7" : "ft7",
"f8" : "fs0",
"f9" : "fs1",
"f10" : "fa0",
"f11" : "fa1",
"f12" : "fa2",
"f13" : "fa3",
"f14" : "fa4",
"f15" : "fa5",
"f16" : "fa6",
"f17" : "fa7",
"f18" : "fs2",
"f19" : "fs3",
"f20" : "fs4",
"f21" : "fs5",
"f22" : "fs6",
"f23" : "fs7",
"f24" : "fs8",
"f25" : "fs9",
"f26" : "fs10",
"f27" : "fs11",
"f28" : "ft8",
"f29" : "ft9",
"f30" : "ft10",
"f31" : "ft11",
}
return switcher.get(gpr, "na")
def sint_to_hex(val):
"""Signed integer to hex conversion"""
return str(hex((val + (1 << 32)) % (1 << 32)))
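# Illustrative usage of the two helpers above:
#   gpr_to_abi("x10")  # -> "a0"   (unknown register names return "na")
#   sint_to_hex(-1)    # -> "0xffffffff"  (32-bit two's complement)
#   sint_to_hex(255)   # -> "0xff"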
BASE_RE = re.compile(r"(?P<rd>[a-z0-9]+?),(?P<imm>[\-0-9]*?)\((?P<rs1>[a-z0-9]+?)\)")
def convert_pseudo_instr(instr_name, operands, binary):
"""Convert pseudo instruction to regular instruction"""
if instr_name == "nop":
instr_name = "addi"
operands = "zero,zero,0"
elif instr_name == "mv":
instr_name = "addi"
operands = operands + ",0"
elif instr_name == "not":
instr_name = "xori"
operands = operands + ",-1"
elif instr_name == "neg":
instr_name = "sub"
o = operands.split(",")
operands = o[0] + ",zero," + o[1]
elif instr_name == "negw":
instr_name = "subw"
o = operands.split(",")
operands = o[0] + ",zero," + o[1]
elif instr_name == "sext.w":
instr_name = "addiw"
operands = operands + ",0"
elif instr_name == "seqz":
instr_name = "sltiu"
operands = operands + ",1"
elif instr_name == "snez":
instr_name = "sltu"
o = operands.split(",")
operands = o[0] + ",zero," + o[1]
elif instr_name == "sltz":
instr_name = "slt"
operands = operands + ",zero"
elif instr_name == "sgtz":
instr_name = "slt"
o = operands.split(",")
operands = o[0] + ",zero," + o[1]
elif instr_name in ["beqz", "bnez", "bgez", "bltz"]:
instr_name = instr_name[0:3]
o = operands.split(",")
operands = o[0] + ",zero," + o[1]
elif instr_name == "blez":
        instr_name = "bge"
operands = "zero," + operands
elif instr_name == "bgtz":
        instr_name = "blt"
operands = "zero," + operands
elif instr_name == "bgt":
        instr_name = "blt"
o = operands.split(",")
operands = o[1] + "," + o[0] + "," + o[2]
elif instr_name == "ble":
        instr_name = "bge"
o = operands.split(",")
operands = o[1] + "," + o[0] + "," + o[2]
elif instr_name == "bgtu":
        instr_name = "bltu"
o = operands.split(",")
operands = o[1] + "," + o[0] + "," + o[2]
elif instr_name == "bleu":
        instr_name = "bgeu"
o = operands.split(",")
operands = o[1] + "," + o[0] + "," + o[2]
elif instr_name == "csrr":
instr_name = "csrrw"
operands = operands + ",zero"
elif instr_name in ["csrw", "csrs", "csrc"]:
instr_name = "csrr" + instr_name[3:]
operands = "zero," + operands
elif instr_name in ["csrwi", "csrsi", "csrci"]:
instr_name = "csrr" + instr_name[3:]
operands = "zero," + operands
elif instr_name == "jr":
instr_name = "jalr"
operands = "zero,%s,0" % operands
elif instr_name == "j":
instr_name = "jal"
operands = "zero,%s" % | |
import time
from datetime import datetime
import os
import logging
import platform
import csv
import statistics
from polyglotdb import CorpusContext
from polyglotdb.config import CorpusConfig
from polyglotdb.io import (inspect_buckeye, inspect_textgrid, inspect_timit,
inspect_labbcat, inspect_mfa, inspect_fave,
guess_textgrid_format)
graph_db = {'graph_host':'localhost', 'graph_port': 7474,
'user': 'neo4j', 'password': '<PASSWORD>'}
amountofcorpus = 'full'
#amountofcorpus = 'partial'
globalphonebenchmark = 'globalphone_vn'
#globalphonebenchmark = 'timitbenchmark'
#globalphonesyllabic = ['a', 'aa', 'aw', 'e', 'ee', 'ew', 'i', 'ii', 'o', 'oo', 'ow', 'u', 'uu']#cz
#globalphonesyllabic = ['ab', 'e', 'i', 'i2', 'o', 'oe', 'u', 'ue']#tu
#globalphonesyllabic = ['a', 'e', 'i', 'o', 'u']#sa
#globalphonesyllabic = ['i', 'y', 'u', 'e', 'EU', 'o', 'E', 'OE', 'AX', 'O', 'a', 'AE', 'A~', 'E~', 'o~', 'OE~']#fr
#globalphonesyllabic = ['a', 'ae', 'ale', 'e', 'etu', 'i', 'o', 'oc', 'oe', 'ole', 'ox', 'u', 'ue', 'abl', 'ael', 'al',
# 'alel', 'el', 'il', 'oel', 'ole', 'olel', 'uel', 'ul', 'uxl']#sw
#globalphonesyllabic = ['a', 'ae', 'atu', 'e', 'etu', 'i', 'o', 'oe', 'u', 'ue', 'aI', 'aU', 'eU', 'al', 'el', 'il',
# 'oel', 'ol', 'uel', 'ul']#ge
#globalphonesyllabic = ['a', 'y', 'e', 'i', 'o', 'u', 'ja', 'ju']#bg
#globalphonesyllabic = ['a', 'e', 'i', 'o', 'u', 'a_L', 'e_L', 'i_L', 'o_L', 'u_L', 'a_T1', 'e_T1', 'i_T1', 'o_T1', 'u_T1',
# 'a_T2', 'e_T2', 'i_T2', 'o_T2', 'u_T2', 'a_T3', 'e_T3', 'i_T3', 'o_T3', 'u_T3', 'aI', 'aU']#ha
#globalphonesyllabic = ['a', 'e', 'i', 'i2', 'o', 'u', 'jA', 'jE', 'jO', 'jU']#ru
#globalphonesyllabic = ['a', 'e', 'eo5', 'i', 'i2', 'o', 'oc5', 'u']#pl
#globalphonesyllabic = ['A', 'AX', 'A~', 'E', 'E~', 'I', 'IX', 'I~', 'O', 'O~', 'U', 'UX', 'U~', 'A+', 'A~+',
# 'E+', 'E~+', 'O+', 'O~+', 'U+', 'U~+', 'I+', 'I~+']#po
#globalphonesyllabic = ['a', 'e', 'i', 'o', 'u', 'y']#ua
#globalphonesyllabic = ['A', 'EO', 'O', 'U', 'I', 'EU', 'AE', 'E', 'OE', 'UE', 'iA', 'iEO', 'iO', 'iU', 'iE', 'oA', 'uEO', 'eul']#ko
globalphonesyllabic = ['a1', 'a2', 'a3', 'e1', 'e2', 'i', 'o1', 'o2', 'o3', 'u1', 'u2', 'ai', 'ao', 'au', 'au3',
'ay', 'ay3', 'eo', 'eu', 'ie2', 'iu', 'oa', 'oe', 'oi', 'oi2', 'oi3', 'ua', 'ua2', 'ui', 'ui2', 'uu2', 'uy', 'ieu',
'uoi2', 'uoi3', 'uou']#vn
#globalphonesyllabic = ['a1', 'a2', 'a3', 'a4', 'a5', 'ai1', 'ai2', 'ai3', 'ai4', 'ai5', 'ao1', 'ao2', 'ao3', 'ao4', 'ao5',
# 'e1', 'e2', 'e3', 'e4', 'e5', 'ei1', 'ei2', 'ei3', 'ei4', 'ei5', 'i1', 'i2', 'i3', 'i4', 'i5',
# 'ia1', 'ia2', 'ia3', 'ia4', 'ia5', 'ie1', 'ie2', 'ie3', 'ie4', 'ie5', 'ii1', 'ii2', 'ii3', 'ii4', 'ii5',
# 'io1', 'io2', 'io3', 'io4', 'io5', 'iu1', 'iu2', 'iu3', 'iu4', 'iu5', 'o1', 'o2', 'o3', 'o4', 'o5',
# 'ou1', 'ou2', 'ou3', 'ou4', 'ou5', 'u1', 'u2', 'u3', 'u4', 'u5', 'ua1', 'ua2', 'ua3', 'ua4', 'ua5',
# 'ue1', 'ue2', 'ue3', 'ue4', 'ue5', 'uo1', 'uo2', 'uo3', 'uo4', 'uo5',
# 'v1', 'v2', 'v3', 'v4', 'v5', 'va1', 'va2', 'va3', 'va4', 've1', 've2', 've3', 've4',
# 'iao1', 'iao2', 'iao3', 'iao4', 'iao5', 'iou1', 'iou2', 'iou3', 'iou4', 'uai1', 'uai2', 'uai3', 'uai4', 'uai5',
# 'uei1', 'uei2', 'uei3', 'uei4', 'uei5',]#ch
#globalphonesyllabic = ['AA0', 'AE0', 'AH0', 'AO0', 'AW0', 'AY0', 'EH0', 'ER0', 'EY0', 'IH0', 'IY0', 'OW0', 'OY0', 'UH0', 'UW0',
# 'AA1', 'AE1', 'AH1', 'AO1', 'AW1', 'AY1', 'EH1', 'ER1', 'EY1', 'IH1', 'IY1', 'OW1', 'OY1', 'UH1', 'UW1',
# 'AA2', 'AE2', 'AH2', 'AO2', 'AW2', 'AY2', 'EH2', 'ER2', 'EY2', 'IH2', 'IY2', 'OW2', 'OY2', 'UH2', 'UW2']#librispeech
#globalphonesyllabic = ['aa', 'ae', 'ah', 'ao', 'aw', 'ax', 'ax-h', 'axr', 'ay',
#                        'eh', 'el', 'em', 'en', 'eng', 'er', 'ey', 'ih', 'ix', 'iy', 'ow', 'oy', 'uh', 'uw', 'ux']#timit
globalphone = os.path.expanduser('/media/share/corpora/GP_aligned/VN')
#globalphone = os.path.expanduser('/media/share/datasets/sct_benchmarks/LibriSpeech')
#globalphone = os.path.expanduser('/media/share/datasets/sct_benchmarks/automated/timit')
lang = 'vn'
outpath = 'export_vn.csv'
lasttime = time.time()
times = []
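# call_back is handed to the polyglotdb loaders/encoders below; whenever it is
# invoked with a single integer progress value it records the time elapsed since
# the previous invocation in `times`, which the benchmark helpers use to report
# mean/sd per-callback timings.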
def call_back(*args):
global lasttime
print(*args)
if len(args) > 1:
return
if isinstance(args[0], int):
logtime = time.time() - lasttime
print(logtime)
times.append(logtime)
lasttime = time.time()
def import_corpus_run_query(data, path):
with CorpusContext(data, **graph_db) as c:
c.reset()
beg = time.time()
if data == 'buckeyebenchmark':
parser = inspect_buckeye(path)
elif data == 'timitbenchmark':
parser = inspect_timit(path)
else:
parser = inspect_mfa(path)
parser.call_back = call_back
c.load(parser, path)
end = time.time()
avgtime = sum(times)/(len(times))
sd = statistics.stdev(times)
return [(end - beg), avgtime, sd]
def pause_encoding_run_query(data):
beg = time.time()
with CorpusContext(data, **graph_db) as c:
#pattern = '^[<{].*$'
#if 'timit' in data:
# pattern = '^<?(sil|SIL)>?$'
c.encode_pauses('^[<{].*$', call_back=call_back)
end = time.time()
if len(times) >1:
avgtime = sum(times)/len(times)
sd = statistics.stdev(times)
else:
avgtime = times[0]
sd = None
return [(end - beg), avgtime, sd]
def utterance_encoding_run_query(data):
beg = time.time()
with CorpusContext(data, **graph_db) as c:
c.encode_utterances(0.5, 0, call_back=call_back)
end = time.time()
avgtime = sum(times)/len(times)
#sd = statistics.stdev(times)
sd = None
return [(end - beg), avgtime, sd]
def syllabic_encoding_run_query(data, syllabic):
beg = time.time()
with CorpusContext(data, **graph_db) as c:
c.reset_class('syllabic')
c.encode_class(syllabic, 'syllabic')
end = time.time()
#avgtime = sum(times)/len(times)
return [(end - beg), None]
def syllable_encoding_run_query(data):
beg = time.time()
algorithm = 'maxonset'
with CorpusContext(data, **graph_db) as c:
c.encode_syllables(algorithm = algorithm, call_back=call_back)
end = time.time()
return [(end - beg), None]
def speech_rate_phones(data):
beg = time.time()
with CorpusContext(data, **graph_db) as c:
c.encode_rate('utterance', 'phone', 'speech_rate_phones')
end = time.time()
return [(end-beg), None]
def speech_rate_syllables(data):
beg = time.time()
with CorpusContext(data, **graph_db) as c:
c.encode_rate('utterance', 'syllable', 'speech_rate_syllables')
end = time.time()
return [(end-beg), None]
def number_of_syllables(data):
beg = time.time()
with CorpusContext(data, **graph_db) as c:
c.encode_count('word', 'syllable', 'number_of_syllables')
end = time.time()
return [(end-beg), None]
def number_of_phones(data):
beg = time.time()
with CorpusContext(data, **graph_db) as c:
c.encode_count('syllable', 'phone', 'number_of_phones')
end = time.time()
return [(end-beg), None]
def number_of_words(data):
beg = time.time()
with CorpusContext(data, **graph_db) as c:
c.encode_count('utterance', 'word', 'number_of_words')
end = time.time()
return [(end-beg), None]
def position_in_utterance(data):
beg = time.time()
with CorpusContext(data, **graph_db) as c:
c.encode_position('utterance', 'word', 'position_in_utterance')
end = time.time()
return [(end-beg), None]
def position_in_word(data):
beg = time.time()
with CorpusContext(data, **graph_db) as c:
c.encode_position('word', 'syllable', 'position_in_word')
end = time.time()
return [(end-beg), None]
def export_query_pss(data, export_path):
beg = time.time()
with CorpusContext(data, **graph_db) as c:
#print (c.hierarchy.token_properties)
#print (c.hierarchy.type_properties)
c.refresh_hierarchy()
query = c.query_graph(c.syllable)
filters = (c.syllable.word.end == c.syllable.word.utterance.end)
query = query.filter(filters)
columns = (c.syllable.word.id.column_name('word_id'), c.syllable.word.label.column_name('orthography'), c.syllable.word.duration.column_name('word_duration'), c.syllable.word.begin.column_name('word_begin'), c.syllable.word.end.column_name('word_end'),
c.syllable.word.number_of_syllables.column_name('num_syllables_in_word'), c.syllable.word.position_in_utterance.column_name('position_in_utterance'),
c.pause.following.duration.column_name('following_pause_duration'), c.pause.following.label.column_name('following_pause_label'),
c.syllable.utterance.speech_rate_phones.column_name('speech_rate_phones'), c.syllable.utterance.speech_rate_syllables.column_name('speech_rate_syllables'), c.syllable.utterance.begin.column_name('utterance_begin'),
c.syllable.utterance.end.column_name('utterance_end'), c.syllable.utterance.number_of_words.column_name('num_words'),
c.syllable.discourse.name.column_name('discourse_name'), c.syllable.speaker.name.column_name('speaker_name'),
c.syllable.duration.column_name('syllable_duration'), c.syllable.label.column_name('syllable_label'), c.syllable.position_in_word.column_name('syllable_position'), c.syllable.number_of_phones.column_name('num_phones_in_syllable'))
query = query.columns(*columns)
print (query.cypher())
results = query.to_csv(export_path)
end = time.time()
return [(end-beg)]
#globalphone_import = import_corpus_run_query(globalphonebenchmark, globalphone)
#globalphone_import = import_corpus_run_query(globalphonebenchmark, globalphone)
#globalphone_pauses = pause_encoding_run_query(globalphonebenchmark)
#globalphone_utts = utterance_encoding_run_query(globalphonebenchmark)
#globalphone_syllabic = syllabic_encoding_run_query(globalphonebenchmark, globalphonesyllabic)
globalphone_syllables = syllable_encoding_run_query(globalphonebenchmark)
globalphone_speechrate_phones = speech_rate_phones(globalphonebenchmark)
globalphone_speechrate_syllables = speech_rate_syllables(globalphonebenchmark)
globalphone_num_syllables = number_of_syllables(globalphonebenchmark)
globalphone_num_phones = number_of_phones(globalphonebenchmark)
globalphone_num_words = number_of_words(globalphonebenchmark)
globalphone_word_position = position_in_utterance(globalphonebenchmark)
globalphone_syllable_position = position_in_word(globalphonebenchmark)
globalphone_export_pss = export_query_pss(globalphonebenchmark, outpath)
def WriteDictToCSV(csv_file,csv_columns,dict_data):
with open(csv_file, 'w') as csvfile:
writer = csv.DictWriter(csvfile, fieldnames=csv_columns)
writer.writeheader()
for data in dict_data:
writer.writerow(data)
return
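# Illustrative usage (the file name is an arbitrary example; column names must
# match the dict keys):
#   WriteDictToCSV('benchmark_results.csv',
#                  ['Computer', 'Date'],
#                  [{'Computer': platform.node(), 'Date': str(datetime.now())}])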
csv_columns = ['Computer','Date','Corpus', 'Type of benchmark', 'Total time', 'Mean time per call back', 'sd time between call backs']
dict_data = [
#{'Computer': platform.node(), 'Date': str(datetime.now()), 'Corpus': amountofcorpus + globalphonebenchmark, 'Type of benchmark': 'Import', 'Total time': globalphone_import[0], 'Mean time per call back': globalphone_import[1], 'sd time between call backs': globalphone_import[2]},
#{'Computer': platform.node(), 'Date': str(datetime.now()), 'Corpus': globalphonebenchmark, 'Type of benchmark': 'Pause encoding', 'Total time': globalphone_pauses[0], 'Mean time per call back': globalphone_pauses[1], 'sd time between call backs': globalphone_pauses[2]},
#{'Computer': platform.node(), 'Date': str(datetime.now()), 'Corpus': globalphonebenchmark, 'Type of benchmark': 'Utterance encoding', 'Total time': globalphone_utts[0], 'Mean time per call back': globalphone_utts[1], 'sd time between call backs': globalphone_utts[2]},
#{'Computer': platform.node(), 'Date': str(datetime.now()), 'Corpus': globalphonebenchmark, 'Type of benchmark': 'Syllabic encoding', 'Total time': globalphone_syllabic[0], 'Mean time per call back': None, 'sd time between call backs': None},
{'Computer': platform.node(), 'Date': str(datetime.now()), 'Corpus': globalphonebenchmark, 'Type of benchmark': 'Syllable encoding', 'Total time': globalphone_syllables[0], 'Mean time per call back': None, 'sd time between call backs': None},
{'Computer': platform.node(), 'Date': str(datetime.now()), 'Corpus': globalphonebenchmark, 'Type of benchmark': 'Speech rate encoding (phones)', 'Total time': globalphone_speechrate_phones[0], 'Mean time per call back': None, 'sd time between call backs': None},
{'Computer': platform.node(), 'Date': str(datetime.now()), 'Corpus': globalphonebenchmark, 'Type of benchmark': 'Speech rate encoding (syllables)', 'Total time': globalphone_speechrate_syllables[0], 'Mean time per call back': None, 'sd time between call backs': None},
{'Computer': platform.node(), 'Date': str(datetime.now()), 'Corpus': globalphonebenchmark, 'Type of benchmark': 'Num syllables encoding', 'Total time': globalphone_num_syllables[0], 'Mean time per call back': None, 'sd time between call backs': None},
{'Computer': platform.node(), 'Date': str(datetime.now()), 'Corpus': globalphonebenchmark, 'Type of benchmark': 'Num phones encoding', 'Total time': globalphone_num_phones[0], 'Mean time per call back': None, 'sd time between call backs': None},
{'Computer': platform.node(), 'Date': str(datetime.now()), 'Corpus': globalphonebenchmark, 'Type of benchmark': 'Num words encoding', 'Total time': globalphone_num_words[0], 'Mean time per call back': None, 'sd time between call backs': None},
{'Computer': platform.node(), 'Date': str(datetime.now()), 'Corpus': globalphonebenchmark, 'Type of benchmark': 'Word position encoding', 'Total time': globalphone_word_position[0], 'Mean time per call back': None, 'sd time between call backs': None},
{'Computer': platform.node(), 'Date': str(datetime.now()), 'Corpus': globalphonebenchmark, 'Type of benchmark': 'Syllable position encoding', 'Total time': globalphone_syllable_position[0], 'Mean time per call back': None, 'sd time between call backs': None},
{'Computer': platform.node(), 'Date': str(datetime.now()), 'Corpus': globalphonebenchmark, 'Type | |
= 'ATP Hydrolysis'
reaction.lower_bound = 0. # This is the default
reaction.upper_bound = 1000. # This is the default
reaction.add_metabolites({atp_HAOc: -1.0,
h2o_HAOc: -1.0,
adp_HAOc: 1.0,
pi_HAOc: 1.0,
h_HAOc: 1.0,
ATP_SLP_HAO: -1.0})
model.add_reactions([reaction])
print(reaction.name + ": " + str(reaction.check_mass_balance()))
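# check_mass_balance() returns a dict of any unbalanced elements/charge, so an
# empty dict ({}) in the printouts below indicates the reaction is mass- and
# charge-balanced.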
##Gluconeogenesis
# atp_HAOc + h2o_HAOc + pyr_HAOc <-> amp_HAOc + 2.0 h_HAOc + pep_HAOc + pi_HAOc
amp_HAOc = Metabolite('amp_HAOc', formula='C10H12N5O7P', name='AMP', compartment='HAOc', charge=-2)
reaction = Reaction('HAO_PPS')
reaction.name = 'Phosphoenolpyruvate synthase'
reaction.subsystem = 'Gluconeogenesis'
reaction.lower_bound = -1000. # This is the default
reaction.upper_bound = 1000. # This is the default
reaction.add_metabolites({atp_HAOc: -1.0,
h2o_HAOc: -1.0,
pyr_HAOc: -1.0,
amp_HAOc: 1.0,
h_HAOc: 2.0,
pep_HAOc: 1.0,
pi_HAOc: 1.0,
ATP_SLP_HAO: -1.0})
model.add_reactions([reaction])
print(reaction.name + ": " + str(reaction.check_mass_balance()))
# fdp_HAOc + h2o_HAOc <-> f6p_HAOc + pi_HAOc
reaction = Reaction('HAO_FBP')
reaction.name = 'Fructose-bisphosphatase'
reaction.subsystem = 'Gluconeogenesis'
reaction.lower_bound = 0. # This is the default
reaction.upper_bound = 1000. # This is the default
reaction.add_metabolites({fdp_HAOc: -1.0,
h2o_HAOc: -1.0,
f6p_HAOc: 1.0,
pi_HAOc: 1.0})
model.add_reactions([reaction])
print(reaction.name + ": " + str(reaction.check_mass_balance()))
# TCA Cycle
# malate dehydrogenase
mal__L_HAOc = Metabolite('mal__L_HAOc', formula='C4H4O5', name='L-Malate', compartment='HAOc', charge=-2)
oaa_HAOc = Metabolite('oaa_HAOc', formula='C4H2O5', name='Oxaloacetate', compartment='HAOc', charge=-2)
reaction = Reaction('HAO_MDH')
reaction.name = 'Malate dehydrogenase'
reaction.subsystem = 'Propionate Production'
reaction.lower_bound = -1000. # This is the default
reaction.upper_bound = 1000. # This is the default
reaction.add_metabolites({oaa_HAOc: -1.0,
nadh_HAOc: -1.0,
h_HAOc: -1.0,
nad_HAOc: 1.0,
mal__L_HAOc: 1.0})
model.add_reactions([reaction])
print(reaction.name + ": " + str(reaction.check_mass_balance()))
# fumarate reductase NADH
succ_HAOc = Metabolite('succ_HAOc', formula='C4H4O4', name='Succinate', compartment='c', charge=-2)
fum_HAOc = Metabolite('fum_HAOc', formula='C4H2O4', name='Fumarate', compartment='c', charge=-2)
reaction = Reaction('HAO_FRDx')
reaction.name = 'Fumarate Reductase NADH'
reaction.subsystem = 'Propionate Production'
reaction.lower_bound = -1000. # This is the default
reaction.upper_bound = 1000. # This is the default
reaction.add_metabolites({fum_HAOc: -1.0,
nadh_HAOc: -1.0,
h_HAOc: -1.0,
nad_HAOc: 1.0,
succ_HAOc: 1.0})
model.add_reactions([reaction])
print(reaction.name + ": " + str(reaction.check_mass_balance()))
# Bifurcated TCA Cycle
# OAA to PEP
# atp_HAOc + oaa_HAOc -> adp_HAOc + co2_HAOc + pep_HAOc
reaction = Reaction('HAO_PPCK')
reaction.name = 'Phosphoenolpyruvate carboxykinase'
reaction.subsystem = 'TCA Cycle'
reaction.lower_bound = 0. # This is the default
reaction.upper_bound = 1000. # This is the default
reaction.add_metabolites({atp_HAOc: -1.0,
oaa_HAOc: -1.0,
pep_HAOc: 1.0,
adp_HAOc: 1.0,
co2_HAOc: 1.0,
ATP_SLP_HAO: -1.0})
model.add_reactions([reaction])
print(reaction.name + ": " + str(reaction.check_mass_balance()))
# Acetyl-CoA to OAA and Fumarate
# co2_HAOc + h2o_HAOc + pep_HAOc <-> h_HAOc + oaa_HAOc + pi_HAOc
reaction = Reaction('HAO_PPC')
reaction.name = 'Phosphoenolpyruvate carboxylase'
reaction.subsystem = 'TCA Cycle'
reaction.lower_bound = 0. # This is the default
reaction.upper_bound = 1000. # This is the default
reaction.add_metabolites({co2_HAOc: -1.0,
h2o_HAOc: -1.0,
pep_HAOc: -1.0,
h_HAOc: 1.0,
oaa_HAOc: 1.0,
pi_HAOc: 1.0})
model.add_reactions([reaction])
print(reaction.name + ": " + str(reaction.check_mass_balance()))
# accoa_HAOc + h2o_HAOc + oaa_HAOc -> cit_HAOc + coa_HAOc + h_HAOc
cit_HAOc = Metabolite('cit_HAOc', formula='C6H5O7', name='Citrate', compartment='c', charge=-3)
reaction = Reaction('HAO_CS')
reaction.name = 'Citrate synthase'
reaction.subsystem = 'TCA Cycle'
reaction.lower_bound = -1000. # This is the default
reaction.upper_bound = 1000. # This is the default
reaction.add_metabolites({accoa_HAOc: -1.0,
h2o_HAOc: -1.0,
oaa_HAOc: -1.0,
cit_HAOc: 1.0,
coa_HAOc: 1.0,
h_HAOc: 1.0})
model.add_reactions([reaction])
print(reaction.name + ": " + str(reaction.check_mass_balance()))
# cit_HAOc <-> icit_HAOc
icit_HAOc = Metabolite('icit_HAOc', formula='C6H5O7', name='Isocitrate', compartment='c', charge=-3)
reaction = Reaction('HAO_ACONT')
reaction.name = 'Aconitate hydratase'
reaction.subsystem = 'TCA Cycle'
reaction.lower_bound = -1000. # This is the default
reaction.upper_bound = 1000. # This is the default
reaction.add_metabolites({cit_HAOc: -1.0,
icit_HAOc: 1.0})
model.add_reactions([reaction])
print(reaction.name + ": " + str(reaction.check_mass_balance()))
# icit_c <-> glx_c + succ_c
glx_HAOc = Metabolite('glx_HAOc', formula='C2HO3', name='Glyoxylate', compartment='HAOc', charge=-1)
reaction = Reaction('HAO_ICL')
reaction.name = 'Isocitrate lyase'
reaction.subsystem = 'TCA Cycle'
reaction.lower_bound = -1000. # This is the default
reaction.upper_bound = 1000. # This is the default
reaction.add_metabolites({icit_HAOc: -1.0,
glx_HAOc: 1.0,
succ_HAOc: 1.0})
model.add_reactions([reaction])
print(reaction.name + ": " + str(reaction.check_mass_balance()))
# accoa_c + glx_c + h2o_c -> coa_c + h_c + mal__L_c
reaction = Reaction('HAO_MALS')
reaction.name = 'Malate synthase'
reaction.subsystem = 'TCA Cycle'
reaction.lower_bound = 0. # This is the default
reaction.upper_bound = 1000. # This is the default
reaction.add_metabolites({accoa_HAOc: -1.0,
glx_HAOc: -1.0,
h2o_HAOc: -1.0,
coa_HAOc: 1.0,
h_HAOc: 1.0,
mal__L_HAOc: 1.0})
model.add_reactions([reaction])
print(reaction.name + ": " + str(reaction.check_mass_balance()))
# icit_HAOc + nad_HAOc <-> akg_HAOc + co2_HAOc + nadh_HAOc
akg_HAOc = Metabolite('akg_HAOc', formula='C5H4O5', name='2-Oxoglutarate', compartment='HAOc', charge=-2)
reaction = Reaction('HAO_ICDHx')
reaction.name = 'Isocitrate dehydrogenase (NAD)'
reaction.subsystem = 'TCA Cycle'
reaction.lower_bound = -1000. # This is the default
reaction.upper_bound = 1000. # This is the default
reaction.add_metabolites({icit_HAOc: -1.0,
nad_HAOc: -1.0,
akg_HAOc: 1.0,
co2_HAOc: 1.0,
nadh_HAOc: 1.0})
model.add_reactions([reaction])
print(reaction.name + ": " + str(reaction.check_mass_balance()))
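# Note: the reaction id 'HAO_MDH' below repeats the id already used for the
# malate dehydrogenase defined earlier in this script; cobra's add_reactions()
# typically warns about and ignores a duplicate id, so one of the two likely
# needs a distinct id (for example a hypothetical 'HAO_MDH2') for both to be kept.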
# mal__L_HAOc + nad_HAOc <-> h_HAOc + nadh_HAOc + oaa_HAOc
reaction = Reaction('HAO_MDH')
reaction.name = 'Malate dehydrogenase'
reaction.subsystem = 'TCA Cycle'
reaction.lower_bound = -1000. # This is the default
reaction.upper_bound = 1000. # This is the default
reaction.add_metabolites({mal__L_HAOc: -1.0,
nad_HAOc: -1.0,
h_HAOc: 1.0,
nadh_HAOc: 1.0,
oaa_HAOc: 1.0})
model.add_reactions([reaction])
print(reaction.name + ": " + str(reaction.check_mass_balance()))
# fum_HAOc + h2o_HAOc <-> mal__L_HAOc
reaction = Reaction('HAO_FUM')
reaction.name = 'Fumarase'
reaction.subsystem = 'TCA Cycle'
reaction.lower_bound = -1000. # This is the default
reaction.upper_bound = 1000. # This is the default
reaction.add_metabolites({fum_HAOc: -1.0,
h2o_HAOc: -1.0,
mal__L_HAOc: 1.0})
model.add_reactions([reaction])
print(reaction.name + ": " + str(reaction.check_mass_balance()))
# atp_c + cit_c + coa_c -> accoa_c + adp_c + oaa_c + pi_c
reaction = Reaction('HAO_ACITL')
reaction.name = '<NAME>'
reaction.subsystem = 'TCA Cycle'
reaction.lower_bound = 0. # This is the default
reaction.upper_bound = 1000. # This is the default
reaction.add_metabolites({coa_HAOc: -1.0,
atp_HAOc: -1.0,
cit_HAOc: -1.0,
accoa_HAOc: 1.0,
oaa_HAOc: 1.0,
adp_HAOc: 1.0,
pi_HAOc: 1.0,
ATP_SLP_HAO: -1.0})
model.add_reactions([reaction])
print(reaction.name + ": " + str(reaction.check_mass_balance()))
# mal__L_c + nad_c -> co2_c + nadh_c + pyr_c
reaction = Reaction('HAO_ME1')
reaction.name = 'Malic Enzyme (NAD)'
reaction.subsystem = 'TCA Cycle'
reaction.lower_bound = 0. # This is the default
reaction.upper_bound = 1000. # This is the default
reaction.add_metabolites({mal__L_HAOc: -1.0,
nad_HAOc: -1.0,
pyr_HAOc: 1.0,
nadh_HAOc: 1.0,
co2_HAOc: 1.0})
model.add_reactions([reaction])
print(reaction.name + ": " + str(reaction.check_mass_balance()))
# ac_HAOc + atp_HAOc + coa_HAOc -> accoa_HAOc + amp_HAOc + ppi_HAOc
ppi_HAOc = Metabolite('ppi_HAOc', formula='HO7P2', name='Diphosphate', compartment='c', charge=-3)
reaction = Reaction('HAO_ACS')
reaction.name = 'Acetyl-CoA synthetase'
reaction.subsystem = 'Acetate metabolism'
reaction.lower_bound = 0. # This is the default
reaction.upper_bound = 1000. # This is the default
reaction.add_metabolites({ac_HAOc: -1.0,
atp_HAOc: -1.0,
coa_HAOc: -1.0,
accoa_HAOc: 1.0,
amp_HAOc: 1.0,
ppi_HAOc: 1.0,
ATP_SLP_HAO: -1.0})
model.add_reactions([reaction])
print(reaction.name + ": " + str(reaction.check_mass_balance()))
# NADH/ NADPH Conversions
# atp_HAOc + nad_HAOc <-> adp_HAOc + h_HAOc + nadp_HAOc
nadp_HAOc = Metabolite('nadp_HAOc', formula='C21H25N7O17P3', name='Nicotinamide adenine dinucleotide phosphate',
                       compartment='c', charge=-3)
reaction = Reaction('HAO_NADK')
reaction.name = 'NAD kinase'
reaction.subsystem = 'NADH/ NADPH Conversions'
reaction.lower_bound = -1000. # This is the default
reaction.upper_bound = 1000. # This is the default
reaction.add_metabolites({atp_HAOc: -1.0,
nad_HAOc: -1.0,
adp_HAOc: 1.0,
h_HAOc: 1.0,
nadp_HAOc: 1.0,
ATP_SLP_HAO: -1.0})
model.add_reactions([reaction])
print(reaction.name + ": " + str(reaction.check_mass_balance()))
# nadh_HAOc + nadp_HAOc + 2.0 h_HAOi -> 2.0 h_HAOc + nad_HAOc + nadph_HAOc
nadph_HAOc = Metabolite('nadph_HAOc', formula='C21H26N7O17P3',
                        name='Nicotinamide adenine dinucleotide phosphate - reduced', compartment='c', charge=-4)
reaction = Reaction('HAO_THD2')
reaction.name = 'NAD(P) transhydrogenase'
reaction.subsystem = 'NADH/ NADPH Conversions'
reaction.lower_bound = -1000. # This is the default
reaction.upper_bound = 1000. # This is the default
reaction.add_metabolites({nadh_HAOc: -1.0,
nadp_HAOc: -1.0,
h_HAOi: -2.0,
h_HAOc: 2.0,
nad_HAOc: 1.0,
nadph_HAOc: 1.0})
model.add_reactions([reaction])
print(reaction.name + ": " + str(reaction.check_mass_balance()))
##Nitrogen and Sulfur Import
# nh4_e ->
nh4_HAOe = Metabolite('nh4_HAOe', formula='H4N', name='Ammonium', compartment='HAOe', charge=1)
reaction = Reaction('HAO_EX_nh4')
reaction.name = 'Ammonium Transport'
reaction.subsystem = 'Transport'
reaction.lower_bound = -1000. # This is the default
reaction.upper_bound = 1000. # This is the default
reaction.add_metabolites({nh4_e: HAO_Abnd,
nh4_HAOe: 1.0})
model.add_reactions([reaction])
print(reaction.name + ": " + str(reaction.check_mass_balance()))
nh4_HAOc = Metabolite('nh4_HAOc', formula='H4N', name='Ammonium', compartment='c', charge=1)
reaction = Reaction('HAO_nh4t')
reaction.name = 'Ammonium Transport'
reaction.subsystem = 'Transport'
reaction.lower_bound = -1000. # This is the default
reaction.upper_bound = 1000. # This is the default
reaction.add_metabolites({nh4_HAOe: -1.0,
nh4_HAOc: 1.0})
model.add_reactions([reaction])
print(reaction.name + ": " + str(reaction.check_mass_balance()))
# so4_e ->
so4_HAOe = Metabolite('so4_HAOe', formula='O4S', name='Sulfate', compartment='HAOe', charge=-2)
reaction = Reaction('EX_HAO_so4')
reaction.name = 'Sulfate Transport'
reaction.subsystem = 'Transport'
reaction.lower_bound = -1000. # This is the default
reaction.upper_bound = 1000. # This is the default
reaction.add_metabolites({so4_e: HAO_Abnd,
so4_HAOe: -1.0})
model.add_reactions([reaction])
print(reaction.name + ": " + str(reaction.check_mass_balance()))
so4_HAOc = | |
= coefs
self.offset = offset
self.symmetric_bonds = symmetric_bonds
kwargs = {**desc.index_dict, **kwargs}
Expr.__init__(self, **kwargs)
# === PROPERTY EVALUATION METHODS
def _pyomo_expr(self, index=None):
"""Interface for generating Pyomo expressions.
Args:
            index (list): Optional, index used to create an instance of a Pyomo
expression. In the case of a scalar, the valid index is None.
Returns:
An instance of a Pyomo expression.
"""
i, *index = index
if index == (None,):
index = ()
result = self.offset
for n, j in enumerate(self.desc.canv.NeighborhoodIndexes[i]):
if j is not None:
if self.symmetric_bonds:
i, j = min(i, j), max(i, j)
result += (
self.coefs
if (
type(self.coefs) is float
or type(self.coefs) is int
or type(self.coefs) is SimpleParam
)
else self.coefs[n]
) * self.desc._pyomo_expr(index=(i, j, *index))
return result
class SumSites(Expr):
"""A class for expressions formed by summation over canvas sites.
Attributes:
desc (Descriptor/Expr): descriptors or expressions to sum over
coefs (float/list<float>): coefficients to multiply contributions
from each site
offset (float): coefficient to add to the expression
sites_to_sum (list<int>): sites to consider in the summation
(index information inherited from IndexedElem)
"""
# === STANDARD CONSTRUCTOR
def __init__(self, desc, coefs=1.0, offset=0.0, sites_to_sum=None, **kwargs):
"""Standard constructor for summation of site contributions.
Args:
desc (Descriptor): descriptors or expressions to sum across all sites
            coefs (float/list<float>): Optional, coefficients to multiply each
site term by. Default=1.0
offset (float): Optional, coefficient to add to the expression.
Default=0.0
sites_to_sum (list<int>): Optional, subset of canvas sites to sum.
Default=None, meaning all sites in the desc object are considered.
**kwargs: Optional, index information passed to IndexedElem if
interested in a subset of indices
Possible choices: sites, bonds, site_types, bond_types, confs.
"""
self.desc = desc
self.coefs = coefs
self.offset = offset
self.sites_to_sum = sites_to_sum if sites_to_sum is not None else desc.sites
kwargs = {**desc.index_dict, **kwargs}
kwargs.pop("sites")
Expr.__init__(self, **kwargs)
# === PROPERTY EVALUATION METHODS
def _pyomo_expr(self, index=None):
"""Interface for generating Pyomo expressions.
Args:
            index (list): Optional, index used to create an instance of a Pyomo
expression. In the case of a scalar, the valid index is None.
Returns:
An instance of a Pyomo expression.
"""
if index == (None,):
index = ()
result = self.offset
for i in self.sites_to_sum:
result += (
self.coefs
if (
type(self.coefs) is float
or type(self.coefs) is int
or type(self.coefs) is SimpleParam
)
else self.coefs[(i, *index)]
) * self.desc._pyomo_var[(i, *index)]
return result
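# Illustration only (not part of the original classes): the summation classes in
# this module broadcast `coefs` -- a bare number (or a Pyomo SimpleParam)
# multiplies every summed term, while anything else is indexed per term. A
# minimal standalone sketch of that rule, using a hypothetical helper name
# (the SimpleParam check is omitted here for brevity):
def _broadcast_coef(coefs, key):
    """Return the coefficient applied to the term identified by `key`."""
    return coefs if isinstance(coefs, (int, float)) else coefs[key]
# e.g. _broadcast_coef(2.0, (3,)) == 2.0 and _broadcast_coef({(3,): 5.0}, (3,)) == 5.0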
class SumBonds(Expr):
"""A class for expressions formed by summation over canvas bonds.
Attributes:
desc (Descriptor/Expr): descriptors or expressions to sum over
coefs (float/list<float>): coefficients to multiply contributions
from each bond
offset (float): coefficient to add to the expression
bonds_to_sum (list<int>): bonds to consider in the summation
(index information inherited from IndexedElem)
"""
# === STANDARD CONSTRUCTOR
def __init__(self, desc, coefs=1.0, offset=0.0, bonds_to_sum=None, **kwargs):
"""Standard constructor for summation of bond contributions.
Args:
desc (Descriptor): descriptors or expressions to sum across all bonds
            coefs (float/list<float>): Optional, coefficients to multiply each
bond term by. Default=1.0
offset (float): Optional, coefficient to add to the expression.
Default=0.0
bonds_to_sum (list<int>): Optional, subset of canvas bonds
(i.e., neighbor connections) to sum.
Default=None, meaning all bonds in the desc object are considered.
**kwargs: Optional, index information passed to IndexedElem if
interested in a subset of indices
Possible choices: sites, bonds, site_types, bond_types, confs.
"""
self.desc = desc
self.coefs = coefs
self.offset = offset
self.bonds_to_sum = bonds_to_sum if bonds_to_sum is not None else desc.bonds
kwargs = {**desc.index_dict, **kwargs}
kwargs.pop("bonds")
Expr.__init__(self, **kwargs)
# === PROPERTY EVALUATION METHODS
def _pyomo_expr(self, index=None):
"""Interface for generating Pyomo expressions.
Args:
            index (list): Optional, index used to create an instance of a Pyomo
expression. In the case of a scalar, the valid index is None.
Returns:
An instance of a Pyomo expression.
"""
if index == (None,):
index = ()
result = self.offset
for i, j in self.bonds_to_sum:
result += (
self.coefs
if (
type(self.coefs) is float
or type(self.coefs) is int
or type(self.coefs) is SimpleParam
)
else self.coefs[(i, j, *index)]
) * self.desc._pyomo_var[(i, j, *index)]
return result
class SumSiteTypes(Expr):
"""A class for expressions formed by summation over building block types.
Attributes:
desc (Descriptor/Expr): descriptors or expressions to sum over
coefs (float/list<float>): coefficients to multiply contributions
from each building block type
offset (float): coefficient to add to the expression
site_types_to_sum (list<BBlock>): building block types to consider in
the summation (index information inherited from IndexedElem)
"""
# === STANDARD CONSTRUCTOR
def __init__(self, desc, coefs=1.0, offset=0.0, site_types_to_sum=None, **kwargs):
"""Standard constructor for summation of contributions by site-type.
Args:
desc (Descriptor): descriptors or expressions to sum across site types
            coefs (float/list<float>): Optional, coefficients to multiply each
site-type term by. Default=1.0
offset (float): Optional, coefficient to add to the expression.
Default=0.0
            site_types_to_sum (list<BBlock>): Optional, subset of site types
to sum. Default=None, meaning all site-types in the desc object are
considered.
**kwargs: Optional, index information passed to IndexedElem if
interested in a subset of indices
Possible choices: sites, bonds, site_types, bond_types, confs.
"""
self.desc = desc
self.coefs = coefs
self.offset = offset
self.site_types_to_sum = (
site_types_to_sum if site_types_to_sum is not None else desc.site_types
)
kwargs = {**desc.index_dict, **kwargs}
kwargs.pop("site_types")
Expr.__init__(self, **kwargs)
# === PROPERTY EVALUATION METHODS
def _pyomo_expr(self, index=None):
"""Interface for generating Pyomo expressions.
Args:
            index (list): Optional, index used to create an instance of a Pyomo
expression. In the case of a scalar, the valid index is None.
Returns:
An instance of a Pyomo expression.
"""
assert index is not None
i, *index = index
if index == (None,):
index = ()
result = self.offset
for k in self.site_types_to_sum:
result += (
self.coefs
if (
type(self.coefs) is float
or type(self.coefs) is int
or type(self.coefs) is SimpleParam
)
else self.coefs[(i, k, *index)]
) * self.desc._pyomo_var[(i, k, *index)]
return result
class SumBondTypes(Expr):
"""A class for expressions formed by summation over building block types.
Attributes:
desc (Descriptor/Expr): descriptors or expressions to sum over
coefs (float/list<float>): coefficients to multiply contributions
from each pair of building block types
offset (float): coefficient to add to the expression
bond_types_to_sum (list<tuple<BBlock,BBlock>>): building block pairs
to consider in the summation
(index information inherited from IndexedElem)
"""
# === STANDARD CONSTRUCTOR
def __init__(self, desc, coefs=1.0, offset=0.0, bond_types_to_sum=None, **kwargs):
"""Standard constructor for summation of contributions by bond-type.
Args:
desc (Descriptor): descriptors or expressions to sum across bond types
            coefs (float/list<float>): Optional, coefficients to multiply each
bond-type term by. Default=1.0
offset (float): Optional, coefficient to add to the expression.
Default=0.0
            bond_types_to_sum (list<tuple<BBlock,BBlock>>): Optional, subset
of bond types to sum.
Default=None, meaning all bond-types in the desc object are
considered.
**kwargs: Optional, index information passed to IndexedElem if
interested in a subset of indices
Possible choices: sites, bonds, site_types, bond_types, confs.
"""
self.desc = desc
self.coefs = coefs
self.offset = offset
self.bond_types_to_sum = (
bond_types_to_sum if bond_types_to_sum is not None else desc.bond_types
)
kwargs = {**desc.index_dict, **kwargs}
kwargs.pop("bond_types")
Expr.__init__(self, **kwargs)
# === PROPERTY EVALUATION METHODS
def _pyomo_expr(self, index=None):
"""Interface for generating Pyomo expressions.
Args:
            index (list): Optional, index used to create an instance of a Pyomo
expression. In the case of a scalar, the valid index is None.
Returns:
An instance of a Pyomo expression.
"""
assert index is not None
i, j, *index = index
if index == (None,):
index = ()
result = self.offset
for k, l in self.bond_types_to_sum:
result += (
self.coefs
if (
type(self.coefs) is float
or type(self.coefs) is int
or type(self.coefs) is SimpleParam
)
else self.coefs[(i, j, k, l, *index)]
) * self.desc._pyomo_var[(i, j, k, l, *index)]
return result
class SumSitesAndTypes(Expr):
"""A class for expressions formed by summation over sites and building
block types.
Attributes:
desc (Descriptor/Expr): descriptors or expressions to sum over
coefs (float/list<float>): coefficients to multiply contributions
from each building block type
offset (float): coefficient to add to the expression
sites_to_sum (list<int>): sites to consider in the summation
site_types_to_sum (list<BBlock>): building block types to consider in
the summation (index information inherited from IndexedElem)
"""
# === STANDARD CONSTRUCTOR
def __init__(
self,
desc,
coefs=1.0,
offset=0.0,
sites_to_sum=None,
site_types_to_sum=None,
**kwargs
):
"""Standard constructor for summation of site contributions.
Args:
| |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from unittest import TestCase
import pytest
import os
from cielo_webservice.models import (
Comercial, Cartao, Pedido, Pagamento, Autenticacao, Autorizacao, Token,
Transacao, Avs, Captura, Cancelamento, Erro, xml_to_object
)
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
class TestComercial(TestCase):
def test_validate(self):
with pytest.raises(TypeError) as excinfo:
Comercial(numero='1234', chave='1234')
assert 'numero precisa ser do tipo inteiro.' in str(excinfo.value)
with pytest.raises(TypeError) as excinfo:
Comercial(numero=1234, chave=1234)
assert 'chave precisa ser do tipo string.' in str(excinfo.value)
def test_repr(self):
comercial = Comercial(
numero=1006993069,
chave='<KEY>'
)
self.assertEqual(
repr(comercial),
'<Comercial(numero=1006993069, chave=<KEY>)>'
)
class TestCartao(TestCase):
def test_validate(self):
with pytest.raises(TypeError) as excinfo:
Cartao(
numero='1234', validade=201805, indicador=1,
codigo_seguranca=123, nome_portador='<NAME>'
)
assert 'numero precisa ser do tipo inteiro.' in str(excinfo.value)
with pytest.raises(TypeError) as excinfo:
Cartao(
numero=1234, validade='201805', indicador=1,
codigo_seguranca=123, nome_portador='<NAME>'
)
assert 'validade precisa ser do tipo inteiro.' in str(excinfo.value)
with pytest.raises(TypeError) as excinfo:
Cartao(
numero=1234, validade=201805, indicador='1',
codigo_seguranca=123, nome_portador='<NAME>'
)
assert 'indicador precisa ser do tipo inteiro.' in str(excinfo.value)
with pytest.raises(TypeError) as excinfo:
Cartao(
numero=1234, validade=201805, indicador=1,
codigo_seguranca='123', nome_portador='<NAME>'
)
assert 'codigo_seguranca precisa ser do tipo inteiro.' in str(excinfo.value)
with pytest.raises(TypeError) as excinfo:
Cartao(
numero=1234, validade=201805, indicador=1,
codigo_seguranca=123, nome_portador=123
)
assert 'nome_portador precisa ser do tipo string.' in str(excinfo.value)
with pytest.raises(TypeError) as excinfo:
Cartao(token=123)
assert 'token precisa ser do tipo string.' in str(excinfo.value)
def test_repr(self):
cartao = Cartao(
numero=4012001037141112, validade=201805, indicador=1,
codigo_seguranca=123, nome_portador='<NAME>'
)
self.assertEqual(
repr(cartao),
'<Cartao(numero=4012001037141112, validade=201805, indicador=1, codigo_seguranca=123, nome_portador=<NAME>, token=None)>'
)
class TestPedido(TestCase):
def test_validate(self):
with pytest.raises(TypeError) as excinfo:
Pedido(
numero=1234, valor=10000, moeda=986,
data_hora='2011-12-07T11:43:37',
)
assert 'numero precisa ser do tipo string.' in str(excinfo.value)
with pytest.raises(TypeError) as excinfo:
Pedido(
numero='1234', valor='10000', moeda=986,
data_hora='2011-12-07T11:43:37',
)
assert 'valor precisa ser do tipo inteiro.' in str(excinfo.value)
with pytest.raises(TypeError) as excinfo:
Pedido(
numero='1234', valor=10000, moeda='986',
data_hora='2011-12-07T11:43:37',
)
assert 'moeda precisa ser do tipo inteiro.' in str(excinfo.value)
with pytest.raises(TypeError) as excinfo:
Pedido(
numero='1234', valor=10000, moeda=986,
data_hora=20111207,
)
assert 'data_hora precisa ser do tipo string.' in str(excinfo.value)
with pytest.raises(TypeError) as excinfo:
Pedido(
numero='1234', valor=10000, moeda=986,
data_hora='2011-12-07T11:43:37', descricao=123
)
assert 'descricao precisa ser do tipo string.' in str(excinfo.value)
with pytest.raises(TypeError) as excinfo:
Pedido(
numero='1234', valor=10000, moeda=986,
data_hora='2011-12-07T11:43:37', idioma=123
)
assert 'idioma precisa ser do tipo string.' in str(excinfo.value)
with pytest.raises(TypeError) as excinfo:
Pedido(
numero='1234', valor=10000, moeda=986,
data_hora='2011-12-07T11:43:37', taxa_embarque='123'
)
assert 'taxa_embarque precisa ser do tipo inteiro.' in str(excinfo.value)
with pytest.raises(TypeError) as excinfo:
Pedido(
numero='1234', valor=10000, moeda=986,
data_hora='2011-12-07T11:43:37', soft_descriptor=123
)
assert 'soft_descriptor precisa ser do tipo string.' in str(excinfo.value)
def test_repr(self):
pedido = Pedido(
numero='1234', valor=10000, moeda=986,
data_hora='2016-03-05T03:30:43.982543'
)
self.assertEqual(
repr(pedido),
'<Pedido(numero=1234, valor=10000, moeda=986, data_hora=2016-03-05T03:30:43.982543, descricao=None, idioma=PT, taxa_embarque=None, soft_descriptor=None)>'
)
class TestPagamento(TestCase):
def test_validate(self):
with pytest.raises(TypeError) as excinfo:
Pagamento(bandeira=1, produto=1, parcelas=1)
assert 'bandeira precisa ser do tipo string.' in str(excinfo.value)
with pytest.raises(TypeError) as excinfo:
Pagamento(bandeira='visa', produto=1, parcelas=1)
assert 'produto precisa ser do tipo string.' in str(excinfo.value)
with pytest.raises(TypeError) as excinfo:
Pagamento(bandeira='visa', produto='1', parcelas='1')
assert 'parcelas precisa ser do tipo inteiro.' in str(excinfo.value)
def test_repr(self):
pagamento = Pagamento(bandeira='visa', produto='1', parcelas=1)
self.assertEqual(
repr(pagamento),
'<Pagamento(bandeira=visa, produto=1, parcelas=1)>'
)
class TestAutenticacao(TestCase):
def test_validate(self):
with pytest.raises(TypeError) as excinfo:
Autenticacao(
codigo='1', mensagem='msg', data_hora='2011-12-07T11:43:37',
valor=10000, eci=7
)
assert 'codigo precisa ser do tipo inteiro.' in str(excinfo.value)
with pytest.raises(TypeError) as excinfo:
Autenticacao(
codigo=1, mensagem=1, data_hora='2011-12-07T11:43:37',
valor=10000, eci=7
)
assert 'mensagem precisa ser do tipo string.' in str(excinfo.value)
with pytest.raises(TypeError) as excinfo:
Autenticacao(
codigo=1, mensagem='msg', data_hora=201112,
valor=10000, eci=7
)
assert 'data_hora precisa ser do tipo string.' in str(excinfo.value)
with pytest.raises(TypeError) as excinfo:
Autenticacao(
codigo=1, mensagem='msg', data_hora='2011-12-07T11:43:37',
valor='10000', eci=7
)
assert 'valor precisa ser do tipo inteiro.' in str(excinfo.value)
with pytest.raises(TypeError) as excinfo:
Autenticacao(
codigo=1, mensagem='msg', data_hora='2011-12-07T11:43:37',
valor=10000, eci='7'
)
assert 'eci precisa ser do tipo inteiro.' in str(excinfo.value)
def test_repr(self):
autenticacao = Autenticacao(
codigo=6, mensagem='Transacao sem autenticacao',
data_hora='2016-03-05T00:03:46.158-03:00', valor=10000, eci=7
)
self.assertEqual(
repr(autenticacao),
'<Autenticacao(codigo=6, mensagem=Transacao sem autenticacao, data_hora=2016-03-05T00:03:46.158-03:00, valor=10000, eci=7)>'
)
class TestAutorizacao(TestCase):
def test_validate(self):
with pytest.raises(TypeError) as excinfo:
Autorizacao(
codigo='1', mensagem='msg', data_hora='2011-12-07T11:43:37',
valor=10000, lr="01", arp=1, nsu=1
)
assert 'codigo precisa ser do tipo inteiro.' in str(excinfo.value)
with pytest.raises(TypeError) as excinfo:
Autorizacao(
codigo=1, mensagem=1, data_hora='2011-12-07T11:43:37',
valor=10000, lr="01", arp=1, nsu=1
)
assert 'mensagem precisa ser do tipo string.' in str(excinfo.value)
with pytest.raises(TypeError) as excinfo:
Autorizacao(
codigo=1, mensagem='msg', data_hora=201112,
valor=10000, lr="01", arp=1, nsu=1
)
assert 'data_hora precisa ser do tipo string.' in str(excinfo.value)
with pytest.raises(TypeError) as excinfo:
Autorizacao(
codigo=1, mensagem='msg', data_hora='2011-12-07T11:43:37',
valor='10000', lr="01", arp=1, nsu=1
)
assert 'valor precisa ser do tipo inteiro.' in str(excinfo.value)
with pytest.raises(TypeError) as excinfo:
Autorizacao(
codigo=1, mensagem='msg', data_hora='2011-12-07T11:43:37',
valor=10000, lr=1, arp=1, nsu=1
)
assert 'lr precisa ser do tipo string.' in str(excinfo.value)
with pytest.raises(TypeError) as excinfo:
Autorizacao(
codigo=1, mensagem='msg', data_hora='2011-12-07T11:43:37',
valor=10000, lr="01", arp='1', nsu=1
)
assert 'arp precisa ser do tipo inteiro.' in str(excinfo.value)
with pytest.raises(TypeError) as excinfo:
Autorizacao(
codigo=1, mensagem='msg', data_hora='2011-12-07T11:43:37',
valor=10000, lr="01", arp=1, nsu='1'
)
assert 'nsu precisa ser do tipo inteiro.' in str(excinfo.value)
def test_repr(self):
autorizacao = Autorizacao(
codigo=6, mensagem='Transacao autorizada',
data_hora='2016-03-05T00:03:46.161-03:00', valor=10000, lr="00",
arp=123456, nsu=36318
)
self.assertEqual(
repr(autorizacao),
'<Autorizacao(codigo=6, mensagem=Transacao autorizada, data_hora=2016-03-05T00:03:46.161-03:00, valor=10000, lr=00, arp=123456, nsu=36318)>'
)
class TestToken(TestCase):
def test_validate(self):
with pytest.raises(TypeError) as excinfo:
Token(codigo=1, status=1, numero='1234')
assert 'codigo precisa ser do tipo string.' in str(excinfo.value)
with pytest.raises(TypeError) as excinfo:
Token(codigo='code', status='1', numero='1234')
assert 'status precisa ser do tipo inteiro.' in str(excinfo.value)
with pytest.raises(TypeError) as excinfo:
Token(codigo='code', status=1, numero=1234)
assert 'numero precisa ser do tipo string.' in str(excinfo.value)
def test_repr(self):
token = Token(codigo='code', status=1, numero='1234')
self.assertEqual(
repr(token),
'<Token(codigo=code, status=1, numero=1234)>'
)
class TestAvs(TestCase):
def test_validate(self):
with pytest.raises(TypeError) as excinfo:
Avs(
endereco=1, complemento='', numero=1, bairro='Bairro',
cep='00000-000'
)
assert 'endereco precisa ser do tipo string.' in str(excinfo.value)
with pytest.raises(TypeError) as excinfo:
Avs(
endereco='Rua 1', complemento=1, numero=1, bairro='Bairro',
cep='00000-000'
)
assert 'complemento precisa ser do tipo string.' in str(excinfo.value)
with pytest.raises(TypeError) as excinfo:
Avs(
endereco='Rua 1', complemento='', numero='1', bairro='Bairro',
cep='00000-000'
)
assert 'numero precisa ser do tipo inteiro.' in str(excinfo.value)
with pytest.raises(TypeError) as excinfo:
Avs(
endereco='Rua 1', complemento='', numero=1, bairro=1,
cep='00000-000'
)
assert 'bairro precisa ser do tipo string.' in str(excinfo.value)
with pytest.raises(TypeError) as excinfo:
Avs(
endereco='Rua 1', complemento='', numero=1, bairro='Bairro',
cep=00000000
)
assert 'cep precisa ser do tipo string.' in str(excinfo.value)
def test_repr(self):
avs = Avs(
endereco='Rua 1', complemento='', numero=1, bairro='Bairro',
cep='00000000'
)
self.assertEqual(
repr(avs),
'<Avs(endereco=Rua 1, complemento=, numero=1, bairro=Bairro, cep=00000000)>'
)
class TestCaptura(TestCase):
def test_validate(self):
with pytest.raises(TypeError) as excinfo:
Captura(
codigo='1', mensagem='mensagem',
data_hora='2011-12-07T11:43:37', valor=10000, taxa_embarque=0
)
assert 'codigo precisa ser do tipo inteiro.' in str(excinfo.value)
with pytest.raises(TypeError) as excinfo:
Captura(
codigo=1, mensagem=1, data_hora='2011-12-07T11:43:37',
valor=10000, taxa_embarque=0
)
assert 'mensagem precisa ser do tipo string.' in str(excinfo.value)
with pytest.raises(TypeError) as excinfo:
Captura(
codigo=1, mensagem='mensagem', data_hora=1,
valor=10000, taxa_embarque=0
)
assert 'data_hora precisa ser do tipo string.' in str(excinfo.value)
with pytest.raises(TypeError) as excinfo:
Captura(
codigo=1, mensagem='mensagem', data_hora='2011-12-07T11:43:37',
valor='10000', taxa_embarque=0
)
assert 'valor precisa ser do tipo inteiro.' in str(excinfo.value)
with pytest.raises(TypeError) as excinfo:
Captura(
codigo=1, mensagem='mensagem', data_hora='2011-12-07T11:43:37',
valor=10000, taxa_embarque='0'
)
assert 'taxa_embarque precisa ser do tipo inteiro.' in str(excinfo.value)
def test_repr(self):
captura = Captura(
codigo=1, mensagem='mensagem', data_hora='2011-12-07T11:43:37',
valor=10000, taxa_embarque=0
)
self.assertEqual(
repr(captura),
'<Captura(codigo=1, mensagem=mensagem, data_hora=2011-12-07T11:43:37, valor=10000, taxa_embarque=0)>'
)
class TestCancelamento(TestCase):
def test_validate(self):
with pytest.raises(TypeError) as excinfo:
Cancelamento(
codigo='1', mensagem='mensagem',
data_hora='2011-12-07T11:43:37', valor=10000,
)
assert 'codigo precisa ser do tipo inteiro.' in str(excinfo.value)
with pytest.raises(TypeError) as excinfo:
Cancelamento(
codigo=1, mensagem=1, data_hora='2011-12-07T11:43:37',
valor=10000,
)
assert 'mensagem precisa ser do tipo string.' in str(excinfo.value)
with pytest.raises(TypeError) as excinfo:
Cancelamento(
codigo=1, mensagem='mensagem', data_hora=201112, valor=10000
)
assert 'data_hora precisa ser do tipo string.' in str(excinfo.value)
with pytest.raises(TypeError) as excinfo:
Cancelamento(
codigo=1, mensagem='mensagem',
data_hora='2011-12-07T11:43:37', valor='10000',
)
assert 'valor precisa ser do tipo inteiro.' in str(excinfo.value)
def test_repr(self):
cancelamento = Cancelamento(
codigo=1, mensagem='mensagem', data_hora='2011-12-07T11:43:37',
valor=10000
)
self.assertEqual(
repr(cancelamento),
'<Cancelamento(codigo=1, mensagem=mensagem, data_hora=2011-12-07T11:43:37, valor=10000)>'
)
class TestErro(TestCase):
def test_validate(self):
with pytest.raises(TypeError) as excinfo:
Erro(codigo=1, mensagem='mensagem')
assert 'codigo precisa ser do tipo string.' in str(excinfo.value)
with pytest.raises(TypeError) as excinfo:
Erro(codigo='001', mensagem=1)
assert 'mensagem precisa ser do tipo string.' in str(excinfo.value)
def test_repr(self):
erro = Erro(codigo='001', mensagem='erro')
self.assertEqual(
repr(erro),
'<Erro(codigo=001, mensagem=erro)>'
)
class TestTransacao(TestCase):
def test_validate(self):
comercial = Comercial(numero=1234, chave='1234')
cartao = Cartao(
numero=1234, validade=201805, indicador=1,
codigo_seguranca=123, nome_portador='<NAME>'
)
pedido = Pedido(
numero='1234', valor=10000, moeda=986,
data_hora='2011-12-07T11:43:37',
)
pagamento = Pagamento(bandeira='visa', produto='1', parcelas=1)
autenticacao = Autenticacao(
codigo=1, mensagem='msg', data_hora='2011-12-07T11:43:37',
valor=10000, eci=7
)
autorizacao = Autorizacao(
codigo=1, mensagem='msg', data_hora='2011-12-07T11:43:37',
valor=10000, lr="01", arp=1, nsu=1
)
token = Token(codigo='codigo', status=1, numero='1234')
avs = Avs(
endereco='Rua 1', complemento='', | |
a topic distribution for "<input_data>"
Then the topic distribution is "<topic_distribution>"
And I create a local topic distribution for "<input_data>"
Then the local topic distribution is "<topic_distribution>"
"""
headers = ["data", "source_wait", "dataset_wait", "model_wait",
"source_conf", "input_data", "topic_distribution"]
examples = [
['data/spam.csv', '30', '30', '80',
'{"fields": {"000001": {"optype": "text", "term_analysis": '
'{"case_sensitive": true, "stem_words": true, '
'"use_stopwords": false, "language": "en"}}}}',
'{"Type": "ham", "Message": "Mobile call"}',
'[0.51133, 0.00388, 0.00574, 0.00388, 0.00388, 0.00388, '
'0.00388, 0.00388, 0.00388, 0.00388, 0.00388, 0.44801]'],
['data/spam.csv', '30', '30', '30',
'{"fields": {"000001": {"optype": "text", "term_analysis": '
'{"case_sensitive": true, "stem_words": true, '
'"use_stopwords": false, "language": "en"}}}}',
'{"Type": "ham", "Message": "Go until jurong point, crazy.. '
'Available only in bugis n great world la e buffet... Cine '
'there got amore wat..."}',
'[0.39188, 0.00643, 0.00264, 0.00643, 0.08112, 0.00264, '
'0.37352, 0.0115, 0.00707, 0.00327, 0.00264, 0.11086]']]
show_doc(self.test_scenario4)
for example in examples:
example = dict(zip(headers, example))
show_method(self, sys._getframe().f_code.co_name, example)
source_create.i_upload_a_file(self, example["data"])
source_create.the_source_is_finished(
self, example["source_wait"])
source_create.i_update_source_with(self, example["source_conf"])
dataset_create.i_create_a_dataset(self)
dataset_create.the_dataset_is_finished_in_less_than(
self, example["dataset_wait"])
topic_create.i_create_a_topic_model(self)
topic_create.the_topic_model_is_finished_in_less_than(
self, example["model_wait"])
prediction_compare.i_create_a_local_topic_model(self)
topic_create.i_create_a_topic_distribution(
self, example["input_data"])
prediction_compare.the_topic_distribution_is(
self, example["topic_distribution"])
topic_create.i_create_a_local_topic_distribution(
self, example["input_data"])
prediction_compare.the_local_topic_distribution_is(
self, example["topic_distribution"])
def test_scenario5(self):
"""
Scenario: Successfully comparing association sets:
Given I create a data source uploading a "<data>" file
And I wait until the source is ready less than <source_wait> secs
And I update the source with params "<source_conf>"
And I create a dataset
And I wait until the dataset is ready less than <dataset_wait> secs
            And I create an association
And I wait until the association is ready less than <model_wait> secs
And I create a local association
When I create an association set for "<input_data>"
Then the association set is like the contents of "<association_set_file>"
And I create a local association set for "<input_data>"
Then the local association set is like the contents of "<association_set_file>"
"""
headers = ["data", "source_wait", "dataset_wait", "model_wait",
"source_conf", "association_set_file", "input_data"]
examples = [
['data/groceries.csv', '20', '20', '50', '{"fields": {"00000": {"optype": "text", "term_analysis": {"token_mode": "all", "language": "en"}}}}', 'data/associations/association_set.json', '{"field1": "cat food"}']]
show_doc(self.test_scenario5)
for example in examples:
example = dict(zip(headers, example))
show_method(self, sys._getframe().f_code.co_name, example)
source_create.i_upload_a_file(self, example["data"])
source_create.the_source_is_finished(self, example["source_wait"])
source_create.i_update_source_with(self, example["source_conf"])
dataset_create.i_create_a_dataset(self)
dataset_create.the_dataset_is_finished_in_less_than(
self, example["dataset_wait"])
association_create.i_create_an_association_from_dataset(self)
association_create.the_association_is_finished_in_less_than(
self, example["model_wait"])
prediction_compare.i_create_a_local_association(self)
prediction_create.i_create_an_association_set(
self, example["input_data"])
prediction_compare.the_association_set_is_like_file(
self, example["association_set_file"])
prediction_compare.i_create_a_local_association_set(
self, example["input_data"])
prediction_compare.the_local_association_set_is_like_file(
self, example["association_set_file"])
def test_scenario6(self):
"""
Scenario: Successfully comparing predictions for ensembles:
Given I create a data source uploading a "<data>" file
And I wait until the source is ready less than <source_wait> secs
And I create a dataset
And I wait until the dataset is ready less than <dataset_wait> secs
And I create an ensemble with "<model_conf>"
And I wait until the ensemble is ready less than <model_wait> secs
And I create a local ensemble
When I create a prediction for "<input_data>"
Then the prediction for "<objective_id>" is "<prediction>"
And I create a local prediction for "<input_data>"
Then the local prediction is "<prediction>"
"""
headers = ["data", "source_wait", "dataset_wait", "model_wait",
"input_data", "objective_id", "prediction", "model_conf"]
examples = [
['data/iris_unbalanced.csv', '30', '30', '120',
'{"petal width": 4}', '000004', 'Iris-virginica',
'{"boosting": {"iterations": 5}, "number_of_models": 5}'],
['data/grades.csv', '30', '30', '120', '{"Midterm": 20}',
'000005', 61.61036,
'{"boosting": {"iterations": 5}, "number_of_models": 5}']]
show_doc(self.test_scenario6)
for example in examples:
example = dict(zip(headers, example))
show_method(self, sys._getframe().f_code.co_name, example)
source_create.i_upload_a_file(
self, example["data"], shared=example["data"])
source_create.the_source_is_finished(
self, example["source_wait"], shared=example["data"])
dataset_create.i_create_a_dataset(self, shared=example["data"])
dataset_create.the_dataset_is_finished_in_less_than(
self, example["dataset_wait"], shared=example["data"])
ensemble_create.i_create_an_ensemble_with_params(
self, example["model_conf"])
ensemble_create.the_ensemble_is_finished_in_less_than(
self, example["model_wait"])
ensemble_create.create_local_ensemble(self)
prediction_create.i_create_an_ensemble_prediction(
self, example["input_data"])
prediction_create.the_prediction_is(
self, example["objective_id"], example["prediction"])
prediction_compare.i_create_a_local_ensemble_prediction(
self, example["input_data"])
prediction_compare.the_local_prediction_is(
self, example["prediction"])
def test_scenario7(self):
"""
Scenario: Successfully comparing predictions for ensembles with proportional missing strategy:
Given I create a data source uploading a "<data>" file
And I wait until the source is ready less than <source_wait> secs
And I create a dataset
And I wait until the dataset is ready less than <dataset_wait> secs
And I create an ensemble with "<model_conf>"
And I wait until the ensemble is ready less than <model_wait> secs
And I create a local ensemble
When I create a proportional missing strategy prediction for "<input_data>" with <"operating">
Then the prediction for "<objective_id>" is "<prediction>"
And the confidence for the prediction is "<confidence>"
And I create a proportional missing strategy local prediction for "<input_data>" with <"operating">
Then the local prediction is "<prediction>"
And the local prediction's confidence is "<confidence>"
"""
headers = ["data", "source_wait", "dataset_wait", "model_wait",
"input_data", "objective_id", "prediction", "confidence",
"model_conf", "operating"]
examples = [
['data/iris.csv', '30', '30', '80', '{}', '000004', 'Iris-virginica', '0.33784', '{"boosting": {"iterations": 5}}', {}],
['data/iris.csv', '30', '30', '80', '{}', '000004', 'Iris-versicolor', '0.27261', '{"number_of_models": 5}', {"operating_kind": "confidence"}],
['data/grades.csv', '30', '30', '50', '{}', '000005', '70.505792', '30.7161', '{"number_of_models": 5}', {}]]
show_doc(self.test_scenario7)
for example in examples:
example = dict(zip(headers, example))
show_method(self, sys._getframe().f_code.co_name, example)
source_create.i_upload_a_file(
self, example["data"], shared=example["data"])
source_create.the_source_is_finished(
self, example["source_wait"], shared=example["data"])
dataset_create.i_create_a_dataset(self, shared=example["data"])
dataset_create.the_dataset_is_finished_in_less_than(
self, example["dataset_wait"], shared=example["data"])
ensemble_create.i_create_an_ensemble_with_params(
self, example["model_conf"])
ensemble_create.the_ensemble_is_finished_in_less_than(
self, example["model_wait"])
ensemble_create.create_local_ensemble(self)
prediction_create.i_create_an_ensemble_proportional_prediction(
self, example["input_data"])
prediction_create.the_prediction_is(
self, example["objective_id"], example["prediction"])
prediction_create.the_confidence_is(self, example["confidence"])
prediction_create.create_local_ensemble_proportional_prediction_with_confidence(
self, example["input_data"], example["operating"])
prediction_compare.the_local_ensemble_prediction_is(
self, example["prediction"])
prediction_compare.the_local_prediction_confidence_is(
self, example["confidence"])
def test_scenario7b(self):
"""
Scenario: Successfully comparing predictions for ensembles with proportional missing strategy:
Given I create a data source uploading a "<data>" file
And I wait until the source is ready less than <source_wait> secs
And I create a dataset
And I wait until the dataset is ready less than <dataset_wait> secs
And I create an ensemble with "<model_conf>"
And I wait until the ensemble is ready less than <model_wait> secs
And I create a local ensemble
When I create a proportional missing strategy prediction for "<input_data>" with <"operating">
Then the prediction for "<objective_id>" is "<prediction>"
And the confidence for the prediction is "<confidence>"
And I create a proportional missing strategy local prediction for "<input_data>" with <"operating">
Then the local prediction is "<prediction>"
And the local prediction's confidence is "<confidence>"
"""
headers = ["data", "source_wait", "dataset_wait", "model_wait",
"input_data", "objective_id", "prediction", "confidence",
"model_conf", "operating"]
examples = [
['data/grades.csv', '30', '30', '80',
'{"Midterm": 20}', '000005', '54.82214', '25.89672',
'{"number_of_models": 5}', {"operating_kind": "confidence"}],
['data/grades.csv', '30', '30', '80', '{"Midterm": 20}',
'000005', '45.4573', '29.58403', '{"number_of_models": 5}', {}],
['data/grades.csv', '30', '30', '80',
'{"Midterm": 20, "Tutorial": 90, "TakeHome": 100}', '000005',
'42.814', '31.51804', '{"number_of_models": 5}', {}]]
show_doc(self.test_scenario7b)
for example in examples:
example = dict(zip(headers, example))
show_method(self, sys._getframe().f_code.co_name, example)
source_create.i_upload_a_file(
self, example["data"], shared=example["data"])
source_create.the_source_is_finished(
self, example["source_wait"], shared=example["data"])
dataset_create.i_create_a_dataset(self)
dataset_create.the_dataset_is_finished_in_less_than(
self, example["dataset_wait"])
ensemble_create.i_create_an_ensemble_with_params(
self, example["model_conf"])
ensemble_create.the_ensemble_is_finished_in_less_than(
self, example["model_wait"])
ensemble_create.create_local_ensemble(self)
prediction_create.i_create_an_ensemble_proportional_prediction(
self, example["input_data"], example["operating"])
prediction_create.the_prediction_is(
self, example["objective_id"], example["prediction"])
prediction_create.the_confidence_is(self, example["confidence"])
prediction_create.create_local_ensemble_proportional_prediction_with_confidence(
self, example["input_data"], example["operating"])
prediction_compare.the_local_ensemble_prediction_is(
self, example["prediction"])
prediction_compare.the_local_prediction_confidence_is(
self, example["confidence"])
def test_scenario8(self):
"""
Scenario: Successfully comparing predictions for ensembles:
Given I create a local ensemble predictor from "<directory>"
And I create a local prediction for "<input_data>"
Then the local prediction is "<prediction>"
"""
headers = ["directory", "input_data", "prediction"]
examples = [
['bigml/tests/my_ensemble', '{"petal width": 4}', 68.1258030739]]
show_doc(self.test_scenario8)
for example in examples:
example = dict(zip(headers, example))
show_method(self, sys._getframe().f_code.co_name, example)
ensemble_create.create_local_ensemble_predictor(
self, example["directory"])
prediction_compare.i_create_a_local_ensemble_prediction(
self, example["input_data"])
prediction_compare.the_local_ensemble_prediction_is(
self, example["prediction"])
def test_scenario9(self):
"""
Scenario: Successfully comparing predictions for ensembles with proportional missing strategy in a supervised model:
Given I create a data source uploading a "<data>" file
And I wait until the source is ready less than <source_wait> secs
And I create a dataset
And I wait until the dataset is ready less than <dataset_wait> secs
And I create an ensemble with "<model_conf>"
And I wait until the ensemble is ready less than <model_wait> secs
And I create a local ensemble
When I create a proportional missing strategy prediction for "<input_data>" with <"operating">
Then the prediction for "<objective_id>" is "<prediction>"
And the confidence for the prediction is "<confidence>"
And I create a proportional missing strategy local prediction for "<input_data>" with <"operating">
Then the local prediction is "<prediction>"
And the local prediction's confidence is "<confidence>"
"""
headers = ["data", "source_wait", "dataset_wait", "model_wait",
"input_data", "objective_id", "prediction", "confidence",
"model_conf", "operating"]
examples = [
['data/iris.csv', '10', '10', '80', '{}', '000004', 'Iris-virginica', '0.33784', '{"boosting": {"iterations": 5}}', {}],
['data/iris.csv', '10', '10', '80', '{}', '000004', 'Iris-versicolor', '0.27261', '{"number_of_models": 5}', {"operating_kind": "confidence"}]]
show_doc(self.test_scenario9)
for example in examples:
example = dict(zip(headers, example))
show_method(self, sys._getframe().f_code.co_name, example)
source_create.i_upload_a_file(
self, example["data"], shared=example["data"])
source_create.the_source_is_finished(
self, example["source_wait"])
dataset_create.i_create_a_dataset(self)
dataset_create.the_dataset_is_finished_in_less_than(
self, example["dataset_wait"])
import logging, requests, json, smtplib, datetime, os, random, sys, subprocess, traceback
from django.utils import timezone
from django.contrib.auth.models import User
from django.core.exceptions import ObjectDoesNotExist
from homeauto.models import Trigger, Nugget, Action, HouseLight, Account, Person, CustomEvent
from hue.models import Sensor, Scene, Light, Group, Schedule
from wemo.models import Device as Wemo
from decora.models import Switch
from vivint.models import Device as VivintDevice
from vivint.models import Panel
import hue.actions as HueAction
import wemo.actions as WemoAction
import decora.actions as DecoraAction
from datetime import timedelta
from django.db.models.signals import pre_save, post_save
from django.dispatch import receiver
logger = logging.getLogger(__name__)
# this is for intercepting database updates
@receiver(post_save, sender=Person)
def register_person_event(sender, instance, **kwargs):
logger.info("Person:{"+instance.user.username+"} is home {"+str(instance.is_home)+"}")
if instance.is_home:
triggers = Trigger.objects.filter(trigger=(Trigger.PEOPLE), people_has_arrived=True, people=instance)
for t in triggers:
results = []
logger.debug('Evaluating: ' + t.name + " ("+str(t.people.count())+" people)")
if t.enabled:
if t.people.count() > 1:
for person in t.people.all():
# person = Person.objects.get(id=person.id)
if person.is_home:
logger.debug(' TEST (arrive): '+person.user.username + ' is home, matching trigger: '+t.name)
results.append(True)
else:
logger.debug(' TEST (arrive): '+person.user.username + ' is not home, not matching trigger: '+t.name)
results.append(False)
else:
results.append(True)
else:
logger.debug('Trigger:{' + t.name + '}{' + str(t.id) + '}{disabled}')
results.append(False)
logger.debug(" Results: "+str(results))
if all(results):
logger.debug('Trigger:{' + t.name + '}{' + str(t.id) + '}{fired}')
evaluate_nuggets(t.id)
else:
triggers = Trigger.objects.filter(trigger=(Trigger.PEOPLE), people_has_left=True, people=instance)
for t in triggers:
results = []
logger.debug('Evaluating: ' + t.name + " ("+str(t.people.count())+" people)")
if t.enabled:
if t.people.count() > 1:
for person in t.people.all():
# person = Person.objects.get(id=person.id)
if not person.is_home:
logger.debug(' TEST (leave): '+person.user.username + ' is not home, matching trigger: '+t.name)
results.append(True)
else:
logger.debug(' TEST (leave): '+person.user.username + ' is home, NOT matching trigger: '+t.name)
results.append(False)
else:
results.append(True)
else:
logger.debug('Trigger:{' + t.name + '}{' + str(t.id) + '}{disabled}')
results.append(False)
logger.debug(" Results: "+str(results))
if all(results):
logger.debug('Trigger:{' + t.name + '}{' + str(t.id) + '}{fired}')
evaluate_nuggets(t.id)
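# A minimal sketch of how this receiver is exercised: any code path that saves a
# Person (the username below is an illustrative value) fires post_save, which runs
# register_person_event and evaluates the matching arrival/departure triggers.
#
#     person = Person.objects.get(user__username="alice")
#     person.is_home = True
#     person.save()   # post_save -> register_person_event -> arrival triggers checked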
def register_watcher_event(event):
logger.debug(event)
if event.event_type == 'created':
logger.info('Received created event - %s.' % event.src_path)
with open(event.src_path) as f:
logger.info(subprocess.run(['cat', event.src_path], stdout=subprocess.PIPE))
if not f.readline().rstrip():
logger.error('Input file is empty')
remove_file(event.src_path)
return
with open(event.src_path) as f:
s = f.readline().rstrip().split(':')
if len(s) == 1:
try:
e = CustomEvent.objects.get(name=(s[0].lower()))
except ObjectDoesNotExist as e:
logger.error(e)
logger.error('There are no watcher events defined for: ' + s[0])
except:
logger.error("Error:"+ str(traceback.format_exc()))
else:
try:
t = Trigger.objects.get(trigger=(Trigger.CUSTOM_EVENT), event__name=(e.name))
if t.enabled:
logger.debug('Trigger:{' + t.name + '}{' + str(t.id) + '}{fired}')
eval = True
else:
logger.debug('Trigger:{' + t.name + '}{' + str(t.id) + '}{disabled}')
eval = False
except Exception as e:
try:
logger.error(e)
eval = False
finally:
e = None
del e
if eval:
evaluate_nuggets(t.id)
elif len(s) == 2:
key = s[0].lower()
value = s[1]
logger.info('Found:{' + key + '}{' + value+"}")
if key == 'arrive':
try:
p = Person.objects.get(user__username=value)
logger.debug(p.user.first_name+" was found")
if p:
p.is_home = True
p.save()
else:
logger.error('No person was found with the username: ' + str(value))
except:
logger.error("Unexpected error:"+ str(traceback.format_exc()))
elif key == 'leave':
try:
p = Person.objects.get(user__username=value)
logger.debug(p.user.first_name+" was found")
if p:
p.is_home = False
try:
p.save()
except:
logger.error("Unexpected error:"+ str(traceback.format_exc()))
else:
logger.error('No person was found with the username: ' + str(value))
except:
logger.error("Unexpected error:"+ str(traceback.format_exc()))
else:
logger.error('No action defined for key: ' + str(key))
else:
logger.error(event.src_path + ' contains invalid content: ' + str(s))
remove_file(event.src_path)
else:
logger.info('New event - %s.' % event)
logger.debug("end of register watcher event - %s" % event)
def remove_file(path):
if os.path.isfile(path):
logger.debug('removing ' + path)
try:
os.remove(path)
except:
logger.error("Unexpected error:"+ str(sys.exc_info()[0]))
def register_motion_event(source, device_id):
if 'Hue' in source:
m = Sensor.objects.get(id=device_id)
elif 'Vivint' in source:
m = VivintDevice.objects.get(id=device_id)
logger.info('Sensor{' + source + '}{' + m.name + '}{' + str(m.id) + '}{' + m.type + '}{Active}')
try:
t = Trigger.objects.get(trigger=(Trigger.MOTION), motion_detector__source_id=device_id)
if t.enabled:
logger.debug('Trigger:{' + t.name + '}{' + str(t.id) + '}{fired}')
eval = True
else:
logger.debug('Trigger:{' + t.name + '}{' + str(t.id) + '}{disabled}')
eval = False
except:
eval = False
if eval:
evaluate_nuggets(t.id)
def register_sensor_event(source, device_id, state):
if 'Hue' in source:
m = Sensor.objects.get(id=device_id)
elif 'Vivint' in source:
m = VivintDevice.objects.get(id=device_id)
logger.info('Sensor{' + source + '}{' + m.name + '}{' + str(m.id) + '}{' + m.type + '}{' + state+'}')
try:
if state == 'Opened':
t = Trigger.objects.get(trigger=(Trigger.SENSOR_OPENED), sensor__source_id=device_id)
elif state == 'Closed':
t = Trigger.objects.get(trigger=(Trigger.SENSOR_CLOSED), sensor__source_id=device_id)
elif state == 'Locked':
t = Trigger.objects.get(trigger=(Trigger.LOCK_LOCKED), lock__source_id=device_id)
elif state == 'Unlocked':
t = Trigger.objects.get(trigger=(Trigger.LOCK_UNLOCKED), lock__source_id=device_id)
else:
logger.error('Sensor has an unknown state of ' + state)
if t.enabled:
logger.debug('Trigger:{' + t.name + '}{' + str(t.id) + '}{fired}')
eval = True
else:
logger.debug('Trigger:{' + t.name + '}{' + str(t.id) + '}{disabled}')
eval = False
except:
eval = False
if eval:
evaluate_nuggets(t.id)
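# e.g. register_sensor_event('Vivint', 12, 'Opened') looks up the Vivint device with
# id 12 and fires any enabled SENSOR_OPENED trigger bound to it; the device id and
# state here are illustrative values.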
def check_security_trigger(t, who):
if t.enabled:
if t.security_armed_to:
logger.debug("Looking for triggers with security_armed_to flagged")
if t.people.filter(user__first_name=who[0]).exists():
logger.debug('Trigger:{' + t.name + '}{' + str(t.id) + '}{fired}')
evaluate_nuggets(t.id)
if t.security_changed_to:
logger.debug("Looking for triggers with security_changed_to flagged")
logger.debug('Trigger:{' + t.name + '}{' + str(t.id) + '}{fired}')
evaluate_nuggets(t.id)
else:
logger.debug('Trigger:{' + t.name + '}{' + str(t.id) + '}{disabled}')
def register_security_event(the_who, state):
try:
who = the_who.split()
username = User.objects.get(first_name=(who[0]), last_name=(who[1])).username
logger.info('Security{Vivint}{' + username + '} set house to {' + state+'}')
except:
logger.info('Security{Vivint}{' + the_who + '} set house to {' + state+'}')
finally:
who = the_who.split()
try:
triggers = Trigger.objects.filter(trigger=(Trigger.SECURITY_ARMED_STATE), security_armed_state=state)
except:
logger.error(sys.exc_info()[0])
else:
logger.debug('found '+str(triggers.count())+' triggers with '+state)
for trigger in triggers:
    check_security_trigger(trigger, who)
def register_hvac_event(who, what, oldValue, newValue):
try:
v = float(oldValue)
logger.info('HvacValue:{' + who + '}{' + what + '}{' + str(oldValue) + '}{' + str(newValue)+"}")
except:
logger.info('HvacStatus:{' + who + '}{' + what + '}{' + str(oldValue) + '}{' + str(newValue)+"}")
def register_time_event(t):
if t.enabled:
logger.debug('Trigger:{' + t.name + '}{' + str(t.id) + '}{fired}')
evaluate_nuggets(t.id)
else:
logger.debug('Trigger:{' + t.name + '}{' + str(t.id) + '}{disabled}')
def is_anyone_home():
p = Person.objects.all()
for person in p:
if person.is_home:
return True
logger.debug('no one is home')
return False
def is_nugget_runable(nug):
if nug.enabled:
if not nug.only_execute_if_someone_is_home:
return True
if is_anyone_home():
return True
else:
logger.debug(nug.name + ' is disabled')
return False
def evaluate_nuggets(t_id):
nugs = Nugget.objects.filter(triggers=t_id)
for nug in nugs:
if is_nugget_runable(nug):
triggers = nug.triggers.all()
results = []
logger.debug('Evaluating: ' + nug.name + " ("+str(len(triggers))+" triggers)")
for t in triggers:
if t.id == t_id:
logger.debug(' TEST: ' + t.name + ' ' + str(t.id) + ' True')
results.append(True)
else:
if t.trigger == t.MOTION:
if t.motion_detector.source == 0:
try:
state = VivintDevice.objects.get(id=(t.motion_detector.source_id)).state
except ObjectDoesNotExist as e:
logger.error(e)
except:
logger.error("Error:"+ str(traceback.format_exc()))
else:
if state == 'Open':
state = True
elif state == 'Closed':
state = False
else:
state = False
results.append(state)
logger.debug(' TEST: ' + nug.name + ':Vivint:' + t.motion_detector.name + ' state ' + str(state))
elif t.motion_detector.source == 1:
try:
state = Sensor.objects.get(id=(t.motion_detector.source_id)).presence
except ObjectDoesNotExist as e:
logger.error(e)
except:
logger.error("Error:"+ str(traceback.format_exc()))
else:
logger.debug(' TEST: ' + nug.name + ':Hue:' + t.motion_detector.name + ' state ' + str(state))
results.append(state)
else:
logger.warning('There is no motion state lookup for source ' + str(t.motion_detector.source))
results.append(False)
elif t.trigger == t.WINDOW:
if t.window_start <= timezone.localtime().time() <= t.window_end:
logger.debug(" TEST: "+t.name + ' timeframe state True')
results.append(True)
else:
logger.debug(" TEST: "+t.name + ' timeframe state False')
results.append(False)
elif t.trigger == t.SCHEDULE:
logger.error('code has not been written for SCHEDULE trigger')
results.append(False)
elif t.trigger == t.SENSOR_OPENED:
logger.error('code has not been written for SENSOR_OPENED trigger')
results.append(False)
elif t.trigger == t.SENSOR_CLOSED:
logger.error('code has not been written for SENSOR_CLOSED trigger')
results.append(False)
elif t.trigger == t.LOCK_UNLOCKED:
logger.error('code has not been written for LOCK_UNLOCKED trigger')
results.append(False)
elif t.trigger == t.LOCK_LOCKED:
logger.error('code has not been written for LOCK_LOCKED trigger')
results.append(False)
elif t.trigger == t.HVAC_ACTIVITY:
logger.error('code has not been written for HVAC_ACTIVITY trigger')
results.append(False)
elif t.trigger == t.HVAC_FAN:
logger.error('code has not been written for HVAC_FAN trigger')
results.append(False)
elif t.trigger == t.HVAC_HITS_TEMP:
logger.error('code has not been written for HVAC_HITS_TEMP trigger')
results.append(False)
elif t.trigger == t.HVAC_HOLD:
logger.error('code has not been written for HVAC_HOLD trigger')
results.append(False)
elif t.trigger == t.HVAC_HEATMODE:
logger.error('code has not been written for HVAC_HEATMODE trigger')
results.append(False)
elif t.trigger == t.HVAC_FILTRLVL:
logger.error('code has not been written for HVAC_FILTRLVL trigger')
results.append(False)
elif t.trigger == t.HVAC_HUMLVL:
logger.error('code has not been written for HVAC_HUMLVL trigger')
results.append(False)
elif t.trigger == t.CUSTOM_EVENT:
logger.error('code has not been written for CUSTOM_EVENT trigger')
results.append(False)
elif t.trigger == t.SECURITY_ARMED_STATE:
try:
state = Panel.objects.get(id=(t.security_panel.id)).armed_state
except ObjectDoesNotExist as e:
logger.error(e)
except:
logger.error("Error:"+ | |
None:
stream_retval = RewriteRuleSubtreeStream(self._adaptor, "rule retval", retval.tree)
else:
stream_retval = RewriteRuleSubtreeStream(self._adaptor, "token retval", None)
root_0 = self._adaptor.nil()
# 218:36: -> set_func
self._adaptor.addChild(root_0, stream_set_func.nextTree())
retval.tree = root_0
retval.stop = self.input.LT(-1)
retval.tree = self._adaptor.rulePostProcessing(root_0)
self._adaptor.setTokenBoundaries(retval.tree, retval.start, retval.stop)
except RecognitionException as re:
self.reportError(re)
self.recover(self.input, re)
retval.tree = self._adaptor.errorNode(self.input, retval.start, self.input.LT(-1), re)
finally:
pass
return retval
# $ANTLR end "set_block"
class set_func_return(ParserRuleReturnScope):
def __init__(self):
super().__init__()
self.tree = None
# $ANTLR start "set_func"
# /home/tux/PycharmProjects/ProtoGen_public/Parser/ProtoCC.g:219:5: set_func : ID DOT set_function_types OBRACE ( set_nest )* CBRACE -> ^( SETFUNC_ ID DOT set_function_types OBRACE ( set_nest )* CBRACE ) ;
def set_func(self, ):
retval = self.set_func_return()
retval.start = self.input.LT(1)
root_0 = None
ID134 = None
DOT135 = None
OBRACE137 = None
CBRACE139 = None
set_function_types136 = None
set_nest138 = None
ID134_tree = None
DOT135_tree = None
OBRACE137_tree = None
CBRACE139_tree = None
stream_OBRACE = RewriteRuleTokenStream(self._adaptor, "token OBRACE")
stream_DOT = RewriteRuleTokenStream(self._adaptor, "token DOT")
stream_ID = RewriteRuleTokenStream(self._adaptor, "token ID")
stream_CBRACE = RewriteRuleTokenStream(self._adaptor, "token CBRACE")
stream_set_function_types = RewriteRuleSubtreeStream(self._adaptor, "rule set_function_types")
stream_set_nest = RewriteRuleSubtreeStream(self._adaptor, "rule set_nest")
try:
try:
# /home/tux/PycharmProjects/ProtoGen_public/Parser/ProtoCC.g:219:14: ( ID DOT set_function_types OBRACE ( set_nest )* CBRACE -> ^( SETFUNC_ ID DOT set_function_types OBRACE ( set_nest )* CBRACE ) )
# /home/tux/PycharmProjects/ProtoGen_public/Parser/ProtoCC.g:219:16: ID DOT set_function_types OBRACE ( set_nest )* CBRACE
pass
ID134 = self.match(self.input, ID, self.FOLLOW_ID_in_set_func1829)
stream_ID.add(ID134)
DOT135 = self.match(self.input, DOT, self.FOLLOW_DOT_in_set_func1831)
stream_DOT.add(DOT135)
self._state.following.append(self.FOLLOW_set_function_types_in_set_func1833)
set_function_types136 = self.set_function_types()
self._state.following.pop()
stream_set_function_types.add(set_function_types136.tree)
OBRACE137 = self.match(self.input, OBRACE, self.FOLLOW_OBRACE_in_set_func1835)
stream_OBRACE.add(OBRACE137)
# /home/tux/PycharmProjects/ProtoGen_public/Parser/ProtoCC.g:219:49: ( set_nest )*
while True: #loop21
alt21 = 2
LA21_0 = self.input.LA(1)
if (LA21_0 == ID) :
alt21 = 1
if alt21 == 1:
# /home/tux/PycharmProjects/ProtoGen_public/Parser/ProtoCC.g:219:49: set_nest
pass
self._state.following.append(self.FOLLOW_set_nest_in_set_func1837)
set_nest138 = self.set_nest()
self._state.following.pop()
stream_set_nest.add(set_nest138.tree)
else:
break #loop21
CBRACE139 = self.match(self.input, CBRACE, self.FOLLOW_CBRACE_in_set_func1840)
stream_CBRACE.add(CBRACE139)
# AST Rewrite
# elements: ID, DOT, set_function_types, OBRACE, set_nest, CBRACE
# token labels:
# rule labels: retval
# token list labels:
# rule list labels:
# wildcard labels:
retval.tree = root_0
if retval is not None:
stream_retval = RewriteRuleSubtreeStream(self._adaptor, "rule retval", retval.tree)
else:
stream_retval = RewriteRuleSubtreeStream(self._adaptor, "token retval", None)
root_0 = self._adaptor.nil()
# 219:66: -> ^( SETFUNC_ ID DOT set_function_types OBRACE ( set_nest )* CBRACE )
# /home/tux/PycharmProjects/ProtoGen_public/Parser/ProtoCC.g:220:9: ^( SETFUNC_ ID DOT set_function_types OBRACE ( set_nest )* CBRACE )
root_1 = self._adaptor.nil()
root_1 = self._adaptor.becomeRoot(
self._adaptor.createFromType(SETFUNC_, "SETFUNC_")
, root_1)
self._adaptor.addChild(root_1,
stream_ID.nextNode()
)
self._adaptor.addChild(root_1,
stream_DOT.nextNode()
)
self._adaptor.addChild(root_1, stream_set_function_types.nextTree())
self._adaptor.addChild(root_1,
stream_OBRACE.nextNode()
)
# /home/tux/PycharmProjects/ProtoGen_public/Parser/ProtoCC.g:220:53: ( set_nest )*
while stream_set_nest.hasNext():
self._adaptor.addChild(root_1, stream_set_nest.nextTree())
stream_set_nest.reset();
self._adaptor.addChild(root_1,
stream_CBRACE.nextNode()
)
self._adaptor.addChild(root_0, root_1)
retval.tree = root_0
retval.stop = self.input.LT(-1)
retval.tree = self._adaptor.rulePostProcessing(root_0)
self._adaptor.setTokenBoundaries(retval.tree, retval.start, retval.stop)
except RecognitionException as re:
self.reportError(re)
self.recover(self.input, re)
retval.tree = self._adaptor.errorNode(self.input, retval.start, self.input.LT(-1), re)
finally:
pass
return retval
# $ANTLR end "set_func"
class set_nest_return(ParserRuleReturnScope):
def __init__(self):
super().__init__()
self.tree = None
# $ANTLR start "set_nest"
# /home/tux/PycharmProjects/ProtoGen_public/Parser/ProtoCC.g:221:5: set_nest : ( set_func | object_expr );
def set_nest(self, ):
retval = self.set_nest_return()
retval.start = self.input.LT(1)
root_0 = None
set_func140 = None
object_expr141 = None
try:
try:
# /home/tux/PycharmProjects/ProtoGen_public/Parser/ProtoCC.g:221:14: ( set_func | object_expr )
alt22 = 2
LA22_0 = self.input.LA(1)
if (LA22_0 == ID) :
LA22_1 = self.input.LA(2)
if (LA22_1 == DOT) :
LA22_2 = self.input.LA(3)
if ((96 <= LA22_2 <= 100) or LA22_2 in {}) :
alt22 = 1
elif (LA22_2 in {ID, NID}) :
alt22 = 2
else:
nvae = NoViableAltException("", 22, 2, self.input)
raise nvae
elif (LA22_1 in {CBRACE, ID}) :
alt22 = 2
else:
nvae = NoViableAltException("", 22, 1, self.input)
raise nvae
else:
nvae = NoViableAltException("", 22, 0, self.input)
raise nvae
if alt22 == 1:
# /home/tux/PycharmProjects/ProtoGen_public/Parser/ProtoCC.g:221:16: set_func
pass
root_0 = self._adaptor.nil()
self._state.following.append(self.FOLLOW_set_func_in_set_nest1878)
set_func140 = self.set_func()
self._state.following.pop()
self._adaptor.addChild(root_0, set_func140.tree)
elif alt22 == 2:
# /home/tux/PycharmProjects/ProtoGen_public/Parser/ProtoCC.g:221:27: object_expr
pass
root_0 = self._adaptor.nil()
self._state.following.append(self.FOLLOW_object_expr_in_set_nest1882)
object_expr141 = self.object_expr()
self._state.following.pop()
self._adaptor.addChild(root_0, object_expr141.tree)
retval.stop = self.input.LT(-1)
retval.tree = self._adaptor.rulePostProcessing(root_0)
self._adaptor.setTokenBoundaries(retval.tree, retval.start, retval.stop)
except RecognitionException as re:
self.reportError(re)
self.recover(self.input, re)
retval.tree = self._adaptor.errorNode(self.input, retval.start, self.input.LT(-1), re)
finally:
pass
return retval
# $ANTLR end "set_nest"
class arch_block_return(ParserRuleReturnScope):
def __init__(self):
super().__init__()
self.tree = None
# $ANTLR start "arch_block"
# /home/tux/PycharmProjects/ProtoGen_public/Parser/ProtoCC.g:224:1: arch_block : ARCH ID OCBRACE arch_body CCBRACE -> ^( ARCH_ ^( MACHN_ ID ) arch_body ) ;
def arch_block(self, ):
retval = self.arch_block_return()
retval.start = self.input.LT(1)
root_0 = None
ARCH142 = None
ID143 = None
OCBRACE144 = None
CCBRACE146 = None
arch_body145 = None
ARCH142_tree = None
ID143_tree = None
OCBRACE144_tree = None
CCBRACE146_tree = None
stream_OCBRACE = RewriteRuleTokenStream(self._adaptor, "token OCBRACE")
stream_ARCH = RewriteRuleTokenStream(self._adaptor, "token ARCH")
stream_ID = RewriteRuleTokenStream(self._adaptor, "token ID")
stream_CCBRACE = RewriteRuleTokenStream(self._adaptor, "token CCBRACE")
stream_arch_body = RewriteRuleSubtreeStream(self._adaptor, "rule arch_body")
try:
try:
# /home/tux/PycharmProjects/ProtoGen_public/Parser/ProtoCC.g:224:12: ( ARCH ID OCBRACE arch_body CCBRACE -> ^( ARCH_ ^( MACHN_ ID ) arch_body ) )
# /home/tux/PycharmProjects/ProtoGen_public/Parser/ProtoCC.g:224:14: ARCH ID OCBRACE arch_body CCBRACE
pass
ARCH142 = self.match(self.input, ARCH, self.FOLLOW_ARCH_in_arch_block1892)
stream_ARCH.add(ARCH142)
ID143 = self.match(self.input, ID, self.FOLLOW_ID_in_arch_block1894)
stream_ID.add(ID143)
OCBRACE144 = self.match(self.input, OCBRACE, self.FOLLOW_OCBRACE_in_arch_block1896)
stream_OCBRACE.add(OCBRACE144)
self._state.following.append(self.FOLLOW_arch_body_in_arch_block1898)
arch_body145 = self.arch_body()
self._state.following.pop()
stream_arch_body.add(arch_body145.tree)
CCBRACE146 = self.match(self.input, CCBRACE, self.FOLLOW_CCBRACE_in_arch_block1900)
stream_CCBRACE.add(CCBRACE146)
# AST Rewrite
# elements: ID, arch_body
# token labels:
# rule labels: retval
# token list labels:
# rule list labels:
# wildcard labels:
retval.tree = root_0
if retval is not None:
stream_retval = RewriteRuleSubtreeStream(self._adaptor, "rule retval", retval.tree)
else:
stream_retval = RewriteRuleSubtreeStream(self._adaptor, "token retval", None)
root_0 = self._adaptor.nil()
# 224:48: -> ^( ARCH_ ^( MACHN_ ID ) arch_body )
# /home/tux/PycharmProjects/ProtoGen_public/Parser/ProtoCC.g:224:51: ^( ARCH_ ^( MACHN_ ID ) arch_body )
root_1 = self._adaptor.nil()
root_1 = self._adaptor.becomeRoot(
self._adaptor.createFromType(ARCH_, "ARCH_")
, root_1)
# /home/tux/PycharmProjects/ProtoGen_public/Parser/ProtoCC.g:224:59: ^( MACHN_ ID )
root_2 = self._adaptor.nil()
root_2 = self._adaptor.becomeRoot(
self._adaptor.createFromType(MACHN_, "MACHN_")
, root_2)
self._adaptor.addChild(root_2,
stream_ID.nextNode()
)
self._adaptor.addChild(root_1, root_2)
self._adaptor.addChild(root_1, stream_arch_body.nextTree())
self._adaptor.addChild(root_0, root_1)
retval.tree = root_0
retval.stop = self.input.LT(-1)
retval.tree = self._adaptor.rulePostProcessing(root_0)
self._adaptor.setTokenBoundaries(retval.tree, retval.start, retval.stop)
except RecognitionException as re:
self.reportError(re)
self.recover(self.input, re)
retval.tree = self._adaptor.errorNode(self.input, retval.start, self.input.LT(-1), re)
finally:
pass
return retval
# $ANTLR end "arch_block"
class arch_body_return(ParserRuleReturnScope):
def __init__(self):
super().__init__()
self.tree = None
# $ANTLR start "arch_body"
# /home/tux/PycharmProjects/ProtoGen_public/Parser/ProtoCC.g:226:1: arch_body : ( stable_def | process_block )* ;
def arch_body(self, ):
retval = self.arch_body_return()
retval.start = self.input.LT(1)
root_0 = None
stable_def147 = None
process_block148 = None
try:
try:
# /home/tux/PycharmProjects/ProtoGen_public/Parser/ProtoCC.g:226:10: ( ( stable_def | process_block )* )
# /home/tux/PycharmProjects/ProtoGen_public/Parser/ProtoCC.g:226:12: ( stable_def | process_block )*
pass
root_0 = self._adaptor.nil()
# /home/tux/PycharmProjects/ProtoGen_public/Parser/ProtoCC.g:226:12: ( stable_def | process_block )*
while True: #loop23
alt23 = 3
LA23_0 = self.input.LA(1)
if (LA23_0 == STABLE) :
alt23 = 1
elif (LA23_0 == PROC) :
alt23 = 2
if alt23 == 1:
# /home/tux/PycharmProjects/ProtoGen_public/Parser/ProtoCC.g:226:13: stable_def
pass
self._state.following.append(self.FOLLOW_stable_def_in_arch_body1922)
stable_def147 = self.stable_def()
self._state.following.pop()
self._adaptor.addChild(root_0, stable_def147.tree)
elif alt23 == 2:
# /home/tux/PycharmProjects/ProtoGen_public/Parser/ProtoCC.g:226:26: process_block
pass
self._state.following.append(self.FOLLOW_process_block_in_arch_body1926)
process_block148 = self.process_block()
self._state.following.pop()
self._adaptor.addChild(root_0, process_block148.tree)
else:
break #loop23
retval.stop = self.input.LT(-1)
retval.tree = self._adaptor.rulePostProcessing(root_0)
self._adaptor.setTokenBoundaries(retval.tree, retval.start, retval.stop)
except RecognitionException as re:
self.reportError(re)
self.recover(self.input, re)
retval.tree = self._adaptor.errorNode(self.input, retval.start, self.input.LT(-1), re)
finally:
pass
return retval
# $ANTLR end "arch_body"
class stable_def_return(ParserRuleReturnScope):
def __init__(self):
super().__init__()
self.tree = None
# $ANTLR start "stable_def"
# /home/tux/PycharmProjects/ProtoGen_public/Parser/ProtoCC.g:228:1: stable_def : STABLE OCBRACE ID ( COMMA ID )* CCBRACE -> ^( STABLE_ ID ( ID )* ) ;
def stable_def(self, ):
retval = self.stable_def_return()
retval.start = self.input.LT(1)
root_0 = None
STABLE149 = None
OCBRACE150 = None
ID151 = None
COMMA152 = None
ID153 = None
CCBRACE154 = None
STABLE149_tree = None
OCBRACE150_tree = None
ID151_tree = None
COMMA152_tree = None
ID153_tree = None
CCBRACE154_tree = None
stream_COMMA = RewriteRuleTokenStream(self._adaptor, "token COMMA")
stream_OCBRACE = RewriteRuleTokenStream(self._adaptor, "token OCBRACE")
stream_STABLE = RewriteRuleTokenStream(self._adaptor, "token STABLE")
stream_ID = RewriteRuleTokenStream(self._adaptor, "token ID")
stream_CCBRACE = RewriteRuleTokenStream(self._adaptor, "token CCBRACE")
try:
try:
# /home/tux/PycharmProjects/ProtoGen_public/Parser/ProtoCC.g:228:12: ( STABLE OCBRACE ID ( COMMA ID )* CCBRACE -> ^( STABLE_ ID ( ID )* ) )
# /home/tux/PycharmProjects/ProtoGen_public/Parser/ProtoCC.g:228:14: STABLE OCBRACE ID ( COMMA ID )* CCBRACE
pass
STABLE149 = self.match(self.input, STABLE, self.FOLLOW_STABLE_in_stable_def1936)
stream_STABLE.add(STABLE149)
OCBRACE150 = self.match(self.input, OCBRACE, self.FOLLOW_OCBRACE_in_stable_def1938)
stream_OCBRACE.add(OCBRACE150)
ID151 = self.match(self.input, ID, self.FOLLOW_ID_in_stable_def1940)
stream_ID.add(ID151)
# /home/tux/PycharmProjects/ProtoGen_public/Parser/ProtoCC.g:228:32: ( COMMA ID )*
while True: #loop24
alt24 = 2
LA24_0 = self.input.LA(1)
if (LA24_0 == COMMA) :
alt24 = 1
if alt24 == 1:
'Renaming Unit/Int Front End RAT/Subthreshold Leakage with power gating': 0.00248228,
'Renaming Unit/Peak Dynamic': 3.58947,
'Renaming Unit/Runtime Dynamic': 0.337767,
'Renaming Unit/Subthreshold Leakage': 0.0552466,
'Renaming Unit/Subthreshold Leakage with power gating': 0.0276461,
'Runtime Dynamic': 2.80515,
'Subthreshold Leakage': 6.16288,
'Subthreshold Leakage with power gating': 2.55328},
{'Area': 32.0201,
'Execution Unit/Area': 7.68434,
'Execution Unit/Complex ALUs/Area': 0.235435,
'Execution Unit/Complex ALUs/Gate Leakage': 0.0132646,
'Execution Unit/Complex ALUs/Peak Dynamic': 5.19579e-05,
'Execution Unit/Complex ALUs/Runtime Dynamic': 0.20273,
'Execution Unit/Complex ALUs/Subthreshold Leakage': 0.20111,
'Execution Unit/Complex ALUs/Subthreshold Leakage with power gating': 0.0754163,
'Execution Unit/Floating Point Units/Area': 4.6585,
'Execution Unit/Floating Point Units/Gate Leakage': 0.0656156,
'Execution Unit/Floating Point Units/Peak Dynamic': 0.00072612,
'Execution Unit/Floating Point Units/Runtime Dynamic': 0.304033,
'Execution Unit/Floating Point Units/Subthreshold Leakage': 0.994829,
'Execution Unit/Floating Point Units/Subthreshold Leakage with power gating': 0.373061,
'Execution Unit/Gate Leakage': 0.120359,
'Execution Unit/Instruction Scheduler/Area': 1.66526,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Area': 0.275653,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Gate Leakage': 0.000977433,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Peak Dynamic': 1.04181,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Runtime Dynamic': 0.0580421,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage': 0.0143453,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage with power gating': 0.00810519,
'Execution Unit/Instruction Scheduler/Gate Leakage': 0.00568913,
'Execution Unit/Instruction Scheduler/Instruction Window/Area': 0.805223,
'Execution Unit/Instruction Scheduler/Instruction Window/Gate Leakage': 0.00414562,
'Execution Unit/Instruction Scheduler/Instruction Window/Peak Dynamic': 1.6763,
'Execution Unit/Instruction Scheduler/Instruction Window/Runtime Dynamic': 0.0936197,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage': 0.0625755,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage with power gating': 0.0355964,
'Execution Unit/Instruction Scheduler/Peak Dynamic': 3.82262,
'Execution Unit/Instruction Scheduler/ROB/Area': 0.584388,
'Execution Unit/Instruction Scheduler/ROB/Gate Leakage': 0.00056608,
'Execution Unit/Instruction Scheduler/ROB/Peak Dynamic': 1.10451,
'Execution Unit/Instruction Scheduler/ROB/Runtime Dynamic': 0.0472561,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage': 0.00906853,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage with power gating': 0.00364446,
'Execution Unit/Instruction Scheduler/Runtime Dynamic': 0.198918,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage': 0.0859892,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage with power gating': 0.047346,
'Execution Unit/Integer ALUs/Area': 0.47087,
'Execution Unit/Integer ALUs/Gate Leakage': 0.0265291,
'Execution Unit/Integer ALUs/Peak Dynamic': 0.0662712,
'Execution Unit/Integer ALUs/Runtime Dynamic': 0.101344,
'Execution Unit/Integer ALUs/Subthreshold Leakage': 0.40222,
'Execution Unit/Integer ALUs/Subthreshold Leakage with power gating': 0.150833,
'Execution Unit/Peak Dynamic': 3.95341,
'Execution Unit/Register Files/Area': 0.570804,
'Execution Unit/Register Files/Floating Point RF/Area': 0.208131,
'Execution Unit/Register Files/Floating Point RF/Gate Leakage': 0.000232788,
'Execution Unit/Register Files/Floating Point RF/Peak Dynamic': 0.00013718,
'Execution Unit/Register Files/Floating Point RF/Runtime Dynamic': 0.00243454,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage': 0.00399698,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage with power gating': 0.00176968,
'Execution Unit/Register Files/Gate Leakage': 0.000622708,
'Execution Unit/Register Files/Integer RF/Area': 0.362673,
'Execution Unit/Register Files/Integer RF/Gate Leakage': 0.00038992,
'Execution Unit/Register Files/Integer RF/Peak Dynamic': 0.017606,
'Execution Unit/Register Files/Integer RF/Runtime Dynamic': 0.0180049,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage': 0.00614175,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage with power gating': 0.00246675,
'Execution Unit/Register Files/Peak Dynamic': 0.0177432,
'Execution Unit/Register Files/Runtime Dynamic': 0.0204395,
'Execution Unit/Register Files/Subthreshold Leakage': 0.0101387,
'Execution Unit/Register Files/Subthreshold Leakage with power gating': 0.00423643,
'Execution Unit/Results Broadcast Bus/Area Overhead': 0.0390912,
'Execution Unit/Results Broadcast Bus/Gate Leakage': 0.00537402,
'Execution Unit/Results Broadcast Bus/Peak Dynamic': 0.0371038,
'Execution Unit/Results Broadcast Bus/Runtime Dynamic': 0.0973803,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage': 0.081478,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage with power gating': 0.0305543,
'Execution Unit/Runtime Dynamic': 0.924844,
'Execution Unit/Subthreshold Leakage': 1.79543,
'Execution Unit/Subthreshold Leakage with power gating': 0.688821,
'Gate Leakage': 0.368936,
'Instruction Fetch Unit/Area': 5.85939,
'Instruction Fetch Unit/Branch Predictor/Area': 0.138516,
'Instruction Fetch Unit/Branch Predictor/Chooser/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Chooser/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Chooser/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Chooser/Runtime Dynamic': 0.000845929,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/Gate Leakage': 0.000757657,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Runtime Dynamic': 0.000845929,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Area': 0.0257064,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Gate Leakage': 0.000154548,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Peak Dynamic': 0.0142575,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Runtime Dynamic': 0.0007604,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage': 0.00384344,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage with power gating': 0.00198631,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Area': 0.0151917,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Gate Leakage': 8.00196e-05,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Peak Dynamic': 0.00527447,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Runtime Dynamic': 0.000307269,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage': 0.00181347,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage with power gating': 0.000957045,
'Instruction Fetch Unit/Branch Predictor/Peak Dynamic': 0.0597838,
'Instruction Fetch Unit/Branch Predictor/RAS/Area': 0.0105732,
'Instruction Fetch Unit/Branch Predictor/RAS/Gate Leakage': 4.63858e-05,
'Instruction Fetch Unit/Branch Predictor/RAS/Peak Dynamic': 0.0117602,
'Instruction Fetch Unit/Branch Predictor/RAS/Runtime Dynamic': 0.000258642,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage': 0.000932505,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage with power gating': 0.000494733,
'Instruction Fetch Unit/Branch Predictor/Runtime Dynamic': 0.0027109,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage': 0.0199703,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage with power gating': 0.0103282,
'Instruction Fetch Unit/Branch Target Buffer/Area': 0.64954,
'Instruction Fetch Unit/Branch Target Buffer/Gate Leakage': 0.00272758,
'Instruction Fetch Unit/Branch Target Buffer/Peak Dynamic': 0.177867,
'Instruction Fetch Unit/Branch Target Buffer/Runtime Dynamic': 0.0072676,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage': 0.0811682,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage with power gating': 0.0435357,
'Instruction Fetch Unit/Gate Leakage': 0.0589979,
'Instruction Fetch Unit/Instruction Buffer/Area': 0.0226323,
'Instruction Fetch Unit/Instruction Buffer/Gate Leakage': 6.83558e-05,
'Instruction Fetch Unit/Instruction Buffer/Peak Dynamic': 0.606827,
'Instruction Fetch Unit/Instruction Buffer/Runtime Dynamic': 0.0173086,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage': 0.00151885,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage with power gating': 0.000701682,
'Instruction Fetch Unit/Instruction Cache/Area': 3.14635,
'Instruction Fetch Unit/Instruction Cache/Gate Leakage': 0.029931,
'Instruction Fetch Unit/Instruction Cache/Peak Dynamic': 1.10098,
'Instruction Fetch Unit/Instruction Cache/Runtime Dynamic': 0.0680732,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage': 0.367022,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage with power gating': 0.180386,
'Instruction Fetch Unit/Instruction Decoder/Area': 1.85799,
'Instruction Fetch Unit/Instruction Decoder/Gate Leakage': 0.0222493,
'Instruction Fetch Unit/Instruction Decoder/Peak Dynamic': 1.37404,
'Instruction Fetch Unit/Instruction Decoder/Runtime Dynamic': 0.0587878,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage': 0.442943,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage with power gating': 0.166104,
'Instruction Fetch Unit/Peak Dynamic': 3.37292,
'Instruction Fetch Unit/Runtime Dynamic': 0.154148,
'Instruction Fetch Unit/Subthreshold Leakage': 0.932286,
'Instruction Fetch Unit/Subthreshold Leakage with power gating': 0.40843,
'L2/Area': 4.53318,
'L2/Gate Leakage': 0.015464,
'L2/Peak Dynamic': 0.0280726,
'L2/Runtime Dynamic': 0.00871061,
'L2/Subthreshold Leakage': 0.834142,
'L2/Subthreshold Leakage with power gating': 0.401066,
'Load Store Unit/Area': 8.80901,
'Load Store Unit/Data Cache/Area': 6.84535,
'Load Store Unit/Data Cache/Gate Leakage': 0.0279261,
'Load Store Unit/Data Cache/Peak Dynamic': 1.85339,
'Load Store Unit/Data Cache/Runtime Dynamic': 0.30892,
'Load Store Unit/Data Cache/Subthreshold Leakage': 0.527675,
'Load Store Unit/Data Cache/Subthreshold Leakage with power gating': 0.25085,
'Load Store Unit/Gate Leakage': 0.0350888,
'Load Store Unit/LoadQ/Area': 0.0836782,
'Load Store Unit/LoadQ/Gate Leakage': 0.00059896,
'Load Store Unit/LoadQ/Peak Dynamic': 0.0199378,
'Load Store Unit/LoadQ/Runtime Dynamic': 0.0199379,
'Load Store Unit/LoadQ/Subthreshold Leakage': 0.00941961,
'Load Store Unit/LoadQ/Subthreshold Leakage with power gating': 0.00536918,
'Load Store Unit/Peak Dynamic': 1.94754,
'Load Store Unit/Runtime Dynamic': 0.427185,
'Load Store Unit/StoreQ/Area': 0.322079,
'Load Store Unit/StoreQ/Gate Leakage': 0.00329971,
'Load Store Unit/StoreQ/Peak Dynamic': 0.0491633,
'Load Store Unit/StoreQ/Runtime Dynamic': 0.0983271,
'Load Store Unit/StoreQ/Subthreshold Leakage': 0.0345621,
'Load Store Unit/StoreQ/Subthreshold Leakage with power gating': 0.0197004,
'Load Store Unit/Subthreshold Leakage': 0.591321,
'Load Store Unit/Subthreshold Leakage with power gating': 0.283293,
'Memory Management Unit/Area': 0.4339,
'Memory Management Unit/Dtlb/Area': 0.0879726,
'Memory Management Unit/Dtlb/Gate Leakage': 0.00088729,
'Memory Management Unit/Dtlb/Peak Dynamic': 0.0174482,
'Memory Management Unit/Dtlb/Runtime Dynamic': 0.0178557,
'Memory Management Unit/Dtlb/Subthreshold Leakage': 0.0155699,
'Memory Management Unit/Dtlb/Subthreshold Leakage with power gating': 0.00887485,
'Memory Management Unit/Gate Leakage': 0.00808595,
'Memory Management Unit/Itlb/Area': 0.301552,
'Memory Management Unit/Itlb/Gate Leakage': 0.00393464,
'Memory Management Unit/Itlb/Peak Dynamic': 0.0684547,
'Memory Management Unit/Itlb/Runtime Dynamic': 0.0112016,
'Memory Management Unit/Itlb/Subthreshold Leakage': 0.0413758,
'Memory Management Unit/Itlb/Subthreshold Leakage with power gating': 0.0235842,
'Memory Management Unit/Peak Dynamic': 0.254536,
'Memory Management Unit/Runtime Dynamic': 0.0290573,
'Memory Management Unit/Subthreshold Leakage': 0.0766103,
'Memory Management Unit/Subthreshold Leakage with power gating': 0.0398333,
'Peak Dynamic': 13.146,
'Renaming Unit/Area': 0.303608,
'Renaming Unit/FP Front End RAT/Area': 0.131045,
'Renaming Unit/FP Front End RAT/Gate Leakage': 0.00351123,
'Renaming Unit/FP Front End RAT/Peak Dynamic': 2.51468,
'Renaming Unit/FP Front End RAT/Runtime Dynamic': 0.000360828,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage': 0.0308571,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage with power gating': 0.0175885,
'Renaming Unit/Free List/Area': 0.0340654,
'Renaming Unit/Free List/Gate Leakage': 2.5481e-05,
'Renaming Unit/Free List/Peak Dynamic': 0.0306032,
'Renaming Unit/Free List/Runtime Dynamic': 0.00262309,
'Renaming Unit/Free List/Subthreshold Leakage': 0.000370144,
'Renaming Unit/Free List/Subthreshold Leakage with power gating': 0.000201064,
'Renaming Unit/Gate Leakage': 0.00708398,
'Renaming Unit/Int Front End RAT/Area': 0.0941223,
'Renaming Unit/Int Front End RAT/Gate Leakage': 0.000283242,
'Renaming Unit/Int Front End RAT/Peak Dynamic': 0.731965,
'Renaming Unit/Int Front End RAT/Runtime Dynamic': 0.0294031,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage': 0.00435488,
'Renaming
#!/usr/local/bin/python3
import sys
import re
import os
import copy
import pickle
if len(sys.argv) < 2:
sys.exit('./callMom.py <reference> <fasta>')
### CLASS DEFINITIONS
# Site variables: id, pos, end, ref, alt, len, out, vtype, maxlen
# create site class
class Site():
def __init__(self, a, b, c, d):
self.id = a
self.pos = int(b)
self.end = int(b)
self.ref = c
self.alt = d
self.len = 1
self.out = 0
self.vtype = ''
self.maxlen = 1
def printsite(self):
self.updatesite()
out = self.id + '\t' + str(self.pos) + '\t' + str(self.end) + '\t' + self.ref + '\t' + self.alt + '\t' + str(
self.len) + '\t' + str(self.maxlen) + '\t' + self.vtype + '\t' + str(self.out)
return out
def updatesite(self):
self.len = len(self.alt)
self.end = self.pos + self.len -1
# create object for storing all variant alleles at a position in refseq
class Var():
def __init__(self, a):
self.pos = int(a)
self.len = []
self.ref = []
self.alt = []
self.altcode = []
self.ac = []
self.vtype = []
self.maxlen = 0
self.hasvar = {}
# extend ref/alt by 1 bp upstream
def addbp(self, r):
for i in range(len(self.ref)):
self.ref[i] = r[self.pos - 1] + self.ref[i]
for i in range(len(self.alt)):
self.alt[i] = r[self.pos - 1] + self.alt[i]
self.len[i] = len(self.alt[i])
if self.len[i] > self.maxlen:
self.maxlen = self.len[i]
self.vtype = 'M'
# function for outputting a string of the object contents
# varfrqout1.write("pos\tref\talt\tlen\tac\tvtype\thas\n")
def printvar(self):
out = str(self.pos)
tref = []
for a1 in self.ref:
tref.append(a1)
out = out + '\t' + ','.join(tref)
talt = []
for a2 in self.alt:
talt.append(a2)
out = out + '\t' + ','.join(talt)
tlen = []
for a3 in self.len:
tlen.append(str(a3))
out = out + '\t' + ','.join(tlen)
tac = []
for a4 in self.ac:
tac.append(str(a4))
out = out + '\t' + ','.join(tac)
tvtype = []
for a5 in self.vtype:
tvtype.append(str(a5))
out = out + '\t' + ','.join(tvtype)
thasvar = []
done = []
for a6 in range(len(self.ref)):
for a7 in range(len(self.alt)):
k = self.ref[a6] + ':' + self.alt[a7]
if k in self.hasvar.keys() and k not in done:
thasvar.append(self.ref[a6] + ':' + self.alt[a7] + ':' + ','.join(
self.hasvar[self.ref[a6] + ':' + self.alt[a7]]))
done.append(k)
out = out + '\t' + '\t'.join(thasvar)
return out
def vcf(v, ids):
# combine genotypes into a string
gtar = []
# loop through ids
for i in ids:
hasalt = 0
# loop through alts
for a in range(len(v.alt)):
k = v.ref[0] + ':' + v.alt[a]
if i in v.hasvar[k]:
# individual has this alt allele
# add genotype code
gtcode = a + 1
gtar.append(str(gtcode))
hasalt = 1
if hasalt == 0:
gtar.append('0')
gtstr = '\t'.join(gtar)
# put together string of ac
acar = []
for a in v.ac:
acar.append(str(a))
acstr = ','.join(acar)
# compile final output string
out = '\t'.join(['MT', str(v.pos), '.', v.ref[0], ','.join(v.alt), '100', 'fa',
'VT=' + ','.join(v.vtype) + ';' + 'AC=' + acstr, 'GT', gtstr])
return out
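# Illustrative example (made-up values) of the tab-separated, VCF-style line vcf()
# returns for a SNP at position 152 carried by two of four samples:
#
#     MT  152  .  T  C  100  fa  VT=S;AC=2  GT  1  0  1  0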
# check for errors
def err(v1):
if len(set(v1.alt)) != len(v1.alt) or len(set(v1.ref)) != len(v1.ref):
return 1
return 0
# check if two variants 1 bp away are the same indel
def samevar(v1, v2):
for vt1 in range(len(v1.vtype)):
for vt2 in range(len(v2.vtype)):
if v1.vtype[vt1] == 'I' and v2.vtype[vt2] == 'I':
return 1
if v1.vtype[vt1] == 'M' and v2.vtype[vt2] == 'M':
return 1
# lengthen snp alt when overlapping indel
def updatealt(self):
if len(self.alt) > 1:
# multiple alternate alleles
if 'I' not in set(self.vtype) and 'M' not in set(self.vtype):
# multiple SNP alleles
self.ref = list(set(self.ref))
if 'I' in set(self.vtype) or 'M' in set(self.vtype):
# contains complex variants
# get longest ref
maxlen = 0
maxref = ''
for r in self.ref:
if len(r) > maxlen:
maxref = r
maxlen = len(r)
self.maxlen = maxlen
# store the original variant
ogv = copy.deepcopy(self)
# set the new ref
self.ref = []
self.ref.append(maxref)
# loop through alts
for i in range(len(self.alt)):
# add bases to the alt for snps
if 'S' in self.vtype[i]:
j = len(self.alt[i])
while len(self.alt[i]) < len(self.ref[0]):
self.alt[i] = self.alt[i] + list(self.ref[0])[j]
j = j + 1
# add bases to the alt for mnps
if 'M' in self.vtype[i]:
j = len(self.alt[i])
while len(self.alt[i]) < len(self.ref[0]):
self.alt[i] = self.alt[i] + list(self.ref[0])[j]
j = j + 1
# add bases to the alt for indels
if 'I' in self.vtype[i]:
# no need to edit when ref len is same
if len(self.ref[0]) != len(ogv.ref[i]):
# add bases to alt
j = len(ogv.ref[i])
while j < len(self.ref[0]):
self.alt[i] = self.alt[i] + list(self.ref[0])[j]
j = j + 1
# update all the hasvar keys
temp = {}
for i in range(len(ogv.alt)):
k0 = ogv.ref[i] + ':' + ogv.alt[i]
k1 = self.ref[0] + ':' + self.alt[i]
if k1 not in temp.keys():
temp[k1] = self.hasvar[k0]
self.hasvar = temp
# sort alt alleles in order of frequency
# assign numbers to the alleles
# self.pos=int(a)
# self.len=[]
# self.ref=[]
# self.alt=[]
# self.altcode=[]
# self.ac=[]
# self.vtype=[]
# self.maxlen=0
# self.hasvar={}
def sortalt(self):
# create a duplicate variant
ogv = copy.deepcopy(self)
# only needed if n>1
if len(self.ac) > 1:
# store the unsorted list
usoac = copy.deepcopy(self.ac)
# sort the list
soac = copy.deepcopy(self.ac)
soac = sorted(soac, reverse=True)
# update the sorted list in self
self.ac.sort(reverse=True)
# reset the other items to be sorted
self.alt = []
self.len = []
self.vtype = []
# loop through new ac order
for i in range(len(soac)):
# pop the first item
ac = soac.pop(0)
# get first index of item with ac
ui = usoac.index(ac)
# clear the value
usoac[ui] = ''
# update other lists
self.alt.append(ogv.alt[ui])
self.len.append(ogv.len[ui])
self.vtype.append(ogv.vtype[ui])
### OPERATION CODE
# STEP 1
# read in reference sequence to list
refseq = ['>']
f1 = open(sys.argv[1], 'r')
for l in f1:
l = l.rstrip()
if '>' not in l:
for b in l:
refseq.append(b.upper())
f1.close()
bases = ['A', 'C', 'G', 'T']
# STEP 2
# identify all variant bases
# store in database indexed by id then position
initvar = open('cmo.0.initvar.txt', 'w')
ids = []
vars = {}
curid = ''
f2 = open(sys.argv[2], 'r')
acgtn = ['A', 'C', 'G', 'T', 'N']
indel = ['-']
fastas = {}
for l in f2:
l = l.rstrip()
# read the sequence
if '>' not in l:
seq = '>' + l
fastas[curid]=seq
# loop through fasta file
# check if variant in individual
p = 0
while p < len(seq):
# matches reference, continue
if refseq[p] == seq[p].upper():
p = p + 1
continue
# does not match reference, variant
if refseq[p] != seq[p].upper():
# record variant site
initvar.write(curid + '\t' + str(p) + '\n')
# is it a SNP or MNP?
if seq[p].upper() in acgtn:
                    # initialize variant record for id at position
vars[curid][p] = ''
q = p
# check all consecutive positions that differ
done = 0
while refseq[q] != seq[q].upper() and done == 0:
if seq[q].upper() in acgtn:
# store the variant position and base
vars[curid][p] = vars[curid][p] + seq[q].upper()
# go to the next base
q = q + 1
if seq[q].upper() not in acgtn:
done = 1
p = q
# found a deletion
if seq[p] in indel:
vars[curid][p] = ''
q = p
# check all consecutive positions that differ
done = 0
while refseq[q] != seq[q].upper() and done == 0:
if seq[q].upper() in indel:
# store the variant position and base
vars[curid][p] = vars[curid][p] + seq[q].upper()
q = q + 1
if seq[q].upper() not in indel:
done = 1
p = q
if seq[p] not in indel and seq[p].upper() not in acgtn:
# skip these sites
p = p + 1
# read the id
if '>' in l:
l = l.replace('>', '')
ids.append(l)
# initialize database of variants
vars[l] = {}
curid = l
f2.close()
initvar.close()
# DATA
# vars[id][position] contains list of variants for each individual
# STEP 3
# populate list of all individual variant alleles
sites = []
# loop through ids
for i in ids:
# loop through variant sites in this individual
waveform_size, n_spikes = wf.shape
# upsample using cubic interpolation
x = np.linspace(0, waveform_size - 1, num=waveform_size, endpoint=True)
shifts = np.linspace(0, 1, upsample_factor, endpoint=False)
xnew = np.sort(np.reshape(x[:, np.newaxis] + shifts, -1))
wfs_upsampled = np.zeros((waveform_size * upsample_factor, n_spikes))
# compute template and interpolate it
template = np.mean(wf,axis=1)
ff = interp1d(x, template, kind='cubic')
idx_good = np.logical_and(xnew >= 0, xnew <= waveform_size - 1)
template_upsampled = ff(xnew[idx_good])
return template_upsampled
def shift_template(template_upsampled, n_shifts, window):
    ''' Select n_shifts versions of the template, shifting from
        -n_shifts/2 to +n_shifts/2 in the original waveform
Cat TODO: this should be done pythonically
'''
temp_array = []
for s in range(-n_shifts//2, n_shifts//2, 1):
temp_array.append(template_upsampled[template_upsampled.shape[0]//2-window+s:
template_upsampled.shape[0]//2+window+s])
return np.array(temp_array)
def return_shifts(wfs_upsampled, template_shifted, window):
shift_array = []
out_array = []
waveform_len = wfs_upsampled.shape[0]
for k in range(wfs_upsampled.shape[1]):
temp = np.matmul(wfs_upsampled[waveform_len//2-window:waveform_len//2+window,k],
template_shifted.T)
shift_array.append(np.argmax(temp))
return np.array(shift_array) #, out_array
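# Hedged usage sketch (shift/window values chosen only for illustration):
# shift_template builds a bank of windowed, shifted copies of the upsampled
# template, and return_shifts picks, per waveform, the shift whose copy gives the
# largest dot product around the waveform centre.
#   shifted_bank = shift_template(template_upsampled, n_shifts=31, window=40)
#   best_shift_idx = return_shifts(wfs_upsampled, shifted_bank, window=40)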
# PCA function: returns the PCA projection and the reconstructed data
def PCA(X, n_components):
from sklearn import decomposition
pca = decomposition.PCA(n_components)
pca.fit(X)
X = pca.transform(X)
Y = pca.inverse_transform(X)
return X, Y, pca
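# Minimal usage sketch (input name and shape are assumptions: one row per
# waveform): the helper returns the projected features, the reconstruction in the
# original space, and the fitted sklearn PCA object.
#   feats, recon, pca_obj = PCA(wf_flat, n_components=3)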
def align_channelwise3(wf, upsample_factor = 20, nshifts = 7):
wf_up = upsample_resample(wf.T, upsample_factor)
wlen = wf_up.shape[1]
wf_start = int(.2 * (wlen-1))
wf_end = -int(.3 * (wlen-1))
wf_trunc = wf_up[:,wf_start:wf_end]
wlen_trunc = wf_trunc.shape[1]
#if type(ref) == 'ndarray':
# ref_upsampled = upsample_resample(ref,20)
#else:
ref_upsampled = wf_up.mean(0)
ref_shifted = np.zeros([wf_trunc.shape[1], nshifts])
for i,s in enumerate(range(-int((nshifts-1)/2), int((nshifts-1)/2+1))):
ref_shifted[:,i] = ref_upsampled[s+ wf_start: s+ wf_end]
bs_indices = np.matmul(wf_trunc[:,np.newaxis,:], ref_shifted).squeeze(1).argmax(1)
best_shifts = (np.arange(-int((nshifts-1)/2), int((nshifts-1)/2+1)))[bs_indices]
wf_final = np.zeros([wf.shape[0], (wlen-1)//2 +1])
for i,s in enumerate(best_shifts):
wf_final[i] = wf_up[i,-s+ wf_start: -s+ wf_end]
return wf_final[:,::upsample_factor]
def align_mc_templates(wf, mc, spike_padding, upsample_factor = 5,
nshifts = 15):
''' Align all waveforms to the master channel
wf = selected waveform matrix (# spikes, # samples, # featchans)
        mc = maximum channel from featchans; usually first channel, i.e. 0
'''
# convert nshifts from timesamples to #of times in upsample_factor
nshifts = (nshifts*upsample_factor)
if nshifts%2==0:
nshifts+=1
# or loop over every channel and parallelize each channel:
wf_up = []
for k in range(wf.shape[2]):
#print ("aligning : ", k)
wf_up.append(upsample_resample_parallel_channel(wf[:,:,k], upsample_factor))
wf_up = np.array(wf_up).swapaxes(1,2).swapaxes(0,2)
wlen = wf_up.shape[1]
wf_start = spike_padding*upsample_factor
wf_end = -spike_padding*upsample_factor
wf_trunc = wf_up[:,wf_start:wf_end]
wlen_trunc = wf_trunc.shape[1]
ref_upsampled = wf_up[:,:,mc].mean(0)
ref_shifted = np.zeros([wf_trunc.shape[1], nshifts])
for i,s in enumerate(range(-int((nshifts-1)/2), int((nshifts-1)/2+1))):
ref_shifted[:,i] = ref_upsampled[s+ wf_start: s+ wf_end]
bs_indices = np.matmul(wf_trunc[:,np.newaxis,:, mc], ref_shifted).squeeze(1).argmax(1)
best_shifts = (np.arange(-int((nshifts-1)/2), int((nshifts-1)/2+1)))[bs_indices]
wf_final = np.zeros([wf.shape[0],wlen_trunc, wf.shape[2]])
for i,s in enumerate(best_shifts):
wf_final[i] = wf_up[i,-s+ wf_start: -s+ wf_end][:, np.arange(wf.shape[2])]
# plot original waveforms
#print ("Plotting align_mc")
#ax = plt.subplot(131)
#plt.plot(wf[:1000,:,mc].T,alpha=0.1)
## plot aligned waveforms
#ax = plt.subplot(132)
#plt.plot(wf_final[:1000,::upsample_factor,mc].T, alpha=0.1)
#plt.savefig('/media/cat/1TB/liam/49channels/data1_allset/tmp/cluster/chunk_000000/channel_31_aligning.png')
#quit()
return wf_final[:,::upsample_factor]
def align_mc_cumulative(wf, mc, CONFIG, upsample_factor = 20, nshifts = 7,
ref = None):
''' Align all waveforms to the master channel
wf = selected waveform matrix (# spikes, # samples, # featchans)
        mc = maximum channel from featchans; usually first channel, i.e. 0
'''
# convert nshifts from timesamples to #of times in upsample_factor
nshifts = (nshifts*upsample_factor)
if nshifts%2==0:
nshifts+=1
# or loop over every channel and parallelize each channel:
wf_up = []
for k in range(wf.shape[2]):
#print (" upsampling chan (parallel): ", k)
wf_up.append(upsample_resample_parallel_channel(wf[:,:,k], upsample_factor))
wf_up = np.array(wf_up).swapaxes(1,2).swapaxes(0,2)
#print ('wf_upsampled: ', wf_up.shape)
wlen = wf_up.shape[1]
wf_start = int(.2 * (wlen-1))
wf_end = -int(.3 * (wlen-1))
wf_trunc = wf_up[:,wf_start:wf_end]
wlen_trunc = wf_trunc.shape[1]
if ref is not None:
ref_upsampled = upsample_resample(ref[:,np.newaxis],upsample_factor)[0]
else:
ref_upsampled = wf_up[:,:,mc].mean(0)
ref_shifted = np.zeros([wf_trunc.shape[1], nshifts])
#print (ref_shifted.shape)
for i,s in enumerate(range(-int((nshifts-1)/2), int((nshifts-1)/2+1))):
ref_shifted[:,i] = ref_upsampled[s+ wf_start: s+ wf_end]
bs_indices = np.matmul(wf_trunc[:,np.newaxis,:, mc], ref_shifted).squeeze(1).argmax(1)
best_shifts = (np.arange(-int((nshifts-1)/2), int((nshifts-1)/2+1)))[bs_indices]
wf_final = np.zeros([wf.shape[0],wlen_trunc, wf.shape[2]])
for i,s in enumerate(best_shifts):
wf_final[i] = wf_up[i,-s+ wf_start: -s+ wf_end][:, np.arange(wf.shape[2])]
return wf_final[:,::upsample_factor]
def upsample_resample(wf, upsample_factor):
waveform_len, n_spikes = wf.shape
traces = np.zeros((n_spikes, (waveform_len-1)*upsample_factor+1),'float32')
for j in range(n_spikes):
traces[j] = signal.resample(wf[:,j],(waveform_len-1)*upsample_factor+1)
return traces
def upsample_resample_parallel(wf, upsample_factor):
waveform_len, n_spikes = wf.shape
traces = np.zeros((n_spikes, (waveform_len-1)*upsample_factor+1),'float32')
for j in range(n_spikes):
traces[j] = signal.resample(wf[:,j],(waveform_len-1)*upsample_factor+1)
return traces
def upsample_resample_parallel_channel(wf, upsample_factor):
n_spikes, _ = wf.shape
    # don't parallelize alignment - unless it seems ok otherwise
    # Cat: TODO: can we parallelize recursively and do this?
wf_up = upsample_parallel(wf, upsample_factor)
return wf_up
def shift_chans(wf, best_shifts, CONFIG):
# use template feat_channel shifts to interpolate shift of all spikes on all other chans
    # Cat: TODO read this from CONFIG
upsample_factor = 5.
wf_shifted = []
all_shifts = best_shifts/upsample_factor
wfs_final=[]
for k, shift_ in enumerate(all_shifts):
if int(shift_)==shift_:
ceil = int(shift_)
temp = np.roll(wf[k],ceil,axis=0)
else:
ceil = int(math.ceil(shift_))
floor = int(math.floor(shift_))
temp = np.roll(wf[k],ceil,axis=0)*(shift_-floor)+np.roll(wf[k],floor, axis=0)*(ceil-shift_)
wfs_final.append(temp)
wf_shifted = np.array(wfs_final)
return wf_shifted
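# Example of the fractional-shift interpolation above (value chosen for
# illustration): a shift of 1.25 samples becomes a weighted sum of the waveform
# rolled by ceil(1.25)=2 and floor(1.25)=1 samples:
#   np.roll(w, 2, axis=0) * (1.25 - 1.0) + np.roll(w, 1, axis=0) * (2.0 - 1.25)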
def align_get_shifts(wf, CONFIG, upsample_factor = 5, nshifts = 15):
''' Align all waveforms on a single channel
wf = selected waveform matrix (# spikes, # samples)
max_channel: is the last channel provided in wf
Returns: superresolution shifts required to align all waveforms
- used downstream for linear interpolation alignment
'''
# convert nshifts from timesamples to #of times in upsample_factor
nshifts = (nshifts*upsample_factor)
if nshifts%2==0:
nshifts+=1
# or loop over every channel and parallelize each channel:
#wf_up = []
wf_up = upsample_resample_parallel_channel(wf, upsample_factor)
wlen = wf_up.shape[1]
wf_start = int(.2 * (wlen-1))
wf_end = -int(.3 * (wlen-1))
wf_trunc = wf_up[:,wf_start:wf_end]
wlen_trunc = wf_trunc.shape[1]
    # align to last channel, which is the largest-amplitude channel appended
ref_upsampled = wf_up.mean(0)
ref_shifted = np.zeros([wf_trunc.shape[1], nshifts])
for i,s in enumerate(range(-int((nshifts-1)/2), int((nshifts-1)/2+1))):
ref_shifted[:,i] = ref_upsampled[s+ wf_start: s+ wf_end]
bs_indices = np.matmul(wf_trunc[:,np.newaxis], ref_shifted).squeeze(1).argmax(1)
best_shifts = (np.arange(-int((nshifts-1)/2), int((nshifts-1)/2+1)))[bs_indices]
return best_shifts
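# Usage sketch: align_get_shifts and shift_chans are chained in the clustering
# routine further down - shifts are estimated on the max-amplitude channel only,
# then applied to all channels of the same spikes.
#   mc = wf.mean(0).ptp(0).argmax(0)
#   best_shifts = align_get_shifts(wf[:, :, mc], CONFIG)
#   wf_aligned = shift_chans(wf, best_shifts, CONFIG)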
def upsample_parallel(wf, upsample_factor):
wf = wf.T
waveform_len, n_spikes = wf.shape
traces = np.zeros((n_spikes, (waveform_len-1)*upsample_factor+1),'float32')
for j in range(wf.shape[1]):
traces[j] = signal.resample(wf[:,j],(waveform_len-1)*upsample_factor+1)
return traces
def RRR3_noregress_recovery_dynamic_features(channel, current_indexes, gen, fig,
grid, x, ax_t, triageflag, alignflag, plotting, n_feat_chans,
n_dim_pca, wf_start, wf_end, mfm_threshold, CONFIG,
upsample_factor, nshifts, assignment_global, spike_index,
scale, knn_triage_threshold, deconv_flag, templates,
min_spikes_local, active_chans=None):
    ''' Recursive clustering function
        channel: current channel being clustered
wf = wf_PCA: denoised waveforms (# spikes, # time points, # chans)
sic = spike_indexes of spikes on current channel
gen = generation of cluster; increases with each clustering step
'''
# Cat: TODO read from CONFIG File
verbose=True
# load correct waveforms from disk
#wf = np.load(wf_fname)[current_indexes]
wf = wf_global[current_indexes]
# ************* CHECK SMALL CLUSTERS *************
# Exit clusters that are too small
if wf.shape[0] < CONFIG.cluster.min_spikes:
return
if verbose:
print("chan/unit "+str(channel)+' gen: '+str(gen)+' # spikes: '+
str(wf.shape[0]))
''' *************************************************
** ALIGN ALL CHANS TO MAX CHAN - LINEAR INTERP **
*************************************************
'''
# align, note: aligning all channels to max chan which is appended to the end
# note: max chan is first from feat_chans above, ensure order is preserved
if alignflag:
if verbose:
print ("chan "+str(channel)+' gen: '+str(gen)+" - aligning")
mc = wf.mean(0).ptp(0).argmax(0)
best_shifts = align_get_shifts(wf[:,:,mc], CONFIG)
wf_align = shift_chans(wf, best_shifts, CONFIG)
else:
wf_align = wf
''' *************************************************
************** ACTIVE CHAN SELECTION ************
*************************************************
'''
active_chans_flag = False
if active_chans_flag and gen == 0:
stds = np.median(np.abs(wf - np.median(wf_align, axis=0, keepdims=True)), axis=0)*1.4826
active_chans = np.where(stds.max(0) > 1.05)[0]
neighbors = n_steps_neigh_channels(CONFIG.neigh_channels, 1)
active_chans = np.hstack((active_chans, np.where(neighbors[channel])[0]))
active_chans = np.where(connected_channels(active_chans, channel, neighbors))[0]
#def plot_with_geom(data, geom, time_scale=0.5, scale=10, color='k', mark_channels=None):
# t, c = data.shape
# plt.plot(geom[:,0]+np.arange(-data.shape[0],0)[:,np.newaxis]/time_scale,
# geom[:,1] + data*scale, color=color, alpha=.8)
# if mark_channels is not None:
# plt.scatter(geom[mark_channels,0], geom[mark_channels,1], s=scale*10, color='green')
# for j in range(c):
# plt.text(geom[j,0], geom[j,1], str(j))
#chunk_dir = CONFIG.data.root_folder+"/tmp/cluster/chunk_000000"
#plt.figure(figsize=(30,20))
#plot_with_geom(stds, CONFIG.geom, time_scale=2, scale=10, color='k', mark_channels=active_chans)
#plt.savefig(chunk_dir+"/channel_{}_active_chans.png".format(channel))
else:
active_chans = np.arange(wf_align.shape[2])
# Cat: TODO: so we force all subsequent generations to use gen0 alignment
#wf=wf_align
''' ************************************************
****** FIND FEATURE CHANNELS & FEATURIZE *******
************************************************
'''
if verbose:
print("chan/unit "+str(channel)+' gen: '+str(gen)+' getting feat chans')
# Cat: TODO: is 10k spikes enough?
    # Cat: TODO: what do these metrics look like for 100 spikes!?; should we simplify for
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import copy
import six
import warnings
import logging
import numpy as np
from ..utils import (apply_to_dict_recursively, sanitize_np,
format_time as _format_time, transpose as _transpose)
from event_model import MismatchedDataKeys
logger = logging.getLogger(__name__)
# singletons defined as they are defined in pymongo
ASCENDING = 1
DESCENDING = -1
def _format_regex(d):
for k, v in six.iteritems(d):
if k == '$regex':
            # format the regex for mongoquery
d[k] = '/{0}/'.format(v)
else:
# recurse if v is a dict
if hasattr(v, 'items'):
_format_regex(v)
class NoRunStop(Exception):
pass
class NoRunStart(Exception):
pass
class NoEventDescriptors(Exception):
pass
def doc_or_uid_to_uid(doc_or_uid):
"""Given Document or uid return the uid
Parameters
----------
doc_or_uid : dict or str
If str, then assume uid and pass through, if not, return
the 'uid' field
Returns
-------
uid : str
A string version of the uid of the given document
"""
if not isinstance(doc_or_uid, six.string_types):
doc_or_uid = doc_or_uid['uid']
return doc_or_uid
def _cache_run_start(run_start, run_start_cache):
"""Cache a RunStart document
Parameters
----------
run_start : dict
raw pymongo dictionary. This is expected to have
an entry `_id` with the ObjectId used by mongo.
run_start_cache : dict
Dict[str, Document]
Returns
-------
run_start : dict
Document instance for this RunStart document.
The ObjectId has been stripped.
"""
run_start = dict(run_start)
# TODO actually do this de-reference for documents that have it
    # There is no known actual usage of this document and it is not being
# created going forward
run_start.pop('beamline_config_id', None)
# get the mongo ObjectID
oid = run_start.pop('_id', None)
run_start_cache[run_start['uid']] = run_start
run_start_cache[oid] = run_start
return run_start
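# Hedged example (toy values, not from the original module): the cache is a plain
# dict keyed by both the uid string and the mongo ObjectId, so later lookups by
# either key skip the database round trip.
#   cache = {}
#   oid = object()  # stand-in for a bson ObjectId
#   doc = _cache_run_start({'_id': oid, 'uid': 'abc', 'time': 0.0}, cache)
#   assert cache['abc'] is doc and cache[oid] is doc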
def _cache_run_stop(run_stop, run_stop_cache):
"""Cache a RunStop document
Parameters
----------
run_stop : dict
raw pymongo dictionary. This is expected to have
an entry `_id` with the ObjectId used by mongo.
run_stop_cache : dict
Dict[str, Document]
Returns
-------
run_stop : dict
Document instance for this RunStop document.
The ObjectId (if it exists) has been stripped.
"""
run_stop = dict(run_stop)
# pop off the ObjectId of this document
oid = run_stop.pop('_id', None)
try:
run_stop['run_start']
except KeyError:
run_stop['run_start'] = run_stop.pop('run_start_id')
run_stop_cache[run_stop['uid']] = run_stop
if oid is not None:
run_stop_cache[oid] = run_stop
return run_stop
def _cache_descriptor(descriptor, descriptor_cache):
    """De-reference and cache an EventDescriptor document
The de-referenced Document is cached against the
ObjectId and the uid -> ObjectID mapping is stored.
Parameters
----------
descriptor : dict
raw pymongo dictionary. This is expected to have
an entry `_id` with the ObjectId used by mongo.
Returns
-------
descriptor : dict
Document instance for this EventDescriptor document.
The ObjectId has been stripped.
"""
descriptor = dict(descriptor)
# pop the ObjectID
oid = descriptor.pop('_id', None)
try:
descriptor['run_start']
except KeyError:
descriptor['run_start'] = descriptor.pop('run_start_id')
    descriptor_cache[descriptor['uid']] = descriptor
if oid is not None:
        descriptor_cache[oid] = descriptor
return descriptor
def run_start_given_uid(uid, run_start_col, run_start_cache):
"""Given a uid, return the RunStart document
Parameters
----------
uid : str
The uid
run_start_col : pymongo.Collection
The collection to search for documents
run_start_cache : MutableMapping
Mutable mapping to serve as a local cache
Returns
-------
run_start : dict
The RunStart document.
"""
try:
return run_start_cache[uid]
except KeyError:
pass
run_start = run_start_col.find_one({'uid': uid})
if run_start is None:
raise NoRunStart("No runstart with uid {!r}".format(uid))
return _cache_run_start(run_start, run_start_cache)
def run_stop_given_uid(uid, run_stop_col, run_stop_cache):
"""Given a uid, return the RunStop document
Parameters
----------
uid : str
The uid
run_stop_col : pymongo.Collection
The collection to search for documents
run_stop_cache : MutableMapping
Mutable mapping to serve as a local cache
Returns
-------
run_stop : dict
The RunStop document fully de-referenced
"""
try:
return run_stop_cache[uid]
except KeyError:
pass
# get the raw run_stop
run_stop = run_stop_col.find_one({'uid': uid})
return _cache_run_stop(run_stop, run_stop_cache)
def descriptor_given_uid(uid, descriptor_col, descriptor_cache):
"""Given a uid, return the EventDescriptor document
Parameters
----------
uid : str
The uid
descriptor_col : pymongo.Collection
The collection to search for documents
descriptor_cache : MutableMapping
Mutable mapping to serve as a local cache
Returns
-------
descriptor : dict
The EventDescriptor document fully de-referenced
"""
try:
return descriptor_cache[uid]
except KeyError:
pass
descriptor = descriptor_col.find_one({'uid': uid})
return _cache_descriptor(descriptor, descriptor_cache)
def stop_by_start(run_start, run_stop_col, run_stop_cache):
"""Given a RunStart return it's RunStop
Raises if no RunStop exists.
Parameters
----------
run_start : dict or str
The RunStart to get the RunStop for. Can be either
a Document/dict with a 'uid' key or a uid string
Returns
-------
run_stop : dict
The RunStop document
Raises
------
NoRunStop
If no RunStop document exists for the given RunStart
"""
run_start_uid = doc_or_uid_to_uid(run_start)
run_stop = run_stop_col.find_one({'run_start': run_start_uid})
if run_stop is None:
raise NoRunStop("No run stop exists for {!r}".format(run_start))
return _cache_run_stop(run_stop, run_stop_cache)
def descriptors_by_start(run_start, descriptor_col, descriptor_cache):
"""Given a RunStart return a list of it's descriptors
Raises if no EventDescriptors exist.
Parameters
----------
run_start : dict or str
The RunStart to get the EventDescriptors for. Can be either
a Document/dict with a 'uid' key or a uid string
descriptor_col
A collection we can search against
descriptor_cache : dict
Dict[str, Document]
Returns
-------
event_descriptors : list
A list of EventDescriptor documents
Raises
------
NoEventDescriptors
If no EventDescriptor documents exist for the given RunStart
"""
# normalize the input and get the run_start oid
run_start_uid = doc_or_uid_to_uid(run_start)
# query the database for any event descriptors which
# refer to the given run_start
descriptors = descriptor_col.find({'run_start': run_start_uid})
# loop over the found documents, cache, and dereference
rets = [_cache_descriptor(descriptor, descriptor_cache)
for descriptor in descriptors]
# if nothing found, raise
if not rets:
raise NoEventDescriptors("No EventDescriptors exists "
"for {!r}".format(run_start))
# return the list of event descriptors
return rets
def get_events_generator(descriptor, event_col, descriptor_col,
descriptor_cache, run_start_col,
run_start_cache, convert_arrays=True):
"""A generator which yields all events from the event stream
Parameters
----------
descriptor : dict or str
The EventDescriptor to get the Events for. Can be either
a Document/dict with a 'uid' key or a uid string
event_col
Collection we can search for events given descriptor in.
descriptor_col
Collection we can search for descriptors given a uid
descriptor_cache : dict
Dict[str, Document]
convert_arrays: boolean, optional
convert 'array' type to numpy.ndarray; True by default
Yields
------
event : dict
All events for the given EventDescriptor from oldest to
newest
"""
descriptor_uid = doc_or_uid_to_uid(descriptor)
descriptor = descriptor_given_uid(descriptor_uid, descriptor_col,
descriptor_cache)
col = event_col
ev_cur = col.find({'descriptor': descriptor_uid},
sort=[('time', ASCENDING)])
data_keys = descriptor['data_keys']
external_keys = [k for k in data_keys if 'external' in data_keys[k]]
for ev in ev_cur:
# ditch the ObjectID
ev.pop('_id', None)
ev['descriptor'] = descriptor_uid
for k, v in ev['data'].items():
try:
_dk = data_keys[k]
except KeyError as err:
raise MismatchedDataKeys(
"The documents are not valid. Either because they "
"were recorded incorrectly in the first place, "
"corrupted since, or exercising a yet-undiscovered "
"bug in a reader. event['data'].keys() "
"must equal descriptor['data_keys'].keys(). "
f"event['data'].keys(): {ev['data'].keys()}, "
"descriptor['data_keys'].keys(): "
f"{descriptor['data_keys'].keys()}") from err
# convert any arrays stored directly in mds into ndarray
if convert_arrays:
if _dk['dtype'] == 'array' and not _dk.get('external', False):
ev['data'][k] = np.asarray(ev['data'][k])
        # note which keys refer to de-referenced (external) data
ev['filled'] = {k: False for k in external_keys}
yield ev
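# Hedged usage sketch (the *_col arguments are assumed to be pymongo collections):
#   for event in get_events_generator(descriptor_doc, event_col, descriptor_col,
#                                     descriptor_cache, run_start_col,
#                                     run_start_cache):
#       handle(event['data'], event['filled'])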
def get_events_table(descriptor, event_col, descriptor_col,
descriptor_cache, run_start_col, run_start_cache):
"""All event data as tables
Parameters
----------
descriptor : dict or str
The EventDescriptor to get the Events for. Can be either
a Document/dict with a 'uid' key or a uid string
event_col
Collection we can search for events given descriptor in.
descriptor_col
Collection we can search for descriptors given a uid
descriptor_cache : dict
Dict[str, Document]
Returns
-------
descriptor : dict
EventDescriptor document
data_table : dict
dict of lists of the transposed data
seq_nums : list
The sequence number of each event.
times : list
The time of each event.
uids : list
The uid of each event.
timestamps_table : dict
The timestamps of each of the measurements as dict of lists. Same
keys as `data_table`.
"""
desc_uid = doc_or_uid_to_uid(descriptor)
descriptor = descriptor_given_uid(desc_uid, descriptor_col,
descriptor_cache)
# this will get more complicated once transpose caching layer is in place
all_events = list(get_events_generator(desc_uid, event_col,
descriptor_col,
descriptor_cache,
run_start_col,
run_start_cache))
# get event sequence numbers
seq_nums = [ev['seq_num'] for ev in all_events]
# get event times
times = [ev['time'] for ev in all_events]
# get uids
    uids = [ev['uid'] for ev in all_events]
'HLT_BTagMu_DiJet40_Mu5_v3',
'HLT_BTagMu_DiJet70_Mu5_v3',
'HLT_BTagMu_Jet300_Mu5_v3'),
Charmonium = cms.vstring('HLT_Dimuon0_Jpsi_Muon_v3',
'HLT_Dimuon0er16_Jpsi_NoOS_NoVertexing_v3',
'HLT_Dimuon0er16_Jpsi_NoVertexing_v3',
'HLT_Dimuon10_Jpsi_Barrel_v4',
'HLT_Dimuon13_PsiPrime_v3',
'HLT_Dimuon16_Jpsi_v3',
'HLT_Dimuon20_Jpsi_v3',
'HLT_Dimuon6_Jpsi_NoVertexing_v3',
'HLT_Dimuon8_PsiPrime_Barrel_v4',
'HLT_DoubleMu4_3_Bs_v4',
'HLT_DoubleMu4_3_Jpsi_Displaced_v4',
'HLT_DoubleMu4_JpsiTrk_Displaced_v4',
'HLT_DoubleMu4_PsiPrimeTrk_Displaced_v4',
'HLT_Mu7p5_L2Mu2_Jpsi_v3',
'HLT_Mu7p5_Track2_Jpsi_v3',
'HLT_Mu7p5_Track3p5_Jpsi_v3',
'HLT_Mu7p5_Track7_Jpsi_v3',
'HLT_QuadMuon0_Dimuon0_Jpsi_v3'),
Commissioning = cms.vstring('HLT_DiSC30_18_EIso_AND_HE_Mass70_v4',
'HLT_IsoTrackHB_v2',
'HLT_IsoTrackHE_v2',
'HLT_L1BeamGasMinus_v2',
'HLT_L1BeamGasPlus_v2',
'HLT_L1BptxXOR_v2',
'HLT_L1SingleMuOpen_DT_v3'),
DisplacedJet = cms.vstring('HLT_HT200_DisplacedDijet40_DisplacedTrack_v3',
'HLT_HT200_v3',
'HLT_HT250_DisplacedDijet40_DisplacedTrack_v4',
'HLT_HT275_v3',
'HLT_HT325_v3',
'HLT_HT350_DisplacedDijet40_DisplacedTrack_v4',
'HLT_HT350_DisplacedDijet40_Inclusive_v3',
'HLT_HT350_DisplacedDijet80_DisplacedTrack_v4',
'HLT_HT350_DisplacedDijet80_Tight_DisplacedTrack_v4',
'HLT_HT400_DisplacedDijet40_Inclusive_v4',
'HLT_HT425_v3',
'HLT_HT500_DisplacedDijet40_Inclusive_v4',
'HLT_HT550_DisplacedDijet40_Inclusive_v4',
'HLT_HT575_v3',
'HLT_HT650_DisplacedDijet80_Inclusive_v4',
'HLT_HT750_DisplacedDijet80_Inclusive_v4',
'HLT_VBF_DisplacedJet40_DisplacedTrack_2TrackIP2DSig5_v3',
'HLT_VBF_DisplacedJet40_DisplacedTrack_v3',
'HLT_VBF_DisplacedJet40_Hadronic_2PromptTrack_v3',
'HLT_VBF_DisplacedJet40_Hadronic_v3',
'HLT_VBF_DisplacedJet40_TightID_DisplacedTrack_v3',
'HLT_VBF_DisplacedJet40_TightID_Hadronic_v3',
'HLT_VBF_DisplacedJet40_VTightID_DisplacedTrack_v3',
'HLT_VBF_DisplacedJet40_VTightID_Hadronic_v3',
'HLT_VBF_DisplacedJet40_VVTightID_DisplacedTrack_v3',
'HLT_VBF_DisplacedJet40_VVTightID_Hadronic_v3'),
DoubleEG = cms.vstring('HLT_Diphoton30EB_18EB_R9Id_OR_IsoCaloId_AND_HE_R9Id_DoublePixelVeto_Mass55_v4',
'HLT_Diphoton30PV_18PV_R9Id_AND_IsoCaloId_AND_HE_R9Id_DoublePixelVeto_Mass55_v4',
'HLT_Diphoton30_18_R9Id_OR_IsoCaloId_AND_HE_R9Id_DoublePixelSeedMatch_Mass70_v4',
'HLT_Diphoton30_18_R9Id_OR_IsoCaloId_AND_HE_R9Id_Mass90_v4',
'HLT_Diphoton30_18_Solid_R9Id_AND_IsoCaloId_AND_HE_R9Id_Mass55_v4',
'HLT_DoubleEle24_22_eta2p1_WPLoose_Gsf_v5',
'HLT_DoubleEle33_CaloIdL_GsfTrkIdVL_MW_v6',
'HLT_DoubleEle33_CaloIdL_GsfTrkIdVL_v6',
'HLT_DoubleEle33_CaloIdL_MW_v4',
'HLT_DoubleEle33_CaloIdL_v4',
'HLT_DoubleEle37_Ele27_CaloIdL_GsfTrkIdVL_v4',
'HLT_DoubleEle8_CaloIdM_TrackIdM_Mass8_PFHT250_v4',
'HLT_DoubleEle8_CaloIdM_TrackIdM_Mass8_PFHT300_v7',
'HLT_DoublePhoton60_v4',
'HLT_DoublePhoton85_v5',
'HLT_ECALHT800_v4',
'HLT_Ele10_CaloIdM_TrackIdM_CentralPFJet30_BTagCSV_p13_v4',
'HLT_Ele12_CaloIdL_TrackIdL_IsoVL_PFJet30_v6',
'HLT_Ele12_CaloIdL_TrackIdL_IsoVL_v6',
'HLT_Ele12_CaloIdM_TrackIdM_PFJet30_v6',
'HLT_Ele16_Ele12_Ele8_CaloIdL_TrackIdL_v6',
'HLT_Ele17_CaloIdL_GsfTrkIdVL_v4',
'HLT_Ele17_CaloIdL_TrackIdL_IsoVL_PFJet30_v4',
'HLT_Ele17_CaloIdL_TrackIdL_IsoVL_v5',
'HLT_Ele17_CaloIdM_TrackIdM_PFJet30_v4',
'HLT_Ele17_Ele12_CaloIdL_TrackIdL_IsoVL_DZ_v6',
'HLT_Ele17_Ele12_CaloIdL_TrackIdL_IsoVL_v6',
'HLT_Ele23_CaloIdL_TrackIdL_IsoVL_PFJet30_v6',
'HLT_Ele23_CaloIdL_TrackIdL_IsoVL_v6',
'HLT_Ele23_CaloIdM_TrackIdM_PFJet30_v6',
'HLT_Ele23_Ele12_CaloIdL_TrackIdL_IsoVL_DZ_L1JetTauSeeded_v1',
'HLT_Ele23_Ele12_CaloIdL_TrackIdL_IsoVL_DZ_v6',
'HLT_Ele23_Ele12_CaloIdL_TrackIdL_IsoVL_v6',
'HLT_Ele27_HighEta_Ele20_Mass55_v4',
'HLT_Ele30WP60_Ele8_Mass55_v5',
'HLT_Ele30WP60_SC4_Mass55_v6',
'HLT_Ele8_CaloIdL_TrackIdL_IsoVL_PFJet30_v4',
'HLT_Ele8_CaloIdM_TrackIdM_PFJet30_v6',
'HLT_Photon26_R9Id85_OR_CaloId24b40e_Iso50T80L_Photon16_AND_HE10_R9Id65_Eta2_Mass60_v5',
'HLT_Photon36_R9Id85_OR_CaloId24b40e_Iso50T80L_Photon22_AND_HE10_R9Id65_Eta2_Mass15_v5',
'HLT_Photon42_R9Id85_OR_CaloId24b40e_Iso50T80L_Photon25_AND_HE10_R9Id65_Eta2_Mass15_v5'),
DoubleMuon = cms.vstring('HLT_DoubleMu18NoFiltersNoVtx_v3',
'HLT_DoubleMu23NoFiltersNoVtxDisplaced_v3',
'HLT_DoubleMu28NoFiltersNoVtxDisplaced_v3',
'HLT_DoubleMu33NoFiltersNoVtx_v3',
'HLT_DoubleMu38NoFiltersNoVtx_v3',
'HLT_DoubleMu8_Mass8_PFHT250_v3',
'HLT_DoubleMu8_Mass8_PFHT300_v6',
'HLT_L2DoubleMu23_NoVertex_v4',
'HLT_L2DoubleMu28_NoVertex_2Cha_Angle2p5_Mass10_v4',
'HLT_L2DoubleMu38_NoVertex_2Cha_Angle2p5_Mass10_v4',
'HLT_Mu10_CentralPFJet30_BTagCSV_p13_v2',
'HLT_Mu17_Mu8_DZ_v4',
'HLT_Mu17_Mu8_SameSign_DZ_v3',
'HLT_Mu17_Mu8_SameSign_v3',
'HLT_Mu17_Mu8_v3',
'HLT_Mu17_TkMu8_DZ_v3',
'HLT_Mu17_TrkIsoVVL_Mu8_TrkIsoVVL_DZ_v4',
'HLT_Mu17_TrkIsoVVL_Mu8_TrkIsoVVL_v4',
'HLT_Mu17_TrkIsoVVL_TkMu8_TrkIsoVVL_DZ_v3',
'HLT_Mu17_TrkIsoVVL_TkMu8_TrkIsoVVL_v3',
'HLT_Mu17_TrkIsoVVL_v3',
'HLT_Mu17_v3',
'HLT_Mu20_Mu10_DZ_v3',
'HLT_Mu20_Mu10_SameSign_DZ_v3',
'HLT_Mu20_Mu10_SameSign_v2',
'HLT_Mu20_Mu10_v3',
'HLT_Mu27_TkMu8_v3',
'HLT_Mu30_TkMu11_v3',
'HLT_Mu3_PFJet40_v3',
'HLT_Mu40_TkMu11_v3',
'HLT_Mu8_TrkIsoVVL_v4',
'HLT_Mu8_v4',
'HLT_TripleMu_12_10_5_v3',
'HLT_TripleMu_5_3_3_v1',
'HLT_TrkMu15_DoubleTrkMu5NoFiltersNoVtx_v4',
'HLT_TrkMu17_DoubleTrkMu8NoFiltersNoVtx_v4'),
DoubleMuonLowMass = cms.vstring('HLT_DoubleMu3_Trk_Tau3mu_v2',
'HLT_DoubleMu4_LowMassNonResonantTrk_Displaced_v4'),
EcalLaser = cms.vstring('HLT_EcalCalibration_v3'),
EmptyBX = cms.vstring('HLT_L1BptxMinus_v2',
'HLT_L1BptxPlus_v2',
'HLT_L1NotBptxOR_v2'),
EventDisplay = cms.vstring('HLT_AK4PFJet100_v4',
'HLT_AK8PFJet360_TrimMass30_v4',
'HLT_HISinglePhoton60_v3',
'HLT_Photon175_v6'),
ExpressPhysics = cms.vstring('HLT_Ele17_Ele12_CaloIdL_TrackIdL_IsoVL_DZ_v6',
'HLT_Ele23_Ele12_CaloIdL_TrackIdL_IsoVL_DZ_v6',
'HLT_Ele250_CaloIdVT_GsfTrkIdT_v4',
'HLT_Ele300_CaloIdVT_GsfTrkIdT_v4',
'HLT_HT2000_v3',
'HLT_HT2500_v3',
'HLT_IsoMu20_v4',
'HLT_IsoMu24_v2',
'HLT_L1FatEvents_v1',
'HLT_MET600_v3',
'HLT_MET700_v3',
'HLT_Mu17_TrkIsoVVL_Mu8_TrkIsoVVL_DZ_v4',
'HLT_Mu300_v2',
'HLT_Mu350_v2',
'HLT_PFMET500_v3',
'HLT_PFMET600_v3',
'HLT_Photon500_v4',
'HLT_Photon600_v4',
'HLT_Physics_v4',
'HLT_Random_v2',
'HLT_ZeroBias_FirstCollisionAfterAbortGap_TCDS_v1',
'HLT_ZeroBias_FirstCollisionAfterAbortGap_v2',
'HLT_ZeroBias_IsolatedBunches_v3',
'HLT_ZeroBias_v4'),
FSQJets = cms.vstring('HLT_DiPFJet15_FBEta3_NoCaloMatched_v3',
'HLT_DiPFJet15_NoCaloMatched_v2',
'HLT_DiPFJet25_FBEta3_NoCaloMatched_v3',
'HLT_DiPFJet25_NoCaloMatched_v2',
'HLT_DiPFJetAve15_HFJEC_v2',
'HLT_DiPFJetAve25_HFJEC_v2',
'HLT_DiPFJetAve35_HFJEC_v2',
'HLT_PFJet15_NoCaloMatched_v4',
'HLT_PFJet25_NoCaloMatched_v2'),
HINCaloJets = cms.vstring('HLT_AK4CaloJet100_v3',
'HLT_AK4CaloJet30_v4',
'HLT_AK4CaloJet40_v3',
'HLT_AK4CaloJet50_v3',
'HLT_AK4CaloJet80_v3'),
HINPFJets = cms.vstring('HLT_AK4PFJet100_v4',
'HLT_AK4PFJet30_v4',
'HLT_AK4PFJet50_v4',
'HLT_AK4PFJet80_v4'),
HINPhoton = cms.vstring('HLT_HISinglePhoton10_v3',
'HLT_HISinglePhoton15_v3',
'HLT_HISinglePhoton20_v3',
'HLT_HISinglePhoton40_v3',
'HLT_HISinglePhoton60_v3'),
HLTMonitor = cms.vstring('HLT_DiPFJetAve40_v4',
'HLT_PFHT350_v5',
'HLT_PFMET120_BTagCSV_p067_v3',
'HLT_PFMET120_Mu5_v3',
'HLT_PFMET120_PFMHT120_IDTight_v4',
'HLT_QuadPFJet_BTagCSV_p016_VBF_Mqq460_v2',
'HLT_QuadPFJet_BTagCSV_p016_VBF_Mqq500_v2',
'HLT_QuadPFJet_BTagCSV_p016_p11_VBF_Mqq200_v2',
'HLT_QuadPFJet_BTagCSV_p016_p11_VBF_Mqq240_v2',
'HLT_QuadPFJet_VBF_v5'),
HLTPhysics = cms.vstring('HLT_L1FatEvents_v1',
'HLT_Physics_v4'),
HLTPhysics0 = cms.vstring('HLT_L1FatEvents_part0_v1'),
HLTPhysics1 = cms.vstring('HLT_L1FatEvents_part1_v1'),
HLTPhysics2 = cms.vstring('HLT_L1FatEvents_part2_v1'),
HLTPhysics3 = cms.vstring('HLT_L1FatEvents_part3_v1'),
HTMHT = cms.vstring('HLT_DiPFJet40_DEta3p5_MJJ600_PFMETNoMu140_v2',
'HLT_DiPFJet40_DEta3p5_MJJ600_PFMETNoMu80_v2',
'HLT_PFHT200_DiPFJetAve90_PFAlphaT0p57_v4',
'HLT_PFHT200_DiPFJetAve90_PFAlphaT0p63_v4',
'HLT_PFHT200_PFAlphaT0p51_v4',
'HLT_PFHT250_DiPFJetAve90_PFAlphaT0p55_v4',
'HLT_PFHT250_DiPFJetAve90_PFAlphaT0p58_v4',
'HLT_PFHT300_DiPFJetAve90_PFAlphaT0p53_v4',
'HLT_PFHT300_DiPFJetAve90_PFAlphaT0p54_v4',
'HLT_PFHT300_PFMET100_v3',
'HLT_PFHT300_PFMET110_v3',
'HLT_PFHT350_DiPFJetAve90_PFAlphaT0p52_v4',
'HLT_PFHT350_DiPFJetAve90_PFAlphaT0p53_v4',
'HLT_PFHT400_DiPFJetAve90_PFAlphaT0p51_v4',
'HLT_PFHT400_DiPFJetAve90_PFAlphaT0p52_v4',
'HLT_Rsq0p25_v3',
'HLT_Rsq0p30_v3',
'HLT_RsqMR240_Rsq0p09_MR200_4jet_v3',
'HLT_RsqMR240_Rsq0p09_MR200_v3',
'HLT_RsqMR270_Rsq0p09_MR200_4jet_v3',
'HLT_RsqMR270_Rsq0p09_MR200_v3'),
HcalHPDNoise = cms.vstring('HLT_GlobalRunHPDNoise_v4'),
HcalNZS = cms.vstring('HLT_HcalNZS_v3',
'HLT_HcalPhiSym_v3'),
HighMultiplicity85EOF = cms.vstring('HLT_PixelTracks_Multiplicity85ForEndOfFill_v2'),
HighMultiplicityEOF = cms.vstring('HLT_FullTracks_Multiplicity100_v2',
'HLT_FullTracks_Multiplicity130_v2',
'HLT_FullTracks_Multiplicity150_v2',
'HLT_FullTracks_Multiplicity80_v2',
'HLT_PixelTracks_Multiplicity110ForEndOfFill_v3',
'HLT_PixelTracks_Multiplicity135ForEndOfFill_v3',
'HLT_PixelTracks_Multiplicity160ForEndOfFill_v3',
'HLT_PixelTracks_Multiplicity60ForEndOfFill_v2'),
JetHT = cms.vstring('HLT_AK8DiPFJet250_200_TrimMass30_BTagCSV_p20_v2',
'HLT_AK8DiPFJet250_200_TrimMass30_v2',
'HLT_AK8DiPFJet280_200_TrimMass30_BTagCSV_p20_v2',
'HLT_AK8DiPFJet280_200_TrimMass30_v2',
'HLT_AK8PFHT600_TrimR0p1PT0p03Mass50_BTagCSV_p20_v3',
'HLT_AK8PFHT650_TrimR0p1PT0p03Mass50_v4',
'HLT_AK8PFHT700_TrimR0p1PT0p03Mass50_v5',
'HLT_AK8PFJet140_v1',
'HLT_AK8PFJet200_v1',
'HLT_AK8PFJet260_v1',
'HLT_AK8PFJet320_v1',
'HLT_AK8PFJet360_TrimMass30_v4',
'HLT_AK8PFJet400_v1',
'HLT_AK8PFJet40_v2',
'HLT_AK8PFJet450_v1',
'HLT_AK8PFJet500_v1',
'HLT_AK8PFJet60_v1',
'HLT_AK8PFJet80_v1',
'HLT_CaloJet500_NoJetID_v4',
'HLT_DiCentralPFJet170_CFMax0p1_v2',
'HLT_DiCentralPFJet170_v2',
'HLT_DiCentralPFJet220_CFMax0p3_v2',
'HLT_DiCentralPFJet330_CFMax0p5_v2',
'HLT_DiCentralPFJet430_v2',
'HLT_DiPFJetAve100_HFJEC_v4',
'HLT_DiPFJetAve140_v3',
'HLT_DiPFJetAve160_HFJEC_v4',
'HLT_DiPFJetAve200_v3',
'HLT_DiPFJetAve220_HFJEC_v4',
'HLT_DiPFJetAve260_v3',
'HLT_DiPFJetAve300_HFJEC_v4',
'HLT_DiPFJetAve320_v3',
'HLT_DiPFJetAve400_v3',
'HLT_DiPFJetAve40_v4',
'HLT_DiPFJetAve500_v3',
'HLT_DiPFJetAve60_HFJEC_v4',
'HLT_DiPFJetAve60_v4',
'HLT_DiPFJetAve80_HFJEC_v4',
'HLT_DiPFJetAve80_v3',
'HLT_HT2000_v3',
'HLT_HT2500_v3',
'HLT_L1_TripleJet_VBF_v5',
'HLT_PFHT125_v2',
'HLT_PFHT200_v3',
'HLT_PFHT250_v3',
'HLT_PFHT300_v4',
'HLT_PFHT350_v5',
'HLT_PFHT400_SixJet30_DoubleBTagCSV_p056_v2',
'HLT_PFHT400_SixJet30_v4',
'HLT_PFHT400_v4',
'HLT_PFHT450_SixJet40_BTagCSV_p056_v2',
'HLT_PFHT450_SixJet40_v4',
'HLT_PFHT475_v4',
'HLT_PFHT550_4JetPt50_v3',
'HLT_PFHT600_v5',
'HLT_PFHT650_4JetPt50_v3',
'HLT_PFHT650_WideJetMJJ900DEtaJJ1p5_v5',
'HLT_PFHT650_WideJetMJJ950DEtaJJ1p5_v5',
'HLT_PFHT650_v5',
'HLT_PFHT750_4JetPt50_v5',
'HLT_PFHT800_v4',
'HLT_PFHT900_v3',
'HLT_PFJet140_v5',
'HLT_PFJet200_v5',
'HLT_PFJet260_v5',
'HLT_PFJet320_v5',
'HLT_PFJet400_v5',
'HLT_PFJet40_v6',
'HLT_PFJet450_v5',
'HLT_PFJet500_v5',
'HLT_PFJet60_v6',
'HLT_PFJet80_v5',
'HLT_QuadPFJet_VBF_v5',
'HLT_SingleCentralPFJet170_CFMax0p1_v2'),
L1Accept = cms.vstring('DST_Physics_v2'),
L1MinimumBias = cms.vstring('HLT_L1MinimumBiasHF_AND_v2',
'HLT_L1MinimumBiasHF_OR_v2'),
MET = cms.vstring('HLT_CaloMHTNoPU90_PFMET90_PFMHT90_IDTight_BTagCSV_p067_v3',
'HLT_CaloMHTNoPU90_PFMET90_PFMHT90_IDTight_v4',
'HLT_DiCentralPFJet55_PFMET110_v3',
'HLT_DoubleMu3_PFMET50_v2',
'HLT_MET200_v3',
'HLT_MET250_v3',
'HLT_MET300_v3',
'HLT_MET600_v3',
'HLT_MET60_IsoTrk35_Loose_v2',
'HLT_MET700_v3',
'HLT_MET75_IsoTrk50_v4',
'HLT_MET90_IsoTrk50_v4',
'HLT_MonoCentralPFJet80_PFMETNoMu100_PFMHTNoMu100_IDTight_v4',
'HLT_MonoCentralPFJet80_PFMETNoMu110_PFMHTNoMu110_IDTight_v4',
'HLT_MonoCentralPFJet80_PFMETNoMu120_PFMHTNoMu120_IDTight_v4',
'HLT_MonoCentralPFJet80_PFMETNoMu90_PFMHTNoMu90_IDTight_v4',
'HLT_Mu14er_PFMET100_v2',
'HLT_Mu3er_PFHT140_PFMET125_v3',
'HLT_Mu6_PFHT200_PFMET100_v2',
'HLT_Mu6_PFHT200_PFMET80_BTagCSV_p067_v2',
'HLT_PFMET100_PFMHT100_IDTight_v4',
'HLT_PFMET110_PFMHT110_IDTight_v4',
'HLT_PFMET120_BTagCSV_p067_v3',
'HLT_PFMET120_Mu5_v3',
'HLT_PFMET120_PFMHT120_IDTight_v4',
'HLT_PFMET170_BeamHaloCleaned_v2',
'HLT_PFMET170_HBHECleaned_v4',
'HLT_PFMET170_NotCleaned_v3',
'HLT_PFMET300_v3',
'HLT_PFMET400_v3',
'HLT_PFMET500_v3',
'HLT_PFMET600_v3',
'HLT_PFMET90_PFMHT90_IDTight_v4',
'HLT_PFMETNoMu100_PFMHTNoMu100_IDTight_v4',
'HLT_PFMETNoMu110_PFMHTNoMu110_IDTight_v4',
'HLT_PFMETNoMu120_PFMHTNoMu120_IDTight_v4',
'HLT_PFMETNoMu90_PFMHTNoMu90_IDTight_v4'),
MonteCarlo = cms.vstring('MC_AK4CaloJets_v2',
'MC_AK4PFJets_v3',
'MC_AK8CaloHT_v2',
'MC_AK8PFHT_v3',
'MC_AK8PFJets_v3',
'MC_AK8TrimPFJets_v3',
'MC_CaloHT_v2',
'MC_CaloMET_JetIdCleaned_v2',
'MC_CaloMET_v2',
'MC_CaloMHT_v2',
'MC_Diphoton10_10_R9Id_OR_IsoCaloId_AND_HE_R9Id_Mass10_v4',
'MC_DoubleEle5_CaloIdL_GsfTrkIdVL_MW_v5',
'MC_DoubleGlbTrkMu_TrkIsoVVL_DZ_v2',
'MC_DoubleL1Tau_MediumIsoPFTau32_Trk1_eta2p1_Reg_v1',
'MC_DoubleMuNoFiltersNoVtx_v2',
'MC_DoubleMu_TrkIsoVVL_DZ_v2',
'MC_Ele15_Ele10_CaloIdL_TrackIdL_IsoVL_DZ_v5',
'MC_Ele5_WPLoose_Gsf_v5',
'MC_IsoMu_v5',
'MC_IsoTkMu15_v5',
'MC_LooseIsoPFTau20_v2',
'MC_LooseIsoPFTau50_Trk30_eta2p1_v1',
'MC_PFHT_v3',
'MC_PFMET_v3',
'MC_PFMHT_v3',
'MC_ReducedIterativeTracking_v2'),
MuOnia = cms.vstring('HLT_Dimuon0_Phi_Barrel_v4',
'HLT_Dimuon0_Upsilon_Muon_v3',
'HLT_Dimuon13_Upsilon_v3',
'HLT_Dimuon8_Upsilon_Barrel_v4',
'HLT_Mu16_TkMu0_dEta18_Onia_v3',
'HLT_Mu16_TkMu0_dEta18_Phi_v3',
'HLT_Mu25_TkMu0_dEta18_Onia_v4',
'HLT_Mu7p5_L2Mu2_Upsilon_v3',
'HLT_Mu7p5_Track2_Upsilon_v3',
'HLT_Mu7p5_Track3p5_Upsilon_v3',
'HLT_Mu7p5_Track7_Upsilon_v3',
'HLT_QuadMuon0_Dimuon0_Upsilon_v3'),
MuonEG = cms.vstring('HLT_DiMu9_Ele9_CaloIdL_TrackIdL_v6',
'HLT_Mu12_Photon25_CaloIdL_L1ISO_v6',
'HLT_Mu12_Photon25_CaloIdL_L1OR_v6',
'HLT_Mu12_Photon25_CaloIdL_v6',
'HLT_Mu17_Photon22_CaloIdL_L1ISO_v4',
'HLT_Mu17_Photon30_CaloIdL_L1ISO_v6',
'HLT_Mu17_Photon35_CaloIdL_L1ISO_v6',
'HLT_Mu17_TrkIsoVVL_Ele12_CaloIdL_TrackIdL_IsoVL_v6',
'HLT_Mu23NoFiltersNoVtx_Photon23_CaloIdL_v5',
'HLT_Mu23_TrkIsoVVL_Ele12_CaloIdL_TrackIdL_IsoVL_v6',
'HLT_Mu23_TrkIsoVVL_Ele8_CaloIdL_TrackIdL_IsoVL_v4',
'HLT_Mu27_Ele37_CaloIdL_GsfTrkIdVL_v3',
'HLT_Mu28NoFiltersNoVtxDisplaced_Photon28_CaloIdL_v5',
'HLT_Mu30_Ele30_CaloIdL_GsfTrkIdVL_v5',
'HLT_Mu33NoFiltersNoVtxDisplaced_Photon33_CaloIdL_v5',
'HLT_Mu37_Ele27_CaloIdL_GsfTrkIdVL_v3',
'HLT_Mu38NoFiltersNoVtx_Photon38_CaloIdL_v5',
'HLT_Mu42NoFiltersNoVtx_Photon42_CaloIdL_v5',
'HLT_Mu8_DiEle12_CaloIdL_TrackIdL_v6',
'HLT_Mu8_Ele8_CaloIdM_TrackIdM_Mass8_PFHT250_v4',
'HLT_Mu8_Ele8_CaloIdM_TrackIdM_Mass8_PFHT300_v7',
'HLT_Mu8_TrkIsoVVL_Ele17_CaloIdL_TrackIdL_IsoVL_v6',
'HLT_Mu8_TrkIsoVVL_Ele23_CaloIdL_TrackIdL_IsoVL_v6'),
NoBPTX = cms.vstring('HLT_JetE30_NoBPTX3BX_v3',
'HLT_JetE30_NoBPTX_v3',
'HLT_JetE50_NoBPTX3BX_v2',
'HLT_JetE70_NoBPTX3BX_v2',
'HLT_L2Mu10_NoVertex_NoBPTX3BX_v2',
'HLT_L2Mu10_NoVertex_NoBPTX_v3',
'HLT_L2Mu35_NoVertex_3Sta_NoBPTX3BX_v2',
'HLT_L2Mu40_NoVertex_3Sta_NoBPTX3BX_v2'),
OnlineMonitor = cms.vstring( ('DST_CaloJet40_BTagScouting_v4',
'DST_CaloJet40_CaloBTagScouting_v3',
'DST_CaloJet40_CaloScouting_PFScouting_v4',
'DST_DoubleMu3_Mass10_BTagScouting_v4',
'DST_DoubleMu3_Mass10_CaloScouting_PFScouting_v3',
'DST_HT250_CaloBTagScouting_v2',
'DST_HT250_CaloScouting_v4',
'DST_HT410_BTagScouting_v4',
'DST_HT410_PFScouting_v4',
'DST_HT450_BTagScouting_v4',
'DST_HT450_PFScouting_v4',
'DST_L1DoubleMu_BTagScouting_v4',
'DST_L1DoubleMu_CaloScouting_PFScouting_v3',
'DST_L1HTT_BTagScouting_v4',
'DST_L1HTT_CaloBTagScouting_v3',
'DST_L1HTT_CaloScouting_PFScouting_v4',
'DST_ZeroBias_BTagScouting_v4',
'DST_ZeroBias_CaloScouting_PFScouting_v3',
'HLT_AK4CaloJet100_v3',
'HLT_AK4CaloJet30_v4',
'HLT_AK4CaloJet40_v3',
'HLT_AK4CaloJet50_v3',
'HLT_AK4CaloJet80_v3',
'HLT_AK4PFJet100_v4',
'HLT_AK4PFJet30_v4',
'HLT_AK4PFJet50_v4',
'HLT_AK4PFJet80_v4',
'HLT_AK8DiPFJet250_200_TrimMass30_BTagCSV_p20_v2',
'HLT_AK8DiPFJet250_200_TrimMass30_v2',
'HLT_AK8DiPFJet280_200_TrimMass30_BTagCSV_p20_v2',
'HLT_AK8DiPFJet280_200_TrimMass30_v2',
'HLT_AK8PFHT600_TrimR0p1PT0p03Mass50_BTagCSV_p20_v3',
'HLT_AK8PFHT650_TrimR0p1PT0p03Mass50_v4',
'HLT_AK8PFHT700_TrimR0p1PT0p03Mass50_v5',
'HLT_AK8PFJet140_v1',
'HLT_AK8PFJet200_v1',
'HLT_AK8PFJet260_v1',
'HLT_AK8PFJet320_v1',
'HLT_AK8PFJet360_TrimMass30_v4',
'HLT_AK8PFJet400_v1',
'HLT_AK8PFJet40_v2',
'HLT_AK8PFJet450_v1',
'HLT_AK8PFJet500_v1',
'HLT_AK8PFJet60_v1',
'HLT_AK8PFJet80_v1',
'HLT_BTagMu_AK8Jet300_Mu5_v2',
'HLT_BTagMu_DiJet110_Mu5_v3',
'HLT_BTagMu_DiJet170_Mu5_v2',
'HLT_BTagMu_DiJet20_Mu5_v3',
'HLT_BTagMu_DiJet40_Mu5_v3',
'HLT_BTagMu_DiJet70_Mu5_v3',
'HLT_BTagMu_Jet300_Mu5_v3',
'HLT_CaloJet500_NoJetID_v4',
'HLT_CaloMHTNoPU90_PFMET90_PFMHT90_IDTight_BTagCSV_p067_v3',
'HLT_CaloMHTNoPU90_PFMET90_PFMHT90_IDTight_v4',
'HLT_DiCentralPFJet170_CFMax0p1_v2',
'HLT_DiCentralPFJet170_v2',
'HLT_DiCentralPFJet220_CFMax0p3_v2',
'HLT_DiCentralPFJet330_CFMax0p5_v2',
'HLT_DiCentralPFJet430_v2',
'HLT_DiCentralPFJet55_PFMET110_v3',
'HLT_DiMu9_Ele9_CaloIdL_TrackIdL_v6',
'HLT_DiPFJet15_FBEta3_NoCaloMatched_v3',
'HLT_DiPFJet15_NoCaloMatched_v2',
'HLT_DiPFJet25_FBEta3_NoCaloMatched_v3',
'HLT_DiPFJet25_NoCaloMatched_v2',
'HLT_DiPFJet40_DEta3p5_MJJ600_PFMETNoMu140_v2',
'HLT_DiPFJet40_DEta3p5_MJJ600_PFMETNoMu80_v2',
'HLT_DiPFJetAve100_HFJEC_v4',
'HLT_DiPFJetAve140_v3',
'HLT_DiPFJetAve15_HFJEC_v2',
'HLT_DiPFJetAve160_HFJEC_v4',
'HLT_DiPFJetAve200_v3',
'HLT_DiPFJetAve220_HFJEC_v4',
'HLT_DiPFJetAve25_HFJEC_v2',
'HLT_DiPFJetAve260_v3',
'HLT_DiPFJetAve300_HFJEC_v4',
'HLT_DiPFJetAve320_v3',
'HLT_DiPFJetAve35_HFJEC_v2',
'HLT_DiPFJetAve400_v3',
'HLT_DiPFJetAve40_v4',
'HLT_DiPFJetAve500_v3',
'HLT_DiPFJetAve60_HFJEC_v4',
'HLT_DiPFJetAve60_v4',
'HLT_DiPFJetAve80_HFJEC_v4',
'HLT_DiPFJetAve80_v3',
'HLT_DiSC30_18_EIso_AND_HE_Mass70_v4',
'HLT_Dimuon0_Jpsi_Muon_v3',
'HLT_Dimuon0_Phi_Barrel_v4',
'HLT_Dimuon0_Upsilon_Muon_v3',
'HLT_Dimuon0er16_Jpsi_NoOS_NoVertexing_v3',
'HLT_Dimuon0er16_Jpsi_NoVertexing_v3',
'HLT_Dimuon10_Jpsi_Barrel_v4',
'HLT_Dimuon13_PsiPrime_v3',
'HLT_Dimuon13_Upsilon_v3',
'HLT_Dimuon16_Jpsi_v3',
'HLT_Dimuon20_Jpsi_v3',
'HLT_Dimuon6_Jpsi_NoVertexing_v3',
'HLT_Dimuon8_PsiPrime_Barrel_v4',
'HLT_Dimuon8_Upsilon_Barrel_v4',
'HLT_Diphoton30EB_18EB_R9Id_OR_IsoCaloId_AND_HE_R9Id_DoublePixelVeto_Mass55_v4',
'HLT_Diphoton30PV_18PV_R9Id_AND_IsoCaloId_AND_HE_R9Id_DoublePixelVeto_Mass55_v4',
'HLT_Diphoton30_18_R9Id_OR_IsoCaloId_AND_HE_R9Id_DoublePixelSeedMatch_Mass70_v4',
'HLT_Diphoton30_18_R9Id_OR_IsoCaloId_AND_HE_R9Id_Mass90_v4',
'HLT_Diphoton30_18_Solid_R9Id_AND_IsoCaloId_AND_HE_R9Id_Mass55_v4',
'HLT_DoubleEle24_22_eta2p1_WPLoose_Gsf_v5',
'HLT_DoubleEle33_CaloIdL_GsfTrkIdVL_MW_v6',
'HLT_DoubleEle33_CaloIdL_GsfTrkIdVL_v6',
'HLT_DoubleEle33_CaloIdL_MW_v4',
'HLT_DoubleEle33_CaloIdL_v4',
'HLT_DoubleEle37_Ele27_CaloIdL_GsfTrkIdVL_v4',
'HLT_DoubleEle8_CaloIdM_TrackIdM_Mass8_PFHT250_v4',
'HLT_DoubleEle8_CaloIdM_TrackIdM_Mass8_PFHT300_v7',
'HLT_DoubleIsoMu17_eta2p1_noDzCut_v2',
'HLT_DoubleIsoMu17_eta2p1_v4',
'HLT_DoubleJet90_Double30_DoubleBTagCSV_p087_v2',
'HLT_DoubleJet90_Double30_TripleBTagCSV_p087_v2',
'HLT_DoubleJetsC100_DoubleBTagCSV_p014_DoublePFJetsC100MaxDeta1p6_v2',
'HLT_DoubleJetsC100_DoubleBTagCSV_p026_DoublePFJetsC160_v2',
'HLT_DoubleJetsC100_SingleBTagCSV_p014_SinglePFJetC350_v2',
'HLT_DoubleJetsC100_SingleBTagCSV_p014_v2',
'HLT_DoubleJetsC100_SingleBTagCSV_p026_SinglePFJetC350_v2',
'HLT_DoubleJetsC100_SingleBTagCSV_p026_v2',
'HLT_DoubleJetsC112_DoubleBTagCSV_p014_DoublePFJetsC112MaxDeta1p6_v2',
'HLT_DoubleJetsC112_DoubleBTagCSV_p026_DoublePFJetsC172_v2',
'HLT_DoubleMediumIsoPFTau32_Trk1_eta2p1_Reg_v2',
'HLT_DoubleMediumIsoPFTau35_Trk1_eta2p1_Reg_v3',
'HLT_DoubleMediumIsoPFTau40_Trk1_eta2p1_Reg_v5',
'HLT_DoubleMu18NoFiltersNoVtx_v3',
'HLT_DoubleMu23NoFiltersNoVtxDisplaced_v3',
'HLT_DoubleMu28NoFiltersNoVtxDisplaced_v3',
'HLT_DoubleMu33NoFiltersNoVtx_v3',
'HLT_DoubleMu38NoFiltersNoVtx_v3',
'HLT_DoubleMu3_PFMET50_v2',
'HLT_DoubleMu3_Trk_Tau3mu_v2',
'HLT_DoubleMu4_3_Bs_v4',
'HLT_DoubleMu4_3_Jpsi_Displaced_v4',
'HLT_DoubleMu4_JpsiTrk_Displaced_v4',
'HLT_DoubleMu4_LowMassNonResonantTrk_Displaced_v4',
'HLT_DoubleMu4_PsiPrimeTrk_Displaced_v4',
'HLT_DoubleMu8_Mass8_PFHT250_v3',
'HLT_DoubleMu8_Mass8_PFHT300_v6',
'HLT_DoublePhoton60_v4',
'HLT_DoublePhoton85_v5',
'HLT_ECALHT800_v4',
'HLT_Ele105_CaloIdVT_GsfTrkIdT_v6',
'HLT_Ele10_CaloIdM_TrackIdM_CentralPFJet30_BTagCSV_p13_v4',
'HLT_Ele115_CaloIdVT_GsfTrkIdT_v5',
'HLT_Ele12_CaloIdL_TrackIdL_IsoVL_PFJet30_v6',
'HLT_Ele12_CaloIdL_TrackIdL_IsoVL_v6',
'HLT_Ele12_CaloIdM_TrackIdM_PFJet30_v6',
'HLT_Ele15_IsoVVVL_BTagCSV_p067_PFHT400_v4',
'HLT_Ele15_IsoVVVL_PFHT350_PFMET50_v5',
'HLT_Ele15_IsoVVVL_PFHT350_v5',
'HLT_Ele15_IsoVVVL_PFHT400_PFMET50_v3',
'HLT_Ele15_IsoVVVL_PFHT400_v3',
'HLT_Ele15_IsoVVVL_PFHT600_v6',
'HLT_Ele16_Ele12_Ele8_CaloIdL_TrackIdL_v6',
'HLT_Ele17_CaloIdL_GsfTrkIdVL_v4',
'HLT_Ele17_CaloIdL_TrackIdL_IsoVL_PFJet30_v4',
'HLT_Ele17_CaloIdL_TrackIdL_IsoVL_v5',
'HLT_Ele17_CaloIdM_TrackIdM_PFJet30_v4',
'HLT_Ele17_Ele12_CaloIdL_TrackIdL_IsoVL_DZ_v6',
'HLT_Ele17_Ele12_CaloIdL_TrackIdL_IsoVL_v6',
'HLT_Ele22_eta2p1_WPLoose_Gsf_LooseIsoPFTau20_SingleL1_v5',
'HLT_Ele22_eta2p1_WPLoose_Gsf_v6',
'HLT_Ele23_CaloIdL_TrackIdL_IsoVL_PFJet30_v6',
'HLT_Ele23_CaloIdL_TrackIdL_IsoVL_v6',
'HLT_Ele23_CaloIdM_TrackIdM_PFJet30_v6',
'HLT_Ele23_Ele12_CaloIdL_TrackIdL_IsoVL_DZ_L1JetTauSeeded_v1',
'HLT_Ele23_Ele12_CaloIdL_TrackIdL_IsoVL_DZ_v6',
'HLT_Ele23_Ele12_CaloIdL_TrackIdL_IsoVL_v6',
'HLT_Ele23_WPLoose_Gsf_WHbbBoost_v5',
'HLT_Ele23_WPLoose_Gsf_v6',
'HLT_Ele24_eta2p1_WPLoose_Gsf_LooseIsoPFTau20_SingleL1_v4',
'HLT_Ele24_eta2p1_WPLoose_Gsf_LooseIsoPFTau20_v4',
'HLT_Ele24_eta2p1_WPLoose_Gsf_v4',
'HLT_Ele250_CaloIdVT_GsfTrkIdT_v4',
'HLT_Ele25_WPTight_Gsf_v4',
'HLT_Ele25_eta2p1_WPLoose_Gsf_v4',
'HLT_Ele25_eta2p1_WPTight_Gsf_v4',
'HLT_Ele27_HighEta_Ele20_Mass55_v4',
'HLT_Ele27_WPLoose_Gsf_WHbbBoost_v5',
'HLT_Ele27_WPLoose_Gsf_v4',
'HLT_Ele27_WPTight_Gsf_L1JetTauSeeded_v1',
'HLT_Ele27_WPTight_Gsf_v4',
'HLT_Ele27_eta2p1_WPLoose_Gsf_DoubleMediumIsoPFTau32_Trk1_eta2p1_Reg_v4',
'HLT_Ele27_eta2p1_WPLoose_Gsf_DoubleMediumIsoPFTau35_Trk1_eta2p1_Reg_v5',
'HLT_Ele27_eta2p1_WPLoose_Gsf_DoubleMediumIsoPFTau40_Trk1_eta2p1_Reg_v6',
'HLT_Ele27_eta2p1_WPLoose_Gsf_HT200_v5',
'HLT_Ele27_eta2p1_WPLoose_Gsf_LooseIsoPFTau20_SingleL1_v4',
'HLT_Ele27_eta2p1_WPLoose_Gsf_v5',
'HLT_Ele27_eta2p1_WPTight_Gsf_v5',
'HLT_Ele300_CaloIdVT_GsfTrkIdT_v4',
'HLT_Ele30WP60_Ele8_Mass55_v5',
'HLT_Ele30WP60_SC4_Mass55_v6',
'HLT_Ele32_eta2p1_WPLoose_Gsf_LooseIsoPFTau20_SingleL1_v4',
'HLT_Ele32_eta2p1_WPTight_Gsf_v5',
'HLT_Ele35_CaloIdVT_GsfTrkIdT_PFJet150_PFJet50_v4',
'HLT_Ele35_WPLoose_Gsf_v4',
'HLT_Ele45_CaloIdVT_GsfTrkIdT_PFJet200_PFJet50_v6',
'HLT_Ele45_WPLoose_Gsf_L1JetTauSeeded_v1',
"""
# pylint: disable-msg=too-many-locals
def __init__(self,
name: str = '',
command: Optional[List[str]] = None,
environment: Optional[Dict[str, str]] = None,
artifacts: str = '',
report_artifacts: str = '',
artifact_prebuild_clean: bool = False,
directory: str = '',
critical: bool = False,
background: bool = False,
finish_background: bool = False,
code_report: bool = False,
pass_tag: str = '',
fail_tag: str = '',
if_env_set: str = '',
**kwargs) -> None:
self.name: str = name
self.directory: str = directory
self.code_report: bool = code_report
self.command: List[str] = command if command else []
self.environment: Dict[str, str] = environment if environment else {}
self.artifacts: str = artifacts
self.report_artifacts: str = report_artifacts
self.artifact_prebuild_clean: bool = artifact_prebuild_clean
self.critical: bool = critical
self.background: bool = background
self.finish_background: bool = finish_background
self.pass_tag: str = pass_tag
self.fail_tag: str = fail_tag
self.if_env_set: str = if_env_set
self.children: Optional['Configuration'] = None
self._extras: Dict[str, str] = {}
for key, value in kwargs.items():
self._extras[key] = value
def __repr__(self) -> str:
"""
This function simulates `dict`-like representation for string output. This is useful when printing contents of
:class:`Configuration` objects, as internally they wrap a list of :class:`Step` objects
:return: a `dict`-like string
>>> step = Step(name='foo', command=['bar'], my_var='baz')
>>> repr(step)
"{'name': 'foo', 'command': 'bar', 'my_var': 'baz'}"
"""
res = {k: v for k, v in self.__dict__.items() if v and k != '_extras'}
res.update(self._extras)
if len(self.command) == 1: # command should be printed as one string, instead of list
res['command'] = self.command[0]
return str(res)
def __eq__(self, other: Any) -> bool:
"""
        This function simulates a `dict`-like check for a match
:param other: `dict` to compare values, or :class:`Step` object to check equality
:return: `True` if `other` matches
>>> step1 = Step(name='foo', my_var='bar')
>>> step2 = Step(name='foo', my_var='bar')
>>> step3 = Step(name='foo', my_var='bar', critical=True)
>>> step1 == step1
True
>>> step1 == step2
True
>>> step1 == step3
False
>>> step1 == {'name': 'foo', 'my_var': 'bar'}
True
>>> step1 == {'name': 'foo', 'my_var': 'bar', 'critical': False}
True
>>> step1 == {'name': 'foo', 'my_var': 'bar', 'test': None}
True
>>> step1 == {'name': 'foo', 'my_var': 'bar', 'test': ''}
True
>>> step1 == {'name': 'foo', 'my_var': 'bar', 'test': ' '}
False
"""
if isinstance(other, Step):
return self == other.__dict__
if isinstance(other, dict):
for key, val in other.items():
if val and self[key] != val:
return False
return True
return super().__eq__(other)
def __getitem__(self, key: str) -> Any:
"""
        This function simulates `dict`-like legacy read access
.. note::
It is recommended to use field-like access to non user-defined attributes of Step.
This preserves data type information for mypy static analysis.
:param key: client-defined item
:return: client-defined value
>>> step = Step(name='foo', my_var='bar')
>>> step['name'] == step.name == 'foo'
True
Note that step['name'] has type 'Any', but step.name has type 'str'
>>> step['my_var']
'bar'
>>> step['test']
"""
# _extras are checked first - just in case _extras field is added manually
# do note that __setitem__ checks predefined fields first, however it's impossible to shadow them by
# modifying _extras
return self._extras.get(key, self.__dict__.get(key, None))
def __setitem__(self, key: str, value: Any) -> None:
"""
        This function simulates `dict`-like legacy write access
.. note::
It is recommended to use field-like access to non user-defined attributes of Step.
This allows static analysis to catch possible type mismatch.
:param key: client-defined key
:param value: client-defined value
>>> import warnings
>>> def do_and_get_warnings(f):
... with warnings.catch_warnings(record=True) as w:
... warnings.simplefilter("always")
... f()
... return w
>>> def assign_legacy(o, k, v):
... o[k] = v
>>> def assign_name_legacy(o, v):
... o['name'] = v
>>> def assign_name_new(o, v):
... o.name = v
>>> step = Step(name='foo', my_var='bar')
>>> do_and_get_warnings(lambda : assign_legacy(step, 'name', 'bar')) # doctest: +ELLIPSIS
[<warnings.WarningMessage object at ...>]
>>> step['name']
'bar'
>>> do_and_get_warnings(lambda : assign_name_new(step, 'baz')) # doctest: +ELLIPSIS
[]
>>> step['name']
'baz'
>>> do_and_get_warnings(lambda : assign_legacy(step, 'directory', 'foo')) # doctest: +ELLIPSIS
[<warnings.WarningMessage object at ...>]
>>> do_and_get_warnings(lambda : assign_legacy(step, 'test', 42))
[]
>>> do_and_get_warnings(lambda : assign_legacy(step, '_extras', {'name': 'baz'}))
[]
>>> step
{'name': 'baz', 'directory': 'foo', 'my_var': 'bar', 'test': 42, '_extras': {'name': 'baz'}}
"""
if key in self.__dict__ and key != '_extras':
warn("Re-defining the value of Step field. Please use var." + key + " to set it instead of "
"using var['" + key + "']")
self.__dict__[key] = value
else:
self._extras[key] = value
def get(self, key: str, default: Any = None) -> Any:
"""
        This function simulates `dict`-like legacy read access
.. note::
It is recommended to use field-like access to non user-defined attributes of Step.
This preserves data type information for mypy static analysis.
:param key: client-defined item
:param default: value to return if `key` is absent in `dict`
:return: client-defined value
>>> import warnings
>>> def do_and_get_warnings(f):
... with warnings.catch_warnings(record=True) as w:
... warnings.simplefilter("always")
... f()
... return w
>>> step = Step(name='foo', my_var='bar')
>>> do_and_get_warnings(lambda : step.get('name', 'test')) # doctest: +ELLIPSIS
[<warnings.WarningMessage object at ...>]
Note that step.get('name') has type 'Any', but step.name has type 'str'
>>> step.get('my_var', 'test')
'bar'
>>> step.get('my_var_2', 'test')
'test'
>>> step.get('command', 'test')
'test'
"""
result = self._extras.get(key)
if result:
return result
result = self.__dict__.get(key)
if result:
warn("Using legacy API to access configuration values. Please use var." + key + " instead.")
return result
return default
def __add__(self, other: 'Step') -> 'Step':
"""
        This function defines operator ``+`` for :class:`Step` class objects by
concatenating strings and contents of dictionaries. Note that `critical` attribute is not merged.
:param other: `Step` object
:return: new `Step` object, including all attributes from both `self` and `other` objects
>>> step1 = Step(name='foo', command=['foo'], critical=True, my_var1='foo')
>>> step2 = Step(name='bar', command=['bar'], background=True, my_var1='bar', my_var2='baz')
>>> step1 + step2
{'name': 'foobar', 'command': ['foo', 'bar'], 'background': True, 'my_var1': 'foobar', 'my_var2': 'baz'}
"""
return Step(
name=self.name + other.name,
command=self.command + other.command,
environment=combine(self.environment, other.environment),
artifacts=self.artifacts + other.artifacts,
report_artifacts=self.report_artifacts + other.report_artifacts,
artifact_prebuild_clean=self.artifact_prebuild_clean or other.artifact_prebuild_clean,
directory=self.directory + other.directory,
critical=False,
background=self.background or other.background,
finish_background=self.finish_background or other.finish_background,
code_report=self.code_report or other.code_report,
pass_tag=self.pass_tag + other.pass_tag,
fail_tag=self.fail_tag + other.fail_tag,
if_env_set=self.if_env_set + other.if_env_set,
**combine(self._extras, other._extras)
)
def replace_string(self, from_string: str, to_string: str) -> None:
"""
Replace instances of a string, used for pseudo-variables
:param from_string: string to replace, e.g. `${CODE_REPORT_FILE}`
:param to_string: value to put in place of `from_string`
>>> step = Step(name='foo test', command=['foo', 'baz', 'foobar'], myvar1='foo', myvar2='bar', myvar3=1)
>>> step.replace_string('foo', 'bar')
>>> step
{'name': 'foo test', 'command': ['bar', 'baz', 'barbar'], 'myvar1': 'bar', 'myvar2': 'bar', 'myvar3': 1}
>>> step = Step(artifacts='foo', report_artifacts='foo', directory='foo')
>>> step.replace_string('foo', 'bar')
>>> step
{'directory': 'bar', 'artifacts': 'bar', 'report_artifacts': 'bar'}
"""
self.command = [word.replace(from_string, to_string) for word in self.command]
self.artifacts = self.artifacts.replace(from_string, to_string)
self.report_artifacts = self.report_artifacts.replace(from_string, to_string)
self.directory = self.directory.replace(from_string, to_string)
for k, v in self._extras.items():
if isinstance(v, str):
self._extras[k] = v.replace(from_string, to_string)
def stringify_command(self) -> bool:
"""
Concatenates components of a command into one element
        :return: `True` if any of the command components contain a space
>>> step = Step(name='stringify test', command=['foo', 'bar', '--baz'])
>>> step.stringify_command()
False
>>> step
{'name': 'stringify test', 'command': 'foo bar --baz'}
>>> step.stringify_command()
True
"""
result = False
command_line = ""
for argument in self.command:
if " " in argument:
argument = "\"" + argument + "\""
result = True
command_line = command_line + " " + argument if command_line else argument
self.command = [command_line] if command_line else []
return result
DictType = TypeVar('DictType', bound=dict)
def combine(dictionary_a: DictType, dictionary_b: DictType) -> DictType:
# TODO: move to utils, as this is no longer specific to configurations
"""
Combine two dictionaries using plus operator for matching keys
:param dictionary_a: may have any keys and values
:param dictionary_b: may have any keys, but the values of keys, matching `dictionary_a`,
        should be compatible, so that `dictionary_a[key] + dictionary_b[key]` is a valid expression
:return: new dictionary containing all keys from both `dictionary_a` and `dictionary_b`;
for each matching key the value in resulting dictionary is a sum of two corresponding values
For example:
>>> combine(dict(attr_a = "a1", attr_b = ["b11", "b12"]), dict(attr_a = "a2", attr_b = ["b2"]))
    {'attr_a': 'a1a2', 'attr_b': ['b11', 'b12', 'b2']}
"""
Magic Link.
pymdownx.magiclink
An extension for Python Markdown.
Find http|ftp links and email addresses and turn them into actual links
MIT license.
Copyright (c) 2014 - 2017 <NAME> <<EMAIL>>
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions
of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
from __future__ import unicode_literals
from markdown import Extension
from markdown.inlinepatterns import LinkPattern, Pattern
from markdown.treeprocessors import Treeprocessor
from markdown import util as md_util
from . import util
from .util import PymdownxDeprecationWarning
import warnings
import re
import os
MAGIC_LINK = 1
MAGIC_AUTO_LINK = 2
RE_MAIL = r'''(?xi)
(
(?<![-/\+@a-z\d_])(?:[-+a-z\d_]([-a-z\d_+]|\.(?!\.))*) # Local part
(?<!\.)@(?:[-a-z\d_]+\.) # @domain part start
(?:(?:[-a-z\d_]|(?<!\.)\.(?!\.))*)[a-z]\b # @domain.end (allow multiple dot names)
(?![-@]) # Don't allow last char to be followed by these
)
'''
RE_LINK = r'''(?xi)
(
(?:(?<=\b)|(?<=_))(?:
(?:ht|f)tps?://(?:(?:[^_\W][-\w]*(?:\.[-\w.]+)+)|localhost)| # (http|ftp)://
(?P<www>w{3}\.)[^_\W][-\w]*(?:\.[-\w.]+)+ # www.
)
/?[-\w.?,!'(){}\[\]/+&@%$#=:"|~;]* # url path, fragments, and query stuff
(?:[^_\W]|[-/#@$+=]) # allowed end chars
)
'''
RE_SHORTHANDS = r'''(?x)
(?:
(?P<mention>(?<![a-zA-Z])@[a-zA-Z\d](?:[-a-zA-Z\d_]{0,37}[a-zA-Z\d])?)
(?:/(?P<mention_repo>[-._a-zA-Z\d]{0,99}[a-zA-Z\d]))? |
(?<![@/])(?:
(?:(?P<user>\b[a-zA-Z\d](?:[-a-zA-Z\d_]{0,37}[a-zA-Z\d])?)/)?
(?P<repo>\b[-._a-zA-Z\d]{0,99}[a-zA-Z\d])
)
(?:(?P<issue>(?:\#|!)[1-9][0-9]*)|(?P<commit>@[a-f\d]{40})) |
(?:(?<![a-zA-Z])(?P<issue2>(?:\#|!)[1-9][0-9]*)|(?P<commit2>(?<![@/])\b[a-f\d]{40}))
)\b
'''
RE_EXTERNAL_SHORTHANDS = r'''(?x)
(?:
(?P<mention>(?<![a-zA-Z])@(?P<provider>(?:github|gitlab|bitbucket):)[a-zA-Z\d](?:[-a-zA-Z\d_]{0,37}[a-zA-Z\d])?)
(?:/(?P<mention_repo>[-._a-zA-Z\d]{,99}[a-zA-Z\d]))? |
(?<![@/])(?:
(?P<provider2>\b(?:github|gitlab|bitbucket):)
(?P<user>[a-zA-Z\d](?:[-a-zA-Z\d_]{0,37}[a-zA-Z\d])?)/
(?P<repo>[-._a-zA-Z\d]{0,99}[a-zA-Z\d])
)
(?:(?P<issue>(?:\#|!)[1-9][0-9]*)|(?P<commit>@[a-f\d]{40}))
)\b
'''
RE_AUTOLINK = r'(?i)<((?:ht|f)tps?://[^>]*)>'
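def _demo_magiclink_regexes():
    """Illustrative sketch, not part of the original extension: exercise the raw
    RE_MAIL and RE_AUTOLINK expressions directly with `re`, outside the Markdown
    pipeline. The sample strings are made up for the demonstration."""
    mail = re.search(RE_MAIL, "reach me at someone@example.com please")
    autolink = re.search(RE_AUTOLINK, "see <https://example.com/docs> for details")
    print(mail.group(1))      # someone@example.com
    print(autolink.group(1))  # https://example.com/docs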
RE_REPO_LINK = re.compile(
r'''(?xi)
(?:
(?P<github>(?P<github_base>https://(?:w{3}\.)?github.com/(?P<github_user_repo>[^/]+/[^/]+))/
(?:issues/(?P<github_issue>\d+)/?|
pull/(?P<github_pull>\d+)/?|
commit/(?P<github_commit>[\da-f]{40})/?)) |
(?P<bitbucket>(?P<bitbucket_base>https://(?:w{3}\.)?bitbucket.org/(?P<bitbucket_user_repo>[^/]+/[^/]+))/
(?:issues/(?P<bitbucket_issue>\d+)(?:/[^/]+)?/?|
pull-requests/(?P<bitbucket_pull>\d+)(?:/[^/]+(?:/diff)?)?/?|
commits/commit/(?P<bitbucket_commit>[\da-f]{40})/?)) |
(?P<gitlab>(?P<gitlab_base>https://(?:w{3}\.)?gitlab.com/(?P<gitlab_user_repo>[^/]+/[^/]+))/
(?:issues/(?P<gitlab_issue>\d+)/?|
merge_requests/(?P<gitlab_pull>\d+)/?|
commit/(?P<gitlab_commit>[\da-f]{40})/?))
)
'''
)
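def _demo_repo_link_regex():
    """Illustrative sketch, not part of the original extension: RE_REPO_LINK
    recognises full issue/pull/commit URLs and exposes the pieces as named
    groups. The URL below is a made-up example."""
    m = RE_REPO_LINK.match("https://github.com/user/repo/issues/123")
    print(m.group('github_user_repo'))  # user/repo
    print(m.group('github_issue'))      # 123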
PROVIDER_INFO = {
"gitlab": {
"provider": "GitLab",
"url": "https://gitlab.com",
"issue": "https://gitlab.com/%s/%s/issues/%s",
"pull": "https://gitlab.com/%s/%s/merge_requests/%s",
"commit": "https://gitlab.com/%s/%s/commit/%s",
"hash_size": 8
},
"bitbucket": {
"provider": "Bitbucket",
"url": "https://bitbucket.org",
"issue": "https://bitbucket.org/%s/%s/issues/%s",
"pull": "https://bitbucket.org/%s/%s/pull-requests/%s",
"commit": "https://bitbucket.org/%s/%s/commits/commit/%s",
"hash_size": 7
},
"github": {
"provider": "GitHub",
"url": "https://github.com",
"issue": "https://github.com/%s/%s/issues/%s",
"pull": "https://github.com/%s/%s/pull/%s",
"commit": "https://github.com/%s/%s/commit/%s",
"hash_size": 7
}
}
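def _demo_provider_info():
    """Illustrative sketch, not part of the original extension: PROVIDER_INFO
    entries are plain %-format URL templates plus a display hash length, used
    below by the shortener and shorthand patterns. The user, repo and hash here
    are made up."""
    info = PROVIDER_INFO['github']
    print(info['issue'] % ('user', 'repo', 42))   # https://github.com/user/repo/issues/42
    full_hash = 'a' * 40
    print(full_hash[0:info['hash_size']])         # 7-character short hash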
class MagicShortenerTreeprocessor(Treeprocessor):
"""Treeprocessor that finds repo issue and commit links and shortens them."""
# Repo link types
ISSUE = 0
PULL = 1
COMMIT = 2
def __init__(self, md, base_url, base_user_url, labels):
"""Initialize."""
self.base = base_url
self.base_user = base_user_url
self.repo_labels = labels
self.labels = {
"github": "GitHub",
"bitbucket": "Bitbucket",
"gitlab": "GitLab"
}
Treeprocessor.__init__(self, md)
def shorten(self, link, provider, my_repo, my_user, link_type, user_repo, value, url, hash_size):
"""Shorten url."""
label = PROVIDER_INFO[provider]['provider']
prov_class = 'magiclink-%s' % provider
class_attr = link.get('class', '')
class_name = class_attr.split(' ') if class_attr else []
if 'magiclink' not in class_name:
class_name.append('magiclink')
if prov_class not in class_name:
class_name.append(prov_class)
if link_type is self.COMMIT:
# user/repo@hash
repo_label = self.repo_labels.get('commit', 'Commit')
if my_repo:
text = value[0:hash_size]
elif my_user:
text = '%s@%s' % (user_repo.split('/')[1], value[0:hash_size])
else:
text = '%s@%s' % (user_repo, value[0:hash_size])
link.text = md_util.AtomicString(text)
if 'magiclink-commit' not in class_name:
class_name.append('magiclink-commit')
link.set(
'title',
'%s %s: %s@%s' % (label, repo_label, user_repo.rstrip('/'), value[0:hash_size])
)
else:
# user/repo#(issue|pull)
if link_type == self.ISSUE:
issue_type = self.repo_labels.get('issue', 'Issue')
separator = '#'
if 'magiclink-issue' not in class_name:
class_name.append('magiclink-issue')
else:
issue_type = self.repo_labels.get('pull', 'Pull Request')
separator = '!'
if 'magiclink-pull' not in class_name:
class_name.append('magiclink-pull')
if my_repo:
text = separator + value
elif my_user:
text = user_repo.split('/')[1] + separator + value
else:
text = user_repo + separator + value
link.text = md_util.AtomicString(text)
link.set('title', '%s %s: %s%s%s' % (label, issue_type, user_repo.rstrip('/'), separator, value))
link.set('class', ' '.join(class_name))
def get_provider(self, match):
"""Get the provider and hash size."""
# Set provider specific variables
if match.group('github'):
provider = 'github'
elif match.group('bitbucket'):
provider = 'bitbucket'
elif match.group('gitlab'):
provider = 'gitlab'
return provider
def get_type(self, provider, match):
"""Get the link type."""
# Gather info about link type
if match.group(provider + '_commit') is not None:
value = match.group(provider + '_commit')
link_type = self.COMMIT
elif match.group(provider + '_pull') is not None:
value = match.group(provider + '_pull')
link_type = self.PULL
else:
value = match.group(provider + '_issue')
link_type = self.ISSUE
return value, link_type
def is_my_repo(self, provider, match):
"""Check if link is from our specified user and repo."""
# See if these links are from the specified repo.
return self.base and match.group(provider + '_base') + '/' == self.base
def is_my_user(self, provider, match):
"""Check if link is from our specified user."""
return self.base_user and match.group(provider + '_base').startswith(self.base_user)
def run(self, root):
"""Shorten popular git repository links."""
self.hide_protocol = self.config['hide_protocol']
links = root.iter('a')
for link in links:
has_child = len(list(link))
is_magic = link.attrib.get('magiclink')
href = link.attrib.get('href', '')
text = link.text
if is_magic:
del link.attrib['magiclink']
# We want a normal link. No subelements embedded in it, just a normal string.
if has_child or not text: # pragma: no cover
continue
# Make sure the text matches the href. If needed, add back protocol to be sure.
# Not all links will pass through MagicLink, so we try both with and without protocol.
if (text == href or (is_magic and self.hide_protocol and ('https://' + text) == href)):
m = RE_REPO_LINK.match(href)
if m:
provider = self.get_provider(m)
my_repo = self.is_my_repo(provider, m)
my_user = my_repo or self.is_my_user(provider, m)
value, link_type = self.get_type(provider, m)
# All right, everything set, let's shorten.
self.shorten(
link,
provider,
my_repo,
my_user,
link_type,
m.group(provider + '_user_repo'),
value,
href,
PROVIDER_INFO[provider]['hash_size']
)
return root
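def _demo_magiclink_usage():
    """Illustrative sketch, not a definitive example: assuming this module is
    installed and registered under the usual name 'pymdownx.magiclink', bare
    URLs and e-mail addresses in Markdown source are turned into links."""
    import markdown
    html = markdown.markdown(
        "Report bugs at https://github.com/user/repo or mail someone@example.com",
        extensions=['pymdownx.magiclink']
    )
    print(html)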
class MagiclinkPattern(LinkPattern):
"""Convert html, ftp links to clickable links."""
def handleMatch(self, m):
"""Handle URL matches."""
el = md_util.etree.Element("a")
el.text = md_util.AtomicString(m.group(2))
if m.group("www"):
href = "http://%s" % m.group(2)
else:
href = m.group(2)
if self.config['hide_protocol']:
el.text = md_util.AtomicString(el.text[el.text.find("://") + 3:])
el.set("href", self.sanitize_url(self.unescape(href.strip())))
if self.config.get('repo_url_shortener', False):
el.set('magiclink', md_util.text_type(MAGIC_LINK))
return el
class MagiclinkAutoPattern(Pattern):
"""Return a link Element given an autolink `<http://example/com>`."""
def handleMatch(self, m):
"""Return link optionally without protocol."""
el = md_util.etree.Element("a")
el.set('href', self.unescape(m.group(2)))
el.text = md_util.AtomicString(m.group(2))
if self.config['hide_protocol']:
el.text = md_util.AtomicString(el.text[el.text.find("://") + 3:])
if self.config.get('repo_url_shortener', False):
el.set('magiclink', md_util.text_type(MAGIC_AUTO_LINK))
return el
class MagicMailPattern(LinkPattern):
"""Convert emails to clickable email links."""
def email_encode(self, code):
"""Return entity definition by code, or the code if not defined."""
return "%s#%d;" % (md_util.AMP_SUBSTITUTE, code)
def handleMatch(self, m):
"""Handle email link patterns."""
el = md_util.etree.Element("a")
email = self.unescape(m.group(2))
href = "mailto:%s" % email
el.text = md_util.AtomicString(''.join([self.email_encode(ord(c)) for c in email]))
el.set("href", ''.join([md_util.AMP_SUBSTITUTE + '#%d;' % ord(c) for c in href]))
return el
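def _demo_email_encoding():
    """Illustrative sketch, not part of the original extension: the same entity
    encoding as MagicMailPattern.email_encode, written with a literal '&' so the
    output is readable here; Python-Markdown itself uses AMP_SUBSTITUTE and swaps
    it for '&' during serialization. The address is made up."""
    email = "someone@example.com"
    print(''.join("&#%d;" % ord(c) for c in email))  # &#115;&#111;... obfuscated address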
class MagiclinkShorthandPattern(Pattern):
"""Convert emails to clickable email links."""
def __init__(self, pattern, md, user, repo, provider, labels, external=False):
"""Initialize."""
self.user = user
self.repo = repo
self.labels = labels
self.provider = provider
self.external = external
Pattern.__init__(self, pattern, md)
def process_mention(self, el, provider, mention):
"""Process mention."""
prov = provider if provider else self.provider
el.set('href', '%s/%s' % (PROVIDER_INFO[prov]['url'], mention[1:]))
el.set(
'title',
"%s %s: %s" % (PROVIDER_INFO[prov]['provider'], self.labels.get('mention', "User"), mention[1:])
)
el.set('class', 'magiclink magiclink-%s magiclink-mention' % prov)
el.text = md_util.AtomicString(mention)
return el
def process_mention_repo(self, el, provider, mention, repo_name):
"""Process mentioned repository."""
prov = provider if provider else self.provider
el.set('href', '%s/%s/%s' % (PROVIDER_INFO[prov]['url'], mention[1:], repo_name))
el.set(
'title',
"%s %s: %s/%s" % (
PROVIDER_INFO[prov]['provider'], self.labels.get('repository', 'Repository'), mention[1:], repo_name
)
)
el.set('class', 'magiclink magiclink-%s magiclink-repository' % prov)
user = mention[1:]
# if user == self.user and prov == self.provider:
# el.text = md_util.AtomicString(repo_name)
# else:
el.text = md_util.AtomicString('%s/%s' % (user, repo_name))
return el
def process_issues(self, el, provider, user, repo, issue):
"""Process issues."""
my_repo = not repo
my_user = not user
if my_repo:
repo = self.repo
if my_user:
user = self.user
prov = provider if provider else self.provider
issue_value = issue[1:]
issue_type = issue[:1]
if issue_type == '#':
issue_link = PROVIDER_INFO[prov]['issue']
issue_label = self.labels.get('issue', 'Issue')
class_name = 'magiclink-issue'
else:
issue_link = PROVIDER_INFO[prov]['pull']
issue_label = self.labels.get('pull', 'Pull Request')
class_name = 'magiclink-pull'
if my_repo:
text = '%s%s' % (issue_type, issue_value)
elif my_user:
text = '%s%s%s' % (repo, issue_type, issue_value)
else:
text = '%s/%s%s%s' % (user, repo, issue_type, issue_value)
el.set('href', issue_link % (user, repo, issue_value))
el.text = md_util.AtomicString(text)
el.set('class', 'magiclink magiclink-%s %s' % (prov, class_name))
el.set(
'title',
'%s %s: %s/%s%s%s' % (
PROVIDER_INFO[prov]['provider'],
issue_label,
user,
repo,
issue_type,
issue_value
)
)
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
"""Determining whether files are being measured/reported or not."""
# For finding the stdlib
import atexit
import inspect
import itertools
import os
import platform
import re
import sys
import traceback
from coverage import env
from coverage.backward import code_object
from coverage.disposition import FileDisposition, disposition_init
from coverage.files import TreeMatcher, FnmatchMatcher, ModuleMatcher
from coverage.files import prep_patterns, find_python_files, canonical_filename
from coverage.misc import CoverageException
from coverage.python import source_for_file, source_for_morf
# Pypy has some unusual stuff in the "stdlib". Consider those locations
# when deciding where the stdlib is. These modules are not used for anything,
# they are modules importable from the pypy lib directories, so that we can
# find those directories.
_structseq = _pypy_irc_topic = None
if env.PYPY:
try:
import _structseq
except ImportError:
pass
try:
import _pypy_irc_topic
except ImportError:
pass
def canonical_path(morf, directory=False):
"""Return the canonical path of the module or file `morf`.
If the module is a package, then return its directory. If it is a
module, then return its file, unless `directory` is True, in which
case return its enclosing directory.
"""
morf_path = canonical_filename(source_for_morf(morf))
if morf_path.endswith("__init__.py") or directory:
morf_path = os.path.split(morf_path)[0]
return morf_path
def name_for_module(filename, frame):
"""Get the name of the module for a filename and frame.
For configurability's sake, we allow __main__ modules to be matched by
their importable name.
If loaded via runpy (aka -m), we can usually recover the "original"
full dotted module name, otherwise, we resort to interpreting the
file name to get the module's name. In the case that the module name
can't be determined, None is returned.
"""
module_globals = frame.f_globals if frame is not None else {}
if module_globals is None: # pragma: only ironpython
# IronPython doesn't provide globals: https://github.com/IronLanguages/main/issues/1296
module_globals = {}
dunder_name = module_globals.get('__name__', None)
if isinstance(dunder_name, str) and dunder_name != '__main__':
# This is the usual case: an imported module.
return dunder_name
loader = module_globals.get('__loader__', None)
for attrname in ('fullname', 'name'): # attribute renamed in py3.2
if hasattr(loader, attrname):
fullname = getattr(loader, attrname)
else:
continue
if isinstance(fullname, str) and fullname != '__main__':
# Module loaded via: runpy -m
return fullname
# Script as first argument to Python command line.
inspectedname = inspect.getmodulename(filename)
if inspectedname is not None:
return inspectedname
else:
return dunder_name
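def _demo_name_for_module():
    """Illustrative sketch, not part of coverage.py: when the frame's globals
    carry a usable __name__ it wins; otherwise the loader and then the file
    name are consulted, as described in the docstring above."""
    import inspect
    print(name_for_module(__file__, inspect.currentframe()))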
def module_is_namespace(mod):
"""Is the module object `mod` a PEP420 namespace module?"""
return hasattr(mod, '__path__') and getattr(mod, '__file__', None) is None
def module_has_file(mod):
"""Does the module object `mod` have an existing __file__ ?"""
mod__file__ = getattr(mod, '__file__', None)
if mod__file__ is None:
return False
return os.path.exists(mod__file__)
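def _demo_module_predicates():
    """Illustrative sketch, not part of coverage.py: `os` is an ordinary module
    with a real __file__, so it is neither a PEP420 namespace package nor
    file-less."""
    import os as os_module
    print(module_is_namespace(os_module))  # False
    print(module_has_file(os_module))      # True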
class InOrOut(object):
"""Machinery for determining what files to measure."""
def __init__(self, warn):
self.warn = warn
# The matchers for should_trace.
self.source_match = None
self.source_pkgs_match = None
self.pylib_paths = self.cover_paths = None
self.pylib_match = self.cover_match = None
self.include_match = self.omit_match = None
self.plugins = []
self.disp_class = FileDisposition
# The source argument can be directories or package names.
self.source = []
self.source_pkgs = []
self.source_pkgs_unmatched = []
self.omit = self.include = None
def configure(self, config):
"""Apply the configuration to get ready for decision-time."""
for src in config.source or []:
if os.path.isdir(src):
self.source.append(canonical_filename(src))
else:
self.source_pkgs.append(src)
self.source_pkgs_unmatched = self.source_pkgs[:]
self.omit = prep_patterns(config.run_omit)
self.include = prep_patterns(config.run_include)
# The directories for files considered "installed with the interpreter".
self.pylib_paths = set()
if not config.cover_pylib:
# Look at where some standard modules are located. That's the
# indication for "installed with the interpreter". In some
# environments (virtualenv, for example), these modules may be
# spread across a few locations. Look at all the candidate modules
# we've imported, and take all the different ones.
for m in (atexit, inspect, os, platform, _pypy_irc_topic, re, _structseq, traceback):
if m is not None and hasattr(m, "__file__"):
self.pylib_paths.add(canonical_path(m, directory=True))
if _structseq and not hasattr(_structseq, '__file__'):
# PyPy 2.4 has no __file__ in the builtin modules, but the code
# objects still have the file names. So dig into one to find
# the path to exclude. The "filename" might be synthetic,
# don't be fooled by those.
structseq_file = code_object(_structseq.structseq_new).co_filename
if not structseq_file.startswith("<"):
self.pylib_paths.add(canonical_path(structseq_file))
# To avoid tracing the coverage.py code itself, we skip anything
# located where we are.
self.cover_paths = [canonical_path(__file__, directory=True)]
if env.TESTING:
# Don't include our own test code.
self.cover_paths.append(os.path.join(self.cover_paths[0], "tests"))
# When testing, we use PyContracts, which should be considered
# part of coverage.py, and it uses six. Exclude those directories
# just as we exclude ourselves.
import contracts
import six
for mod in [contracts, six]:
self.cover_paths.append(canonical_path(mod))
# Create the matchers we need for should_trace
if self.source or self.source_pkgs:
self.source_match = TreeMatcher(self.source)
self.source_pkgs_match = ModuleMatcher(self.source_pkgs)
else:
if self.cover_paths:
self.cover_match = TreeMatcher(self.cover_paths)
if self.pylib_paths:
self.pylib_match = TreeMatcher(self.pylib_paths)
if self.include:
self.include_match = FnmatchMatcher(self.include)
if self.omit:
self.omit_match = FnmatchMatcher(self.omit)
def should_trace(self, filename, frame=None):
"""Decide whether to trace execution in `filename`, with a reason.
This function is called from the trace function. As each new file name
is encountered, this function determines whether it is traced or not.
Returns a FileDisposition object.
"""
original_filename = filename
disp = disposition_init(self.disp_class, filename)
def nope(disp, reason):
"""Simple helper to make it easy to return NO."""
disp.trace = False
disp.reason = reason
return disp
if frame is not None:
# Compiled Python files have two file names: frame.f_code.co_filename is
# the file name at the time the .pyc was compiled. The second name is
# __file__, which is where the .pyc was actually loaded from. Since
# .pyc files can be moved after compilation (for example, by being
# installed), we look for __file__ in the frame and prefer it to the
# co_filename value.
dunder_file = frame.f_globals and frame.f_globals.get('__file__')
if dunder_file:
filename = source_for_file(dunder_file)
if original_filename and not original_filename.startswith('<'):
orig = os.path.basename(original_filename)
if orig != os.path.basename(filename):
# Files shouldn't be renamed when moved. This happens when
# exec'ing code. If it seems like something is wrong with
# the frame's file name, then just use the original.
filename = original_filename
if not filename:
# Empty string is pretty useless.
return nope(disp, "empty string isn't a file name")
if filename.startswith('memory:'):
return nope(disp, "memory isn't traceable")
if filename.startswith('<'):
# Lots of non-file execution is represented with artificial
# file names like "<string>", "<doctest readme.txt[0]>", or
# "<exec_function>". Don't ever trace these executions, since we
# can't do anything with the data later anyway.
return nope(disp, "not a real file name")
# pyexpat does a dumb thing, calling the trace function explicitly from
# C code with a C file name.
if re.search(r"[/\\]Modules[/\\]pyexpat.c", filename):
return nope(disp, "pyexpat lies about itself")
# Jython reports the .class file to the tracer, use the source file.
if filename.endswith("$py.class"):
filename = filename[:-9] + ".py"
canonical = canonical_filename(filename)
disp.canonical_filename = canonical
# Try the plugins, see if they have an opinion about the file.
plugin = None
for plugin in self.plugins.file_tracers:
if not plugin._coverage_enabled:
continue
try:
file_tracer = plugin.file_tracer(canonical)
if file_tracer is not None:
file_tracer._coverage_plugin = plugin
disp.trace = True
disp.file_tracer = file_tracer
if file_tracer.has_dynamic_source_filename():
disp.has_dynamic_filename = True
else:
disp.source_filename = canonical_filename(
file_tracer.source_filename()
)
break
except Exception:
self.warn(
"Disabling plug-in %r due to an exception:" % (plugin._coverage_plugin_name)
)
traceback.print_exc()
plugin._coverage_enabled = False
continue
else:
# No plugin wanted it: it's Python.
disp.trace = True
disp.source_filename = canonical
if not disp.has_dynamic_filename:
if not disp.source_filename:
raise CoverageException(
"Plugin %r didn't set source_filename for %r" %
(plugin, disp.original_filename)
)
reason = self.check_include_omit_etc(disp.source_filename, frame)
if reason:
nope(disp, reason)
return disp
def check_include_omit_etc(self, filename, frame):
"""Check a file name against the include, omit, etc, rules.
Returns a string or None. String means, don't trace, and is the reason
why. None means no reason found to not trace.
"""
modulename = name_for_module(filename, frame)
# If the user specified source or include, then that's authoritative
# about the outer bound of what to measure and we don't have to apply
# any canned exclusions. If they didn't, then we have to exclude the
# stdlib and coverage.py directories.
if self.source_match:
if self.source_pkgs_match.match(modulename):
if modulename in self.source_pkgs_unmatched:
self.source_pkgs_unmatched.remove(modulename)
elif not self.source_match.match(filename):
return "falls outside the --source trees"
        elif self.include_match:
            if not self.include_match.match(filename):
                return "falls outside the --include trees"
| |
# Open the database to keep "done" records,
# and delete moldy, old in-progress records.
g.mdb = searchdatabasemongo.CSearchDatabase(g.sSearchDbMongoName,
g.sSearchDbProgressCollectionName,
g.sSearchDbDoneCollectionName)
g.mdb.fnvDeleteProgressCollection()
# Get the set of instructions for today from database.
NTRC.tracef(0,"MAIN","proc querydict2|%s|"
% (list(util.fngSortDictItemsByKeys(dQuery))))
itAllInstructions = searchspace.fndgGetSearchSpace(g.sInsDir, g.sInsTyp,
dQuery)
# Start the start-end threads.
# Define queues.
# Need a Multiprocessing Manager to own the output queue. (Do we?) (Yes.)
mpmgr = mp.Manager()
g.qJobs = mp.Queue()
g.qOutput = mpmgr.Queue()
# Start pool of worker processes.
g.cWorkersInst = cworkers.CWorkers(nservers=g.nCores
, qinputjobs=g.qJobs, qoutputdata=g.qOutput)
# If this wasn't just a listonly run, do all the cases.
if not g.sListOnly.startswith("Y"):
NTRC.ntracef(3, "MAIN", "proc all instr|%s|" % (g.lGiantInstr))
else:
NTRC.ntracef(0, "MAIN", "Listonly.")
nRuns = fnnProcessAllInstructions(itAllInstructions)
NTRC.ntracef(0, "MAIN", "End queued all runs ncases|%s|" % (g.nCases,))
#===========================================================
# f n n P r o c e s s A l l I n s t r u c t i o n s
@catchex
@ntracef("MAIN")
def fnnProcessAllInstructions(myitInstructionIterator):
'''
Get the set of instructions that match the user's criteria for this batch,
and run them one by one.
Each instruction (run) is executed once for each random seed value.
Count the number of runs, and don't exceed the user's limit, if any.
If the execution reports a serious error, stop the loop.
'''
nRunNumber = 0
maxcount = int(g.nTestLimit)
# Is this a completely fake test run? Replace templates.
if g.sTestFib.startswith("Y"):
g.lTemplates = g.lFibTemplates
# Process each instruction in turn.
for dRawInstruction in myitInstructionIterator:
NTRC.ntracef(3,"MAIN","proc main raw instruction\n|%s|"
% (dRawInstruction))
dInstruction = fndMaybeEnhanceInstruction(dRawInstruction)
NTRC.ntracef(3,"MAIN","proc main enhanced instruction\n|%s|"
% (dInstruction))
# Execute each instruction many times, once for each random seed value.
nRunNumber += 1
fnnProcessOneInstructionManyTimes(nRunNumber
, dInstruction)
# If user asked for a short test run today, maybe stop now.
maxcount -= 1
if int(g.nTestLimit) > 0 and maxcount <= 0: break
# That's all, folks. All instructions have been queued and will
# eventually be processed.
# Send the shutdown messages to worker processes.
g.cWorkersInst.Close()
return nRunNumber
#===========================================================
# f n n P r o c e s s O n e I n s t r u c t i o n M a n y T i m e s
@catchex
@ntracef("MAIN")
def fnnProcessOneInstructionManyTimes(mynRunNumber, mydInstruction):
'''
Process a single instruction (set of params) once for each of a
predetermined number and sequence of random seeds.
Assign an id to each run that consists of the instruction hash (_id)
followed by _<seed number>.
'''
lManyInstr = []
lSeedsToUse = fnlGetRandomSeeds(util.fnIntPlease(g.nRandomSeeds),
g.sRandomSeedFile)
mydInstruction["sBaseId"] = str(mydInstruction["_id"])
for (nIdx, nMaybeSeed) in enumerate(lSeedsToUse):
# Adjust run number and mongo id because there are now
# multiple seeds and runs per instruction.
sRunId = str(mynRunNumber) + "." + str(nIdx+1)
sId = str(mydInstruction["sBaseId"])
mydInstruction["_id"] = sId + "_" + str(nIdx+1)
mydInstruction["_runid"] = sRunId
nStatus = 0
try:
nSeed = int(nMaybeSeed)
except ValueError:
raise ValueError("Random seed not integer |%s|" % (nMaybeSeed))
mydInstruction["nRandomseed"] = nSeed
tOneInstr = fntProcessOneInstruction(sRunId, mydInstruction, nSeed)
return g.nRandomSeeds
#===========================================================
# f n t P r o c e s s O n e I n s t r u c t i o n
@catchex
@ntracef("MAIN")
def fntProcessOneInstruction(mysRunNumber, mydInstruction, mynSeed):
'''
Process one single instruction for one run.
Slightly convoluted logic required here.
If just testing today, print instruction contents but do not run it.
If the instruction has already been processed, skip over it unless
the user requires it to be redone.
("Has been processed" = there is a MongoDB "done" record with the
same key. The key is the concatenation of all options and the
seed number.)
The instruction is actually executed by some worker process at
sometime in the future. We just queue the instruction for processing.
'''
sInstructionId = str(mydInstruction["_id"])
# If this instruction has already been processed, maybe skip it.
bIsItDone = g.mdb.fnbIsItDone(sInstructionId)
if bIsItDone and not g.sRedo.startswith("Y"):
# If the user has not insisted on redo, skip it.
NTRC.ntracef(0,"MAIN","proc skip item already done run|%s| "
"id|%s| copies|%s| lifem|%s|"
% (mysRunNumber, sInstructionId, mydInstruction["nCopies"],
mydInstruction["nLifem"]))
else:
# If the user specifies, redo this case even if was done before.
if g.sRedo.startswith("Y"):
NTRC.ntracef(0,"MAIN","proc force redo of run|%s| id|%s| "
% (mysRunNumber, sInstructionId))
# Well, maybe. Could be listonly.
if g.sListOnly.startswith("Y"):
NTRC.ntracef(0,"MAIN","proc ListOnly, item run|%s| "
"ncopies|%s| lifem|%s| id|%s| dict|%s|"
% (mysRunNumber, mydInstruction["nCopies"], mydInstruction["nLifem"],
sInstructionId, list(util.fngSortDictItemsByKeys(mydInstruction))))
else:
# Okay, not listonly, therefore really do this instruction.
mydInstruction["nRandomSeed"] = mynSeed
NTRC.ntracef(0,"MAIN","proc queue instr, item run|%s| "
"ncopies|%s| lifem|%s| id|%s|"
% (mysRunNumber, mydInstruction["nCopies"], mydInstruction["nLifem"],
sInstructionId))
# Format commands to be executed by somebody.
g.sShelfLogFileName = g.cFmt.msGentlyFormat(
g.sShelfLogFileTemplate, mydInstruction, g, CG)
g.lCommands = []
for sTemplate in g.lTemplates:
sCmd = g.cFmt.msGentlyFormat(sTemplate, mydInstruction, g, CG)
g.lCommands.append(sCmd)
# Where do files go, and what are they called.
g.sActorLogDir = g.cFmt.msGentlyFormat(
g.sActorLogDirTemplate, mydInstruction, g, CG)
# Record that this job will soon be running.
mydInstruction["starttime"] = util.fnsGetTimeStamp()
g.mdb.fndInsertProgressRecord(mydInstruction["_id"], mydInstruction)
# Return the full instruction to caller, too.
tThisInst = cworkers.tInstruction(cmdlist=g.lCommands
, logname=g.sShelfLogFileName + "_case.log"
, logdir=g.sActorLogDir
)
# Send the instruction out to be done. Just drop it in the
# queue (think of it as the outbox).
# g.qJobs.put(tThisInst)
fnSendOneJobSlowly(tThisInst, g.qJobs)
g.nCases += 1
return tThisInst
# f n S e n d O n e J o b S l o w l y
@ntracef("QTHR")
def fnSendOneJobSlowly(myInstruction, myqJobs):
''' Queue this instruction as a job.
    If the queue size gets out of hand, wait for
    some jobs to be removed from it.
'''
# If qsize > hi threshold, wait for it to come down.
""" Boy, the walrus operator would really come in handy here.
But that would restrict us to Python versions >= 3.8.
if (nQSize := myqJobs.qsize()) > g.nQThreshHi:
NTRC.ntracef(3, "QTHR", "proc qsize over hi |%s|" % (nQSize))
while (nQSize := myqJobs.qsize()) > g.nQThreshLo:
time.sleep(g.nQThreshSleep)
NTRC.ntracef(3, "QTHR", "proc qsize under lo |%s|" % (nQSize))
"""
nQSize = myqJobs.qsize()
if nQSize > g.nQThreshHi:
NTRC.ntracef(3, "QTHR", "proc qsize over hi |%s|" % (nQSize))
while True:
time.sleep(g.nQThreshSleep)
nQSize = myqJobs.qsize()
if nQSize < g.nQThreshLo:
break
NTRC.ntracef(3, "QTHR", "proc qsize under lo |%s|" % (nQSize))
# Okay, now queue the job.
myqJobs.put(myInstruction)
return nQSize
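# f n D e m o Q u e u e B a c k p r e s s u r e
def fnDemoQueueBackpressure():
    ''' Illustrative sketch, not part of the original broker: the same
    hi/lo watermark idea as fnSendOneJobSlowly, with made-up thresholds
    and the queue drained in-process instead of by worker processes.
    Note that Queue.qsize() is approximate and unavailable on some
    platforms (e.g., macOS).
    '''
    import multiprocessing
    qDemo = multiprocessing.Queue()
    nThreshHi, nThreshLo = 10, 3
    for nJob in range(25):
        if qDemo.qsize() > nThreshHi:
            while qDemo.qsize() > nThreshLo:
                qDemo.get()         # stand-in for workers taking jobs
        qDemo.put(nJob)
    print("final queue size", qDemo.qsize())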
#===========================================================
# Utility functions:
# - Get list of random seeds
# - Get environment vars that control timing
# - Keep text log of commands to broker
# - Check for valid dirs
#===========================================================
# f n l G e t R a n d o m S e e d s
@catchex
@ntracef("MAIN")
def fnlGetRandomSeeds(mynHowMany, mysFilename):
'''
Return a list of the first mynHowMany random seeds from the
file specified.
This is the primitive version that does not permit blank lines,
comments, or other detritus in the list of seed numbers.
'''
with open(mysFilename, "r") as fhSeeds:
lsSeeds = [(next(fhSeeds)) for _ in range(mynHowMany)]
lnSeeds = [util.fnIntPlease(_) for _ in lsSeeds]
return lnSeeds
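# f n l G e t R a n d o m S e e d s T o l e r a n t
def fnlGetRandomSeedsTolerant(mynHowMany, mysFilename):
    '''
    Illustrative sketch, not part of the original: same contract as
    fnlGetRandomSeeds above, but tolerant of blank lines and '#' comments
    in the seed file, which the primitive version does not permit.
    '''
    lnSeeds = []
    with open(mysFilename, "r") as fhSeeds:
        for sLine in fhSeeds:
            sLine = sLine.strip()
            if not sLine or sLine.startswith("#"):
                continue
            lnSeeds.append(int(sLine))
            if len(lnSeeds) >= mynHowMany:
                break
    return lnSeeds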
#===========================================================
# f n v G e t E n v i r o n m e n t O v e r r i d e s
@catchex
@ntracef("MAIN")
def fnvGetEnvironmentOverrides():
'''
Get a few arguments from environment variables, which are
allowed to override built-in defaults.
'''
# Allow user to override number of cores to use today.
# Utility routine looks at HW and possible user envir override.
g.nCores = brokergetcores.fnnGetResolvedCores()
NTRC.ntracef(0, "MAIN", "proc ncores|%s|" % (g.nCores))
# If you want to overbook the available cores, do it here.
g.nParallel = g.nCores # Sorry for the name change.
# Allow user to override the polite interval to use today.
try:
g.nPoliteTimer = int(os.getenv("NPOLITE", CG.nPoliteTimer))
g.nCoreTimer = g.nPoliteTimer # Sorry for the name change.
NTRC.ntracef(0, "MAIN", "proc politetimer|%s|msec" % (g.nPoliteTimer))
except (ValueError, TypeError):
raise TypeError("Environment variable NPOLITE must be "
"an integer number of milliseconds.")
"""Contains plot functions."""
from platform import system
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import gridspec
from matplotlib.lines import Line2D
from matplotlib.patches import Polygon
from . import utils as ut
from . import salt_utils as salt_ut
from . import nb_fun as nbf
def plt_maximize():
"""Enable full screen.
Notes
-----
Come from
https://stackoverflow.com/questions/12439588/how-to-maximize-a-plt-show-window-using-python/22418354#22418354
"""
# See discussion on:
# https://stackoverflow.com/questions/12439588/how-to-maximize-a-plt-show-window-using-python
backend = plt.get_backend()
cfm = plt.get_current_fig_manager()
if backend == "wxAgg":
cfm.frame.Maximize(True)
elif backend == "TkAgg":
        if system() == "Windows":  # platform.system() reports "Windows", not "win32"
cfm.window.state('zoomed') # This is windows only
else:
cfm.resize(*cfm.window.maxsize())
elif backend in ('QT4Agg', 'QT5Agg'):
cfm.window.showMaximized()
elif callable(getattr(cfm, "full_screen_toggle", None)):
if not getattr(cfm, "flag_is_max", None):
cfm.full_screen_toggle()
cfm.flag_is_max = True
else:
raise RuntimeError("plt_maximize() is not implemented for current backend:", backend)
def param_text_box(text_ax, model_name, sim_par=None, fit_par=None, pos=[0.01, 0.25]):
"""Add a text legend with model parameters to the plot.
Parameters
----------
text_ax : matplotlib.axes
        Axes on which to place the text.
model_name : str
The name of the sn model that is used.
sim_par : list(float)
The parameters of the model.
fit_par : list(tuple(float,float))
The fitted parameters and errors.
"""
par_dic = {'salt': [('t0', '.2f'), ('x0', '.2e'), ('mb', '.2f'), ('x1', '.2f'), ('c', '.3f')],
'mw_': [('Rv', '.2f'), ('E(B-V)', '.3f')]}
par = par_dic[model_name]
str_list = [''] * (len(par) + 1)
if sim_par is not None:
str_list[0] += 'SIMULATED PARAMETERS :@'
if fit_par is not None:
str_list[0] += 'FITTED PARAMETERS :@'
for i, p in enumerate(par):
if sim_par is not None:
str_list[i + 1] += f"{p[0]} = {sim_par[i]:{p[1]}}@"
if fit_par is not None:
if isinstance(fit_par[i], (int, float)):
str_list[i + 1] += f"{p[0]} = {fit_par[i]:{p[1]}}"
else:
str_list[i + 1] += f"{p[0]} = {fit_par[i][0]:{p[1]}} $\pm$ {fit_par[i][1]:{p[1]}}@"
final_str = ""
if str_list[0].count('@') == 2:
len_str = []
for i, s in enumerate(str_list):
str_list[i] = s.split('@')
len_str.append(len(str_list[i][0]))
max_len = np.max(len_str)
for i in range(len(str_list)):
final_str += str_list[i][0] + " " * (max_len - len_str[i] + 2) + "| "
final_str += str_list[i][1] + "\n"
elif str_list[0].count('@') == 1:
for i, s in enumerate(str_list):
final_str += str_list[i][:-1] + '\n'
prop = dict(boxstyle='round,pad=1', facecolor='navajowhite', alpha=0.5)
text_ax.axis('off')
text_ax.text(pos[0], pos[1], final_str[:-1], transform=text_ax.transAxes, fontsize=9, bbox=prop)
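def _demo_param_text_box():
    """Illustrative sketch, not part of the original module: render the
    parameter legend on its own axes, with arbitrary SALT-like numbers."""
    fig, ax = plt.subplots(figsize=(5, 2))
    sim_par = [58000.0, 1.2e-5, 22.5, 0.1, 0.020]                 # t0, x0, mb, x1, c
    fit_par = [(58000.1, 0.05), (1.1e-5, 1e-6), (22.6, 0.03),
               (0.2, 0.1), (0.025, 0.010)]
    param_text_box(ax, model_name='salt', sim_par=sim_par, fit_par=fit_par)
    plt.show()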
def plot_lc(
flux_table,
meta,
zp=25.,
mag=False,
Jy=False,
snc_sim_model=None,
snc_fit_model=None,
fit_cov=None,
residuals=False,
full_screen=False,
figsize=(35 / 2.54, 20 / 2.54),
dpi=120):
"""Ploting a lightcurve flux table.
Parameters
----------
flux_table : astropy.Table
The lightcurve to plot.
    meta : dict
        Meta data of the simulated SN (at least 'sim_t0' and 'zobs' are used).
    zp : float, default = 25.
        Zeropoint to which the flux is rescaled.
    mag : bool
        If True, plot magnitudes instead of fluxes.
    Jy : bool
        If True, plot the flux in microjansky.
snc_sim_model : sncosmo.Model
Model used to simulate the lightcurve.
snc_fit_model : sncosmo.Model
Model used to fit the lightcurve.
    fit_cov : numpy.ndarray(float, size=(4, 4))
sncosmo t0, x0, x1, c covariance matrix from SALT fit.
residuals : bool
If True plot fit residuals.
full_screen : bool
Try to plot the figure in full screen.
Returns
-------
None
Just plot the lightcurve.
"""
plt.rcParams['font.family'] = 'monospace'
bands = np.unique(flux_table['band'])
flux_norm, fluxerr_norm = ut.norm_flux(flux_table, zp)
time = flux_table['time']
t0 = meta['sim_t0']
z = meta['zobs']
time_th = np.linspace(t0 - 19.8 * (1 + z), t0 + 49.8 * (1 + z), 200)
fig = plt.figure(figsize=figsize, dpi=dpi)
###################
# INIT THE FIGURE #
###################
if residuals:
gs = gridspec.GridSpec(3, 1, height_ratios=[0.5, 2, 1])
text_ax = fig.add_subplot(gs[0])
ax0 = fig.add_subplot(gs[1])
ax1 = fig.add_subplot(gs[2], sharex=ax0)
ax1_y_lim = []
elif snc_sim_model is None and (snc_fit_model is None or fit_cov is None):
gs = gridspec.GridSpec(1, 1, height_ratios=[1])
ax0 = fig.add_subplot(gs[0])
else:
gs = gridspec.GridSpec(2, 1, height_ratios=[0.5, 2])
text_ax = fig.add_subplot(gs[0])
ax0 = fig.add_subplot(gs[1])
fig.suptitle(f'SN at redshift z : {z:.5f} and peak at time t$_0$ : {t0:.2f} MJD',
fontsize='xx-large')
plt.xlabel('Time relative to peak', fontsize='x-large')
################
# PLOT SECTION #
################
for b in bands:
band_mask = flux_table['band'] == b
flux_b = flux_norm[band_mask]
fluxerr_b = fluxerr_norm[band_mask]
time_b = time[band_mask]
if mag:
ax0.invert_yaxis()
ax0.set_ylabel('Mag', fontsize='x-large')
# Delete < 0 pts
flux_mask = flux_b > 0
flux_b = flux_b[flux_mask]
fluxerr_b = fluxerr_b[flux_mask]
time_b = time_b[flux_mask]
plot = -2.5 * np.log10(flux_b) + zp
err = 2.5 / np.log(10) * 1 / flux_b * fluxerr_b
if snc_sim_model is not None:
plot_th = snc_sim_model.bandmag(b, 'ab', time_th)
if snc_fit_model is not None:
plot_fit = snc_fit_model.bandmag(b, 'ab', time_th)
if fit_cov is not None:
if snc_fit_model.source.name in ('salt2', 'salt3'):
err_th = salt_ut.compute_salt_fit_error(snc_fit_model,
fit_cov[1:, 1:],
b, time_th, zp)
err_th = 2.5 / \
(np.log(10) * 10**(-0.4 * (plot_fit - zp))) * err_th
if residuals:
fit_pts = snc_fit_model.bandmag(b, 'ab', time_b)
rsd = plot - fit_pts
else:
if Jy:
ax0.set_ylabel('Flux [$\mu$Jy]', fontsize='x-large')
norm = ut.flux_to_Jansky(zp, b)
else:
ax0.set_ylabel(f'Flux (ZP = {zp})', fontsize='x-large')
norm = 1.0
ax0.axhline(ls='dashdot', c='black', lw=1.5)
plot = flux_b * norm
err = fluxerr_b * norm
if snc_sim_model is not None:
plot_th = snc_sim_model.bandflux(b, time_th, zp=zp, zpsys='ab') * norm
if snc_fit_model is not None:
plot_fit = snc_fit_model.bandflux(
b, time_th, zp=zp, zpsys='ab') * norm
if fit_cov is not None:
if snc_fit_model.source.name in ('salt2', 'salt3'):
err_th = salt_ut.compute_salt_fit_error(snc_fit_model, fit_cov[1:, 1:],
b, time_th, zp) * norm
if residuals:
fit_pts = snc_fit_model.bandflux(b, time_b, zp=zp, zpsys='ab') * norm
rsd = plot - fit_pts
p = ax0.errorbar(time_b - t0, plot, yerr=err,
label=b, fmt='o', markersize=2.5)
handles, labels = ax0.get_legend_handles_labels()
if snc_sim_model is not None:
ax0.plot(time_th - t0, plot_th, color=p[0].get_color())
sim_line = Line2D([0], [0], color='k', linestyle='solid')
sim_label = 'Sim'
handles.append(sim_line)
labels.append(sim_label)
if snc_fit_model is not None:
fit_line = Line2D([0], [0], color='k', linestyle='--')
fit_label = 'Fit'
handles.append(fit_line)
labels.append(fit_label)
ax0.plot(time_th - t0, plot_fit, color=p[0].get_color(), ls='--')
if fit_cov is not None:
ax0.fill_between(
time_th - t0,
plot_fit - err_th,
plot_fit + err_th,
alpha=0.5)
if residuals:
ax1.set_ylabel('Data - Model', fontsize='x-large')
ax1.errorbar(time_b - t0, rsd, yerr=err, fmt='o')
ax1.axhline(0, ls='dashdot', c='black', lw=1.5)
ax1_y_lim.append(3 * np.std(rsd))
ax1.plot(time_th - t0, err_th, ls='--', color=p[0].get_color())
ax1.plot(time_th - t0, -err_th, ls='--', color=p[0].get_color())
ax0.legend(handles=handles, labels=labels, fontsize='x-large')
sim_par = None
sim_mwd_par = None
fit_par = None
fit_mwd_par = None
if snc_sim_model is not None:
plt.xlim(snc_sim_model.mintime() - t0, snc_sim_model.maxtime() - t0)
sim_par = [meta['sim_t0'],
meta['sim_x0'],
meta['sim_mb'],
meta['sim_x1'],
meta['sim_c']]
if 'mw_' in snc_sim_model.effect_names:
sim_mwd_par = []
if 'mw_r_v' in meta:
sim_mwd_par.append(meta['mw_r_v'])
else:
mod_index = np.where(np.array(snc_sim_model.effect_names) == 'mw_')[0][0]
sim_mwd_par.append(snc_sim_model.effects[mod_index]._r_v)
sim_mwd_par.append(meta['mw_ebv'])
elif snc_fit_model is not None:
plt.xlim(snc_fit_model.mintime() - t0, snc_fit_model.maxtime() - t0)
else:
plt.xlim(np.min(time) - 1 - t0, np.max(time) + 1 - t0)
if residuals:
ax1.set_ylim(-np.nanmax(ax1_y_lim), np.nanmax(ax1_y_lim))
if snc_fit_model is not None and fit_cov is not None:
mb_fit = snc_fit_model.source_peakmag('bessellb', 'ab')
mb_err = np.sqrt(salt_ut.cov_x0_to_mb(snc_fit_model.parameters[2], fit_cov[1:, 1:])[0, 0])
fit_par = [(snc_fit_model.parameters[1], np.sqrt(fit_cov[0, 0])),
(snc_fit_model.parameters[2], np.sqrt(fit_cov[1, 1])),
(mb_fit, mb_err),
(snc_fit_model.parameters[3], np.sqrt(fit_cov[2, 2])),
(snc_fit_model.parameters[4], np.sqrt(fit_cov[3, 3]))]
if 'mw_' in snc_fit_model.effect_names:
fit_mwd_par = []
if 'mw_r_v' not in snc_fit_model.param_names:
mod_index = np.where(np.array(snc_fit_model.effect_names) == 'mw_')[0][0]
fit_mwd_par.append(snc_fit_model.effects[mod_index]._r_v)
else:
par_idx = np.where(np.asarray(snc_fit_model.param_names) == 'mw_r_v')[0][0]
fit_mwd_par.append(snc_fit_model.parameters[par_idx])
par_idx = np.where(np.asarray(snc_fit_model.param_names) == 'mw_ebv')[0][0]
fit_mwd_par.append(snc_fit_model.parameters[par_idx])
if fit_par is not None or sim_par is not None:
param_text_box(text_ax, model_name='salt', sim_par=sim_par, fit_par=fit_par)
if fit_mwd_par is not None or sim_mwd_par is not None:
param_text_box(text_ax, model_name='mw_', sim_par=sim_mwd_par, fit_par=fit_mwd_par,
pos=[0.4, 0.25])
plt.subplots_adjust(hspace=.0)
if full_screen:
try:
plt_maximize()
except Exception:
pass
plt.show()
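def _demo_flux_to_mag():
    """Illustrative sketch, not part of the original module: the flux-to-magnitude
    conversion applied inside plot_lc when mag=True, with made-up fluxes and zp = 25."""
    zp = 25.
    flux = np.array([1000., 250.])
    fluxerr = np.array([10., 5.])
    mag = -2.5 * np.log10(flux) + zp
    magerr = 2.5 / np.log(10) * fluxerr / flux
    print(mag, magerr)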
def plot_ra_dec(ra, dec, vpec=None, field_list=None, field_dic=None, field_size=None, **kwarg):
"""Plot a mollweide map of ra, dec.
Parameters
----------
ra : list(float)
Right Ascension.
    dec : list(float)
        Declination.
    vpec : list(float), optional
        Peculiar velocities.
Returns
-------
None
Just plot the map.
"""
plt.figure()
ax = plt.subplot(111, projection='mollweide')
ax.set_axisbelow(True)
plt.grid()
ra = ra - 2 * np.pi * (ra > np.pi)
if vpec is None:
plt.scatter(ra, dec, s=10, **kwarg)
else:
plot = plt.scatter(ra, dec, c=vpec, vmin=-1500, vmax=1500, s=10, **kwarg)
plt.colorbar(plot, label='$v_p$ [km/s]')
if field_list is not None and field_dic is not None and field_size is not None:
ra_edges = np.array([field_size[0] / 2,
field_size[0] / 2,
-field_size[0] / 2,
-field_size[0] / 2])
dec_edges = np.array([field_size[1] / 2,
-field_size[1] / 2,
-field_size[1] / 2,
field_size[1] / 2])
vec = np.array([np.cos(ra_edges) * np.cos(dec_edges),
np.sin(ra_edges) * np.cos(dec_edges),
np.sin(dec_edges)]).T
for ID in field_list:
# if ID < 880:
ra = field_dic[ID]['ra']
dec = field_dic[ID]['dec']
new_coord = [nbf.R_base(
ra, -dec, v, to_field_frame=False) for v in vec]
new_radec = [[np.arctan2(x[1], x[0]), np.arcsin(x[2])] for x in new_coord]
if new_radec[3][0] > new_radec[0][0]:
if new_radec[3][0] * new_radec[2][0] > 0:
x1 = [-np.pi, new_radec[0][0], new_radec[0][0], -np.pi]
y1 = [new_radec[0][1], new_radec[0][1],
new_radec[1][1], new_radec[1][1]]
x2 = [np.pi, new_radec[2][0], new_radec[2][0], np.pi]
y2 = [new_radec[2][1], new_radec[2][1],
new_radec[3][1], new_radec[3][1]]
ax.plot(x1, y1, ls='--', color='blue', lw=1, zorder=2)
                    ax.plot(x2, y2, ls='--', color='blue', lw=1, zorder=2)
# woblob/Crystal_Symmetry
import podstawa as pod
# missing matrices
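# Illustrative sketch, not part of the original module: the hand-written
# assignments below could also be generated in a loop. The translation names
# are those used in the assignments that follow; matrices_dict may contain
# more operations than the subset listed here, so this is a sketch rather than
# a drop-in replacement (it reproduces neither the variable names nor the #N
# numbering).
def _build_missing_matrices():
    translations = {
        "h00": pod._translation_h00, "0h0": pod._translation_0h0,
        "00h": pod._translation_00h, "0hh": pod._translation_0hh,
        "h0h": pod._translation_h0h, "hh0": pod._translation_hh0,
        "hhh": pod._translation_hhh,
    }
    missing = {}
    for trans_name, translation in translations.items():
        for op_name, matrix in pod.matrices_dict.items():
            missing["%s_%s_miss" % (op_name, trans_name)] = matrix + translation
    return missing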
_matrix_inv_000_h00_miss = pod.matrices_dict["inv_000"] + pod._translation_h00 #0
_matrix_m_0yz_h00_miss = pod.matrices_dict["m_0yz"] + pod._translation_h00 #1
_matrix_m_x0z_h00_miss = pod.matrices_dict["m_x0z"] + pod._translation_h00 #2
_matrix_m_xy0_h00_miss = pod.matrices_dict["m_xy0"] + pod._translation_h00 #3
_matrix_m_xmxz_h00_miss = pod.matrices_dict["m_xmxz"] + pod._translation_h00 #4
_matrix_m_xymy_h00_miss = pod.matrices_dict["m_xymy"] + pod._translation_h00 #5
_matrix_m_xymx_h00_miss = pod.matrices_dict["m_xymx"] + pod._translation_h00 #6
_matrix_m_xyx_h00_miss = pod.matrices_dict["m_xyx"] + pod._translation_h00 #7
_matrix_m_xxz_h00_miss = pod.matrices_dict["m_xxz"] + pod._translation_h00 #8
_matrix_m_xyy_h00_miss = pod.matrices_dict["m_xyy"] + pod._translation_h00 #9
_matrix_2_x00_h00_miss = pod.matrices_dict["2_x00"] + pod._translation_h00 #10
_matrix_2_0y0_h00_miss = pod.matrices_dict["2_0y0"] + pod._translation_h00 #11
_matrix_2_00z_h00_miss = pod.matrices_dict["2_00z"] + pod._translation_h00 #12
_matrix_2_xx0_h00_miss = pod.matrices_dict["2_xx0"] + pod._translation_h00 #13
_matrix_2_x0x_h00_miss = pod.matrices_dict["2_x0x"] + pod._translation_h00 #14
_matrix_2_0yy_h00_miss = pod.matrices_dict["2_0yy"] + pod._translation_h00 #15
_matrix_2_xmx0_h00_miss = pod.matrices_dict["2_xmx0"] + pod._translation_h00 #16
_matrix_2_mx0x_h00_miss = pod.matrices_dict["2_mx0x"] + pod._translation_h00 #17
_matrix_2_0myy_h00_miss = pod.matrices_dict["2_0myy"] + pod._translation_h00 #18
_matrix_3_xxx_h00_miss = pod.matrices_dict["3_xxx"] + pod._translation_h00 #19
_matrix_3_xmxmx_h00_miss = pod.matrices_dict["3_xmxmx"] + pod._translation_h00 #20
_matrix_3_mxxmx_h00_miss = pod.matrices_dict["3_mxxmx"] + pod._translation_h00 #21
_matrix_3_mxmxx_h00_miss = pod.matrices_dict["3_mxmxx"] + pod._translation_h00 #22
_matrix_m3_xxx_h00_miss = pod.matrices_dict["m3_xxx"] + pod._translation_h00 #23
_matrix_m3_xmxmx_h00_miss = pod.matrices_dict["m3_xmxmx"] + pod._translation_h00 #24
_matrix_m3_mxxmx_h00_miss = pod.matrices_dict["m3_mxxmx"] + pod._translation_h00 #25
_matrix_m3_mxmxx_h00_miss = pod.matrices_dict["m3_mxmxx"] + pod._translation_h00 #26
_matrix_4_x00_h00_miss = pod.matrices_dict["4_x00"] + pod._translation_h00 #27
_matrix_4_0y0_h00_miss = pod.matrices_dict["4_0y0"] + pod._translation_h00 #28
_matrix_4_00z_h00_miss = pod.matrices_dict["4_00z"] + pod._translation_h00 #29
_matrix_m4_x00_h00_miss = pod.matrices_dict["-4_x00"] + pod._translation_h00 #30
_matrix_m4_0y0_h00_miss = pod.matrices_dict["-4_0y0"] + pod._translation_h00 #31
_matrix_m4_00z_h00_miss = pod.matrices_dict["-4_00z"] + pod._translation_h00 #32
_matrix_inv_000_0h0_miss = pod.matrices_dict["inv_000"] + pod._translation_0h0 #33
_matrix_m_0yz_0h0_miss = pod.matrices_dict["m_0yz"] + pod._translation_0h0 #34
_matrix_m_x0z_0h0_miss = pod.matrices_dict["m_x0z"] + pod._translation_0h0 #35
_matrix_m_xy0_0h0_miss = pod.matrices_dict["m_xy0"] + pod._translation_0h0 #36
_matrix_m_xmxz_0h0_miss = pod.matrices_dict["m_xmxz"] + pod._translation_0h0 #37
_matrix_m_xymy_0h0_miss = pod.matrices_dict["m_xymy"] + pod._translation_0h0 #38
_matrix_m_xymx_0h0_miss = pod.matrices_dict["m_xymx"] + pod._translation_0h0 #39
_matrix_m_xyx_0h0_miss = pod.matrices_dict["m_xyx"] + pod._translation_0h0 #40
_matrix_m_xxz_0h0_miss = pod.matrices_dict["m_xxz"] + pod._translation_0h0 #41
_matrix_m_xyy_0h0_miss = pod.matrices_dict["m_xyy"] + pod._translation_0h0 #42
_matrix_2_x00_0h0_miss = pod.matrices_dict["2_x00"] + pod._translation_0h0 #43
_matrix_2_0y0_0h0_miss = pod.matrices_dict["2_0y0"] + pod._translation_0h0 #44
_matrix_2_00z_0h0_miss = pod.matrices_dict["2_00z"] + pod._translation_0h0 #45
_matrix_2_xx0_0h0_miss = pod.matrices_dict["2_xx0"] + pod._translation_0h0 #46
_matrix_2_x0x_0h0_miss = pod.matrices_dict["2_x0x"] + pod._translation_0h0 #47
_matrix_2_0yy_0h0_miss = pod.matrices_dict["2_0yy"] + pod._translation_0h0 #48
_matrix_2_xmx0_0h0_miss = pod.matrices_dict["2_xmx0"] + pod._translation_0h0 #49
_matrix_2_mx0x_0h0_miss = pod.matrices_dict["2_mx0x"] + pod._translation_0h0 #50
_matrix_2_0myy_0h0_miss = pod.matrices_dict["2_0myy"] + pod._translation_0h0 #51
_matrix_3_xxx_0h0_miss = pod.matrices_dict["3_xxx"] + pod._translation_0h0 #52
_matrix_3_xmxmx_0h0_miss = pod.matrices_dict["3_xmxmx"] + pod._translation_0h0 #53
_matrix_3_mxxmx_0h0_miss = pod.matrices_dict["3_mxxmx"] + pod._translation_0h0 #54
_matrix_3_mxmxx_0h0_miss = pod.matrices_dict["3_mxmxx"] + pod._translation_0h0 #55
_matrix_m3_xxx_0h0_miss = pod.matrices_dict["m3_xxx"] + pod._translation_0h0 #56
_matrix_m3_xmxmx_0h0_miss = pod.matrices_dict["m3_xmxmx"] + pod._translation_0h0 #57
_matrix_m3_mxxmx_0h0_miss = pod.matrices_dict["m3_mxxmx"] + pod._translation_0h0 #58
_matrix_m3_mxmxx_0h0_miss = pod.matrices_dict["m3_mxmxx"] + pod._translation_0h0 #59
_matrix_4_x00_0h0_miss = pod.matrices_dict["4_x00"] + pod._translation_0h0 #60
_matrix_4_0y0_0h0_miss = pod.matrices_dict["4_0y0"] + pod._translation_0h0 #61
_matrix_4_00z_0h0_miss = pod.matrices_dict["4_00z"] + pod._translation_0h0 #62
_matrix_m4_x00_0h0_miss = pod.matrices_dict["-4_x00"] + pod._translation_0h0 #63
_matrix_m4_0y0_0h0_miss = pod.matrices_dict["-4_0y0"] + pod._translation_0h0 #64
_matrix_m4_00z_0h0_miss = pod.matrices_dict["-4_00z"] + pod._translation_0h0 #65
_matrix_inv_000_00h_miss = pod.matrices_dict["inv_000"] + pod._translation_00h #66
_matrix_m_0yz_00h_miss = pod.matrices_dict["m_0yz"] + pod._translation_00h #67
_matrix_m_x0z_00h_miss = pod.matrices_dict["m_x0z"] + pod._translation_00h #68
_matrix_m_xy0_00h_miss = pod.matrices_dict["m_xy0"] + pod._translation_00h #69
_matrix_m_xmxz_00h_miss = pod.matrices_dict["m_xmxz"] + pod._translation_00h #70
_matrix_m_xymy_00h_miss = pod.matrices_dict["m_xymy"] + pod._translation_00h #71
_matrix_m_xymx_00h_miss = pod.matrices_dict["m_xymx"] + pod._translation_00h #72
_matrix_m_xyx_00h_miss = pod.matrices_dict["m_xyx"] + pod._translation_00h #73
_matrix_m_xxz_00h_miss = pod.matrices_dict["m_xxz"] + pod._translation_00h #74
_matrix_m_xyy_00h_miss = pod.matrices_dict["m_xyy"] + pod._translation_00h #75
_matrix_2_x00_00h_miss = pod.matrices_dict["2_x00"] + pod._translation_00h #76
_matrix_2_0y0_00h_miss = pod.matrices_dict["2_0y0"] + pod._translation_00h #77
_matrix_2_00z_00h_miss = pod.matrices_dict["2_00z"] + pod._translation_00h #78
_matrix_2_xx0_00h_miss = pod.matrices_dict["2_xx0"] + pod._translation_00h #79
_matrix_2_x0x_00h_miss = pod.matrices_dict["2_x0x"] + pod._translation_00h #80
_matrix_2_0yy_00h_miss = pod.matrices_dict["2_0yy"] + pod._translation_00h #81
_matrix_2_xmx0_00h_miss = pod.matrices_dict["2_xmx0"] + pod._translation_00h #82
_matrix_2_mx0x_00h_miss = pod.matrices_dict["2_mx0x"] + pod._translation_00h #83
_matrix_2_0myy_00h_miss = pod.matrices_dict["2_0myy"] + pod._translation_00h #84
_matrix_3_xxx_00h_miss = pod.matrices_dict["3_xxx"] + pod._translation_00h #85
_matrix_3_xmxmx_00h_miss = pod.matrices_dict["3_xmxmx"] + pod._translation_00h #86
_matrix_3_mxxmx_00h_miss = pod.matrices_dict["3_mxxmx"] + pod._translation_00h #87
_matrix_3_mxmxx_00h_miss = pod.matrices_dict["3_mxmxx"] + pod._translation_00h #88
_matrix_m3_xxx_00h_miss = pod.matrices_dict["m3_xxx"] + pod._translation_00h #89
_matrix_m3_xmxmx_00h_miss = pod.matrices_dict["m3_xmxmx"] + pod._translation_00h #90
_matrix_m3_mxxmx_00h_miss = pod.matrices_dict["m3_mxxmx"] + pod._translation_00h #91
_matrix_m3_mxmxx_00h_miss = pod.matrices_dict["m3_mxmxx"] + pod._translation_00h #92
_matrix_4_x00_00h_miss = pod.matrices_dict["4_x00"] + pod._translation_00h #93
_matrix_4_0y0_00h_miss = pod.matrices_dict["4_0y0"] + pod._translation_00h #94
_matrix_4_00z_00h_miss = pod.matrices_dict["4_00z"] + pod._translation_00h #95
_matrix_m4_x00_00h_miss = pod.matrices_dict["-4_x00"] + pod._translation_00h #96
_matrix_m4_0y0_00h_miss = pod.matrices_dict["-4_0y0"] + pod._translation_00h #97
_matrix_m4_00z_00h_miss = pod.matrices_dict["-4_00z"] + pod._translation_00h #98
_matrix_inv_000_0hh_miss = pod.matrices_dict["inv_000"] + pod._translation_0hh #99
_matrix_m_0yz_0hh_miss = pod.matrices_dict["m_0yz"] + pod._translation_0hh #100
_matrix_m_x0z_0hh_miss = pod.matrices_dict["m_x0z"] + pod._translation_0hh #101
_matrix_m_xy0_0hh_miss = pod.matrices_dict["m_xy0"] + pod._translation_0hh #102
_matrix_m_xmxz_0hh_miss = pod.matrices_dict["m_xmxz"] + pod._translation_0hh #103
_matrix_m_xymy_0hh_miss = pod.matrices_dict["m_xymy"] + pod._translation_0hh #104
_matrix_m_xymx_0hh_miss = pod.matrices_dict["m_xymx"] + pod._translation_0hh #105
_matrix_m_xyx_0hh_miss = pod.matrices_dict["m_xyx"] + pod._translation_0hh #106
_matrix_m_xxz_0hh_miss = pod.matrices_dict["m_xxz"] + pod._translation_0hh #107
_matrix_m_xyy_0hh_miss = pod.matrices_dict["m_xyy"] + pod._translation_0hh #108
_matrix_2_x00_0hh_miss = pod.matrices_dict["2_x00"] + pod._translation_0hh #109
_matrix_2_0y0_0hh_miss = pod.matrices_dict["2_0y0"] + pod._translation_0hh #110
_matrix_2_00z_0hh_miss = pod.matrices_dict["2_00z"] + pod._translation_0hh #111
_matrix_2_xx0_0hh_miss = pod.matrices_dict["2_xx0"] + pod._translation_0hh #112
_matrix_2_x0x_0hh_miss = pod.matrices_dict["2_x0x"] + pod._translation_0hh #113
_matrix_2_0yy_0hh_miss = pod.matrices_dict["2_0yy"] + pod._translation_0hh #114
_matrix_2_xmx0_0hh_miss = pod.matrices_dict["2_xmx0"] + pod._translation_0hh #115
_matrix_2_mx0x_0hh_miss = pod.matrices_dict["2_mx0x"] + pod._translation_0hh #116
_matrix_2_0myy_0hh_miss = pod.matrices_dict["2_0myy"] + pod._translation_0hh #117
_matrix_3_xxx_0hh_miss = pod.matrices_dict["3_xxx"] + pod._translation_0hh #118
_matrix_3_xmxmx_0hh_miss = pod.matrices_dict["3_xmxmx"] + pod._translation_0hh #119
_matrix_3_mxxmx_0hh_miss = pod.matrices_dict["3_mxxmx"] + pod._translation_0hh #120
_matrix_3_mxmxx_0hh_miss = pod.matrices_dict["3_mxmxx"] + pod._translation_0hh #121
_matrix_m3_xxx_0hh_miss = pod.matrices_dict["m3_xxx"] + pod._translation_0hh #122
_matrix_m3_xmxmx_0hh_miss = pod.matrices_dict["m3_xmxmx"] + pod._translation_0hh #123
_matrix_m3_mxxmx_0hh_miss = pod.matrices_dict["m3_mxxmx"] + pod._translation_0hh #124
_matrix_m3_mxmxx_0hh_miss = pod.matrices_dict["m3_mxmxx"] + pod._translation_0hh #125
_matrix_4_x00_0hh_miss = pod.matrices_dict["4_x00"] + pod._translation_0hh #126
_matrix_4_0y0_0hh_miss = pod.matrices_dict["4_0y0"] + pod._translation_0hh #127
_matrix_4_00z_0hh_miss = pod.matrices_dict["4_00z"] + pod._translation_0hh #128
_matrix_m4_x00_0hh_miss = pod.matrices_dict["-4_x00"] + pod._translation_0hh #129
_matrix_m4_0y0_0hh_miss = pod.matrices_dict["-4_0y0"] + pod._translation_0hh #130
_matrix_m4_00z_0hh_miss = pod.matrices_dict["-4_00z"] + pod._translation_0hh #131
_matrix_inv_000_h0h_miss = pod.matrices_dict["inv_000"] + pod._translation_h0h #132
_matrix_m_0yz_h0h_miss = pod.matrices_dict["m_0yz"] + pod._translation_h0h #133
_matrix_m_x0z_h0h_miss = pod.matrices_dict["m_x0z"] + pod._translation_h0h #134
_matrix_m_xy0_h0h_miss = pod.matrices_dict["m_xy0"] + pod._translation_h0h #135
_matrix_m_xmxz_h0h_miss = pod.matrices_dict["m_xmxz"] + pod._translation_h0h #136
_matrix_m_xymy_h0h_miss = pod.matrices_dict["m_xymy"] + pod._translation_h0h #137
_matrix_m_xymx_h0h_miss = pod.matrices_dict["m_xymx"] + pod._translation_h0h #138
_matrix_m_xyx_h0h_miss = pod.matrices_dict["m_xyx"] + pod._translation_h0h #139
_matrix_m_xxz_h0h_miss = pod.matrices_dict["m_xxz"] + pod._translation_h0h #140
_matrix_m_xyy_h0h_miss = pod.matrices_dict["m_xyy"] + pod._translation_h0h #141
_matrix_2_x00_h0h_miss = pod.matrices_dict["2_x00"] + pod._translation_h0h #142
_matrix_2_0y0_h0h_miss = pod.matrices_dict["2_0y0"] + pod._translation_h0h #143
_matrix_2_00z_h0h_miss = pod.matrices_dict["2_00z"] + pod._translation_h0h #144
_matrix_2_xx0_h0h_miss = pod.matrices_dict["2_xx0"] + pod._translation_h0h #145
_matrix_2_x0x_h0h_miss = pod.matrices_dict["2_x0x"] + pod._translation_h0h #146
_matrix_2_0yy_h0h_miss = pod.matrices_dict["2_0yy"] + pod._translation_h0h #147
_matrix_2_xmx0_h0h_miss = pod.matrices_dict["2_xmx0"] + pod._translation_h0h #148
_matrix_2_mx0x_h0h_miss = pod.matrices_dict["2_mx0x"] + pod._translation_h0h #149
_matrix_2_0myy_h0h_miss = pod.matrices_dict["2_0myy"] + pod._translation_h0h #150
_matrix_3_xxx_h0h_miss = pod.matrices_dict["3_xxx"] + pod._translation_h0h #151
_matrix_3_xmxmx_h0h_miss = pod.matrices_dict["3_xmxmx"] + pod._translation_h0h #152
_matrix_3_mxxmx_h0h_miss = pod.matrices_dict["3_mxxmx"] + pod._translation_h0h #153
_matrix_3_mxmxx_h0h_miss = pod.matrices_dict["3_mxmxx"] + pod._translation_h0h #154
_matrix_m3_xxx_h0h_miss = pod.matrices_dict["m3_xxx"] + pod._translation_h0h #155
_matrix_m3_xmxmx_h0h_miss = pod.matrices_dict["m3_xmxmx"] + pod._translation_h0h #156
_matrix_m3_mxxmx_h0h_miss = pod.matrices_dict["m3_mxxmx"] + pod._translation_h0h #157
_matrix_m3_mxmxx_h0h_miss = pod.matrices_dict["m3_mxmxx"] + pod._translation_h0h #158
_matrix_4_x00_h0h_miss = pod.matrices_dict["4_x00"] + pod._translation_h0h #159
_matrix_4_0y0_h0h_miss = pod.matrices_dict["4_0y0"] + pod._translation_h0h #160
_matrix_4_00z_h0h_miss = pod.matrices_dict["4_00z"] + pod._translation_h0h #161
_matrix_m4_x00_h0h_miss = pod.matrices_dict["-4_x00"] + pod._translation_h0h #162
_matrix_m4_0y0_h0h_miss = pod.matrices_dict["-4_0y0"] + pod._translation_h0h #163
_matrix_m4_00z_h0h_miss = pod.matrices_dict["-4_00z"] + pod._translation_h0h #164
_matrix_inv_000_hh0_miss = pod.matrices_dict["inv_000"] + pod._translation_hh0 #165
_matrix_m_0yz_hh0_miss = pod.matrices_dict["m_0yz"] + pod._translation_hh0 #166
_matrix_m_x0z_hh0_miss = pod.matrices_dict["m_x0z"] + pod._translation_hh0 #167
_matrix_m_xy0_hh0_miss = pod.matrices_dict["m_xy0"] + pod._translation_hh0 #168
_matrix_m_xmxz_hh0_miss = pod.matrices_dict["m_xmxz"] + pod._translation_hh0 #169
_matrix_m_xymy_hh0_miss = pod.matrices_dict["m_xymy"] + pod._translation_hh0 #170
_matrix_m_xymx_hh0_miss = pod.matrices_dict["m_xymx"] + pod._translation_hh0 #171
_matrix_m_xyx_hh0_miss = pod.matrices_dict["m_xyx"] + pod._translation_hh0 #172
_matrix_m_xxz_hh0_miss = pod.matrices_dict["m_xxz"] + pod._translation_hh0 #173
_matrix_m_xyy_hh0_miss = pod.matrices_dict["m_xyy"] + pod._translation_hh0 #174
_matrix_2_x00_hh0_miss = pod.matrices_dict["2_x00"] + pod._translation_hh0 #175
_matrix_2_0y0_hh0_miss = pod.matrices_dict["2_0y0"] + pod._translation_hh0 #176
_matrix_2_00z_hh0_miss = pod.matrices_dict["2_00z"] + pod._translation_hh0 #177
_matrix_2_xx0_hh0_miss = pod.matrices_dict["2_xx0"] + pod._translation_hh0 #178
_matrix_2_x0x_hh0_miss = pod.matrices_dict["2_x0x"] + pod._translation_hh0 #179
_matrix_2_0yy_hh0_miss = pod.matrices_dict["2_0yy"] + pod._translation_hh0 #180
_matrix_2_xmx0_hh0_miss = pod.matrices_dict["2_xmx0"] + pod._translation_hh0 #181
_matrix_2_mx0x_hh0_miss = pod.matrices_dict["2_mx0x"] + pod._translation_hh0 #182
_matrix_2_0myy_hh0_miss = pod.matrices_dict["2_0myy"] + pod._translation_hh0 #183
_matrix_3_xxx_hh0_miss = pod.matrices_dict["3_xxx"] + pod._translation_hh0 #184
_matrix_3_xmxmx_hh0_miss = pod.matrices_dict["3_xmxmx"] + pod._translation_hh0 #185
_matrix_3_mxxmx_hh0_miss = pod.matrices_dict["3_mxxmx"] + pod._translation_hh0 #186
_matrix_3_mxmxx_hh0_miss = pod.matrices_dict["3_mxmxx"] + pod._translation_hh0 #187
_matrix_m3_xxx_hh0_miss = pod.matrices_dict["m3_xxx"] + pod._translation_hh0 #188
_matrix_m3_xmxmx_hh0_miss = pod.matrices_dict["m3_xmxmx"] + pod._translation_hh0 #189
_matrix_m3_mxxmx_hh0_miss = pod.matrices_dict["m3_mxxmx"] + pod._translation_hh0 #190
_matrix_m3_mxmxx_hh0_miss = pod.matrices_dict["m3_mxmxx"] + pod._translation_hh0 #191
_matrix_4_x00_hh0_miss = pod.matrices_dict["4_x00"] + pod._translation_hh0 #192
_matrix_4_0y0_hh0_miss = pod.matrices_dict["4_0y0"] + pod._translation_hh0 #193
_matrix_4_00z_hh0_miss = pod.matrices_dict["4_00z"] + pod._translation_hh0 #194
_matrix_m4_x00_hh0_miss = pod.matrices_dict["-4_x00"] + pod._translation_hh0 #195
_matrix_m4_0y0_hh0_miss = pod.matrices_dict["-4_0y0"] + pod._translation_hh0 #196
_matrix_m4_00z_hh0_miss = pod.matrices_dict["-4_00z"] + pod._translation_hh0 #197
_matrix_inv_000_hhh_miss = pod.matrices_dict["inv_000"] + pod._translation_hhh #198
_matrix_m_0yz_hhh_miss = pod.matrices_dict["m_0yz"] + pod._translation_hhh #199
_matrix_m_x0z_hhh_miss = pod.matrices_dict["m_x0z"] + pod._translation_hhh #200
_matrix_m_xy0_hhh_miss = pod.matrices_dict["m_xy0"] + pod._translation_hhh #201
_matrix_m_xmxz_hhh_miss = pod.matrices_dict["m_xmxz"] + pod._translation_hhh #202
_matrix_m_xymy_hhh_miss = pod.matrices_dict["m_xymy"] + pod._translation_hhh #203
_matrix_m_xymx_hhh_miss = pod.matrices_dict["m_xymx"] + pod._translation_hhh #204
_matrix_m_xyx_hhh_miss = pod.matrices_dict["m_xyx"] + pod._translation_hhh #205
_matrix_m_xxz_hhh_miss = pod.matrices_dict["m_xxz"] + pod._translation_hhh #206
_matrix_m_xyy_hhh_miss = pod.matrices_dict["m_xyy"] + pod._translation_hhh #207
_matrix_2_x00_hhh_miss = pod.matrices_dict["2_x00"] + pod._translation_hhh #208
_matrix_2_0y0_hhh_miss = pod.matrices_dict["2_0y0"] + pod._translation_hhh #209
_matrix_2_00z_hhh_miss = pod.matrices_dict["2_00z"] + pod._translation_hhh #210
_matrix_2_xx0_hhh_miss = pod.matrices_dict["2_xx0"] + pod._translation_hhh #211
_matrix_2_x0x_hhh_miss = pod.matrices_dict["2_x0x"] + pod._translation_hhh #212
_matrix_2_0yy_hhh_miss = pod.matrices_dict["2_0yy"] + pod._translation_hhh #213
_matrix_2_xmx0_hhh_miss = pod.matrices_dict["2_xmx0"] + pod._translation_hhh #214
_matrix_2_mx0x_hhh_miss = pod.matrices_dict["2_mx0x"] + pod._translation_hhh #215
_matrix_2_0myy_hhh_miss = pod.matrices_dict["2_0myy"] + pod._translation_hhh #216
_matrix_3_xxx_hhh_miss = pod.matrices_dict["3_xxx"] + pod._translation_hhh #217
_matrix_3_xmxmx_hhh_miss = pod.matrices_dict["3_xmxmx"] + pod._translation_hhh #218
_matrix_3_mxxmx_hhh_miss = pod.matrices_dict["3_mxxmx"] + pod._translation_hhh #219
_matrix_3_mxmxx_hhh_miss = pod.matrices_dict["3_mxmxx"] + pod._translation_hhh #220
_matrix_m3_xxx_hhh_miss = pod.matrices_dict["m3_xxx"] + pod._translation_hhh #221
_matrix_m3_xmxmx_hhh_miss = pod.matrices_dict["m3_xmxmx"] + pod._translation_hhh #222
_matrix_m3_mxxmx_hhh_miss = pod.matrices_dict["m3_mxxmx"] + pod._translation_hhh #223
_matrix_m3_mxmxx_hhh_miss = pod.matrices_dict["m3_mxmxx"] + pod._translation_hhh #224
_matrix_4_x00_hhh_miss = pod.matrices_dict["4_x00"] + pod._translation_hhh #225
_matrix_4_0y0_hhh_miss = pod.matrices_dict["4_0y0"] + pod._translation_hhh #226
_matrix_4_00z_hhh_miss = pod.matrices_dict["4_00z"] + pod._translation_hhh #227
_matrix_m4_x00_hhh_miss = pod.matrices_dict["-4_x00"] + pod._translation_hhh #228
_matrix_m4_0y0_hhh_miss = pod.matrices_dict["-4_0y0"] + pod._translation_hhh #229
_matrix_m4_00z_hhh_miss = pod.matrices_dict["-4_00z"] + pod._translation_hhh #230
_matrix_inv_000_qqq_miss = pod.matrices_dict["inv_000"] + pod._translation_qqq #231
_matrix_m_0yz_qqq_miss = pod.matrices_dict["m_0yz"] + pod._translation_qqq #232
_matrix_m_x0z_qqq_miss = pod.matrices_dict["m_x0z"] + pod._translation_qqq #233
_matrix_m_xy0_qqq_miss = pod.matrices_dict["m_xy0"] + pod._translation_qqq #234
_matrix_m_xmxz_qqq_miss = pod.matrices_dict["m_xmxz"] + pod._translation_qqq #235
_matrix_m_xymy_qqq_miss = pod.matrices_dict["m_xymy"] + pod._translation_qqq #236
_matrix_m_xymx_qqq_miss = pod.matrices_dict["m_xymx"] + pod._translation_qqq #237
_matrix_m_xyx_qqq_miss = pod.matrices_dict["m_xyx"] + pod._translation_qqq #238
_matrix_m_xxz_qqq_miss = pod.matrices_dict["m_xxz"] + pod._translation_qqq #239
_matrix_m_xyy_qqq_miss = pod.matrices_dict["m_xyy"] + pod._translation_qqq #240
_matrix_2_x00_qqq_miss = pod.matrices_dict["2_x00"] + pod._translation_qqq #241
_matrix_2_0y0_qqq_miss = pod.matrices_dict["2_0y0"] + pod._translation_qqq #242
_matrix_2_00z_qqq_miss = pod.matrices_dict["2_00z"] + pod._translation_qqq #243
_matrix_2_xx0_qqq_miss = pod.matrices_dict["2_xx0"] + pod._translation_qqq #244
_matrix_2_x0x_qqq_miss = pod.matrices_dict["2_x0x"] + pod._translation_qqq #245
_matrix_2_0yy_qqq_miss = pod.matrices_dict["2_0yy"] + pod._translation_qqq #246
_matrix_2_xmx0_qqq_miss = pod.matrices_dict["2_xmx0"] + pod._translation_qqq #247
_matrix_2_mx0x_qqq_miss = pod.matrices_dict["2_mx0x"] + pod._translation_qqq #248
_matrix_2_0myy_qqq_miss = pod.matrices_dict["2_0myy"] + pod._translation_qqq #249
_matrix_3_xxx_qqq_miss = pod.matrices_dict["3_xxx"] + pod._translation_qqq #250
_matrix_3_xmxmx_qqq_miss = pod.matrices_dict["3_xmxmx"] + pod._translation_qqq #251
_matrix_3_mxxmx_qqq_miss = pod.matrices_dict["3_mxxmx"] + pod._translation_qqq #252
_matrix_3_mxmxx_qqq_miss = pod.matrices_dict["3_mxmxx"] + pod._translation_qqq #253
_matrix_m3_xxx_qqq_miss = pod.matrices_dict["m3_xxx"] + pod._translation_qqq
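# Hedged editorial sketch: the hand-written "_matrix_*_miss" assignments above
# pair entries of pod.matrices_dict with the pod._translation_* vectors, one
# variable per combination. Assuming those attributes behave as used above
# (matrix + translation yields the combined operator), the same table for the
# translations visible in this excerpt can be built in one comprehension.
# The dict below is illustrative only; it is keyed by (operation, translation)
# instead of defining one name per pair.
_translations_miss = {
    "0hh": pod._translation_0hh,
    "h0h": pod._translation_h0h,
    "hh0": pod._translation_hh0,
    "hhh": pod._translation_hhh,
    "qqq": pod._translation_qqq,
}
_matrices_miss = {
    (op_name, tr_name): op_matrix + tr_vector
    for op_name, op_matrix in pod.matrices_dict.items()
    for tr_name, tr_vector in _translations_miss.items()
}
# e.g. _matrices_miss[("2_x00", "h0h")] corresponds to _matrix_2_x00_h0h_miss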
from tkinter import *
from tkinter import messagebox
from tkinter import ttk
from datetime import *
import webbrowser
from tkcalendar import *
import sqlite3
# create connection to BMI database
try:
cn = sqlite3.connect("../bmil.db")
except:
cn = sqlite3.connect("bmil.db")
cr = cn.cursor()
# if the report table does not exist yet, create it
try:
cr.execute(
"""CREATE TABLE report(id_inc INTEGER PRIMARY KEY,
Name TEXT (25),
Weight INTEGER,
Height REAL,
B_Date TEXT);"""
)
ID = 0
Indextabl = 0
except:
print("Table Already Exists")
sqs = "SELECT * FROM report order by id_inc" # query to select (all(*)) from db table ordered by id
cr.execute(sqs) # executes the sqs to run the query
tbl = cr.fetchall() # fetchall is a query result set and returns a list of tuples
def table():
cr.execute("select * from report")
return cr.fetchall()
if len(table()) == 0:
ID = 0
Indextabl = 0
else:
ID = table()[-1][0] + 1
Indextabl = 1
# def exit for window protocol and for exit menu
def Exit():
cr.close() # close the cursor
cn.close() # close the connection
win.destroy() # destroys the window
def is_num(x):
try:
float(x)
except ValueError:
return False
else:
return True
# clearing all fields and results
def Clear(a=""):
v1.set("") # sets the stingvar of name entry ''(nothing)
v2.set("") # sets the stingvar of weight entry ''(nothing)
v3.set("") # sets the stingvar of height entry ''(nothing)
Clear_Lb() # change the color for the category result
e1.focus() # sets the name entry to enter data
# resetting result labels to be unshown
def Clear_Lb(a=""):
lb11.config(text=" ")
lb12.config(fg="lightgray")
lb13.config(fg="lightgray")
lb14.config(fg="lightgray")
lb15.config(fg="lightgray")
lb17.config(fg="lightgray")
lb18.config(fg="lightgray")
# save the record to the database after validating the weight, height and date fields
def Save(a=""):
if (not is_num(e2.get())) or float(e2.get()) <= 0:
# checks if weight entry isn't a number or float
e2.focus() # puts the mouse in the weight entry
v2.set("") # sets nothing in the weight entry
return
if (not is_num(e3.get())) or float(e3.get()) <= 0:
# checks that the height entry is a positive number
e3.focus() # puts the mouse in the height entry
v3.set("") # sets nothing in the height entry
return
d2 = datetime.strptime(
e4.get(), "%d/%m/%Y"
) # strptime() method creates a datetime object from the given string
if todaydat <= d2: # the entered date must be earlier than the system date
messagebox.showerror(
"Update", "Date should be < System date"
) # shows messagebox
return False
Calcul() # calculates the gives height and weight
quest = messagebox.askyesno(
"Save", "Do you want to save data ?"
) # asks yes or no if you really want to save
if quest == True: # if quest = yes then ...
global ID
sql = "INSERT INTO report(id_inc,Name,Weight,Height,B_Date) VALUES (%d, '%s', %s, %s, '%s');"
x = (
ID,
e1.get(),
e2.get(),
e3.get(),
e4.get(),
) # gets the name,weight,height,dateentry entries
cr.execute(sql % x)
cn.commit()
ID += 1
Clear() # clears all fields
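# Hedged sketch (not part of the original app, never called): sqlite3 also
# supports parameterized queries, which avoid the manual string formatting
# used in Save()/Update() above and sidestep quoting problems. The helper
# name and parameters below are illustrative.
def save_report_parameterized(cursor, conn, record_id, name, weight, height, b_date):
    cursor.execute(
        "INSERT INTO report(id_inc, Name, Weight, Height, B_Date) "
        "VALUES (?, ?, ?, ?, ?);",
        (record_id, name, weight, height, b_date),
    )
    conn.commit()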
# update a record in the database, matched by the name entered (typically loaded first via "View One")
def Update(a=""):
if (not is_num(e2.get())) or float(e2.get()) <= 0:
# checks if weight entry isn't number or float
e2.focus() # puts the mouse in the weight entry
v2.set("") # sets the weight entry string nothing
return
if (not is_num(e3.get())) or float(e3.get()) <= 0:
# checks that the height entry is a positive number
e3.focus() # puts the mouse in the height entry
v3.set("") # sets the height entry string nothing
return
d2 = datetime.strptime(
e4.get(), "%d/%m/%Y"
) # strptime() method creates a datetime object from the given string
print(e4.get())
if todaydat <= d2: # the entered date must be earlier than the system date
messagebox.showerror(
"Update", "Date should be < System date"
) # shows messagebox
return False
Calcul() # calculates the current entered data of weight and height
quest = messagebox.askyesno(
"Update", "Sure to update data ?"
) # asks yes or no if you really want to update
if quest == True: # if quest = yes then...
sq = "update report set Weight=%s, Height=%s, B_Date='%s' where Name='%s'" # updates the current data entered where name = name entered
x = (
e2.get(),
e3.get(),
d2,
e1.get(),
)
# gets weight,height,dateentry and name entry
print(x)
cr.execute(sq % x) # execute the sq query and the x variable
cn.commit() # make changes in the database
Clear() # clearing all data in the fields
# read the weight and height entries, validate that both are positive numbers, then calculate the BMI
def Calcul(a=""):
Clear_Lb() # run Clear_Lb, which resets the category labels so none is highlighted
if (is_num(e2.get())) and float(
e2.get()
) > 0: # checks if weight entry is number and float
w = float(e2.get()) # gets the float of the weight
else: # if weight entry isn't number (float)
e2.focus() # puts the mouse in the weight entry
v2.set("") # sets nothing in the weight entry string
return
if (is_num(e3.get())) and float(
e3.get()
) > 0: # checks if height entry is number and float
h = float(e3.get()) # gets the float of the height
else: # if height entry isn't number (float)
e3.focus() # puts the mouse in the height entry
v3.set("") # sets nothing in the height entry string
return
r = float(w / (h ** 2)) # BMI calculation: weight / (height * height)
lb11.config(text=str(round(r, 2))) # gives you the result of the calculation
# if results
if r < 18.5:
lb12.config(fg="red")
lb18.config(fg="red")
elif 18.5 <= r < 25:
lb13.config(fg="red")
lb17.config(fg="red")
elif 25 <= r < 30:
lb14.config(fg="red")
lb18.config(fg="red")
else:
lb15.config(fg="red")
lb18.config(fg="red")
# view all records from the database in a listbox ordered by name; any record can be viewed or deleted
def VAll(rd: list = []):
global ls1 # globaling the list box
win2 = Tk() # new window for view all
win2.geometry("720x400") # geomatry for view all window
win2.title("View All Patient BMI") # title for view all window
y = [
"ID",
"Name",
"Weight",
"Height",
"Bith Date",
"BMI",
"Healthy",
] # titles for database records
s1 = "{:^7}{:^25}{:^8}{:^8}{:^15}{:^8}{:^8}".format(
y[0], y[1], y[2], y[3], y[4], y[5], y[6]
) # making spaces between fields
lb30 = Label(
win2, text=s1, bg="lightblue", font=("courier", 10)
) # label for spaces for titles with background
fr1 = Frame(win2) # creating a frame to put the list box
sb1 = Scrollbar(fr1) # scroll bar for the list box to see all records
ls1 = Listbox(
fr1, yscrollcommand=sb1.set, font=("courier", 10), width=80, height=15
) # list box to put records in
if not rd:
sq = "select * from report ORDER by Name"
# query for selecting all(*) from table ordered by name
cr.execute(sq) # executes the current query
rd = cr.fetchall()
# shows all rows of a query result set and returns a list of tuples
button1 = Button(
win2,
text="View it",
bg="green",
width=20,
command=lambda: view_selected(win2),
).place(x=400, y=350)
# view any selected record
button = Button(
win2, text="Delete it", bg="red", width=20, command=delete_selected
).place(x=200, y=350)
# delete any selected record
else:
button = Button(
win2, text="Delete it", bg="red", width=20, command=delete_selected
).place(x=300, y=350)
# delete any selected record
for row in rd:
# loop over the rows to compute the BMI and the healthy flag for display only (not saved to the database)
w = int(row[2]) # taking the integer of weight
h = float(row[3]) # taking the float of height
r = float(w / (h ** 2)) # result (bmi)
if 18.5 <= r < 25:
ht = "Yes" # healthy = yes
else:
ht = "No" # healthy = no
s2 = "{:^7}{:^25}{:^8}{:^8}{:^15}{:^8}{:^8}".format(
row[0], row[1], row[2], row[3], str(row[4]), round(r, 2), ht
) # formatting with spaces to print bmi result and if healthy without saving to db
ls1.insert(END, s2) # insert the data of the current format
sb1.config(command=ls1.yview)
lb30.place(x=30, y=30)
fr1.place(x=30, y=60)
ls1.pack(side=LEFT)
sb1.pack(side=LEFT, fill=Y)
win2.mainloop()
# look up the entered name and, if a matching record exists, fill the form fields with its data
def VOne(a=""):
Clear_Lb() # change the color for the category result
module.exit_json(**result)
def validate_cer_connectivity():
url = 'https://' + hostname + ":" + api_port + '/cerappservices/export/authenticate/status/' + username + '/' + str(hashlib.sha256(password.encode()).hexdigest())
i = 0
cer_headers = {
'Host': hostname + ":" + api_port
}
while True:
try:
response = (requests.get(url, auth=(username,password), headers=cer_headers, timeout = 60, proxies=sock5Proxy, verify = False))
if (response.status_code == 200):
result['response'] = response.text
break
except Exception as err:
if (i > 100):
# Giving up after more than 100 attempts (roughly 100 minutes)
result['changed'] = False
result['failed'] = True
module.fail_json(msg=str(err),**result)
else:
# Unable to connect, pausing for 1 minute
time.sleep(60)
i = i + 1
continue
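# Hedged sketch: every validate_*_connectivity() helper in this module repeats
# the same "retry every 60 seconds, give up after ~100 attempts" loop. A shared
# helper such as the one below (illustrative only, not wired into the module;
# it assumes `time` and `requests` are already imported, as they are used above)
# would express that pattern once.
def retry_until_ok(request_fn, attempts=100, delay=60):
    last_err = None
    for _ in range(attempts):
        try:
            response = request_fn()  # expected to return a requests.Response
            if response.status_code == 200:
                return response
        except Exception as err:  # keep retrying on any transport error
            last_err = err
        time.sleep(delay)
    raise RuntimeError("giving up after %d attempts: %s" % (attempts, last_err))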
def run_module():
# define available arguments/parameters a user can pass to the module
module_args = dict(
vapp_ip=dict(type='str', required=True),
api_port=dict(type='str', required=False),
ssms_ip=dict(type='str', required=False),
ssms_port=dict(type='str', default=8443, required=False),
ip_address=dict(type='str', required=False),
username=dict(type='str', required=True),
password=dict(type='str', required=True, no_log=True),
token_description=dict(type='str', required=False),
model=dict(type='str', required=True),
version=dict(type='float', required=True),
use_proxy=dict(type='bool', required=True)
)
# seed the result dict in the object
# we primarily care about changed and the response data
# change is if this module effectively modified the target
# response is the data returned by TrafficJam
# status_code is the HTTP status code returned by the requests module
global result
result = dict(
changed=False,
response='',
status_code=''
)
global module
module = AnsibleModule(
argument_spec=module_args,
supports_check_mode=False
)
global pSession
# Collect Module Parameters
global vapp_ip
vapp_ip = module.params['vapp_ip']
global api_port
api_port = module.params['api_port']
global ssms_ip
ssms_ip = module.params['ssms_ip']
global ssms_port
ssms_port = module.params['ssms_port']
global ip_address
ip_address = module.params['ip_address']
global username
username = module.params['username']
global password
password = module.params['password']
global token_description
token_description = module.params['token_description']
global model
model = module.params['model']
global version
version = module.params['version']
global use_proxy
use_proxy = module.params['use_proxy']
# Set Hostname to use (if using proxy use ip_address, otherwise use vapp_ip)
global hostname
global sock5Proxy
if (use_proxy):
hostname = ip_address
sock5Proxy = {
'http': 'socks5h://' + vapp_ip + ':1080',
'https':'socks5h://' + vapp_ip + ':1080'
}
else:
hostname = vapp_ip
sock5Proxy = ''
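# Note: the socks5h:// proxy URLs used above require the optional SOCKS support
# in the requests library (PySocks, e.g. installed via "pip install requests[socks]").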
if (model == "csr1k"):
# Verify CSR1K is responding to API requests
validate_csr1k_connectivity()
# Verify SSMS is responding to API requests
validate_ssms_connectivity()
# License CSR1000v
url = "https://" + hostname + ":" + api_port + "/restconf/data/cisco-smart-license:register-id-token"
CSRLicenseJSON = {}
CSRLicenseJSONChild = {}
CSRLicenseJSONChild['id-token'] = getSmartLicenseToken(token_description)
CSRLicenseJSONChild['force'] = True
CSRLicenseJSON['cisco-smart-license:register-id-token'] = CSRLicenseJSONChild
customHttpPost(url, json=CSRLicenseJSON, auth=(username,password), headers={'Content-Type': 'application/yang-data+json'},proxies=sock5Proxy, verify = False)
# Verify CSR1Kv is licensed
validate_csr1k_licenseStatus()
if (model == 'asav'):
# Verify ASAv is responding to API requests
validate_asav_connectivity()
# Verify SSMS is responding to API requests
validate_ssms_connectivity()
# License ASAv
url = "https://" + hostname + ":" + api_port + "/api/licensing/smart/asav/register"
ASALicenseJSON = {}
ASALicenseJSON['kind'] = 'object#SmartLicenseRegId'
ASALicenseJSON['idToken'] = getSmartLicenseToken(token_description)
ASALicenseJSON['force'] = True
customHttpPost(url, json=ASALicenseJSON, auth=(username,password), headers={'User-Agent': "REST API Agent" }, proxies=sock5Proxy, verify = False)
# Verify ASAv is licensed
validate_asav_licenseStatus()
if (model == 'cucm'):
# Verify CUCM is responding to API requests
validate_cucm_connectivity()
# Verify SSMS is responding to API requests
validate_ssms_connectivity()
# License CUCM
url = "https://" + hostname + ":" + api_port + "/axl/"
body = """
<soapenv:Envelope xmlns:soapenv="http://schemas.xmlsoap.org/soap/envelope/" xmlns:ns="http://www.cisco.com/AXL/API/%version">
<soapenv:Header/>
<soapenv:Body>
<ns:doSmartLicenseRegister>
<token>%token</token>
<force>true</force>
</ns:doSmartLicenseRegister>
</soapenv:Body>
</soapenv:Envelope>"""
body = body.replace("%version",str(version))
body = body.replace("%token", getSmartLicenseToken(token_description))
if(format(version,".0f") == '12'):
customHttpPost(url, data=body, auth=(username,password), headers={'SOAPAction' : 'CUCM:DB ver=' + str(version) }, proxies=sock5Proxy, timeout=(300,300), verify = False)
elif((format(version,".0f") == '14')):
iHttp = 0
while True:
try:
response = requests.post(url, data=body, auth=(username,password),headers={'SOAPAction' : 'CUCM:DB ver=' + str(version) }, proxies=sock5Proxy, timeout=(300,300), verify = False)
except requests.exceptions.ConnectionError as err:
if ('RemoteDisconnected' in str(err)):
# Request actually succeeded (handling delays and SOCKS quirks)
time.sleep(120)
break
except Exception as err:
if (iHttp > 100):
# Giving up after more than 100 attempts (roughly 100 minutes)
result['changed'] = False
result['failed'] = True
if 'response' in locals():
result['response'] = str(response.text)
module.fail_json(msg=str(err),**result)
else:
# Unable to connect, pausing for 1 minute
time.sleep(60)
iHttp = iHttp + 1
print('fail ' + str(iHttp))
continue
# Verify CUCM is licensed
validate_cucm_licenseStatus()
if (model == 'cuc'):
# Verify cuc is responding to API requests
validate_cuc_connectivity()
# Verify SSMS is responding to API requests
validate_ssms_connectivity()
# License CUC
url = "https://" + hostname + ":" + api_port + "/vmrest/smartlicense/register"
CUCLicenseJSON = {}
CUCLicenseJSON['token'] = getSmartLicenseToken(token_description)
CUCLicenseJSON['force'] = True
customHttpPut(url, json=CUCLicenseJSON, auth=(username,password), proxies=sock5Proxy, verify = False)
# Verify CUC is licensed
validate_cuc_licenseStatus()
if (model == "uccx"):
# Verify UCCx is responding to API requests
validate_uccx_connectivity()
# Verify SSMS is responding to API requests
validate_ssms_connectivity()
# Get jSession Cookie
url = "https://" + hostname + ":" + api_port + "/appadmin/main"
uccx_headers = {
'Host': hostname + ":" + api_port
}
pSession.headers = uccx_headers
uccx_response = customSessionGet(url, 302, auth=(username,password), timeout=60, allow_redirects=False, proxies=sock5Proxy, verify=False)
# Get CSRF Token
url = 'https://' + hostname + ":" + api_port + '/appadmin/JavaScriptServlet'
uccx_headers = {
'Host': hostname + ":" + api_port,
'Origin': 'https://' + hostname + ':' + api_port,
'FETCH-CSRF-TOKEN': '1'
}
pSession.headers = uccx_headers
uccx_response = customSessionPost(url, 200, timeout=60, allow_redirects=False, proxies=sock5Proxy, verify=False)
CSRFToken = (uccx_response.text.split(':', 1)[1])
# Perform User Authentication
url = 'https://' + hostname + ':' + api_port + '/appadmin/j_security_check'
uccx_headers = {
'Host': hostname + ":" + api_port,
'Origin': 'https://' + hostname + ':' + api_port,
'Referer': 'https://' + hostname + ':' + api_port + '/appadmin/main',
'Upgrade-Insecure-Requests': '1'
}
uccx_body = {
'j_username': username,
'j_password': password,
'appNav': 'appadmin',
'CSRFTOKEN': CSRFToken
}
pSession.headers = uccx_headers
uccx_response = customSessionPost(url, 302,data=uccx_body, timeout=60, allow_redirects=False, proxies=sock5Proxy, verify=False)
# Perform Smart License Registration
url = 'https://' + hostname + ':' + api_port + '/appadmin/smartlicense/register.do'
uccx_headers = {
'Host': hostname + ":" + api_port,
'Referer': 'https://' + hostname + ':' + api_port + '/appadmin/smartlicense/registerdisplay.do?request_type=register',
'CSRFTOKEN': CSRFToken,
'Origin': 'https://' + hostname + ':' + api_port,
'X-Requested-With': 'XMLHttpRequest, XMLHttpRequest',
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Content-Type': 'application/json; charset=utf-8'
}
uccx_json = {
'forceFlag': True,
'IdToken': getSmartLicenseToken(token_description)
}
pSession.headers = uccx_headers
uccx_response = customSessionPost(url, 200, json=uccx_json, timeout=60, allow_redirects=False, proxies=sock5Proxy, verify=False)
result['response'] = uccx_response.text
# Verify UCCx is licensed
validate_uccx_licenseStatus(uccx_response)
pSession.close()
if (model == 'expressway'):
# Verify Expressway is responding to API requests
validate_expw_connectivity()
# Verify SSMS is responding to API requests
validate_ssms_connectivity()
# License Expressway
url = "https://" + hostname + ":" + api_port + "/api/provisioning/common/smartlicensing/registration"
expw_LicenseJSON = {}
expw_LicenseJSON['Reregister'] = 'Yes'
expw_LicenseJSON['Token'] = getSmartLicenseToken(token_description)
customHttpPost(url, json=expw_LicenseJSON, auth=(username,password), proxies=sock5Proxy, verify = False)
# Verify Expressway is licensed
validate_expw_licenseStatus()
if (model == 'cmm'):
# Verify Cisco Meeting Manager is responding to API requests
validate_cmm_connectivity()
# Verify SSMS is responding to API requests
validate_ssms_connectivity()
# Get CSRF Token
url = 'https://' + hostname + ":" + api_port + '/api_login'
cmm_headers = {
'Host': hostname + ":" + api_port
}
pSession.headers = cmm_headers
iHttp = 0
while True:
try:
cmm_response = customSessionGet(url, 200, timeout=60, proxies=sock5Proxy, verify=False)
CSRFToken = (json.loads(cmm_response.text))['token']
break
except Exception as err:
if (iHttp > 100):
# Giving up after more than 100 attempts (roughly 100 minutes)
result['changed'] = False
result['failed'] = True
if 'response' in locals():
result['response'] = str(cmm_response.text)
module.fail_json(msg=str(err),**result)
else:
# Unable to connect, pausing for 1 minute
time.sleep(60)
iHttp = iHttp + 1
continue
# Perform User Authentication
url = 'https://' + hostname + ":" + api_port + '/api_login/'
cmm_headers = {
'Host': hostname + ":" + api_port,
'X-CSRFToken': CSRFToken,
'X-Requested-With': 'XMLHttpRequest'
}
cmm_body = {
'username': username,
'password': password,
'source': 'local'
}
pSession.headers = cmm_headers
iHttp = 0
while True:
try:
cmm_response = customSessionPost(url, 200, data=cmm_body, proxies=sock5Proxy, timeout=60, verify=False)
CSRFToken = json.loads(cmm_response.text)['token']
break
except Exception as err:
if (iHttp > 100):
# Giving up after more than 100 attempts (roughly 100 minutes)
result['changed'] = False
result['failed'] = True
if 'response' in locals():
result['response'] = str(cmm_response.text)
module.fail_json(msg=str(err),**result)
else:
# Unable to connect, pausing for 1 minute
time.sleep(60)
iHttp = iHttp + 1
continue
# Perform Smart License Registration
url = 'https://' + hostname + ":" + api_port + '/licensing/api/smart/register/'
cmm_headers = {
'Host': hostname + ":" + api_port,
'Origin': 'https://' + hostname + ":" + api_port,
'X-CSRFToken': CSRFToken,
'X-Requested-With': 'XMLHttpRequest'
}
cmm_LicenseJSON = {}
cmm_LicenseJSON['token'] = getSmartLicenseToken(token_description)
cmm_LicenseJSON['force'] = True
pSession.headers = cmm_headers
cmm_response = customSessionPost(url, 200, json=cmm_LicenseJSON, proxies=sock5Proxy, verify = False)
# Verify CMM is | |
= AttributeEventWait(self.getAttribute("state"))
evt_wait.lock()
try:
time_stamp = time.time()
try:
self.command_inout("ReleaseMacro")
except PyTango.DevFailed as df:
# Macro already finished - no need to release
if df.args[0].reason == "API_CommandNotAllowed":
return
evt_wait.waitForEvent((self.Running, ), equal=False,
after=time_stamp,
reactivity=self.InteractiveTimeout)
finally:
evt_wait.unlock()
evt_wait.disconnect()
def stop(self, synch=True):
if not synch:
self.command_inout("StopMacro")
return
evt_wait = AttributeEventWait(self.getAttribute("state"))
evt_wait.lock()
try:
time_stamp = time.time()
self.command_inout("StopMacro")
evt_wait.waitForEvent((self.Running, ), equal=False,
after=time_stamp,
reactivity=self.InteractiveTimeout)
finally:
evt_wait.unlock()
evt_wait.disconnect()
def _clearRunMacro(self):
# Clear the log buffer
list(map(LogAttr.clearLogBuffer, list(self._log_attr.values())))
self._running_macros = None
self._running_macro = None
self._user_xml = None
self._block_lines = 0
def _createMacroXml(self, macro_name, macro_params):
"""Creation of the macro XML object.
:param macro_name: (str) macro name
:param macro_params: (sequence[str]) list of parameter values,
if repeat parameters are used parameter values may be sequences
itself.
:return (lxml.etree._Element) macro XML element
"""
macro_info = self.macro_server.getMacroInfoObj(macro_name)
params_def = macro_info.parameters
macro_node = createMacroNode(macro_name, params_def, macro_params)
return macro_node.toXml()
def preRunMacro(self, obj, parameters):
self._clearRunMacro()
xml_root = None
if isinstance(obj, str):
if obj.startswith('<') and not parameters:
xml_root = etree.fromstring(obj)
else:
macros = []
if len(parameters) == 0:
macros_strs = obj.split('\n')
for m in macros_strs:
pars = m.split()
macros.append((pars[0], pars[1:]))
else:
parameters = recur_map(str, parameters)
macros.append((obj, parameters))
xml_root = xml_seq = etree.Element('sequence')
for m in macros:
macro_name = m[0]
macro_params = m[1]
xml_macro = self._createMacroXml(macro_name, macro_params)
xml_macro.set('id', str(uuid.uuid1()))
xml_seq.append(xml_macro)
elif etree.iselement(obj):
xml_root = obj
else:
raise TypeError('obj must be a string or a etree.Element')
self._running_macros = {}
for macro_xml in xml_root.xpath('//macro'):
id, name = macro_xml.get('id'), macro_xml.get('name')
self._running_macros[id] = Macro(self, name, id, macro_xml)
return xml_root
def postRunMacro(self, result, synch):
pass
def runMacro(self, obj, parameters=[], synch=False):
self._user_xml = self.preRunMacro(obj, parameters)
result = self._runMacro(self._user_xml, synch=synch)
return self.postRunMacro(result, synch)
def _runMacro(self, xml, synch=False):
if not synch:
return self.command_inout("RunMacro",
[etree.tostring(xml,
encoding='unicode')])
timeout = self.InteractiveTimeout
evt_wait = self._getEventWait()
evt_wait.connect(self.getAttribute("state"))
evt_wait.lock()
try:
evt_wait.waitForEvent((self.Running, ), equal=False,
reactivity=timeout)
# Clear event set to not confuse the value coming from the
# connection with the event of the end of the macro execution
# in the next wait event. This was observed on Windows where
# the time stamp resolution is not better than 1 ms.
evt_wait.clearEventSet()
ts = time.time()
result = self.command_inout("RunMacro",
[etree.tostring(xml,
encoding='unicode')])
evt_wait.waitForEvent((self.Running, ), after=ts,
reactivity=timeout)
if synch:
evt_wait.waitForEvent((self.Running, ), equal=False, after=ts,
reactivity=timeout)
finally:
self._clearRunMacro()
evt_wait.unlock()
evt_wait.disconnect()
return result
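# Hedged usage sketch (comments only): with a Door Tango device exported, the
# methods above are typically driven like this; the device name, macro name and
# parameters are illustrative, not taken from this file.
#
#   import taurus
#   door = taurus.Device("door/demo/1")
#   door.runMacro("lsm", synch=True)                              # macro given as a string
#   door.runMacro("ascan", ["mot01", "0", "10", "10", "0.1"], synch=True)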
def stateChanged(self, s, t, v):
# In contrast to Taurus 3, Taurus 4 raises exceptions when the
# device server is going down and we try to retrieve the state.
# In this case provide the same behavior as Taurus 3 - assign None to
# the old state
try:
self._old_door_state = self.stateObj.rvalue
except PyTango.DevFailed:
self._old_door_state = None
self._old_sw_door_state = self.state
def resultReceived(self, log_name, result):
"""Method invoked by the arrival of a change event on the Result
attribute"""
if self._ignore_logs or self._running_macro is None:
return
self._running_macro.setResult(result)
return result
def putEnvironment(self, name, value):
self.macro_server.putEnvironment(name, value)
def putEnvironments(self, obj):
self.macro_server.putEnvironments(obj)
setEnvironment = putEnvironment
setEnvironments = putEnvironments
def getEnvironment(self, name=None):
return self.macro_server.getEnvironment(name=name)
def inputReceived(self, s, t, v):
if t not in CHANGE_EVT_TYPES:
return
if v is None or self._running_macros is None:
return
input_data = CodecFactory().decode(('json', v.value))
self.processInput(input_data)
def processInput(self, input_data):
TaurusManager().addJob(self._processInput, None, input_data)
def _processInput(self, input_data):
input_type = input_data['type']
if input_type == 'input':
result = self._input_handler.input(input_data)
if result['input'] == '' and 'default_value' in input_data:
result['input'] = input_data['default_value']
result = CodecFactory().encode('json', ('', result))[1]
self.write_attribute('Input', result)
elif input_type == 'timeout':
self._input_handler.input_timeout(input_data)
def recordDataReceived(self, s, t, v):
if t not in CHANGE_EVT_TYPES:
return
return self._processRecordData(v)
def _processRecordData(self, data):
if data is None or data.rvalue is None:
return
data = data.rvalue
size = len(data[1])
if size == 0:
return
format = data[0]
codec = CodecFactory().getCodec(format)
data = codec.decode(data)
return data
def processRecordData(self, data):
pass
def macroStatusReceived(self, s, t, v):
if v is None or self._running_macros is None:
return
if t not in CHANGE_EVT_TYPES:
return
v = v.value
if not len(v[1]):
return
format = v[0]
codec = CodecFactory().getCodec(format)
fmt, data = codec.decode(v)
for macro_status in data:
id = macro_status.get('id')
macro = self._running_macros.get(id)
self._last_running_macro = self._running_macro = macro
# if we don't have the ID it's because the macro is running a
# submacro, or another client is connected to the same door (shame
# on him!) and executing a macro; in either case we discard this event
if macro is not None:
macro.__dict__.update(macro_status)
return data
def logReceived(self, log_name, output):
term_size = get_terminal_size()
max_chrs = term_size.columns if term_size else None
if not output or self._silent or self._ignore_logs:
return
if log_name == self.Debug and not self._debug:
return
o = self.log_start[log_name]
for line in output:
if not self._debug:
if line == self.BlockStart:
self._in_block = True
for i in range(self._block_lines):
if max_chrs is None:
nb_lines = 1
else:
nb_lines = _get_nb_lines(
self._len_last_data_line,
max_chrs)
# per each line: erase current line,
# go up one line and erase current line
o += '\x1b[2K\x1b[1A\x1b[2K' * nb_lines
self._block_lines = 0
continue
elif line == self.BlockFinish:
self._in_block = False
continue
else:
self._len_last_data_line = len(line)
if self._in_block:
self._block_lines += 1
else:
self._block_lines = 0
o += "%s\n" % line
o += self.log_stop[log_name]
self.write(o)
def write(self, msg, stream=None):
if self.isSilent():
return
self._output_stream = sys.stdout
out = self._output_stream
if stream is not None:
start, stop = self.log_start.get(stream), self.log_stop.get(stream)
if start is not None and stop is not None:
out.write(start)
out.write(msg)
out.write(stop)
out.flush()
return
out.write(msg)
out.flush()
def writeln(self, msg='', stream=None):
self.write("%s\n" % msg, stream=stream)
def getExperimentConfigurationObj(self):
return self._experiment_configuration
def getExperimentConfiguration(self):
return self._experiment_configuration.get()
def setExperimentConfiguration(self, config, mnt_grps=None):
self._experiment_configuration.set(config, mnt_grps=mnt_grps)
class UnknownMacroServerElementFormat(Exception):
pass
class MacroPath(object):
def __init__(self, ms):
self._ms = weakref.ref(ms)
self.refresh()
def refresh(self):
self.macro_path = mp = self._ms().get_property("MacroPath")[
"MacroPath"]
self.base_macro_path = osp.commonprefix(self.macro_path)
self.rel_macro_path = [osp.relpath(p, self.base_macro_path) for p in mp]
class Environment(dict):
def __init__(self, macro_server):
dict.__setattr__(self, "_macro_server_", weakref.ref(macro_server))
def __setattr__(self, key, value):
ms = self._macro_server_()
if ms is not None:
ms.putEnvironment(key, value)
def __getattr__(self, key):
return self[key]
def __delattr__(self, key):
ms = self._macro_server_()
if ms is not None:
ms.removeEnvironment(key)
def __dir__(self):
return [key for key in list(self.keys()) if not key.startswith("_")]
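# Hedged usage sketch (comments only): Environment exposes the macro-server
# environment as attributes. Given a BaseMacroServer instance `ms` (name
# illustrative), the mapping returned by getEnvironment() can be used as:
#
#   env = ms.getEnvironment()    # the Environment mapping defined above
#   env.ScanDir = "/tmp/scans"   # forwards to ms.putEnvironment("ScanDir", ...)
#   print(env.ScanDir)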
class BaseMacroServer(MacroServerDevice):
"""Class encapsulating Macro Server device functionality."""
def __init__(self, name, **kw):
self._env = Environment(self)
self._elements = BaseSardanaElementContainer()
self.call__init__(MacroServerDevice, name, **kw)
self.__elems_attr = self.getAttribute("Elements")
try:
serialization_mode = TaurusSerializationMode.TangoSerial
except AttributeError:
serialization_mode = TaurusSerializationMode.Serial
self.__elems_attr.setSerializationMode(serialization_mode)
self.__elems_attr.addListener(self.on_elements_changed)
self.__elems_attr.setSerializationMode(
TaurusSerializationMode.Concurrent)
self.__env_attr = self.getAttribute('Environment')
try:
serialization_mode = TaurusSerializationMode.TangoSerial
except AttributeError:
serialization_mode = TaurusSerializationMode.Serial
self.__env_attr.setSerializationMode(serialization_mode)
self.__env_attr.addListener(self.on_environment_changed)
self.__env_attr.setSerializationMode(
TaurusSerializationMode.Concurrent)
NO_CLASS_TYPES = 'ControllerClass', 'ControllerLibrary', \
'MacroLibrary', 'Instrument', 'Meta', 'ParameterType'
def on_environment_changed(self, evt_src, evt_type, evt_value):
try:
return self._on_environment_changed(evt_src, evt_type, evt_value)
except Exception:
self.error("Exception occurred processing environment")
self.error("Details:", exc_info=1)
return set(), set(), set()
def _on_environment_changed(self, evt_src, evt_type, evt_value):
ret = added, removed, changed = set(), set(), set()
if evt_type not in CHANGE_EVT_TYPES:
return ret
env = CodecFactory().decode(evt_value.rvalue)
for key, value in list(env.get('new', {}).items()):
self._addEnvironment(key, value)
added.add(key)
for key in env.get('del', []):
self._removeEnvironment(key)
removed.add(key)
for key, value in list(env.get('change', {}).items()):
self._removeEnvironment(key)
self._addEnvironment(key, value)
changed.add(key)
return ret
def _addEnvironment(self, key, value):
self._env[key] = value
def _removeEnvironment(self, key):
try:
self._env.pop(key)
except KeyError:
pass
def putEnvironment(self, name, value):
self.putEnvironments({name: value})
def putEnvironments(self, obj):
obj = dict(new=obj)
codec = CodecFactory().getCodec('pickle')
self.write_attribute('Environment', codec.encode(('', obj)))
setEnvironment = putEnvironment
setEnvironments = putEnvironments
def getEnvironment(self, name=None):
if name is None:
return self._env
else:
return self._env[name]
def removeEnvironment(self, key):
keys = key,
return self.removeEnvironments(keys)
def removeEnvironments(self, keys):
obj = {'del': keys}
codec = CodecFactory().getCodec('pickle')
self.write_attribute('Environment', codec.encode(('', obj)))
def getObject(self, element_info):
elem_type = element_info.getType()
if elem_type in self.NO_CLASS_TYPES:
obj = object()
elif "MacroCode" in element_info.interfaces:
obj = self._createMacroClassObject(element_info)
else:
obj = self._createDeviceObject(element_info)
return obj
def _createMacroClassObject(self, element_info):
return MacroInfo(from_json=element_info._data)
def _createDeviceObject(self, element_info):
return Factory().getDevice(element_info.full_name)
def on_elements_changed(self, evt_src, evt_type, evt_value):
try:
return self._on_elements_changed(evt_src, evt_type, evt_value)
except Exception:
self.error("Exception occurred processing elements")
self.error("Details:", exc_info=1)
return set(), set(), set()
def _on_elements_changed(self, evt_src, evt_type, evt_value):
ret = added, removed, changed = set(), set(), set()
if evt_type not in CHANGE_EVT_TYPES:
return ret
try:
elems = CodecFactory().decode(evt_value.rvalue)
except:
self.error("Could not decode element info format=%s len=%s",
evt_value.rvalue[0], len(evt_value.rvalue[1]))
return ret
for element_data in elems.get('new', ()):
element_data['manager'] = self
element = self._addElement(element_data)
added.add(element)
for element_data in elems.get('del', ()):
element = self._removeElement(element_data)
removed.add(element)
for element_data in elems.get('change', ()):
element = self._removeElement(element_data)
element_data['manager'] = self
element = self._addElement(element_data)
changed.add(element)
return ret
def _addElement(self, element_data):
element | |
assert balance_maker == deploy_args[2] - _amount_make
assert balance_taker == _amount_take
assert commitment == _amount_make - _amount_take
# Assert: last_price
assert bond_exchange.lastPrice(bond_token.address) == 123
# Normal case 2
# Make: buy, Take: sell
# <Issuer> issues new tokens -> <Investor> places a make order (buy)
# -> <Issuer> places a take order (sell) -> <Settlement agent> processes the settlement
def test_confirmAgreement_normal_2(users,
bond_exchange, personal_info, payment_gateway):
_issuer = users['issuer']
_trader = users['trader']
_agent = users['agent']
personalinfo_register(personal_info, _issuer, _issuer)
payment_gateway_register(payment_gateway, _issuer, _agent)
payment_gateway_approve(payment_gateway, _issuer, _agent)
personalinfo_register(personal_info, _trader, _issuer)
payment_gateway_register(payment_gateway, _trader, _agent)
payment_gateway_approve(payment_gateway, _trader, _agent)
# Issue a new bond token
bond_token, deploy_args = utils. \
issue_bond_token(users, bond_exchange.address, personal_info.address)
# New order (buy): investor
_price = 123
_amount_make = 100
bond_exchange.createOrder.transact(
bond_token.address, _amount_make, _price, True, _agent, {'from': _trader})
order_id = bond_exchange.latestOrderId()
# Deposit the holdings into the exchange: issuer
_amount_take = 50
bond_token.transfer.transact(bond_exchange.address, _amount_take, {'from': _issuer})
# Take order (sell): issuer
bond_exchange.executeOrder.transact(order_id, _amount_take, False, {'from': _issuer})
agreement_id = bond_exchange.latestAgreementId(order_id)
# Settlement approval: settlement agent
bond_exchange.confirmAgreement.transact(
order_id, agreement_id, {'from': _agent})
orderbook = bond_exchange.getOrder(order_id)
agree = bond_exchange.getAgreement(order_id, agreement_id)
balance_maker = bond_token.balanceOf(_trader)
balance_taker = bond_token.balanceOf(_issuer)
commitment = bond_exchange.commitmentOf(_issuer, bond_token.address)
assert orderbook == [
_trader, to_checksum_address(bond_token.address),
_amount_make - _amount_take,
_price, True, _agent, False
]
assert agree[0:5] == [_issuer, _amount_take, _price, False, True]
assert balance_maker == _amount_take
assert balance_taker == deploy_args[2] - _amount_take
assert commitment == 0
# Assert: last_price
assert bond_exchange.lastPrice(bond_token.address) == 123
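# Hedged sketch (illustrative only, not used by the tests in this file): every
# test repeats the same six registration/approval calls. A helper such as this
# one could factor that setup out; the helper name is an assumption.
def register_participants(personal_info, payment_gateway, issuer, trader, agent):
    personalinfo_register(personal_info, issuer, issuer)
    payment_gateway_register(payment_gateway, issuer, agent)
    payment_gateway_approve(payment_gateway, issuer, agent)
    personalinfo_register(personal_info, trader, issuer)
    payment_gateway_register(payment_gateway, trader, agent)
    payment_gateway_approve(payment_gateway, trader, agent)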
# Error case 1
# Invalid input type (_orderId)
def test_confirmAgreement_error_1(users, bond_exchange):
_agent = users['agent']
# Settlement approval: settlement agent
with pytest.raises(OverflowError):
bond_exchange.confirmAgreement.transact(-1, 0, {'from': _agent})
with pytest.raises(OverflowError):
bond_exchange.confirmAgreement.transact(2 ** 256, 0, {'from': _agent})
with pytest.raises(TypeError):
bond_exchange.confirmAgreement.transact('abc', 0, {'from': _agent})
# Error case 2
# Invalid input type (_agreementId)
def test_confirmAgreement_error_2(users, bond_exchange):
_agent = users['agent']
# Settlement approval: settlement agent
with pytest.raises(OverflowError):
bond_exchange.confirmAgreement.transact(0, -1, {'from': _agent})
with pytest.raises(OverflowError):
bond_exchange.confirmAgreement.transact(0, 2 ** 256, {'from': _agent})
with pytest.raises(TypeError):
bond_exchange.confirmAgreement.transact(0, 'abc', {'from': _agent})
# Error case 3
# When the specified order ID is greater than or equal to the latest order ID
def test_confirmAgreement_error_3(users,
bond_exchange, personal_info, payment_gateway):
_issuer = users['issuer']
_trader = users['trader']
_agent = users['agent']
personalinfo_register(personal_info, _issuer, _issuer)
payment_gateway_register(payment_gateway, _issuer, _agent)
payment_gateway_approve(payment_gateway, _issuer, _agent)
personalinfo_register(personal_info, _trader, _issuer)
payment_gateway_register(payment_gateway, _trader, _agent)
payment_gateway_approve(payment_gateway, _trader, _agent)
# Issue a new bond token
bond_token, deploy_args = utils. \
issue_bond_token(users, bond_exchange.address, personal_info.address)
# Deposit into the exchange: issuer
_amount_make = 100
bond_token.transfer.transact(bond_exchange.address, _amount_make, {'from': _issuer})
# Make order (sell): issuer
_price = 123
bond_exchange.createOrder.transact(
bond_token.address, _amount_make, _price, False, _agent, {'from': _issuer})
# Take order (buy): investor
order_id = bond_exchange.latestOrderId()
_amount_take = 50
bond_exchange.executeOrder.transact(
order_id, _amount_take, True, {'from': _trader})
agreement_id = bond_exchange.latestAgreementId(order_id)
# Settlement approval: settlement agent
order_id_error = bond_exchange.latestOrderId() + 1
bond_exchange.confirmAgreement.transact(order_id_error, agreement_id, {'from': _agent}) # expected to fail
orderbook = bond_exchange.getOrder(order_id)
agreement = bond_exchange.getAgreement(order_id, agreement_id)
balance_maker = bond_token.balanceOf(_issuer)
balance_taker = bond_token.balanceOf(_trader)
commitment = bond_exchange.commitmentOf(_issuer, bond_token.address)
assert orderbook == [
_issuer, to_checksum_address(bond_token.address),
_amount_make - _amount_take,
_price, False, _agent, False
]
assert agreement[0:5] == [_trader, _amount_take, _price, False, False]
assert balance_maker == deploy_args[2] - _amount_make
assert balance_taker == 0
assert commitment == _amount_make
# Assert: last_price
assert bond_exchange.lastPrice(bond_token.address) == 0
# Error case 4
# When the specified agreement ID is greater than or equal to the latest agreement ID
def test_confirmAgreement_error_4(users,
bond_exchange, personal_info, payment_gateway):
_issuer = users['issuer']
_trader = users['trader']
_agent = users['agent']
personalinfo_register(personal_info, _issuer, _issuer)
payment_gateway_register(payment_gateway, _issuer, _agent)
payment_gateway_approve(payment_gateway, _issuer, _agent)
personalinfo_register(personal_info, _trader, _issuer)
payment_gateway_register(payment_gateway, _trader, _agent)
payment_gateway_approve(payment_gateway, _trader, _agent)
# Issue a new bond token
bond_token, deploy_args = utils. \
issue_bond_token(users, bond_exchange.address, personal_info.address)
# Deposit into the exchange: issuer
_amount_make = 100
bond_token.transfer.transact(bond_exchange.address, _amount_make, {'from': _issuer})
# Make order (sell): issuer
_price = 123
bond_exchange.createOrder.transact(
bond_token.address, _amount_make, _price, False, _agent, {'from': _issuer})
# Take order (buy): investor
order_id = bond_exchange.latestOrderId()
_amount_take = 50
bond_exchange.executeOrder.transact(
order_id, _amount_take, True, {'from': _trader})
agreement_id = bond_exchange.latestAgreementId(order_id)
# Settlement approval: settlement agent
agreement_id_error = bond_exchange.latestAgreementId(order_id) + 1
bond_exchange.confirmAgreement.transact(order_id, agreement_id_error, {'from': _agent}) # expected to fail
orderbook = bond_exchange.getOrder(order_id)
agreement = bond_exchange.getAgreement(order_id, agreement_id)
balance_maker = bond_token.balanceOf(_issuer)
balance_taker = bond_token.balanceOf(_trader)
commitment = bond_exchange.commitmentOf(_issuer, bond_token.address)
assert orderbook == [
_issuer, to_checksum_address(bond_token.address),
_amount_make - _amount_take,
_price, False, _agent, False
]
assert agreement[0:5] == [_trader, _amount_take, _price, False, False]
assert balance_maker == deploy_args[2] - _amount_make
assert balance_taker == 0
assert commitment == _amount_make
# Assert: last_price
assert bond_exchange.lastPrice(bond_token.address) == 0
# Error case 5
# When the specified agreement has already been settled (paid)
def test_confirmAgreement_error_5(users,
bond_exchange, personal_info, payment_gateway):
_issuer = users['issuer']
_trader = users['trader']
_agent = users['agent']
personalinfo_register(personal_info, _issuer, _issuer)
payment_gateway_register(payment_gateway, _issuer, _agent)
payment_gateway_approve(payment_gateway, _issuer, _agent)
personalinfo_register(personal_info, _trader, _issuer)
payment_gateway_register(payment_gateway, _trader, _agent)
payment_gateway_approve(payment_gateway, _trader, _agent)
# Issue a new bond token
bond_token, deploy_args = utils. \
issue_bond_token(users, bond_exchange.address, personal_info.address)
# Deposit into the exchange: issuer
_amount_make = 100
bond_token.transfer.transact(bond_exchange.address, _amount_make, {'from': _issuer})
# Make order (sell): issuer
_price = 123
bond_exchange.createOrder.transact(
bond_token.address, _amount_make, _price, False, _agent, {'from': _issuer})
# Take order (buy): investor
order_id = bond_exchange.latestOrderId()
_amount_take = 50
bond_exchange.executeOrder.transact(
order_id, _amount_take, True, {'from': _trader})
agreement_id = bond_exchange.latestAgreementId(order_id)
# Settlement approval: settlement agent
bond_exchange.confirmAgreement.transact(
order_id, agreement_id, {'from': _agent})
# Settlement approval: settlement agent (second attempt)
bond_exchange.confirmAgreement.transact(order_id, agreement_id, {'from': _agent}) # expected to fail
orderbook = bond_exchange.getOrder(order_id)
agreement = bond_exchange.getAgreement(order_id, agreement_id)
balance_maker = bond_token.balanceOf(_issuer)
balance_taker = bond_token.balanceOf(_trader)
commitment = bond_exchange.commitmentOf(_issuer, bond_token.address)
assert orderbook == [
_issuer, to_checksum_address(bond_token.address),
_amount_make - _amount_take,
_price, False, _agent, False
]
assert agreement[0:5] == [_trader, _amount_take, _price, False, True]
assert balance_maker == deploy_args[2] - _amount_make
assert balance_taker == _amount_take
assert commitment == _amount_make - _amount_take
# Assert: last_price
assert bond_exchange.lastPrice(bond_token.address) == 123
# Error case 6
# When the caller is not the settlement agent specified in the original order
def test_confirmAgreement_error_6(users,
bond_exchange, personal_info, payment_gateway):
_issuer = users['issuer']
_trader = users['trader']
_agent = users['agent']
personalinfo_register(personal_info, _issuer, _issuer)
payment_gateway_register(payment_gateway, _issuer, _agent)
payment_gateway_approve(payment_gateway, _issuer, _agent)
personalinfo_register(personal_info, _trader, _issuer)
payment_gateway_register(payment_gateway, _trader, _agent)
payment_gateway_approve(payment_gateway, _trader, _agent)
# Issue a new bond token
bond_token, deploy_args = utils. \
issue_bond_token(users, bond_exchange.address, personal_info.address)
# Deposit into the exchange: issuer
_amount_make = 100
bond_token.transfer.transact(bond_exchange.address, _amount_make, {'from': _issuer})
# Make order (sell): issuer
_price = 123
bond_exchange.createOrder.transact(
bond_token.address, _amount_make, _price, False, _agent, {'from': _issuer})
# Take order (buy): investor
order_id = bond_exchange.latestOrderId()
_amount_take = 50
bond_exchange.executeOrder.transact(
order_id, _amount_take, True, {'from': _trader})
agreement_id = bond_exchange.latestAgreementId(order_id)
# Settlement approval attempted by the investor (not the specified settlement agent)
bond_exchange.confirmAgreement.transact(order_id, agreement_id, {'from': _trader}) # expected to fail
orderbook = bond_exchange.getOrder(order_id)
agreement = bond_exchange.getAgreement(order_id, agreement_id)
balance_maker = bond_token.balanceOf(_issuer)
balance_taker = bond_token.balanceOf(_trader)
commitment = bond_exchange.commitmentOf(_issuer, bond_token.address)
assert orderbook == [
_issuer, to_checksum_address(bond_token.address),
_amount_make - _amount_take,
_price, False, _agent, False
]
assert agreement[0:5] == [_trader, _amount_take, _price, False, False]
assert balance_maker == deploy_args[2] - _amount_make
assert balance_taker == 0
assert commitment == _amount_make
# Assert: last_price
assert bond_exchange.lastPrice(bond_token.address) == 0
# Error case 7
# When the settlement has already been rejected (canceled)
def test_confirmAgreement_error_7(users,
bond_exchange, personal_info, payment_gateway):
_issuer = users['issuer']
_trader = users['trader']
_agent = users['agent']
personalinfo_register(personal_info, _issuer, _issuer)
payment_gateway_register(payment_gateway, _issuer, _agent)
payment_gateway_approve(payment_gateway, _issuer, _agent)
personalinfo_register(personal_info, _trader, _issuer)
payment_gateway_register(payment_gateway, _trader, _agent)
payment_gateway_approve(payment_gateway, _trader, _agent)
# Issue a new bond token
bond_token, deploy_args = utils. \
issue_bond_token(users, bond_exchange.address, personal_info.address)
# Deposit into the exchange: issuer
_amount_make = 100
bond_token.transfer.transact(bond_exchange.address, _amount_make, {'from': _issuer})
# Make order (sell): issuer
_price = 123
bond_exchange.createOrder.transact(
bond_token.address, _amount_make, _price, False, _agent, {'from': _issuer})
# Take order (buy): investor
order_id = bond_exchange.latestOrderId()
_amount_take = 50
bond_exchange.executeOrder.transact(
order_id, _amount_take, True, {'from': _trader})
agreement_id = bond_exchange.latestAgreementId(order_id)
# Settlement rejection: settlement agent
bond_exchange.cancelAgreement.transact(
order_id, agreement_id, {'from': _agent})
# Settlement approval: settlement agent
bond_exchange.confirmAgreement.transact(order_id, agreement_id, {'from': _agent}) # expected to fail
orderbook = bond_exchange.getOrder(order_id)
agreement = bond_exchange.getAgreement(order_id, agreement_id)
balance_maker = bond_token.balanceOf(_issuer)
balance_taker = bond_token.balanceOf(_trader)
commitment = bond_exchange.commitmentOf(_issuer, bond_token.address)
assert orderbook == [
_issuer, to_checksum_address(bond_token.address),
_amount_make,
_price, False, _agent, False
]
assert agreement[0:5] == [_trader, _amount_take, _price, True, False]
assert balance_maker == deploy_args[2] - _amount_make
assert balance_taker == 0
assert commitment == _amount_make
# Assert: last_price
assert bond_exchange.lastPrice(bond_token.address) == 0
'''
TEST: settlement rejection (cancelAgreement)
'''
# Normal case 1
# Make: sell, Take: buy
# <Issuer> issues new tokens -> <Issuer> places a make order (sell)
# -> <Investor> places a take order (buy) -> <Settlement agent> rejects the settlement
def test_cancelAgreement_normal_1(users,
bond_exchange, personal_info, payment_gateway):
_issuer = users['issuer']
_trader = users['trader']
_agent = users['agent']
personalinfo_register(personal_info, _issuer, _issuer)
payment_gateway_register(payment_gateway, _issuer, _agent)
payment_gateway_approve(payment_gateway, _issuer, _agent)
personalinfo_register(personal_info, _trader, _issuer)
payment_gateway_register(payment_gateway, _trader, _agent)
payment_gateway_approve(payment_gateway, _trader, _agent)
# Issue a new bond token
bond_token, deploy_args = utils. \
issue_bond_token(users, bond_exchange.address, personal_info.address)
# Deposit into the exchange: issuer
_amount_make = 100
bond_token.transfer.transact(bond_exchange.address, _amount_make, {'from': _issuer})
# Make order (sell): issuer
_price = 123
bond_exchange.createOrder.transact(
bond_token.address, _amount_make, _price, False, _agent, {'from': _issuer})
# Take order (buy): investor
order_id = bond_exchange.latestOrderId()
_amount_take = 50
bond_exchange.executeOrder.transact(
order_id, _amount_take, True, {'from': _trader})
agreement_id = bond_exchange.latestAgreementId(order_id)
# Settlement rejection: settlement agent
bond_exchange.cancelAgreement.transact(
order_id, agreement_id, {'from': _agent})
orderbook = bond_exchange.getOrder(order_id)
agreement = bond_exchange.getAgreement(order_id, agreement_id)
balance_maker = bond_token.balanceOf(_issuer)
balance_taker = bond_token.balanceOf(_trader)
commitment = bond_exchange.commitmentOf(_issuer, bond_token.address)
assert orderbook == [
_issuer, to_checksum_address(bond_token.address),
_amount_make,
_price, False, _agent, False
]
assert agreement[0:5] == [_trader, _amount_take, _price, True, False]
assert balance_maker == deploy_args[2] - _amount_make
assert balance_taker == 0
assert commitment == _amount_make
# Normal case 2
# Make: buy, Take: sell
# <Issuer> issues new tokens -> <Investor> places a make order (buy)
# -> <Issuer> places a take order (sell) -> <Settlement agent> rejects the settlement
def test_cancelAgreement_normal_2(users,
bond_exchange, personal_info, payment_gateway):
_issuer = users['issuer']
_trader = users['trader']
_agent = users['agent']
personalinfo_register(personal_info, _issuer, _issuer)
payment_gateway_register(payment_gateway, _issuer, _agent)
payment_gateway_approve(payment_gateway, _issuer, _agent)
personalinfo_register(personal_info, _trader, _issuer)
payment_gateway_register(payment_gateway, _trader, _agent)
payment_gateway_approve(payment_gateway, _trader, _agent)
# Issue a new bond token
bond_token, deploy_args = utils. \
issue_bond_token(users, bond_exchange.address, personal_info.address)
# New order (buy): investor
_price = 123
_amount_make = 100
bond_exchange.createOrder.transact(
bond_token.address, _amount_make, _price, True, _agent, {'from': _trader})
order_id = bond_exchange.latestOrderId()
# Deposit the holdings into the exchange: issuer
_amount_take = 50
bond_token.transfer.transact(bond_exchange.address, _amount_take, {'from': _issuer})
# Take order (sell): issuer
bond_exchange.executeOrder.transact(order_id, _amount_take, False, {'from': _issuer})
agreement_id = bond_exchange.latestAgreementId(order_id)
# Settlement rejection: settlement agent
bond_exchange.cancelAgreement.transact(
order_id, agreement_id, {'from': _agent})
orderbook = bond_exchange.getOrder(order_id)
agree = bond_exchange.getAgreement(order_id, agreement_id)
balance_maker = bond_token.balanceOf(_trader)
balance_taker = bond_token.balanceOf(_issuer)
commitment = bond_exchange.commitmentOf(_issuer, bond_token.address)
assert orderbook == [
_trader, to_checksum_address(bond_token.address),
_amount_make - _amount_take,
_price, True,
# packages/python/plotly/plotly/graph_objs/layout/scene/__init__.py
from plotly.basedatatypes import BaseLayoutHierarchyType as _BaseLayoutHierarchyType
import copy as _copy
class ZAxis(_BaseLayoutHierarchyType):
# autorange
# ---------
@property
def autorange(self):
"""
Determines whether or not the range of this axis is computed in
relation to the input data. See `rangemode` for more info. If
`range` is provided, then `autorange` is set to False.
The 'autorange' property is an enumeration that may be specified as:
- One of the following enumeration values:
[True, False, 'reversed']
Returns
-------
Any
"""
return self["autorange"]
@autorange.setter
def autorange(self, val):
self["autorange"] = val
# backgroundcolor
# ---------------
@property
def backgroundcolor(self):
"""
Sets the background color of this axis' wall.
The 'backgroundcolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
Returns
-------
str
"""
return self["backgroundcolor"]
@backgroundcolor.setter
def backgroundcolor(self, val):
self["backgroundcolor"] = val
# calendar
# --------
@property
def calendar(self):
"""
Sets the calendar system to use for `range` and `tick0` if this
is a date axis. This does not set the calendar for interpreting
data on this axis, that's specified in the trace or via the
global `layout.calendar`
The 'calendar' property is an enumeration that may be specified as:
- One of the following enumeration values:
['gregorian', 'chinese', 'coptic', 'discworld',
'ethiopian', 'hebrew', 'islamic', 'julian', 'mayan',
'nanakshahi', 'nepali', 'persian', 'jalali', 'taiwan',
'thai', 'ummalqura']
Returns
-------
Any
"""
return self["calendar"]
@calendar.setter
def calendar(self, val):
self["calendar"] = val
# categoryarray
# -------------
@property
def categoryarray(self):
"""
Sets the order in which categories on this axis appear. Only
has an effect if `categoryorder` is set to "array". Used with
`categoryorder`.
The 'categoryarray' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["categoryarray"]
@categoryarray.setter
def categoryarray(self, val):
self["categoryarray"] = val
# categoryarraysrc
# ----------------
@property
def categoryarraysrc(self):
"""
Sets the source reference on Chart Studio Cloud for
categoryarray.
The 'categoryarraysrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["categoryarraysrc"]
@categoryarraysrc.setter
def categoryarraysrc(self, val):
self["categoryarraysrc"] = val
# categoryorder
# -------------
@property
def categoryorder(self):
"""
Specifies the ordering logic for the case of categorical
variables. By default, plotly uses "trace", which specifies the
order that is present in the data supplied. Set `categoryorder`
to *category ascending* or *category descending* if order
should be determined by the alphanumerical order of the
category names. Set `categoryorder` to "array" to derive the
ordering from the attribute `categoryarray`. If a category is
not found in the `categoryarray` array, the sorting behavior
for that attribute will be identical to the "trace" mode. The
unspecified categories will follow the categories in
`categoryarray`. Set `categoryorder` to *total ascending* or
*total descending* if order should be determined by the
numerical order of the values. Similarly, the order can be
determined by the min, max, sum, mean or median of all the
values.
The 'categoryorder' property is an enumeration that may be specified as:
- One of the following enumeration values:
['trace', 'category ascending', 'category descending',
'array', 'total ascending', 'total descending', 'min
ascending', 'min descending', 'max ascending', 'max
descending', 'sum ascending', 'sum descending', 'mean
ascending', 'mean descending', 'median ascending', 'median
descending']
Returns
-------
Any
"""
return self["categoryorder"]
@categoryorder.setter
def categoryorder(self, val):
self["categoryorder"] = val
# color
# -----
@property
def color(self):
"""
Sets default for all colors associated with this axis all at
once: line, font, tick, and grid colors. Grid color is
lightened by blending this with the plot background. Individual
pieces can override this.
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
Returns
-------
str
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
# dtick
# -----
@property
def dtick(self):
"""
Sets the step in-between ticks on this axis. Use with `tick0`.
Must be a positive number, or special strings available to
"log" and "date" axes. If the axis `type` is "log", then ticks
are set every 10^(n*dtick) where n is the tick number. For
example, to set a tick mark at 1, 10, 100, 1000, ... set dtick
to 1. To set tick marks at 1, 100, 10000, ... set dtick to 2.
To set tick marks at 1, 5, 25, 125, 625, 3125, ... set dtick to
log_10(5), or 0.69897000433. "log" has several special values;
"L<f>", where `f` is a positive number, gives ticks linearly
spaced in value (but not position). For example `tick0` = 0.1,
`dtick` = "L0.5" will put ticks at 0.1, 0.6, 1.1, 1.6 etc. To
show powers of 10 plus small digits between, use "D1" (all
digits) or "D2" (only 2 and 5). `tick0` is ignored for "D1" and
"D2". If the axis `type` is "date", then you must convert the
time to milliseconds. For example, to set the interval between
ticks to one day, set `dtick` to 86400000.0. "date" also has
special values "M<n>" gives ticks spaced by a number of months.
`n` must be a positive integer. To set ticks on the 15th of
every third month, set `tick0` to "2000-01-15" and `dtick` to
"M3". To set ticks every 4 years, set `dtick` to "M48"
The 'dtick' property accepts values of any type
Returns
-------
Any
"""
return self["dtick"]
@dtick.setter
def dtick(self, val):
self["dtick"] = val
# exponentformat
# --------------
@property
def exponentformat(self):
"""
Determines a formatting rule for the tick exponents. For
example, consider the number 1,000,000,000. If "none", it
appears as 1,000,000,000. If "e", 1e+9. If "E", 1E+9. If
"power", 1x10^9 (with 9 in a super script). If "SI", 1G. If
"B", 1B.
The 'exponentformat' property is an enumeration that may be specified as:
- One of the following enumeration
#!/usr/bin/python
from __future__ import print_function, division, absolute_import
from os import remove
from os.path import join, abspath
from sys import stdout, exit
from time import time
import multiprocessing as mp
from argparse import ArgumentParser
import logging
import numpy as np
from disvis import DisVis, PDB, Volume
from disvis.rotations import proportional_orientations, quat_to_rotmat
from disvis.helpers import mkdir_p
def parse_args():
"""Parse the command-line arguments."""
p = ArgumentParser()
p.add_argument('receptor', type=file,
help='PDB-file containing fixed chain.')
p.add_argument('ligand', type=file,
help='PDB-file containing scanning chain.')
p.add_argument('restraints', type=file,
help='File containing the distance restraints')
p.add_argument('-a', '--angle', dest='angle', type=float, default=15, metavar='<float>',
help='Rotational sampling density in degrees. Default is 15 degrees.')
p.add_argument('-vs', '--voxelspacing', dest='voxelspacing', metavar='<float>',
type=float, default=1,
help='Voxel spacing of search grid in angstrom. Default is 1A.')
p.add_argument('-ir', '--interaction-radius',
dest='interaction_radius', type=float, default=3.0, metavar='<float>',
help='Radius of the interaction space for each atom in angstrom. '
'Atoms are thus considered interacting if the distance is '
'larger than the vdW radius and shorter than or equal to '
'vdW + interaction_radius. Default is 3A.')
p.add_argument('-cv', '--max-clash',
dest='max_clash', type=float, default=200, metavar='<float>',
help='Maximum allowed volume of clashes. Increasing this '
'number results in more allowed complexes. '
'Default is 200 A^3.')
p.add_argument('-iv', '--min-interaction',
dest='min_interaction', type=float, default=300, metavar='<float>',
help='Minimal required interaction volume for a '
'conformation to be considered a '
'complex. Increasing this number results in a '
'stricter counting of complexes. '
'Default is 300 A^3.')
p.add_argument('-d', '--directory', dest='directory', metavar='<dir>',
type=abspath, default='.',
help='Directory where results are written to. '
'Default is current directory.')
p.add_argument('-p', '--nproc', dest='nproc', type=int, default=1, metavar='<int>',
help='Number of processors used during search.')
p.add_argument('-g', '--gpu', dest='gpu', action='store_true',
help='Use GPU-acceleration for search. If not available '
'the CPU-version will be used with the given number '
'of processors.')
help_msg = ("File containing residue number for which interactions will be counted. "
"The first line holds the receptor residue, "
"and the second line the ligand residue numbers.")
p.add_argument('-is', '--interaction-selection', metavar='<file>',
dest='interaction_selection', type=file, default=None,
help=help_msg)
help_msg = ("Number of minimal consistent restraints for which an interaction "
"or occupancy analysis will be performed. "
"Default is number of restraints minus 1.")
p.add_argument('-ic', '--interaction-restraints-cutoff', metavar='<int>',
dest='interaction_restraints_cutoff', type=int, default=None,
help=help_msg)
p.add_argument('-oa', '--occupancy-analysis', dest='occupancy_analysis',
action='store_true',
help=("Perform an occupancy analysis, ultimately providing "
"a volume where each grid point represents the "
"normalized probability of that spot being occupied by the ligand."
)
)
return p.parse_args()
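# Example invocation (illustrative; all file names are hypothetical):
#   python disvis.py receptor.pdb ligand.pdb restraints.txt -a 10 -vs 2 -p 4 -d results
# This samples rotations roughly every 10 degrees on a 2 A grid, runs the
# search on 4 processors and writes all output files to the "results" directory.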
def parse_interaction_selection(fid, pdb1, pdb2):
"""Parse the interaction selection file, i.e. all residues for which an
interaction analysis is performed."""
resi1 = [int(x) for x in fid.readline().split()]
resi2 = [int(x) for x in fid.readline().split()]
pdb1_sel = pdb1.select('name', ('CA', "O3'")).select('resi', resi1)
pdb2_sel = pdb2.select('name', ('CA', "O3'")).select('resi', resi2)
if (len(resi1) != pdb1_sel.natoms) or (len(resi2) != pdb2_sel.natoms):
msg = ("Some selected interaction residues where either missing in the PDB file "
"or had alternate conformers. Please check your input residues and remove alternate conformers.")
raise ValueError(msg)
return pdb1_sel, pdb2_sel
def parse_restraints(fid, pdb1, pdb2):
"""Parse the restraints file."""
dist_restraints = []
for line in fid:
# ignore comments and empty lines
line = line.strip()
if line.startswith('#') or not line:
continue
chain1, resi1, name1, chain2, resi2, name2, mindis, maxdis = line.split()
pdb1_sel = pdb1.select('chain', chain1).select('resi',
int(resi1)).select('name', name1).duplicate()
pdb2_sel = pdb2.select('chain', chain2).select('resi',
int(resi2)).select('name', name2).duplicate()
if pdb1_sel.natoms == 0 or pdb2_sel.natoms == 0:
raise ValueError("A restraint selection was not found in line:\n{:s}".format(str(line)))
dist_restraints.append([pdb1_sel, pdb2_sel, float(mindis), float(maxdis)])
fid.close()
return dist_restraints
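# Example restraints file (illustrative values): each non-comment line lists
# the two atoms and the allowed distance range in angstrom, in the order
# parsed above:
#   chain1 resi1 name1  chain2 resi2 name2  mindis maxdis
#   A      145   CA     B      72    CA     0.0    23.0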
class Joiner(object):
def __init__(self, directory):
self.directory = directory
def __call__(self, fname):
"""Join fname with set directory."""
return join(self.directory, fname)
class Results(object):
"""Simple container"""
pass
def run_disvis_instance(queue, receptor, ligand, distance_restraints, rotmat,
weights, n, pdb1_sel, pdb2_sel, args):
"""Run a single DisVis instance."""
dv = DisVis()
dv.receptor = receptor
dv.ligand = ligand
dv.distance_restraints = distance_restraints
dv.rotations = rotmat
dv.weights = weights
dv.voxelspacing = args.voxelspacing
dv.interaction_radius = args.interaction_radius
dv.max_clash = args.max_clash
dv.min_interaction = args.min_interaction
dv.interaction_restraints_cutoff = args.interaction_restraints_cutoff
if args.interaction_selection is not None:
dv.receptor_interaction_selection = pdb1_sel
dv.ligand_interaction_selection = pdb2_sel
dv.occupancy_analysis = args.occupancy_analysis
dv.search()
# Save results to file, to be combined later
joiner = Joiner(args.directory)
fname = joiner('accessible_interaction_space_{:d}.mrc').format(n)
dv.accessible_interaction_space.tofile(fname)
fname = joiner('violations_{:d}.npy').format(n)
np.save(fname, dv.violations)
if dv.interaction_matrix is not None:
fname = joiner('interaction_matrix_{:d}.npy'.format(n))
np.save(fname, dv.interaction_matrix)
if dv.occupancy_analysis:
for key, value in dv.occupancy_grids.iteritems():
fname = joiner('occupancy_{:d}_{:d}.mrc'.format(key, n))
value.tofile(fname)
queue.put(dv.accessible_complexes)
def mp_cpu_disvis(receptor, ligand, rotmat, weights, distance_restraints,
pdb1_sel, pdb2_sel, args):
"""Run several DisVis instances, each with a subset of all rotations."""
# multi-threaded CPU version
try:
max_cpu = mp.cpu_count()
jobs = min(max_cpu, args.nproc)
except NotImplementedError:
jobs = args.nproc
# in case more processes are requested than the number
# of rotations sampled
nrot = rotmat.shape[0]
if jobs > nrot:
jobs = nrot
nrot_per_job = nrot//jobs
write('Number of processors used: {:d}'.format(jobs))
write('Number of rotations per job: {:d}'.format(nrot_per_job))
write('Creating jobs')
queue = mp.Queue()
processes = []
for n in xrange(jobs):
# Determine the rotations that each job needs to sample
init_rot = n * nrot_per_job
end_rot = (n + 1) * nrot_per_job
if n == (jobs - 1):
end_rot = None
sub_rotmat = rotmat[init_rot: end_rot]
sub_weights = weights[init_rot: end_rot]
disvis_args = (queue, receptor, ligand, distance_restraints,
sub_rotmat, sub_weights, n, pdb1_sel, pdb2_sel, args)
process = mp.Process(target=run_disvis_instance, args=disvis_args)
processes.append(process)
write('Starting jobs')
for p in processes:
p.start()
write('Waiting for jobs to finish')
for p in processes:
p.join()
# Check whether the queue is empty; an empty queue indicates that the
# multi-processor run failed.
if queue.empty():
raise RuntimeError("No results were returned from the worker processes.")
write('Searching done. Combining results')
# Create dummy class with similar results attributes as DisVis class
results = Results()
joiner = Joiner(args.directory)
fname_interspace = joiner('accessible_interaction_space_{:d}.mrc')
fname_violations = joiner('violations_{:d}.npy')
fname_intermat = joiner('interaction_matrix_{:d}.npy')
accessible_complexes = np.asarray(queue.get(), dtype=np.float64)
accessible_interaction_space = Volume.fromfile(fname_interspace.format(0))
violations = np.load(fname_violations.format(0))
for n in xrange(1, jobs):
accessible_complexes += np.asarray(queue.get(), dtype=np.float64)
np.maximum(accessible_interaction_space.array,
Volume.fromfile(fname_interspace.format(n)).array,
accessible_interaction_space.array)
violations += np.load(fname_violations.format(n))
# Combine the occupancy grids
occupancy = None
if args.occupancy_analysis:
fname_occupancy = joiner('occupancy_{:d}_{:d}.mrc')
occupancy = {}
for consistent_restraints in xrange(args.interaction_restraints_cutoff,
len(distance_restraints) + 1):
occupancy[consistent_restraints] = Volume.fromfile(
fname_occupancy.format(consistent_restraints, 0))
for n in range(1, jobs):
occupancy[consistent_restraints]._array += (
Volume.fromfile(fname_occupancy.format(consistent_restraints, n))._array
)
# Combine the interaction analysis
results.interaction_matrix = None
if args.interaction_selection is not None:
interaction_matrix = np.load(fname_intermat.format(0))
for n in range(1, jobs):
interaction_matrix += np.load(fname_intermat.format(n))
results.interaction_matrix = interaction_matrix
# Remove the intermediate files
write('Cleaning')
for n in xrange(jobs):
remove(fname_interspace.format(n))
remove(fname_violations.format(n))
if args.interaction_selection is not None:
remove(fname_intermat.format(n))
if args.occupancy_analysis:
for consistent_restraints in xrange(
args.interaction_restraints_cutoff, len(distance_restraints) + 1):
remove(fname_occupancy.format(consistent_restraints, n))
results.accessible_interaction_space = accessible_interaction_space
results.accessible_complexes = accessible_complexes
results.violations = violations
results.occupancy_grids = occupancy
return results
def run_disvis(queue, receptor, ligand, rotmat, weights, distance_restraints,
pdb1_sel, pdb2_sel, args):
dv = DisVis()
dv.receptor = receptor
dv.ligand = ligand
dv.distance_restraints = distance_restraints
dv.rotations = rotmat
dv.weights = weights
dv.voxelspacing = args.voxelspacing
dv.interaction_radius = args.interaction_radius
dv.max_clash = args.max_clash
dv.min_interaction = args.min_interaction
dv.queue = queue
dv.occupancy_analysis = args.occupancy_analysis
dv.interaction_restraints_cutoff = args.interaction_restraints_cutoff
if not any([x is None for x in (pdb1_sel, pdb2_sel)]):
dv.receptor_interaction_selection = pdb1_sel
dv.ligand_interaction_selection = pdb2_sel
dv.search()
return dv
def write(line):
if stdout.isatty():
print(line)
logging.info(line)
def main():
args = parse_args()
mkdir_p(args.directory)
joiner = Joiner(args.directory)
logging.basicConfig(filename=joiner('disvis.log'),
level=logging.INFO, format='%(asctime)s %(message)s')
time0 = time()
queue = None
if args.gpu:
from disvis.helpers import get_queue
queue = get_queue()
if queue is None:
raise ValueError("No GPU queue was found.")
write('Reading fixed model from: {:s}'.format(args.receptor.name))
receptor = PDB.fromfile(args.receptor)
write('Reading scanning model from: {:s}'.format(args.ligand.name))
ligand = PDB.fromfile(args.ligand)
write('Reading in rotations.')
q, weights, a = proportional_orientations(args.angle)
rotmat = quat_to_rotmat(q)
write('Requested rotational sampling density: {:.2f}'.format(args.angle))
write('Real rotational sampling density: {:.2f}'.format(a))
write('Number of rotations: {:d}'.format(rotmat.shape[0]))
write('Reading in restraints from file: {:s}'.format(args.restraints.name))
distance_restraints = parse_restraints(args.restraints, receptor, ligand)
write('Number of distance restraints: {:d}'.format(len(distance_restraints)))
# If the interaction restraints cutoff is not specified, only calculate the
# interaction and occupancy grids for complexes consistent with at least
# (number of restraints - 3) restraints, with a lower bound of 1.
if args.interaction_restraints_cutoff is None:
args.interaction_restraints_cutoff = max(len(distance_restraints) - 3, 1)
pdb1_sel = pdb2_sel = None
if args.interaction_selection is not None:
write('Reading in interaction selection from file: {:s}'
.format(args.interaction_selection.name))
pdb1_sel, pdb2_sel = parse_interaction_selection(
args.interaction_selection, receptor, ligand)
write('Number of receptor residues: {:d}'.format(pdb1_sel.natoms))
write('Number of ligand residues: {:d}'.format(pdb2_sel.natoms))
write('Voxel spacing set to: {:.2f}'.format(args.voxelspacing))
write('Interaction radius set to: {:.2f}'.format(args.interaction_radius))
write('Minimum required interaction volume: {:.2f}'.format(args.min_interaction))
write('Maximum allowed volume of clashes: {:.2f}'.format(args.max_clash))
if args.occupancy_analysis:
write('Performing occupancy analysis')
if queue is None:
# CPU-version
if args.nproc > 1:
try:
dv = mp_cpu_disvis(receptor, ligand,
200
self.assertContains(response, 'lang: fr')
self.assertContains(response, 'course name: test_template_3_course')
@override_settings(FEATURES=FEATURES_WITH_CUSTOM_CERTS_ENABLED)
@patch('lms.djangoapps.certificates.views.webview.get_course_run_details')
def test_certificate_custom_template_with_org_and_mode(self, mock_get_course_run_details):
"""
Tests custom template search if no template matches course_key, but a template does
match org and mode.
This test should check template matching when org={org}, course=Null, mode={mode}.
"""
mock_get_course_run_details.return_value = self.mock_course_run_details
othercourse = CourseFactory.create(
org='cstX', number='cst_22', display_name='custom template course'
)
self._add_course_certificates(count=1, signatory_count=2)
self._create_custom_named_template('test_template_1_course', org_id=1, mode='honor') # Correct template
self._create_custom_named_template( # wrong course key
'test_template_2_course',
org_id=1,
mode='honor',
course_key=str(othercourse.id)
)
self._create_custom_named_template('test_template_3_course', org_id=1, mode='verified') # wrong mode
self._create_custom_named_template('test_template_4_course', org_id=2, mode='honor') # wrong org
test_url = get_certificate_url(
user_id=self.user.id,
course_id=str(self.course.id),
uuid=self.cert.verify_uuid
)
with patch('lms.djangoapps.certificates.api.get_course_organization_id') as mock_get_org_id:
mock_get_org_id.side_effect = [1]
response = self.client.get(test_url)
assert response.status_code == 200
self.assertContains(response, 'course name: test_template_1_course')
@override_settings(FEATURES=FEATURES_WITH_CUSTOM_CERTS_ENABLED)
@patch('lms.djangoapps.certificates.views.webview.get_course_run_details')
def test_certificate_custom_template_with_org(self, mock_get_course_run_details):
"""
Tests custom template search when we have a single template for an organization.
This test should check template matching when org={org}, course=Null, mode=null.
"""
mock_get_course_run_details.return_value = self.mock_course_run_details
self._add_course_certificates(count=1, signatory_count=2)
self._create_custom_named_template('test_template_1_course', org_id=1, mode=None) # Correct template
self._create_custom_named_template('test_template_2_course', org_id=1, mode='verified') # wrong mode
self._create_custom_named_template('test_template_3_course', org_id=2, mode=None) # wrong org
test_url = get_certificate_url(
user_id=self.user.id,
course_id=str(self.course.id),
uuid=self.cert.verify_uuid
)
with patch('lms.djangoapps.certificates.api.get_course_organization_id') as mock_get_org_id:
mock_get_org_id.side_effect = [1]
response = self.client.get(test_url)
assert response.status_code == 200
self.assertContains(response, 'course name: test_template_1_course')
@override_settings(FEATURES=FEATURES_WITH_CUSTOM_CERTS_ENABLED)
@patch('lms.djangoapps.certificates.views.webview.get_course_run_details')
def test_certificate_custom_template_with_mode(self, mock_get_course_run_details):
"""
Tests custom template search if we have a single template for a course mode.
This test should check template matching when org=null, course=Null, mode={mode}.
"""
mock_get_course_run_details.return_value = self.mock_course_run_details
mode = 'honor'
self._add_course_certificates(count=1, signatory_count=2)
self._create_custom_named_template('test_template_1_course', org_id=None, mode=mode) # Correct template
self._create_custom_named_template('test_template_2_course', org_id=None, mode='verified') # wrong mode
self._create_custom_named_template('test_template_3_course', org_id=2, mode=mode) # wrong org
test_url = get_certificate_url(
user_id=self.user.id,
course_id=str(self.course.id),
uuid=self.cert.verify_uuid
)
with patch('lms.djangoapps.certificates.api.get_course_organization_id') as mock_get_org_id:
mock_get_org_id.return_value = None
response = self.client.get(test_url)
assert response.status_code == 200
self.assertContains(response, f'mode: {mode}')
self.assertContains(response, 'course name: test_template_1_course')
# Templates With Language tests
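# The language-aware tests below all exercise the same fallback order observed
# in their assertions: an exact language match wins, then an "all languages"
# template (language=''), then a language-agnostic template (language=None);
# templates registered for a non-matching language are never selected.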
# 1
@override_settings(FEATURES=FEATURES_WITH_CUSTOM_CERTS_ENABLED)
@override_settings(LANGUAGE_CODE='fr')
@patch('lms.djangoapps.certificates.views.webview.get_course_run_details')
@patch('lms.djangoapps.certificates.api.get_course_organization_id')
def test_certificate_custom_language_template_with_org_mode_and_course_key(
self,
mock_get_org_id,
mock_get_course_run_details,
):
"""
Tests custom template search and rendering.
This test should check template matching when org={org}, course={course}, mode={mode}.
"""
DarkLangConfig(released_languages='es-419, fr', changed_by=self.user, enabled=True).save()
right_language = 'es'
wrong_language = 'fr'
mock_get_org_id.return_value = 1
course_run_details = self.mock_course_run_details
course_run_details.update({'content_language': 'es'})
mock_get_course_run_details.return_value = course_run_details
CertificateGenerationCourseSetting.objects.update_or_create(
course_key=self.course.id,
defaults={
'language_specific_templates_enabled': True
}
)
self._add_course_certificates(count=1, signatory_count=2)
test_url = get_certificate_url(
user_id=self.user.id,
course_id=str(self.course.id),
uuid=self.cert.verify_uuid
)
# Create an org_mode_and_coursekey template language=null
self._create_custom_named_template(
'test_null_lang_template', org_id=1, mode='honor', course_key=str(self.course.id), language=None,
)
# Verify return template lang = null
response = self.client.get(test_url)
assert response.status_code == 200
self.assertContains(response, 'course name: test_null_lang_template')
# Create an org_mode_and_coursekey template language=wrong_language
self._create_custom_named_template(
'test_wrong_lang_template',
org_id=1,
mode='honor',
course_key=str(self.course.id),
language=wrong_language,
)
# Verify returns null lang template
response = self.client.get(test_url)
assert response.status_code == 200
self.assertContains(response, 'course name: test_null_lang_template')
# Create an org_mode_and_coursekey template language=''
self._create_custom_named_template(
'test_all_languages_template',
org_id=1,
mode='honor',
course_key=str(self.course.id),
language='',
)
# Verify returns null lang template
response = self.client.get(test_url)
assert response.status_code == 200
self.assertContains(response, 'course name: test_all_languages_template')
# Create an org_mode_and_coursekey template language=lang
self._create_custom_named_template(
'test_right_lang_template',
org_id=1,
mode='honor',
course_key=str(self.course.id),
language=right_language,
)
# verify return right_language template
response = self.client.get(test_url)
assert response.status_code == 200
self.assertContains(response, 'course name: test_right_lang_template')
# 2
@override_settings(FEATURES=FEATURES_WITH_CUSTOM_CERTS_ENABLED)
@patch('lms.djangoapps.certificates.views.webview.get_course_run_details')
@patch('lms.djangoapps.certificates.api.get_course_organization_id')
def test_certificate_custom_language_template_with_org_and_mode(self, mock_get_org_id, mock_get_course_run_details):
"""
Tests custom template search if no template matches course_key, but a template does
match org and mode.
This test should check template matching when org={org}, course=Null, mode={mode}.
"""
DarkLangConfig(released_languages='es-419, fr', changed_by=self.user, enabled=True).save()
right_language = 'es'
wrong_language = 'fr'
mock_get_org_id.return_value = 1
course_run_details = self.mock_course_run_details
course_run_details.update({'content_language': 'es'})
mock_get_course_run_details.return_value = course_run_details
CertificateGenerationCourseSetting.objects.update_or_create(
course_key=self.course.id,
defaults={
'language_specific_templates_enabled': True
}
)
self._add_course_certificates(count=1, signatory_count=2)
test_url = get_certificate_url(
user_id=self.user.id,
course_id=str(self.course.id),
uuid=self.cert.verify_uuid
)
# Create an org and mode template language=null
self._create_custom_named_template('test_null_lang_template', org_id=1, mode='honor', language=None)
# Verify return template lang = null
response = self.client.get(test_url)
assert response.status_code == 200
self.assertContains(response, 'course name: test_null_lang_template')
# Create an org and mode template language=wrong_language
self._create_custom_named_template('test_wrong_lang_template', org_id=1, mode='honor', language=wrong_language)
# Verify returns null lang template
response = self.client.get(test_url)
assert response.status_code == 200
self.assertContains(response, 'course name: test_null_lang_template')
# Create an org and mode template language=''
self._create_custom_named_template('test_all_languages_template', org_id=1, mode='honor', language='')
# Verify returns All Languages template
response = self.client.get(test_url)
assert response.status_code == 200
self.assertContains(response, 'course name: test_all_languages_template')
# Create an org and mode template language=lang
self._create_custom_named_template('test_right_lang_template', org_id=1, mode='honor', language=right_language)
# Verify return right_language template
response = self.client.get(test_url)
assert response.status_code == 200
self.assertContains(response, 'course name: test_right_lang_template')
# 3
@override_settings(FEATURES=FEATURES_WITH_CUSTOM_CERTS_ENABLED)
@patch('lms.djangoapps.certificates.views.webview.get_course_run_details')
@patch('lms.djangoapps.certificates.api.get_course_organization_id')
def test_certificate_custom_language_template_with_org(self, mock_get_org_id, mock_get_course_run_details):
"""
Tests custom template search when we have a single template for an organization.
This test should check template matching when org={org}, course=Null, mode=null.
"""
DarkLangConfig(released_languages='es-419, fr', changed_by=self.user, enabled=True).save()
right_language = 'es'
wrong_language = 'fr'
mock_get_org_id.return_value = 1
course_run_details = self.mock_course_run_details
course_run_details.update({'content_language': 'es'})
mock_get_course_run_details.return_value = course_run_details
CertificateGenerationCourseSetting.objects.update_or_create(
course_key=self.course.id,
defaults={
'language_specific_templates_enabled': True
}
)
self._add_course_certificates(count=1, signatory_count=2)
test_url = get_certificate_url(
user_id=self.user.id,
course_id=str(self.course.id),
uuid=self.cert.verify_uuid
)
# Create an org template language=null
self._create_custom_named_template('test_null_lang_template', org_id=1, language=None)
# Verify return template lang = null
response = self.client.get(test_url)
assert response.status_code == 200
self.assertContains(response, 'course name: test_null_lang_template')
# Create an org template language=wrong_language
self._create_custom_named_template('test_wrong_lang_template', org_id=1, language=wrong_language)
# Verify returns null lang template
response = self.client.get(test_url)
assert response.status_code == 200
self.assertContains(response, 'course name: test_null_lang_template')
# Create an org template language=''
self._create_custom_named_template('test_all_languages_template', org_id=1, language='')
# Verify returns All Languages template
response = self.client.get(test_url)
assert response.status_code == 200
self.assertContains(response, 'course name: test_all_languages_template')
# Create an org template language=lang
self._create_custom_named_template('test_right_lang_template', org_id=1, language=right_language)
# Verify return right_language template
response = self.client.get(test_url)
assert response.status_code == 200
self.assertContains(response, 'course name: test_right_lang_template')
# 4
@override_settings(FEATURES=FEATURES_WITH_CUSTOM_CERTS_ENABLED)
@patch('lms.djangoapps.certificates.views.webview.get_course_run_details')
@patch('lms.djangoapps.certificates.api.get_course_organization_id')
def test_certificate_custom_language_template_with_mode(self, mock_get_org_id, mock_get_course_run_details):
"""
Tests custom template search if we have a single template for a course mode.
This test should check template matching when org=null, course=Null, mode={mode}.
"""
DarkLangConfig(released_languages='es-419, fr', changed_by=self.user, enabled=True).save()
right_language = 'es'
wrong_language = 'fr'
mock_get_org_id.return_value = 1
course_run_details = self.mock_course_run_details
course_run_details.update({'content_language': 'es'})
mock_get_course_run_details.return_value = course_run_details
CertificateGenerationCourseSetting.objects.update_or_create(
course_key=self.course.id,
defaults={
'language_specific_templates_enabled': True
}
)
self._add_course_certificates(count=1, signatory_count=2)
test_url = get_certificate_url(
user_id=self.user.id,
course_id=str(self.course.id),
uuid=self.cert.verify_uuid
)
# Create a mode template language=null
self._create_custom_named_template('test_null_lang_template', mode='honor', language=None)
# Verify return template with lang = null
response = self.client.get(test_url)
assert response.status_code == 200
self.assertContains(response, 'course name: test_null_lang_template')
# Create a mode template language=wrong_language
self._create_custom_named_template('test_wrong_lang_template', mode='honor', language=wrong_language)
# Verify returns null lang template
response = self.client.get(test_url)
assert response.status_code == 200
self.assertContains(response, 'course name: test_null_lang_template')
# Create a mode template language=''
self._create_custom_named_template('test_all_languages_template', mode='honor', language='')
# Verify returns All Languages template
response = self.client.get(test_url)
assert response.status_code == 200
self.assertContains(response, 'course name: test_all_languages_template')
# Create a mode template language=lang
self._create_custom_named_template('test_right_lang_template', mode='honor', language=right_language)
# Verify return right_language template
response = self.client.get(test_url)
assert response.status_code == 200
self.assertContains(response, 'course name: test_right_lang_template')
@override_settings(FEATURES=FEATURES_WITH_CUSTOM_CERTS_ENABLED)
@patch('lms.djangoapps.certificates.views.webview.get_course_run_details')
@patch('lms.djangoapps.certificates.api.get_course_organization_id')
def test_certificate_custom_language_template_with_locale_language_from_catalogue(
self,
mock_get_org_id,
mock_get_course_run_details,
):
"""
Tests custom language template matching when the course catalog reports a
locale-specific language code ('es-419'); templates registered for the base
language ('es') should still be matched.
"""
DarkLangConfig(released_languages='es-419, fr', changed_by=self.user, enabled=True).save()
right_language = 'es'
wrong_language = 'fr'
mock_get_org_id.return_value = 1
course_run_details = self.mock_course_run_details
course_run_details.update({'content_language': 'es-419'})
mock_get_course_run_details.return_value = course_run_details
CertificateGenerationCourseSetting.objects.update_or_create(
course_key=self.course.id,
defaults={
'language_specific_templates_enabled': True
}
)
self._add_course_certificates(count=1, signatory_count=2)
test_url = get_certificate_url(
user_id=self.user.id,
course_id=str(self.course.id),
uuid=self.cert.verify_uuid
)
# Create a mode template language=null
self._create_custom_named_template('test_null_lang_template', org_id=1, mode='honor', language=None)
# Verify return template with lang = null
response = self.client.get(test_url)
assert response.status_code == 200
self.assertContains(response, 'course name: test_null_lang_template')
# Create a mode template language=wrong_language
self._create_custom_named_template('test_wrong_lang_template', org_id=1, mode='honor', language=wrong_language)
# Verify returns null lang template
response = self.client.get(test_url)
assert response.status_code == 200
self.assertContains(response, 'course name: test_null_lang_template')
# Create a mode template language=''
self._create_custom_named_template('test_all_languages_template', org_id=1, mode='honor', language='')
# Verify returns All Languages template
response = self.client.get(test_url)
assert response.status_code == 200
self.assertContains(response, 'course name: test_all_languages_template')
# Create a mode template language=lang
self._create_custom_named_template('test_right_lang_template', org_id=1, mode='honor', language=right_language)
# verify return right_language template
response = self.client.get(test_url)
assert response.status_code == 200
self.assertContains(response, 'course name: test_right_lang_template')
@override_settings(FEATURES=FEATURES_WITH_CUSTOM_CERTS_ENABLED)
@ddt.data(True, False)
@patch('lms.djangoapps.certificates.views.webview.get_course_run_details')
@patch('lms.djangoapps.certificates.api.get_course_organization_id')
def test_certificate_custom_template_with_hours_of_effort(
self,
include_effort,
mock_get_org_id,
mock_get_course_run_details,
):
"""
Tests custom template properly retrieves and calculates Hours of Effort when the feature is enabled
"""
# mock the response data from Discovery that updates the context for template lookup and rendering
mock_get_course_run_details.return_value = self.mock_course_run_details
mock_get_org_id.return_value = 1
CertificateGenerationCourseSetting.objects.update_or_create(
course_key=self.course.id,
defaults={
'include_hours_of_effort': include_effort
}
)
self._add_course_certificates(count=1, signatory_count=2)
self._create_custom_template_with_hours_of_effort(org_id=1, language=None)
test_url = get_certificate_url(
user_id=self.user.id,
course_id=str(self.course.id),
uuid=self.cert.verify_uuid
)
response = self.client.get(test_url)
assert response.status_code == 200
if include_effort:
self.assertContains(response, 'hours of effort: 40')
else:
self.assertNotContains(response, 'hours of effort')
@ddt.data(True, False)
@patch('lms.djangoapps.certificates.views.webview.get_course_run_details')
def
data[resource]
actions.update({resource: {'action': 'subtract', 'amount': data[resource]}})
tgtactions.update({resource: {'action': 'add', 'amount': data[resource]}})
freighters = world.freighters - world.freightersinuse
required_freighters = (required_capacity / v.freighter_capacity['total']) + 1
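# Illustrative arithmetic (hypothetical numbers, assuming integer division): a
# required capacity of 950 with 400 per freighter gives 950 / 400 = 2, and the
# +1 margin raises it to 3 freighters so the remainder of the load is covered.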
if len(resources) == 0: #pure budget aid
pass
elif freighters >= required_freighters:
#gots enough freighters
delay = (1 if world.sector == target.sector else 2)
outcometime = v.now() + time.timedelta(hours=delay)
actions.update({
'freightersinuse': {'action': 'add', 'amount': required_freighters},
})
utilities.atomic_world(world.pk, actions)
taskdetails = taskdata.directaidarrival(world, resources)
task = Task.objects.create(target=target,
content=taskdetails, datetime=outcometime)
newtask.directaid.apply_async(args=(world.pk, target.pk,
task.pk, resources, freighters), eta=outcometime)
if data['budget'] > 0:
resources = [['GEU', data['budget']]] + resources
#create logs!
log = ResourceLog.objects.create(owner=world, target=target, sent=True)
for resource in resources:
Logresource.objects.create(resource=resource[0], amount=resource[1], log=log)
hour = ('hours' if delay == 2 else 'hour')
if len(message):
message = message[:-1] + " and will recieve %s in %s %s!" % (
utilities.resource_text(resources), delay, hour)
else:
message = "%s will recieve %s in %s %s!" % (
target.name, utilities.resource_text(resources), delay, hour)
else: #not enough freighters
message = "We do not have enough freighters, we have %s and need %s" % (freighters, required_freighters)
if 'shipaid' in form:
form = Shipaidform(world, form)
if form.is_valid():
data = form.cleaned_data
ship = data['ship_choice']
amount = data['amount']
delay = (4 if target.sector == world.sector else 8)
outcometime = v.now() + time.timedelta(minutes=1)
if data['amount'] > data['fleet_choice'].__dict__[data['ship_choice']]:
message = "%s doesn't have that many %s!" % (data['fleet_choice'].name, ship)
else: #is all good
action = {'subtractships': {data['ship_choice']: amount}}
utilities.atomic_fleet(data['fleet_choice'].pk, action)
log = ResourceLog.objects.create(owner=world, target=target, sent=True)
shipname = (ship.replace('_', ' ') if amount > 1 else ship[:-1].replace('_', ' ')) #to plural or not plural
Logresource.objects.create(resource=shipname, amount=amount, log=log)
#more stuff
ref = fleet()
ref.__dict__[ship] = amount
training = data['fleet_choice'].maxtraining() * data['fleet_choice'].ratio()
taskdetails = taskdata.shipaidarrival(world, shipname, amount)
task = Task.objects.create(target=target,
content=taskdetails, datetime=outcometime)
newtask.shipaid.apply_async(args=(world.pk, target.pk,
task.pk, ship, amount, training), eta=outcometime)
message = "%s %s is en route to %s from %s" % (
amount, shipname, target.name, data['fleet_choice'].name)
if "infiltrate" in form:
form = SelectSpyForm(world, request.POST)
if form.is_valid():
data = form.cleaned_data
spyid = data['spyselect']
try:
spy = Spy.objects.get(pk=spyid)
except:
message = "There is no such spy!"
else:
if target.preferences.vacation:
message = 'This world is in vacation mode. You cannot infiltrate it.'
elif spy.owner != world:
message = "This spy does not belong to your intelligence services!"
elif Spy.objects.filter(owner=world, location=target).exists():
message = "You already have a spy in this world!"
elif spy.location != world:
message = "This spy is not at your home world!"
elif spy.nextaction > v.now():
message = "This spy is currently recovering and unavailable for missions."
else:
message = infiltrate(spy, target)
if "propaganda" in form:
try:
spy = Spy.objects.get(owner=world, location=target)
except ObjectDoesNotExist:
message = "You have no spy in this world!"
else:
if spy.nextaction > v.now():
message = "This spy is currently recovering and unavailable for missions."
elif world.budget < 250:
message = outcomes.nomoney()
else:
spy.nextaction = v.now() + time.timedelta(hours=8)
spy.save(update_fields=['nextaction'])
actions = {'budget': {'action': 'subtract', 'amount': 250}}
utilities.atomic_world(world.pk, actions)
message = propaganda(spy, target)
if "gunrun" in form:
try:
spy = Spy.objects.get(owner=world, location=target)
except ObjectDoesNotExist:
message = "You have no spy in this world!"
else:
if spy.nextaction > v.now():
message = "This spy is currently recovering and unavailable for missions."
elif world.millevel < 1000:
message = outcomes.gunrun('NoTech')
else:
spy.nextaction = v.now() + time.timedelta(hours=8)
spy.save(update_fields=['nextaction'])
world.millevel = F('millevel') - 1000
world.save(update_fields=['millevel'])
message = gunrun(spy, target)
if "intel" in form:
try:
spy = Spy.objects.get(owner=world, location=target)
except ObjectDoesNotExist:
message = "You have no spy in this world!"
else:
if spy.nextaction > v.now():
message = "This spy is currently recovering and unavailable for missions."
elif world.budget < 200:
message = outcomes.nomoney()
else:
spy.nextaction = v.now() + time.timedelta(hours=8)
spy.save(update_fields=['nextaction'])
actions = {'budget': {'action': 'subtract', 'amount': 200}}
utilities.atomic_world(world.pk, actions)
message = intel(spy, target)
if "sabyard" in form:
try:
spy = Spy.objects.get(owner=world, location=target)
except ObjectDoesNotExist:
message = "You have no spy in this world!"
else:
if spy.nextaction > v.now():
message = "This spy is currently recovering and unavailable for missions."
elif world.budget < 2000:
message = outcomes.nomoney()
elif target.shipyards - target.shipyardsinuse == 0:
message = outcomes.sabotage('NoFreeYards')
else:
spy.nextaction = v.now() + time.timedelta(hours=8)
spy.save(update_fields=['nextaction'])
actions = {'budget': {'action': 'subtract', 'amount': 2000}}
utilities.atomic_world(world.pk, actions)
message = sabyard(spy, target)
if "sabfuel" in form:
try:
spy = Spy.objects.get(owner=world, location=target)
except ObjectDoesNotExist:
message = "You have no spy in this world!"
else:
if spy.nextaction > v.now():
message = "This spy is currently recovering and unavailable for missions."
elif world.budget < 2000:
message = outcomes.nomoney()
elif target.warpfuelprod < 10:
message = outcomes.sabotage('NoFuelProd')
else:
spy.nextaction = v.now() + time.timedelta(hours=8)
spy.save(update_fields=['nextaction'])
actions = {'budget': {'action': 'subtract', 'amount': 2000}}
utilities.atomic_world(world.pk, actions)
message = sabfuel(spy, target)
if "sabdur" in form:
try:
spy = Spy.objects.get(owner=world, location=target)
except ObjectDoesNotExist:
message = "You have no spy in this world!"
else:
if spy.nextaction > v.now():
message = "This spy is currently recovering and unavailable for missions."
elif world.budget < 2000:
message = outcomes.nomoney()
elif target.duraniumprod < 5:
message = outcomes.sabotage('NoDurProd')
else:
spy.nextaction = v.now() + time.timedelta(hours=8)
spy.save(update_fields=['nextaction'])
actions = {'budget': {'action': 'subtract', 'amount': 2000}}
utilities.atomic_world(world.pk, actions)
message = sabdur(spy, target)
if "sabtrit" in form:
try:
spy = Spy.objects.get(owner=world, location=target)
except ObjectDoesNotExist:
message = "You have no spy in this world!"
else:
if spy.nextaction > v.now():
message = "This spy is currently recovering and unavailable for missions."
elif world.budget < 2000:
message = outcomes.nomoney()
elif target.tritaniumprod < 2:
message = outcomes.sabotage('NoTritProd')
else:
spy.nextaction = v.now() + time.timedelta(hours=8)
spy.save(update_fields=['nextaction'])
actions = {'budget': {'action': 'subtract', 'amount': 2000}}
utilities.atomic_world(world.pk, actions)
message = sabtrit(spy, target)
if "sabadam" in form:
try:
spy = Spy.objects.get(owner=world, location=target)
except ObjectDoesNotExist:
message = "You have no spy in this world!"
else:
if spy.nextaction > v.now():
message = "This spy is currently recovering and unavailable for missions."
elif world.budget < 2000:
message = outcomes.nomoney()
elif target.adamantiumprod < 1:
message = outcomes.sabotage('NoAdamProd')
else:
spy.nextaction = v.now() + time.timedelta(hours=8)
spy.save(update_fields=['nextaction'])
actions = {'budget': {'action': 'subtract', 'amount': 2000}}
utilities.atomic_world(world.pk, actions)
message = sabadam(spy, target)
if "sabhangars" in form:
try:
spy = Spy.objects.get(owner=world, location=target)
except ObjectDoesNotExist:
message = "You have no spy in this world!"
else:
if spy.nextaction > v.now():
message = "This spy is currently recovering and unavailable for missions."
elif world.budget < 2000:
message = outcomes.nomoney()
else:
spy.nextaction = v.now() + time.timedelta(hours=8)
spy.save(update_fields=['nextaction'])
actions = {'budget': {'action': 'subtract', 'amount': 2000}}
utilities.atomic_world(world.pk, actions)
message = sabhangars(spy, target)
if "withdraw" in form:
try:
spy = Spy.objects.get(owner=world, location=target)
except ObjectDoesNotExist:
message = "You have no spy in this world!"
else:
if spy.nextaction > v.now():
message = "This spy is currently recovering and unavailable."
else:
spy.inteltime = v.now()
spy.location = spy.owner
spy.nextaction = v.now() + time.timedelta(hours=8)
spy.timespent = 0
spy.save()
message = 'You have successfully withdrawn your spy from the enemy world!'
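# Determine the war state between the two worlds for rendering: if either side
# has declared war on the other, load the War object and note whether a peace
# offer from the attacker or defender is currently pending.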
if world.pk in War.objects.filter(defender=target).values_list('attacker', flat=True):
atwar = True
war = War.objects.get(attacker=world, defender=target)
if war.peaceofferbyatk is not None:
peaceoffer = True
elif world.pk in War.objects.filter(attacker=target).values_list('defender', flat=True):
atwar = True
war = War.objects.get(attacker=target, defender=world)
if war.peaceofferbydef is not None:
peaceoffer = True
spyform = SelectSpyForm(world)
# recalculate variables in case an action has changed them
if v.now() < world.warprotection:
warprotection = True
if target.gdp > 3 * world.gdp:
gdpprotection = True
if v.now() < target.warprotection:
targetprotection = True
if Spy.objects.filter(owner=world, location=world).count() == 0:
nospies = True
if Spy.objects.filter(owner=world).filter(location=target).count() == 1:
spyintarget = Spy.objects.filter(owner=world, location=target)[0]
if spyintarget.inteltime > v.now():
spyintel = True
timediff = spyintarget.inteltime - v.now()
hours, minutes, seconds = utilities.timedeltadivide(timediff)
timeforintel = 'You have %s:%s:%s of intel remaining.' % (hours, minutes, seconds)
#if the two worlds are at war
#calculate what fleets can attack where and what buttons to render
attackforms = []
if atwar:
worldfleets = world.controlled_fleets.all().exclude(sector='warping').exclude(sector='hangar')
targetfleets = target.controlled_fleets.all().exclude(sector='warping').exclude(sector='hangar')
sectors = {'amyntas': 0, 'bion': 0, 'cleon': 0, 'draco': 0}
for unit in worldfleets:
sectors[unit.sector] = 1
if unit.sector == target.sector:
sectors[unit.sector] = 2
for unit in targetfleets:
sectors[unit.sector] += 1
for sector in v.sectors: #organised list so it shows amyntas -> draco
if sectors[sector] >= 2: #both worlds has fleets in given sector
attackforms.append({'form': attackform(world, sector),
# Boolean indices
out_aet = x_aet[x_aet < 0]
assert isinstance(out_aet.owner.op, aet_subtensor.AdvancedSubtensor)
out_fg = FunctionGraph([], [out_aet])
compare_jax_and_py(out_fg, [])
# Advanced indexing
out_aet = x_aet[[1, 2]]
assert isinstance(out_aet.owner.op, aet_subtensor.AdvancedSubtensor1)
out_fg = FunctionGraph([], [out_aet])
compare_jax_and_py(out_fg, [])
out_aet = x_aet[[1, 2], [2, 3]]
assert isinstance(out_aet.owner.op, aet_subtensor.AdvancedSubtensor)
out_fg = FunctionGraph([], [out_aet])
compare_jax_and_py(out_fg, [])
# Advanced and basic indexing
out_aet = x_aet[[1, 2], :]
assert isinstance(out_aet.owner.op, aet_subtensor.AdvancedSubtensor1)
out_fg = FunctionGraph([], [out_aet])
compare_jax_and_py(out_fg, [])
out_aet = x_aet[[1, 2], :, [3, 4]]
assert isinstance(out_aet.owner.op, aet_subtensor.AdvancedSubtensor)
out_fg = FunctionGraph([], [out_aet])
compare_jax_and_py(out_fg, [])
def test_jax_IncSubtensor():
x_np = np.random.uniform(-1, 1, size=(3, 4, 5)).astype(config.floatX)
x_aet = aet.arange(3 * 4 * 5).reshape((3, 4, 5)).astype(config.floatX)
# "Set" basic indices
st_aet = aet.as_tensor_variable(np.array(-10.0, dtype=config.floatX))
out_aet = aet_subtensor.set_subtensor(x_aet[1, 2, 3], st_aet)
assert isinstance(out_aet.owner.op, aet_subtensor.IncSubtensor)
out_fg = FunctionGraph([], [out_aet])
compare_jax_and_py(out_fg, [])
st_aet = aet.as_tensor_variable(np.r_[-1.0, 0.0].astype(config.floatX))
out_aet = aet_subtensor.set_subtensor(x_aet[:2, 0, 0], st_aet)
assert isinstance(out_aet.owner.op, aet_subtensor.IncSubtensor)
out_fg = FunctionGraph([], [out_aet])
compare_jax_and_py(out_fg, [])
out_aet = aet_subtensor.set_subtensor(x_aet[0, 1:3, 0], st_aet)
assert isinstance(out_aet.owner.op, aet_subtensor.IncSubtensor)
out_fg = FunctionGraph([], [out_aet])
compare_jax_and_py(out_fg, [])
# "Set" advanced indices
st_aet = aet.as_tensor_variable(
np.random.uniform(-1, 1, size=(2, 4, 5)).astype(config.floatX)
)
out_aet = aet_subtensor.set_subtensor(x_aet[np.r_[0, 2]], st_aet)
assert isinstance(out_aet.owner.op, aet_subtensor.AdvancedIncSubtensor1)
out_fg = FunctionGraph([], [out_aet])
compare_jax_and_py(out_fg, [])
st_aet = aet.as_tensor_variable(np.r_[-1.0, 0.0].astype(config.floatX))
out_aet = aet_subtensor.set_subtensor(x_aet[[0, 2], 0, 0], st_aet)
assert isinstance(out_aet.owner.op, aet_subtensor.AdvancedIncSubtensor)
out_fg = FunctionGraph([], [out_aet])
compare_jax_and_py(out_fg, [])
st_aet = aet.as_tensor_variable(x_np[[0, 2], 0, :3])
out_aet = aet_subtensor.set_subtensor(x_aet[[0, 2], 0, :3], st_aet)
assert isinstance(out_aet.owner.op, aet_subtensor.AdvancedIncSubtensor)
out_fg = FunctionGraph([], [out_aet])
compare_jax_and_py(out_fg, [])
# "Set" boolean indices
mask_aet = aet.as_tensor_variable(x_np) > 0
out_aet = aet_subtensor.set_subtensor(x_aet[mask_aet], 0.0)
assert isinstance(out_aet.owner.op, aet_subtensor.AdvancedIncSubtensor)
out_fg = FunctionGraph([], [out_aet])
compare_jax_and_py(out_fg, [])
# "Increment" basic indices
st_aet = aet.as_tensor_variable(np.array(-10.0, dtype=config.floatX))
out_aet = aet_subtensor.inc_subtensor(x_aet[1, 2, 3], st_aet)
assert isinstance(out_aet.owner.op, aet_subtensor.IncSubtensor)
out_fg = FunctionGraph([], [out_aet])
compare_jax_and_py(out_fg, [])
st_aet = aet.as_tensor_variable(np.r_[-1.0, 0.0].astype(config.floatX))
out_aet = aet_subtensor.inc_subtensor(x_aet[:2, 0, 0], st_aet)
assert isinstance(out_aet.owner.op, aet_subtensor.IncSubtensor)
out_fg = FunctionGraph([], [out_aet])
compare_jax_and_py(out_fg, [])
out_aet = aet_subtensor.set_subtensor(x_aet[0, 1:3, 0], st_aet)
assert isinstance(out_aet.owner.op, aet_subtensor.IncSubtensor)
out_fg = FunctionGraph([], [out_aet])
compare_jax_and_py(out_fg, [])
# "Increment" advanced indices
st_aet = aet.as_tensor_variable(
np.random.uniform(-1, 1, size=(2, 4, 5)).astype(config.floatX)
)
out_aet = aet_subtensor.inc_subtensor(x_aet[np.r_[0, 2]], st_aet)
assert isinstance(out_aet.owner.op, aet_subtensor.AdvancedIncSubtensor1)
out_fg = FunctionGraph([], [out_aet])
compare_jax_and_py(out_fg, [])
st_aet = aet.as_tensor_variable(np.r_[-1.0, 0.0].astype(config.floatX))
out_aet = aet_subtensor.inc_subtensor(x_aet[[0, 2], 0, 0], st_aet)
assert isinstance(out_aet.owner.op, aet_subtensor.AdvancedIncSubtensor)
out_fg = FunctionGraph([], [out_aet])
compare_jax_and_py(out_fg, [])
st_aet = aet.as_tensor_variable(x_np[[0, 2], 0, :3])
out_aet = aet_subtensor.inc_subtensor(x_aet[[0, 2], 0, :3], st_aet)
assert isinstance(out_aet.owner.op, aet_subtensor.AdvancedIncSubtensor)
out_fg = FunctionGraph([], [out_aet])
compare_jax_and_py(out_fg, [])
# "Increment" boolean indices
mask_aet = aet.as_tensor_variable(x_np) > 0
out_aet = aet_subtensor.set_subtensor(x_aet[mask_aet], 1.0)
assert isinstance(out_aet.owner.op, aet_subtensor.AdvancedIncSubtensor)
out_fg = FunctionGraph([], [out_aet])
compare_jax_and_py(out_fg, [])
def test_jax_ifelse():
true_vals = np.r_[1, 2, 3]
false_vals = np.r_[-1, -2, -3]
x = ifelse(np.array(True), true_vals, false_vals)
x_fg = FunctionGraph([], [x])
compare_jax_and_py(x_fg, [])
a = dscalar("a")
a.tag.test_value = np.array(0.2, dtype=config.floatX)
x = ifelse(a < 0.5, true_vals, false_vals)
x_fg = FunctionGraph([a], [x]) # I.e. False
compare_jax_and_py(x_fg, [get_test_value(i) for i in x_fg.inputs])
def test_jax_CAReduce():
a_aet = vector("a")
a_aet.tag.test_value = np.r_[1, 2, 3].astype(config.floatX)
x = aet_sum(a_aet, axis=None)
x_fg = FunctionGraph([a_aet], [x])
compare_jax_and_py(x_fg, [np.r_[1, 2, 3].astype(config.floatX)])
a_aet = matrix("a")
a_aet.tag.test_value = np.c_[[1, 2, 3], [1, 2, 3]].astype(config.floatX)
x = aet_sum(a_aet, axis=0)
x_fg = FunctionGraph([a_aet], [x])
compare_jax_and_py(x_fg, [np.c_[[1, 2, 3], [1, 2, 3]].astype(config.floatX)])
x = aet_sum(a_aet, axis=1)
x_fg = FunctionGraph([a_aet], [x])
compare_jax_and_py(x_fg, [np.c_[[1, 2, 3], [1, 2, 3]].astype(config.floatX)])
a_aet = matrix("a")
a_aet.tag.test_value = np.c_[[1, 2, 3], [1, 2, 3]].astype(config.floatX)
x = prod(a_aet, axis=0)
x_fg = FunctionGraph([a_aet], [x])
compare_jax_and_py(x_fg, [np.c_[[1, 2, 3], [1, 2, 3]].astype(config.floatX)])
x = aet_all(a_aet)
x_fg = FunctionGraph([a_aet], [x])
compare_jax_and_py(x_fg, [np.c_[[1, 2, 3], [1, 2, 3]].astype(config.floatX)])
def test_jax_MakeVector():
x = aet.make_vector(1, 2, 3)
x_fg = FunctionGraph([], [x])
compare_jax_and_py(x_fg, [])
def test_jax_Reshape():
a = vector("a")
x = reshape(a, (2, 2))
x_fg = FunctionGraph([a], [x])
compare_jax_and_py(x_fg, [np.r_[1.0, 2.0, 3.0, 4.0].astype(config.floatX)])
# Test breaking "omnistaging" changes in JAX.
# See https://github.com/tensorflow/probability/commit/782d0c64eb774b9aac54a1c8488e4f1f96fbbc68
x = reshape(a, (a.shape[0] // 2, a.shape[0] // 2))
x_fg = FunctionGraph([a], [x])
compare_jax_and_py(x_fg, [np.r_[1.0, 2.0, 3.0, 4.0].astype(config.floatX)])
@pytest.mark.xfail(reason="jax.numpy.arange requires concrete inputs")
def test_jax_Reshape_nonconcrete():
a = vector("a")
b = iscalar("b")
x = reshape(a, (b, b))
x_fg = FunctionGraph([a, b], [x])
compare_jax_and_py(x_fg, [np.r_[1.0, 2.0, 3.0, 4.0].astype(config.floatX), 2])
def test_jax_Dimshuffle():
a_aet = matrix("a")
x = a_aet.T
x_fg = FunctionGraph([a_aet], [x])
compare_jax_and_py(x_fg, [np.c_[[1.0, 2.0], [3.0, 4.0]].astype(config.floatX)])
x = a_aet.dimshuffle([0, 1, "x"])
x_fg = FunctionGraph([a_aet], [x])
compare_jax_and_py(x_fg, [np.c_[[1.0, 2.0], [3.0, 4.0]].astype(config.floatX)])
a_aet = tensor(dtype=config.floatX, broadcastable=[False, True])
x = a_aet.dimshuffle((0,))
x_fg = FunctionGraph([a_aet], [x])
compare_jax_and_py(x_fg, [np.c_[[1.0, 2.0, 3.0, 4.0]].astype(config.floatX)])
a_aet = tensor(dtype=config.floatX, broadcastable=[False, True])
x = aet_elemwise.DimShuffle([False, True], (0,), inplace=True)(a_aet)
x_fg = FunctionGraph([a_aet], [x])
compare_jax_and_py(x_fg, [np.c_[[1.0, 2.0, 3.0, 4.0]].astype(config.floatX)])
def test_jax_variadic_Scalar():
mu = vector("mu", dtype=config.floatX)
mu.tag.test_value = np.r_[0.1, 1.1].astype(config.floatX)
tau = vector("tau", dtype=config.floatX)
tau.tag.test_value = np.r_[1.0, 2.0].astype(config.floatX)
res = -tau * mu
fgraph = FunctionGraph([mu, tau], [res])
compare_jax_and_py(fgraph, [get_test_value(i) for i in fgraph.inputs])
res = -tau * (tau - mu) ** 2
fgraph = FunctionGraph([mu, tau], [res])
compare_jax_and_py(fgraph, [get_test_value(i) for i in fgraph.inputs])
def test_jax_logp():
mu = vector("mu")
mu.tag.test_value = np.r_[0.0, 0.0].astype(config.floatX)
tau = vector("tau")
tau.tag.test_value = np.r_[1.0, 1.0].astype(config.floatX)
sigma = vector("sigma")
sigma.tag.test_value = (1.0 / get_test_value(tau)).astype(config.floatX)
value = vector("value")
value.tag.test_value = np.r_[0.1, -10].astype(config.floatX)
logp = (-tau * (value - mu) ** 2 + log(tau / np.pi / 2.0)) / 2.0
conditions = [sigma > 0]
alltrue = aet_all([aet_all(1 * val) for val in conditions])
normal_logp = aet.switch(alltrue, logp, -np.inf)
fgraph = FunctionGraph([mu, tau, sigma, value], [normal_logp])
compare_jax_and_py(fgraph, [get_test_value(i) for i in fgraph.inputs])
def test_jax_multioutput():
x = vector("x")
x.tag.test_value = np.r_[1.0, 2.0].astype(config.floatX)
y = vector("y")
y.tag.test_value = np.r_[3.0, 4.0].astype(config.floatX)
w = cosh(x ** 2 + y / 3.0)
v = cosh(x / 3.0 + y ** 2)
fgraph = FunctionGraph([x, y], [w, v])
compare_jax_and_py(fgraph, [get_test_value(i) for i in fgraph.inputs])
def test_nnet():
x = vector("x")
x.tag.test_value = np.r_[1.0, 2.0].astype(config.floatX)
out = aet_nnet.sigmoid(x)
fgraph = FunctionGraph([x], [out])
compare_jax_and_py(fgraph, [get_test_value(i) for i in fgraph.inputs])
out = aet_nnet.ultra_fast_sigmoid(x)
fgraph = FunctionGraph([x], [out])
compare_jax_and_py(fgraph, [get_test_value(i) for i in fgraph.inputs])
out = aet_nnet.softplus(x)
fgraph = FunctionGraph([x], [out])
compare_jax_and_py(fgraph, [get_test_value(i) for i in fgraph.inputs])
out = aet_nnet.softmax(x)
fgraph = FunctionGraph([x], [out])
compare_jax_and_py(fgraph, [get_test_value(i) for i in fgraph.inputs])
def test_tensor_basics():
y = vector("y")
y.tag.test_value = np.r_[1.0, 2.0].astype(config.floatX)
x = vector("x")
x.tag.test_value = np.r_[3.0, 4.0].astype(config.floatX)
A = matrix("A")
A.tag.test_value = np.empty((2, 2), dtype=config.floatX)
alpha = scalar("alpha")
alpha.tag.test_value = np.array(3.0, dtype=config.floatX)
beta = scalar("beta")
beta.tag.test_value = np.array(5.0, dtype=config.floatX)
# This should be converted into a `Gemv` `Op` when the non-JAX compatible
# optimizations are turned on; however, when using JAX mode, it should
# leave the expression alone.
out = y.dot(alpha * A).dot(x) + beta * y
fgraph = FunctionGraph([y, x, A, alpha, beta], [out])
compare_jax_and_py(fgraph, [get_test_value(i) for i in fgraph.inputs])
out = maximum(y, x)
fgraph = FunctionGraph([y, x], [out])
compare_jax_and_py(fgraph, [get_test_value(i) for i in fgraph.inputs])
out = aet_max(y)
fgraph = FunctionGraph([y], [out])
compare_jax_and_py(fgraph, [get_test_value(i) for i in fgraph.inputs])
@pytest.mark.xfail(reason="jax.numpy.arange requires concrete inputs")
def test_arange_nonconcrete():
a = scalar("a")
a.tag.test_value = 10
out = aet.arange(a)
fgraph = FunctionGraph([a], [out])
compare_jax_and_py(fgraph, [get_test_value(i) for i in fgraph.inputs])
@pytest.mark.xfail(reason="jax.numpy.arange requires concrete inputs")
def test_unique_nonconcrete():
a = matrix("a")
a.tag.test_value = np.arange(6, dtype=config.floatX).reshape((3, 2))
out = aet_extra_ops.Unique()(a)
fgraph = FunctionGraph([a], [out])
compare_jax_and_py(fgraph, [get_test_value(i) for i in fgraph.inputs])
def test_identity():
a = scalar("a")
a.tag.test_value = 10
out = aes.identity(a)
fgraph = FunctionGraph([a], [out])
compare_jax_and_py(fgraph, [get_test_value(i) for i in fgraph.inputs])
def test_second():
a0 = scalar("a0")
b = scalar("b")
out = aes.second(a0, b)
fgraph = FunctionGraph([a0, b], [out])
compare_jax_and_py(fgraph, [10.0, 5.0])
a1 = vector("a1")
out = aet.second(a1, b)
fgraph = FunctionGraph([a1, b], [out])
compare_jax_and_py(fgraph, [np.zeros([5], dtype=config.floatX), 5.0])
def test_jax_BatchedDot():
# tensor3 . tensor3
a = tensor3("a")
a.tag.test_value = (
np.linspace(-1, 1, 10 * 5 * 3).astype(config.floatX).reshape((10, 5, 3))
)
b = tensor3("b")
b.tag.test_value = (
np.linspace(1, -1, 10 * 3 * 2).astype(config.floatX).reshape((10, 3, 2))
)
out = aet_blas.BatchedDot()(a, b)
fgraph = FunctionGraph([a, b], [out])
compare_jax_and_py(fgraph, [get_test_value(i) for i in fgraph.inputs])
# A dimension mismatch should raise a TypeError for compatibility
inputs = [get_test_value(a)[:-1], get_test_value(b)]
opts = Query(include=[None], exclude=["cxx_only", "BlasOpt"])
jax_mode = Mode(JAXLinker(), opts)
aesara_jax_fn = function(fgraph.inputs, fgraph.outputs, mode=jax_mode)
with pytest.raises(TypeError):
aesara_jax_fn(*inputs)
# matrix . matrix
a = matrix("a")
a.tag.test_value = np.linspace(-1, 1, 5 * 3).astype(config.floatX).reshape((5, 3))
b = matrix("b")
b.tag.test_value = np.linspace(1, -1, 5 * 3).astype(config.floatX).reshape((5, 3))
out = aet_blas.BatchedDot()(a, b)
fgraph = FunctionGraph([a, b], [out])
compare_jax_and_py(fgraph, [get_test_value(i) for i in fgraph.inputs])
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from dataclasses import dataclass
from functools import partial
import itertools as it
from typing import Union, Optional, Callable, Dict
import numpy as np
import jax.numpy as jnp
from jax import core
from jax import linear_util as lu
from jax.api_util import flatten_fun
from jax.interpreters import partial_eval as pe
from jax.tree_util import tree_flatten, tree_unflatten, register_pytree_node
from jax._src import source_info_util, traceback_util
from jax import lax
from jax._src.util import as_hashable_function, unzip2, split_list
source_info_util.register_exclusion(__file__)
traceback_util.register_exclusion(__file__)
## Utils
def popattr(obj, attrname):
val = getattr(obj, attrname)
delattr(obj, attrname)
return val
def setnewattr(obj, name, val):
sentinel = object()
assert getattr(obj, name, sentinel) is sentinel
setattr(obj, name, val)
## Error value data type and functional assert.
@dataclass(frozen=True)
class Error:
err: Union[bool, core.Tracer]
code: Union[int, core.Tracer]
msgs: Dict[int, str]
def get(self) -> Optional[str]:
assert np.shape(self.err) == np.shape(self.code)
if np.size(self.err) == 1:
if self.err:
return self.msgs[int(self.code)]
else:
return '\n'.join(f'at mapped index {", ".join(map(str, idx))}: ' # type: ignore
f'{self.msgs[int(self.code[idx])]}' # type: ignore
for idx, e in np.ndenumerate(self.err) if e) or None
return None
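# Error is registered as a pytree so it can cross jit/vmap boundaries: err and
# code are traced leaves, while the msgs dict is static auxiliary data (made
# hashable as a sorted tuple of items).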
register_pytree_node(Error,
lambda e: ((e.err, e.code), tuple(sorted(e.msgs.items()))),
lambda msgs, data: Error(*data, dict(msgs))) # type: ignore
init_error = Error(False, 0, {})
next_code = it.count(1).__next__ # globally unique ids, could be uuid4
Bool = Union[bool, core.Tracer]
Int = Union[int, core.Tracer]
def assert_func(error: Error, pred: Bool, msg: str) -> Error:
code = next_code()
out_err = error.err | jnp.logical_not(pred)
out_code = lax.select(error.err, error.code, code)
return Error(out_err, out_code, {code: msg, **error.msgs})
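# A minimal usage sketch (not part of the original module) of how assert_func
# accumulates errors: once err is set, lax.select keeps the earlier code, so
# the first failing predicate determines the reported message.
#
#   err = init_error
#   err = assert_func(err, True, "never triggers")
#   err = assert_func(err, False, "first failure")
#   err = assert_func(err, False, "second failure")
#   assert err.get() == "first failure"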
## Checkify transformation for plumbing functional error values.
class ErrorTracer(core.Tracer):
def __init__(self, trace, val):
self._trace = trace
self.val = val
core.get_aval(val), val
aval = property(lambda self: core.get_aval(self.val))
full_lower = lambda self: self
class ErrorTrace(core.Trace):
pure = lift = lambda self, val: ErrorTracer(self, val)
def sublift(self, tracer):
return ErrorTracer(self, tracer.val)
def process_primitive(self, primitive, tracers, params):
in_vals = [t.val for t in tracers]
rule = error_checks.get(primitive)
if rule:
out, self.main.error = rule(self.main.error, *in_vals, **params) # type: ignore
else:
out = primitive.bind(*in_vals, **params)
if primitive.multiple_results:
return [ErrorTracer(self, x) for x in out]
else:
return ErrorTracer(self, out)
def process_call(self, primitive, f, tracers, params):
in_vals = [t.val for t in tracers]
e = popattr(self.main, 'error')
f, msgs = check_errors_subtrace(f, self.main, tuple(e.msgs.items()))
params_ = dict(params, donated_invars=(False, False, *params['donated_invars']))
err, code, *out_vals = primitive.bind(f, e.err, e.code, *in_vals, **params_)
setnewattr(self.main, 'error', Error(err, code, msgs()))
return [ErrorTracer(self, x) for x in out_vals]
def process_map(self, primitive, f, tracers, params):
in_vals = [t.val for t in tracers]
e = popattr(self.main, 'error')
f, msgs = check_errors_subtrace(f, self.main, tuple(e.msgs.items()))
@as_hashable_function(closure=params['out_axes_thunk'])
def new_out_axes_thunk():
return (0, 0, *params['out_axes_thunk']())
params_ = dict(params, in_axes=(None, None, *params['in_axes']),
out_axes_thunk=new_out_axes_thunk,
donated_invars=(False, False, *params['donated_invars']))
errs, codes, *outs = primitive.bind(f, e.err, e.code, *in_vals, **params_)
err, code = _reduce_any_error(errs, codes)
setnewattr(self.main, 'error', Error(err, code, msgs()))
return [ErrorTracer(self, x) for x in outs]
def post_process_call(self, primitive, tracers, params):
vals = [t.val for t in tracers]
main = self.main
e = popattr(self.main, 'error')
err, code, main.msgs = e.err, e.code, e.msgs
def todo(vals):
trace = main.with_cur_sublevel()
err, code, *vals = vals
return [ErrorTracer(trace, x) for x in vals]
return (err, code, *vals), todo
def post_process_map(self, primitive, tracers, params):
vals = [t.val for t in tracers]
main = self.main
e = popattr(self.main, 'error')
err, code, main.msgs = e.err, e.code, e.msgs
def todo(vals):
trace = main.with_cur_sublevel()
err, code, *vals = vals
return [ErrorTracer(trace, x) for x in vals]
def out_axes_transform(out_axes):
return (0, 0, *out_axes)
return (err, code, *vals), (todo, out_axes_transform)
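# After a map, errs/codes hold one entry per mapped instance; sorting by the
# boolean error flag moves erroring entries to the end, so the last elements
# say whether any instance failed and which code that instance reported.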
def _reduce_any_error(errs, codes):
errs_, codes_ = lax.sort_key_val(errs, codes, dimension=0)
return errs_[-1], codes_[-1]
ErrorCheckRule = Callable
error_checks: Dict[core.Primitive, ErrorCheckRule] = {}
def check_errors_flat(fun: lu.WrappedFun, *args):
fun, msgs = check_errors_subtrace(fun)
fun = check_errors_toplevel(fun)
err, code, *out_vals = fun.call_wrapped(*args)
return (err, code, out_vals), msgs()
@lu.transformation
def check_errors_toplevel(*args):
error = init_error
with core.new_main(ErrorTrace) as main:
msgs = tuple(error.msgs.items())
outs = yield (main, msgs, error.err, error.code, *args), {}
del main
yield outs
@lu.transformation_with_aux
def check_errors_subtrace(main, msgs, err, code, *args):
setnewattr(main, 'error', Error(err, code, dict(msgs)))
trace = main.with_cur_sublevel()
in_tracers = [ErrorTracer(trace, x) for x in args]
out = yield in_tracers, {}
out_tracers = map(trace.full_raise, out)
out_vals = [t.val for t in out_tracers]
err, code, msgs = main.error.err, main.error.code, main.error.msgs
del main.error
yield (err, code, *out_vals), msgs
def checkify_fun_to_jaxpr(f, error, in_avals):
f, msgs = check_errors_subtrace(f)
f = check_errors_traceable(f, tuple(error.msgs.items()))
err_aval = core.raise_to_shaped(core.get_aval(error.err))
code_aval = core.raise_to_shaped(core.get_aval(error.code))
avals_in = [err_aval, code_aval, *in_avals]
jaxpr_out, _, literals_out = pe.trace_to_jaxpr_dynamic(f, avals_in)
return core.ClosedJaxpr(jaxpr_out, literals_out), msgs()
# TODO take (error_aval, code_aval) instead of error here?
def checkify_jaxpr(jaxpr, error):
f = lu.wrap_init(core.jaxpr_as_fun(jaxpr))
return checkify_fun_to_jaxpr(f, error, jaxpr.in_avals)
# TODO dedup with check_errors_toplevel
@lu.transformation
def check_errors_traceable(msgs, err, code, *args):
with core.new_main(ErrorTrace) as main:
outs = yield (main, msgs, err, code, *args), {}
del main
yield outs
## assert primitive
def assert_(pred: Bool, msg: str) -> None:
if not is_scalar_pred(pred):
raise TypeError(f"assert_ takes a scalar pred as argument, got {pred}")
code = next_code()
return assert2_(pred, code, {code: msg})
def is_scalar_pred(pred) -> bool:
return (isinstance(pred, bool) or
isinstance(pred, jnp.ndarray) and pred.shape == () and
pred.dtype == jnp.dtype('bool'))
def assert2_(pred: Bool, code: Int, msgs: Dict[int, str]) -> None:
return assert_p.bind(pred, code, msgs=msgs)
assert_p = core.Primitive('assert')
assert_p.multiple_results = True # zero results
@assert_p.def_impl
def assert_impl(pred, code, *, msgs):
assert pred, msgs[int(code)]
return []
@assert_p.def_abstract_eval
def assert_abstract_eval(pred, code, *, msgs):
raise Exception("can't be staged!")
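# Usage sketch (an assumption, not taken from this file): assert_ runs fine
# eagerly through assert_impl, but it cannot be staged out (see
# assert_abstract_eval above), so under jit/scan it has to be discharged by
# the checkify transformation defined below.
#
#   def f(x):
#     assert_(x > 0., "x must be positive")
#     return x ** 0.5
#
#   err, out = checkify(f)(-1.)   # return order/content is an assumption here
#   err.get()                     # -> "x must be positive"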
## checkify rules
def summary() -> str:
return str(source_info_util.summarize(source_info_util.current()))
def nan_error_check(prim, error, *in_vals, **params):
out = prim.bind(*in_vals, **params)
no_nans = jnp.logical_not(jnp.any(jnp.isnan(out)))
msg = f"nan generated by primitive {prim.name} at {summary()}"
return out, assert_func(error, no_nans, msg)
def gather_error_check(error, operand, start_indices, *,
dimension_numbers, slice_sizes, unique_indices,
indices_are_sorted, mode, fill_value):
out = lax.gather_p.bind(
operand, start_indices, dimension_numbers=dimension_numbers,
slice_sizes=slice_sizes, unique_indices=unique_indices,
indices_are_sorted=indices_are_sorted, mode=mode, fill_value=fill_value)
# compare to OOB masking logic in lax._gather_translation_rule
dnums = dimension_numbers
operand_dims = np.array(operand.shape)
upper_bound = operand_dims[np.array(dnums.start_index_map)]
upper_bound -= np.array(slice_sizes)[np.array(dnums.start_index_map)]
all_inbounds = jnp.all((start_indices >= 0) & (start_indices <= upper_bound))
msg = f"out-of-bounds indexing at {summary()}"
return out, assert_func(error, all_inbounds, msg)
error_checks[lax.gather_p] = gather_error_check
def cond_error_check(error, index, *ops, branches, linear):
new_branches, msgs_ = unzip2(checkify_jaxpr(jxpr, error) for jxpr in branches)
new_linear = (False, False, *linear)
err, code, *outs = lax.cond_p.bind(
index, error.err, error.code, *ops,
branches=tuple(new_branches), linear=new_linear)
new_msgs = {k:v for d in it.chain([error.msgs], msgs_) for k, v in d.items()}
return outs, Error(err, code, new_msgs)
error_checks[lax.cond_p] = cond_error_check
def scan_error_check(error, *in_flat, reverse, length, jaxpr, num_consts, num_carry, linear, unroll):
consts, carry, xs = split_list(in_flat, [num_consts, num_carry])
checked_jaxpr, msgs_ = checkify_jaxpr(jaxpr, error)
new_linear = (False, False, *linear)
new_in_flat = [*consts, error.err, error.code, *carry, *xs]
err, code, *outs = lax.scan_p.bind(
*consts, *new_in_flat,
reverse=reverse, length=length, jaxpr=checked_jaxpr,
num_consts=len(consts), num_carry=len(carry)+2,
linear=new_linear, unroll=unroll)
new_msgs = {**error.msgs, **msgs_}
return outs, Error(err, code, new_msgs)
error_checks[lax.scan_p] = scan_error_check
def checkify_while_body_jaxpr(cond_jaxpr, body_jaxpr, error):
cond_f = core.jaxpr_as_fun(cond_jaxpr)
body_f = core.jaxpr_as_fun(body_jaxpr)
def new_body_f(*vals):
out = body_f(*vals)
_ = cond_f(*out) # this checks if the next cond application will error
return out
return checkify_fun_to_jaxpr(lu.wrap_init(new_body_f), error, body_jaxpr.in_avals)
def ignore_errors_jaxpr(jaxpr, error):
"""Constructs a jaxpr which takes two extra args but ignores them."""
err_aval = core.raise_to_shaped(core.get_aval(error.err))
code_aval = core.raise_to_shaped(core.get_aval(error.code))
consts = jaxpr.consts
jaxpr = jaxpr.jaxpr
new_vars = core.gensym([jaxpr])
new_invars = (new_vars(err_aval), new_vars(code_aval), *jaxpr.invars)
new_jaxpr = core.Jaxpr(jaxpr.constvars, new_invars,
jaxpr.outvars, jaxpr.eqns)
return core.ClosedJaxpr(new_jaxpr, consts)
def while_loop_error_check(error, *in_flat, cond_nconsts, cond_jaxpr, body_nconsts, body_jaxpr):
checked_cond_jaxpr, msgs_cond = checkify_jaxpr(cond_jaxpr, error)
checked_cond_fun = core.jaxpr_as_fun(checked_cond_jaxpr)
# Check if the first cond application will error.
cond_err, cond_code, _ = checked_cond_fun(error.err, error.code, *in_flat)
checked_body_jaxpr, msgs_body = checkify_while_body_jaxpr(cond_jaxpr, body_jaxpr, error)
compat_cond_jaxpr = ignore_errors_jaxpr(cond_jaxpr, error)
c_consts, b_consts, carry = split_list(in_flat, [cond_nconsts, body_nconsts])
new_in_flat = [*c_consts, *b_consts, cond_err, cond_code, *carry]
err, code, *out = lax.while_p.bind(
*new_in_flat,
cond_nconsts=cond_nconsts,
cond_jaxpr=compat_cond_jaxpr,
body_nconsts=body_nconsts,
body_jaxpr=checked_body_jaxpr)
new_msgs = {**error.msgs, **msgs_body, **msgs_cond}
return out, Error(err, code, new_msgs)
error_checks[lax.while_p] = while_loop_error_check
# TODO(mattjj,lenamartens): currently we bundle effectful-assert-discharging
# with the error-check-adding transformation (checkify), but they could be
# separated into two orthogonal transformations.
def assert_discharge_rule(error, pred, code, *, msgs):
out_err = error.err | jnp.logical_not(pred)
out_code = lax.select(error.err, error.code, code)
return [], Error(out_err, out_code, {**error.msgs, **msgs})
error_checks[assert_p] = assert_discharge_rule
## checkify api
def checkify(fun: Callable) -> Callable:
@traceback_util.api_boundary
def checked_fun(*args, **kwargs):
args_flat, in_tree = tree_flatten((args, kwargs))
f, out_tree = flatten_fun(lu.wrap_init(fun), in_tree)
(err, code, out_flat), msgs = check_errors_flat(f, *args_flat)
"""
.. codeauthor:: <NAME> <<EMAIL>>
"""
import datetime
import ipaddress
import sys
from decimal import Decimal
import pytest
from termcolor import colored
from typepy import (
Bool,
DateTime,
Dictionary,
Integer,
Nan,
RealNumber,
StrictLevel,
String,
Typecode,
)
from dataproperty import (
MIN_STRICT_LEVEL_MAP,
Align,
DataProperty,
DefaultValue,
Format,
LineBreakHandling,
Preprocessor,
)
from .common import get_strict_level_map
dateutil = pytest.importorskip("dateutil", minversion="2.7")
DATATIME_DATA = datetime.datetime(2017, 1, 2, 3, 4, 5)
nan = float("nan")
inf = float("inf")
class Test_DataPeroperty_eq:
@pytest.mark.parametrize(
["lhs", "rhs", "expected"],
[
[1, 1, True],
[1, 2, False],
[1, 0.1, False],
[1, True, False],
[1.1, 1.1, True],
[1, nan, False],
[nan, nan, True],
[None, None, True],
],
)
def test_normal(self, lhs, rhs, expected):
lhs = DataProperty(lhs)
rhs = DataProperty(rhs)
assert (lhs == rhs) == expected
assert (lhs != rhs) == (not expected)
class Test_DataPeroperty_data_typecode:
@pytest.mark.parametrize(
["value", "expected_data", "expected_typecode"],
[
["-0.00284241876820074", Decimal("-0.00284241876820074"), Typecode.REAL_NUMBER],
],
)
def test_normal(self, value, expected_data, expected_typecode):
dp = DataProperty(value)
assert dp == dp
assert dp.data == expected_data
assert dp.typecode == expected_typecode
@pytest.mark.parametrize(
["value", "is_convert", "expected_data", "expected_typecode"],
[
[1.0, True, 1, Typecode.INTEGER],
[sys.maxsize, True, sys.maxsize, Typecode.INTEGER],
[-sys.maxsize, False, -sys.maxsize, Typecode.INTEGER],
[str(-sys.maxsize), True, -sys.maxsize, Typecode.INTEGER],
[str(sys.maxsize), False, str(sys.maxsize), Typecode.STRING],
[1.1, True, 1, Typecode.INTEGER],
[-1.1, False, Decimal("-1.1"), Typecode.REAL_NUMBER],
[Decimal("1.1"), False, Decimal("1.1"), Typecode.REAL_NUMBER],
["1.1", True, 1, Typecode.INTEGER],
["-1.1", False, "-1.1", Typecode.STRING],
["a", True, "a", Typecode.STRING],
["a", False, "a", Typecode.STRING],
["", True, "", Typecode.NULL_STRING],
["", False, "", Typecode.NULL_STRING],
[" ", True, " ", Typecode.NULL_STRING],
[" ", False, " ", Typecode.NULL_STRING],
["3.3.5", True, "3.3.5", Typecode.STRING],
["51.0.2704.106", True, "51.0.2704.106", Typecode.STRING],
[True, True, 1, Typecode.INTEGER],
[False, False, False, Typecode.BOOL],
["100-0002", False, "100-0002", Typecode.STRING],
["127.0.0.1", True, ipaddress.IPv4Address("127.0.0.1"), Typecode.IP_ADDRESS],
["127.0.0.1", False, "127.0.0.1", Typecode.STRING],
["::1", True, ipaddress.IPv6Address("::1"), Typecode.IP_ADDRESS],
["::1", False, "::1", Typecode.STRING],
[[], True, [], Typecode.LIST],
[[], False, [], Typecode.LIST],
[{}, True, {}, Typecode.DICTIONARY],
[{}, False, {}, Typecode.DICTIONARY],
[
"2017-01-02 03:04:05",
True,
datetime.datetime(2017, 1, 2, 3, 4, 5),
Typecode.DATETIME,
],
[DATATIME_DATA, True, DATATIME_DATA, Typecode.DATETIME],
["2017-01-02 03:04:05", False, "2017-01-02 03:04:05", Typecode.STRING],
[None, True, None, Typecode.NONE],
[None, False, None, Typecode.NONE],
["None", True, "None", Typecode.STRING],
["None", False, "None", Typecode.STRING],
[inf, True, inf, Typecode.INFINITY],
[inf, False, Decimal(inf), Typecode.INFINITY],
["inf", True, Decimal(inf), Typecode.INFINITY],
["inf", False, "inf", Typecode.STRING],
["nan", False, "nan", Typecode.STRING],
[
"Høgskolen i Østfold er et eksempel...",
True,
"Høgskolen i Østfold er et eksempel...",
Typecode.STRING,
],
[
"Høgskolen i Østfold er et eksempel...",
False,
"Høgskolen i Østfold er et eksempel...",
Typecode.STRING,
],
["新しいテキスト ドキュメント.txt".encode(), True, "新しいテキスト ドキュメント.txt", Typecode.STRING],
],
)
def test_normal_strict_map(self, value, is_convert, expected_data, expected_typecode):
dp = DataProperty(value, strict_level_map=get_strict_level_map(not is_convert))
assert dp == dp
assert dp != DataProperty("test for __ne__")
assert dp.data == expected_data
assert dp.typecode == expected_typecode
@pytest.mark.parametrize(
["value", "strip_str", "is_strict", "expected_data", "expected_typecode"],
[
['"1"', '"', False, 1, Typecode.INTEGER],
['"1"', "", False, '"1"', Typecode.STRING],
['"1"', '"', True, "1", Typecode.STRING],
['"1"', "", False, '"1"', Typecode.STRING],
[b"hoge", "a", False, "hoge", Typecode.STRING],
["hogea", b"a", False, "hoge", Typecode.STRING],
],
)
def test_normal_strip_str(self, value, strip_str, is_strict, expected_data, expected_typecode):
dp = DataProperty(
value,
preprocessor=Preprocessor(strip_str=strip_str),
strict_level_map=get_strict_level_map(is_strict),
)
assert dp.data == expected_data
assert dp.typecode == expected_typecode
@pytest.mark.parametrize(
["value", "type_hint", "strict_level", "expected_typecode"],
[
["2017-01-02 03:04:05", None, StrictLevel.MIN, Typecode.DATETIME],
["2017-01-02 03:04:05", None, StrictLevel.MAX, Typecode.STRING],
["2017-01-02 03:04:05", DateTime, StrictLevel.MIN, Typecode.DATETIME],
["2017-01-02 03:04:05", DateTime, StrictLevel.MAX, Typecode.DATETIME],
["2017-01-02 03:04:05", Integer, StrictLevel.MIN, Typecode.DATETIME],
["2017-01-02 03:04:05", Integer, StrictLevel.MAX, Typecode.STRING],
["1,000,000,000,000", None, StrictLevel.MAX, Typecode.STRING],
["1,000,000,000,000", None, StrictLevel.MIN + 1, Typecode.INTEGER],
["1,000,000,000,000", None, StrictLevel.MIN, Typecode.INTEGER],
["1,000,000,000,000", Integer, StrictLevel.MAX, Typecode.INTEGER],
["1,000,000,000,000", Integer, StrictLevel.MIN, Typecode.INTEGER],
[DATATIME_DATA, None, StrictLevel.MIN, Typecode.DATETIME],
[DATATIME_DATA, None, StrictLevel.MAX, Typecode.DATETIME],
[DATATIME_DATA, String, StrictLevel.MIN, Typecode.STRING],
[DATATIME_DATA, String, StrictLevel.MAX, Typecode.STRING],
["100-0002", None, StrictLevel.MIN, Typecode.STRING],
["45e76582", None, StrictLevel.MIN, Typecode.INTEGER],
["45e76582", None, StrictLevel.MAX, Typecode.STRING],
["4.5e-4", None, StrictLevel.MIN, Typecode.INTEGER],
["4.5e-4", None, StrictLevel.MIN + 1, Typecode.REAL_NUMBER],
["4.5e-4", None, StrictLevel.MAX, Typecode.STRING],
[1, String, StrictLevel.MAX, Typecode.STRING],
[1, String, StrictLevel.MIN, Typecode.STRING],
[float("inf"), RealNumber, StrictLevel.MAX, Typecode.INFINITY],
[float("inf"), RealNumber, StrictLevel.MIN, Typecode.INFINITY],
[1, RealNumber, StrictLevel.MAX, Typecode.INTEGER],
[1, RealNumber, StrictLevel.MIN, Typecode.INTEGER],
[1.1, Integer, StrictLevel.MAX, Typecode.INTEGER],
[1.1, Integer, StrictLevel.MIN, Typecode.INTEGER],
["true", None, StrictLevel.MAX, Typecode.STRING],
["false", None, StrictLevel.MAX, Typecode.STRING],
["true", None, StrictLevel.MIN, Typecode.BOOL],
["false", None, StrictLevel.MIN, Typecode.BOOL],
["true", Bool, StrictLevel.MIN, Typecode.BOOL],
["false", Bool, StrictLevel.MIN, Typecode.BOOL],
[b"hoge", None, StrictLevel.MAX, Typecode.STRING],
[b"hoge", None, StrictLevel.MIN, Typecode.STRING],
['{"foo": 10}', None, StrictLevel.MAX, Typecode.STRING],
['{"foo": 10}', None, StrictLevel.MIN, Typecode.DICTIONARY],
['{"foo": 10}', Dictionary, StrictLevel.MAX, Typecode.DICTIONARY],
['{"foo": 10}', Dictionary, StrictLevel.MIN, Typecode.DICTIONARY],
[{"foo": 10}, None, StrictLevel.MAX, Typecode.DICTIONARY],
[{"foo": 10}, None, StrictLevel.MIN, Typecode.LIST],
],
)
def test_normal_type_hint(self, value, type_hint, strict_level, expected_typecode):
dp = DataProperty(value, type_hint=type_hint, strict_level_map={"default": strict_level})
assert dp.typecode == expected_typecode
@pytest.mark.parametrize(
["value", "is_convert", "expected_data", "expected_typecode"],
[
[nan, True, nan, Typecode.NAN],
[nan, False, nan, Typecode.NAN],
["nan", True, nan, Typecode.NAN],
],
)
def test_normal_nan(self, value, is_convert, expected_data, expected_typecode):
dp = DataProperty(value, strict_level_map=get_strict_level_map(not is_convert))
assert Nan(dp.data).is_type()
assert dp.typecode == expected_typecode
class Test_DataPeroperty_to_str:
@pytest.mark.parametrize(
["value", "type_hint", "is_strict", "expected_data", "expected_str"],
[
[float("inf"), None, True, Decimal("inf"), "Infinity"],
[float("inf"), None, False, Decimal("inf"), "Infinity"],
[float("inf"), RealNumber, True, Decimal("inf"), "Infinity"],
[float("inf"), RealNumber, False, Decimal("inf"), "Infinity"],
[float("inf"), String, False, "inf", "inf"],
],
)
def test_normal(self, value, type_hint, is_strict, expected_data, expected_str):
dp = DataProperty(
value, type_hint=type_hint, strict_level_map=get_strict_level_map(is_strict)
)
assert dp.data == expected_data
assert dp.to_str() == expected_str
@pytest.mark.parametrize(
["value", "format_flags", "expected"], [[1234567, Format.THOUSAND_SEPARATOR, "1,234,567"]]
)
def test_normal_format_str(self, value, format_flags, expected):
dp = DataProperty(value, format_flags=format_flags)
assert dp.to_str() == expected
class Test_DataPeroperty_set_data:
@pytest.mark.parametrize(
["value", "is_convert", "replace_tabs_with_spaces", "tab_length", "expected"],
[
["a\tb", True, True, 2, "a b"],
["\ta\t\tb\tc\t", True, True, 2, " a b c "],
["a\tb", True, True, 4, "a b"],
["a\tb", True, False, 4, "a\tb"],
["a\tb", True, True, None, "a\tb"],
],
)
def test_normal_tab(self, value, is_convert, replace_tabs_with_spaces, tab_length, expected):
preprocessor = Preprocessor(
replace_tabs_with_spaces=replace_tabs_with_spaces,
tab_length=tab_length,
)
dp = DataProperty(
value, preprocessor=preprocessor, strict_level_map=get_strict_level_map(not is_convert)
)
assert dp.data == expected
class Test_DataPeroperty_is_escape_html_tag:
@pytest.mark.parametrize(
["value", "is_escape_html_tag", "expected"],
[
[
"<a href='https://google.com'>test</a>",
True,
"<a href='https://google.com'>test</a>",
],
[
"<a href='https://google.com'>test</a>",
False,
"<a href='https://google.com'>test</a>",
],
[True, True, True],
],
)
def test_normal_tab(self, value, is_escape_html_tag, expected):
dp = DataProperty(value, preprocessor=Preprocessor(is_escape_html_tag=is_escape_html_tag))
assert dp.data == expected
class Test_DataPeroperty_float_type:
@pytest.mark.parametrize(
["value", "float_type", "expected"], [[1.1, float, 1.1], [1.1, Decimal, Decimal("1.1")]]
)
def test_normal_tab(self, value, float_type, expected):
dp = DataProperty(value, float_type=float_type)
assert isinstance(dp.data, float_type)
assert dp.data == expected
class Test_DataPeroperty_align:
@pytest.mark.parametrize(
["value", "expected"],
[
[1, Align.RIGHT],
[1.1, Align.RIGHT],
["a", Align.LEFT],
[True, Align.LEFT],
[DATATIME_DATA, Align.LEFT],
[None, Align.LEFT],
[inf, Align.LEFT],
[nan, Align.LEFT],
],
)
def test_normal(self, value, expected):
dp = DataProperty(value)
assert dp.align == expected
class Test_DataPeroperty_len:
@pytest.mark.parametrize(
["value", "expected_acw", "expected_len"],
[
[1, 1, None],
[-1, 2, None],
[1.0, 1, None],
[-1.0, 2, None],
[1.1, 3, None],
[-1.1, 4, None],
[12.34, 5, None],
["000", 1, None],
["123456789", 9, None],
["-123456789", 10, None],
["45e76582", 8, 8],
["a", 1, 1],
["a" * 1000, 1000, 1000],
["あ", 2, 1],
[True, 4, None],
[None, 4, None],
[inf, 8, None],
[nan, 3, None],
],
)
def test_normal(self, value, expected_acw, expected_len):
dp = DataProperty(value)
assert dp.ascii_char_width == expected_acw
assert dp.length == expected_len
@pytest.mark.parametrize(
["value", "expected_acw", "expected_len"],
[
[colored(0, "red"), 1, 10],
[colored(12.34, "red"), 5, 14],
[colored("abc", "green"), 3, 12],
],
)
def test_normal_ascii_escape_sequence(self, value, expected_acw, expected_len):
dp = DataProperty(value)
assert dp.ascii_char_width == expected_acw
assert dp.length == expected_len
@pytest.mark.parametrize(
["value", "eaaw", "expected_acw", "expected_len"], [["øø", 1, 2, 2], ["øø", 2, 4, 2]]
)
def test_normal_eaaw(self, value, eaaw, expected_acw, expected_len):
dp = DataProperty(value, east_asian_ambiguous_width=eaaw)
assert dp.ascii_char_width == expected_acw
assert dp.length == expected_len
@pytest.mark.parametrize(["value", "expected"], [[nan, nan]])
def test_abnormal(self, value, expected):
assert Nan(DataProperty(value).length).is_type()
@pytest.mark.parametrize(
["value", "eaaw", "expected"],
[["øø", None, ValueError], ["øø", 0, ValueError], ["øø", 3, ValueError]],
)
def test_exception_eaaw(self, value, eaaw, expected):
with pytest.raises(expected):
DataProperty(value, east_asian_ambiguous_width=eaaw).ascii_char_width
class Test_DataPeroperty_is_include_ansi_escape:
@pytest.mark.parametrize(
["value", "expected_acw"],
[
[0, False],
[colored(0, "red"), True],
[12.34, False],
[colored(12.34, "red"), True],
["abc", False],
[colored("abc", "green"), True],
],
)
def test_normal(self, value, expected_acw):
assert DataProperty(value).is_include_ansi_escape == expected_acw
class Test_DataPeroperty_line_break_handling:
@pytest.mark.parametrize(
["value", "line_break_handling", "expected"],
[
["a\nb", LineBreakHandling.NOP, "a\nb"],
["a\nb", LineBreakHandling.REPLACE, "a b"],
["a\nb", LineBreakHandling.ESCAPE, "a\\nb"],
["a\r\nb", LineBreakHandling.ESCAPE, "a\\r\\nb"],
[123, LineBreakHandling.ESCAPE, 123],
],
)
def test_normal(self, value, line_break_handling, expected):
preprocessor = Preprocessor(line_break_handling=line_break_handling)
assert DataProperty(value, preprocessor=preprocessor).data == expected
class Test_DataPeroperty_line_break_repl:
@pytest.mark.parametrize(
["value", "line_break_handling", "line_break_repl", "expected"],
[
["a\nb", LineBreakHandling.REPLACE, "<br>", "a<br>b"],
["a\n\r\n\nb", LineBreakHandling.REPLACE, "<br>", "a<br><br><br>b"],
["a\nb", LineBreakHandling.NOP, "<br>", "a\nb"],
],
)
def test_normal(self, value, line_break_handling, line_break_repl, expected):
preprocessor = Preprocessor(
line_break_handling=line_break_handling, line_break_repl=line_break_repl
)
assert DataProperty(value, preprocessor=preprocessor).data == expected
class Test_DataPeroperty_escape_formula_injection:
@pytest.mark.parametrize(
["value", "escape_formula_injection", "expected"],
[
["a+b", True, "a+b"],
["=a+b", True, "'=a+b"],
["=a+b", False, "=a+b"],
["-a+b", True, "'-a+b"],
["-a+b", False, "-a+b"],
["+a+b", True, "'+a+b"],
["+a+b", False, "+a+b"],
["@a+b", True, "'@a+b"],
["@a+b", False, "@a+b"],
],
)
def test_normal(self, value, escape_formula_injection, expected):
preprocessor = Preprocessor(is_escape_formula_injection=escape_formula_injection)
assert DataProperty(value, preprocessor=preprocessor).data == expected
@pytest.mark.parametrize(
import logging
import cv2
import numpy as np
import pytesseract
import os
import time
import json
import re
from multiprocessing import Pool
from Levenshtein import distance
from .input_handler import InputHandler
from .grabscreen import grab_screen
from .utils import get_config, filter_mod
# This is a position of the inventory as fraction of the resolution
OWN_INVENTORY_ORIGIN = (0.6769531, 0.567361)
# These are the sockets positions as measured on 2560x1440 resolution
# with X_SCALE and Y_SCALE applied, i.e., scale * SOCKETS[i] is the i:th
# sockets absolute pixel position with origin in the middle of the skill tree
# I think the SCALE variables are in fact useless and a relics from the
# positions initially being measured at a view which wasn't zoomed out maximally
SOCKETS = {
1: (-650.565, -376.013),
2: (648.905, -396.45),
3: (6.3354, 765.658),
4: (-1700.9, 2424.17),
5: (-2800.66, -215.34),
6: (-1435.02, -2635.39),
7: (1855.53, -2360.1),
8: (2835.84, 230.5361),
9: (1225.37, 2625.76),
10: (-120.12471, 5195.44),
11: (-3580.19, 5905.92),
12: (-5395.86, 2120.42),
13: (-6030.95, -115.7007),
14: (-5400.59, -1985.18),
15: (-3035.14, -5400.87),
16: (160.10728, -5196.32),
17: (3382.05, -5195.21),
18: (5730.2, -1625.75),
19: (6465.24, 190.3341),
20: (5542.76, 1690.07),
21: (3322.76, 6090.5),
}
# The offsets are specified in the same fashion as SOCKETS and are rough
# guesses which allow us to move to the general area and later refine the
# position of the socket through template matching
SOCKET_MOVE_OFFSET = {
1: (0, 150),
2: (0, 150),
3: (0, 200),
4: (0, 150),
5: (-300, 200),
6: (-100, 150),
7: (-150, 0),
8: (0, -150),
9: (-100, -125),
10: (170, 0),
11: (-400, -900),
12: (0, 300),
13: (400, 200),
14: (-250, -150),
15: (-100, -150),
16: (150, -150),
17: (150, 500),
18: (-300, 400),
19: (-1000, -150),
20: (-500, 500),
21: (100, -1000),
}
# Scalers for the SOCKETS positions to convert them to 2560x1440 pixel positions
X_SCALE = 0.2
Y_SCALE = 0.2
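# Rough sketch (an assumption; the real conversion is done by _tree_pos_to_xy
# further down) of how a SOCKETS entry maps to an absolute pixel position when
# the view is centered on the tree origin:
#
#   def socket_to_pixel(socket_id, resolution=(2560, 1440)):
#       px_mult = resolution[0] / 2560
#       tx, ty = SOCKETS[socket_id]
#       x = resolution[0] / 2 + tx * X_SCALE * px_mult
#       y = resolution[1] / 2 + ty * Y_SCALE * px_mult
#       return x, y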
CIRCLE_EFFECTIVE_RADIUS = 300
IMAGE_FOLDER = "data/images/"
# We're using a lot of template matching and all templates are defined here
# with matching thresholds (scores) and sizes per resolution
TEMPLATES = {
"AmbidexterityCluster.png": {
"1440p_size": (34, 34),
"1440p_threshold": 0.95,
"1080p_size": (26, 26),
"1080p_threshold": 0.95,
},
"FreeSpace.png": {
"1440p_size": (41, 41),
"1440p_threshold": 0.98,
"1080p_size": (30, 30),
"1080p_threshold": 0.98,
},
"Notable.png": {
"1440p_size": (30, 30),
"1440p_threshold": 0.89,
"1080p_size": (23, 23),
"1080p_threshold": 0.85,
},
"NotableAllocated.png": {
"1440p_size": (30, 30),
"1440p_threshold": 0.93,
"1080p_size": (23, 23),
"1080p_threshold": 0.90,
},
"Jewel.png": {
"1440p_size": (30, 30),
"1440p_threshold": 0.92,
"1080p_size": (23, 23),
"1080p_threshold": 0.92,
},
"JewelSocketed.png": {
"1440p_size": (30, 30),
"1440p_threshold": 0.9,
"1080p_size": (23, 23),
"1080p_threshold": 0.9,
},
"LargeJewel.png": {
"1440p_size": (39, 39),
"1440p_threshold": 0.9,
"1080p_size": (30, 30),
"1080p_threshold": 0.88,
},
"LargeJewelSocketed.png": {
"1440p_size": (39, 39),
"1440p_threshold": 0.9,
"1080p_size": (30, 30),
"1080p_threshold": 0.88,
},
"Skill.png": {
"1440p_size": (21, 21),
"1440p_threshold": 0.87,
"1080p_size": (15, 15),
"1080p_threshold": 0.91,
},
"SkillAllocated.png": {
"1440p_size": (21, 21),
"1440p_threshold": 0.93,
"1080p_size": (15, 15),
"1080p_threshold": 0.91,
},
}
# Defines the position of the text box which is cropped out and OCR'd per node
TXT_BOX = {"x": 32, "y": 0, "w": 900, "h": 320}
mod_files = {
"passives": "data/passives.json",
"passivesAlt": "data/passivesAlternatives.json",
"passivesAdd": "data/passivesAdditions.json",
"passivesVaalAdd": "data/passivesVaalAdditions.json",
}
class TreeNavigator:
def __init__(self, resolution, halt_value):
self.resolution = resolution
self.input_handler = InputHandler(self.resolution)
logging.basicConfig(
level=logging.INFO,
format="%(asctime)s %(message)s",
datefmt="[%H:%M:%S %d-%m-%Y]",
)
self.log = logging.getLogger("tree_nav")
self.config = get_config("tree_nav")
self.find_mod_value_re = re.compile(r"(\(?(?:[0-9]*\.?[0-9]-?)+\)?)")
self.nonalpha_re = re.compile("[^a-zA-Z]")
self.origin_pos = (self.resolution[0] / 2, self.resolution[1] / 2)
self.ingame_pos = [0, 0]
self.px_multiplier = self.resolution[0] / 2560
self.resolution_prefix = str(self.resolution[1]) + "p_"
self.templates_and_masks = self.load_templates()
self.passive_mods, self.passive_names = self.generate_good_strings(mod_files)
self.passive_nodes = list(self.passive_mods.keys()) + list(
self.passive_names.keys()
)
self.halt = halt_value
self.first_run = True
def _run(self):
return not bool(self.halt.value)
def eval_jewel(self, item_location):
self.ingame_pos = [0, 0]
item_name, item_desc = self._setup(item_location, copy=True)
pool = Pool(self.config["ocr_threads"])
jobs = {}
if self.first_run:
# We just initiated the module and not sure where we are
# Thus, we better rectify our position estimate before starting
self._refind_position(SOCKETS[1])
self.first_run = False
for socket_id in sorted(SOCKETS.keys()):
if not self._run():
return None, None, None
found_socket = self._move_screen_to_socket(socket_id)
if not found_socket and socket_id == 1:
self.log.info("We are lost - trying to find known location")
# We just initiated the search and have no clue where we are
# Thus, we better rectify our position estimate before starting
self._refind_position(SOCKETS[1])
socket_nodes = self._analyze_nodes(socket_id)
# Convert stats for the socket from image to lines in separate process
self.log.info("Performing asynchronous OCR")
jobs[socket_id] = pool.map_async(OCR.node_to_strings, socket_nodes)
self.log.info("Analyzed socket %s" % socket_id)
# Return to socket 1 to ease next search
self._move_to_tree_pos_using_spaces(SOCKETS[1])
self._setup(item_location)
self.log.info("Waiting for last OCR to finish")
item_stats = [
{
"socket_id": socket_id,
"socket_nodes": self._filter_ocr_lines(
jobs[socket_id].get(timeout=300)
),
}
for socket_id in jobs
]
pool.close()
pool.join()
return item_name, item_desc, item_stats
def load_templates(self, threshold=128):
templates_and_masks = {}
for template_name in TEMPLATES.keys():
template_path = os.path.join(IMAGE_FOLDER, template_name)
img = cv2.imread(template_path, cv2.IMREAD_UNCHANGED)
size = TEMPLATES[template_name][self.resolution_prefix + "size"]
channels = cv2.split(img)
mask = None
if len(channels) > 3:
mask = np.array(channels[3])
mask[mask <= threshold] = 0
mask[mask > threshold] = 255
mask = cv2.resize(mask, size)
img = cv2.imread(template_path, 0)
img = cv2.resize(img, size)
templates_and_masks[template_name] = {"image": img, "mask": mask}
return templates_and_masks
def _move_screen_to_socket(self, socket_id):
self.log.debug("Moving close to socket %s" % socket_id)
move_offset_tx, move_offset_ty = SOCKET_MOVE_OFFSET[socket_id]
move_offset = self._tree_pos_to_xy(
[move_offset_tx, move_offset_ty], offset=True
)
socket_tx, socket_ty = SOCKETS[socket_id]
socket_xy = self._tree_pos_to_xy([socket_tx, socket_ty])
compensation_offset = self._find_socket(socket_xy)
if compensation_offset is None:
found_socket = False
compensation_offset = [0, 0]
else:
found_socket = True
self.log.debug("Compensated navigation with %s" % compensation_offset)
move_to = [
socket_xy[0] + compensation_offset[0] + move_offset[0],
socket_xy[1] + compensation_offset[1] + move_offset[1],
]
x_offset = move_to[0] - self.resolution[0] / 2
y_offset = move_to[1] - self.resolution[1] / 2
self.input_handler.click(
*move_to, *move_to, button=None, raw=True, speed_factor=1
)
self.input_handler.drag(self.origin_pos[0], self.origin_pos[1], speed_factor=1)
self.input_handler.rnd_sleep(min=200, mean=300, sigma=100)
self.ingame_pos = [socket_tx + move_offset_tx, socket_ty + move_offset_ty]
return found_socket
def _refind_position(self, desired_tree_pos):
# If the current location has been determined to be incorrect
# we can go to the bottom right corner and find a cluster close
# to socket 21, namely the Ambidexterity cluster
# This is a known location, which can then be used to calculate
# our way to a desired position
self.log.debug("Centering screen position")
# Correct our tree position to a known value
self._locate_screen_using_ambidexterity()
# Find our way to the desired position
self._move_to_tree_pos_using_spaces(desired_tree_pos)
def _move_to_tree_pos_using_spaces(self, desired_tree_pos, max_position_error=5):
dx = desired_tree_pos[0] - self.ingame_pos[0]
dy = desired_tree_pos[1] - self.ingame_pos[1]
self.log.debug("Moving to tree pos using spaces. Deltas: ({}, {})".format(dx, dy))
while (abs(dx) + abs(dy)) > max_position_error:
# Choose quadrant to find spaces in based on dx, dy
right, bottom = dx >= 0, dy >= 0
if right and not bottom:
quadrant = 0
elif not right and not bottom:
quadrant = 1
elif not right and bottom:
quadrant = 2
elif right and bottom:
quadrant = 3
# Find empty spaces that we can drag from
spaces = self._find_empty_space(quadrant)
if spaces is None:
raise ValueError("Could not find an empty space, quitting.")
# Choose a random empty space for maximum drag
chosen_space = spaces[np.random.randint(spaces.shape[0])]
# How far to drag the window to end up in the optimal place
screen_move_x, screen_move_y = self._tree_pos_to_xy([dx, dy],
offset=True)
# Calculate where our drag should end up to perform the move
drag_x = chosen_space[0] - screen_move_x
drag_y = chosen_space[1] - screen_move_y
# We should only drag within the screen's resolution
# Additionally, we use 100px margin to not trigger tree scroll
drag_x = np.clip(drag_x, 100, self.resolution[0] - 100)
drag_y = np.clip(drag_y, 100, self.resolution[1] - 100)
# Drag
self.input_handler.click(
*chosen_space, *chosen_space, button=None, raw=True, speed_factor=1
)
self.input_handler.drag(drag_x, drag_y, speed_factor=1)
self.input_handler.rnd_sleep(min=200, mean=300, sigma=100)
# Calculate how far we've actually moved
effective_move_x = chosen_space[0] - drag_x
effective_move_y = chosen_space[1] - drag_y
# Update our internal tree position
self.ingame_pos = self._add_xy_offset_to_tree_pos(
[effective_move_x, effective_move_y]
)
# Figure out how much we have left to move
dx = desired_tree_pos[0] - self.ingame_pos[0]
dy = desired_tree_pos[1] - self.ingame_pos[1]
def _locate_screen_using_ambidexterity(self):
# Essentially, this is _move_to_tree_pos_using_spaces but
# only used to find the tree position by navigating to a known point
self.log.debug("Moving to ambidexterity")
ambidexterity_position = None
assumed_ambidexterity_position = (0.25234375, 0.20555556)
while ambidexterity_position is None:
# Find empty spaces that we can drag from
spaces = self._find_empty_space(3)
if spaces is None:
raise ValueError("Could not find an empty space, quitting.")
#$Id$
from books.model.Address import Address
class Organization:
"""This class is used to create object for organization."""
def __init__(self):
"""Initialize parameters for organization object. """
self.organization_id = ''
self.name = ''
self.is_default_org = None
self.account_created_date = ''
self.time_zone = ''
self.language_code = ''
self.date_format = ''
self.field_separator = ''
self.fiscal_year_start_month = ''
self.contact_name = ''
self.industry_type = ''
self.industry_size = ''
self.company_id_label = ''
self.company_id_value = ''
self.tax_id_label = ''
self.tax_id_value = ''
self.currency_id = ''
self.currency_code = ''
self.currency_symbol = ''
self.currency_format = ''
self.price_precision = 0
self.address = Address()
self.org_address = ''
self.remit_to_address = ''
self.phone = ''
self.fax = ''
self.website = ''
self.email = ''
self.tax_basis = ''
self.is_org_active = None
self.name = ''
self.value = ''
self.version = ''
self.plan_type = 0
self.plan_name = ''
self.plan_period = ''
self.tax_group_enabled = None
self.account_created_date_formatted = ""
self.zi_migration_status = 0
self.user_role = ''
self.custom_fields = []
self.is_new_customer_custom_fields = None
self.is_portal_enabled = None
self.portal_name = ''
self.tax_type = ''
def set_organization_id(self, organization_id):
"""Set organization id.
Args:
organization_id(str): Organization id.
"""
self.organization_id = organization_id
def get_organization_id(self):
"""Get organization id.
Returns:
str: Organization id.
"""
return self.organization_id
def set_name(self, name):
"""Set name.
Args:
name(str): Name.
"""
self.name = name
def set_is_default_org(self, is_default_org):
"""Set whether it is default organization.
Args:
is_default_org(bool): True if it is default organization else false.
"""
self.is_default_org = is_default_org
def get_is_default_org(self):
"""Get whether it is default organization.
Returns:
bool: True if it is default organization else false.
"""
return self.is_default_org
def set_account_created_date(self, account_created_date):
"""Set account created date.
Args:
account_created_date(str): Account created date.
"""
self.account_created_date = account_created_date
def get_account_created_date(self):
"""Get account created date.
Returns:
str: Account created date.
"""
return self.account_created_date
def set_time_zone(self, time_zone):
"""Set time zone.
Args:
time_zone(str): Time zone.
"""
self.time_zone = time_zone
def get_time_zone(self):
"""Get time zone.
Returns:
str: Time zone.
"""
return self.time_zone
def set_language_code(self, language_code):
"""Set language code.
Args:
language_code(str): Language code.
"""
self.language_code = language_code
def get_language_code(self):
"""Get language code.
Returns:
str: Language code.
"""
return self.language_code
def set_date_format(self, date_format):
"""Set date format.
Args:
date_format(str): Date format.
"""
self.date_format = date_format
def get_date_format(self):
"""Get date format.
Returns:
str: Date format.
"""
return self.date_format
def set_field_separator(self, field_separator):
"""Set field separator.
Args:
field_separator(str): Field separator.
"""
self.field_separator = field_separator
def get_field_separator(self):
"""Get field separator.
Returns:
str: Field separator.
"""
return self.field_separator
def set_fiscal_year_start_month(self, fiscal_year_start_month):
"""Set fiscal year start month.
Args:
fiscal_year_start_month(str): Fiscal year start month.
"""
self.fiscal_year_start_month = fiscal_year_start_month
def get_fiscal_year_start_month(self):
"""Get fiscal year start month.
Returns:
str: Fiscal year start month.
"""
return self.fiscal_year_start_month
def set_contact_name(self, contact_name):
"""Set contact name.
Args:
contact_name(str): Contact name.
"""
self.contact_name = contact_name
def get_contact_name(self):
"""Get contact name.
Returns:
str: Contact name.
"""
return self.contact_name
def set_industry_type(self, industry_type):
"""Set industry type.
Args:
industry_type(str): Industry type.
"""
self.industry_type = industry_type
def get_industry_type(self):
"""Get industry type.
Returns:
str: Industry type.
"""
return self.industry_type
def set_industry_size(self, industry_size):
"""Set industry size.
Args:
industry_size(str): Industry size.
"""
self.industry_size = industry_size
def get_industry_size(self):
"""Get industry size.
Returns:
str: Industry size.
"""
return self.industry_size
def set_company_id_label(self, company_id_label):
"""Set company id label.
Args:
company_id_label(str): Company id label.
"""
self.company_id_label = company_id_label
def get_company_id_label(self):
"""Get company id label.
Returns:
str: Company id label.
"""
return self.company_id_label
def set_company_id_value(self, company_id_value):
"""Set company id value.
Args:
company_id_value(str): Company id value.
"""
self.company_id_value = company_id_value
def get_company_id_value(self):
"""Get company id value.
Returns:
str: Company id value.
"""
return self.company_id_value
def set_tax_id_label(self, tax_id_label):
"""Set tax id label.
Args:
tax_id_label(str): Tax id label.
"""
self.tax_id_label = tax_id_label
def get_tax_id_label(self):
"""Get tax id label.
Returns:
str: Tax id label.
"""
return self.tax_id_label
def set_tax_id_value(self, tax_id_value):
"""Set tax id value.
Args:
tax_id_value(str): Tax id value.
"""
self.tax_id_value = tax_id_value
def get_tax_id_value(self):
"""Get tax id value.
Returns:
str: Tax id value.
"""
return self.tax_id_value
def set_currency_id(self, currency_id):
"""Set currency id.
Args:
currency_id(str): Currency id.
"""
self.currency_id = currency_id
def get_currency_id(self):
"""Get currency id.
Returns:
str: Currency id.
"""
return self.currency_id
def set_currency_code(self, currency_code):
"""Set currency code.
Args:
currency_code(str): Currency code.
"""
self.currency_code = currency_code
def get_currency_code(self):
"""Get currency code.
Returns:
str: Currency code.
"""
return self.currency_code
def set_currency_symbol(self, currency_symbol):
"""Set currency symbol.
Args:
currency_symbol(str): Currency symbol.
"""
self.currency_symbol = currency_symbol
def get_currency_symbol(self):
"""Get currency symbol.
Returns:
str: Currency symbol.
"""
return self.currency_symbol
def set_currency_format(self, currency_format):
"""Set currency format.
Args:
currency_format(str): Currency format.
"""
self.currency_format = currency_format
def get_currency_format(self):
"""Get currency format.
Returns:
str: Currency format.
"""
return self.currency_format
def set_price_precision(self, price_precision):
"""Set price precision.
Args:
price_precision(int): Price precision.
"""
self.price_precision = price_precision
def set_address(self, address):
"""Set address.
Args:
address(instance): Address
"""
self.address = address
def get_address(self):
"""Get address.
Returns:
instance: Address.
"""
return self.address
def set_org_address(self, org_address):
"""Set organization address.
Args:
org_address(str): Organization address.
"""
self.org_address = org_address
def get_org_address(self):
"""Get organization address.
Returns:
str: Organization address.
"""
return self.org_address
def set_remit_to_address(self, remit_to_address):
"""Set remit to address.
Args:
remit_to_address(str): Remit to address.
"""
self.remit_to_address = remit_to_address
def get_remit_to_address(self):
"""Get remit to address.
Returns:
str: Remit to address.
"""
return self.remit_to_address
def set_phone(self, phone):
"""Set phone.
Args:
phone(str): Phone.
"""
self.phone = phone
def get_phone(self):
"""Get phone.
Returns:
str: Phone.
"""
return self.phone
def set_fax(self, fax):
"""Set fax.
Args:
fax(str): Fax.
"""
self.fax = fax
def get_fax(self):
"""Get fax.
Returns:
str: Fax.
"""
return self.fax
def set_website(self, website):
"""Set website.
Args:
website(str): Website.
"""
self.website = website
def set_email(self, email):
"""Set email.
Args:
email(str): Email.
"""
self.email = email
def get_email(self):
"""Get email.
Returns:
str: Email.
"""
return self.email
def set_tax_basis(self, tax_basis):
"""Set tax basis.
Args:
tax_basis(str): Tax basis.
"""
self.tax_basis = tax_basis
def get_tax_basis(self):
"""Get tax basis.
Returns:
str: Tax basis.
"""
return self.tax_basis
def set_is_org_active(self, is_org_active):
"""Set whether the organization is active or not.
Args:
is_org_active(bool): True if organization is active else false.
"""
self.is_org_active = is_org_active
def set_name(self, name):
"""Set name.
Args:
name(str): Name.
"""
self.name = name
def get_name(self):
"""Get name.
Returns:
str: Name.
"""
return self.name
def set_value(self, value):
"""Set value.
Args:
value(str): Value.
"""
self.value = value
def get_value(self):
"""Get value.
Returns:
str: Value.
"""
return self.value
def set_version(self, version):
"""Set version.
Args:
version(str): Version
"""
self.version = version
def get_version(self):
"""Get version.
Returns:
str: Version.
"""
return self.version
def set_plan_type(self, plan_type):
"""Set plan type.
Args:
plan_type(int): Plan type.
"""
self.plan_type = plan_type
def get_plan_type(self):
"""Get plan type.
Returns:
int: Plan type.
"""
return self.plan_type
def set_plan_name(self, plan_name):
"""Set plan name.
Args:
plan_name(str): Plan name.
"""
self.plan_name = plan_name
def get_plan_name(self):
"""Get plan name.
Returns:
str: Plan name.
"""
return self.plan_name
def set_plan_period(self, plan_period):
"""Set plan period.
Args:
plan_period(str): Plan period.
"""
self.plan_period = plan_period
def get_plan_period(self):
"""Get plan period.
Returns:
str: Plan period.
"""
return self.plan_period
def set_tax_group_enabled(self, tax_group_enabled):
"""Set tax group enabled.
Args:
tax_group_enabled(bool): Tax group enabled.
"""
self.tax_group_enabled = tax_group_enabled
def get_tax_group_enabled(self):
"""Get tax group enabled.
Returns:
bool: Tax group enabled.
"""
return self.tax_group_enabled
def set_account_created_date_formatted(self, account_created_date_formatted):
"""Set account created date formatted.
Args:
account_created_date_formatted(str): Account created date formatted.
"""
self.account_created_date_formatted = account_created_date_formatted
def get_account_created_date_formatted(self):
"""Get account created date formatted.
Returns:
str: Account created date formatted.
"""
return self.account_created_date_formatted
def set_zi_migration_status(self, zi_migration_status):
"""Set zi migration status.
Args:
zi_migration_status(int): Zi migration status.
"""
self.zi_migration_status = zi_migration_status
def get_zi_migration_status(self):
"""Get zi migration status .
Returns:
int: Zi migration status.
"""
return self.zi_migration_status
def set_custom_fields(self, custom_field):
"""Set custom fields.
Args:
custom_field(instance): Custom field.
"""
self.custom_fields.append(custom_field)
def get_custom_fields(self):
"""Get custom fields.
Returns:
list of instance: List of custom fields object.
"""
return self.custom_fields
def set_user_role(self, user_role):
"""Set user role.
Args:
user_role(str): User role.
"""
self.user_role = user_role
def get_user_role(self):
"""Get user role.
Returns:
str: User role.
"""
return self.user_role
def set_is_new_customer_custom_fields(self, is_new_customer_custom_fields):
"""Set whether new customer custom fields or not.
Args:
is_new_customer_custom_fields(bool): True if new customer custom fields else False.
"""
self.is_new_customer_custom_fields = is_new_customer_custom_fields
def get_is_new_customer_custom_fields(self):
"""Get whether new customer custom fields or not.
Returns:
bool: True if new customer custom fields else False.
"""
return self.is_new_customer_custom_fields
"""
Base class for the contexts as used in the paper "How to Train Your
Differentiable Filter". Contains code that is shared between all three
contexts.
"""
# this code only works with tensorflow 1
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
import numpy as np
import os
import csv
from differentiable_filters.contexts import base_context as base
import differentiable_filters.utils.tensorflow_compatability as compat
class PaperBaseContext(base.BaseContext):
def __init__(self, param, mode):
"""
Base class for the contexts used in the paper containing shared
functions.
Parameters
----------
param : dict
A dictionary of arguments
mode : string
determines which parts of the model are trained. Use "filter" for
the whole model, "pretrain_obs" for pretraining the observation
related functions of the context in isolation or "pretrain_proc"
for pretraining the process-related functions of the context.
"""
super(base.BaseContext, self).__init__()
# determine the loss function
self.loss = param['loss']
self.batch_size = param['batch_size']
self.mixture_std = param['mixture_std']
self.debug = param['debug']
self.param = param
self.update_ops = []
# if we extract more than one training example from one record in the
# dataset, we need to indicate this
self.train_multiplier = 1
self.test_multiplier = 1
self.epoch_size = 1
self.mode = mode
self.scale = param['scale']
self.sl = param['sequence_length']
###########################################################################
# observation models
###########################################################################
def run_sensor_model(self, raw_observations, training):
"""
Process raw observations and return the predicted observations z
for the filter and an encoding for predicting the observation noise
Parameters
----------
raw_observations : list of tensors
Raw sensory observations
training : boolean tensor
flag that indicates if model is in training or test mode
Returns
-------
z : tensor [batch_size, dim_z]
Low-dimensional observations
enc : tensor or list of tensors
An encoding of the raw observations that can be used for predicting
heteroscedastic observation noise or the learned observation update
of the particle filter
"""
z, enc = self.sensor_model_layer(raw_observations, training)
return z, enc
def get_observation_noise(self, encoding, training):
"""
Observation noise model
Parameters
----------
encoding : tensor or list of tensors
An encoding of the raw observations that can be used for predicting
heteroscedastic observation noise
training : bool
training or testing?
Returns
-------
R : tensor [batch_size, dim_z, dim_z]
Observation noise covariance matrix
"""
if not self.param['learn_r']:
return tf.tile(self.R[None, :, :], [self.batch_size, 1, 1])
if self.param['hetero_r']:
if self.param['diagonal_covar']:
return self.observation_noise_hetero_diag(encoding, training)
else:
return self.observation_noise_hetero_full(encoding, training)
else:
if self.param['diagonal_covar']:
return self.observation_noise_const_diag(encoding, training)
else:
return self.observation_noise_const_full(encoding, training)
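# The flags above select among four parameterizations of R. The layer
# attributes referenced here (observation_noise_hetero_diag etc.) are assumed
# to be built by the concrete context subclasses:
#   learn_r = False                    -> fixed self.R tiled over the batch
#   hetero_r = True,  diagonal = True  -> heteroscedastic, diagonal covariance
#   hetero_r = True,  diagonal = False -> heteroscedastic, full covariance
#   hetero_r = False, diagonal = True  -> constant, diagonal covariance
#   hetero_r = False, diagonal = False -> constant, full covariance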
def get_observation_likelihood(self, particles, encoding, training):
"""
Learned observation update for the particle filter.
Consumes an encoding of the raw observations and the predicted
particles and returns the likelihood of each particle
Parameters
----------
particles : tensor [batch_size, num_particles, dim_z]
Predicted observations for the particles
encoding : tensor or list of tensors
An encoding of the raw observations
training : bool
training or testing?
Returns
-------
tf.keras.layer
A layer that predicts the likelihood of the observations under each
particle
"""
return self.likelihood_layer([particles, encoding], training)
def run_observation_model(self, state, training):
"""
Predicts the observations for a given state
Parameters
----------
state : tensor [batch_size, dim_x]
the predicted state
training : bool
training or testing?
Returns
-------
tf.keras.layer
A layer that computes the expected observations for the input
state and the Jacobian of the observation model
"""
return self.observation_model_layer(state, training)
###########################################################################
# process models
###########################################################################
def run_process_model(self, old_state, action, training):
"""
Predicts the next state given the old state and actions performed
Parameters
----------
old_state : tensor [batch_size, dim_x]
the previous state
action : tensor [batch_size, dim_u]
the performed actions
training : bool
training or testing?
Returns
-------
new_state : tensor [batch_size, dim_x]
the predicted next state
F : tensor [batch_size, dim_x, dim_x]
the jacobian of the process model
"""
if self.param['learn_process']:
new_state, F = \
self.process_model_learned_layer([old_state, action], training)
else:
new_state, F = \
self.process_model_analytical_layer([old_state, action],
training)
new_state = self.correct_state(new_state, diff=False)
return new_state, F
def get_process_noise(self, old_state, action, training):
"""
Consumes the old state and action and predicts the process
noise with the desired attributes
Parameters
----------
old_state : tensor [batch_size, dim_x]
the previous state
action : tensor [batch_size, dim_u]
the performed actions
training : bool
training or testing?
Returns
-------
tf.keras.layer
A layer that computes the desired process noise
"""
if not self.param['learn_q']:
return tf.tile(self.Q[None, :, :], [self.batch_size, 1, 1])
if self.param['learn_process']:
if self.param['hetero_q']:
if self.param['diagonal_covar']:
return self.process_noise_hetero_diag_lrn([old_state,
action],
training)
else:
return self.process_noise_hetero_full_lrn([old_state,
action],
training)
else:
if self.param['diagonal_covar']:
return self.process_noise_const_diag_lrn([old_state,
action],
training)
else:
return self.process_noise_const_full_lrn([old_state,
action],
training)
else:
if self.param['hetero_q']:
if self.param['diagonal_covar']:
return self.process_noise_hetero_diag_ana([old_state,
action],
training)
else:
return self.process_noise_hetero_full_ana([old_state,
action],
training)
else:
if self.param['diagonal_covar']:
return self.process_noise_const_diag_ana([old_state,
action],
training)
else:
return self.process_noise_const_full_ana([old_state,
action],
training)
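# get_process_noise mirrors the dispatch in get_observation_noise, with one
# extra axis: whether the process model itself is learned or analytical
# ('learn_process'), giving the 2 x 2 x 2 branches above. The *_lrn / *_ana
# layer attributes are assumed to be created in the concrete contexts.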
###########################################################################
# loss functions
###########################################################################
def get_filter_loss(self, prediction, label, step, training):
"""
Compute the loss for the filtering application - defined in the context
Args:
prediction: list of predicted tensors
label: list of label tensors
step: training step
training: boolean tensor, indicates if we compute a loss for
training or testing
Returns:
loss: the total loss for training the filtering application
metrics: additional metrics we might want to log for evaluation
metric-names: the names for those metrics
"""
raise NotImplementedError("Please implement this method")
def get_observation_loss(self, prediction, label, step, training):
"""
Compute the loss for the observation functions - defined in the context
Args:
prediction: list of predicted tensors
label: list of label tensors
step: training step
training: boolean tensor, indicates if we compute a loss for
training or testing
Returns:
loss: the total loss for training the observation preprocessing
metrics: additional metrics we might want to log for evaluation
metric-names: the names for those metrics
"""
raise NotImplementedError("Please implement this method")
def get_process_loss(self, prediction, label, step, training):
"""
Compute the loss for the process functions - defined in the context
Args:
prediction: list of predicted tensors
label: list of label tensors
step: training step
training: boolean tensor, indicates if we compute a loss for
training or testing
Returns:
loss: the total loss for training the process model
metrics: additional metrics we might want to log for evaluation
metric-names: the names for those metrics
"""
raise NotImplementedError("Please implement this method")
###########################################################################
# loss function helpers
###########################################################################
def _mixture_likelihood(self, diffs, weights, reduce_mean=False):
"""
Compute the negative log likelihood of y under a Gaussian
mixture model defined by a set of particles and their weights.
Parameters
----------
diffs : tensor
difference between y and the states of the particles
weights : tensor
weights of the particles
reduce_mean : bool, optional
if true, return the mean likelihood loss over the complete tensor.
The default is False.
Returns
-------
likelihood : tensor
the negative log likelihood
"""
dim = compat.get_dim_int(diffs, -1)
num = compat.get_dim_int(diffs, -2)
# remove nans and infs and replace them with high values/zeros
diffs = tf.where(tf.math.is_finite(diffs), diffs,
tf.ones_like(diffs)*1e5/self.scale)
weights = tf.where(tf.math.is_finite(weights), weights,
tf.zeros_like(weights))
weights /= tf.reduce_sum(weights, axis=-1, keepdims=True)
covar = np.ones(dim, dtype=np.float32)
for k in range(dim):
covar[k] *= self.mixture_std/self.scale
covar = tf.linalg.diag(tf.square(covar))
if len(diffs.get_shape().as_list()) > 3:
sl = compat.get_dim_int(diffs, 1)
diffs = tf.reshape(diffs, [self.batch_size, -1, num, dim, 1])
covar = tf.tile(covar[None, None, None, :, :],
[self.batch_size, sl, num, 1, 1])
else:
sl = 1
diffs = tf.reshape(diffs, [self.batch_size, num, dim, 1])
covar = tf.tile(covar[None, None, :, :],
[self.batch_size, num, 1, 1])
# transfer to float 64 for higher accuracy
covar = tf.cast(covar, tf.float64)
diffs = tf.cast(diffs, tf.float64)
weights = tf.cast(weights, tf.float64)
exponent = tf.matmul(tf.matmul(tf.linalg.matrix_transpose(diffs),
tf.linalg.inv(covar)), diffs)
exponent = tf.reshape(exponent, [self.batch_size, sl, num])
normalizer = tf.math.log(tf.linalg.det(covar)) + \
tf.cast(dim * tf.log(2*np.pi), tf.float64)
log_like = -0.5 * (exponent + normalizer)
log_like = tf.reshape(log_like, [self.batch_size, sl, num])
log_like = tf.where(tf.greater_equal(log_like, -500), log_like,
tf.ones_like(log_like)*-500)
exp = tf.exp(log_like)
# the per particle likelihoods are weighted and summed in the particle
# dimension
weighted = weights * exp
weighted = tf.reduce_sum(weighted, axis=-1)
# compute the negative logarithm and undo the bias
likelihood = - (tf.math.log(tf.maximum(weighted, 1e-300)))
if reduce_mean:
likelihood = tf.reduce_mean(likelihood)
likelihood = tf.cast(likelihood, tf.float32)
return likelihood
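# In equation form, with per-particle difference d_k = y - x_k, normalized
# weights w_k and the fixed covariance Sigma = diag((mixture_std/scale)^2)
# constructed above:
#   log N(d_k; 0, Sigma) = -1/2 * ( d_k^T Sigma^{-1} d_k + log det Sigma + D log(2 pi) )
#   loss(y)              = -log( sum_k w_k * exp(log N(d_k; 0, Sigma)) )
# A rough NumPy cross-check for a single time step (d: [num, dim], w: [num],
# w already normalized; the clipping and NaN handling from above omitted):
#   var = (mixture_std / scale) ** 2
#   log_n = -0.5 * (np.einsum('kd,kd->k', d, d) / var
#                   + dim * np.log(var) + dim * np.log(2 * np.pi))
#   nll = -np.log(np.maximum(np.sum(w * np.exp(log_n)), 1e-300))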
######################################
# Evaluation
######################################
def save_log(self, log_dict, out_dir, step, num=0, mode='filter'):
"""
A helper to save the results of testing a filter on a given problem.
Parameters
----------
log_dict : dict
dictionary of the losses that should be saved.
# python/webgme_bindings/webgme_bindings/test.py
"""
To run with coverage first:
pip install coverage
Then from <rootDir>/python/webgme_bindings:
coverage run -m unittest discover -s <rootDir>/python/webgme_bindings/webgme_bindings -p test.py -t <rootDir>/python/webgme_bindings
coverage html
coverage run -m unittest discover -s C:/Users/patrik85/GIT/webgme-core-bindings/python/webgme_bindings/webgme_bindings -p test.py -t C:/Users/patrik85/GIT/webgme-core-bindings/python/webgme_bindings
"""
import unittest
import os
import signal
import subprocess
import time
import logging
from .webgme import WebGME
from .exceptions import JSError, CoreIllegalArgumentError, CoreIllegalOperationError
from .pluginbase import PluginBase
logger = logging.getLogger('test-logger')
logger.setLevel(logging.ERROR)
# Stuff needed for calling webgme from nodejs
WEBGME_IMPORT_BIN = 'node_modules/webgme-engine/src/bin/import.js'
SEED_FILE = 'node_modules/webgme-engine/seeds/EmptyProject.webgmex'
TEST_PROJECT = 'PythonTestProject'
PORT = '5555'
dir_path = os.path.dirname(os.path.realpath(__file__))
root_dir = os.path.join(dir_path, '..', '..', '..')
my_env = os.environ.copy()
my_env['NODE_ENV'] = 'test'
COREZMQ_SERVER_FILE = os.path.join(root_dir, 'bin', 'corezmq_server.js')
def node_dict_sort(n_dict):
return n_dict['nodePath']
class ConnectedTestClass(unittest.TestCase):
def setUp(self):
if 'DO_NOT_START_SERVER' in my_env:
self.node_process = None
else:
self.node_process = subprocess.Popen(['node', COREZMQ_SERVER_FILE, TEST_PROJECT, '-p', PORT],
env=my_env, cwd=root_dir)
self.webgme = WebGME(PORT, logger)
self.project = self.webgme.project
self.core = self.webgme.core
self.util = self.webgme.util
def tearDown(self):
self.webgme.disconnect()
if self.node_process is not None:
self.node_process.send_signal(signal.SIGTERM)
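# The same connection pattern works outside the test suite, assuming a corezmq
# server is already listening on PORT (illustrative sketch only, mirroring the
# calls used in setUp/tearDown and the tests below):
#   webgme = WebGME(PORT, logger)
#   root = webgme.core.load_root(webgme.project.get_root_hash('master'))
#   fco = webgme.core.get_fco(root)
#   webgme.disconnect()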
# class ProjectTests(object):
class ProjectTests(ConnectedTestClass):
def test_constants(self):
const = self.project.CONSTANTS
self.assertTrue(const['COMMIT_TYPE'] == 'commit')
const2 = self.project.CONSTANTS
self.assertTrue(const is const2) # Referencing the same object
def test_metadata_related(self):
self.assertEqual(self.project.get_user_id(), 'guest')
p_info = self.project.get_project_info()
self.assertEqual(p_info['owner'], 'guest')
self.assertEqual(p_info['_id'], 'guest+{0}'.format(TEST_PROJECT))
self.assertTrue('info' in p_info)
self.assertTrue('branches' in p_info)
self.assertTrue('rights' in p_info)
self.assertTrue('hooks' in p_info)
def test_commit_related(self):
branch_hash = self.project.get_branch_hash('master')
root_hash = self.project.get_root_hash('master')
commit_obj = self.project.get_commit_object('master')
self.assertTrue('_id' in commit_obj)
self.assertTrue('time' in commit_obj)
self.assertTrue('updater' in commit_obj)
self.assertTrue('parents' in commit_obj)
self.assertTrue('message' in commit_obj)
self.assertEqual(commit_obj['_id'], branch_hash)
self.assertEqual(commit_obj['root'], root_hash)
c_res1 = self.project.make_commit(None, [branch_hash], root_hash, {}, 'commit1')
c_res2 = self.project.make_commit(None, [branch_hash], root_hash, {}, 'commit2')
common = self.project.get_common_ancestor_commit(c_res1['hash'], c_res2['hash'])
self.assertEqual(common, branch_hash)
hist = self.project.get_history(c_res1['hash'], 100)
self.assertEqual(len(hist), 2)
time_ms = int(time.time() * 1000)
hist = self.project.get_commits(time_ms, 100)
self.assertTrue(len(hist) > 2)
hist = self.project.get_history([c_res1['hash'], c_res2['hash']], 100)
self.assertEqual(len(hist), 3)
def test_branch_related(self):
init_branches = self.project.get_branches()
self.assertEqual(len(init_branches.keys()), 1)
self.assertEqual(list(init_branches.keys())[0], 'master')
branch_hash = self.project.get_branch_hash('master')
self.assertEqual(init_branches['master'], branch_hash)
self.project.create_branch('new_branch', branch_hash)
res = self.project.create_branch('new_branch2', branch_hash)
self.assertEqual(res['hash'], branch_hash)
self.assertEqual(len(self.project.get_branches().keys()), 3)
root_hash = self.project.get_root_hash('new_branch')
res = self.project.make_commit('new_branch', [branch_hash], root_hash, {}, 'Test commit')
self.project.set_branch_hash('new_branch2', res['hash'], branch_hash)
self.assertDictEqual(self.project.get_branches(), {'new_branch': res['hash'],
'new_branch2': res['hash'],
'master': branch_hash,
})
self.project.delete_branch('new_branch', branch_hash)
res = self.project.delete_branch('new_branch2', branch_hash)
self.assertEqual(res['hash'], '')
self.assertEqual(len(self.project.get_branches().keys()), 1)
def test_tag_related(self):
init_tags = self.project.get_tags()
self.assertEqual(len(init_tags.keys()), 0)
branch_hash = self.project.get_branch_hash('master')
self.project.create_tag('tage', branch_hash)
self.project.create_tag('tage2', branch_hash)
self.assertDictEqual(self.project.get_tags(), {'tage': branch_hash,
'tage2': branch_hash,
})
self.project.delete_tag('tage')
self.project.delete_tag('tage2')
self.assertDictEqual(self.project.get_tags(), {})
def test_exceptions(self):
# All should throw JSError
self.assertRaises(JSError, self.project.create_branch, 1, 1)
self.assertRaises(JSError, self.project.create_tag, 1, 1)
self.assertRaises(JSError, self.project.delete_branch, 1, 1)
self.assertRaises(JSError, self.project.delete_tag, 1)
self.assertRaises(JSError, self.project.get_branch_hash, 1)
# self.assertRaises(JSError, self.project.get_branches)
self.assertRaises(JSError, self.project.get_commit_object, 1)
self.assertRaises(JSError, self.project.get_commits, 'Herro', 1)
self.assertRaises(JSError, self.project.get_common_ancestor_commit, 1, 1)
self.assertRaises(JSError, self.project.get_history, 1, 1)
# self.assertRaises(JSError, self.project.get_project_info)
self.assertRaises(JSError, self.project.get_root_hash, 1)
# self.assertRaises(JSError, self.project.get_tags)
# self.assertRaises(JSError, self.project.get_user_id)
self.assertRaises(JSError, self.project.make_commit, 1, 1, 1, 1, 1)
self.assertRaises(JSError, self.project.set_branch_hash, 1, 1, 1)
# class CoreTests(object):
class CoreTests(ConnectedTestClass):
def equal(self, node1, node2):
return self.util.equal(node1, node2)
def setUp(self):
super(CoreTests, self).setUp()
root_hash = self.project.get_root_hash('master')
self.root = self.core.load_root(root_hash)
self.fco = self.core.get_fco(self.root)
self.child = self.core.create_child(self.root, self.fco)
self.core.set_attribute(self.child, 'name', 'child')
self.child2 = self.core.create_child(self.root, self.fco)
self.core.set_attribute(self.child2, 'name', 'child2')
self.child_instance = self.core.create_child(self.root, self.child)
self.core.set_attribute(self.child_instance, 'name', 'child_instance')
def tearDown(self):
self.util.unload_root(self.root)
super(CoreTests, self).tearDown()
# @unittest.skip("Temp")
def test_constants(self):
const = self.core.CONSTANTS
self.assertTrue(const['META_SET_NAME'] == 'MetaAspectSet')
const2 = self.core.CONSTANTS
self.assertTrue(const is const2) # Referencing the same object
# @unittest.skip("Temp")
def test_load_by_path(self):
node = self.core.load_by_path(self.root, self.core.get_path(self.child))
self.assertTrue(self.equal(node, self.child))
# @unittest.skip("Temp")
def test_basic_properties(self):
self.core.set_attribute(self.child, 'intAttr', 1)
self.core.set_attribute(self.child, 'floatAttr', 1.1)
self.core.set_attribute(self.child, 'boolAttr', True)
self.core.set_attribute(self.child_instance, 'boolAttr', False)
self.core.set_registry(self.child, 'intReg', 1)
self.core.set_registry(self.child, 'floatReg', 1.1)
self.core.set_registry(self.child, 'boolReg', True)
self.core.set_registry(self.child_instance, 'boolReg', False)
# General properties
relid = self.core.get_relid(self.child)
self.assertEqual(self.core.get_path(self.child), '/{0}'.format(relid))
self.assertEqual(len(self.core.get_guid(self.child)), 36)
self.assertEqual(len(self.core.get_hash(self.fco)), 41)
self.assertFalse(self.core.is_abstract(self.child))
self.assertFalse(self.core.is_connection(self.child))
# Attributes
self.assertEqual(self.core.get_attribute(self.child, 'name'), 'child')
self.assertEqual(self.core.get_attribute(self.child_instance, 'intAttr'), 1)
self.assertEqual(self.core.get_attribute(self.child_instance, 'floatAttr'), 1.1)
self.assertEqual(self.core.get_attribute(self.child_instance, 'boolAttr'), False)
self.assertEqual(self.core.get_own_attribute(self.child_instance, 'intAttr'), None)
attrs = self.core.get_attribute_names(self.child_instance)
own_attrs = self.core.get_own_attribute_names(self.child_instance)
valid_attrs = self.core.get_valid_attribute_names(self.child_instance)
own_valid_attrs = self.core.get_own_valid_attribute_names(self.child_instance)
self.assertEqual(len(attrs), 4)
self.assertEqual(len(own_attrs), 2)
self.assertEqual(len(valid_attrs), 1)
self.assertEqual(len(own_valid_attrs), 0)
self.core.del_attribute(self.child_instance, 'boolAttr')
self.assertEqual(len(self.core.get_own_attribute_names(self.child_instance)), 1)
self.core.rename_attribute(self.child, 'intAttr', 'intAttrNew')
self.assertEqual(self.core.get_attribute(self.child_instance, 'intAttrNew'), 1)
self.assertEqual(self.core.get_registry(self.child_instance, 'intAttr'), None)
# Registry
self.assertEqual(self.core.get_registry(self.child_instance, 'intReg'), 1)
self.assertEqual(self.core.get_registry(self.child_instance, 'floatReg'), 1.1)
self.assertEqual(self.core.get_registry(self.child_instance, 'boolReg'), False)
self.assertEqual(self.core.get_own_registry(self.child_instance, 'intReg'), None)
regs = self.core.get_registry_names(self.child_instance)
own_regs = self.core.get_own_registry_names(self.child_instance)
self.assertEqual(len(regs), 5)
self.assertEqual(len(own_regs), 1)
self.core.del_registry(self.child_instance, 'boolReg')
self.assertEqual(len(self.core.get_own_registry_names(self.child_instance)), 0)
self.core.rename_registry(self.child, 'intReg', 'intRegNew')
self.assertEqual(self.core.get_registry(self.child_instance, 'intRegNew'), 1)
self.assertEqual(self.core.get_registry(self.child_instance, 'intReg'), None)
# @unittest.skip("Temp")
def test_node_creation_deletion(self):
self.assertEqual(len(self.core.load_children(self.child_instance)), 0)
new_child = self.core.create_child(self.child_instance, self.fco)
self.assertEqual(len(self.core.load_children(self.child_instance)), 1)
new_child2 = self.core.copy_node(new_child, self.child_instance)
self.assertEqual(len(self.core.load_children(self.child_instance)), 2)
new_child3 = self.core.create_node({'parent': self.child_instance, 'base': self.fco})
self.assertEqual(len(self.core.load_children(self.child_instance)), 3)
new_children = self.core.copy_nodes([new_child, new_child2], self.child_instance)
self.assertEqual(len(self.core.load_children(self.child_instance)), 5)
self.core.delete_node(new_child)
self.core.delete_node(new_child2)
self.core.delete_node(new_child3)
self.assertEqual(len(self.core.load_children(self.child_instance)), 2)
self.core.move_node(new_children[0], self.child2)
self.core.move_node(new_children[1], self.child2)
self.assertEqual(len(self.core.load_children(self.child_instance)), 0)
self.assertEqual(len(self.core.load_children(self.child2)), 2)
# @unittest.skip("Temp")
def test_child_parent_related(self):
self.assertEqual(self.core.get_parent(self.root), None)
self.assertTrue(self.equal(self.core.get_root(self.fco), self.root))
children = self.core.load_children(self.root)
own_children = self.core.load_own_children(self.root)
child_paths = self.core.get_children_paths(self.root)
own_child_paths = self.core.get_own_children_paths(self.root)
child_relids = self.core.get_children_relids(self.root)
own_child_relids = self.core.get_own_children_relids(self.root)
tree_nodes = self.core.load_sub_tree(self.root)
own_tree_nodes = self.core.load_own_sub_tree(self.root)
self.assertEqual(len(children), 4)
self.assertEqual(len(children), len(child_paths))
self.assertEqual(len(children), len(child_relids))
self.assertEqual(len(children), len(tree_nodes) - 1) # root included in tree
children.sort(key=node_dict_sort)
own_children.sort(key=node_dict_sort)
child_paths.sort()
own_child_paths.sort()
child_relids.sort()
own_child_relids.sort()
tree_nodes.sort(key=node_dict_sort)
own_tree_nodes.sort(key=node_dict_sort)
self.assertEqual(children, own_children)
self.assertEqual(own_child_paths, child_paths)
self.assertEqual(own_child_relids, child_relids)
self.assertEqual(tree_nodes, own_tree_nodes)
name_to_child = {}
for child in children:
rel_id = self.core.get_relid(child)
self.assertTrue(self.util.equal(self.core.load_child(self.root, rel_id), child))
name_to_child[self.core.get_attribute(child, 'name')] = child
self.assertTrue(self.util.equal(self.core.get_parent(child), self.root))
def_info = self.core.get_child_definition_info(self.root, child)
self.assertTrue(self.util.equal(def_info['ownerNode'], self.root))
self.assertTrue(self.util.equal(def_info['targetNode'], self.fco))
self.assertEqual(len(name_to_child.keys()), 4)
self.assertEqual(len(self.core.get_children_hashes(self.root).keys()), 4)
self.assertTrue(self.equal(self.core.get_common_parent(children), self.root))
self.assertEqual(self.core.get_common_parent([children[0]]), self.root)
self.assertEqual(self.core.get_common_parent([self.root]), None)
self.assertTrue(self.core.is_valid_new_parent(self.child2, self.child))
self.assertFalse(self.core.is_valid_new_parent(self.child2, self.fco))
self.assertTrue(self.core.is_valid_new_child(self.child2, self.child))
self.assertFalse(self.core.is_valid_new_child(self.fco, self.fco))
# @unittest.skip("Temp")
def test_instance_base_related(self):
self.assertEqual(self.core.get_base(self.fco), None)
self.assertEqual(self.core.get_type_root(self.fco), None)
self.assertTrue(self.equal(self.core.get_type_root(self.child), self.fco))
self.assertTrue(self.equal(self.core.get_base(self.child_instance), self.child))
instances = self.core.load_instances(self.child)
instance_paths = self.core.get_instance_paths(self.child)
self.assertEqual(len(instances), 1)
self.assertEqual(len(instance_paths), 1)
self.assertEqual(instance_paths[0], self.core.get_path(self.child_instance))
self.assertTrue(self.equal(instances[0], self.child_instance))
self.assertTrue(self.core.is_instance_of(self.child_instance, self.child))
self.assertTrue(self.core.is_type_of(self.child_instance, self.child))
self.assertTrue(self.core.is_instance_of(self.child_instance, self.core.get_path(self.child)))
self.assertTrue(self.core.is_type_of(self.child_instance, self.core.get_path(self.child)))
self.assertFalse(self.core.is_instance_of(self.child, self.child_instance))
self.assertFalse(self.core.is_type_of(self.child, self.child_instance))
self.assertTrue(self.equal(self.fco, self.core.get_base_root(self.child_instance)))
self.assertTrue(self.equal(self.fco, self.core.get_base_type(self.child_instance)))
self.assertTrue(self.equal(self.fco, self.core.get_meta_type(self.child_instance)))
self.assertTrue(self.equal(self.core.get_base_root(self.root), self.root))
self.assertEqual(self.core.get_base_type(self.root), None)
self.assertEqual(self.core.get_meta_type(self.root), None)
fco_instances = self.core.load_instances(self.fco)
self.assertTrue(self.equal(self.core.get_common_base(fco_instances), self.fco))
self.assertEqual(self.core.get_common_base([self.root, self.fco]), None)
self.assertEqual(self.core.get_common_base([self.fco]), None)
base_types = self.core.get_base_types(self.child)
self.assertEqual(len(base_types), 1)
self.assertTrue(self.equal(base_types[0], self.fco))
self.assertTrue(self.core.is_valid_new_base(self.child_instance, self.child2))
self.assertFalse(self.core.is_valid_new_base(self.child, self.child_instance))
self.core.set_base(self.child_instance, self.child2)
self.assertTrue(self.equal(self.core.get_base(self.child_instance), self.child2))
# @unittest.skip("Temp")
def test_pointer_related(self):
self.core.set_pointer(self.child, 'ptr', self.fco)
ptr_target = self.core.load_pointer(self.child_instance, 'ptr')
ptr_path = self.core.get_pointer_path(self.child_instance, 'ptr')
self.assertTrue(self.core.get_path(ptr_target), ptr_path)
self.assertEqual(self.core.get_own_pointer_path(self.child_instance, 'ptr'), None)
ptr_names = self.core.get_pointer_names(self.child_instance)
own_ptr_names = self.core.get_own_pointer_names(self.child_instance)
self.assertEqual(len(ptr_names), 2)
ptr_names.sort()
own_ptr_names.sort()
self.assertEqual(len(ptr_names), 2)
self.assertEqual(len(own_ptr_names), 1)
self.assertEqual(ptr_names[0], own_ptr_names[0])
coll_names = self.core.get_collection_names(self.fco)
self.assertEqual(len(coll_names), 2)
coll_names.sort()
self.assertEqual(coll_names, ['base', 'ptr'])
coll = self.core.load_collection(self.fco, 'ptr')
coll_paths = self.core.get_collection_paths(self.fco, 'ptr')
self.assertEqual(len(coll), 1)
self.assertEqual(len(coll_paths), 1)
self.assertEqual(self.core.get_path(coll[0]), coll_paths[0])
self.core.rename_pointer(self.child, 'ptr', 'new_ptr')
self.assertEqual(self.core.load_pointer(self.child, 'ptr'), None)
self.assertTrue(self.equal(self.core.load_pointer(self.child, 'new_ptr'), self.fco))
self.core.del_pointer(self.child, 'new_ptr')
self.assertEqual(self.core.load_pointer(self.child, 'new_ptr'), None)
self.core.set_pointer(self.child, 'ptr', self.fco)
self.assertTrue(self.equal(self.core.load_pointer(self.child, 'ptr'), self.fco))
self.core.delete_pointer(self.child, 'ptr')
self.assertEqual(self.core.load_pointer(self.child, 'ptr'), None)
# @unittest.skip("Temp")
def test_set_related(self):
self.core.create_set(self.child, 'set')
self.core.add_member(self.child, 'set', self.child2)
members = self.core.load_members(self.child_instance, 'set')
own_members = self.core.load_own_members(self.child_instance, 'set')
member_paths = self.core.get_member_paths(self.child_instance, 'set')
own_member_paths = self.core.get_own_member_paths(self.child_instance, 'set')
self.assertEqual(len(members), 1)
self.assertEqual(len(own_members), 0)
self.assertEqual(len(own_member_paths), 0)
self.assertEqual([self.core.get_path(members[0])], member_paths)
# Set attrs and regs
self.core.set_set_attribute(self.child, 'set', 'attr', 'val')
self.assertEqual(self.core.get_set_attribute_names(self.child_instance, 'set'), ['attr'])
self.assertEqual(self.core.get_own_set_attribute_names(self.child_instance, 'set'), [])
self.assertEqual(self.core.get_set_attribute(self.child_instance, 'set', 'attr'), 'val')
self.assertEqual(self.core.get_own_set_attribute(self.child_instance, 'set', 'attr'), None)
self.core.del_set_attribute(self.child, 'set', 'attr')
self.assertEqual(self.core.get_set_attribute(self.child_instance, 'set', 'attr'), None)
self.core.set_set_registry(self.child, 'set', 'reg', 'val')
self.assertEqual(self.core.get_set_registry_names(self.child_instance, 'set'), ['reg'])
self.assertEqual(self.core.get_own_set_registry_names(self.child_instance, 'set'), [])
self.assertEqual(self.core.get_set_registry(self.child_instance, 'set', 'reg'), 'val')
self.assertEqual(self.core.get_own_set_registry(self.child_instance, 'set', 'reg'), None)
self.core.del_set_registry(self.child, 'set', 'reg')
self.assertEqual(self.core.get_set_registry(self.child_instance, 'set', 'reg'), None)
# Set-member attrs and regs
p = member_paths[0]
self.core.set_member_attribute(self.child, 'set', p, 'attr', 'val')
self.assertEqual(self.core.get_member_attribute_names(self.child_instance, 'set', p), ['attr'])
self.assertEqual(self.core.get_member_own_attribute_names(self.child_instance, 'set', p), [])
self.assertEqual(self.core.get_member_attribute(self.child_instance, 'set', p, 'attr'), 'val')
self.assertEqual(self.core.get_member_own_attribute(self.child_instance, 'set', p, 'attr'), None)
self.core.del_member_attribute(self.child, 'set', p, 'attr')
self.assertEqual(self.core.get_member_attribute(self.child_instance, 'set', p, 'attr'), None)
self.core.set_member_registry(self.child, 'set', p, 'reg', 'val')
self.assertEqual(self.core.get_member_registry_names(self.child_instance, 'set', p), ['reg'])
self.assertEqual(self.core.get_member_own_registry_names(self.child_instance, 'set', p), [])
self.assertEqual(self.core.get_member_registry(self.child_instance, 'set', p, 'reg'), 'val')
self.assertEqual(self.core.get_member_own_registry(self.child_instance, 'set', p, 'reg'), None)
self.core.del_member_registry(self.child, 'set', p, 'reg')
self.assertEqual(self.core.get_member_registry(self.child_instance, 'set', p, 'reg'), None)
# Renaming, deletions etc.
self.core.rename_set(self.child, 'set', 'newSet')
self.assertEqual(self.core.get_set_names(self.child_instance), ['newSet'])
self.assertEqual(self.core.get_own_set_names(self.child_instance), [])
self.core.del_member(self.child, 'newSet', p)
self.assertEqual(self.core.load_members(self.child, 'newSet'), [])
self.core.del_set(self.child, 'newSet')
self.assertEqual(self.core.get_set_names(self.child), [])
self.core.create_set(self.child, 'set')
self.core.create_set(self.child, 'set2')
self.core.add_member(self.child, 'set', self.child2)
self.assertEqual(self.core.get_set_names(self.child), ['set', 'set2'])
self.assertEqual(self.core.get_member_paths(self.child, 'set'), [p])
self.assertEqual(self.core.get_member_paths(self.child, 'set2'), [])
self.core.move_member(self.child, p, 'set', 'set2')
# TODO: Should move_member delete the set when its the last member?
# self.assertEqual(self.core.get_member_paths(self.child, 'set'), [])
self.assertEqual(self.core.get_member_paths(self.child, 'set2'), [p])
self.core.delete_set(self.child, 'set')
self.core.delete_set(self.child, 'set2')
self.assertEqual(self.core.get_set_names(self.child), [])
# @unittest.skip("Temp")
def test_meta_and_mixin_related(self):
p = self.core.get_path(self.child)
p2 = self.core.get_path(self.child2)
self.assertFalse(self.core.is_meta_node(self.child))
self.core.add_member(self.root, 'MetaAspectSet', self.child)
self.core.add_member(self.root, 'MetaAspectSet', self.child2)
self.core.add_mixin(self.child, self.core.get_path(self.child2))
self.assertTrue(self.core.is_meta_node(self.child))
all_meta_nodes = self.core.get_all_meta_nodes(self.root)
meta_nodes_paths = [p, p2]
meta_nodes_paths.sort()
self.assertEqual(len(list(all_meta_nodes.keys())), 3)
self.assertTrue(self.equal(self.core.get_meta_type(self.child_instance), self.child))
self.assertTrue(self.equal(self.core.get_base_type(self.child_instance), self.child))
base_types = list(map(lambda b: self.core.get_path(b), self.core.get_base_types(self.child_instance)))
base_types.sort()
self.assertEqual(base_types, meta_nodes_paths)
# Containment
self.core.set_child_meta(self.child, self.child2)
child_meta = self.core.get_children_meta(self.child)
self.assertEqual(len(list(child_meta.keys())), 1)
self.assertEqual(child_meta[p2], {'max': -1, 'min': -1})
self.assertTrue(self.core.is_valid_child_of(self.child2, self.child))
self.assertEqual(self.core.get_valid_children_paths(self.child), child_meta.keys())
child_info = self.core.get_child_definition_info(self.child_instance, self.child2)
self.assertTrue(self.equal(child_info['ownerNode'], self.child))
self.assertTrue(self.equal(child_info['targetNode'], self.child2))
valid_children = self.core.get_valid_children_meta_nodes({'node': self.child_instance})
self.assertEqual(len(valid_children), 2)
valid_children = list(map(lambda c: self.core.get_path(c), valid_children))
valid_children.sort()
self.assertEqual(valid_children, meta_nodes_paths)
self.core.set_children_meta_limits(self.child, 1, 2)
child_meta = self.core.get_children_meta(self.child)
self.assertEqual(len(list(child_meta.keys())), 3)
self.assertEqual(child_meta[p2], {'max': -1, 'min': -1})
self.assertEqual(child_meta['min'], 1)
self.assertEqual(child_meta['max'], 2)
# Attributes
self.assertEqual(self.core.get_valid_attribute_names(self.child_instance), ['name'])
self.core.set_attribute_meta(self.child, 'attr', {'type': 'string', 'default': 'val'})
self.assertEqual(len(self.core.get_valid_attribute_names(self.child_instance)), 2)
self.assertEqual(self.core.get_own_valid_attribute_names(self.child_instance), [])
self.assertTrue(self.core.is_valid_attribute_value_of(self.child_instance, 'attr', 'aString'))
self.assertFalse(self.core.is_valid_attribute_value_of(self.child_instance, 'attr', 1))
self.assertTrue(self.equal(self.core.get_attribute_definition_owner(self.child_instance, 'attr'), self.child))
self.core.rename_attribute_meta(self.child, 'attr', 'newAttr')
self.assertEqual(self.core.get_own_valid_attribute_names(self.child), ['newAttr'])
attr_meta = self.core.get_attribute_meta(self.child, 'newAttr')
self.assertEqual(attr_meta, {'type': 'string'})
# Pointers
self.core.set_pointer_meta_target(self.child, 'ptr',
# -*- coding: utf-8 -*-
''' Some Tools for Pip-In - Verbose Documentation '''
# ##################################################################################
# MG ILLUMINATION #
# First Crazy Debroussailleur : jDepoortere #
# Author : cPOTTIER #
# Last Update : 27-06-2016 #
# ##################################################################################
#================================================================================================================================== PRIMARY CLASS
import sys, ink.proto
path_modules = '/u/'+ink.io.ConnectUserInfo()[2]+'/Users/COM/InK/Scripts/Python/proj/pipe/ink/exemples'
sys.path.append(path_modules)
if '__InK__connect' in sys.modules:
del(sys.modules["__InK__connect"])
import __InK__connect
from __InK__connect import *
else:
import __InK__connect
from __InK__connect import *
#==================================================================================================================== Ink external useful CLASSES
__PIPEIN_GRAPH = __InK__connect.__PIPEIN_GRAPH__(graphs.DEFAULT, None) # protograph, verbose mode
#================================================================================================================================================
# =========================================================================================================================== AK02_LAYOUT_BuildCameraModel
def AK02_LAYOUT_BuildCameraModel(autoload='True',autosave='True',save_private='False',cat='MAIN',all_cats='True',_cat=None):
'''
| /
| \ Tool - Last update 27-06-2016
----------------------------------------------------------------------
- Organize MODEL Context Layout
- autosave graphs in :
MODELING/CHARS/MODTECH/
-> M_MAIN.inkGraph
-> M_SECONDARY.inkGraph
-> M_TERTIARY.inkGraph
- Auto Execution or Select CAMERA-Actor_ModChars-Ok.a7
----------------------------------------------------------------------
todo :
- release handling of filters = None
- r&d : check longest name for optimal automatic spacing (ecart)
'''
# PARAMS MODIFIABLES ############################################################################################
A7refPos_X = None # X origin a7 ref - None for not Used
A7refPos_Y = None # Y origin a7 ref - None for not Used
offset_X = 6 # 4 # relative to X origin a7 ref - can be negative
offset_Y = 3 # 0 # relative to Y origin a7 ref - can be negative
ecart_a7_X = 'Auto' # 3 # X space between streams a7 , if = 'Auto', space depends of A7 name longer
ecart_a7_Y = None # 2 # Y space between streams a7
n_A7_perColumn = None # 6 # max n streams vertically
n_A7_perRow = None # 6 # max n streams horizontally - None for not Used
X_space_betweenColumns = 2
quinconce = None # True False # boolean, offsets every other a7 (staggered layout)
quinconce_X = None # 0.5 # x offset, 0.5/-0.5 left-right, min max
quinconce_start = None # 0 # switch first quinconce_X value to modulo 0.5 to -0.5
n_col_byGroup = None # 2 # n column by a7 grouped
X_space_betweenGroup = 2 # ecart_a7_X*2 # subjectif, to do better in relation with n assets, n group etc
n_groups_grouded = None # todo paquet de groups - None for not Used
# DONT TOUCH ###################################################################################################
params = {}
if A7refPos_X != None :
params['A7refPos_X'] = A7refPos_X
if A7refPos_Y != None :
params['A7refPos_Y'] = A7refPos_Y
if offset_X != None :
params['offset_X'] = offset_X
if offset_Y != None :
params['offset_Y'] = offset_Y
if ecart_a7_X != None :
params['ecart_a7_X'] = ecart_a7_X
if ecart_a7_Y != None :
params['ecart_a7_Y'] = ecart_a7_Y
if n_A7_perColumn != None :
params['n_A7_perColumn'] = n_A7_perColumn
if n_A7_perRow != None :
params['n_A7_perRow'] = n_A7_perRow
if X_space_betweenColumns != None :
params['X_space_betweenColumns'] = X_space_betweenColumns
if quinconce != None :
params['quinconce'] = quinconce
if quinconce_X != None :
params['quinconce_X'] = quinconce_X
if quinconce_start != None :
params['quinconce_start'] = quinconce_start
if n_col_byGroup != None :
params['n_col_byGroup'] = n_col_byGroup
if X_space_betweenGroup != None :
params['X_space_betweenGroup'] = X_space_betweenGroup
if n_groups_grouded != None :
params['n_groups_grouded'] = n_groups_grouded
#=========
autoLoadA7ref = False
if autoload == 'True':
autoLoadA7ref = True # in 2 variables because this script can be call external
# Functions ###################################################################################################
def showStream(protoGraph,assetProto,typeStreams,catFamily,A7posRef=None):
''' show a7 streams '''
A7pos = None
Filters = {'type': ['Model']}
family = ['CHARS']
family.append(catFamily[0])
Filters['family'] = family
# Filters = {'family': ['CHARS', 'MAIN'] , 'type': ['Model']} # Order matters for family filters: 1-CHARS 2-MAIN etc
StreamProtoList = __PIPEIN_GRAPH.GetStreams(typeStreams,protoGraph,layout,assetProto,Filters)
return StreamProtoList
# End Functions ################################################################################################
#=========
cat_array = []
catFamily = [] # for external request
if _cat != None :
cat = _cat[0]
for a in _cat: # assumption: original referenced undefined _catFamily; iterating _cat here
catFamily.append(a)
if _cat == None :
catFamily.append(cat)
cat = cat
cat_array.append(cat)
if all_cats == 'True':
cat_array = ['MAIN', 'SECONDARY', 'TERTIARY']
for cat_in_Array in cat_array:
A7R = 'CAMERA-Actor_ModChars-Ok.a7'
A7RefPath = 'LIB/CAMERAS/CAMERA/Ok/'+A7R
myG = 'M_'+cat_in_Array+'.inkGraph'
myGraph = 'MODELING/CHARS/MODTECH/'+myG
myGraphLocal = LOCALPATH+'M_'+cat_in_Array+'.inkGraph' # for debug
catFamily = []
catFamily.append(cat_in_Array)
#======================================================================
#========= Declare protograph
#======================================================================
protoGraph = ink.proto.Graph( graphs.DEFAULT )
layout = protoGraph.GetLayout()
#======================================================================
#========= Add Get Ref A7
#======================================================================
if autoLoadA7ref == True:
__PIPEIN_GRAPH.add_A7('dirPath',A7RefPath,True) # _type, A7(str,list,dic), A7Select[optional], A7position[optional]
protoGraph.Show()
protoGraph.Apply()
protoGraph.SelectAll()
if autoLoadA7ref == False:
layout = protoGraph.GetLayout()
selection = protoGraph.GetSelection()
if not selection:
raise Exception('Please select '+A7R+' !')
#======================================================================
#========= Get Selection and move to center graph
#======================================================================
layout = protoGraph.GetLayout()
selection = protoGraph.GetSelection()
layout.SetPos(A7RefPath, (0,0) )
protoGraph.Show()
protoGraph.Apply()
protoGraph.SelectAll()
#======================================================================
#========= Retrieve ref.a7 Pos
#======================================================================
for pa in selection:
A7_infos = __PIPEIN_GRAPH.getA7_infos(pa)
nm_asset = A7_infos['nm_asset']
a_types = A7_infos['a_types']
if str(pa) == str(A7RefPath):
ProtoA7 = pa
#======================================================================
#========= get Refa7 position
#======================================================================
A7refPos = __PIPEIN_GRAPH.getPosition(pa,layout)
#======================================================================
#========= show a7 Downstreams
#======================================================================
StreamProtoList = showStream(protoGraph,ProtoA7,'GetDownstreams',catFamily)
#========= apply and refresh graph
protoGraph.Show()
protoGraph.Apply()
protoGraph.SelectAll()
#======================================================================
#========= set position .a7 Downstreams
#======================================================================
#========= R&D for auto-ecart, check longest name for ecart optimal
if str(ecart_a7_X).upper() == 'AUTO':
params = __PIPEIN_GRAPH.get_autoEcart_X(StreamProtoList,params)
#========= set position .a7 Downstreams
n_streams = len(StreamProtoList)
__PIPEIN_GRAPH.move_StreamProtoList(n_streams,StreamProtoList,layout,A7refPos,params)
#========= apply and refresh graph
protoGraph.Show()
#========= save Graph
if str(save_private) == 'True':
myGraph = myGraphLocal
if str(autosave) == 'True' and str(save_private) == 'True':
# __PIPEIN_GRAPH.SaveGraph(myGraph) # to do , to debug
protoGraph.Write(myG, private=True)
print '[ OK ] ' + myG + ' has been saved -> ' + myGraph
if str(autosave) == 'True' and str(save_private) == 'False':
# __PIPEIN_GRAPH.SaveGraph(myGraph) # to do , to debug
protoGraph.Write(str(myGraph), private=False)
print '[ OK ] ' + myG + ' has been saved -> ' + myGraph
if str(autosave) == 'False':
print '[ OK ] You can save : ' + myG
# #=========================== UI
AK02_LAYOUT_BuildCameraModel.__category__ = 'A - PIPE-IN TOOLZ'
AK02_LAYOUT_BuildCameraModel.__author__ = 'cpottier'
AK02_LAYOUT_BuildCameraModel.__textColor__ = '#6699ff'
AK02_LAYOUT_BuildCameraModel.__paramsType__ = {
'autoload' : ( 'bool', 'True' , ['True', 'False'] ),
'autosave' : ( 'bool', 'True' , ['True', 'False'] ),
'save_private' : ( 'bool', 'False' , ['True', 'False'] ),
'cat' : ( 'enum', 'MAIN',['MAIN', 'SECONDARY', 'TERTIARY'] ),
'all_cats' : ( 'bool', 'True' , ['True', 'False'] )
}
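# A tool annotated this way can also be invoked directly with keyword
# arguments matching __paramsType__, e.g. (illustrative call only):
#   AK02_LAYOUT_BuildCameraModel(autoload='True', autosave='False', save_private='False', cat='MAIN', all_cats='False')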
def AK03_LAYOUT_BuildHumanShape(autoload='True',autosave='True',save_private='False',cat='MAIN',all_cats='True',_cat=None):
'''
| /
| \ Tool - Last update 27-06-2016
----------------------------------------------------------------------
- Organize FACIAL Context Layout
- autosave graphs in :
MODELING/CHARS/MODTECH/
-> F_MAIN.inkGraph
-> F_SECONDARY.inkGraph
-> F_TERTIARY.inkGraph
- Auto Execution or Select Human-Shape_BcsTpl.a7
----------------------------------------------------------------------
todo :
- release handling of Filters = None
- r&d : check longest name for optimal automatic spacing (ecart)
'''
# PARAMS MODIFIABLES ############################################################################################
A7refPos_X = None # X origin a7 ref - None for not Used
A7refPos_Y = None # Y origin a7 ref - None for not Used
offset_X = 8 # 4 # relative to X origin a7 ref - can be negative
offset_Y = 3 # 0 # relative to Y origin a7 ref - can be negative
ecart_a7_X = 'Auto' # 3 # X space between streams a7 , if = 'Auto', space depends of A7 name longer
ecart_a7_Y = None # 2 # Y space between streams a7
n_A7_perColumn = None # 6 # max n streams vertically
n_A7_perRow = None # 6 # max n streams horizontally - None for not Used
X_space_betweenColumns = 3
quinconce = None # True False # boolean, offsets every other a7 (staggered layout)
quinconce_X = None # 0.5 # x offset, 0.5/-0.5 left-right, min max
quinconce_start = None # 0 # switch first quinconce_X value to modulo 0.5 to -0.5
n_col_byGroup = None # 2 # n columns per a7 group
X_space_betweenGroup = 3 # ecart_a7_X*2 # subjective, could be improved based on n assets, n groups etc
n_groups_grouded = None # todo: batches of groups - None for not Used
# DONT TOUCH ###################################################################################################
params = {}
if A7refPos_X != None :
params['A7refPos_X'] = A7refPos_X
if A7refPos_Y != None :
params['A7refPos_Y'] = A7refPos_Y
if offset_X != None :
params['offset_X'] = offset_X
if offset_Y != None :
params['offset_Y'] = offset_Y
if ecart_a7_X != None :
params['ecart_a7_X'] = ecart_a7_X
if ecart_a7_Y != None :
params['ecart_a7_Y'] = ecart_a7_Y
if n_A7_perColumn != None :
params['n_A7_perColumn'] = n_A7_perColumn
if n_A7_perRow != None :
params['n_A7_perRow'] = n_A7_perRow
if X_space_betweenColumns != None :
params['X_space_betweenColumns'] = X_space_betweenColumns
if quinconce != None :
params['quinconce'] = quinconce
if quinconce_X != None :
params['quinconce_X'] = quinconce_X
your least favorite subject at school?',
'Who is your dream dinner guest?',
'Would you rather publish a book or release an album?',
'Have you ever met someone famous?',
'Have you ever had a supernatural experience?',
'Who is your favorite superhero?',
'What job would you be doing if computers had not been invented?',
'What is the best holiday you have ever been on?',
'In the book of your life, what is the best chapter?',
'Can you play a musical instrument?',
'It is late, you are hungry. What shameful snack will you prepare?',
'Make the noise of your favorite animal',
'If you could only eat one type of food for the rest of your life, what would it be?',
'Day off. What do you do to relax?',
'Have you ever been in a newspaper?',
'If you could ban any word or phrase, what would it be?',
'What was your favorite TV show when growing up?',
'Which famous sporting moment would you like to have been part of?',
'Do you have superstitions?',
'Have you ever walked out of a cinema before a movie has finished?',
'What was the one thing you always wanted as a kid, but never got?',
'What was the first movie you saw at the cinema?',
'You are put in charge of the country. What is the first thing you do?',
'What do you like most about coming to work?',
'What is your favorite animal?',
'What’s the best thing that’s happened to you this week?',
'What was the worst present you’ve received?',
'Which skill would you love to learn?',
'Would you rather be clever or beautiful?',
'Would you rather be really hairy or bald?',
'Would you like to be taller or shorter?',
'What irritates you the most?',
'Have you gone out with mismatched socks or shoes on?',
'What flavor ice cream you like the most?',
'What is your favorite drink?',
'Have you ever locked yourself out of the house?',
'Have you gone in to a room and forgotten why?',
'Given the choice of anyone in the world, whom would you want as a dinner guest?',
'Would you like to be famous? In what way?',
'Before making a telephone call, do you ever rehearse what you are going to say? Why?',
'What would constitute a “perfect” day for you?',
'When did you last sing to yourself? To someone else?',
'If you were able to live to the age of 90 and retain either the mind or body of a 30-year-old for the last 60 years of your life, which would you want?',
'Name three things you and a team mate appear to have in common.',
'For what in your life do you feel most grateful?',
'If you could wake up tomorrow having gained any one quality or ability, what would it be?',
'If a crystal ball could tell you the truth about yourself, your life, the future or anything else, what would you want to know?',
'Is there something that you’ve dreamed of doing for a long time? Why haven’t you done it?',
'What is the greatest accomplishment of your life?',
'What do you value most in a friendship?',
'What is your most treasured memory?',
'What does friendship mean to you?',
'Alternate sharing something you consider a positive characteristic of each person in your team.',
'Make a true “we” statements. For instance, “We are both in this room feeling ... “',
'Share an embarrassing moment in your life.',
'What, if anything, is too serious to be joked about?',
'Your house, containing everything you own, catches fire. After saving your loved ones and pets, you have time to safely make a final dash to save any one item. What would it be? Why?',
'Texting or talking?',
'Favorite day of the week?',
'Nickname your parents used to call you?',
'Last song you listened to?',
'Would you rather be able to speak every language in the world or be able to talk to animals?',
'Favorite holiday?',
'How long does it take you to get ready?',
'Scale of 1-10, how good of a driver are you?',
'At what age do you want to retire?',
'Invisibility or super strength?',
'Is it wrong for a vegetarian to eat animal shaped crackers?',
'Scale of 1-10, how good are you at keeping secrets?',
'Dawn or dusk?',
'Do you snore?',
'Place you most want to travel?',
'Favorite junk food?',
'Favorite season?',
'Last Halloween or Carnival costume?',
'Cake or pie?',
'Do you ever post inspirational quotes on social media?',
'Favorite ice cream flavor?',
'Say a word in Spanish.',
'Favorite number?',
'Have you ever worn socks with sandals?',
'Try to tickle yourself. Can you?',
'What’s the best age?',
'If Voldemort offered you a hug, would you accept?',
'Would you rather cuddle with a baby panda or a baby penguin?',
'Would you want to live forever?',
'What will you have for dinner tonight?',
'How many pull-ups can you do in a row?',
'Favorite type of tea?',
'Say something in an Asian language.',
'What is the fastest speed you have ever driven in a car?',
'Star Trek or Star Wars?',
'How many times did you sneeze in the last 7 days?',
'Big dogs or small dogs?',
'How many hours of sleep do you need?',
'Say "Gday mate" in an Australian accent.',
'What is your favorite carb: bread, pasta, rice, or potatoes?',
'How many kids would you like to have?',
'Are rats cute?',
'What is your favorite car?',
'Do you know how to salsa dance?',
'How many cups of coffee do you drink per day?',
'What is your ideal outside temperature?',
'Favorite type of muffin?',
'Giving presents or getting presents?',
'From 1-10, how hot do you like your shower water?',
'If <NAME> and <NAME> were both drowning and you could only save one, who would it be?',
'Do you like the smell of gasoline?',
'Can you touch your toes without bending your knees?',
'Have you ever tasted soap?',
'Do you currently own any stuffed animals?',
'Tapas or pasta?',
'Ask permission or ask forgiveness?',
'How many redheads are you friends with?',
'Name a word in English that starts with the letter Q',
'Climb a mountain or jump from a plane?',
'If you were really hungry, would you eat a bug?',
'How long can you hold your breath for?',
'Have you ever seen a kangaroo in person?',
'When people stand up for a standing ovation, are you usually one of the earlier people to stand up or one of the later?',
'What type of milk do you put in your cereal?',
'Did you ever believe in Santa Claus?',
'Have you ever been to Africa?',
'What is the most number of hours you have watched TV in a single day?',
'Do you Instagram your food?',
'What sound does a seal make?',
'Would you rather lose all your hair or gain 50% more hair?',
'If there is a spider in your house, do you kill it or set it free?',
'What is something you could eat for a week straight?',
'Would you rather wake up to an air horn blowing in your ear every day, or wake up and have to run 4 miles every day?',
'Dark Chocolate or Milk Chocolate?',
'Would you go to a cinema alone?',
'What is a country you would be okay never visiting in your life?',
'Would you rather eat some smoky gnocchi or some delish fish?',
'If you were given the opportunity to fly into space given current technology, would you take it?',
'When was the last time you stayed up past 4 in the morning?',
'When you fly on a plane, do you wear a neck pillow?',
'Do you like Disneyland?',
'How would you rate your karaoke skills on a scale of 1 to Mariah Carey?',
'Are tomatoes a fruit or a vegetable?',
'
'"path /usr/libexec/argo-monitoring/probes/argo", '
'"interval 5", "retryInterval 3"]'
)
self.assertEqual(metric.attribute, '["argo.api_TOKEN --token"]')
self.assertEqual(metric.dependancy, '')
self.assertEqual(metric.flags, '["OBSESS 1"]')
self.assertEqual(metric.files, '')
self.assertEqual(metric.parameter, '')
self.assertEqual(metric.fileparameter, '')
mt_history = poem_models.TenantHistory.objects.filter(
object_repr='argo.API-Check'
).order_by('-date_created')
self.assertEqual(mt_history.count(), 1)
self.assertEqual(
mt_history[0].comment, 'Initial version.'
)
serialized_data = json.loads(mt_history[0].serialized_data)[0]['fields']
self.assertEqual(serialized_data['name'], metric.name)
self.assertEqual(serialized_data['mtype'], ['Active'])
self.assertEqual(
serialized_data['probekey'], ['argo-web-api', '0.1.7']
)
self.assertEqual(serialized_data['group'], ['TEST'])
self.assertEqual(serialized_data['parent'], metric.parent)
self.assertEqual(
serialized_data['probeexecutable'], metric.probeexecutable
)
self.assertEqual(serialized_data['config'], metric.config)
self.assertEqual(serialized_data['attribute'], metric.attribute)
self.assertEqual(serialized_data['dependancy'], metric.dependancy)
self.assertEqual(serialized_data['flags'], metric.flags)
self.assertEqual(serialized_data['files'], metric.files)
self.assertEqual(serialized_data['parameter'], metric.parameter)
self.assertEqual(serialized_data['fileparameter'], metric.fileparameter)
def test_put_probe_with_new_version_without_metrictemplate_update_tn_sprusr(
self
):
data = {
'id': self.probe2.id,
'name': 'web-api',
'package': 'nagios-plugins-argo (0.1.11)',
'comment': 'New version.',
'docurl':
'https://github.com/ARGOeu/nagios-plugins-argo2/blob/'
'master/README.md',
'description': 'Probe is checking AR and status reports.',
'repository': 'https://github.com/ARGOeu/nagios-plugins-'
'argo2',
'update_metrics': False
}
content, content_type = encode_data(data)
request = self.factory.put(self.url, content, content_type=content_type)
request.tenant = self.tenant
force_authenticate(request, user=self.tenant_superuser)
response = self.view(request)
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
self.assertEqual(
response.data['detail'],
'You do not have permission to change probes.'
)
probe = admin_models.Probe.objects.get(id=self.probe2.id)
self.assertEqual(
admin_models.ProbeHistory.objects.filter(object_id=probe).count(), 1
)
version = admin_models.ProbeHistory.objects.get(
object_id=probe, package__version=probe.package.version
)
self.assertEqual(probe.name, 'argo-web-api')
self.assertEqual(probe.package, self.package1)
self.assertEqual(probe.comment, 'Initial version.')
self.assertEqual(
probe.docurl,
'https://github.com/ARGOeu/nagios-plugins-argo/blob/master/'
'README.md',
)
self.assertEqual(
probe.description,
'This is a probe for checking AR and status reports are properly '
'working.'
)
self.assertEqual(
probe.repository, 'https://github.com/ARGOeu/nagios-plugins-argo'
)
self.assertEqual(version.name, probe.name)
self.assertEqual(version.package, probe.package)
self.assertEqual(version.comment, probe.comment)
self.assertEqual(version.docurl, probe.docurl)
self.assertEqual(version.description, probe.description)
self.assertEqual(version.repository, probe.repository)
self.assertEqual(version.version_comment, 'Initial version.')
mt = admin_models.MetricTemplate.objects.get(name='argo.API-Check')
self.assertEqual(mt.probekey, version)
metric = poem_models.Metric.objects.get(name='argo.API-Check')
self.assertEqual(metric.group.name, 'TEST')
self.assertEqual(metric.parent, '')
self.assertEqual(metric.probeexecutable, '["web-api"]')
self.assertEqual(metric.probekey, version)
self.assertEqual(
metric.config,
'["maxCheckAttempts 3", "timeout 120", '
'"path /usr/libexec/argo-monitoring/probes/argo", '
'"interval 5", "retryInterval 3"]'
)
self.assertEqual(metric.attribute, '["argo.api_TOKEN --token"]')
self.assertEqual(metric.dependancy, '')
self.assertEqual(metric.flags, '["OBSESS 1"]')
self.assertEqual(metric.files, '')
self.assertEqual(metric.parameter, '')
self.assertEqual(metric.fileparameter, '')
mt_history = poem_models.TenantHistory.objects.filter(
object_repr='argo.API-Check'
).order_by('-date_created')
self.assertEqual(mt_history.count(), 1)
self.assertEqual(
mt_history[0].comment, 'Initial version.'
)
serialized_data = json.loads(mt_history[0].serialized_data)[0]['fields']
self.assertEqual(serialized_data['name'], metric.name)
self.assertEqual(serialized_data['mtype'], ['Active'])
self.assertEqual(
serialized_data['probekey'], ['argo-web-api', '0.1.7']
)
self.assertEqual(serialized_data['group'], ['TEST'])
self.assertEqual(serialized_data['parent'], metric.parent)
self.assertEqual(
serialized_data['probeexecutable'], metric.probeexecutable
)
self.assertEqual(serialized_data['config'], metric.config)
self.assertEqual(serialized_data['attribute'], metric.attribute)
self.assertEqual(serialized_data['dependancy'], metric.dependancy)
self.assertEqual(serialized_data['flags'], metric.flags)
self.assertEqual(serialized_data['files'], metric.files)
self.assertEqual(serialized_data['parameter'], metric.parameter)
self.assertEqual(serialized_data['fileparameter'], metric.fileparameter)
def test_put_probe_with_new_version_without_metrictemplate_update_tn_user(
self
):
data = {
'id': self.probe2.id,
'name': 'web-api',
'package': 'nagios-plugins-argo (0.1.11)',
'comment': 'New version.',
'docurl':
'https://github.com/ARGOeu/nagios-plugins-argo2/blob/'
'master/README.md',
'description': 'Probe is checking AR and status reports.',
'repository': 'https://github.com/ARGOeu/nagios-plugins-'
'argo2',
'update_metrics': False
}
content, content_type = encode_data(data)
request = self.factory.put(self.url, content, content_type=content_type)
request.tenant = self.tenant
force_authenticate(request, user=self.tenant_user)
response = self.view(request)
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
self.assertEqual(
response.data['detail'],
'You do not have permission to change probes.'
)
probe = admin_models.Probe.objects.get(id=self.probe2.id)
self.assertEqual(
admin_models.ProbeHistory.objects.filter(object_id=probe).count(), 1
)
version = admin_models.ProbeHistory.objects.get(
object_id=probe, package__version=probe.package.version
)
self.assertEqual(probe.name, 'argo-web-api')
self.assertEqual(probe.package, self.package1)
self.assertEqual(probe.comment, 'Initial version.')
self.assertEqual(
probe.docurl,
'https://github.com/ARGOeu/nagios-plugins-argo/blob/master/'
'README.md',
)
self.assertEqual(
probe.description,
'This is a probe for checking AR and status reports are properly '
'working.'
)
self.assertEqual(
probe.repository, 'https://github.com/ARGOeu/nagios-plugins-argo'
)
self.assertEqual(version.name, probe.name)
self.assertEqual(version.package, probe.package)
self.assertEqual(version.comment, probe.comment)
self.assertEqual(version.docurl, probe.docurl)
self.assertEqual(version.description, probe.description)
self.assertEqual(version.repository, probe.repository)
self.assertEqual(version.version_comment, 'Initial version.')
mt = admin_models.MetricTemplate.objects.get(name='argo.API-Check')
self.assertEqual(mt.probekey, version)
metric = poem_models.Metric.objects.get(name='argo.API-Check')
self.assertEqual(metric.group.name, 'TEST')
self.assertEqual(metric.parent, '')
self.assertEqual(metric.probeexecutable, '["web-api"]')
self.assertEqual(metric.probekey, version)
self.assertEqual(
metric.config,
'["maxCheckAttempts 3", "timeout 120", '
'"path /usr/libexec/argo-monitoring/probes/argo", '
'"interval 5", "retryInterval 3"]'
)
self.assertEqual(metric.attribute, '["argo.api_TOKEN --token"]')
self.assertEqual(metric.dependancy, '')
self.assertEqual(metric.flags, '["OBSESS 1"]')
self.assertEqual(metric.files, '')
self.assertEqual(metric.parameter, '')
self.assertEqual(metric.fileparameter, '')
mt_history = poem_models.TenantHistory.objects.filter(
object_repr='argo.API-Check'
).order_by('-date_created')
self.assertEqual(mt_history.count(), 1)
self.assertEqual(
mt_history[0].comment, 'Initial version.'
)
serialized_data = json.loads(mt_history[0].serialized_data)[0]['fields']
self.assertEqual(serialized_data['name'], metric.name)
self.assertEqual(serialized_data['mtype'], ['Active'])
self.assertEqual(
serialized_data['probekey'], ['argo-web-api', '0.1.7']
)
self.assertEqual(serialized_data['group'], ['TEST'])
self.assertEqual(serialized_data['parent'], metric.parent)
self.assertEqual(
serialized_data['probeexecutable'], metric.probeexecutable
)
self.assertEqual(serialized_data['config'], metric.config)
self.assertEqual(serialized_data['attribute'], metric.attribute)
self.assertEqual(serialized_data['dependancy'], metric.dependancy)
self.assertEqual(serialized_data['flags'], metric.flags)
self.assertEqual(serialized_data['files'], metric.files)
self.assertEqual(serialized_data['parameter'], metric.parameter)
self.assertEqual(serialized_data['fileparameter'], metric.fileparameter)
def test_put_probe_with_new_version_with_metrictemplate_update_sp_spruser(
self
):
data = {
'id': self.probe2.id,
'name': 'web-api',
'package': 'nagios-plugins-argo (0.1.11)',
'comment': 'New version.',
'docurl':
'https://github.com/ARGOeu/nagios-plugins-argo2/blob/'
'master/README.md',
'description': 'Probe is checking AR and status reports.',
'repository': 'https://github.com/ARGOeu/nagios-plugins-'
'argo2',
'update_metrics': True
}
content, content_type = encode_data(data)
request = self.factory.put(
self.url, content, content_type=content_type
)
request.tenant = self.super_tenant
force_authenticate(request, user=self.superuser)
response = self.view(request)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
probe = admin_models.Probe.objects.get(id=self.probe2.id)
versions = admin_models.ProbeHistory.objects.filter(
object_id=self.probe2
).order_by('-date_created')
self.assertEqual(versions.count(), 2)
self.assertEqual(probe.name, 'web-api')
self.assertEqual(probe.package, self.package2)
self.assertEqual(probe.comment, 'New version.')
self.assertEqual(
probe.docurl,
'https://github.com/ARGOeu/nagios-plugins-argo2/blob/master/'
'README.md',
)
self.assertEqual(
probe.description,
'Probe is checking AR and status reports.'
)
self.assertEqual(
probe.repository,
'https://github.com/ARGOeu/nagios-plugins-argo2',
)
self.assertEqual(versions[0].name, probe.name)
self.assertEqual(versions[0].package, probe.package)
self.assertEqual(versions[0].comment, probe.comment)
self.assertEqual(versions[0].docurl, probe.docurl)
self.assertEqual(versions[0].description, probe.description)
self.assertEqual(versions[0].repository, probe.repository)
mt = admin_models.MetricTemplate.objects.get(name='argo.API-Check')
self.assertEqual(mt.probekey, versions[0])
metric = poem_models.Metric.objects.get(name='argo.API-Check')
self.assertEqual(metric.group.name, 'TEST')
self.assertEqual(metric.parent, '')
self.assertEqual(metric.probeexecutable, '["web-api"]')
self.assertEqual(metric.probekey, versions[1])
self.assertEqual(
metric.config,
'["maxCheckAttempts 3", "timeout 120", '
'"path /usr/libexec/argo-monitoring/probes/argo", '
'"interval 5", "retryInterval 3"]'
)
self.assertEqual(metric.attribute, '["argo.api_TOKEN --token"]')
self.assertEqual(metric.dependancy, '')
self.assertEqual(metric.flags, '["OBSESS 1"]')
self.assertEqual(metric.files, '')
self.assertEqual(metric.parameter, '')
self.assertEqual(metric.fileparameter, '')
mt_history = poem_models.TenantHistory.objects.filter(
object_repr='argo.API-Check'
).order_by('-date_created')
self.assertEqual(mt_history.count(), 1)
self.assertEqual(
mt_history[0].comment, 'Initial version.'
)
serialized_data = json.loads(mt_history[0].serialized_data)[0]['fields']
self.assertEqual(serialized_data['name'], metric.name)
self.assertEqual(serialized_data['mtype'], ['Active'])
self.assertEqual(
serialized_data['probekey'], ['argo-web-api', '0.1.7']
)
self.assertEqual(serialized_data['group'], ['TEST'])
self.assertEqual(serialized_data['parent'], metric.parent)
self.assertEqual(
serialized_data['probeexecutable'], metric.probeexecutable
)
self.assertEqual(serialized_data['config'], metric.config)
self.assertEqual(serialized_data['attribute'], metric.attribute)
self.assertEqual(serialized_data['dependancy'], metric.dependancy)
self.assertEqual(serialized_data['flags'], metric.flags)
self.assertEqual(serialized_data['files'], metric.files)
self.assertEqual(serialized_data['parameter'], metric.parameter)
self.assertEqual(serialized_data['fileparameter'], metric.fileparameter)
def test_put_probe_with_new_version_with_metrictemplate_update_sp_user(
self
):
data = {
'id': self.probe2.id,
'name': 'web-api',
'package': 'nagios-plugins-argo (0.1.11)',
'comment': 'New version.',
'docurl':
'https://github.com/ARGOeu/nagios-plugins-argo2/blob/'
'master/README.md',
'description': 'Probe is checking AR and status reports.',
'repository': 'https://github.com/ARGOeu/nagios-plugins-'
'argo2',
'update_metrics': True
}
content, content_type = encode_data(data)
request = self.factory.put(
self.url, content, content_type=content_type
)
request.tenant = self.super_tenant
force_authenticate(request, user=self.user)
response = self.view(request)
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
self.assertEqual(
response.data['detail'],
'You do not have permission to change probes.'
)
probe = admin_models.Probe.objects.get(id=self.probe2.id)
versions = admin_models.ProbeHistory.objects.filter(
object_id=self.probe2
).order_by('-date_created')
self.assertEqual(versions.count(), 1)
self.assertEqual(probe.name, 'argo-web-api')
self.assertEqual(probe.package, self.package1)
self.assertEqual(probe.comment, 'Initial version.')
self.assertEqual(
probe.docurl,
'https://github.com/ARGOeu/nagios-plugins-argo/blob/master/'
'README.md',
)
self.assertEqual(
probe.description,
'This is a probe for checking AR and status reports are properly '
'working.'
)
self.assertEqual(
probe.repository,
'https://github.com/ARGOeu/nagios-plugins-argo'
)
self.assertEqual(versions[0].name, probe.name)
self.assertEqual(versions[0].package, probe.package)
self.assertEqual(versions[0].comment, probe.comment)
self.assertEqual(versions[0].docurl, probe.docurl)
self.assertEqual(versions[0].description, probe.description)
self.assertEqual(versions[0].repository, probe.repository)
mt = admin_models.MetricTemplate.objects.get(name='argo.API-Check')
self.assertEqual(mt.probekey, versions[0])
metric = poem_models.Metric.objects.get(name='argo.API-Check')
self.assertEqual(metric.group.name, 'TEST')
self.assertEqual(metric.parent, '')
self.assertEqual(metric.probeexecutable, '["web-api"]')
self.assertEqual(metric.probekey, versions[0])
self.assertEqual(
metric.config,
'["maxCheckAttempts 3", "timeout 120", '
'"path /usr/libexec/argo-monitoring/probes/argo", '
'"interval 5", "retryInterval 3"]'
)
self.assertEqual(metric.attribute, '["argo.api_TOKEN --token"]')
self.assertEqual(metric.dependancy, '')
self.assertEqual(metric.flags, '["OBSESS 1"]')
self.assertEqual(metric.files, '')
self.assertEqual(metric.parameter, '')
self.assertEqual(metric.fileparameter, '')
mt_history = poem_models.TenantHistory.objects.filter(
object_repr='argo.API-Check'
).order_by('-date_created')
self.assertEqual(mt_history.count(), 1)
self.assertEqual(
mt_history[0].comment, 'Initial version.'
)
serialized_data = json.loads(mt_history[0].serialized_data)[0]['fields']
self.assertEqual(serialized_data['name'], metric.name)
self.assertEqual(serialized_data['mtype'], ['Active'])
self.assertEqual(
serialized_data['probekey'], ['argo-web-api', '0.1.7']
)
self.assertEqual(serialized_data['group'], ['TEST'])
self.assertEqual(serialized_data['parent'], metric.parent)
self.assertEqual(
serialized_data['probeexecutable'], metric.probeexecutable
)
self.assertEqual(serialized_data['config'], metric.config)
self.assertEqual(serialized_data['attribute'], metric.attribute)
self.assertEqual(serialized_data['dependancy'], metric.dependancy)
self.assertEqual(serialized_data['flags'], metric.flags)
self.assertEqual(serialized_data['files'], metric.files)
self.assertEqual(serialized_data['parameter'], metric.parameter)
self.assertEqual(serialized_data['fileparameter'], metric.fileparameter)
def test_put_probe_with_new_version_with_metrictemplate_update_tennt_sprusr(
self
):
data = {
'id': self.probe2.id,
'name': 'web-api',
'package': 'nagios-plugins-argo (0.1.11)',
'comment': 'New version.',
'docurl':
'https://github.com/ARGOeu/nagios-plugins-argo2/blob/'
'master/README.md',
'description': 'Probe is checking AR and status reports.',
'repository': 'https://github.com/ARGOeu/nagios-plugins-'
'argo2',
'update_metrics': True
}
content, content_type = encode_data(data)
request = self.factory.put(
self.url, content, content_type=content_type
)
request.tenant = self.tenant
force_authenticate(request, user=self.tenant_superuser)
response = self.view(request)
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
self.assertEqual(
response.data['detail'],
'You do not have permission to change probes.'
)
probe = admin_models.Probe.objects.get(id=self.probe2.id)
versions = admin_models.ProbeHistory.objects.filter(
object_id=self.probe2
).order_by('-date_created')
self.assertEqual(versions.count(), 1)
self.assertEqual(probe.name, 'argo-web-api')
self.assertEqual(probe.package, self.package1)
self.assertEqual(probe.comment, 'Initial version.')
self.assertEqual(
probe.docurl,
'https://github.com/ARGOeu/nagios-plugins-argo/blob/master/'
'README.md',
)
self.assertEqual(
probe.description,
'This is a probe for checking AR and status reports are properly '
'working.'
)
self.assertEqual(
probe.repository,
'https://github.com/ARGOeu/nagios-plugins-argo'
)
self.assertEqual(versions[0].name, probe.name)
self.assertEqual(versions[0].package, probe.package)
self.assertEqual(versions[0].comment, probe.comment)
self.assertEqual(versions[0].docurl, probe.docurl)
self.assertEqual(versions[0].description, probe.description)
self.assertEqual(versions[0].repository, probe.repository)
mt = admin_models.MetricTemplate.objects.get(name='argo.API-Check')
self.assertEqual(mt.probekey, versions[0])
metric = poem_models.Metric.objects.get(name='argo.API-Check')
self.assertEqual(metric.group.name, 'TEST')
self.assertEqual(metric.parent, '')
self.assertEqual(metric.probeexecutable, '["web-api"]')
self.assertEqual(metric.probekey, versions[0])
self.assertEqual(
metric.config,
'["maxCheckAttempts 3", "timeout 120", '
'"path /usr/libexec/argo-monitoring/probes/argo", '
'"interval 5", "retryInterval 3"]'
)
self.assertEqual(metric.attribute, '["argo.api_TOKEN --token"]')
self.assertEqual(metric.dependancy, '')
self.assertEqual(metric.flags, '["OBSESS 1"]')
self.assertEqual(metric.files, '')
self.assertEqual(metric.parameter, '')
self.assertEqual(metric.fileparameter, '')
mt_history = poem_models.TenantHistory.objects.filter(
object_repr='argo.API-Check'
).order_by('-date_created')
self.assertEqual(mt_history.count(), 1)
self.assertEqual(
mt_history[0].comment, 'Initial version.'
)
serialized_data = json.loads(mt_history[0].serialized_data)[0]['fields']
self.assertEqual(serialized_data['name'], metric.name)
self.assertEqual(serialized_data['mtype'], ['Active'])
self.assertEqual(
serialized_data['probekey'], ['argo-web-api', '0.1.7']
)
self.assertEqual(serialized_data['group'], ['TEST'])
self.assertEqual(serialized_data['parent'], metric.parent)
self.assertEqual(
serialized_data['probeexecutable'], metric.probeexecutable
)
self.assertEqual(serialized_data['config'], metric.config)
self.assertEqual(serialized_data['attribute'], metric.attribute)
self.assertEqual(serialized_data['dependancy'], metric.dependancy)
self.assertEqual(serialized_data['flags'], metric.flags)
self.assertEqual(serialized_data['files'], metric.files)
self.assertEqual(serialized_data['parameter'], metric.parameter)
self.assertEqual(serialized_data['fileparameter'], metric.fileparameter)
def test_put_probe_with_new_version_with_metrictemplate_update_tenant_user(
self
):
data = {
'id': self.probe2.id,
'name': 'web-api',
'package': 'nagios-plugins-argo (0.1.11)',
'comment': 'New version.',
'docurl':
'https://github.com/ARGOeu/nagios-plugins-argo2/blob/'
'master/README.md',
'description': 'Probe is checking AR and status reports.',
'repository': 'https://github.com/ARGOeu/nagios-plugins-'
'argo2',
'update_metrics': True
}
content, content_type = encode_data(data)
request = self.factory.put(
self.url, content, content_type=content_type
)
request.tenant = self.tenant
force_authenticate(request, user=self.tenant_user)
response = self.view(request)
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
self.assertEqual(
response.data['detail'],
'You do not have permission to change probes.'
)
probe = admin_models.Probe.objects.get(id=self.probe2.id)
versions = admin_models.ProbeHistory.objects.filter(
object_id=self.probe2
).order_by('-date_created')
self.assertEqual(versions.count(), 1)
self.assertEqual(probe.name, 'argo-web-api')
self.assertEqual(probe.package, self.package1)
self.assertEqual(probe.comment, 'Initial version.')
self.assertEqual(
probe.docurl,
'https://github.com/ARGOeu/nagios-plugins-argo/blob/master/'
'README.md',
)
self.assertEqual(
probe.description,
'This is a probe for checking AR and status reports are properly '
'working.'
)
self.assertEqual(
probe.repository,
'https://github.com/ARGOeu/nagios-plugins-argo'
)
self.assertEqual(versions[0].name, probe.name)
self.assertEqual(versions[0].package, probe.package)
self.assertEqual(versions[0].comment, probe.comment)
self.assertEqual(versions[0].docurl, probe.docurl)
self.assertEqual(versions[0].description, probe.description)
self.assertEqual(versions[0].repository, probe.repository)
mt = admin_models.MetricTemplate.objects.get(name='argo.API-Check')
self.assertEqual(mt.probekey, versions[0])
metric = poem_models.Metric.objects.get(name='argo.API-Check')
self.assertEqual(metric.group.name, 'TEST')
self.assertEqual(metric.parent, '')
self.assertEqual(metric.probeexecutable, '["web-api"]')
self.assertEqual(metric.probekey, versions[0])
self.assertEqual(
metric.config,
'["maxCheckAttempts 3", "timeout 120", '
'"path /usr/libexec/argo-monitoring/probes/argo", '
'"interval 5", "retryInterval 3"]'
)
self.assertEqual(metric.attribute, '["argo.api_TOKEN --token"]')
self.assertEqual(metric.dependancy, '')
self.assertEqual(metric.flags, '["OBSESS 1"]')
        self.assertEqual(metric.files, '')
edge mask using adaptive thresholding
"""
img_edges = cv2.adaptiveThreshold(src=img_blur, maxValue=255,
adaptiveMethod=adaptive_method,
thresholdType=thresh_method,
blockSize=thresh_bsize, C=thresh_C)
"""
step 4. combine color image with edge mask
"""
if (nb_channels == 3):
img_edges = cv2.cvtColor(img_edges, cv2.COLOR_GRAY2RGB)
img_cartoon = cv2.bitwise_and(img_color, img_edges)
transformed_images.append(img_cartoon/255.)
transformed_images = np.stack(transformed_images, axis=0)
if (nb_channels == 1):
transformed_images = transformed_images.reshape((nb_images, img_rows, img_cols, nb_channels))
return transformed_images
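# --- Illustrative sketch (not part of the original module) ---------------------
# The cartoon effect above in miniature for a single uint8 RGB image. The blur
# and threshold settings below are assumptions chosen for demonstration, not the
# module's actual defaults, and `demo_cartoon` is a hypothetical helper.
import cv2

def demo_cartoon(img_rgb, blur_ksize=5, block_size=9, C=9):
    gray = cv2.cvtColor(img_rgb, cv2.COLOR_RGB2GRAY)
    blur = cv2.medianBlur(gray, blur_ksize)
    edges = cv2.adaptiveThreshold(blur, 255, cv2.ADAPTIVE_THRESH_MEAN_C,
                                  cv2.THRESH_BINARY, block_size, C)
    edges = cv2.cvtColor(edges, cv2.COLOR_GRAY2RGB)
    return cv2.bitwise_and(img_rgb, edges)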
def _quant_trans(original_images, trans_args):
"""
Adapted from tutorial
https://www.pyimagesearch.com/2014/07/07/color-quantization-opencv-using-k-means-clustering/
:param original_images:
:param transformation:
:return:
"""
if len(original_images.shape) == 4:
nb_images, img_rows, img_cols, nb_channels = original_images.shape
else:
nb_images, img_rows, img_cols = original_images.shape
nb_channels = 1
nb_clusters = trans_args.get('nb_clusters', 4)
transformed_images = []
for img in original_images:
img_type = img.dtype
"""
Convert gray scale images to RGB color space such that
we can further convert the image to LAB color space.
This function will return a 3-channel gray image that
each channel is a copy of the original gray image.
"""
if (nb_channels == 1):
img = cv2.cvtColor(img, cv2.COLOR_GRAY2RGB)
"""
Convert the image from the RGB color space to the LAB color space,
since we will be clustering using k-means which is based on
the euclidean distance, we will use the LAB color space where
the euclidean distance implies perceptual meaning.
"""
img = cv2.cvtColor(img, cv2.COLOR_RGB2LAB)
"""
reshape the image into a feature vector so that k-mean can be applied
"""
img = img.reshape((img_rows * img_cols, 3))
"""
apply k-means using the specified number of clusters and then
create the quantized image based on the predictions.
"""
cluster = MiniBatchKMeans(n_clusters=nb_clusters)
labels = cluster.fit_predict(img)
quant = cluster.cluster_centers_[labels]
"""
reshape the feature vectors back to image
"""
quant = quant.reshape((img_rows, img_cols, 3))
"""
convert from LAB back to RGB
"""
quant = cv2.cvtColor(quant, cv2.COLOR_Lab2RGB)
"""
convert from RGB back to grayscale
"""
if (nb_channels == 1):
quant = cv2.cvtColor(quant, cv2.COLOR_RGB2GRAY)
transformed_images.append(quant.astype(img_type))
transformed_images = np.stack(transformed_images, axis=0)
if (nb_channels == 1):
# reshape a 3d array to a 4d array
transformed_images = transformed_images.reshape((nb_images, img_rows, img_cols, nb_channels))
return transformed_images
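# --- Illustrative sketch (not part of the original module) ---------------------
# The same LAB-space k-means quantization applied to one uint8 RGB image,
# mirroring the steps documented above. `demo_quantize` is a hypothetical helper.
import cv2
from sklearn.cluster import MiniBatchKMeans

def demo_quantize(img_rgb, nb_clusters=4):
    h, w = img_rgb.shape[:2]
    lab = cv2.cvtColor(img_rgb, cv2.COLOR_RGB2LAB)
    feats = lab.reshape((h * w, 3))
    cluster = MiniBatchKMeans(n_clusters=nb_clusters)
    labels = cluster.fit_predict(feats)
    quant = cluster.cluster_centers_.astype('uint8')[labels]
    return cv2.cvtColor(quant.reshape((h, w, 3)), cv2.COLOR_Lab2RGB)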
def _distort_trans(original_images, trans_args):
if len(original_images.shape) == 4:
nb_images, img_rows, img_cols, nb_channels = original_images.shape
else:
nb_images, img_rows, img_cols = original_images.shape
nb_channels = 1
distort_trans = trans_args.get('subtype')
transformed_images = []
if distort_trans in [trans_configs.DISTORT_TRANSFORMATIONS.X.value,
trans_configs.DISTORT_TRANSFORMATIONS.Y.value,]:
r1 = trans_args.get('r1', 5.)
r2 = trans_args.get('r2', 2.)
c = trans_args.get('c', 28.)
a = c / r1
w = r2 / c
shift_func = lambda x: a * np.sin(np.pi * x * w)
shift_func = trans_args.get('shift_func', shift_func)
if distort_trans == trans_configs.DISTORT_TRANSFORMATIONS.X.value:
for img in original_images:
                for i in range(img_cols):  # one vertical shift per column (there are img_cols of them)
img[:, i] = np.roll(img[:, i], int(shift_func(i)))
transformed_images.append(img)
else:
for img in original_images:
                for i in range(img_rows):  # one horizontal shift per row (there are img_rows of them)
img[i, :] = np.roll(img[i, :], int(shift_func(i)))
transformed_images.append(img)
elif distort_trans == trans_configs.DISTORT_TRANSFORMATIONS.PIXELATE.value:
new_size = trans_args.get('new_size', (16, 16))
resample = trans_args.get('resample')
resample_method = trans_configs.get_distort_resample(resample)
for img in original_images:
img = Image.fromarray(img, 'RGB')
# resize smoothly down
img = img.resize(new_size, resample=resample_method)
img = img.resize((img_rows, img_cols), resample=resample_method)
img = np.array(img)
transformed_images.append(img)
elif distort_trans == trans_configs.DISTORT_TRANSFORMATIONS.CONTRAST.value:
c = trans_args.get('c', 0.1)
min_pixel_val = trans_args.get('min_pixel_val', 0.)
max_pixel_val = trans_args.get('max_pixel_val', 1.)
if nb_channels == 1:
for img in original_images:
means = np.mean(img, axis=0, keepdims=True)
img = np.clip((img - means) * c + means, min_pixel_val, max_pixel_val)
transformed_images.append(img)
else:
original_images *= 255.
max_pixel_val *= 255.
for img in original_images:
means = np.mean(img, axis=(0, 1), keepdims=True)
img = np.clip((img - means) * c + means, min_pixel_val, max_pixel_val)
transformed_images.append(img/255.)
elif distort_trans == trans_configs.DISTORT_TRANSFORMATIONS.BRIGHTNESS.value:
c = trans_args.get('c', 0.99)
min_pixel_val = trans_args.get('min_pixel_val', 0.)
max_pixel_val = trans_args.get('max_pixel_val', 1.)
if nb_channels == 1:
for img in original_images:
img = cv2.cvtColor(img, cv2.COLOR_GRAY2RGB)
img = color.rgb2hsv(img)
img[:, :, 2] = np.clip(img[:, :, 2] + c, min_pixel_val, max_pixel_val)
img = color.hsv2rgb(img)
img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
transformed_images.append(img)
else:
original_images *= 255.
max_pixel_val *= 255.
for img in original_images:
img = color.rgb2hsv(img)
img[:, :, 2] = np.clip(img[:, :, 2] + c, min_pixel_val, max_pixel_val)
img = color.hsv2rgb(img)
transformed_images.append(img/255.)
else:
raise ValueError('{} is not supported.'.format(distort_trans))
transformed_images = np.stack(transformed_images, axis=0)
if nb_channels == 1:
transformed_images = transformed_images.reshape((nb_images, img_rows, img_cols, nb_channels))
return transformed_images
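# --- Illustrative sketch (not part of the original module) ---------------------
# The sinusoidal X-distortion above rolls each column i vertically by
# a*sin(pi*i*w) pixels. A standalone version for one 2-D grayscale image with the
# same default constants; `demo_distort_x` is a hypothetical helper.
import numpy as np

def demo_distort_x(img, r1=5., r2=2., c=28.):
    a, w = c / r1, r2 / c
    out = img.copy()
    for i in range(out.shape[1]):
        out[:, i] = np.roll(out[:, i], int(a * np.sin(np.pi * i * w)))
    return out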
def _noise_trans(original_images, trans_args):
"""
Adding noise to given images.
:param original_images:
:param transformation:
:return:
"""
if len(original_images.shape) == 4:
nb_images, img_rows, img_cols, nb_channels = original_images.shape
else:
nb_images, img_rows, img_cols = original_images.shape
nb_channels = 1
noise = trans_args.get('noise')
transformed_images = []
for img in original_images:
img = util.random_noise(img, mode=noise)
transformed_images.append(img)
transformed_images = np.stack(transformed_images, axis=0)
if (nb_channels == 1):
transformed_images = transformed_images.reshape((nb_images, img_rows, img_cols, nb_channels))
return transformed_images
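# Illustrative usage (hypothetical values): skimage's random_noise supports modes
# such as 'gaussian', 's&p' and 'speckle', e.g.
#   noisy_batch = _noise_trans(images, {'noise': 'gaussian'})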
def _filter_trans(original_images, trans_args):
if len(original_images.shape) == 4:
nb_images, img_rows, img_cols, nb_channels = original_images.shape
else:
nb_images, img_rows, img_cols = original_images.shape
nb_channels = 1
filter_trans = trans_args.get('subtype')
op = trans_configs.get_filter_op(filter_trans)
transformed_images = []
if filter_trans in [trans_configs.FILTER_TRANSFORMATION.SOBEL.value,
trans_configs.FILTER_TRANSFORMATION.ROBERTS.value,
trans_configs.FILTER_TRANSFORMATION.SCHARR.value,
trans_configs.FILTER_TRANSFORMATION.PREWITT.value,
trans_configs.FILTER_TRANSFORMATION.SKELETONIZE.value]:
for img in original_images:
if (nb_channels == 3):
img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
img = img.reshape(img_rows, img_cols)
img = op(img)
if (nb_channels == 3):
img = cv2.cvtColor(img, cv2.COLOR_GRAY2RGB)
transformed_images.append(img)
elif filter_trans in [trans_configs.FILTER_TRANSFORMATION.MEDIAN.value,
trans_configs.FILTER_TRANSFORMATION.MINIMUM.value,
trans_configs.FILTER_TRANSFORMATION.MAXIMUM.value,
trans_configs.FILTER_TRANSFORMATION.SATO.value,
trans_configs.FILTER_TRANSFORMATION.FRANGI.value,
trans_configs.FILTER_TRANSFORMATION.HESSIAN.value]:
size = trans_args.get('size', 3)
for img in original_images:
img = op(img, size=size)
transformed_images.append(img)
elif filter_trans == trans_configs.FILTER_TRANSFORMATION.RANK.value:
size = trans_args.get('size', 3)
rank = trans_args.get('rank', 15)
for img in original_images:
img = op(img, rank=rank, size=size)
transformed_images.append(img)
elif filter_trans == trans_configs.FILTER_TRANSFORMATION.GAUSSIAN.value:
sigma = trans_args.get('sigma', 1)
for img in original_images:
img = op(img, sigma=sigma)
transformed_images.append(img)
elif filter_trans == trans_configs.FILTER_TRANSFORMATION.MEIJERING.value:
sigmas = trans_args.get('sigmas', [0.01])
for img in original_images:
if nb_channels == 1:
img = cv2.cvtColor(img, cv2.COLOR_GRAY2RGB)
img = op(img, sigmas=sigmas)
if nb_channels == 1:
img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
transformed_images.append(img)
elif filter_trans == trans_configs.FILTER_TRANSFORMATION.ENTROPY.value:
radius = trans_args.get('radius', 2)
for img in original_images:
if (nb_channels == 3):
img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
img = img.reshape((img_rows, img_cols))
# requires values in range [-1., 1.]
img = (img - 0.5) / 2.
# skimage-entropy returns values in float64,
# however, opencv supports only float32.
img = np.float32(op(img, disk(radius=radius)))
# rescale to [0., 1.]
img = (img / 2.) + 0.5
if (nb_channels == 3):
img = cv2.cvtColor(img, cv2.COLOR_GRAY2RGB)
transformed_images.append(img)
elif filter_trans == trans_configs.FILTER_TRANSFORMATION.THIN.value:
max_iter = trans_args.get('max_iter', 100)
for img in original_images:
if (nb_channels == 3):
img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
img = img.reshape((img_rows, img_cols))
img = op(img, max_iter=max_iter)
if (nb_channels == 3):
img = cv2.cvtColor(img, cv2.COLOR_GRAY2RGB)
transformed_images.append(img)
else:
raise ValueError('{} is not supported.'.format(filter_trans))
transformed_images = np.stack(transformed_images, axis=0)
if (nb_channels == 1):
transformed_images = transformed_images.reshape((nb_images, img_rows, img_cols, nb_channels))
return transformed_images
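# --- Illustrative sketch (not part of the original module) ---------------------
# Why the entropy branch rescales: skimage's rank filters operate on (converted)
# 8-bit data, float inputs must lie in [-1, 1], and the result comes back as
# float64 while OpenCV expects float32. A standalone version for one grayscale
# image in [0, 1]; `demo_entropy` is a hypothetical helper and the explicit
# img_as_ubyte call is an assumption added for clarity.
import numpy as np
from skimage.filters.rank import entropy
from skimage.morphology import disk
from skimage.util import img_as_ubyte

def demo_entropy(gray01, radius=2):
    shifted = (gray01 - 0.5) / 2.                      # [0, 1] -> [-0.25, 0.25]
    ent = np.float32(entropy(img_as_ubyte(shifted), disk(radius)))
    return ent / 2. + 0.5                              # same rescaling as above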
def _compression_trans(original_images, trans_args):
"""
:param original_images:
:param transformation:
:return:
"""
if len(original_images.shape) == 4:
nb_images, img_rows, img_cols, nb_channels = original_images.shape
else:
nb_images, img_rows, img_cols = original_images.shape
nb_channels = 1
format = trans_args.get('format', trans_configs.COMPRESS_FORMAT.PNG)
rate = trans_args.get('rate', 80)
encode_param = trans_configs.get_compress_encoder(format, rate)
transformed_images = []
for img in original_images:
img *= 255.
result, encoded_img = cv2.imencode(ext=format, img=img, params=encode_param)
        if not result:
print('Failed to encode image to {} format.'.format(format))
quit()
# decode the image from encoded image
decoded_img = cv2.imdecode(buf=encoded_img, flags=1)
if (nb_channels == 1):
decoded_img = cv2.cvtColor(decoded_img, cv2.COLOR_RGB2GRAY)
transformed_images.append(decoded_img/255.)
transformed_images = np.stack(transformed_images, axis=0)
if (nb_channels == 1):
transformed_images = transformed_images.reshape((nb_images, img_rows, img_cols, nb_channels))
return transformed_images
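# --- Illustrative sketch (not part of the original module) ---------------------
# The same encode/decode round trip for a single float image in [0, 1], with the
# format and quality hard-coded to JPEG at 80 instead of coming from
# trans_configs. `demo_jpeg_roundtrip` is a hypothetical helper.
import cv2
import numpy as np

def demo_jpeg_roundtrip(img01, quality=80):
    ok, buf = cv2.imencode('.jpg', (img01 * 255).astype(np.uint8),
                           [int(cv2.IMWRITE_JPEG_QUALITY), quality])
    if not ok:
        raise RuntimeError('JPEG encoding failed')
    return cv2.imdecode(buf, flags=1) / 255.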
def _denoise_trans(original_images, trans_args):
"""
denoising transformation
:param original_images:
:param transformation:
:return:
"""
if len(original_images.shape) == 4:
nb_images, img_rows, img_cols, nb_channels = original_images.shape
else:
nb_images, img_rows, img_cols = original_images.shape
nb_channels = 1
denoise_trans = trans_args.get('subtype')
transformed_images = []
if denoise_trans == trans_configs.DENOISE_TRANSFORMATIONS.WAVELET.value:
method = trans_args.get('method', 'VisuShrink') # any option in ['VisuShrink', 'BayesShrink']
mode = trans_args.get('mode', 'soft') # any option in ['soft', 'hard']
wavelet = trans_args.get('wavelet', 'db1') # any option in pywt.wavelist
sigma = trans_args.get('sigma', None) # float or list, optional
for img in original_images:
if sigma is None:
sigma_est = estimate_sigma(img, multichannel=True, average_sigmas=True)
else:
sigma_est = sigma
img = denoise_wavelet(img, wavelet=wavelet, multichannel=True,
convert2ycbcr=False, method=method, mode=mode,
sigma=sigma_est)
transformed_images.append(img)
elif denoise_trans == trans_configs.DENOISE_TRANSFORMATIONS.TV_CHAMBOLLE.value:
# default 0.4 (grayscale); 0.07 (color image)
weight = trans_args.get('weight', 0.4)
epsilon = trans_args.get('epsilon', 2.e-4)
max_iter = trans_args.get('max_iter', 200)
for img in original_images:
img = denoise_tv_chambolle(img, weight=weight, eps=epsilon,
n_iter_max=max_iter, multichannel=True)
transformed_images.append(img)
elif denoise_trans == trans_configs.DENOISE_TRANSFORMATIONS.TV_BREGMAN.value:
# default 2 (grayscale); 15 (color image)
weight = trans_args.get('weight', 2)
epsilon = trans_args.get('epsilon', 1e-6)
max_iter = trans_args.get('max_iter', 50)
for img in original_images:
img_trans = denoise_tv_bregman(img, eps=epsilon, max_iter=max_iter, weight=weight)
transformed_images.append(img_trans)
elif denoise_trans == trans_configs.DENOISE_TRANSFORMATIONS.BILATERAL.value:
sigma_color = np.double(trans_args.get('sigma_color', 0.05))
sigma_spatial = np.double(trans_args.get('sigma_spatial', 15.0))
for img in original_images:
pass
def MItMeshPolygon_getUVArea(*args, **kwargs):
pass
def MFnNumericData_getData2Double(*args, **kwargs):
pass
def MArrayDataHandle_setClean(*args, **kwargs):
pass
def MFnCamera_postProjectionMatrix(*args, **kwargs):
pass
def MDGContext_isNormal(*args, **kwargs):
pass
def MItDag_root(*args, **kwargs):
pass
def MFnArrayAttrsData_list(*args, **kwargs):
pass
def MGlobal_isZAxisUp(*args, **kwargs):
pass
def MGlobal_removeOptionVar(*args, **kwargs):
pass
def MVectorArray_set(*args, **kwargs):
pass
def MFnDagNode_drawOverrideEnabled(*args, **kwargs):
pass
def MTrimBoundaryArray_remove(*args, **kwargs):
pass
def MRampAttribute_swigregister(*args, **kwargs):
pass
def MMeshIsectAccelParams_swigregister(*args, **kwargs):
pass
def MNamespace_namespaceExists(*args, **kwargs):
pass
def MPointArray_insert(*args, **kwargs):
pass
def MComputation_setProgress(*args, **kwargs):
pass
def MFnUint64SingleIndexedComponent_className(*args, **kwargs):
pass
def MCacheFormatDescription_getChannelStartTime(*args, **kwargs):
pass
def delete_MContainerMessage(*args, **kwargs):
pass
def MPlug_child(*args, **kwargs):
pass
def MFloatPoint_className(*args, **kwargs):
pass
def MFnDependencyNode_attribute(*args, **kwargs):
pass
def MDagPath_inclusiveMatrixInverse(*args, **kwargs):
pass
def MMessageNode_fSubClientPtr_set(*args, **kwargs):
pass
def MArgList_asPoint(*args, **kwargs):
pass
def MFnMesh_hasColorChannels(*args, **kwargs):
pass
def MDataHandle_asDouble3(*args, **kwargs):
pass
def MAttributeSpec___eq__(*args, **kwargs):
pass
def MScriptUtil_asInt2Ptr(*args, **kwargs):
pass
def MFloatArray_setSizeIncrement(*args, **kwargs):
pass
def MQuaternion_x_set(*args, **kwargs):
pass
def new_MTesselationParams(*args, **kwargs):
pass
def MQuaternion_log(*args, **kwargs):
pass
def MFnReference_isExportEditsFile(*args, **kwargs):
pass
def MFnComponentListData_swigregister(*args, **kwargs):
pass
def MFnGeometryData_swiginit(*args, **kwargs):
pass
def MUserEventMessage_swiginit(*args, **kwargs):
pass
def MDoubleArray___add__(*args, **kwargs):
pass
def MFnLambertShader_setRefractedRayDepthLimit(*args, **kwargs):
pass
def MEvaluationNodeIterator_swigregister(*args, **kwargs):
pass
def MFnCamera_cameraScale(*args, **kwargs):
pass
def MFnNurbsSurface_swigregister(*args, **kwargs):
pass
def MItMeshEdge_className(*args, **kwargs):
pass
def MItMeshVertex_updateSurface(*args, **kwargs):
pass
def MFnCamera_horizontalRollPivot(*args, **kwargs):
pass
def MIteratorType_setObjectType(*args, **kwargs):
pass
def MTransformationMatrix_setRotationQuaternion(*args, **kwargs):
pass
def MFnSubd_levelMaxCurrent(*args, **kwargs):
pass
def MArrayDataHandle_assign(*args, **kwargs):
pass
def MFnLight_opticalFXvisibility(*args, **kwargs):
pass
def MDagPath_swigregister(*args, **kwargs):
pass
def MProfiler_getColor(*args, **kwargs):
pass
def MFnAreaLight_className(*args, **kwargs):
pass
def MProfiler_getCPUId(*args, **kwargs):
pass
def MGlobal_setSelectionMode(*args, **kwargs):
pass
def MIffFile_beginReadGroup(*args, **kwargs):
pass
def MFnDagNode_transformationMatrix(*args, **kwargs):
pass
def MImage_haveDepth(*args, **kwargs):
pass
def MFnNurbsSurface_removeKnotInV(*args, **kwargs):
pass
def MQuaternion_setToXAxis(*args, **kwargs):
pass
def MMatrix_assign(*args, **kwargs):
pass
def MFnMesh_componentTypeName(*args, **kwargs):
pass
def MPointArray___getitem__(*args, **kwargs):
pass
def MArgParser_numberOfFlagUses(*args, **kwargs):
pass
def MQuaternion___getitem__(*args, **kwargs):
pass
def MBoundingBox_swiginit(*args, **kwargs):
pass
def MItMeshVertex_getColor(*args, **kwargs):
pass
def MItDependencyGraph_setCurrentFilter(*args, **kwargs):
pass
def MFloatPoint___eq__(*args, **kwargs):
pass
def new_MMessageNode(*args, **kwargs):
pass
def MFnCamera_getPortFieldOfView(*args, **kwargs):
pass
def MFnCamera_focalLength(*args, **kwargs):
pass
def MFnAmbientLight_setCastSoftShadows(*args, **kwargs):
pass
def MColor_set(*args, **kwargs):
pass
def MFnCompoundAttribute_getAddAttrCmds(*args, **kwargs):
pass
def MArgParser_flagArgumentString(*args, **kwargs):
pass
def MDataHandle_asAddr(*args, **kwargs):
pass
def MFnSpotLight_setStartDistance(*args, **kwargs):
pass
def MScriptUtil_getShort(*args, **kwargs):
pass
def MSyntax_setMaxObjects(*args, **kwargs):
pass
def MFnSubd_polygonHasChildren(*args, **kwargs):
pass
def MFnPhongEShader_swiginit(*args, **kwargs):
pass
def MFnBlinnShader_swigregister(*args, **kwargs):
pass
def MFnComponent_weight(*args, **kwargs):
pass
def delete_MUserData(*args, **kwargs):
pass
def MFnSet_clear(*args, **kwargs):
pass
def MDoubleArray_insert(*args, **kwargs):
pass
def MFnAttribute_parent(*args, **kwargs):
pass
def MEvaluationManager_swigregister(*args, **kwargs):
pass
def MFnNurbsSurface_numUVs(*args, **kwargs):
pass
def MFnTransform_getTranslation(*args, **kwargs):
pass
def MItMeshEdge_center(*args, **kwargs):
pass
def MFloatVectorArray_swiginit(*args, **kwargs):
pass
def MRenderPassDef_swigregister(*args, **kwargs):
pass
def MItGeometry_component(*args, **kwargs):
pass
def MFnMesh_getUVAtPoint(*args, **kwargs):
pass
def new_MFnSubdData(*args, **kwargs):
pass
def new_MFnLight(*args, **kwargs):
pass
def MDagPath_assign(*args, **kwargs):
pass
def delete_MFnDependencyNode(*args, **kwargs):
pass
def MGlobal_clearSelectionList(*args, **kwargs):
pass
def MFnExpression_expression(*args, **kwargs):
pass
def MFnDagNode_hasChild(*args, **kwargs):
pass
def MFnNurbsSurface_numNonZeroSpansInU(*args, **kwargs):
pass
def MItSubdVertex_isValid(*args, **kwargs):
pass
def new_MMeshIsectAccelParams(*args, **kwargs):
pass
def MFnMesh_extrudeFaces(*args, **kwargs):
pass
def MObject___eq__(*args, **kwargs):
pass
def MDataHandle_swigregister(*args, **kwargs):
pass
def MTypeId_id(*args, **kwargs):
pass
def MItSelectionList_className(*args, **kwargs):
pass
def MFileObject_swigregister(*args, **kwargs):
pass
def MItDag_swiginit(*args, **kwargs):
pass
def MFloatPointArray_swiginit(*args, **kwargs):
pass
def MMessageNode_fHeadNode_get(*args, **kwargs):
pass
def MFnVectorArrayData_className(*args, **kwargs):
pass
def MFnNumericData_setData2Short(*args, **kwargs):
pass
def MFnGeometryData_matrixIsNotIdentity(*args, **kwargs):
pass
def MURI_setFileName(*args, **kwargs):
pass
def new_MArgDatabase(*args, **kwargs):
pass
def MScriptUtil_setInt3ArrayItem(*args, **kwargs):
pass
def MDataHandle_isGeneric(*args, **kwargs):
pass
def new_intPtr(*args, **kwargs):
pass
def new_MFnSpotLight(*args, **kwargs):
pass
def MURI_getPassword(*args, **kwargs):
pass
def MDGModifier_undoIt(*args, **kwargs):
pass
def MAttributePatternArray_sizeIncrement(*args, **kwargs):
pass
def MMatrixArray_assign(*args, **kwargs):
pass
def MTesselationParams_setFitTolerance(*args, **kwargs):
pass
def MFnSubd_edgeVertices(*args, **kwargs):
pass
def MFnCamera_setVerticalPan(*args, **kwargs):
pass
def delete_MItDependencyNodes(*args, **kwargs):
pass
def MTime___iadd__(*args, **kwargs):
pass
def MFnUnitAttribute_setMin(*args, **kwargs):
pass
def MFnCamera_tumblePivot(*args, **kwargs):
pass
def delete_boolPtr(*args, **kwargs):
pass
def MURI_getAllQueryItemKeys(*args, **kwargs):
pass
def MDataHandle_asDouble(*args, **kwargs):
pass
def MFnPointArrayData_create(*args, **kwargs):
pass
def MFloatVector___call__(*args, **kwargs):
pass
def MFnNonExtendedLight_setDepthMapFilterSize(*args, **kwargs):
pass
def MDistance_setUIUnit(*args, **kwargs):
pass
def MFnBase_className(*args, **kwargs):
pass
def MFnAttribute_setKeyable(*args, **kwargs):
pass
def MNodeClass_getAttributes(*args, **kwargs):
pass
def MFnNurbsSurface_isTrimmedSurface(*args, **kwargs):
pass
def MFnTransform_className(*args, **kwargs):
pass
def MItMeshEdge_isSmooth(*args, **kwargs):
pass
def MObjectArray_copy(*args, **kwargs):
pass
def MTimerMessage_className(*args, **kwargs):
pass
def delete_MAttributeSpecArray(*args, **kwargs):
pass
def MItGeometry_setPosition(*args, **kwargs):
pass
def MFnMesh_setCurrentUVSetName(*args, **kwargs):
pass
def MFnStringArrayData_array(*args, **kwargs):
pass
def delete_MFnContainerNode(*args, **kwargs):
pass
def MProfiler_swigregister(*args, **kwargs):
pass
def MFloatVector_x_set(*args, **kwargs):
pass
def MGlobal_deleteNode(*args, **kwargs):
pass
def MFnNurbsSurface_setCVs(*args, **kwargs):
pass
def MFnComponent_componentType(*args, **kwargs):
pass
def MMatrix_inverse(*args, **kwargs):
pass
def MFnMeshData_swiginit(*args, **kwargs):
pass
def MAngle_setUnit(*args, **kwargs):
pass
def MDataHandle_asGenericShort(*args, **kwargs):
pass
def MItMeshPolygon_tangentIndex(*args, **kwargs):
pass
def MItSubdVertex_reset(*args, **kwargs):
pass
def MFileObject_resolvedFullName(*args, **kwargs):
pass
def MItDependencyGraph_isTraversingOverWorldSpaceDependents(*args, **kwargs):
pass
def MFloatPointArray___getitem__(*args, **kwargs):
pass
def new_MDagModifier(*args, **kwargs):
pass
def MArrayDataHandle_inputArrayValue(*args, **kwargs):
pass
def MFnGenericAttribute_create(*args, **kwargs):
pass
def MUintArray___radd__(*args, **kwargs):
pass
def MItCurveCV_currentItem(*args, **kwargs):
pass
def MScriptUtil_getUint3ArrayItem(*args, **kwargs):
pass
def MEulerRotation_order_get(*args, **kwargs):
pass
def MFnSingleIndexedComponent_getElements(*args, **kwargs):
pass
def MDGModifier_newPlugValueBool(*args, **kwargs):
pass
def MAttributeSpec_name(*args, **kwargs):
pass
def MNamespace_currentNamespace(*args, **kwargs):
pass
def MDagPath_className(*args, **kwargs):
pass
def MTesselationParams_setStdFractionalTolerance(*args, **kwargs):
pass
def MFnCamera_horizontalShake(*args, **kwargs):
pass
def MFnLambertShader_setIncandescence(*args, **kwargs):
pass
def MTime_unit(*args, **kwargs):
pass
def doublePtr_value(*args, **kwargs):
pass
def MURI_setPort(*args, **kwargs):
pass
def MMeshSmoothOptions_setDivisions(*args, **kwargs):
pass
def MFnPluginData_className(*args, **kwargs):
pass
def MDistance_assign(*args, **kwargs):
pass
def MFnAttribute_isExtension(*args, **kwargs):
pass
def MPointOnNurbs_getPoint(*args, **kwargs):
pass
def MAttributeIndex_setUpper(*args, **kwargs):
pass
def MFnDependencyNode_swiginit(*args, **kwargs):
pass
def MFnAssembly_getRepNamespace(*args, **kwargs):
pass
def MTime_className(*args, **kwargs):
pass
def MFnMesh_getVertexNormal(*args, **kwargs):
pass
def MRenderPassRegistry_swigregister(*args, **kwargs):
pass
def MFnDagNode_drawOverrideColor(*args, **kwargs):
pass
def MFnMesh_isPolygonConvex(*args, **kwargs):
pass
def new_MDagPath(*args, **kwargs):
pass
def MFnMesh_isEdgeSmooth(*args, **kwargs):
pass
def MFnTransform_rotationOrder(*args, **kwargs):
pass
def array3dFloat_swiginit(*args, **kwargs):
pass
def MFnComponent_setComplete(*args, **kwargs):
pass
def MSelectionList_swigregister(*args, **kwargs):
pass
def MFnGeometryData_getMatrix(*args, **kwargs):
pass
def MFloatVector___iadd__(*args, **kwargs):
pass
def new_MRampAttribute(*args, **kwargs):
pass
def MFnNurbsSurfaceData_className(*args, **kwargs):
pass
def MItSubdVertex_index(*args, **kwargs):
pass
def MFnDoubleIndexedComponent_setCompleteData(*args, **kwargs):
pass
def delete_MFnMatrixData(*args, **kwargs):
pass
def MVector_z_get(*args, **kwargs):
pass
def MDataHandle_set2Short(*args, **kwargs):
pass
def MURI_removeQueryItem(*args, **kwargs):
pass
def MItMeshFaceVertex_className(*args, **kwargs):
pass
def MItSelectionList_isDone(*args, **kwargs):
pass
def MFileIO_swiginit(*args, **kwargs):
pass
def MItMeshPolygon_getEdges(*args, **kwargs):
pass
def MFnCamera_computeDepthOfField(*args, **kwargs):
pass
def MFnExpression_swiginit(*args, **kwargs):
pass
def MArrayDataHandle_jumpToElement(*args, **kwargs):
pass
def MUintArray_insert(*args, **kwargs):
pass
def MURI_removeAllQueryItems(*args, **kwargs):
pass
def MInt64Array_remove(*args, **kwargs):
pass
def MQuaternion_setAxisAngle(*args, **kwargs):
pass
def MEulerRotation_boundIt(*args, **kwargs):
pass
def MFnSet_intersectsWith(*args, **kwargs):
pass
def new_MDGModifier(*args, **kwargs):
pass
def MIntArray_className(*args, **kwargs):
pass
def MMatrixArray_swiginit(*args, **kwargs):
pass
def MStreamUtils_writeCharBuffer(*args, **kwargs):
pass
def MFnCamera_horizontalFilmAperture(*args, **kwargs):
pass
def MPointArray_swigregister(*args, **kwargs):
pass
def MURI_isEmpty(*args, **kwargs):
pass
def delete_MFnTypedAttribute(*args, **kwargs):
pass
def uIntPtr_swiginit(*args, **kwargs):
pass
def MMeshIntersector_getClosestPoint(*args, **kwargs):
pass
def MFnPhongEShader_highlightSize(*args, **kwargs):
pass
def MFnAnisotropyShader_setCorrelationX(*args, **kwargs):
pass
def MNurbsIntersector_getIntersects(*args, **kwargs):
pass
def MFnDependencyNode_getExternalContent(*args, **kwargs):
pass
def MFnAssembly_getTopLevelAssemblies(*args, **kwargs):
pass
def MFnPluginData_create(*args, **kwargs):
pass
def MFnMesh_getDoubleBlindData(*args, **kwargs):
pass
def MPlugArray_swiginit(*args, **kwargs):
pass
def MFnMesh_getVertices(*args, **kwargs):
pass
def MRampAttribute_className(*args, **kwargs):
pass
def MDAGDrawOverrideInfo_fDisplayType_set(*args, **kwargs):
pass
def MScriptUtil_asShort(*args, **kwargs):
pass
def new_array3dFloat(*args, **kwargs):
pass
def MFloatMatrix___ne__(*args, **kwargs):
pass
def MSelectionMask_swiginit(*args, **kwargs):
pass
def MFloatVectorArray_get(*args, **kwargs):
pass
def MFloatArray_sizeIncrement(*args, **kwargs):
pass
def MURI_clear(*args, **kwargs):
pass
def MQuaternion_invertIt(*args, **kwargs):
pass
def MCacheFormatDescription_getChannelName(*args, **kwargs):
pass
def MFnLayeredShader_hardwareColor(*args, **kwargs):
pass
def MFnDoubleArrayData___getitem__(*args, **kwargs):
pass
def MFnMatrixArrayData_length(*args, **kwargs):
pass
def MVector_rotateBy(*args, **kwargs):
pass
def MDataHandle_asPluginData(*args, **kwargs):
pass
def MVectorArray_insert(*args, **kwargs):
pass
def MFileIO_isSavingReference(*args, **kwargs):
pass
def MItMeshPolygon_getNormal(*args, **kwargs):
pass
def MFileIO_resetError(*args, **kwargs):
pass
def MItSubdVertex_swigregister(*args, **kwargs):
pass
def MFnFloatArrayData___getitem__(*args, **kwargs):
pass
def MArgParser_isQuery(*args, **kwargs):
pass
def MFnCamera_setNearClippingPlane(*args, **kwargs):
pass
def MArrayDataHandle_elementIndex(*args, **kwargs):
pass
def MDGMessage_addNodeChangeUuidCheckCallback(*args, **kwargs):
pass
def MInt64Array_assign(*args, **kwargs):
pass
def MFnAttribute_isReadable(*args, **kwargs):
pass
def MEulerRotation___sub__(*args, **kwargs):
pass
def MFnAnisotropyShader_rotateAngle(*args, **kwargs):
pass
def MMeshSmoothOptions_setBoundaryRule(*args, **kwargs):
pass
def MImageFileInfo_hasMipMaps(*args, **kwargs):
pass
def MImage_writeToFile(*args, **kwargs):
pass
def MModelMessage_addCallback(*args, **kwargs):
pass
def MFnNumericData_create(*args, **kwargs):
pass
def MFnCamera_upDirection(*args, **kwargs):
pass
def MPlane_setPlane(*args, **kwargs):
pass
def MTesselationParams_setUNumber(*args, **kwargs):
pass
def MFnUint64SingleIndexedComponent_element(*args, **kwargs):
pass
def floatPtr_frompointer(*args, **kwargs):
pass
def MCallbackIdArray_setSizeIncrement(*args, **kwargs):
pass
def charPtr_frompointer(*args, **kwargs):
pass
def MFnPointArrayData_className(*args, **kwargs):
pass
def MFnAmbientLight_castSoftShadows(*args, **kwargs):
pass
def MPlug_setInt64(*args, **kwargs):
pass
def MNurbsIntersector_swigregister(*args, **kwargs):
pass
def MFnDependencyNode_deallocateFlag(*args, **kwargs):
pass
def MFnLambertShader_refractiveIndex(*args, **kwargs):
pass
def MFnNurbsCurve_normal(*args, **kwargs):
pass
def MFnMesh_clearColors(*args, **kwargs):
pass
def MPlugArray_length(*args, **kwargs):
pass
def MArgList_addArg(*args, **kwargs):
pass
def MFnMesh_cachedIntersectionAcceleratorInfo(*args, **kwargs):
pass
def MQuaternion_conjugateIt(*args, **kwargs):
pass
def MDagPathArray_setLength(*args, **kwargs):
pass
def MScriptUtil_asDouble(*args, **kwargs):
pass
def delete_MFloatMatrix(*args, **kwargs):
pass
def MSceneMessage_addConnectionFailedCallback(*args, **kwargs):
pass
def MRampAttribute_isCurveRamp(*args, **kwargs):
pass
def MFnDirectionalLight_create(*args, **kwargs):
pass
def MFnLatticeData_lattice(*args, **kwargs):
pass
def MFnFloatArrayData_create(*args, **kwargs):
pass
def MFnCameraSet_setLayerSceneData(*args, **kwargs):
pass
def MVector_assign(*args, **kwargs):
pass
def intPtr_swigregister(*args, **kwargs):
pass
def MFileIO_exportSelectedAnimFromReference(*args, **kwargs):
pass
def MItMeshPolygon_hasValidTriangulation(*args, **kwargs):
pass
def MItMeshFaceVertex_getNormal(*args, **kwargs):
pass
def MItMeshVertex_getConnectedFaces(*args, **kwargs):
pass
def MFnAnisotropyShader_anisotropicReflectivity(*args, **kwargs):
pass
def MFnCamera_setDisplayGateMask(*args, **kwargs):
pass
def MTrimBoundaryArray_clear(*args, **kwargs):
pass
def MFnSubd_vertexBaseMeshGetWithId(*args, **kwargs):
pass
def MAttributeIndex_className(*args, **kwargs):
pass
def MDGContext_getTime(*args, **kwargs):
pass
def MItCurveCV_reset(*args, **kwargs):
pass
def MFnArrayAttrsData_checkArrayExist(*args, **kwargs):
pass
def MFnCamera_projectionMatrix(*args, **kwargs):
pass
def MGlobal_setZAxisUp(*args, **kwargs):
pass
def MGlobal_errorLogPathName(*args, **kwargs):
pass
def MFnDagNode_drawOverrideIsReference(*args, **kwargs):
pass
def MImage_className(*args, **kwargs):
pass
def MFnCamera_setClippingPlanes(*args, **kwargs):
pass
def MFnPointArrayData_set(*args, **kwargs):
pass
def MItMeshPolygon_index(*args, **kwargs):
pass
def MFnMesh_componentTypeFromName(*args, **kwargs):
pass
def MPointArray_setLength(*args, **kwargs):
pass
def MComputation_progress(*args, **kwargs):
pass
def MFnSubdNames_levelOneFaceIndexFromId(*args, **kwargs):
pass
def MCacheFormatDescription_getChannelEndTime(*args, **kwargs):
pass
def MContainerMessage_swigregister(*args, **kwargs):
pass
def MPlug_isConnected(*args, **kwargs):
pass
def MFloatPoint_swigregister(*args, **kwargs):
pass
def MFnDependencyNode_attributeClass(*args, **kwargs):
pass
def MFloatPoint_x_set(*args, **kwargs):
pass
def MMessageNode_fSubClientPtr_get(*args, **kwargs):
pass
def MArgDatabase_getFlagArgument(*args, **kwargs):
pass
def MFnMesh_hasAlphaChannels(*args, **kwargs):
pass
def MDataHandle_asVector(*args, **kwargs):
pass
def MAttributeSpec_swigregister(*args, **kwargs):
pass
def MScriptUtil_setFloatArray(*args, **kwargs):
pass
def MProfiler_getEventDuration(*args, **kwargs):
pass
def delete_MAttributeIndex(*args, **kwargs):
pass
def MFnSubd_updateSubdSurface(*args, **kwargs):
pass
def MQuaternion___imul__(*args, **kwargs):
pass
def MFnReference_swigregister(*args, **kwargs):
pass
def MFnComponentListData_swiginit(*args, **kwargs):
pass
def delete_MFnIntArrayData(*args, **kwargs):
pass
def new_MUuid(*args, **kwargs):
pass
def MFnDirectionalLight_useLightPosition(*args, **kwargs):
pass
def MVectorArray_setLength(*args, **kwargs):
pass
def MQuaternion___eq__(*args, **kwargs):
pass
def MEvaluationNodeIterator_swiginit(*args, **kwargs):
pass
def MFnNurbsSurface_swiginit(*args, **kwargs):
pass
def MItMeshEdge_swigregister(*args, **kwargs):
pass
def MItMeshPolygon_zeroUVArea(*args, **kwargs):
pass
def MTransformationMatrix_setShear(*args, **kwargs):
pass
def MFnCamera_setVerticalRollPivot(*args, **kwargs):
pass
def MItEdits_reset(*args, **kwargs):
pass
def MTransformationMatrix_translation(*args, **kwargs):
pass
def MFnSubd_levelMaxAllowed(*args, **kwargs):
pass
def MArrayDataBuilder_assign(*args, **kwargs):
pass
def MFnPointArrayData_swiginit(*args, **kwargs):
pass
def MFnLight_setOpticalFXvisibility(*args, **kwargs):
pass
def MDagPath_swiginit(*args, **kwargs):
pass
def new_MFnAreaLight(*args, **kwargs):
pass
def MProfiler_getDescription(*args, **kwargs):
pass
def MIffTag_swigregister(*args, **kwargs):
pass
def MTime___sub__(*args, **kwargs):
pass
def MIntArray_sizeIncrement(*args, **kwargs):
pass
def MMatrix_isSingular(*args, **kwargs):
pass
def MFnNumericAttribute_child(*args, **kwargs):
pass
def MPlug_className(*args, **kwargs):
pass
def new_MFnNonAmbientLight(*args, **kwargs):
pass
def MArgParser_flagArgumentBool(*args, **kwargs):
pass
def MCommandResult_resultType(*args, **kwargs):
pass
def array3dDouble_swigregister(*args, **kwargs):
pass
def MParentingEdit_parent(*args, **kwargs):
pass
def MCacheFormatDescription_setDistribution(*args, **kwargs):
pass
def delete_MItSubdVertex(*args, **kwargs):
pass
def MItDag_fullPathName(*args, **kwargs):
pass
def MPlug_setDouble(*args, **kwargs):
pass
def MFloatPoint___ne__(*args, **kwargs):
pass
def MFnDagNode_isIntermediateObject(*args, **kwargs):
pass
def delete_MFnNurbsSurface(*args, **kwargs):
pass
def MFnNurbsCurveData_swiginit(*args, **kwargs):
pass
def MDataHandle_asShort2(*args, **kwargs):
pass
def MFnGenericAttribute_addNumericDataAccept(*args, **kwargs):
pass
def delete_charPtr(*args, **kwargs):
pass
def MArgParser_commandArgumentMAngle(*args, **kwargs):
pass
def MDataHandle_asFloat(*args, **kwargs):
pass
def MFnSpotLight_endDistance(*args, **kwargs):
pass
def MAttributePatternArray_assign(*args, **kwargs):
pass
def MScriptUtil_getFloat(*args, **kwargs):
pass
def MSyntax_className(*args, **kwargs):
pass
def MFnSubd_polygonChildren(*args, **kwargs):
pass
def delete_MFnCompoundAttribute(*args, **kwargs):
pass
def MFnBlinnShader_swiginit(*args, **kwargs):
pass
def MFnComponent_setWeight(*args, **kwargs):
pass
def MUserData_deleteAfterUse(*args, **kwargs):
pass
def MFnSet_getMembers(*args, **kwargs):
pass
def MDoubleArray_append(*args, **kwargs):
pass
def MFnAttribute_name(*args, **kwargs):
pass
def delete_MEvaluationNode(*args, **kwargs):
pass
def MFnNurbsSurface_setUVs(*args, **kwargs):
pass
def MFnTransform_setTranslation(*args, **kwargs):
pass
def MItInstancer_instancerPath(*args, **kwargs):
pass
def MQuaternion_setToZAxis(*args, **kwargs):
pass
def MTransformationMatrix_addScale(*args, **kwargs):
pass
def MRenderPassDef_addFloatParameter(*args, **kwargs):
pass
def delete_MIteratorType(*args, **kwargs):
pass
def MFnMesh_getAxisAtPoint(*args, **kwargs):
pass
def MFnSubdData_create(*args, **kwargs):
pass
def MFnLight_color(*args, **kwargs):
pass
def MDagPath___eq__(*args, **kwargs):
pass
def MProfiler_setCategoryRecording(*args, **kwargs):
pass
def MFnDependencyNode_className(*args, **kwargs):
pass
def MGlobal_setMiscSelectionMask(*args, **kwargs):
    pass
import torch
from torch import nn
from torch.nn import functional as F
from torch.nn.modules.utils import _pair
from detectron2.layers.wrappers import _NewEmptyTensorOp
from ..spatial_conv import _SpatialConv
__all__ = [
"R4ConvF", "R4Conv",#"R4ConvL", "R4TConv", "ginterpolate"
]
spatial_conv = _SpatialConv.apply
class GConvF(nn.Module):
def __init__(
self,
in_channels,
out_channels,
kernel_type="3x3",
stride=1,
padding=0,
dilation=1,
bias=False,
norm=None,
activation=None,
):
"""
        kernel_type (str): "skel" or "3x3" (any other value selects the 3-tap kernel)
norm (nn.Module, optional): a normalization layer
activation (callable(Tensor) -> Tensor): a callable activation function
"""
super(GConvF, self).__init__()
assert not bias
self.in_channels = in_channels
self.out_channels = out_channels
self.stride = _pair(stride)
self.padding = _pair(padding)
self.dilation = _pair(dilation)
self.norm = norm
self.activation = activation
if kernel_type == "skel":
self.kernel_type = 0
self.num_kernel = 5
elif kernel_type == "3x3":
self.kernel_type = 1
self.num_kernel = 9
else:
self.kernel_type = 2
self.num_kernel = 3
self.weight = nn.Parameter(
torch.Tensor(out_channels, in_channels, self.num_kernel), requires_grad=True
)
self.bias = None
nn.init.kaiming_uniform_(self.weight, nonlinearity="relu")
def forward(self, x):
if x.numel() == 0:
output_shape = [
(i + 2 * p - (di * (k - 1) + 1)) // s + 1
for i, p, di, k, s in zip(
x.shape[-2:], self.padding, self.dilation, (3, 3), self.stride
)
]
output_shape = [x.shape[0], self.weight.shape[0]] + output_shape + [4]
return _NewEmptyTensorOp.apply(x, output_shape)
x_1 = spatial_conv(x, self.weight, self.kernel_type, self.num_kernel, 0, self.stride, self.padding, self.dilation).sum(0)
x_2 = spatial_conv(x, self.weight, self.kernel_type, self.num_kernel, 2, self.stride, self.padding, self.dilation).sum(0)
x_3 = spatial_conv(x, self.weight, self.kernel_type, self.num_kernel, 4, self.stride, self.padding, self.dilation).sum(0)
x_4 = spatial_conv(x, self.weight, self.kernel_type, self.num_kernel, 6, self.stride, self.padding, self.dilation).sum(0)
if self.norm != None:
x_1 = self.norm(x_1)
x_2 = self.norm(x_2)
x_3 = self.norm(x_3)
x_4 = self.norm(x_4)
x_out = torch.stack([x_1, x_2, x_3, x_4], dim=4)
if self.activation is not None:
x_out = self.activation(x_out)
return x_out
def extra_repr(self):
tmpstr = "in_channels=" + str(self.in_channels)
tmpstr += ", out_channels=" + str(self.out_channels)
tmpstr += ", kernel_type=" + str(self.kernel_type)
tmpstr += ", stride=" + str(self.stride)
tmpstr += ", padding=" + str(self.padding)
tmpstr += ", dilation=" + str(self.dilation)
tmpstr += ", bias=False"
return tmpstr
class GConv(nn.Module):
def __init__(
self,
in_channels,
out_channels,
kernel_type="3x3",
stride=1,
padding=0,
dilation=1,
bias=False,
norm=None,
activation=None,
with_1x1=False,
):
"""
kernel_type (str): "skel", "3x3", or "ex-skel" (any other value selects the 3-element fallback kernel)
norm (nn.Module, optional): a normalization layer
activation (callable(Tensor) -> Tensor): a callable activation function
"""
super(GConv, self).__init__()
assert not bias
self.in_channels = in_channels
self.out_channels = out_channels
self.stride = _pair(stride)
self.padding = _pair(padding)
self.dilation = _pair(dilation)
self.norm = norm
self.activation = activation
self.with_1x1 = with_1x1
in_channels = in_channels if with_1x1 else 4 * in_channels
if kernel_type == "skel":
self.kernel_type = 0
self.num_kernel = 5
elif kernel_type == "3x3":
self.kernel_type = 1
self.num_kernel = 9
else:
self.kernel_type = 2
self.num_kernel = 3
self.weight = nn.Parameter(
torch.Tensor(out_channels, in_channels, self.num_kernel), requires_grad=True
)
self.bias = None
nn.init.kaiming_uniform_(self.weight, nonlinearity="relu")
def forward(self, x_tensor):
if x_tensor.numel() == 0:
output_shape = [
(i + 2 * p - (di * (k - 1) + 1)) // s + 1
for i, p, di, k, s in zip(
x_tensor.shape[-2:], self.padding, self.dilation, (3, 3), self.stride
)
]
output_shape = [x_tensor.shape[0], self.weight.shape[0]] + output_shape + [4]
return _NewEmptyTensorOp.apply(x_tensor, output_shape)
if self.with_1x1:
x_1 = x_tensor[:, :, :, :, 0]
x_2 = x_tensor[:, :, :, :, 1]
x_3 = x_tensor[:, :, :, :, 2]
x_4 = x_tensor[:, :, :, :, 3]
else:
x_in = [x_tensor[:, :, :, :, 0], x_tensor[:, :, :, :, 1], x_tensor[:, :, :, :, 2], x_tensor[:, :, :, :, 3]]
x_1 = torch.cat([x_in[0], x_in[1], x_in[2], x_in[3]], dim=1)
x_2 = torch.cat([x_in[1], x_in[2], x_in[3], x_in[0]], dim=1)
x_3 = torch.cat([x_in[2], x_in[3], x_in[0], x_in[1]], dim=1)
x_4 = torch.cat([x_in[3], x_in[0], x_in[1], x_in[2]], dim=1)
x_1 = spatial_conv(x_1, self.weight, self.kernel_type, self.num_kernel, 0, self.stride, self.padding, self.dilation).sum(0)
x_2 = spatial_conv(x_2, self.weight, self.kernel_type, self.num_kernel, 2, self.stride, self.padding, self.dilation).sum(0)
x_3 = spatial_conv(x_3, self.weight, self.kernel_type, self.num_kernel, 4, self.stride, self.padding, self.dilation).sum(0)
x_4 = spatial_conv(x_4, self.weight, self.kernel_type, self.num_kernel, 6, self.stride, self.padding, self.dilation).sum(0)
if self.norm != None:
x_1 = self.norm(x_1)
x_2 = self.norm(x_2)
x_3 = self.norm(x_3)
x_4 = self.norm(x_4)
x_out = torch.stack([x_1, x_2, x_3, x_4], dim=4)
if self.activation is not None:
x_out = self.activation(x_out)
return x_out
def extra_repr(self):
tmpstr = "in_channels=" + str(self.in_channels)
tmpstr += ", out_channels=" + str(self.out_channels)
tmpstr += ", kernel_type=" + str(self.kernel_type)
tmpstr += ", stride=" + str(self.stride)
tmpstr += ", padding=" + str(self.padding)
tmpstr += ", dilation=" + str(self.dilation)
tmpstr += ", bias=False"
return tmpstr
class GConvL(nn.Module):
def __init__(
self,
in_channels,
out_channels,
kernel_size,
groups,
stride=1,
padding=0,
dilation=1,
bias=False,
norm=None,
activation=None,
):
super(GConvL, self).__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.kernel_size = _pair(kernel_size)
self.stride = _pair(stride)
self.groups = groups
self.with_bias = bias
self.padding = padding
self.dilations = dilation
self.norm = norm
self.activation = activation
self.weight = nn.Parameter(torch.Tensor(out_channels, groups * in_channels, *self.kernel_size))
if bias:
self.bias = nn.Parameter(torch.Tensor(out_channels))
else:
self.bias = None
nn.init.kaiming_uniform_(self.weight, nonlinearity="relu")
if self.bias is not None:
nn.init.constant_(self.bias, 0)
def forward(self, x_tensor):
x_in = [x_tensor[:, :, :, :, 0], x_tensor[:, :, :, :, 1], x_tensor[:, :, :, :, 2], x_tensor[:, :, :, :, 3]]
x = torch.cat([x_in[0], x_in[1], x_in[2], x_in[3]], dim=1)
x = F.conv2d(x, weight=self.weight, bias=self.bias, stride=self.stride, padding=self.padding, dilation=self.dilations)
if self.norm != None:
x = self.norm(x)
if self.activation != None:
x = self.activation(x)
return x
def extra_repr(self):
tmpstr = "in_channels=" + str(self.in_channels)
tmpstr += ", out_channels=" + str(self.out_channels)
tmpstr += ", kernel_size=" + str(self.kernel_size)
tmpstr += ", stride=" + str(self.stride)
tmpstr += ", padding=" + str(self.padding)
tmpstr += ", dilation=" + str(self.dilations)
tmpstr += ", groups=" + str(self.groups)
tmpstr += ", bias=" + str(self.with_bias)
return tmpstr
class R4ConvF(GConvF):
def __init__(
self,
in_channels,
out_channels,
kernel_type="3x3",
stride=1,
padding=0,
dilation=1,
bias=False,
norm=None,
activation=None,
):
super(R4ConvF, self).__init__(
in_channels,
out_channels,
kernel_type,
stride,
padding,
dilation,
bias,
norm,
activation)
class R4Conv(GConv):
def __init__(
self,
in_channels,
out_channels,
kernel_type="3x3",
stride=1,
padding=0,
dilation=1,
bias=False,
norm=None,
activation=None,
with_1x1=False,
):
super(R4Conv, self).__init__(
in_channels,
out_channels,
kernel_type,
stride,
padding,
dilation,
bias,
norm,
activation,
with_1x1)
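# ---------------------------------------------------------------------------
# Hedged illustrative sketch: the classes above obtain C4 (0/90/180/270 degree)
# rotation equivariance through the custom _SpatialConv op. The helper below
# shows the same idea with plain F.conv2d on an ordinary [N, C, H, W] input,
# by convolving with four rotated copies of one shared 3x3 weight and stacking
# the results along a trailing orientation axis, as in the commented-out GTConv
# further below. Treat it as an approximation for experimentation, not as the
# authors' CUDA kernel.
def _c4_lifting_sketch(x, weight, stride=1, padding=1, dilation=1):
    """Return a [N, C_out, H', W', 4] tensor, one slice per 90-degree rotation."""
    outs = []
    for r in range(4):
        w_r = weight.rot90(r, [2, 3])  # rotate the spatial dims of the kernel
        outs.append(F.conv2d(x, w_r, stride=stride, padding=padding, dilation=dilation))
    return torch.stack(outs, dim=4)
# Example with hypothetical shapes: weight = torch.randn(16, 3, 3, 3) and
# x = torch.randn(2, 3, 32, 32) give _c4_lifting_sketch(x, weight) of shape
# [2, 16, 32, 32, 4].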
# class R4ConvL(GConvL):
# def __init__(
# self,
# in_channels,
# out_channels,
# kernel_size,
# stride=1,
# padding=0,
# dilation=1,
# groups=4,
# bias=False,
# norm=None,
# activation=None,
# ):
# super(R4ConvL, self).__init__(
# in_channels,
# out_channels,
# kernel_size,
# groups,
# stride,
# padding,
# dilation,
# bias,
# norm,
# activation)
#
# class GTConv(nn.Module):
# def __init__(
# self,
# in_channels,
# out_channels,
# kernel_size,
# groups,
# stride=1,
# paddings=0,
# dilations=1,
# num_branch=1,
# test_branch_idx=-1,
# bias=False,
# norm=None,
# activation=None,
# ):
# super(GTConv, self).__init__()
# self.in_channels = in_channels
# self.out_channels = out_channels
# self.kernel_size = _pair(kernel_size)
# self.num_branch = num_branch
# self.stride = _pair(stride)
# self.groups = groups
# self.with_bias = bias
# if isinstance(paddings, int):
# paddings = [paddings] * self.num_branch
# if isinstance(dilations, int):
# dilations = [dilations] * self.num_branch
# self.paddings = [_pair(padding) for padding in paddings]
# self.dilations = [_pair(dilation) for dilation in dilations]
# self.test_branch_idx = test_branch_idx
# self.norm = norm
# self.activation = activation
#
# assert len({self.num_branch, len(self.paddings), len(self.dilations)}) == 1
#
# self.weight = nn.Parameter(torch.Tensor(out_channels, groups * in_channels, *self.kernel_size))
#
# if bias:
# self.bias = nn.Parameter(torch.Tensor(out_channels))
# else:
# self.bias = None
#
# nn.init.kaiming_uniform_(self.weight, nonlinearity="relu")
# if self.bias is not None:
# nn.init.constant_(self.bias, 0)
#
# def forward(self, inputs):
# num_branch = self.num_branch if self.training or self.test_branch_idx == -1 else 1
# assert len(inputs) == num_branch
#
# if inputs[0].numel() == 0:
# output_shape = [
# (i + 2 * p - (di * (k - 1) + 1)) // s + 1
# for i, p, di, k, s in zip(
# inputs[0].shape[-2:], self.padding, self.dilation, self.kernel_size, self.stride
# )
# ]
# output_shape = [inputs[0].shape[0], self.weight.shape[0]] + output_shape
# return [_NewEmptyTensorOp.apply(input, output_shape) for input in inputs]
#
# if self.training or self.test_branch_idx == -1:
# outputs = []
# for x_tensor, dilation, padding in zip(inputs, self.dilations, self.paddings):
# x_in = [x_tensor[:, :, :, :, 0], x_tensor[:, :, :, :, 1], x_tensor[:, :, :, :, 2],
# x_tensor[:, :, :, :, 3]]
#
# weight_1 = self.weight
# weight_2 = self.weight.rot90(1, [2, 3])
# weight_3 = self.weight.rot90(2, [2, 3])
# weight_4 = self.weight.rot90(3, [2, 3])
#
# x_1 = torch.cat([x_in[0], x_in[1], x_in[2], x_in[3]], dim=1)
# x_2 = torch.cat([x_in[1], x_in[2], x_in[3], x_in[0]], dim=1)
# x_3 = torch.cat([x_in[2], x_in[3], x_in[0], x_in[1]], dim=1)
# x_4 = torch.cat([x_in[3], x_in[0], x_in[1], x_in[2]], dim=1)
#
# x_1 = F.conv2d(x_1, weight=weight_1, bias=self.bias, stride=self.stride, padding=padding,
# dilation=dilation)
# x_2 = F.conv2d(x_2, weight=weight_2, bias=self.bias, stride=self.stride, padding=padding,
# dilation=dilation)
# x_3 = F.conv2d(x_3, weight=weight_3, bias=self.bias, stride=self.stride, padding=padding,
# dilation=dilation)
# x_4 = F.conv2d(x_4, weight=weight_4, bias=self.bias, stride=self.stride, padding=padding,
# dilation=dilation)
#
# if self.norm != None:
# x_1 = self.norm(x_1)
# x_2 = self.norm(x_2)
# x_3 = self.norm(x_3)
# x_4 = self.norm(x_4)
#
# x_out = torch.stack([x_1, x_2, x_3, x_4], dim=4)
#
# if self.activation != None:
# x_out = self.activation(x_out)
# outputs.append(x_out)
#
# else:
# outputs = []
# x_tensor = inputs[0]
# x_in = [x_tensor[:, :, :, :, 0], x_tensor[:, :, :, :, 1], x_tensor[:, :, :, :, 2],
# x_tensor[:, :, :, :, 3]]
#
# weight_1 = self.weight
values (default: %(default)s)')
parser.add_argument('--synthesis-bool-only', action='store_true',
help='synthesize only boolean expressions (default: %(default)s)')
parser.add_argument('--max-z3-trials', metavar='NUM', type=int, default=2,
help='maximum Z3 trials when using SemFix synthesizer (default: %(default)s)')
parser.add_argument('--dump-only', action='store_true',
help='dump actual outputs for given tests (default: %(default)s)')
parser.add_argument('--synthesis-only', metavar="FILE", default=None,
help='synthesize and validate patch from angelic forest (default: %(default)s)')
parser.add_argument('--invalid-localization', action='store_true',
help='[deprecated] use tests that fail in golden version for localization (default: %(default)s)')
parser.add_argument('--verbose', action='store_true',
help='print compilation and KLEE messages (default: %(default)s)')
parser.add_argument('--mute-config-message', action='store_true',
help='mute configure message (default: %(default)s)')
parser.add_argument('--mute-build-message', action='store_true',
help='mute build message (default: %(default)s)')
parser.add_argument('--quiet', action='store_true',
help='print only errors (default: %(default)s)')
parser.add_argument('--gobble-klee-message', action='store_true',
help='Gobble klee message (default: %(default)s)')
parser.add_argument('--mute-test-message', action='store_true',
help='mute test message (default: %(default)s)')
parser.add_argument('--show-test-message', action='store_true',
help='show test message (default: %(default)s)')
parser.add_argument('--show-oracle-contents', action='store_true',
help='show the contents of the oracle (default: %(default)s)')
parser.add_argument('--show-syn-message', action='store_true',
help='show synthesis message (default: %(default)s)')
parser.add_argument('--mute-warning', action='store_true',
help='mute warning message (default: %(default)s)')
parser.add_argument('--ignore-lines', action='store_true',
help='[deprecated] ignore --lines options (default: %(default)s)')
parser.add_argument('--ignore-instrument', action='store_true',
help='default: %(default)s')
parser.add_argument('--ignore-unmatched-execution', action='store_true',
help='default: %(default)s')
parser.add_argument('--all-suspicious', action='store_true',
help='consider all suspicious locations (default: %(default)s)')
parser.add_argument('--show-suspicious-locations', action='store_true',
help='show all suspicious locations and their scores \
(default: %(default)s)')
parser.add_argument('--tests-summary', action='store_true',
help='run validation and golden tests and summarize the tests (default: %(default)s)')
parser.add_argument('--compilation-db-file',
help='Use the provided compilation db file')
parser.add_argument('--keep-angelix-dir', action='store_true',
help='keep .angelix dir (default: %(default)s)')
parser.add_argument('--skip-configure', action='store_true',
help='skip configure (default: %(default)s)')
parser.add_argument('--skip-build', action='store_true',
help='skip build (default: %(default)s)')
parser.add_argument('--angelic-search-strategy', metavar='STRATEGY', default='guided',
choices=ANGELIC_SEARCH_STRATEGIES,
help='angelic search strategy. choices: '
+ ', '.join(ANGELIC_SEARCH_STRATEGIES))
parser.add_argument('--step-method', metavar='STEP', default='metropolis',
choices=STEP_METHODS,
help='step method. choices: '
+ ', '.join(STEP_METHODS))
parser.add_argument('--search-max-trials', metavar='NUM', type=int, default=100,
help='max number of search trials (default: %(default)s)')
parser.add_argument('--max-same-cost-iter', metavar='NUM', type=int, default=20,
help='possible max iteration of the same cost (default: %(default)s)')
parser.add_argument('--one-bit-flip-prob', metavar='NUM', type=float, default=0.5,
help='probability that one bit flips (default: %(default)s)')
parser.add_argument('--mcmc-beta', metavar='NUM', type=float, default=0.8,
help='MCMC beta (default: %(default)s)')
parser.add_argument('--chunk-bits', metavar='NUM', type=int, default=32,
help='the number of bits for a chunk (default: %(default)s)')
parser.add_argument('--max-bits', metavar='NUM', type=int, default=100,
help='max number of bits (default: %(default)s)')
parser.add_argument('--max-resample', metavar='NUM', type=int, default=3,
help='max resample (default: %(default)s)')
parser.add_argument('--block-expand-factor', metavar='NUM', type=float, default=2,
help='block expand factor (default: %(default)s)')
parser.add_argument('--inc-fix', action='store_true', default=False,
help='allow incremental fix (default: %(default)s)')
parser.add_argument('--fixed-bv-len', action='store_true', default=False,
help='use a fixed bitvector length (default: %(default)s)')
parser.add_argument('--max-syn-attempts', metavar='NUM', type=float, default=2,
help='maximum synthesis attempts (default: %(default)s)')
parser.add_argument('--max-repair-attempts', metavar='NUM', type=float, default=2,
help='maximum repair attempts for each location group \
(default: %(default)s)')
parser.add_argument('--default-max-cost', metavar='NUM', type=float, default=2,
help='default max cost (default: %(default)s)')
parser.add_argument('--error-cost', metavar='NUM', type=str, default="0",
help='error cost (default: %(default)s)')
parser.add_argument('--warning-cost', metavar='NUM', type=str, default="0",
help='warning cost (default: %(default)s)')
parser.add_argument('--penalty1', metavar='NUM', type=str, default="1",
help='penalty1 (default: %(default)s)')
parser.add_argument('--penalty2', metavar='NUM', type=str, default="1",
help='penalty2 (default: %(default)s)')
parser.add_argument('--default-non-zero-cost', metavar='NUM', type=str, default="1",
help='Used when test fails to generate non-zero cost. \
(default: %(default)s)')
parser.add_argument('--timeout-cost', metavar='NUM', type=str, default="1",
help='cost used when timeout occurs. \
(default: %(default)s)')
parser.add_argument('--log', metavar='LOG', default=None,
choices=['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'],
help='set the logging level')
parser.add_argument('--parallel-testing', action='store_true',
help='perform testing in parallel (default: %(default)s)')
parser.add_argument('--mutate', metavar='NUM', type=str, default="0",
help='mutate the golden version (default: %(default)s)')
parser.add_argument('--epsilon', metavar='NUM', type=float, default="1",
help='cost difference less than epsilon is considered the same.\
(default: %(default)s)')
parser.add_argument('--default-susp-score', metavar='NUM', type=float, default="0.5",
help='default suspiciousness score.\
(default: %(default)s)')
parser.add_argument('--additional-susp-locs', metavar='DC:NUM-NUM-NUM-NUM',
type=str, nargs='+', default=None,
help='additional suspicious locations.(default: %(default)s)')
parser.add_argument('--always-accept', action='store_true',
help='always accept a proposal in MCMC (default: %(default)s)')
parser.add_argument('--random-cost', action='store_true',
help='use random cost in MCMC. (default: %(default)s)')
parser.add_argument('--max-random-cost', metavar='NUM', type=str, default="0",
help='maximum random cost. (default: %(default)s)')
parser.add_argument('--skip-dd', action='store_true',
help='skip delta debugging for spec inference (default: %(default)s)')
parser.add_argument('--finish-after-fault-localize', action='store_true',
help='finish after fault localization (default: %(default)s)')
parser.add_argument('--version', action='version', version='Angelix 1.1')
args = parser.parse_args()
working_dir = join(os.getcwd(), ".angelix")
if not args.keep_angelix_dir:
if exists(working_dir):
shutil.rmtree(working_dir, onerror=rm_force)
os.mkdir(working_dir)
rootLogger = logging.getLogger()
FORMAT = logging.Formatter('%(levelname)-8s %(name)-15s %(message)s')
if args.quiet:
rootLogger.setLevel(logging.WARNING)
elif args.log is not None:
log_level = getattr(logging, args.log, None)
rootLogger.setLevel(log_level)
else:
rootLogger.setLevel(logging.INFO)
fileHandler = logging.FileHandler("{0}/{1}.log".format(working_dir, 'angelix'))
fileHandler.setFormatter(FORMAT)
rootLogger.addHandler(fileHandler)
consoleHandler = logging.StreamHandler()
consoleHandler.setFormatter(FORMAT)
rootLogger.addHandler(consoleHandler)
if is_subdir(args.src, os.getcwd()):
logger.error('angelix must be run outside of the source directory')
exit(1)
if vars(args)['assert'] is not None and not args.dump_only:
with open(vars(args)['assert']) as output_file:
asserts = json.load(output_file)
else:
asserts = None
if 'guards' in args.defect and 'assignments' in args.defect:
logger.error('\'guards\' and \'assignments\' defect classes are currently incompatible')
exit(1)
if args.dump_only:
if args.golden is not None:
logger.warning('--dump-only disables --golden option')
if asserts is not None:
logger.warning('--dump-only disables --assert option')
config = dict()
config['initial_tests'] = args.initial_tests
config['all_tests'] = args.all_tests
config['max_z3_trials'] = args.max_z3_trials
config['defect'] = args.defect
config['test_timeout'] = args.test_timeout
config['single_group'] = args.single_group
config['group_size'] = args.group_size
config['group_by_score'] = args.group_by_score
config['localize_from_bottom'] = args.localize_from_bottom
config['suspicious'] = args.suspicious
config['localization'] = args.localization
config['ignore_trivial'] = args.ignore_trivial
config['path_solving_timeout'] = args.path_solving_timeout
config['timeout'] = args.timeout
config['max_angelic_paths'] = args.max_angelic_paths
config['klee_max_forks'] = args.klee_max_forks
config['klee_max_depth'] = args.klee_max_depth
config['klee_search'] = args.klee_search
config['klee_timeout'] = args.klee_timeout
config['klee_out_dir_timeout'] = args.klee_out_dir_timeout
config['klee_solver_timeout'] = args.klee_solver_timeout
config['klee_debug'] = args.klee_debug
config['klee_ignore_errors'] = args.klee_ignore_errors
config['ignore_trans_errors'] = args.ignore_trans_errors
config['ignore_infer_errors'] = args.ignore_infer_errors
config['ignore_instrument'] = args.ignore_instrument
config['ignore_unmatched_execution'] = args.ignore_unmatched_execution
config['ignore_z3_exception'] = args.ignore_z3_exception
config['skip_validating_angelic_path'] = args.skip_validating_angelic_path
config['use_nsynth'] = args.use_nsynth
config['use_osynth'] = args.use_osynth
config['use_gcc'] = args.use_gcc
config['use_frontend_for_test'] = args.use_frontend_for_test
config['keep_positive_behavior'] = args.keep_positive_behavior
config['synthesis_timeout'] = args.synthesis_timeout
config['synthesis_levels'] = args.synthesis_levels
config['synthesis_global_vars'] = args.synthesis_global_vars
config['synthesis_func_params'] = args.synthesis_func_params
config['synthesis_used_vars'] = True # for backward compatibility
config['synthesis_ptr_vars'] = args.synthesis_ptr_vars
config['synthesis_bool_only'] = args.synthesis_bool_only
config['forced_to_use_bool'] = args.forced_to_use_bool
config['empty_env_exps'] = args.empty_env_exps
config['exclude_member_exp'] = args.exclude_member_exp
config['generate_all'] = args.generate_all
config['init_uninit_vars'] = args.init_uninit_vars
config['verbose'] = args.verbose
config['build_before_instr'] = args.build_before_instr
config['instr_printf'] = args.instr_printf
config['mute_config_message'] = args.mute_config_message
config['mute_build_message'] = args.mute_build_message
config['mute_test_message'] = args.mute_test_message
config['show_test_message'] = args.show_test_message
config['show_oracle_contents'] = args.show_oracle_contents
config['show_syn_message'] = args.show_syn_message
config['mute_warning'] = args.mute_warning
config['show_suspicious_locations'] = args.show_suspicious_locations
config['invalid_localization'] = args.invalid_localization
config['angelic_search_strategy'] = args.angelic_search_strategy
config['step_method'] = args.step_method
config['compilation_db_file'] = args.compilation_db_file
config['keep_angelix_dir'] = args.keep_angelix_dir
config['skip_configure'] = args.skip_configure
config['skip_build'] = args.skip_build
config['gobble_klee_message'] = args.gobble_klee_message
config['lines'] = args.lines
config['search_max_trials'] = args.search_max_trials
config['max_same_cost_iter'] = args.max_same_cost_iter
config['mcmc_beta'] = args.mcmc_beta
config['one_bit_flip_prob'] = args.one_bit_flip_prob
config['chunk_bits'] = args.chunk_bits
config['max_bits'] = args.max_bits
config['max_resample'] = args.max_resample
config['block_expand_factor'] = args.block_expand_factor
config['inc_fix'] = args.inc_fix
config['fixed_bv_len'] = args.fixed_bv_len
config['default_max_cost'] = args.default_max_cost
config['error_cost'] = args.error_cost
config['warning_cost'] = args.warning_cost
config['max_syn_attempts'] = args.max_syn_attempts
config['max_repair_attempts'] = args.max_repair_attempts
config['penalty1'] = args.penalty1
config['penalty2'] = args.penalty2
config['parallel_testing'] = args.parallel_testing
config['all_suspicious'] = args.all_suspicious
config['mutate'] = int(args.mutate)
config['epsilon'] = float(args.epsilon)
config['always_accept'] = args.always_accept
config['random_cost'] = args.random_cost
config['max_random_cost'] = float(args.max_random_cost)
config['skip_dd'] = args.skip_dd
config['spec_from_only_negative'] = args.spec_from_only_negative
config['finish_after_fault_localize'] = args.finish_after_fault_localize
config['default_susp_score'] = args.default_susp_score
config['additional_susp_locs'] = args.additional_susp_locs
config['default_non_zero_cost'] = args.default_non_zero_cost
config['timeout_cost'] = args.timeout_cost
logger.debug('tests: {}'.format(args.tests))
if args.verbose:
logger.info('arg oracle = {}'.format(args.oracle))
for key, value in config.items():
logger.info('option {} = {}'.format(key, value))
statistics.init(working_dir, config)
if args.ignore_lines:
args.lines = None
tool = Angelix(working_dir,
src=args.src,
buggy=args.buggy,
oracle=abspath(args.oracle),
tests=args.tests,
golden=args.golden,
asserts=asserts,
lines=args.lines,
build=args.build,
configure=args.configure,
config=config)
if args.dump_only:
try:
dump = tool.dump_outputs()
with open('dump.json', 'w') as output_file:
asserts = json.dump(dump, output_file, indent=2)
logger.info('outputs successfully dumped (see dump.json)')
exit(0)
except (CompilationError, TransformationError):
logger.info('failed to dump outputs')
exit(1)
if args.tests_summary:
# run validation tests
positive, negative = tool.evaluate_ts(tool.validation_src, args.tests)
val_test_result = {'positive': sorted(positive), 'negative': sorted(negative)}
logger.info('positive: {}, negative: {}'.format(positive, negative))
# run golden tests
src = tool.golden_src
src.configure()
src.build()
positive, negative = tool.evaluate_ts(src, args.tests)
golden_test_result = {'positive': sorted(positive), 'negative': sorted(negative)}
logger.info('positive: {}, negative: {}'.format(positive, negative))
# delta
negative = list(set(val_test_result['negative']).difference(set(golden_test_result['negative'])))
delta = {'positive': val_test_result['positive'], 'negative': sorted(negative)}
logger.info('positive: {}, negative: {}'.format(delta['positive'], delta['negative']))
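# Hedged illustration of the delta computed above (toy test IDs): if the
# validation (buggy) version fails tests {1, 3, 5} and the golden version
# itself fails {5}, then delta['negative'] = sorted({1, 3, 5} - {5}) = [1, 3],
# i.e. only the failures that the golden version actually fixes are kept.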
summary = {'validation': val_test_result,
'golden': golden_test_result,
'delta': delta }
summary_file = join(working_dir, "tests-summary.json")
with open(summary_file, "w") as write_file:
json.dump(summary, write_file, indent=4)
exit(0)
if config['mutate'] > 0:
mutate(config, args.buggy, working_dir, args.golden,
args.build, args.configure)
exit(0)
logger.debug('start to measure time')
start = time.time()
try:
if args.timeout is not None:
with time_limit(args.timeout):
patches = repair()
else:
patches = repair()
except TimeoutException:
logger.info("failed to generate patch (timeout)")
print('TIMEOUT')
statistics.data['patch_found'] = False
statistics.data['timeout_occurred'] = True
statistics.data['time']['total'] = args.timeout
statistics.save()
exit(0)
except (CompilationError, InferenceError, TransformationError):
logger.info("failed to generate patch")
print('FAIL')
statistics.data['patch_found'] = False
statistics.save()
exit(1)
end = time.time()
elapsed = format_time(end - start)
statistics.data['time']['total'] = end - start
statistics.save()
if not patches:
logger.info("no patch generated in {}".format(elapsed))
print('FAIL')
statistics.data['patch_found'] = False
statistics.save()
exit(0)
else:
if config['generate_all']:
patch_dir = basename(abspath(args.src)) + '-' + time.strftime("%Y-%b%d-%H%M%S")
if not exists(patch_dir):
os.mkdir(patch_dir)
for idx, patch in enumerate(patches):
patch_file = os.path.join(patch_dir, str(idx) + '.patch')
with open(patch_file, 'w+') as file:
for line in patch:
file.write(line)
logger.info("patches successfully generated in {} (see {})".format(elapsed, patch_dir))
else:
patch_file = basename(abspath(args.src)) + '-' + time.strftime("%Y-%b%d-%H%M%S") + '.patch'
logger.info("patch successfully generated in {} (see {})".format(elapsed, patch_file))
with open(patch_file, 'w+') as file:
for line in patches[0]:
file.write(line)
print('SUCCESS')
statistics.data['src'] = args.src
statistics.data['buggy'] = args.buggy
statistics.data['patch_found'] = True
to Slice.
to_slice([int] v, bool ind1) -> Slice
Construct from an index vector (requires is_slice(v) to be true)
to_slice(IM x, bool ind1) -> Slice
> to_slice(IM x, bool ind1)
------------------------------------------------------------------------
Convert IM to Slice.
> to_slice([int] v, bool ind1)
------------------------------------------------------------------------
Construct from an index vector (requires is_slice(v) to be true)
"""
return _casadi.to_slice(*args)
class DM(MatrixCommon, GenericExpressionCommon, GenDM, PrintableCommon):
"""
"""
__swig_setmethods__ = {}
for _s in [MatrixCommon, GenericExpressionCommon, GenDM, PrintableCommon]:
__swig_setmethods__.update(getattr(_s, '__swig_setmethods__', {}))
__setattr__ = lambda self, name, value: _swig_setattr(self, DM, name, value)
__swig_getmethods__ = {}
for _s in [MatrixCommon, GenericExpressionCommon, GenDM, PrintableCommon]:
__swig_getmethods__.update(getattr(_s, '__swig_getmethods__', {}))
__getattr__ = lambda self, name: _swig_getattr(self, DM, name)
__repr__ = _swig_repr
def sanity_check(self, *args):
"""
[DEPRECATED] Correctness is checked during construction
sanity_check(self, bool complete)
"""
return _casadi.DM_sanity_check(self, *args)
def has_nz(self, *args):
"""
Returns true if the matrix has a non-zero at location rr, cc.
has_nz(self, int rr, int cc) -> bool
"""
return _casadi.DM_has_nz(self, *args)
def __nonzero__(self, *args):
"""
[INTERNAL]
__nonzero__(self) -> bool
"""
return _casadi.DM___nonzero__(self, *args)
def get(self, *args):
"""
get(self, bool ind1, Sparsity sp) -> DM
get(self, bool ind1, Slice rr) -> DM
get(self, bool ind1, IM rr) -> DM
get(self, bool ind1, Slice rr, Slice cc) -> DM
get(self, bool ind1, Slice rr, IM cc) -> DM
get(self, bool ind1, IM rr, Slice cc) -> DM
get(self, bool ind1, IM rr, IM cc) -> DM
"""
return _casadi.DM_get(self, *args)
def set(self, *args):
"""
set(self, DM m, bool ind1, Sparsity sp)
set(self, DM m, bool ind1, Slice rr)
set(self, DM m, bool ind1, IM rr)
set(self, DM m, bool ind1, Slice rr, Slice cc)
set(self, DM m, bool ind1, Slice rr, IM cc)
set(self, DM m, bool ind1, IM rr, Slice cc)
set(self, DM m, bool ind1, IM rr, IM cc)
"""
return _casadi.DM_set(self, *args)
def get_nz(self, *args):
"""
get_nz(self, bool ind1, Slice k) -> DM
get_nz(self, bool ind1, IM k) -> DM
"""
return _casadi.DM_get_nz(self, *args)
def set_nz(self, *args):
"""
set_nz(self, DM m, bool ind1, Slice k)
set_nz(self, DM m, bool ind1, IM k)
"""
return _casadi.DM_set_nz(self, *args)
def __pos__(self, *args):
"""
__pos__(self) -> DM
"""
return _casadi.DM___pos__(self, *args)
def __neg__(self, *args):
"""
__neg__(self) -> DM
"""
return _casadi.DM___neg__(self, *args)
def binary(*args):
"""
binary(int op, DM x, DM y) -> DM
"""
return _casadi.DM_binary(*args)
binary = staticmethod(binary)
def unary(*args):
"""
unary(int op, DM x) -> DM
"""
return _casadi.DM_unary(*args)
unary = staticmethod(unary)
def scalar_matrix(*args):
"""
scalar_matrix(int op, DM x, DM y) -> DM
"""
return _casadi.DM_scalar_matrix(*args)
scalar_matrix = staticmethod(scalar_matrix)
def matrix_scalar(*args):
"""
matrix_scalar(int op, DM x, DM y) -> DM
"""
return _casadi.DM_matrix_scalar(*args)
matrix_scalar = staticmethod(matrix_scalar)
def matrix_matrix(*args):
"""
matrix_matrix(int op, DM x, DM y) -> DM
"""
return _casadi.DM_matrix_matrix(*args)
matrix_matrix = staticmethod(matrix_matrix)
def printme(self, *args):
"""
printme(self, DM y) -> DM
"""
return _casadi.DM_printme(self, *args)
def set_max_depth(*args):
"""
set_max_depth(int eq_depth)
"""
return _casadi.DM_set_max_depth(*args)
set_max_depth = staticmethod(set_max_depth)
def get_max_depth(*args):
"""
get_max_depth() -> int
"""
return _casadi.DM_get_max_depth(*args)
get_max_depth = staticmethod(get_max_depth)
def get_input(*args):
"""
get_input(Function f) -> [DM]
"""
return _casadi.DM_get_input(*args)
get_input = staticmethod(get_input)
def get_free(*args):
"""
get_free(Function f) -> [DM]
"""
return _casadi.DM_get_free(*args)
get_free = staticmethod(get_free)
def type_name(*args):
"""
type_name() -> str
"""
return _casadi.DM_type_name(*args)
type_name = staticmethod(type_name)
def print_split(self, *args):
"""
print_split(self) -> ([str] OUTPUT, [str] OUTPUT)
"""
return _casadi.DM_print_split(self, *args)
def disp(self, *args):
"""
Print a representation of the object.
disp(self, bool more)
"""
return _casadi.DM_disp(self, *args)
def str(self, *args):
"""
Get string representation.
str(self, bool more) -> str
"""
return _casadi.DM_str(self, *args)
def print_scalar(self, *args):
"""
Print scalar.
print_scalar(self)
"""
return _casadi.DM_print_scalar(self, *args)
def print_vector(self, *args):
"""
Print vector-style.
print_vector(self, bool truncate)
"""
return _casadi.DM_print_vector(self, *args)
def print_dense(self, *args):
"""
Print dense matrix-style.
print_dense(self, bool truncate)
"""
return _casadi.DM_print_dense(self, *args)
def print_sparse(self, *args):
"""
Print sparse matrix style.
print_sparse(self, bool truncate)
"""
return _casadi.DM_print_sparse(self, *args)
def clear(self, *args):
"""
clear(self)
"""
return _casadi.DM_clear(self, *args)
def resize(self, *args):
"""
resize(self, int nrow, int ncol)
"""
return _casadi.DM_resize(self, *args)
def reserve(self, *args):
"""
reserve(self, int nnz)
reserve(self, int nnz, int ncol)
"""
return _casadi.DM_reserve(self, *args)
def erase(self, *args):
"""
Erase a submatrix (leaving structural zeros in its place) Erase elements of
erase(self, [int] rr, bool ind1)
erase(self, [int] rr, [int] cc, bool ind1)
Erase a submatrix (leaving structural zeros in its place) Erase rows and/or
a matrix.
> erase(self, [int] rr, bool ind1)
------------------------------------------------------------------------
Erase a submatrix (leaving structural zeros in its place) Erase elements of
a matrix.
> erase(self, [int] rr, [int] cc, bool ind1)
------------------------------------------------------------------------
Erase a submatrix (leaving structural zeros in its place) Erase rows and/or
columns of a matrix.
"""
return _casadi.DM_erase(self, *args)
def remove(self, *args):
"""
Remove columns and rows Remove/delete rows and/or columns of a matrix.
remove(self, [int] rr, [int] cc)
"""
return _casadi.DM_remove(self, *args)
def enlarge(self, *args):
"""
Enlarge matrix Make the matrix larger by inserting empty rows and columns,
enlarge(self, int nrow, int ncol, [int] rr, [int] cc, bool ind1)
keeping the existing non-zeros.
"""
return _casadi.DM_enlarge(self, *args)
def sparsity(self, *args):
"""
Get an owning reference to the sparsity pattern.
sparsity(self) -> Sparsity
"""
return _casadi.DM_sparsity(self, *args)
def triplet(*args):
"""
triplet([int] row, [int] col, DM d) -> DM
triplet([int] row, [int] col, DM d, (int,int) rc) -> DM
triplet([int] row, [int] col, DM d, int nrow, int ncol) -> DM
"""
return _casadi.DM_triplet(*args)
triplet = staticmethod(triplet)
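    # Hedged usage sketch (assumes the compiled casadi extension is available):
    # following the triplet signature documented above, DM.triplet([0, 1], [0, 1],
    # [1.0, 2.0], 2, 2) would build a 2x2 matrix with 1.0 and 2.0 on the diagonal
    # and structural zeros elsewhere; DM.eye(3) and DM.nan(2, 2) below follow the
    # same pattern of sparse construction.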
def inf(*args):
"""
create a matrix with all inf
inf(int nrow, int ncol) -> DM
inf((int,int) rc) -> DM
inf(Sparsity sp) -> DM
"""
return _casadi.DM_inf(*args)
inf = staticmethod(inf)
def nan(*args):
"""
create a matrix with all nan
nan(int nrow, int ncol) -> DM
nan((int,int) rc) -> DM
nan(Sparsity sp) -> DM
"""
return _casadi.DM_nan(*args)
nan = staticmethod(nan)
def eye(*args):
"""
eye(int ncol) -> DM
"""
return _casadi.DM_eye(*args)
eye = staticmethod(eye)
def element_hash(self, *args):
"""
element_hash(self) -> int
"""
return _casadi.DM_element_hash(self, *args)
def is_regular(self, *args):
"""
is_regular(self) -> bool
"""
return _casadi.DM_is_regular(self, *args)
def is_smooth(self, *args):
"""
is_smooth(self) -> bool
"""
return _casadi.DM_is_smooth(self, *args)
def is_leaf(self, *args):
"""
is_leaf(self) -> bool
"""
return _casadi.DM_is_leaf(self, *args)
def is_commutative(self, *args):
"""
is_commutative(self) -> bool
"""
return _casadi.DM_is_commutative(self, *args)
def is_symbolic(self, *args):
"""
is_symbolic(self) -> bool
"""
return _casadi.DM_is_symbolic(self, *args)
def is_valid_input(self, *args):
"""
is_valid_input(self) -> bool
"""
return _casadi.DM_is_valid_input(self, *args)
def has_duplicates(self, *args):
"""
has_duplicates(self) -> bool
"""
return _casadi.DM_has_duplicates(self, *args)
def reset_input(self, *args):
"""
reset_input(self)
"""
return _casadi.DM_reset_input(self, *args)
def is_constant(self, *args):
"""
Check if the matrix is constant (note that false negative answers are
is_constant(self) -> bool
possible)
"""
return _casadi.DM_is_constant(self, *args)
def is_integer(self, *args):
"""
Check if the matrix is integer-valued (note that false negative answers are
is_integer(self) -> bool
possible)
"""
return _casadi.DM_is_integer(self, *args)
def is_zero(self, *args):
"""
check if the matrix is 0 (note that false negative answers are possible)
is_zero(self) -> bool
"""
return _casadi.DM_is_zero(self, *args)
def is_one(self, *args):
"""
check if the matrix is 1 (note that false negative answers are possible)
is_one(self) -> bool
"""
return _casadi.DM_is_one(self, *args)
def is_minus_one(self, *args):
"""
check if the matrix is -1 (note that false negative answers are possible)
is_minus_one(self) -> bool
"""
return _casadi.DM_is_minus_one(self, *args)
def is_eye(self, *args):
"""
check if the matrix is an identity matrix (note that false negative answers
is_eye(self) -> bool
are possible)
"""
return _casadi.DM_is_eye(self, *args)
def op(self, *args):
"""
op(self) -> int
"""
return _casadi.DM_op(self, *args)
def is_op(self, *args):
"""
is_op(self, int op) -> bool
"""
return _casadi.DM_is_op(self, *args)
def has_zeros(self, *args):
"""
Check if the matrix has any zero entries which are not structural zeros.
has_zeros(self) -> bool
"""
return _casadi.DM_has_zeros(self, *args)
def nonzeros(self, *args):
"""
Get all nonzeros.
nonzeros(self) -> [float]
Implementation of Matrix::get_nonzeros (in public API)
"""
return _casadi.DM_nonzeros(self, *args)
def elements(self, *args):
"""
Get all elements.
elements(self) -> [float]
"""
return _casadi.DM_elements(self, *args)
def __float__(self, *args):
"""
__float__(self) -> float
"""
return _casadi.DM___float__(self, *args)
def __int__(self, *args):
"""
__int__(self) -> int
"""
return _casadi.DM___int__(self, *args)
matrices')
return self._new(L), self._new(D)
def liupc(self):
"""Liu's algorithm, for pre-determination of the Elimination Tree of
the given matrix, used in row-based symbolic Cholesky factorization.
Examples
========
>>> from sympy.matrices import SparseMatrix
>>> S = SparseMatrix([
... [1, 0, 3, 2],
... [0, 0, 1, 0],
... [4, 0, 0, 5],
... [0, 6, 7, 0]])
>>> S.liupc()
([[0], [], [0], [1, 2]], [4, 3, 4, 4])
References
==========
Symbolic Sparse Cholesky Factorization using Elimination Trees,
<NAME> (1999)
http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.39.7582
"""
# Algorithm 2.4, p 17 of reference
# get the indices of the elements that are non-zero on or below diag
R = [[] for r in range(self.rows)]
for r, c, _ in self.row_list():
if c <= r:
R[r].append(c)
inf = len(R) # nothing will be this large
parent = [inf]*self.rows
virtual = [inf]*self.rows
for r in range(self.rows):
for c in R[r][:-1]:
while virtual[c] < r:
t = virtual[c]
virtual[c] = r
c = t
if virtual[c] == inf:
parent[c] = virtual[c] = r
return R, parent
def multiply(self, other):
"""Fast multiplication exploiting the sparsity of the matrix.
Examples
========
>>> from sympy.matrices import SparseMatrix, ones
>>> A, B = SparseMatrix(ones(4, 3)), SparseMatrix(ones(3, 4))
>>> A.multiply(B) == 3*ones(4)
True
See Also
========
add
"""
A = self
B = other
# sort B's row_list into list of rows
Blist = [[] for i in range(B.rows)]
for i, j, v in B.row_list():
Blist[i].append((j, v))
Cdict = defaultdict(int)
for k, j, Akj in A.row_list():
for n, Bjn in Blist[j]:
temp = Akj*Bjn
Cdict[k, n] += temp
rv = self.zeros(A.rows, B.cols)
rv._smat = {k: v for k, v in Cdict.items() if v}
return rv
def nnz(self):
"""Returns the number of non-zero elements in Matrix."""
return len(self._smat)
def reshape(self, rows, cols):
"""Reshape matrix while retaining original size.
Examples
========
>>> from sympy.matrices import SparseMatrix
>>> S = SparseMatrix(4, 2, range(8))
>>> S.reshape(2, 4)
Matrix([
[0, 1, 2, 3],
[4, 5, 6, 7]])
"""
if len(self) != rows*cols:
raise ValueError("Invalid reshape parameters %d %d" % (rows, cols))
smat = {}
for k, v in self._smat.items():
i, j = k
n = i*self.cols + j
ii, jj = divmod(n, cols)
smat[(ii, jj)] = self._smat[(i, j)]
return self._new(rows, cols, smat)
def row_list(self):
"""Returns a row-sorted list of non-zero elements of the matrix.
Examples
========
>>> from sympy.matrices import SparseMatrix
>>> a = SparseMatrix(((1, 2), (3, 4)))
>>> a
Matrix([
[1, 2],
[3, 4]])
>>> a.RL
[(0, 0, 1), (0, 1, 2), (1, 0, 3), (1, 1, 4)]
See Also
========
row_op
col_list
"""
return [tuple(k + (self[k],)) for k in
sorted(list(self._smat.keys()), key=lambda k: list(k))]
def row_structure_symbolic_cholesky(self):
"""Symbolic cholesky factorization, for pre-determination of the
non-zero structure of the Cholesky factororization.
Examples
========
>>> from sympy.matrices import SparseMatrix
>>> S = SparseMatrix([
... [1, 0, 3, 2],
... [0, 0, 1, 0],
... [4, 0, 0, 5],
... [0, 6, 7, 0]])
>>> S.row_structure_symbolic_cholesky()
[[0], [], [0], [1, 2]]
References
==========
Symbolic Sparse Cholesky Factorization using Elimination Trees,
<NAME> (1999)
http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.39.7582
"""
R, parent = self.liupc()
inf = len(R) # this acts as infinity
Lrow = copy.deepcopy(R)
for k in range(self.rows):
for j in R[k]:
while j != inf and j != k:
Lrow[k].append(j)
j = parent[j]
Lrow[k] = list(sorted(set(Lrow[k])))
return Lrow
def row(self, i):
"""Returns column i from self as a row vector.
Examples
========
>>> from sympy.matrices import SparseMatrix
>>> a = SparseMatrix(((1, 2), (3, 4)))
>>> a.row(0)
Matrix([[1, 2]])
See Also
========
col
row_list
"""
return self[i,:]
def scalar_multiply(self, scalar):
"Scalar element-wise multiplication"
M = self.zeros(*self.shape)
if scalar:
for i in self._smat:
v = scalar*self._smat[i]
if v:
M._smat[i] = v
else:
M._smat.pop(i, None)
return M
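    # Hedged illustration: scalar_multiply keeps the result sparse, e.g. for a
    # matrix whose only stored entry is {(0, 0): 3}, multiplying by 2 stores
    # {(0, 0): 6}, while multiplying by 0 skips the loop entirely and returns an
    # all-zero matrix with an empty _smat dictionary.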
def solve_least_squares(self, rhs, method='LDL'):
"""Return the least-square fit to the data.
By default the LDL-based routine is used (method='LDL'); other
methods of matrix inversion can be used. To find out which are
available, see the docstring of the .inv() method.
Examples
========
>>> from sympy.matrices import SparseMatrix, Matrix, ones
>>> A = Matrix([1, 2, 3])
>>> B = Matrix([2, 3, 4])
>>> S = SparseMatrix(A.row_join(B))
>>> S
Matrix([
[1, 2],
[2, 3],
[3, 4]])
If each line of S represent coefficients of Ax + By
and x and y are [2, 3] then S*xy is:
>>> r = S*Matrix([2, 3]); r
Matrix([
[ 8],
[13],
[18]])
But let's add 1 to the middle value and then solve for the
least-squares value of xy:
>>> xy = S.solve_least_squares(Matrix([8, 14, 18])); xy
Matrix([
[ 5/3],
[10/3]])
The error is given by S*xy - r:
>>> S*xy - r
Matrix([
[1/3],
[1/3],
[1/3]])
>>> _.norm().n(2)
0.58
If a different xy is used, the norm will be higher:
>>> xy += ones(2, 1)/10
>>> (S*xy - r).norm().n(2)
1.5
"""
t = self.T
return (t*self).inv(method=method)*t*rhs
def solve(self, rhs, method='LDL'):
"""Return solution to self*soln = rhs using given inversion method.
For a list of possible inversion methods, see the .inv() docstring.
"""
if not self.is_square:
if self.rows < self.cols:
raise ValueError('Under-determined system.')
elif self.rows > self.cols:
raise ValueError('For over-determined system, M, having '
'more rows than columns, try M.solve_least_squares(rhs).')
else:
return self.inv(method=method)*rhs
def tolist(self):
"""Convert this sparse matrix into a list of nested Python lists.
Examples
========
>>> from sympy.matrices import SparseMatrix, ones
>>> a = SparseMatrix(((1, 2), (3, 4)))
>>> a.tolist()
[[1, 2], [3, 4]]
When there are no rows then it will not be possible to tell how
many columns were in the original matrix:
>>> SparseMatrix(ones(0, 3)).tolist()
[]
"""
if not self.rows:
return []
if not self.cols:
return [[] for i in range(self.rows)]
I, J = self.shape
return [[self[i, j] for j in range(J)] for i in range(I)]
RL = property(row_list, None, None, "Alternate faster representation")
CL = property(col_list, None, None, "Alternate faster representation")
__matmul__ = __mul__
__rmatmul__ = __rmul__
extract.__doc__ = MatrixBase.extract.__doc__
@classmethod
def zeros(cls, r, c=None):
"""Return an r x c matrix of zeros, square if c is omitted."""
c = r if c is None else c
r = as_int(r)
c = as_int(c)
return cls(r, c, {})
class MutableSparseMatrix(SparseMatrix, MatrixBase):
@classmethod
def _new(cls, *args, **kwargs):
return cls(*args)
def __setitem__(self, key, value):
"""Assign value to position designated by key.
Examples
========
>>> from sympy.matrices import SparseMatrix, ones
>>> M = SparseMatrix(2, 2, {})
>>> M[1] = 1; M
Matrix([
[0, 1],
[0, 0]])
>>> M[1, 1] = 2; M
Matrix([
[0, 1],
[0, 2]])
>>> M = SparseMatrix(2, 2, {})
>>> M[:, 1] = [1, 1]; M
Matrix([
[0, 1],
[0, 1]])
>>> M = SparseMatrix(2, 2, {})
>>> M[1, :] = [[1, 1]]; M
Matrix([
[0, 0],
[1, 1]])
To replace row r you assign to position r*m where m
is the number of columns:
>>> M = SparseMatrix(4, 4, {})
>>> m = M.cols
>>> M[3*m] = ones(1, m)*2; M
Matrix([
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0],
[2, 2, 2, 2]])
And to replace column c you can assign to position c:
>>> M[2] = ones(m, 1)*4; M
Matrix([
[0, 0, 4, 0],
[0, 0, 4, 0],
[0, 0, 4, 0],
[2, 2, 4, 2]])
"""
rv = self._setitem(key, value)
if rv is not None:
i, j, value = rv
if value:
self._smat[(i, j)] = value
elif (i, j) in self._smat:
del self._smat[(i, j)]
def as_mutable(self):
return self.copy()
__hash__ = None
def col_del(self, k):
"""Delete the given column of the matrix.
Examples
========
>>> from sympy.matrices import SparseMatrix
>>> M = SparseMatrix([[0, 0], [0, 1]])
>>> M
Matrix([
[0, 0],
[0, 1]])
>>> M.col_del(0)
>>> M
Matrix([
[0],
[1]])
See Also
========
row_del
"""
newD = {}
k = a2idx(k, self.cols)
for (i, j) in self._smat:
if j == k:
pass
elif j > k:
newD[i, j - 1] = self._smat[i, j]
else:
newD[i, j] = self._smat[i, j]
self._smat = newD
self.cols -= 1
def col_join(self, other):
k_elongation.flatten().tolist()[:-1]
kelongs = kelongs + k_elongation.flatten().tolist()[:-1]
return kelongs
def get_k(self, nt_seq, k_init, k_elong_mean):
'''
returns all propensities for a given nucleotide sequence
*args*
**nt_seq**, nucleotide sequence as a string
**k_init**, initiation rate of ribosome binding
**k_elong_mean**, average elongation rate found experimentally
'''
codons = nt_seq
genelength = int(len(codons)/3)
seperated_codons = [codons[i:i+3] for i in range(0, len(codons), 3)] #split codons by 3
k_elongation = np.zeros((1, genelength))
tRNA_copynumber = np.zeros((1, genelength))
for i in range(len(seperated_codons)):
tRNA_copynumber[0, i] = self.strGeneCopy[seperated_codons[i]]
mean_tRNA_copynumber = np.mean(list(self.strGeneCopy.values()))
k_elongation = (tRNA_copynumber / mean_tRNA_copynumber) * k_elong_mean
all_k = [k_init] + k_elongation.flatten().tolist()[:-1] + [10]
return all_k
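    # Hedged worked example for get_k (toy numbers, not real tRNA copy counts):
    # for a 9-nt sequence whose codons have copy numbers [2, 1, 1] and an assumed
    # dictionary-wide mean copy number of 1, k_elongation = [2, 1, 1] * k_elong_mean.
    # With k_init = 0.03 the returned vector is
    # [0.03, 2*k_elong_mean, 1*k_elong_mean, 10]: the last codon's rate is dropped
    # by the [:-1] slice and replaced by the trailing 10 appended above
    # (presumably a fixed termination rate).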
def get_temporal_proteins(self):
'''
gets all the temporal proteins after getting the ORFs
__.tagged_proteins = dictionary with keys of tag types and a list of proteins
__.pois = list of proteins of interest
__.pois_seq = list of nucleotide sequences of the proteins of interest
__.proteins = dictionary keyed by reading frame ('1', '2' or '3')
'''
self.proteins = {'1':[], '2':[], '3':[]}
self.tagged_proteins = {a:[] for a in self.tag_dict.keys()}
self.tagged_protein_seq = {a:[] for a in self.tag_dict.keys()}
for i in range(len(self.orfs)):
for j in range(len(self.orfs[str(i+1)])):
pro = self.nt2aa(self.sequence_str[self.orfs[str(i+1)][j][0]:self.orfs[str(i+1)][j][1]+3])
nt_seq = self.sequence_str[self.orfs[str(i+1)][j][0]:self.orfs[str(i+1)][j][1]+3]
self.proteins[str(i+1)].append(pro)
for tag in self.tag_dict.keys():
if self.tag_dict[tag] in pro:
self.tagged_protein_seq[tag].append(nt_seq)
self.tagged_proteins[tag].append(pro)
tags = 0
for key in self.tagged_proteins.keys():
tags += len(self.tagged_proteins[key])
self.pois = []
self.pois_seq = []
for tag in self.tag_dict.keys():
for i in range(len(self.tagged_proteins[tag])):
if self.tagged_proteins[tag][i] not in self.pois:
self.pois.append(self.tagged_proteins[tag][i])
self.pois_seq.append(self.tagged_protein_seq[tag][i])
if len(self.pois) == 0:
POIs = []
pois_s = []
pois_nt = []
for i in range(len(self.gb_obj.features)):
try:
self.gb_obj.features[i].qualifiers['translation']
if tags == 0:
POIs.append(self.gb_obj.features[i])
pois_s.append(self.nt2aa(self.tag_full['T_Flag']) + self.gb_obj.features[i].qualifiers['translation'][0])
pois_nt.append(self.tag_full['T_Flag'] + str(self.gb_obj.seq)[int(self.gb_obj.features[i].location.start):int(self.gb_obj.features[i].location.end)])
else:
POIs.append(self.gb_obj.features[i])
pois_s.append(self.gb_obj.features[i].qualifiers['translation'][0])
pois_nt.append(str(self.gb_obj.seq)[int(self.gb_obj.features[i].location.start):int(self.gb_obj.features[i].location.end)])
except:
pass
self.pois = pois_s
self.pois_seq = pois_nt
def analyze_poi(self, protein, sequence, epitope_loc = 'front'):
'''
Analyzes the protein of interest and stores it in __.POI
*args*
**protein**, amino acid sequence as a string
**sequence**, nucleotide sequence that goes with the protein
**epitope_loc**, consider the epitope location as the front, middle or back:
DDYDDK: front: 0, middle: 3, back: 6 for epitope location
'''
self.POI = poi()
self.POI.nt_seq = sequence
self.POI.aa_seq = protein
self.POI.name = self.sequence_name
self.POI.total_length = len(protein)
'''
for key in self.tagged_proteins:
if protein in self.tagged_proteins[key]:
self.POI.tag_types.append(key)
'''
self.POI.tag_types = []
for tag in self.tag_dict.keys():
if self.tag_dict[tag] in protein:
self.POI.tag_types.append(tag)
#''.join(sms.poi[0].split('DYKDDDDK')
self.POI.tag_epitopes = {a:[] for a in self.POI.tag_types}
gs = protein
for i in range(len(self.POI.tag_types)):
try:
nt_tag = self.tag_full[self.POI.tag_types[i]]
aa_tag = self.nt2aa(nt_tag)
except:
epi = self.tag_dict[self.POI.tag_types[i]]
firstep = self.POI.aa_seq.find(epi)
lastep = len(self.POI.aa_seq) - self.POI.aa_seq[::-1].find(epi[::-1])
aa_tag = self.POI.aa_seq[firstep:lastep]
nt_tag = self.POI.nt_seq[3*firstep:3*lastep]
if epitope_loc == 'front':
offset = 0
if epitope_loc == 'middle':
offset = int(len(self.tag_dict[self.POI.tag_types[i]])/2)
if epitope_loc == 'back':
offset = len(self.tag_dict[self.POI.tag_types[i]])
self.POI.tag_epitopes[self.POI.tag_types[i]] = [m.start()+1+offset for m in re.finditer(self.tag_dict[self.POI.tag_types[i]], self.POI.aa_seq)]
gs = gs.replace(aa_tag, '')
self.POI.gene_seq = gs
self.POI.gene_length = len(gs)
codons = []
for i in range(0, len(sequence), 3):
codons.append(sequence[i:i+3])
self.POI.codons = codons
self.POI.codon_sensitivity, self.POI.CAI, self.POI.CAI_codons = self.codon_usage(self.POI.nt_seq)
def open_seq_file(self, seqfile):
'''
Reads a sequence file, either a .txt file or a .gb genbank file
*args*
**seqfile**, sequence file either in txt, gb, gbk format
'''
seq = seqfile
self.sequence_name = ''
if '.dna' in seq:
self.sequence_name = seq[:-4]
try:
seq_record = snapgene_file_to_seqrecord(seq)
except:
print('To read .dna files please install snapgene_reader: pip install snapgene_reader - https://github.com/IsaacLuo/SnapGeneFileReader' )
self.sequence_str = seq_record.seq.tostring()
if '.txt' in seq:
with open(seq) as f:
raw = f.readlines()
raw = ''.join(raw)
onlychar = re.split(r'[^A-Za-z]', raw)
validt = ['A', 'G', 'T', 'C']
validu = ['A', 'G', 'U', 'C']
namelen = 0
self.sequence_str = ''
for i in range(len(onlychar)):
section = onlychar[i]
if set(section.upper()) == set(validt):
self.sequence_str += section.upper()
elif set(section.upper()) == set(validu):
self.sequence_str += section.upper()
else:
if len(section)>namelen:
self.sequence_name = section
namelen = len(section)
if '.gb' in seq:
gb_record = SeqIO.read(open(seq, "r"), "genbank")
self.sequence_str = str(gb_record.seq)
self.sequence_name = gb_record.name
self.gb_obj = gb_record
if self.sequence_name == '':
self.sequence_name = seqfile.replace('.txt', '').replace('.gb', '')  # strip either extension
def codon_usage(self, nt_seq):
'''
Analyzes codon usage from the nucleotide sequence
*args*
**nt_seq**, nucleotide sequence as a string
*returns*
**codon_sensitivity**, a list of codon sensitivity for the nucleotide sequence
**cai**, cai value
'''
codon_usage = np.zeros((1, 21))
gene_len = len(nt_seq)/3
aa_seq = self.nt2aa(nt_seq)
for i in range(len(self.aa_keys)-1):
codon_usage[0, i] = len(re.findall(self.aa_keys[i], aa_seq))
codon_usage[0, 20] = len(re.findall('\*', aa_seq))
codon_norm = codon_usage/gene_len
codon_sensitivity = np.round(codon_norm*self.sensitivity_fast_slow, 2)
cai_codons = []
for i in range(0, len(nt_seq), 3):
cai_codons.append(self.strGeneCopy[nt_seq[i:i+3]] / self.strGeneCopy_fast[nt_seq[i:i+3]])
cai = self.geomean(cai_codons)
return codon_sensitivity, cai, cai_codons
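    # Hedged reading of the CAI computed above: each codon contributes the ratio
    # strGeneCopy[codon] / strGeneCopy_fast[codon] (presumably its copy number
    # relative to the preferred synonymous codon), and the CAI is the geometric
    # mean of those per-codon ratios. For toy ratios [1.0, 0.5, 0.25] the CAI
    # would be (1.0 * 0.5 * 0.25) ** (1.0 / 3) = 0.5.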
def get_probvec(self):
'''
returns the probe vectors (epitope positions by codon position) associated with the tagged sequence stored in POI
*returns*
**probe_vec**, cumulative probe intensity vector by codon position. Ex: [0,0,0,0,1,1,1,1,2,2,2,3,3,3 etc]
**probe_loc**, epitope position as a binary vector, 1 for epitope positions, 0 for everything else
'''
probePositions = []
keylist = list(self.POI.tag_epitopes.keys())
for n in range(len(keylist)):
probePosition = []
key = keylist[n]
probePosition = probePosition + self.POI.tag_epitopes[key]
if probePosition != []:
probePosition = np.unique(probePosition).tolist()
probePositions.append(probePosition)
genelength = self.POI.total_length
pvfull = np.zeros((1, genelength+1)).astype(int).flatten()
if len(probePositions) > 1:
k = 0
for n in range(len(keylist)):
pv = np.zeros((1, genelength+1)).astype(int).flatten()
key = keylist[n]
probePosition = probePositions[k]
k+=1
if len(self.POI.tag_epitopes[key]) != 0:
for i in range(len(probePosition)):
pv[probePosition[i]:] = i+1
if n > 0:
pvfull = np.vstack((pvfull,pv))
else:
pvfull = pv
else:
probePosition = probePositions[0]
for n in range(len(keylist)):
pv = np.zeros((1, genelength+1)).astype(int).flatten()
key = keylist[n]
if len(self.POI.tag_epitopes[key]) != 0:
for i in range(len(probePosition)):
pv[probePosition[i]:] = i+1
if n > 0:
pvfull = np.vstack((pvfull,pv))
else:
pvfull = pv
numtags = 0
for key in keylist:
if len(self.POI.tag_epitopes[key]) != 0:
numtags += 1
ploc = np.zeros((numtags, self.POI.total_length+1)).astype(int)
numind = 0
for n in range(len(keylist)):
key = keylist[n]
if len(self.POI.tag_epitopes[key]) != 0:
ploc[numind][self.POI.tag_epitopes[key]] = 1
numind += 1
return pvfull, ploc
def simple_model(self, poi, tag, ki,ke):
'''
Simplified model
returns the analytical tau, intensity mean, and intensity variance
calculated from the simplified model
'''
L = poi.total_length #get the total length of the gene
Lm = np.mean(poi.tag_epitopes[tag]) #the mean location of the tag epitopes
L_tag = int((poi.tag_epitopes[tag][-1] - poi.tag_epitopes[tag][0]) / 2)
ke_analytical = L*ke / np.sum(self.get_ui(poi.nt_seq[:-3]))
tau_analytical = L_tag/ke_analytical #analytical tau ie autocovariance time
mean_analytical = ki*tau_analytical* (1.-Lm/float(L)) # mean intensity
var_analytical = ki*tau_analytical* (1.-Lm/float(L))**2 #var intensity
return tau_analytical,mean_analytical,var_analytical
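    # Hedged numeric check of the closed forms above (toy values): with
    # L_tag = 40 and ke_analytical = 4 codons/s, tau_analytical = 10 s; if
    # Lm / L = 0.5 and ki = 0.02 1/s, then mean_analytical = 0.02*10*0.5 = 0.1
    # and var_analytical = 0.02*10*0.25 = 0.05, i.e. the variance equals the
    # mean scaled by one extra factor of (1 - Lm/L).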
def get_binned_k_emphasize_probes(self,k,bins,pl):
'''
evenly bins elongation rates as best it can.
'''
probe_region_start = np.where(pl > 0)[0]
probe_region_end = np.where(pl > 0)[-1]
binsize = int(np.floor(len(k)/bins))
binned_ks = []
k_binned = np.zeros(bins)
k_lens = np.ones(bins)*binsize
to_redistribute = len(k)%bins
if to_redistribute:  # guard against k_lens[-0:], which would overwrite every bin
    k_lens[-to_redistribute:] = binsize+1
inds = np.hstack(([0.], np.cumsum(k_lens))).astype(int)
for i in range(0,bins):
binned_ks = binned_ks + [k[inds[i]:inds[i+1]].tolist(),]
for i in range(0,bins):
k_binned[i] = np.mean(binned_ks[i])/len(binned_ks[i])
return k_binned,k_lens
def get_binned_k(self,k,bins):
'''
evenly bins elongation rates as best it can.
'''
binsize = int(np.floor(len(k)/bins))
binned_ks = []
k_binned = np.zeros(bins)
k_lens = np.ones(bins)*binsize
to_redistribute = len(k)%bins
if to_redistribute:  # guard against k_lens[-0:], which would overwrite every bin
    k_lens[-to_redistribute:] = binsize+1
inds = np.hstack(([0.], np.cumsum(k_lens))).astype(int)
for i in range(0,bins):
binned_ks = binned_ks + [k[inds[i]:inds[i+1]].tolist(),]
for i in range(0,bins):
k_binned[i] = 1/np.mean(1/np.array(binned_ks[i]))
return k_binned,k_lens
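    # Hedged example for get_binned_k: k = [2, 2, 4, 4] with bins = 2 gives
    # binned_ks = [[2, 2], [4, 4]] and, using the per-bin harmonic mean,
    # k_binned = [2, 4] with k_lens = [2, 2]. The harmonic mean is presumably
    # chosen so that the dwell time of each binned step matches the summed
    # dwell times of the codons it replaces.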
def get_binned_probe_vec(self,probe_loc,bins):
'''
bin the probe vector as even as possible
'''
probe_loc = np.atleast_2d(probe_loc)
binsize = int(np.floor(probe_loc.shape[1]/bins))
probeloc_binned = np.zeros((np.atleast_2d(probe_loc).shape[0],bins))
probe_lens = np.ones((np.atleast_2d(probe_loc).shape[0],bins))*binsize
to_redistribute = probe_loc.shape[1] % bins  # leftover codons after even binning
if to_redistribute:  # guard against [-0:], which would overwrite every bin
    probe_lens[:, -to_redistribute:] = binsize+1
inds = np.hstack(([0.], np.cumsum(probe_lens,axis=1)[0,:])).astype(int)
for i in range(0,bins):
probeloc_binned[:,i] = np.sum(probe_loc[:,inds[i]:inds[i+1]],axis=1)
probevec_binned = np.cumsum(probeloc_binned,axis=1)
return probevec_binned.astype(int), probeloc_binned.astype(int)
def ssa_binned(self,nt_seq=None, bins = 50,all_k=None, k_elong_mean=10, k_initiation=.03, probePosition=[], n_traj=100, tf=1000, start_time=0, tstep=1000, time_inhibit=0, evaluating_frap=False, evaluating_inhibitor=False,force_python = False):
        if nt_seq is None: #get sequence if none was passed
nt_seq = self.POI.nt_seq
genelength = int(len(nt_seq)/3)
if len(probePosition) == 0:
pv,probePosition = self.get_probvec()
if all_k == | |
<filename>nevermined_gateway/routes.py<gh_stars>1-10
import json
import logging
from authlib.integrations.flask_oauth2 import current_token
from authlib.jose.errors import BadSignatureError
from common_utils_py.agreements.service_agreement import ServiceAgreement
from common_utils_py.agreements.service_types import ServiceTypes
from common_utils_py.did import id_to_did, NEVERMINED_PREFIX
from common_utils_py.did_resolver.did_resolver import DIDResolver
from common_utils_py.http_requests.requests_session import get_requests_session
from common_utils_py.utils.crypto import (ecdsa_encryption_from_file,
get_ecdsa_public_key_from_file,
rsa_encryption_from_file)
from eth_utils import remove_0x_prefix
from flask import Blueprint, jsonify, request
from flask.wrappers import Response
from secret_store_client.client import RPCError
from web3 import Web3
from nevermined_gateway import constants
from nevermined_gateway.conditions import fulfill_escrow_payment_condition, fulfill_for_delegate_nft_transfer_condition
from nevermined_gateway.config import upload_backends
from nevermined_gateway.identity.oauth2.authorization_server import create_authorization_server
from nevermined_gateway.identity.oauth2.resource_server import create_resource_server
from nevermined_gateway.log import setup_logging
from nevermined_gateway.myapp import app
from nevermined_gateway.util import (check_required_attributes,
do_secret_store_encrypt, get_asset_url_at_index, get_config,
get_provider_account, get_provider_key_file,
get_provider_password, get_rsa_public_key_file,
is_access_granted, is_escrow_payment_condition_fulfilled, is_nft_transfer_approved,
is_nft_transfer_condition_fulfilled, keeper_instance,
setup_keeper, used_by, verify_signature, was_compute_triggered,
get_asset, generate_random_id, is_lock_payment_condition_fulfilled,
get_upload_enabled, upload_content)
setup_logging()
services = Blueprint('services', __name__)
setup_keeper(app.config['CONFIG_FILE'])
provider_acc = get_provider_account()
requests_session = get_requests_session()
authorization = create_authorization_server(app)
require_oauth = create_resource_server()
logger = logging.getLogger(__name__)
@services.route("/encrypt", methods=['POST'])
def encrypt_content():
"""Call the execution of a workflow.
swagger_from_file: docs/encrypt.yml
"""
required_attributes = ['message', 'method']
data = request.json
msg, status = check_required_attributes(required_attributes, data, 'encrypt')
if msg:
return msg, status
try:
message = data.get('message')
method = data.get('method')
if (method == 'SecretStore'):
msg, status = check_required_attributes(['did'], data, 'encrypt')
if msg:
return msg, status
did = data.get('did').replace(NEVERMINED_PREFIX, '')
hash = do_secret_store_encrypt(
remove_0x_prefix(did),
message,
provider_acc,
get_config()
)
public_key = get_ecdsa_public_key_from_file(get_provider_key_file(),
get_provider_password())
elif (method == 'PSK-ECDSA'):
hash, public_key = ecdsa_encryption_from_file(message, get_provider_key_file(),
get_provider_password())
elif (method == 'PSK-RSA'):
hash, public_key = rsa_encryption_from_file(message, get_rsa_public_key_file())
else:
return f'Unknown method: {method}\n' \
f'Options available are (`SecretStore`, `PSK-ECDSA`, `PSK-RSA`)', 500
output = dict()
output['public-key'] = public_key
output['hash'] = hash
output['method'] = method
return jsonify(output)
except Exception as e:
logger.error(f'Error: {e}. ', exc_info=1)
return f'Error: {str(e)}', 500
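# Illustrative client call for the /encrypt endpoint above (a sketch; the host,
# port and URL prefix are assumptions that depend on the deployment -- only the
# 'message'/'method'/'did' payload fields come from the handler itself):
#
#   import requests
#   resp = requests.post('http://localhost:8030/api/v1/gateway/services/encrypt',
#                        json={'message': 'my secret', 'method': 'PSK-RSA'})
#   print(resp.json())   # expected keys: 'public-key', 'hash', 'method'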
@services.route('/publish', methods=['POST'])
def publish():
"""Encrypt document using the SecretStore and keyed by the given documentId.
swagger_from_file: docs/publish.yml
"""
required_attributes = [
'documentId',
'signature',
'document',
'publisherAddress'
]
data = request.json
if 'signedDocumentId' in data and 'signature' not in data:
data['signature'] = data['signedDocumentId']
msg, status = check_required_attributes(required_attributes, data, 'publish')
if msg:
return msg, status
did = data.get('documentId')
signature = data.get('signature')
document = json.dumps(json.loads(data.get('document')), separators=(',', ':'))
publisher_address = data.get('publisherAddress')
try:
if not verify_signature(keeper_instance(), publisher_address, signature, did):
msg = f'Invalid signature {signature} for ' \
f'publisherAddress {publisher_address} and documentId {did}.'
raise ValueError(msg)
        logger.debug('Document: ' + document)
        logger.debug('DID: ' + remove_0x_prefix(did))
encrypted_document, public_key = rsa_encryption_from_file(document, get_rsa_public_key_file())
logger.debug(f'encrypted urls {encrypted_document}, '
f'publisher {publisher_address}, '
f'documentId {did}')
return encrypted_document, 201
except (RPCError, Exception) as e:
logger.error(
f'Encryption Error: {e}. \n'
f'providerAddress={provider_acc.address}\n'
f'Payload was: documentId={did}, '
f'publisherAddress={publisher_address},'
f'signature={signature}',
exc_info=1
)
return f'Error: {str(e)}', 500
@services.route('/upload/<backend>', methods=['POST'])
def upload(backend=None):
if not get_upload_enabled():
return 'Upload not supported in this server', 501
    if backend not in upload_backends:
return 'Backend not implemented', 501
file_ = request.files.get('file')
if file_ is None:
return 'No file provided in request', 400
data = request.args
try:
file_name = data.get('fileName', file_.filename)
url = upload_content(file_.read(), file_name, upload_backends[backend], app.config['CONFIG_FILE'])
return {'url': url }, 201
except Exception as e:
logger.error(f'Driver error when uploading file: {e}')
return f'Error: {str(e)}', 500
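# Illustrative upload call (a sketch; the URL prefix and the backend name, e.g.
# 's3' or 'filecoin', are assumptions that depend on the server configuration):
#
#   import requests
#   with open('data.csv', 'rb') as fh:
#       resp = requests.post('http://localhost:8030/api/v1/gateway/services/upload/s3',
#                            params={'fileName': 'data.csv'},
#                            files={'file': fh})
#   # on success the handler returns {'url': <uploaded content url>} with HTTP 201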
@services.route('/download/<int:index>', methods=['GET'])
@require_oauth()
def download(index=0):
"""Allows to download an asset data file.
swagger_from_file: docs/download.yml
"""
consumer_address = current_token["client_id"]
did = current_token["did"]
logger.info('Parameters:\nIndex: %d\nConsumerAddress: %s\n'
'DID: %s'
% (index, consumer_address, did))
try:
keeper = keeper_instance()
asset = DIDResolver(keeper.did_registry).resolve(did)
file_attributes = asset.metadata['main']['files'][index]
content_type = file_attributes.get('contentType', None)
try:
auth_method = asset.authorization.main['service']
except Exception:
auth_method = constants.ConfigSections.DEFAULT_DECRYPTION_METHOD
if auth_method not in constants.ConfigSections.DECRYPTION_METHODS:
msg = (
'The Authorization Method defined in the DDO is not part of the available '
                'methods supported '
'by the Gateway: ' + auth_method)
logger.warning(msg)
return msg, 400
url = get_asset_url_at_index(index, asset, provider_acc, auth_method)
return get_asset(request, requests_session, content_type, url, app.config['CONFIG_FILE'])
except (ValueError, Exception) as e:
logger.error(f'Error- {str(e)}', exc_info=1)
return f'Error : {str(e)}', 500
@services.route('/access/<agreement_id>', methods=['GET'])
@services.route('/access/<agreement_id>/<int:index>', methods=['GET'])
@require_oauth()
def access(agreement_id, index=0):
"""Allows to get access to an asset data file.
swagger_from_file: docs/access.yml
"""
consumer_address = current_token["client_id"]
did = current_token["did"]
agreement_id = current_token["sub"]
logger.info('Parameters:\nAgreementId: %s\nIndex: %d\nConsumerAddress: %s\n'
'DID: %s\n'
% (agreement_id, index, consumer_address, did))
try:
keeper = keeper_instance()
asset = DIDResolver(keeper.did_registry).resolve(did)
logger.debug('AgreementID :' + agreement_id)
file_attributes = asset.metadata['main']['files'][index]
content_type = file_attributes.get('contentType', None)
try:
auth_method = asset.authorization.main['service']
except Exception:
auth_method = constants.ConfigSections.DEFAULT_DECRYPTION_METHOD
if auth_method not in constants.ConfigSections.DECRYPTION_METHODS:
msg = (
'The Authorization Method defined in the DDO is not part of the available '
                'methods supported '
'by the Gateway: ' + auth_method)
logger.warning(msg)
return msg, 400
url = get_asset_url_at_index(index, asset, provider_acc, auth_method)
used_by(generate_random_id(), did, consumer_address, 'access', '0x00', 'access', provider_acc,
keeper)
return get_asset(request, requests_session, content_type, url, app.config['CONFIG_FILE'])
except (ValueError, Exception) as e:
logger.error(f'Error- {str(e)}', exc_info=1)
return f'Error : {str(e)}', 500
@services.route('/access-proof/<agreement_id>', methods=['GET'])
@services.route('/access-proof/<agreement_id>/<int:index>', methods=['GET'])
@require_oauth()
def access_proof(agreement_id, index=0):
"""Allows to get access to an asset data file.
swagger_from_file: docs/access.yml
"""
consumer_address = current_token["client_id"]
did = current_token["did"]
agreement_id = current_token["sub"]
logger.info('Parameters:\nAgreementId: %s\nIndex: %d\nConsumerAddress: %s\n'
'DID: %s\n'
% (agreement_id, index, consumer_address, did))
try:
keeper = keeper_instance()
asset = DIDResolver(keeper.did_registry).resolve(did)
logger.debug('AgreementID :' + agreement_id)
file_attributes = asset.metadata['main']['files'][index]
content_type = file_attributes.get('contentType', None)
try:
auth_method = asset.authorization.main['service']
except Exception:
auth_method = constants.ConfigSections.DEFAULT_DECRYPTION_METHOD
if auth_method not in constants.ConfigSections.DECRYPTION_METHODS:
msg = (
'The Authorization Method defined in the DDO is not part of the available '
                'methods supported '
'by the Gateway: ' + auth_method)
logger.warning(msg)
return msg, 400
url = get_asset_url_at_index(index, asset, provider_acc, auth_method)
used_by(generate_random_id(), did, consumer_address, 'access', '0x00', 'access proof', provider_acc,
keeper)
return Response(
url,
'200',
content_type=content_type
)
except (ValueError, Exception) as e:
logger.error(f'Error- {str(e)}', exc_info=1)
return f'Error : {str(e)}', 500
@services.route('/nft-access/<agreement_id>', methods=['GET'])
@services.route('/nft-access/<agreement_id>/<int:index>', methods=['GET'])
@require_oauth()
def nft_access(agreement_id, index=0):
"""Allows to get access to an asset data file holding a NFT.
swagger_from_file: docs/nft_access.yml
"""
consumer_address = current_token["client_id"]
did = current_token["did"]
agreement_id = current_token["sub"]
logger.info('Parameters:\nAgreementId: %s\nIndex: %d\nConsumerAddress: %s\n'
'DID: %s\n'
% (agreement_id, index, consumer_address, did))
try:
keeper = keeper_instance()
asset = DIDResolver(keeper.did_registry).resolve(did)
file_attributes = asset.metadata['main']['files'][index]
content_type = file_attributes.get('contentType', None)
try:
auth_method = asset.authorization.main['service']
except Exception:
auth_method = constants.ConfigSections.DEFAULT_DECRYPTION_METHOD
if auth_method not in constants.ConfigSections.DECRYPTION_METHODS:
msg = (
'The Authorization Method defined in the DDO is not part of the available '
'methods supported'
'by the Gateway: ' + auth_method)
logger.warning(msg)
return msg, 400
url = get_asset_url_at_index(index, asset, provider_acc, auth_method)
used_by(generate_random_id(), did, consumer_address, 'access', '0x00', 'nft access', provider_acc,
keeper)
return get_asset(request, requests_session, content_type, url, app.config['CONFIG_FILE'])
except (ValueError, Exception) as e:
logger.error(f'Error- {str(e)}', exc_info=1)
return f'Error : {str(e)}', 500
@services.route('/nft-transfer', methods=['POST'])
def nft_transfer():
"""Allows the provider transfer and release the rewards.
swagger_from_file: docs/nft_transfer.yml
"""
required_attributes = ['agreementId', 'nftHolder', 'nftReceiver', 'nftAmount']
data = request.json
msg, status = check_required_attributes(required_attributes, data, 'nft-transfer')
if msg:
return msg, status
agreement_id = data.get('agreementId')
nft_holder_address = data.get('nftHolder')
nft_receiver_address = data.get('nftReceiver')
nft_amount = data.get('nftAmount')
keeper = keeper_instance()
agreement = keeper.agreement_manager.get_agreement(agreement_id)
did = id_to_did(agreement.did)
ddo = DIDResolver(keeper.did_registry).resolve(did)
try:
ServiceAgreement.from_ddo(ServiceTypes.NFT_SALES, ddo)
except ValueError as e:
logger.error('nft-sales service not found on ddo for %s', did)
return str(e), 400
(
lock_payment_condition_id,
nft_transfer_condition_id,
escrow_payment_condition_id
) = agreement.condition_ids
if not is_lock_payment_condition_fulfilled(lock_payment_condition_id, keeper):
msg = f'lockPayment condition for agreement_id={agreement_id} is not fulfilled'
logger.warning(msg)
return msg, 402
if not is_nft_transfer_approved(nft_holder_address, get_provider_account().address, keeper):
msg = f'Gateway ({get_provider_account().address}) is not approved to transfer nfts from {nft_holder_address}'
logger.warning(msg)
return msg, 405
# fulfill transferNFT condition
if not is_nft_transfer_condition_fulfilled(nft_transfer_condition_id, keeper):
logger.debug('NFTTransfer condition not fulfilled')
result = fulfill_for_delegate_nft_transfer_condition(
agreement_id,
agreement.did,
Web3.toChecksumAddress(nft_holder_address),
Web3.toChecksumAddress(nft_receiver_address),
nft_amount,
lock_payment_condition_id,
keeper
)
if result is False:
msg = f'There was an error fulfilling the NFTTransfer condition for agreement_id={agreement_id}'
logger.error(msg)
return msg, 500
# fulfill escrowPayment condition
if not is_escrow_payment_condition_fulfilled(escrow_payment_condition_id, keeper):
logger.debug('EscrowPayment condition not fulfilled')
result = fulfill_escrow_payment_condition(
keeper,
agreement_id,
[
nft_transfer_condition_id,
lock_payment_condition_id,
escrow_payment_condition_id
],
ddo,
get_provider_account(),
service_type=ServiceTypes.NFT_SALES
)
if result is False:
msg = f'There was an error fulfilling the EscrowPayment condition for agreement_id={agreement_id}'
logger.error(msg)
return msg, 500
return 'success', 200
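# Illustrative payload for the /nft-transfer endpoint above (a sketch; the
# addresses, agreement id and URL prefix are placeholders, not real values):
#
#   import requests
#   payload = {
#       'agreementId': '0x...agreement id...',
#       'nftHolder': '0x...holder address...',
#       'nftReceiver': '0x...receiver address...',
#       'nftAmount': 1
#   }
#   resp = requests.post('http://localhost:8030/api/v1/gateway/services/nft-transfer',
#                        json=payload)
#   # returns 'success'/200 once the transferNFT and escrowPayment conditions are fulfilled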
@services.route('/execute/<agreement_id>', methods=['POST'])
@require_oauth()
def execute(agreement_id):
"""Call the execution of a workflow.
swagger_from_file: docs/execute.yml
"""
consumer_address = current_token["client_id"]
workflow_did = current_token["did"]
agreement_id = current_token["sub"]
try:
keeper = keeper_instance()
asset_id = keeper_instance().agreement_manager.get_agreement(agreement_id).did
did = id_to_did(asset_id)
signature = '0x00'
workflow = DIDResolver(keeper_instance().did_registry).resolve(workflow_did)
body = {"serviceAgreementId": agreement_id, "workflow": workflow.as_dictionary()}
response = requests_session.post(
get_config().compute_api_url + '/api/v1/nevermined-compute-api/init',
data=json.dumps(body),
headers={'content-type': 'application/json'})
if response.status_code != 200:
msg = f'The compute API was not able to create the workflow. {response.content}'
logger.warning(msg)
return msg, 401
used_by(generate_random_id(), did, consumer_address, 'compute', signature, 'compute', provider_acc,
keeper)
return jsonify({"workflowId": response.content.decode('utf-8')})
except Exception as e:
logger.error(f'Error- {str(e)}', exc_info=1)
return f'Error : {str(e)}', 500
@services.route('/compute/logs/<agreement_id>/<execution_id>', methods=['GET'])
@require_oauth()
def compute_logs(agreement_id, execution_id):
"""Allows to get access to an asset data file.
swagger_from_file: docs/compute_logs.yml
"""
consumer_address = current_token["client_id"]
execution_id = current_token["execution_id"]
agreement_id = current_token["sub"]
logger.info(('Parameters:\n'
'ConsumerAddress: %s\n'
'AgreementId: %s\n'
'ExecutionId: %s\n'),
consumer_address, agreement_id, execution_id)
response = requests_session.get(
get_config().compute_api_url + | |
<filename>firmware/coreboot/util/me_cleaner/me_cleaner.py
#!/usr/bin/python
# me_cleaner - Tool for partial deblobbing of Intel ME/TXE firmware images
# Copyright (C) 2016-2018 <NAME> <<EMAIL>>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
from __future__ import division, print_function
import argparse
import binascii
import hashlib
import itertools
import shutil
import sys
from struct import pack, unpack
min_ftpr_offset = 0x400
spared_blocks = 4
unremovable_modules = ("ROMP", "BUP")
unremovable_modules_me11 = ("rbe", "kernel", "syslib", "bup")
unremovable_partitions = ("FTPR",)
pubkeys_md5 = {
"763e59ebe235e45a197a5b1a378dfa04": ("ME", ("6.x.x.x",)),
"3a98c847d609c253e145bd36512629cb": ("ME", ("6.0.50.x",)),
"0903fc25b0f6bed8c4ed724aca02124c": ("ME", ("7.x.x.x", "8.x.x.x")),
"2011ae6df87c40fba09e3f20459b1ce0": ("ME", ("9.0.x.x", "9.1.x.x")),
"e8427c5691cf8b56bc5cdd82746957ed": ("ME", ("9.5.x.x", "10.x.x.x")),
"986a78e481f185f7d54e4af06eb413f6": ("ME", ("11.x.x.x",)),
"bda0b6bb8ca0bf0cac55ac4c4d55e0f2": ("TXE", ("1.x.x.x",)),
"b726a2ab9cd59d4e62fe2bead7cf6997": ("TXE", ("1.x.x.x",)),
"0633d7f951a3e7968ae7460861be9cfb": ("TXE", ("2.x.x.x",)),
"1d0a36e9f5881540d8e4b382c6612ed8": ("TXE", ("3.x.x.x",)),
"be900fef868f770d266b1fc67e887e69": ("SPS", ("2.x.x.x",)),
"4622e3f2cb212a89c90a4de3336d88d2": ("SPS", ("3.x.x.x",)),
"31ef3d950eac99d18e187375c0764ca4": ("SPS", ("4.x.x.x",))
}
class OutOfRegionException(Exception):
pass
class RegionFile:
def __init__(self, f, region_start, region_end):
self.f = f
self.region_start = region_start
self.region_end = region_end
def read(self, n):
        if self.f.tell() + n <= self.region_end:
return self.f.read(n)
else:
raise OutOfRegionException()
def readinto(self, b):
        if self.f.tell() + len(b) <= self.region_end:
return self.f.readinto(b)
else:
raise OutOfRegionException()
def seek(self, offset):
if self.region_start + offset <= self.region_end:
return self.f.seek(self.region_start + offset)
else:
raise OutOfRegionException()
def write_to(self, offset, data):
if self.region_start + offset + len(data) <= self.region_end:
self.f.seek(self.region_start + offset)
return self.f.write(data)
else:
raise OutOfRegionException()
def fill_range(self, start, end, fill):
if self.region_start + end <= self.region_end:
if start < end:
block = fill * 4096
self.f.seek(self.region_start + start)
self.f.writelines(itertools.repeat(block,
(end - start) // 4096))
self.f.write(block[:(end - start) % 4096])
else:
raise OutOfRegionException()
def fill_all(self, fill):
self.fill_range(0, self.region_end - self.region_start, fill)
def move_range(self, offset_from, size, offset_to, fill):
if self.region_start + offset_from + size <= self.region_end and \
self.region_start + offset_to + size <= self.region_end:
for i in range(0, size, 4096):
self.f.seek(self.region_start + offset_from + i, 0)
block = self.f.read(min(size - i, 4096))
self.f.seek(self.region_start + offset_from + i, 0)
self.f.write(fill * len(block))
self.f.seek(self.region_start + offset_to + i, 0)
self.f.write(block)
else:
raise OutOfRegionException()
def save(self, filename, size):
if self.region_start + size <= self.region_end:
self.f.seek(self.region_start)
copyf = open(filename, "w+b")
for i in range(0, size, 4096):
copyf.write(self.f.read(min(size - i, 4096)))
return copyf
else:
raise OutOfRegionException()
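# Illustrative use of RegionFile (a sketch, not part of the original flow; the
# offsets are placeholders): wrap an opened firmware image so that every
# read/write is constrained to the ME region and anything past region_end
# raises OutOfRegionException.
#
#   with open("firmware.bin", "r+b") as fw:
#       me_region = RegionFile(fw, region_start=0x3000, region_end=0x500000)
#       me_region.seek(0x10)
#       marker = me_region.read(4)   # e.g. b"$FPT" for a valid partition table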
def get_chunks_offsets(llut):
chunk_count = unpack("<I", llut[0x04:0x08])[0]
huffman_stream_end = sum(unpack("<II", llut[0x10:0x18]))
nonzero_offsets = [huffman_stream_end]
offsets = []
for i in range(0, chunk_count):
chunk = llut[0x40 + i * 4:0x44 + i * 4]
offset = 0
if chunk[3] != 0x80:
offset = unpack("<I", chunk[0:3] + b"\x00")[0]
offsets.append([offset, 0])
if offset != 0:
nonzero_offsets.append(offset)
nonzero_offsets.sort()
for i in offsets:
if i[0] != 0:
i[1] = nonzero_offsets[nonzero_offsets.index(i[0]) + 1]
return offsets
def remove_modules(f, mod_headers, ftpr_offset, me_end):
comp_str = ("uncomp.", "Huffman", "LZMA")
unremovable_huff_chunks = []
chunks_offsets = []
base = 0
chunk_size = 0
end_addr = 0
for mod_header in mod_headers:
name = mod_header[0x04:0x14].rstrip(b"\x00").decode("ascii")
offset = unpack("<I", mod_header[0x38:0x3C])[0] + ftpr_offset
size = unpack("<I", mod_header[0x40:0x44])[0]
flags = unpack("<I", mod_header[0x50:0x54])[0]
comp_type = (flags >> 4) & 7
print(" {:<16} ({:<7}, ".format(name, comp_str[comp_type]), end="")
if comp_type == 0x00 or comp_type == 0x02:
print("0x{:06x} - 0x{:06x} ): "
.format(offset, offset + size), end="")
if name in unremovable_modules:
end_addr = max(end_addr, offset + size)
print("NOT removed, essential")
else:
end = min(offset + size, me_end)
f.fill_range(offset, end, b"\xff")
print("removed")
elif comp_type == 0x01:
if not chunks_offsets:
f.seek(offset)
llut = f.read(4)
if llut == b"LLUT":
llut += f.read(0x3c)
chunk_count = unpack("<I", llut[0x4:0x8])[0]
base = unpack("<I", llut[0x8:0xc])[0] + 0x10000000
chunk_size = unpack("<I", llut[0x30:0x34])[0]
llut += f.read(chunk_count * 4)
chunks_offsets = get_chunks_offsets(llut)
else:
sys.exit("Huffman modules found, but LLUT is not present")
module_base = unpack("<I", mod_header[0x34:0x38])[0]
module_size = unpack("<I", mod_header[0x3c:0x40])[0]
first_chunk_num = (module_base - base) // chunk_size
last_chunk_num = first_chunk_num + module_size // chunk_size
huff_size = 0
for chunk in chunks_offsets[first_chunk_num:last_chunk_num + 1]:
huff_size += chunk[1] - chunk[0]
print("fragmented data, {:<9}): "
.format("~" + str(int(round(huff_size / 1024))) + " KiB"),
end="")
if name in unremovable_modules:
print("NOT removed, essential")
unremovable_huff_chunks += \
[x for x in chunks_offsets[first_chunk_num:
last_chunk_num + 1] if x[0] != 0]
else:
print("removed")
else:
print("0x{:06x} - 0x{:06x}): unknown compression, skipping"
.format(offset, offset + size), end="")
if chunks_offsets:
removable_huff_chunks = []
for chunk in chunks_offsets:
if all(not(unremovable_chk[0] <= chunk[0] < unremovable_chk[1] or
unremovable_chk[0] < chunk[1] <= unremovable_chk[1])
for unremovable_chk in unremovable_huff_chunks):
removable_huff_chunks.append(chunk)
for removable_chunk in removable_huff_chunks:
if removable_chunk[1] > removable_chunk[0]:
end = min(removable_chunk[1], me_end)
f.fill_range(removable_chunk[0], end, b"\xff")
end_addr = max(end_addr,
max(unremovable_huff_chunks, key=lambda x: x[1])[1])
return end_addr
def check_partition_signature(f, offset):
f.seek(offset)
header = f.read(0x80)
modulus = int(binascii.hexlify(f.read(0x100)[::-1]), 16)
public_exponent = unpack("<I", f.read(4))[0]
signature = int(binascii.hexlify(f.read(0x100)[::-1]), 16)
header_len = unpack("<I", header[0x4:0x8])[0] * 4
manifest_len = unpack("<I", header[0x18:0x1c])[0] * 4
f.seek(offset + header_len)
sha256 = hashlib.sha256()
sha256.update(header)
sha256.update(f.read(manifest_len - header_len))
decrypted_sig = pow(signature, public_exponent, modulus)
return "{:#x}".format(decrypted_sig).endswith(sha256.hexdigest()) # FIXME
def print_check_partition_signature(f, offset):
if check_partition_signature(f, offset):
print("VALID")
else:
print("INVALID!!")
sys.exit("The FTPR partition signature is not valid. Is the input "
"ME/TXE image valid?")
def relocate_partition(f, me_end, partition_header_offset,
new_offset, mod_headers):
f.seek(partition_header_offset)
name = f.read(4).rstrip(b"\x00").decode("ascii")
f.seek(partition_header_offset + 0x8)
old_offset, partition_size = unpack("<II", f.read(0x8))
llut_start = 0
for mod_header in mod_headers:
if (unpack("<I", mod_header[0x50:0x54])[0] >> 4) & 7 == 0x01:
llut_start = unpack("<I", mod_header[0x38:0x3C])[0] + old_offset
break
if mod_headers and llut_start != 0:
# Bytes 0x9:0xb of the LLUT (bytes 0x1:0x3 of the AddrBase) are added
# to the SpiBase (bytes 0xc:0x10 of the LLUT) to compute the final
# start of the LLUT. Since AddrBase is not modifiable, we can act only
# on SpiBase and here we compute the minimum allowed new_offset.
f.seek(llut_start + 0x9)
lut_start_corr = unpack("<H", f.read(2))[0]
new_offset = max(new_offset,
lut_start_corr - llut_start - 0x40 + old_offset)
new_offset = ((new_offset + 0x1f) // 0x20) * 0x20
offset_diff = new_offset - old_offset
print("Relocating {} from {:#x} - {:#x} to {:#x} - {:#x}..."
.format(name, old_offset, old_offset + partition_size,
new_offset, new_offset + partition_size))
print(" Adjusting FPT entry...")
f.write_to(partition_header_offset + 0x8,
pack("<I", new_offset))
if mod_headers:
if llut_start != 0:
f.seek(llut_start)
if f.read(4) == b"LLUT":
print(" Adjusting LUT start offset...")
lut_offset = llut_start + offset_diff + 0x40 - lut_start_corr
f.write_to(llut_start + 0x0c, pack("<I", lut_offset))
print(" Adjusting Huffman start offset...")
f.seek(llut_start + 0x14)
old_huff_offset = unpack("<I", f.read(4))[0]
f.write_to(llut_start + 0x14,
pack("<I", old_huff_offset + offset_diff))
print(" Adjusting chunks offsets...")
f.seek(llut_start + 0x4)
chunk_count = unpack("<I", f.read(4))[0]
f.seek(llut_start + 0x40)
chunks = bytearray(chunk_count * 4)
f.readinto(chunks)
for i in range(0, chunk_count * 4, 4):
if chunks[i + 3] != 0x80:
chunks[i:i + 3] = \
pack("<I", unpack("<I", chunks[i:i + 3] +
b"\x00")[0] + offset_diff)[0:3]
f.write_to(llut_start + 0x40, chunks)
else:
sys.exit("Huffman modules present but no LLUT found!")
else:
print(" No Huffman modules found")
print(" Moving data...")
partition_size = min(partition_size, me_end - old_offset)
f.move_range(old_offset, partition_size, new_offset, b"\xff")
return new_offset
def check_and_remove_modules(f, me_end, offset, min_offset,
relocate, keep_modules):
f.seek(offset + 0x20)
num_modules = unpack("<I", f.read(4))[0]
f.seek(offset + 0x290)
data = f.read(0x84)
mod_header_size = 0
if data[0x0:0x4] == b"$MME":
if data[0x60:0x64] == b"$MME" or num_modules == 1:
mod_header_size = 0x60
elif data[0x80:0x84] == b"$MME":
mod_header_size = 0x80
if mod_header_size != 0:
f.seek(offset + 0x290)
data = f.read(mod_header_size * num_modules)
mod_headers = [data[i * mod_header_size:(i + 1) * mod_header_size]
for i in range(0, num_modules)]
if all(hdr.startswith(b"$MME") for hdr in mod_headers):
            if keep_modules:
end_addr = offset + ftpr_length
else:
end_addr = remove_modules(f, mod_headers, offset, me_end)
            if relocate:
new_offset = relocate_partition(f, me_end, 0x30, min_offset,
mod_headers)
end_addr += new_offset - offset
offset = new_offset
return end_addr, offset
else:
print("Found less modules than expected in the FTPR "
"partition; skipping modules removal")
else:
print("Can't find the module header size; skipping "
"modules removal")
return -1, offset
def check_and_remove_modules_me11(f, me_end, partition_offset,
partition_length, min_offset, relocate,
keep_modules):
comp_str = ("LZMA/uncomp.", "Huffman")
if keep_modules:
end_data = partition_offset + partition_length
else:
end_data = 0
f.seek(partition_offset + 0x4)
module_count = unpack("<I", f.read(4))[0]
modules = []
modules.append(("end", partition_length, 0))
f.seek(partition_offset + 0x10)
for i in range(0, module_count):
data = f.read(0x18)
name = data[0x0:0xc].rstrip(b"\x00").decode("ascii")
offset_block | |
import yaml
import os
import logging
from os.path import relpath
from dc_exceptions import DcException
from copy import deepcopy
class DcMixer(object):
"""
Main class for dc-mixer
"""
__MIXER_FILE = 'docker-compose-mixer.yml'
""":type : string"""
__EXIT_STATUS_INPUT_FILE_NOT_EXISTS = 2
""":type : string"""
__input_file = '/docker-compose-mixer.yml'
""":type : string"""
__output_file = '/docker-compose.yml'
""":type : string"""
__scopes_container = None
""":type : ScopesContainer"""
def __init__(self, input_file, output_file, scope_container):
"""
:param input_file: string
:param scope_container: ScopesContainer
"""
self.__input_file = input_file
self.__output_file = output_file
self.__scopes_container = scope_container
def get_input_file(self):
"""
:return: string
"""
return self.__input_file
def process(self):
logging.log(logging.DEBUG, 'Start compiling compose file...')
logging.log(logging.DEBUG, 'Input file: ' + self.__input_file + '; output file: ' + self.__output_file)
input_file = self.get_input_file()
if not os.path.isfile(self.get_input_file()):
raise DcException('File ' + input_file + ' does not exist, can\'t continue!')
mixer_config = yaml.load(open(self.get_input_file(), 'r'))
logging.log(logging.DEBUG, 'Mixer config is below:\n\t' + str(mixer_config))
if 'includes' not in mixer_config:
            logging.log(logging.WARNING, 'No includes found in ' + self.__MIXER_FILE)
else:
self.flush()
self.build_scopes(mixer_config)
self.resolve_services_names()
self.resolve_paths()
self.resolve_ports()
self.add_master_scope(mixer_config)
self.apply_overrides(mixer_config)
self.save_result_scope()
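    # Illustrative docker-compose-mixer.yml consumed by process() (a sketch;
    # only the section names -- includes, ignores, master_services, overrides --
    # come from the code, the concrete services and paths are made up):
    #
    #   includes:
    #     proja: ../project-a/docker-compose.yml
    #     projb: ../project-b/docker-compose.yml
    #   ignores:
    #     - projbdb
    #   master_services:
    #     nginx:
    #       image: nginx:latest
    #       links:
    #         - projaweb
    #   overrides:
    #     projaweb:
    #       environment:
    #         - APP_ENV=dev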
def flush(self):
"""
Flush ScopesContainer
"""
self.__scopes_container.flush()
@staticmethod
def is_path_relative(path):
"""
Check if we can update path with prefix
:param path: string
:return: Bool
"""
if os.path.isabs(path) or str(path).startswith('~'):
return False
else:
return True
def build_scopes(self, mixer_config):
"""
Build scopes in scopes container
:param mixer_config: dict
"""
if 'ignores' not in mixer_config:
mixer_config['ignores'] = []
self.__scopes_container.set_ignored_services(mixer_config['ignores'])
for (prefix, include_file) in mixer_config['includes'].iteritems():
if self.is_path_relative(include_file):
include_file = os.path.normpath(os.path.join(os.path.dirname(self.__input_file), include_file))
logging.log(logging.DEBUG, 'Creating scope for file: ' + include_file + ' and prefix: ' + prefix)
scope = ServicesScope(prefix)
scope.extract_services_from_file(include_file)
self.__scopes_container.add_scope(prefix, scope)
def resolve_services_names(self):
"""
Resolve services names in all scopes
"""
logging.log(logging.DEBUG, 'Resolving services names')
self.__scopes_container.resolve_names()
def resolve_paths(self):
"""
Resolve paths to files and dirs
"""
        logging.log(logging.DEBUG, 'Resolving services paths')
self.__scopes_container.resolve_paths(os.path.dirname(self.__output_file))
def resolve_ports(self):
"""
Resolve ports which we will bind to host machine
"""
logging.log(logging.DEBUG, 'Resolving services ports')
redefined_ports = self.__scopes_container.resolve_ports()
logging.log(logging.DEBUG, 'Redefined ports:\n\t' + str(redefined_ports))
def add_master_scope(self, mixer_config):
"""
Add master services from `master_services` section
"""
if 'master_services' not in mixer_config:
return
scope = ServicesScope('')
scope.extract_services(mixer_config['master_services'])
self.__scopes_container.add_scope('', scope)
def apply_overrides(self, mixer_config):
"""
Apply overrides from `overrides` section
:param mixer_config: dict
"""
if 'overrides' not in mixer_config:
return
self.__scopes_container.apply_overrides(mixer_config['overrides'])
def save_result_scope(self):
"""
Save result scope in output file as yaml
:return: bool
"""
scope = self.__scopes_container.get_result_scope()
logging.log(logging.DEBUG, 'Result scope is:\n\t' + str(scope))
with open(self.__output_file, 'w') as outfile:
logging.log(logging.DEBUG, 'Save result scope in the file "' + self.__output_file + '"')
outfile.write(yaml.dump(scope, default_flow_style=False, indent=2))
class ScopesContainer(object):
"""
High level container for scopes
"""
__scopes = {}
""":type : dict[ServicesScope]"""
__ignored_services = []
""":type : list"""
def __init__(self):
self.__scopes = {}
self.__ignored_services = []
def set_ignored_services(self, ignored_services):
self.__ignored_services = ignored_services
def add_scope(self, scope_name, scope):
"""
Add scope to container
:param scope_name:
:param scope: ServicesScope
"""
self.__scopes[scope_name] = scope
def flush(self):
"""
Remove all scopes from container
"""
self.__scopes = {}
self.__ignored_services = []
def get_result_scope(self):
"""
Get result array of services
:return: dict
"""
services_definitions = {}
for (scope_name, scope) in self.__scopes.iteritems():
services_definitions.update(scope.get_services_definitions())
return services_definitions
def resolve_names(self):
"""
Resolve names in services and add prefixes using scopes' name
"""
for (scope_name, scope) in self.__scopes.iteritems():
scope.update_names(self.__ignored_services)
def resolve_paths(self, work_path='/'):
"""
Resolve paths in services
"""
for (scope_name, scope) in self.__scopes.iteritems():
scope.update_paths(work_path)
def resolve_ports(self):
"""
Resolve ports and return redefined in each container
{
projaphp: {
80:81 // redefine port 80 to 81 for service projaphp
}
}
:return: dict
"""
busy_ports = []
redefined_ports = {}
for (scope_name, scope) in self.__scopes.iteritems():
busy_ports, new_redefined_ports = scope.update_ports(busy_ports)
redefined_ports.update(new_redefined_ports)
return redefined_ports
def apply_overrides(self, overrides):
"""
Apply overrides
:param overrides: dict
"""
for (scope_name, scope) in self.__scopes.iteritems():
self.__scopes[scope_name].apply_overrides(overrides)
class ServicesScope(object):
"""
Class which defines services from one scope (file docker-compose.yml)
"""
__scope_name = None
""":type : string"""
__services = {}
""":type : dict[Service]"""
__services_path = None
""":type : string"""
def __init__(self, scope_name):
self.__scope_name = scope_name
self.__services = {}
self.__services_path = None
def extract_services_from_file(self, file_name):
"""
Get scope of services from one file
:param file_name: list[Service]
"""
services_file = os.path.abspath(file_name)
self.__services_path = os.path.dirname(services_file)
services_config = yaml.load(open(services_file, 'r'))
self.extract_services(services_config)
def extract_services(self, services_config):
"""
Extract services from config array
:param services_config: dict
"""
for (service_name, service) in services_config.iteritems():
self.__services[service_name] = Service(service)
def get_services_definitions(self):
"""
Get services as dictionary
:return: dict
"""
definitions = {}
for (service_name, service) in self.__services.iteritems():
if not service.is_ignored():
definitions[service_name] = service.get_definition()
return definitions
def update_names(self, ignored_services=[], prefix=None):
"""
Update services names with prefix
:param prefix: string
"""
if not prefix:
prefix = self.__scope_name
services = {}
name_map = {}
for (service_name, service) in self.__services.iteritems():
# rename
new_name = str(prefix + service_name)
name_map[service_name] = new_name
service = deepcopy(self.__services[service_name]) # deepcopy and remove old dictionary
services[new_name] = service
if new_name in ignored_services:
service.ignore()
self.__services = services
for (service_name, service) in self.__services.iteritems():
# update usages of service name
service.update_container_name(service_name)
service.update_volumes_from(name_map, ignored_services)
service.update_links(name_map, ignored_services)
service.update_extends(name_map, ignored_services)
def update_paths(self, work_path):
"""
Resolve paths in scope
"""
rel_path = relpath(self.__services_path, work_path)
for (service_name, service) in self.__services.iteritems():
# update usages of service name
service.update_build_path(rel_path)
service.update_volumes_path(rel_path)
service.update_env_file_path(rel_path)
service.update_extends_path(rel_path)
def update_ports(self, busy_ports=[]):
"""
Update ports and add busy ports to input array
:param busy_ports: dict
:return: list, dict
"""
redefined_ports = {}
for (service_name, service) in self.__services.iteritems():
if service.is_ignored(): # dont need to deal with ignored service
continue
busy_ports, service_redefined_ports = service.update_ports()
if len(service_redefined_ports):
redefined_ports[service_name] = service_redefined_ports
return busy_ports, redefined_ports
def apply_overrides(self, overrides):
"""
Override
:param overrides: dict
"""
for (service_name, service) in self.__services.iteritems():
if service_name in overrides:
service.apply_overrides(overrides[service_name])
class Service(object):
"""
Class which defines service from docker compose
"""
__definition = None
""":type : dict"""
__ignored = False
""":type : bool"""
def __init__(self, definition):
"""
:param definition: dict
"""
self.__definition = definition
self.__ignored = False
def ignore(self):
"""
Ignore service in scope
"""
self.__ignored = True
def is_ignored(self):
"""
If service ignored
:return: bool
"""
return self.__ignored
def get_definition(self):
"""
Get definition of service
:return: list
"""
return self.__definition
def update_container_name(self, name):
"""
Update container name if only exists
:param name: string
"""
if 'container_name' in self.__definition:
if self.__definition['container_name']:
self.__definition['container_name'] = name
else:
del self.__definition['container_name']
def update_volumes_from(self, name_map, ignored_services=[]):
"""
Update volumes from
:param name_map: dict
:param ignored_services: list
"""
new_volumes_from = []
if 'volumes_from' in self.__definition:
if self.__definition['volumes_from']:
for (service_name) in self.__definition['volumes_from']:
new_volume_from = name_map[service_name]
if new_volume_from not in ignored_services: # we don't need ignored services
new_volumes_from.append(new_volume_from)
self.__definition['volumes_from'] = new_volumes_from
else:
del self.__definition['volumes_from']
def update_links(self, name_map, ignored_services=[]):
"""
Update links
:param name_map: dict
:param ignored_services: list
"""
new_links = []
if 'links' in self.__definition:
if self.__definition['links']:
for (link) in self.__definition['links']:
link_parts = str(link).split(':', 1)
old_link_service=link_parts[0]
new_link_service = name_map[old_link_service]
if new_link_service not in ignored_services: # we don't need ignored services
link_parts[0] = new_link_service
if len(link_parts) == 1:
link_parts.append(old_link_service)
new_links.append(':'.join(link_parts))
self.__definition['links'] = new_links
else:
                del self.__definition['links']
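    # Example of the renaming performed above (illustrative): with prefix
    # 'proja' and name_map {'web': 'projaweb', 'db': 'projadb'}, a service
    # defined with links ['db', 'db:database'] ends up with
    # ['projadb:db', 'projadb:database'] -- the original short name is kept as
    # the alias so code inside the container keeps resolving it.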
def update_extends(self, name_map, ignored_services):
"""
:param name_map: dict
:param ignored_services: list
"""
if 'extends' in self.__definition and self.__definition['extends'] and \
('file' not in self.__definition['extends'] or not self.__definition['extends']['file']) and \
'service' in self.__definition['extends'] and self.__definition['extends']['service']:
new_extends_service = name_map[self.__definition['extends']['service']]
if new_extends_service in ignored_services:
del self.__definition['extends']
self.ignore()
else:
self.__definition['extends']['service'] = new_extends_service
def update_build_path(self, rel_path):
"""
:param rel_path: string
"""
if 'build' in self.__definition:
if self.__definition['build']:
new_build = self.__definition['build']
if DcMixer.is_path_relative(new_build):
new_build = os.path.normpath(os.path.join(rel_path, new_build))
self.__definition['build'] = new_build
else:
del self.__definition['build']
def update_volumes_path(self, rel_path):
"""
:param rel_path: string
"""
new_volumes = []
if 'volumes' in self.__definition:
if self.__definition['volumes']:
for (volume) in self.__definition['volumes']:
volume_parts = str(volume).split(':', 1)
if DcMixer.is_path_relative(volume_parts[0]):
host_path = os.path.normpath(os.path.join(rel_path, volume_parts[0]))
if not str(volume_parts[0]).startswith('..'):
host_path = os.path.join('.', host_path)
volume_parts[0] = host_path
new_volumes.append(':'.join(volume_parts))
self.__definition['volumes'] = new_volumes
else:
del self.__definition['volumes']
def update_env_file_path(self, rel_path):
"""
Update path[s] to file with ENV variables
:param rel_path: string
"""
if 'env_file' in self.__definition:
if self.__definition['env_file']:
if isinstance(self.__definition['env_file'], str):
self.__definition['env_file'] = [self.__definition['env_file']]
new_env_files = []
for (env_file) in self.__definition['env_file']:
new_env_file = env_file
if DcMixer.is_path_relative(new_env_file):
new_env_file = os.path.normpath(os.path.join(rel_path, new_env_file))
new_env_files.append(new_env_file)
self.__definition['env_file'] = new_env_files
else:
del self.__definition['env_file']
def update_extends_path(self, rel_path):
"""
:param rel_path: string
"""
if 'extends' in self.__definition and self.__definition['extends'] and \
'file' in self.__definition['extends'] and self.__definition['extends']['file']:
new_file = self.__definition['extends']['file']
if DcMixer.is_path_relative(new_file):
self.__definition['extends']['file'] = os.path.normpath(os.path.join(rel_path, new_file))
def update_ports(self, busy_ports=[]):
"""
Update ports and return busy ports | |
current law and records modified with imputation
elecv_credit = max(0., min(ev_credit_amt, EV_credit_c))
# phaseout based on agi
posevagi = max(c00100, 0.)
ev_max = EV_credit_ps[MARS - 1]
if posevagi < ev_max:
evtc = elecv_credit
else:
        evtc_reduced = max(0., elecv_credit - EV_credit_prt * (posevagi - ev_max))
        evtc = min(elecv_credit, evtc_reduced)
return evtc
@iterate_jit(nopython=True)
def AmOppCreditParts(exact, e87521, num, c00100, CR_AmOppRefundable_hc,
CR_AmOppNonRefundable_hc, c10960, c87668):
"""
Applies a phaseout to the Form 8863, line 1, American Opportunity Credit
amount, e87521, and then applies the 0.4 refundable rate.
Logic corresponds to Form 8863, Part I.
Notes
-----
Tax Law Parameters that are not parameterized:
90000 : American Opportunity Credit phaseout income base
10000 : American Opportunity Credit phaseout income range length
1/1000 : American Opportunity Credit phaseout rate
0.4 : American Opportunity Credit refundable rate
Parameters
----------
exact : whether or not to do rounding of phaseout fraction
e87521 : total tentative American Opportunity Credit for all students,
Form 8863, line 1
num : number of people filing jointly
c00100 : AGI
CR_AmOppRefundable_hc: haircut for the refundable portion of the
American Opportunity Credit
CR_AmOppNonRefundable_hc: haircut for the nonrefundable portion of the
American Opportunity Credit
Returns
-------
c10960 : Refundable part of American Opportunity Credit
c87668 : Tentative nonrefundable part of American Opportunity Credit
"""
if e87521 > 0.:
c87658 = max(0., 90000. * num - c00100)
c87660 = 10000. * num
if exact == 1: # exact calculation as on tax forms
c87662 = 1000. * min(1., round(c87658 / c87660, 3))
else:
c87662 = 1000. * min(1., c87658 / c87660)
c87664 = c87662 * e87521 / 1000.
c10960 = 0.4 * c87664 * (1. - CR_AmOppRefundable_hc)
c87668 = c87664 - c10960 * (1. - CR_AmOppNonRefundable_hc)
else:
c10960 = 0.
c87668 = 0.
return (c10960, c87668)
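# Worked example of the phaseout above (illustrative arithmetic only): a single
# filer (num=1) with AGI c00100=85,000 and e87521=2,500 gets
#   c87658 = max(0, 90,000*1 - 85,000) = 5,000
#   c87662 = 1000 * min(1, 5,000/10,000) = 500
#   c87664 = 500 * 2,500 / 1000 = 1,250
# so c10960 = 0.4 * 1,250 = 500 refundable and c87668 = 750 nonrefundable
# (before any haircuts).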
@iterate_jit(nopython=True)
def SchR(age_head, age_spouse, MARS, c00100,
c05800, e07300, c07180, e02400, c02500, e01500, e01700, CR_SchR_hc,
c07200):
"""
Calculates Schedule R credit for the elderly and the disabled, c07200.
Note that no Schedule R policy parameters are inflation indexed.
Note that all Schedule R policy parameters are hard-coded, and therefore,
are not able to be changed using Policy class parameters.
Note that the CR_SchR_hc policy parameter allows the user to eliminate
or reduce total Schedule R credits.
"""
if age_head >= 65 or (MARS == 2 and age_spouse >= 65):
# calculate credit assuming nobody is disabled (so line12 = line10)
if MARS == 2:
if age_head >= 65 and age_spouse >= 65:
schr12 = 7500.
else:
schr12 = 5000.
schr15 = 10000.
elif MARS == 3:
schr12 = 3750.
schr15 = 5000.
elif MARS in (1, 4):
schr12 = 5000.
schr15 = 7500.
else:
schr12 = 0.
schr15 = 0.
# nontaxable portion of OASDI benefits, line 13a
schr13a = max(0., e02400 - c02500)
# nontaxable portion of pension benefits, line 13b
# NOTE: the following approximation (required because of inadequate IRS
# data) will be accurate if all pensions are partially taxable
# or if all pensions are fully taxable. But if a filing unit
# receives at least one partially taxable pension and at least
# one fully taxable pension, then the approximation in the
# following line is not exactly correct.
schr13b = max(0., e01500 - e01700)
schr13c = schr13a + schr13b
schr16 = max(0., c00100 - schr15)
schr17 = 0.5 * schr16
schr18 = schr13c + schr17
schr19 = max(0., schr12 - schr18)
schr20 = 0.15 * schr19
schr21 = max(0., (c05800 - e07300 - c07180))
c07200 = min(schr20, schr21) * (1. - CR_SchR_hc)
else: # if not calculating Schedule R credit
c07200 = 0.
return c07200
@iterate_jit(nopython=True)
def EducationTaxCredit(exact, e87530, MARS, c00100, num, c05800,
e07300, c07180, c07200, c87668,
LLC_Expense_c, ETC_pe_Single, ETC_pe_Married,
CR_Education_hc,
c07230):
"""
Computes Education Tax Credits (Form 8863) nonrefundable amount, c07230.
Logic corresponds to Form 8863, Part II.
Notes
-----
Tax Law Parameters that are not parameterized:
0.2 : Lifetime Learning Credit ratio against expense
Tax Law Parameters that are parameterized:
LLC_Expense_c : Lifetime Learning Credit expense limit
ETC_pe_Married : Education Tax Credit phaseout end for married
ETC_pe_Single : Education Tax Credit phaseout end for single
Taxpayer Charateristics:
exact : whether or not to do rounding of phaseout fraction
e87530 : Lifetime Learning Credit total qualified expenses,
Form 8863, line 10
e07300 : Foreign tax credit - Form 1116
c07180 : Child/dependent care expense credit - Form 2441
c07200 : Schedule R credit
Returns
-------
c07230 : Education Tax Credits (Form 8863) nonrefundable amount
"""
c87560 = 0.2 * min(e87530, LLC_Expense_c)
if MARS == 2:
c87570 = ETC_pe_Married * 1000.
else:
c87570 = ETC_pe_Single * 1000.
c87590 = max(0., c87570 - c00100)
c87600 = 10000. * num
if exact == 1: # exact calculation as on tax forms
c87610 = min(1., round(c87590 / c87600, 3))
else:
c87610 = min(1., c87590 / c87600)
c87620 = c87560 * c87610
xline4 = max(0., c05800 - (e07300 + c07180 + c07200))
xline5 = min(c87620, xline4)
xline9 = max(0., c05800 - (e07300 + c07180 + c07200 + xline5))
xline10 = min(c87668, xline9)
c87680 = xline5 + xline10
c07230 = c87680 * (1. - CR_Education_hc)
return c07230
@iterate_jit(nopython=True)
def CharityCredit(e19800, e20100, c00100, CR_Charity_rt, CR_Charity_f,
CR_Charity_frt, MARS, charity_credit):
"""
Computes nonrefundable charity credit, charity_credit.
This credit is not part of current-law policy.
"""
total_charity = e19800 + e20100
floor = max(CR_Charity_frt * c00100, CR_Charity_f[MARS - 1])
charity_cr_floored = max(total_charity - floor, 0)
charity_credit = CR_Charity_rt * (charity_cr_floored)
return charity_credit
@iterate_jit(nopython=True)
def NonrefundableCredits(c05800, e07240, e07260, e07300, e07400,
e07600, p08000, odc,
personal_nonrefundable_credit, icgtc, iratc, evtc,
CR_RetirementSavings_hc, CR_ForeignTax_hc,
CR_ResidentialEnergy_hc, CR_GeneralBusiness_hc,
CR_MinimumTax_hc, CR_OtherCredits_hc, charity_credit,
c07180, c07200, c07220, c07230, c07240,
c07260, c07300, c07400, c07600, c08000):
"""
NonRefundableCredits function sequentially limits credits to tax liability.
Parameters
----------
CR_RetirementSavings_hc: Retirement savings credit haircut
CR_ForeignTax_hc: Foreign tax credit haircut
CR_ResidentialEnergy_hc: Residential energy credit haircut
CR_GeneralBusiness_hc: General business credit haircut
CR_MinimumTax_hc: Minimum tax credit haircut
CR_OtherCredits_hc: Other credits haircut
"""
# limit tax credits to tax liability in order they are on 2015 1040 form
avail = c05800
# Foreign tax credit - Form 1116
c07300 = min(e07300 * (1. - CR_ForeignTax_hc), avail)
avail = avail - c07300
# Child & dependent care expense credit
c07180 = min(c07180, avail)
avail = avail - c07180
# Education tax credit
c07230 = min(c07230, avail)
avail = avail - c07230
# Retirement savings credit - Form 8880
c07240 = min(e07240 * (1. - CR_RetirementSavings_hc), avail)
avail = avail - c07240
# Child tax credit
c07220 = min(c07220, avail)
avail = avail - c07220
# Other dependent credit
odc = min(odc, avail)
avail = avail - odc
# Residential energy credit - Form 5695
c07260 = min(e07260 * (1. - CR_ResidentialEnergy_hc), avail)
avail = avail - c07260
# General business credit - Form 3800
c07400 = min(e07400 * (1. - CR_GeneralBusiness_hc), avail)
avail = avail - c07400
# Prior year minimum tax credit - Form 8801
c07600 = min(e07600 * (1. - CR_MinimumTax_hc), avail)
avail = avail - c07600
# Schedule R credit
c07200 = min(c07200, avail)
avail = avail - c07200
# Other credits
c08000 = min(p08000 * (1. - CR_OtherCredits_hc), avail)
avail = avail - c08000
# Charity credit
charity_credit = min(charity_credit, avail)
avail = avail - charity_credit
# Personal nonrefundable credit
personal_nonrefundable_credit = min(personal_nonrefundable_credit, avail)
avail = avail - personal_nonrefundable_credit
# ICG credit
icgtc = min(icgtc, avail)
avail = avail - icgtc
# IRA credit
iratc = min(iratc, avail)
avail = avail - iratc
# EV credit
evtc = min(evtc, avail)
avail = avail - evtc
return (c07180, c07200, c07220, c07230, c07240, odc,
c07260, c07300, c07400, c07600, c08000, charity_credit,
personal_nonrefundable_credit, icgtc, iratc, evtc)
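# Illustrative walk-through of the sequential limiting above: with c05800=1,000
# of tax liability, a foreign tax credit of 400 and a child/dependent care
# credit of 800, the foreign tax credit is taken in full (avail 1,000 -> 600)
# and the care credit is then capped at the remaining 600; every later credit
# in the chain sees avail=0 and is reduced to zero.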
@iterate_jit(nopython=True)
def AdditionalCTC(codtc_limited, ACTC_c, n24, earned, ACTC_Income_thd,
ACTC_rt, nu06, ACTC_rt_bonus_under6family, ACTC_ChildNum,
ptax_was, c03260, e09800, c59660, e11200,
c11070):
"""
Calculates refundable Additional Child Tax Credit (ACTC), c11070,
following 2018 Form 8812 logic.
"""
# Part I
line3 = codtc_limited
line4 = ACTC_c * n24
c11070 = 0. # line15
if line3 | |
domain where the type named typeName is created.
assemblyName: The name of the assembly where the type named typeName is sought. If assemblyName is null,the
executing assembly is searched.
typeName: The name of the preferred type.
ignoreCase: true to specify that the search for typeName is not case-sensitive; false to specify that the
search is case-sensitive.
bindingAttr: A combination of zero or more bit flags that affect the search for the typeName constructor. If
bindingAttr is zero,a case-sensitive search for public constructors is conducted.
binder: An object that uses bindingAttr and args to seek and identify the typeName constructor. If
binder is null,the default binder is used.
args: An array of arguments that match in number,order,and type the parameters of the constructor to
invoke. If args is an empty array or null,the constructor that takes no parameters (the default
constructor) is invoked.
culture: Culture-specific information that governs the coercion of args to the formal types declared for
the typeName constructor. If culture is null,the System.Globalization.CultureInfo for the
current thread is used.
activationAttributes: An array of one or more attributes that can participate in activation. This is typically an
array that contains a single System.Runtime.Remoting.Activation.UrlAttribute object. The
System.Runtime.Remoting.Activation.UrlAttribute specifies the URL that is required to activate a
remote object.
Returns: A handle that must be unwrapped to access the newly created instance.
CreateInstance(type: Type,*args: Array[object]) -> object
Creates an instance of the specified type using the constructor that best matches the specified
parameters.
type: The type of object to create.
args: An array of arguments that match in number,order,and type the parameters of the constructor to
invoke. If args is an empty array or null,the constructor that takes no parameters (the default
constructor) is invoked.
Returns: A reference to the newly created object.
CreateInstance(type: Type,args: Array[object],activationAttributes: Array[object]) -> object
Creates an instance of the specified type using the constructor that best matches the specified
parameters.
type: The type of object to create.
args: An array of arguments that match in number,order,and type the parameters of the constructor to
invoke. If args is an empty array or null,the constructor that takes no parameters (the default
constructor) is invoked.
activationAttributes: An array of one or more attributes that can participate in activation. This is typically an
array that contains a single System.Runtime.Remoting.Activation.UrlAttribute object. The
System.Runtime.Remoting.Activation.UrlAttribute specifies the URL that is required to activate a
remote object.
Returns: A reference to the newly created object.
CreateInstance(type: Type,bindingAttr: BindingFlags,binder: Binder,args: Array[object],culture: CultureInfo) -> object
Creates an instance of the specified type using the constructor that best matches the specified
parameters.
type: The type of object to create.
bindingAttr: A combination of zero or more bit flags that affect the search for the type constructor. If
bindingAttr is zero,a case-sensitive search for public constructors is conducted.
binder: An object that uses bindingAttr and args to seek and identify the type constructor. If binder is
null,the default binder is used.
args: An array of arguments that match in number,order,and type the parameters of the constructor to
invoke. If args is an empty array or null,the constructor that takes no parameters (the default
constructor) is invoked.
culture: Culture-specific information that governs the coercion of args to the formal types declared for
the type constructor. If culture is null,the System.Globalization.CultureInfo for the current
thread is used.
Returns: A reference to the newly created object.
CreateInstance(type: Type,bindingAttr: BindingFlags,binder: Binder,args: Array[object],culture: CultureInfo,activationAttributes: Array[object]) -> object
Creates an instance of the specified type using the constructor that best matches the specified
parameters.
type: The type of object to create.
bindingAttr: A combination of zero or more bit flags that affect the search for the type constructor. If
bindingAttr is zero,a case-sensitive search for public constructors is conducted.
binder: An object that uses bindingAttr and args to seek and identify the type constructor. If binder is
null,the default binder is used.
args: An array of arguments that match in number,order,and type the parameters of the constructor to
invoke. If args is an empty array or null,the constructor that takes no parameters (the default
constructor) is invoked.
culture: Culture-specific information that governs the coercion of args to the formal types declared for
the type constructor. If culture is null,the System.Globalization.CultureInfo for the current
thread is used.
activationAttributes: An array of one or more attributes that can participate in activation. This is typically an
array that contains a single System.Runtime.Remoting.Activation.UrlAttribute object. The
System.Runtime.Remoting.Activation.UrlAttribute specifies the URL that is required to activate a
remote object.
Returns: A reference to the newly created object.
CreateInstance(assemblyName: str,typeName: str,activationAttributes: Array[object]) -> ObjectHandle
Creates an instance of the type whose name is specified,using the named assembly and default
constructor.
assemblyName: The name of the assembly where the type named typeName is sought. If assemblyName is null,the
executing assembly is searched.
typeName: The name of the preferred type.
activationAttributes: An array of one or more attributes that can participate in activation. This is typically an
array that contains a single System.Runtime.Remoting.Activation.UrlAttribute object. The
System.Runtime.Remoting.Activation.UrlAttribute specifies the URL that is required to activate a
remote object.
Returns: A handle that must be unwrapped to access the newly created instance.
CreateInstance(type: Type,nonPublic: bool) -> object
Creates an instance of the specified type using that type's default constructor.
type: The type of object to create.
nonPublic: true if a public or nonpublic default constructor can match; false if only a public default
constructor can match.
Returns: A reference to the newly created object.
CreateInstance(type: Type) -> object
Creates an instance of the specified type using that type's default constructor.
type: The type of object to create.
Returns: A reference to the newly created object.
CreateInstance(assemblyName: str,typeName: str) -> ObjectHandle
Creates an instance of the type whose name is specified,using the named assembly and default
constructor.
assemblyName: The name of the assembly where the type named typeName is sought. If assemblyName is null,the
executing assembly is searched.
typeName: The name of the preferred type.
Returns: A handle that must be unwrapped to access the newly created instance.
"""
pass
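    # Illustrative sketch (not part of the generated stub): the simplest of the
    # CreateInstance overloads documented above, as they might be called from
    # IronPython, assuming a CLR environment where System is importable.
    #
    #   from System import Activator, Type
    #   # CreateInstance(type: Type) -> object, default constructor
    #   sb = Activator.CreateInstance(Type.GetType("System.Text.StringBuilder"))
    #   # CreateInstance(assemblyName: str, typeName: str) -> ObjectHandle
    #   handle = Activator.CreateInstance("mscorlib", "System.Text.StringBuilder")
    #   sb2 = handle.Unwrap()  # the handle must be unwrapped to reach the instance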
@staticmethod
def CreateInstanceFrom(*__args):
"""
CreateInstanceFrom(domain: AppDomain,assemblyFile: str,typeName: str) -> ObjectHandle
Creates an instance of the type whose name is specified in the specified remote domain,using
the named assembly file and default constructor.
domain: The remote domain where the type named typeName is created.
assemblyFile: The name of a file that contains an assembly where the type named typeName is sought.
typeName: The name of the preferred type.
Returns: A handle that must be unwrapped to access the newly created instance.
CreateInstanceFrom(domain: AppDomain,assemblyFile: str,typeName: str,ignoreCase: bool,bindingAttr: BindingFlags,binder: Binder,args: Array[object],culture: CultureInfo,activationAttributes: Array[object],securityAttributes: Evidence) -> ObjectHandle
Creates an instance of the type whose name is specified in the specified remote domain,using
the named assembly file and the constructor that best matches the specified parameters.
domain: The remote domain where the type named typeName is created.
assemblyFile: The name of a file that contains an assembly where the type named typeName is sought.
typeName: The name of the preferred type.
ignoreCase: true to specify that the search for typeName is not case-sensitive; false to specify that the
search is case-sensitive.
        bindingAttr: A combination of zero or more bit flags that
self.sessionUuid:
raise Exception('sessionUuid of action[QueryVolumeAction] cannot be None')
reply = api.sync_call(self, self.sessionUuid)
self.reply = reply
self.out = reply.inventories
return self.out
class QueryVolumeSnapshotAction(inventory.APIQueryVolumeSnapshotMsg):
def __init__(self):
super(QueryVolumeSnapshotAction, self).__init__()
self.sessionUuid = None
self.reply = None
self.out = None
def run(self):
if not self.sessionUuid:
raise Exception('sessionUuid of action[QueryVolumeSnapshotAction] cannot be None')
reply = api.sync_call(self, self.sessionUuid)
self.reply = reply
self.out = reply.inventories
return self.out
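# Illustrative usage sketch (an assumption, not part of the generated SDK): the
# Query*Action classes all follow the same pattern -- construct the action, attach
# a session UUID obtained from a prior login call, optionally narrow the query,
# then call run() to get reply.inventories back.
#
#   action = QueryVolumeSnapshotAction()
#   action.sessionUuid = session_uuid                      # hypothetical, obtained elsewhere
#   action.conditions = ['volumeUuid=%s' % volume_uuid]    # hypothetical condition
#   snapshots = action.run()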
class QueryVolumeSnapshotTreeAction(inventory.APIQueryVolumeSnapshotTreeMsg):
def __init__(self):
super(QueryVolumeSnapshotTreeAction, self).__init__()
self.sessionUuid = None
self.reply = None
self.out = None
def run(self):
if not self.sessionUuid:
raise Exception('sessionUuid of action[QueryVolumeSnapshotTreeAction] cannot be None')
reply = api.sync_call(self, self.sessionUuid)
self.reply = reply
self.out = reply.inventories
return self.out
class QueryVpcIkeConfigFromLocalAction(inventory.APIQueryVpcIkeConfigFromLocalMsg):
def __init__(self):
super(QueryVpcIkeConfigFromLocalAction, self).__init__()
self.sessionUuid = None
self.reply = None
self.out = None
def run(self):
if not self.sessionUuid:
raise Exception('sessionUuid of action[QueryVpcIkeConfigFromLocalAction] cannot be None')
reply = api.sync_call(self, self.sessionUuid)
self.reply = reply
self.out = reply.inventories
return self.out
class QueryVpcIpSecConfigFromLocalAction(inventory.APIQueryVpcIpSecConfigFromLocalMsg):
def __init__(self):
super(QueryVpcIpSecConfigFromLocalAction, self).__init__()
self.sessionUuid = None
self.reply = None
self.out = None
def run(self):
if not self.sessionUuid:
raise Exception('sessionUuid of action[QueryVpcIpSecConfigFromLocalAction] cannot be None')
reply = api.sync_call(self, self.sessionUuid)
self.reply = reply
self.out = reply.inventories
return self.out
class QueryVpcUserVpnGatewayFromLocalAction(inventory.APIQueryVpcUserVpnGatewayFromLocalMsg):
def __init__(self):
super(QueryVpcUserVpnGatewayFromLocalAction, self).__init__()
self.sessionUuid = None
self.reply = None
self.out = None
def run(self):
if not self.sessionUuid:
raise Exception('sessionUuid of action[QueryVpcUserVpnGatewayFromLocalAction] cannot be None')
reply = api.sync_call(self, self.sessionUuid)
self.reply = reply
self.out = reply.inventories
return self.out
class QueryVpcVpnConnectionFromLocalAction(inventory.APIQueryVpcVpnConnectionFromLocalMsg):
def __init__(self):
super(QueryVpcVpnConnectionFromLocalAction, self).__init__()
self.sessionUuid = None
self.reply = None
self.out = None
def run(self):
if not self.sessionUuid:
raise Exception('sessionUuid of action[QueryVpcVpnConnectionFromLocalAction] cannot be None')
reply = api.sync_call(self, self.sessionUuid)
self.reply = reply
self.out = reply.inventories
return self.out
class QueryVpcVpnGatewayFromLocalAction(inventory.APIQueryVpcVpnGatewayFromLocalMsg):
def __init__(self):
super(QueryVpcVpnGatewayFromLocalAction, self).__init__()
self.sessionUuid = None
self.reply = None
self.out = None
def run(self):
if not self.sessionUuid:
raise Exception('sessionUuid of action[QueryVpcVpnGatewayFromLocalAction] cannot be None')
reply = api.sync_call(self, self.sessionUuid)
self.reply = reply
self.out = reply.inventories
return self.out
class QueryVtepAction(inventory.APIQueryVtepMsg):
def __init__(self):
super(QueryVtepAction, self).__init__()
self.sessionUuid = None
self.reply = None
self.out = None
def run(self):
if not self.sessionUuid:
raise Exception('sessionUuid of action[QueryVtepAction] cannot be None')
reply = api.sync_call(self, self.sessionUuid)
self.reply = reply
self.out = reply.inventories
return self.out
class QueryWebhookAction(inventory.APIQueryWebhookMsg):
def __init__(self):
super(QueryWebhookAction, self).__init__()
self.sessionUuid = None
self.reply = None
self.out = None
def run(self):
if not self.sessionUuid:
raise Exception('sessionUuid of action[QueryWebhookAction] cannot be None')
reply = api.sync_call(self, self.sessionUuid)
self.reply = reply
self.out = reply.inventories
return self.out
class QueryZoneAction(inventory.APIQueryZoneMsg):
def __init__(self):
super(QueryZoneAction, self).__init__()
self.sessionUuid = None
self.reply = None
self.out = None
def run(self):
if not self.sessionUuid:
raise Exception('sessionUuid of action[QueryZoneAction] cannot be None')
reply = api.sync_call(self, self.sessionUuid)
self.reply = reply
self.out = reply.inventories
return self.out
class RebootEcsInstanceAction(inventory.APIRebootEcsInstanceMsg):
def __init__(self):
super(RebootEcsInstanceAction, self).__init__()
self.sessionUuid = None
self.out = None
def run(self):
if not self.sessionUuid:
raise Exception('sessionUuid of action[RebootEcsInstanceAction] cannot be None')
evt = api.async_call(self, self.sessionUuid)
self.out = evt
return self.out
class RebootVmInstanceAction(inventory.APIRebootVmInstanceMsg):
def __init__(self):
super(RebootVmInstanceAction, self).__init__()
self.sessionUuid = None
self.out = None
def run(self):
if not self.sessionUuid:
raise Exception('sessionUuid of action[RebootVmInstanceAction] cannot be None')
evt = api.async_call(self, self.sessionUuid)
self.out = evt
return self.out
class ReclaimSpaceFromImageStoreAction(inventory.APIReclaimSpaceFromImageStoreMsg):
def __init__(self):
super(ReclaimSpaceFromImageStoreAction, self).__init__()
self.sessionUuid = None
self.out = None
def run(self):
if not self.sessionUuid:
raise Exception('sessionUuid of action[ReclaimSpaceFromImageStoreAction] cannot be None')
evt = api.async_call(self, self.sessionUuid)
self.out = evt
return self.out
class ReconnectBackupStorageAction(inventory.APIReconnectBackupStorageMsg):
def __init__(self):
super(ReconnectBackupStorageAction, self).__init__()
self.sessionUuid = None
self.out = None
def run(self):
if not self.sessionUuid:
raise Exception('sessionUuid of action[ReconnectBackupStorageAction] cannot be None')
evt = api.async_call(self, self.sessionUuid)
self.out = evt
return self.out
class ReconnectConsoleProxyAgentAction(inventory.APIReconnectConsoleProxyAgentMsg):
def __init__(self):
super(ReconnectConsoleProxyAgentAction, self).__init__()
self.sessionUuid = None
self.out = None
def run(self):
if not self.sessionUuid:
raise Exception('sessionUuid of action[ReconnectConsoleProxyAgentAction] cannot be None')
evt = api.async_call(self, self.sessionUuid)
self.out = evt
return self.out
class ReconnectHostAction(inventory.APIReconnectHostMsg):
def __init__(self):
super(ReconnectHostAction, self).__init__()
self.sessionUuid = None
self.out = None
def run(self):
if not self.sessionUuid:
raise Exception('sessionUuid of action[ReconnectHostAction] cannot be None')
evt = api.async_call(self, self.sessionUuid)
self.out = evt
return self.out
class ReconnectImageStoreBackupStorageAction(inventory.APIReconnectImageStoreBackupStorageMsg):
def __init__(self):
super(ReconnectImageStoreBackupStorageAction, self).__init__()
self.sessionUuid = None
self.out = None
def run(self):
if not self.sessionUuid:
raise Exception('sessionUuid of action[ReconnectImageStoreBackupStorageAction] cannot be None')
evt = api.async_call(self, self.sessionUuid)
self.out = evt
return self.out
class ReconnectPrimaryStorageAction(inventory.APIReconnectPrimaryStorageMsg):
def __init__(self):
super(ReconnectPrimaryStorageAction, self).__init__()
self.sessionUuid = None
self.out = None
def run(self):
if not self.sessionUuid:
raise Exception('sessionUuid of action[ReconnectPrimaryStorageAction] cannot be None')
evt = api.async_call(self, self.sessionUuid)
self.out = evt
return self.out
class ReconnectSftpBackupStorageAction(inventory.APIReconnectSftpBackupStorageMsg):
def __init__(self):
super(ReconnectSftpBackupStorageAction, self).__init__()
self.sessionUuid = None
self.out = None
def run(self):
if not self.sessionUuid:
raise Exception('sessionUuid of action[ReconnectSftpBackupStorageAction] cannot be None')
evt = api.async_call(self, self.sessionUuid)
self.out = evt
return self.out
class ReconnectVirtualRouterAction(inventory.APIReconnectVirtualRouterMsg):
def __init__(self):
super(ReconnectVirtualRouterAction, self).__init__()
self.sessionUuid = None
self.out = None
def run(self):
if not self.sessionUuid:
raise Exception('sessionUuid of action[ReconnectVirtualRouterAction] cannot be None')
evt = api.async_call(self, self.sessionUuid)
self.out = evt
return self.out
class RecoverDataVolumeAction(inventory.APIRecoverDataVolumeMsg):
def __init__(self):
super(RecoverDataVolumeAction, self).__init__()
self.sessionUuid = None
self.out = None
def run(self):
if not self.sessionUuid:
raise Exception('sessionUuid of action[RecoverDataVolumeAction] cannot be None')
evt = api.async_call(self, self.sessionUuid)
self.out = evt
return self.out
class RecoverImageAction(inventory.APIRecoverImageMsg):
def __init__(self):
super(RecoverImageAction, self).__init__()
self.sessionUuid = None
self.out = None
def run(self):
if not self.sessionUuid:
raise Exception('sessionUuid of action[RecoverImageAction] cannot be None')
evt = api.async_call(self, self.sessionUuid)
self.out = evt
return self.out
class RecoverVmInstanceAction(inventory.APIRecoverVmInstanceMsg):
def __init__(self):
super(RecoverVmInstanceAction, self).__init__()
self.sessionUuid = None
self.out = None
def run(self):
if not self.sessionUuid:
raise Exception('sessionUuid of action[RecoverVmInstanceAction] cannot be None')
evt = api.async_call(self, self.sessionUuid)
self.out = evt
return self.out
class RecoveryImageFromImageStoreBackupStorageAction(inventory.APIRecoveryImageFromImageStoreBackupStorageMsg):
def __init__(self):
super(RecoveryImageFromImageStoreBackupStorageAction, self).__init__()
self.sessionUuid = None
self.out = None
def run(self):
if not self.sessionUuid:
raise Exception('sessionUuid of action[RecoveryImageFromImageStoreBackupStorageAction] cannot be None')
evt = api.async_call(self, self.sessionUuid)
self.out = evt
return self.out
class RecoveryVirtualBorderRouterRemoteAction(inventory.APIRecoveryVirtualBorderRouterRemoteMsg):
def __init__(self):
super(RecoveryVirtualBorderRouterRemoteAction, self).__init__()
self.sessionUuid = None
self.out = None
def run(self):
if not self.sessionUuid:
raise Exception('sessionUuid of action[RecoveryVirtualBorderRouterRemoteAction] cannot be None')
evt = api.async_call(self, self.sessionUuid)
self.out = evt
return self.out
class RefreshLoadBalancerAction(inventory.APIRefreshLoadBalancerMsg):
def __init__(self):
super(RefreshLoadBalancerAction, self).__init__()
self.sessionUuid = None
self.out = None
def run(self):
if not self.sessionUuid:
raise Exception('sessionUuid of action[RefreshLoadBalancerAction] cannot be None')
evt = api.async_call(self, self.sessionUuid)
self.out = evt
return self.out
class ReimageVmInstanceAction(inventory.APIReimageVmInstanceMsg):
def __init__(self):
super(ReimageVmInstanceAction, self).__init__()
self.sessionUuid = None
self.out = None
def run(self):
if not self.sessionUuid:
raise Exception('sessionUuid of action[ReimageVmInstanceAction] cannot be None')
evt = api.async_call(self, self.sessionUuid)
self.out = evt
return self.out
class ReloadLicenseAction(inventory.APIReloadLicenseMsg):
def __init__(self):
super(ReloadLicenseAction, self).__init__()
self.sessionUuid = None
self.out = None
def run(self):
if not self.sessionUuid:
raise Exception('sessionUuid of action[ReloadLicenseAction] cannot be None')
evt = api.async_call(self, self.sessionUuid)
self.out = evt
return self.out
class RemoveActionFromAlarmAction(inventory.APIRemoveActionFromAlarmMsg):
def __init__(self):
super(RemoveActionFromAlarmAction, self).__init__()
self.sessionUuid = None
self.out = None
def run(self):
if not self.sessionUuid:
raise Exception('sessionUuid of action[RemoveActionFromAlarmAction] cannot be None')
evt = api.async_call(self, self.sessionUuid)
self.out = evt
return self.out
class RemoveActionFromEventSubscriptionAction(inventory.APIRemoveActionFromEventSubscriptionMsg):
def __init__(self):
super(RemoveActionFromEventSubscriptionAction, self).__init__()
self.sessionUuid = None
self.out = None
def run(self):
if not self.sessionUuid:
raise Exception('sessionUuid of action[RemoveActionFromEventSubscriptionAction] cannot be None')
evt = api.async_call(self, self.sessionUuid)
self.out = evt
return self.out
class RemoveDnsFromL3NetworkAction(inventory.APIRemoveDnsFromL3NetworkMsg):
def __init__(self):
super(RemoveDnsFromL3NetworkAction, self).__init__()
self.sessionUuid = None
self.out = None
def run(self):
if not self.sessionUuid:
raise Exception('sessionUuid of action[RemoveDnsFromL3NetworkAction] cannot be None')
evt = api.async_call(self, self.sessionUuid)
self.out = evt
return self.out
class RemoveLabelFromAlarmAction(inventory.APIRemoveLabelFromAlarmMsg):
def __init__(self):
super(RemoveLabelFromAlarmAction, self).__init__()
self.sessionUuid = None
self.out = None
def run(self):
if not self.sessionUuid:
raise Exception('sessionUuid of action[RemoveLabelFromAlarmAction] cannot be None')
evt = api.async_call(self, self.sessionUuid)
self.out = evt
return self.out
class RemoveLabelFromEventSubscriptionAction(inventory.APIRemoveLabelFromEventSubscriptionMsg):
def __init__(self):
super(RemoveLabelFromEventSubscriptionAction, self).__init__()
self.sessionUuid = None
self.out = None
def run(self):
if not self.sessionUuid:
raise Exception('sessionUuid of action[RemoveLabelFromEventSubscriptionAction] cannot be None')
evt = api.async_call(self, self.sessionUuid)
self.out = evt
return self.out
class RemoveMonFromCephBackupStorageAction(inventory.APIRemoveMonFromCephBackupStorageMsg):
def __init__(self):
super(RemoveMonFromCephBackupStorageAction, self).__init__()
self.sessionUuid = None
self.out = None
def run(self):
if not self.sessionUuid:
raise Exception('sessionUuid of action[RemoveMonFromCephBackupStorageAction] cannot be None')
evt = api.async_call(self, self.sessionUuid)
self.out = evt
return self.out
class RemoveMonFromCephPrimaryStorageAction(inventory.APIRemoveMonFromCephPrimaryStorageMsg):
def __init__(self):
super(RemoveMonFromCephPrimaryStorageAction, self).__init__()
self.sessionUuid = None
self.out = None
def run(self):
if not self.sessionUuid:
raise Exception('sessionUuid of action[RemoveMonFromCephPrimaryStorageAction] cannot be None')
evt = api.async_call(self, self.sessionUuid)
self.out = evt
return self.out
class RemoveMonFromFusionstorBackupStorageAction(inventory.APIRemoveMonFromFusionstorBackupStorageMsg):
def __init__(self):
super(RemoveMonFromFusionstorBackupStorageAction, self).__init__()
self.sessionUuid = None
self.out = None
def run(self):
if not self.sessionUuid:
raise Exception('sessionUuid of action[RemoveMonFromFusionstorBackupStorageAction] cannot be None')
evt = api.async_call(self, self.sessionUuid)
self.out = evt
return self.out
class RemoveMonFromFusionstorPrimaryStorageAction(inventory.APIRemoveMonFromFusionstorPrimaryStorageMsg):
def __init__(self):
super(RemoveMonFromFusionstorPrimaryStorageAction, self).__init__()
self.sessionUuid = None
self.out = None
def run(self):
if not self.sessionUuid:
raise Exception('sessionUuid of action[RemoveMonFromFusionstorPrimaryStorageAction] cannot be None')
evt = api.async_call(self, self.sessionUuid)
self.out = evt
return self.out
class RemoveRemoteCidrsFromIPsecConnectionAction(inventory.APIRemoveRemoteCidrsFromIPsecConnectionMsg):
def __init__(self):
super(RemoveRemoteCidrsFromIPsecConnectionAction, self).__init__()
self.sessionUuid = None
self.out = None
def run(self):
if not self.sessionUuid:
            raise Exception('sessionUuid of action[RemoveRemoteCidrsFromIPsecConnectionAction] cannot be None')
dict(np.load(os.path.join(opts.out_dir, "word_index.npy"), allow_pickle=True).item())
vocab.word2index.update(word_index)
else:
word_set = raw_data.get_word_set(attack_surface,
use_counter_vocab=not opts.use_a3t_settings and not opts.use_none_settings)
vocab, word_mat = vocabulary.Vocabulary.read_word_vecs(word_set, opts.glove_dir, opts.glove, device)
if opts.model == "lstm-dp-general":
train_data = TextClassificationDatasetGeneral.from_raw_data(raw_data.train_data, vocab, attack_surface,
downsample_to=opts.downsample_to,
downsample_shard=opts.downsample_shard,
truncate_to=opts.truncate_to,
perturbation=opts.perturbation)
dev_data = TextClassificationDatasetGeneral.from_raw_data(raw_data.dev_data, vocab, attack_surface,
downsample_to=opts.downsample_to,
downsample_shard=opts.downsample_shard,
truncate_to=opts.truncate_to,
perturbation=opts.perturbation)
else:
train_data = data_class.from_raw_data(raw_data.train_data, vocab, attack_surface,
downsample_to=opts.downsample_to,
downsample_shard=opts.downsample_shard,
truncate_to=opts.truncate_to,
perturbation=opts.perturbation)
dev_data = data_class.from_raw_data(raw_data.dev_data, vocab, attack_surface,
downsample_to=opts.downsample_to,
downsample_shard=opts.downsample_shard,
truncate_to=opts.truncate_to,
perturbation=opts.perturbation)
if opts.data_cache_dir:
with open(os.path.join(opts.data_cache_dir, 'train_data.pkl'), 'wb') as outfile:
pickle.dump(train_data, outfile)
with open(os.path.join(opts.data_cache_dir, 'dev_data.pkl'), 'wb') as outfile:
pickle.dump(dev_data, outfile)
with open(os.path.join(opts.data_cache_dir, 'word_mat.pkl'), 'wb') as outfile:
pickle.dump(word_mat, outfile)
with open(os.path.join(opts.data_cache_dir, 'attack_surface.pkl'), 'wb') as outfile:
pickle.dump(attack_surface, outfile)
return train_data, dev_data, word_mat, attack_surface
def num_correct_multi_classes(model_output, gold_labels, num_classes):
"""
    Given the output of the model and the gold labels, returns the number of correct
    and certified-correct predictions.
    Args:
        - model_output: output of the model, either an ibp.IntervalBoundedTensor or a torch.Tensor
        - gold_labels: torch.Tensor with one integer class index per sample (0 .. num_classes - 1)
    Returns:
        - num_correct: int - number of correct predictions from the actual model output
        - num_cert_correct - number of bounds-certified correct predictions if the model_output was an
          IntervalBoundedTensor, 0 otherwise.
"""
if isinstance(model_output, ibp.IntervalBoundedTensor):
logits = model_output.val
num_cert_correct = 0
for i, y in enumerate(gold_labels):
y = int(y)
num_cert_correct += all(
j == y or (model_output.lb[i][y].item() > model_output.ub[i][j].item()) for j in range(num_classes))
else:
logits = model_output
num_cert_correct = 0
pred = torch.argmax(logits, dim=1)
num_correct = sum(
pred[i].item() == y.item() for i, y in enumerate(gold_labels)
)
return num_correct, num_cert_correct
def num_correct(model_output, gold_labels):
"""
    Given the output of the model and the gold labels, returns the number of correct
    and certified-correct predictions.
Args:
- model_output: output of the model, could be ibp.IntervalBoundedTensor or torch.Tensor
- gold_labels: torch.Tensor, should be of size 1 per sample, 1 for positive 0 for negative
Returns:
- num_correct: int - number of correct predictions from the actual model output
- num_cert_correct - number of bounds-certified correct predictions if the model_output was an
IntervalBoundedTensor, 0 otherwise.
"""
if isinstance(model_output, ibp.IntervalBoundedTensor):
logits = model_output.val
num_cert_correct = sum(
all((b * (2 * y - 1)).item() > 0 for b in (model_output.lb[i], model_output.ub[i]))
for i, y in enumerate(gold_labels)
)
else:
logits = model_output
num_cert_correct = 0
num_correct = sum(
(logits[i] * (2 * y - 1)).item() > 0 for i, y in enumerate(gold_labels)
)
return num_correct, num_cert_correct
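# Worked example (illustrative, plain tensors only): with logits [2.0, -1.0, 0.5]
# and gold labels [1, 0, 0], the products logits * (2*y - 1) are [2.0, 1.0, -0.5],
# so num_correct returns (2, 0): two correct predictions and, because no
# IntervalBoundedTensor was passed, zero certified-correct predictions.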
def load_model(word_mat, device, opts):
"""
Try to load a model on the device given the word_mat and opts.
Tries to load a model from the given or latest checkpoint if specified in the opts.
Otherwise instantiates a new model on the device.
"""
if opts.model == 'bow':
model = BOWModel(
vocabulary.GLOVE_CONFIGS[opts.glove]['size'], opts.hidden_size, word_mat,
pool=opts.pool, dropout=opts.dropout_prob, no_wordvec_layer=opts.no_wordvec_layer).to(device)
elif opts.model == 'cnn':
model = CNNModel(
vocabulary.GLOVE_CONFIGS[opts.glove]['size'], opts.hidden_size, opts.kernel_size,
word_mat, pool=opts.pool, dropout=opts.dropout_prob, no_wordvec_layer=opts.no_wordvec_layer,
early_ibp=opts.early_ibp, relu_wordvec=not opts.no_relu_wordvec, unfreeze_wordvec=opts.unfreeze_wordvec).to(
device)
elif opts.model == 'lstm':
model = LSTMModel(
vocabulary.GLOVE_CONFIGS[opts.glove]['size'], opts.hidden_size,
word_mat, device, pool=opts.pool, dropout=opts.dropout_prob, no_wordvec_layer=opts.no_wordvec_layer).to(
device)
elif opts.model == "lstm-dp":
model = LSTMDPModel(
vocabulary.GLOVE_CONFIGS[opts.glove]['size'], opts.hidden_size,
word_mat, device, pool=opts.pool, dropout=opts.dropout_prob, no_wordvec_layer=opts.no_wordvec_layer,
perturbation=opts.perturbation, bidirectional=not opts.no_bidirectional, baseline=opts.baseline).to(device)
elif opts.model == "lstm-dp-ascc":
model = LSTMDPModelASCC(
100001, opts.hidden_size, word_mat, device, opts.out_dir, pool=opts.pool, dropout=opts.dropout_prob,
perturbation=opts.perturbation, baseline=opts.baseline).to(device)
elif opts.model == "lstm-dp-general":
model = LSTMDPGeneralModel(
vocabulary.GLOVE_CONFIGS[opts.glove]['size'], opts.hidden_size,
word_mat, device, pool=opts.pool, dropout=opts.dropout_prob, no_wordvec_layer=opts.no_wordvec_layer,
perturbation=opts.perturbation).to(device)
elif opts.model == 'lstm-final-state':
model = LSTMFinalStateModel(
vocabulary.GLOVE_CONFIGS[opts.glove]['size'], opts.hidden_size,
word_mat, device, dropout=opts.dropout_prob, no_wordvec_layer=opts.no_wordvec_layer).to(device)
if opts.load_dir:
try:
if opts.load_ckpt is None:
load_fn = sorted(glob.glob(os.path.join(opts.load_dir, 'model-checkpoint-[0-9]+.pth')))[-1]
else:
load_fn = os.path.join(opts.load_dir, 'model-checkpoint-%d.pth' % opts.load_ckpt)
print('Loading model from %s.' % load_fn)
state_dict = dict(torch.load(load_fn, map_location=torch.device('cpu')))
state_dict['embs.weight'] = model.embs.weight
model.load_state_dict(state_dict)
print('Finished loading model.')
except Exception as ex:
print("Couldn't load model, starting anew: {}".format(ex))
return model
class RawClassificationDataset(data_util.RawDataset):
"""
Dataset that only holds x,y as (str, str) tuples
"""
def get_word_set(self, attack_surface, use_counter_vocab=True):
with open(COUNTER_FITTED_FILE) as f:
counter_vocab = set([line.split(' ')[0] for line in f])
word_set = set()
for x, y in self.data:
words = [w.lower() for w in x.split(' ')]
for w in words:
word_set.add(w)
try:
swaps = attack_surface.get_swaps(words)
for cur_swaps in swaps:
for w in cur_swaps:
word_set.add(w)
except KeyError:
# For now, ignore things not in attack surface
# If we really need them, later code will throw an error
pass
if use_counter_vocab:
return word_set & counter_vocab
else:
return word_set
class TextClassificationDataset(data_util.ProcessedDataset):
"""
Dataset that holds processed example dicts
"""
@classmethod
def from_raw_data(cls, raw_data, vocab, attack_surface=None, truncate_to=None,
downsample_to=None, downsample_shard=0, perturbation=None):
if downsample_to:
raw_data = raw_data[downsample_shard * downsample_to:(downsample_shard + 1) * downsample_to]
examples = []
if perturbation is not None:
deltas = Perturbation.str2deltas(perturbation)
for x, y in raw_data:
all_words = [w.lower() for w in x.split()]
ins_delta = 0
if perturbation is not None:
perturb = Perturbation(perturbation, all_words, vocab, attack_surface=attack_surface)
ins_delta = deltas[perturb.Ins_idx]
choices = perturb.get_output_for_baseline_final_state()
choices = [[x for x in choice if x == UNK or x in vocab] for choice in choices]
words = perturb.ipt
elif attack_surface is not None:
raise AttributeError
else:
words = [w for w in all_words if w in vocab] # Delete UNK words
# truncate and add padding
if truncate_to:
words = words[:truncate_to]
if perturbation is not None:
choices = choices[:truncate_to]
for _ in range(truncate_to - len(words)):
words.append(vocabulary.UNK_TOKEN)
if perturbation is not None:
choices.append([vocabulary.UNK_TOKEN])
if len(words) == 0:
continue
word_idxs = [vocab.get_index(w) for w in words] + [0] * ins_delta # append dummy words
x_torch = torch.tensor(word_idxs).view(1, -1, 1) # (1, T, d)
if perturbation is not None:
choices = choices + [[] for _ in range(ins_delta)] # append dummy choices
unk_mask = torch.tensor([0 if UNK in c_list else 1 for c_list in choices],
dtype=torch.long).unsqueeze(0) # (1, T)
choices_word_idxs = [
torch.tensor([vocab.get_index(c) for c in c_list if c != UNK], dtype=torch.long) for c_list in
choices
]
# if any(0 in c.view(-1).tolist() for c in choices_word_idxs):
# raise ValueError("UNK tokens found")
choices_torch = pad_sequence(choices_word_idxs, batch_first=True).unsqueeze(2).unsqueeze(
0) # (1, T, C, 1)
choices_mask = (choices_torch.squeeze(-1) != 0).long() # (1, T, C)
elif attack_surface is not None:
raise AttributeError
else:
choices_torch = x_torch.view(1, -1, 1, 1) # (1, T, 1, 1)
choices_mask = torch.ones_like(x_torch.view(1, -1, 1))
unk_mask = None
mask_torch = torch.ones((1, len(word_idxs)))
for i in range(1, ins_delta + 1):
mask_torch[0][-i] = 0
if unk_mask is None:
unk_mask = torch.ones((1, len(word_idxs)))
x_bounded = ibp.DiscreteChoiceTensorWithUNK(x_torch, choices_torch, choices_mask, mask_torch, unk_mask)
y_torch = torch.tensor(y, dtype=torch.float).view(1, 1)
lengths_torch = torch.tensor(len(word_idxs)).view(1)
examples.append(dict(x=x_bounded, y=y_torch, mask=mask_torch, lengths=lengths_torch))
return cls(raw_data, vocab, examples)
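    # Shape note (added for clarity): each example dict built above holds
    #   x       -- ibp.DiscreteChoiceTensorWithUNK wrapping values of shape (1, T, 1)
    #              and substitution choices of shape (1, T, C, 1),
    #   y       -- a (1, 1) float tensor,
    #   mask    -- a (1, T) tensor marking real (non-dummy) positions,
    #   lengths -- a one-element tensor with the padded sequence length.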
@staticmethod
def example_len(example):
return example['x'].shape[1]
@staticmethod
def collate_examples(examples):
"""
        Turns a list of examples into a workable batch, padding variable-length fields to the longest example.
"""
if len(examples) == 1:
return examples[0]
B = len(examples)
max_len = max(ex['x'].shape[1] for ex in examples)
x_vals = []
choice_mats = []
choice_masks = []
y = torch.zeros((B, 1))
lengths = torch.zeros((B,), dtype=torch.long)
masks = torch.zeros((B, max_len))
unk_masks = torch.zeros((B, max_len))
for i, ex in enumerate(examples):
x_vals.append(ex['x'].val)
choice_mats.append(ex['x'].choice_mat)
choice_masks.append(ex['x'].choice_mask)
cur_len = ex['x'].shape[1]
masks[i, :cur_len] = ex['x'].sequence_mask[0]
unk_masks[i, :cur_len] = ex['x'].unk_mask[0] if ex['x'].unk_mask is not None else 1
y[i, 0] = ex['y']
lengths[i] = ex['lengths'][0]
x_vals = data_util.multi_dim_padded_cat(x_vals, 0).long()
choice_mats = data_util.multi_dim_padded_cat(choice_mats, 0).long()
choice_masks = data_util.multi_dim_padded_cat(choice_masks, 0).long()
return {'x': ibp.DiscreteChoiceTensorWithUNK(x_vals, choice_mats, choice_masks, masks, unk_masks),
'y': y, 'mask': masks, 'lengths': lengths}
class TextClassificationDatasetGeneral(data_util.ProcessedDataset):
"""
Dataset that holds processed example dicts
"""
@classmethod
def from_raw_data(cls, raw_data, vocab, attack_surface=None, truncate_to=None,
downsample_to=None, downsample_shard=0, perturbation=None):
assert perturbation is not None
if downsample_to:
raw_data = raw_data[downsample_shard * downsample_to:(downsample_shard + 1) * downsample_to]
examples = []
for x, y in raw_data:
all_words = [w.lower() for w in x.split()]
trans_o_id = []
trans_phi = []
perturb = Perturbation(perturbation, all_words, vocab, attack_surface=attack_surface)
words = perturb.ipt
for tran in perturb.trans:
choices = tran.gen_output_for_dp()
phi = []
o = [[[] for _ in range(tran.t)] for _ in choices]
for start_pos in range(len(choices)):
phi.append(False)
for choice in choices[start_pos]:
# We give up this choice if any of the words are out of vocab
# This can vary between different implementations
if all(choice[j] in vocab for j in range(tran.t)):
phi[-1] = True # there is a valid choice
for j in range(tran.t):
o[start_pos][j].append(vocab.get_index(choice[j]))
trans_phi.append(torch.tensor(phi, dtype=torch.bool).unsqueeze(0))
trans_o_id.append(o)
# trans_o_id contains a list of o. o has length len(x) and the shape (tran.t, #choice).
if truncate_to:
                words = words[:truncate_to]
== 'blueprint':
INPUT_FASTQ_PREFIX = "out/ln/updir/mw/" + ACCESSION
if SE_OR_PE == 'se':
fq_to_rename = INPUT_FASTQ_PREFIX + ".fastq.gz"
elif SE_OR_PE == 'pe':
fq_to_rename_1 = INPUT_FASTQ_PREFIX + "_1.fastq.gz"
fq_to_rename_2 = INPUT_FASTQ_PREFIX + "_2.fastq.gz"
elif str(row['origin']) == 'fastq_absolute_path':
if SE_OR_PE == 'se':
fq_to_rename = ACCESSION + ".fastq.gz"
elif SE_OR_PE == 'pe':
fq_to_rename_1 = ACCESSION + "_1.fastq.gz"
fq_to_rename_2 = ACCESSION + "_2.fastq.gz"
elif str(row['origin']) == 'illumina_fastq_sets':
if SE_OR_PE == 'se':
samples_to_cat = str(glob.glob(ACCESSION + "_[0-9][0-9][0-9].fastq.gz"))
concat_sample = "merge-illumina-fastq-sets/" + SAMPLE_NAME + ".fastq.gz"
mwconf['ids'][concat_sample] = samples_to_cat
fq_to_rename = "out/cat/merge-illumina-fastq-sets/" + SAMPLE_NAME + ".fastq.gz"
# TODO : add PE HERE LATER
elif str(row['origin']) == 'merge_fastq':
SAMPLES_TO_MERGE = ACCESSION.split(",")
if SE_OR_PE == 'se':
samples_to_cat = str(["out/ln/alias/sst/all_samples/fastq/" + SAMPLE + ".fastq.gz" for SAMPLE in SAMPLES_TO_MERGE])
concat_sample = "merge-fastq-samples/" + SAMPLE_NAME + ".fastq.gz"
mwconf['ids'][concat_sample] = samples_to_cat
fq_to_rename = "out/cat/merge-fastq-samples/" + SAMPLE_NAME + ".fastq.gz"
elif SE_OR_PE == 'pe':
samples_to_cat_1 = str(["out/ln/alias/sst/all_samples/fastq/" + SAMPLE + "_1.fastq.gz" for SAMPLE in SAMPLES_TO_MERGE])
samples_to_cat_2 = str(["out/ln/alias/sst/all_samples/fastq/" + SAMPLE + "_2.fastq.gz" for SAMPLE in SAMPLES_TO_MERGE])
concat_sample_1 = "merge-fastq-samples/" + SAMPLE_NAME + "_1.fastq.gz"
concat_sample_2 = "merge-fastq-samples/" + SAMPLE_NAME + "_2.fastq.gz"
mwconf['ids'][concat_sample_1] = samples_to_cat_1
mwconf['ids'][concat_sample_2] = samples_to_cat_2
fq_to_rename_1 = "out/cat/merge-fastq-samples/" + SAMPLE_NAME + "_1.fastq.gz"
fq_to_rename_2 = "out/cat/merge-fastq-samples/" + SAMPLE_NAME + "_2.fastq.gz"
elif str(row['origin']) == 'subsample_fastq':
SUBSAMPLE_ARGS = ACCESSION.split(",")
SAMPLE=SUBSAMPLE_ARGS.pop(0)
N_READS_OR_FRACTION = SUBSAMPLE_ARGS.pop(0)
if len(SUBSAMPLE_ARGS) == 0:
SUBSAMPLE_SEED="123"
else:
SUBSAMPLE_SEED = SUBSAMPLE_ARGS.pop(0)
fq_to_rename_prefix = "out/seqtk/sample_-s_" + \
SUBSAMPLE_SEED + \
"_" + \
N_READS_OR_FRACTION + \
"/ln/alias/sst/all_samples/fastq/" + \
SAMPLE
if SE_OR_PE == 'se':
fq_to_rename = fq_to_rename_prefix + ".fastq.gz"
elif SE_OR_PE == 'pe':
fq_to_rename_1 = fq_to_rename_prefix + "_1.fastq.gz"
fq_to_rename_2 = fq_to_rename_prefix + "_2.fastq.gz"
# 1)
# (2
# aliases are created for fastq in order to gather all samples in the same directory
# as well as other trees defined in the base_stem_dict dict below
base_stem_dict = {
"all" : "sst/all_samples",
"by_type_and_run": "sst/by_type_and_run/" + TYPE + "/run" + RUN,
"by_type_and_exp": "sst/by_type_and_exp/" + TYPE + "/" + EXP,
"by_project" : "sst/by_project/" + PROJECT,
"by_cell_type" : "sst/by_cell_type/" + CELL_TYPE,
"by_customer" : "sst/by_customer/" + CUSTOMER,
"by_run" : "sst/by_run/run" + RUN
}
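    # Illustrative example of the aliasing below (values are hypothetical): for a
    # sample named "RPMI_H3K4me3" the loops register entries such as
    #   mwconf['ids']['sst/all_samples/fastq/RPMI_H3K4me3.fastq.gz'] = fq_to_rename
    #   mwconf['ids']['sst/by_run/run1/fastq/RPMI_H3K4me3.fastq.gz'] = fq_to_rename
    # so the same fastq is reachable from every alternative tree.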
fq_stem_dict = {}
for k in base_stem_dict.keys():
fq_stem_dict[k] = base_stem_dict[k] + '/fastq/' + SAMPLE_NAME
fq_stem = fq_stem_dict['all']
trim_stem = "sickle/" + SE_OR_PE + "_-t_sanger_-q_20/ln/alias/" + fq_stem
for k in fq_stem_dict.keys():
if SE_OR_PE == 'se':
fq_suffix = fq_stem_dict[k] + ".fastq.gz"
mwconf['ids'][fq_suffix] = fq_to_rename
fq_path = "out/ln/alias/" + fq_suffix
if PROCESS == 'yes':
mwconf['targets'].append(fq_path)
elif SE_OR_PE == 'pe':
fq_suffix_1 = fq_stem_dict[k] + "_1.fastq.gz"
fq_suffix_2 = fq_stem_dict[k] + "_2.fastq.gz"
mwconf['ids'][fq_suffix_1] = fq_to_rename_1
mwconf['ids'][fq_suffix_2] = fq_to_rename_2
fq_path_1 = "out/ln/alias/" + fq_suffix_1
fq_path_2 = "out/ln/alias/" + fq_suffix_2
if PROCESS == 'yes':
mwconf['targets'].append(fq_path_1)
mwconf['targets'].append(fq_path_2)
# 2)
# (3
# Here could be a good spot to run fastqc and fastq_screen on se or pe from all_samples tree
#trim_stem
#fastqc_path_R1 = "out/fastqc/fastq.gz/ln/alias/" + fq_stem + "_1_fastqc.zip"
#fastqc_path_R2 = "out/fastqc/fastq.gz/ln/alias/" + fq_stem + "_2_fastqc.zip"
if SE_OR_PE == "se":
fastqc_path = "out/fastqc/fastq.gz/ln/alias/" + fq_stem + "_fastqc.zip"
fastq_screen_path = "out/fastq_screen/filter/" + trim_stem + "_screen.txt"
qc_paths = [fastqc_path, fastq_screen_path]
elif SE_OR_PE == "pe":
fastqc_path_1 = "out/fastqc/fastq.gz/ln/alias/" + fq_stem + "_1_fastqc.zip"
fastqc_path_2 = "out/fastqc/fastq.gz/ln/alias/" + fq_stem + "_2_fastqc.zip"
fastq_screen_path_1 = "out/fastq_screen/filter/" + trim_stem + "_1_screen.txt"
fastq_screen_path_2 = "out/fastq_screen/filter/" + trim_stem + "_2_screen.txt"
qc_paths = [fastqc_path_1, fastqc_path_2, fastq_screen_path_1, fastq_screen_path_2]
if PROCESS == 'yes':
mwconf['targets'].append(qc_paths)
# 3)
# (4
    # QC logs are symlinked in the alternative trees in order
# to provide a mechanism to get multiQC reports with only
# subsets of samples for each subtree.
for k in base_stem_dict.keys():
if SE_OR_PE == "se":
fastqc_suffix = base_stem_dict[k] + "/logs/fastqc/" + SAMPLE_NAME + "_fastqc.zip"
mwconf['ids'][fastqc_suffix] = fastqc_path
ln_fastqc_path = "out/ln/alias/" + fastqc_suffix
fastq_screen_suffix = base_stem_dict[k] + "/logs/fastq_screen/" + SAMPLE_NAME + "_screen.txt"
mwconf['ids'][fastq_screen_suffix] = fastq_screen_path
ln_fastq_screen_path = "out/ln/alias/" + fastq_screen_suffix
ln_qc_paths = [ln_fastqc_path, ln_fastq_screen_path]
if SE_OR_PE == "pe":
fastqc_suffix_1 = base_stem_dict[k] + "/logs/fastqc/" + SAMPLE_NAME + "_1_fastqc.zip"
mwconf['ids'][fastqc_suffix_1] = fastqc_path_1
ln_fastqc_path_1 = "out/ln/alias/" + fastqc_suffix_1
fastqc_suffix_2 = base_stem_dict[k] + "/logs/fastqc/" + SAMPLE_NAME + "_2_fastqc.zip"
mwconf['ids'][fastqc_suffix_2] = fastqc_path_2
ln_fastqc_path_2 = "out/ln/alias/" + fastqc_suffix_2
fastq_screen_suffix_1 = base_stem_dict[k] + "/logs/fastq_screen/" + SAMPLE_NAME + "_1_screen.txt"
mwconf['ids'][fastq_screen_suffix_1] = fastq_screen_path_1
ln_fastq_screen_path_1 = "out/ln/alias/" + fastq_screen_suffix_1
fastq_screen_suffix_2 = base_stem_dict[k] + "/logs/fastq_screen/" + SAMPLE_NAME + "_2_screen.txt"
mwconf['ids'][fastq_screen_suffix_2] = fastq_screen_path_2
ln_fastq_screen_path_2 = "out/ln/alias/" + fastq_screen_suffix_2
ln_qc_paths = [ln_fastqc_path_1, ln_fastqc_path_2, ln_fastq_screen_path_1, ln_fastq_screen_path_2]
if PROCESS == 'yes':
mwconf['targets'].append(ln_qc_paths)
#for k in base_stem_dict.keys():
# for qc_path in qc_paths:
# qc_suffix = qc_path.replace('out/', base_stem_dict[k] + "/logs/")
# mwconf['ids'][qc_suffix] = qc_path
# print('qc_suffix')
# print(qc_suffix)
# print('qc_path')
# print(qc_path)
# print("mwconf['ids'][qc_suffix]")
# print(mwconf['ids'][qc_suffix])
# qc_k_path = "out/ln/alias/" + qc_suffix
# if PROCESS == 'yes':
# mwconf['targets'].append(qc_k_path)
# 4)
#sst/all_samples/logs/fastqc/fastq.gz/ln/alias/sst/all_samples/fastq/RPMI_H3K4me3_fastqc.zip
# (5
# Assemblies for each specie keyword should be defined here
SPECIE = str(row['specie'])
if SPECIE in ['human', 'Human', 'Homo_sapiens']:
assembly_list = ["GRCh38", "hg19"]
gsize = "hs"
scrna_assembly = "GRCh38-2020-A"
elif SPECIE in ['mouse', 'Mouse', 'Mus_musculus']:
#assembly_list = ["GRCm38", "mm9", "GRCm38-merge-attr", "GRCm38-merge-attr-retrieve"]
        #2021-03-31: Removed GRCm3[89]-merge-attr because it is redundant with GRCm3[89]-merge-attr-retrieve. The results are the same whether gtftk retrieves the GTF from Ensembl or it is downloaded from the Ensembl FTP.
        #2021-04-20: Removed GRCm39 and GRCm39-merge-attr-retrieve. Not used by the scientific community for now.
#assembly_list = ["GRCm38", "mm9", "GRCm38-merge-attr-retrieve", "GRCm39", "GRCm39-merge-attr-retrieve"]
assembly_list = ["GRCm38", "mm9"]
gsize = "mm"
elif SPECIE in ['drosophila', 'Fruit_fly', 'Drosophila_melanogaster']:
scrna_assembly = "mm10-2020-A"
assembly_list = ["BDGP6"]
gsize = "dm"
elif SPECIE in ['Yeast', 'Saccharomyces_cerevisiae']:
assembly_list = ["R64-1-1"]
gsize = "12e6"
elif SPECIE in ['Rat']:
assembly_list = ["Rnor6"]
elif not pandas.isna(SPECIE):
assembly_list = [SPECIE]
else:
continue
# 5)
# (6
# Targets for post-alignment, experiment-type-unspecific files are produced
    # Do not throw an error if the species is not provided; just skip the sample instead.
if 'assembly_list' in locals():
for assembly in assembly_list:
aligned_stem_dict = {}
for k in base_stem_dict.keys():
aligned_stem_dict[k] = base_stem_dict[k] + "/" + assembly
if row['analysis_type'] in ['Demultiplexage_Concatenation_QC', 'Concatenation_QC']:
continue
elif row['type'] == 'RNA' and row['origin'] != "fastq_absolute_path_agilent_XT_HS2":
aligned_stem = "samtools/index/star/" + row['se_or_pe'] + "_fastq.gz_to_bam_standard_staridx-" + assembly + "-merge-attr-retrieve-ensembl_gtf-" + assembly +"-merge-attr-retrieve-ensembl/sickle/" + row['se_or_pe'] + "_-t_sanger_-q_20/ln/alias/" + fq_stem
elif row['type'] == 'RNA' and row['origin'] == "fastq_absolute_path_agilent_XT_HS2":
aligned_stem = "samtools/index/samtools/sort/agent/locatit_mbc_-i_-R/samtools/sort_-n/star/" + row['se_or_pe'] + "_fastq.gz_to_bam_standard_staridx-" + assembly + "-merge-attr-retrieve-ensembl_gtf-" + assembly +"-merge-attr-retrieve-ensembl/agent/trim_-v2/ln/alias/" + fq_stem
else:
bowtie2_stem = "bowtie2/" + row['se_or_pe'] + "_" + assembly + "/sickle/" + row['se_or_pe'] + "_-t_sanger_-q_20/ln/alias/" + fq_stem
bowtie2_log_path = "out/" + bowtie2_stem + ".log"
aligned_stem = "samtools/index/samtools/sort/samtools/view_sam_to_bam_-q_30/" + bowtie2_stem
mw_bam_path = "out/" + aligned_stem + ".bam"
mw_bai_path = mw_bam_path + ".bai"
mw_idxstat_path = "out/samtools/idxstats/" + aligned_stem + ".idxstat.tsv"
mw_bw_path = "out/deepTools/bamCoverage_--binSize_20_--minMappingQuality_0_--normalizeUsing_RPKM/" + aligned_stem + ".bw"
if row['type'] == 'RNA':
mw_bw_fwd_path = "out/deepTools/bamCoverage_--binSize_20_--minMappingQuality_0_--normalizeUsing_RPKM_--filterRNAstrand_forward/" + aligned_stem + ".bw"
mw_bw_rev_path = "out/deepTools/bamCoverage_--binSize_20_--minMappingQuality_0_--normalizeUsing_RPKM_--filterRNAstrand_reverse/" + aligned_stem + ".bw"
# 2021-03-25 Add rseqc geneBody_coverage
mw_rseqc_path = "out/rseqc/geneBody_coverage_bed-housekeeping-genes-" + assembly + "/" + aligned_stem + ".geneBodyCoverage.curves.pdf"
for k in aligned_stem_dict.keys():
bam_suffix = aligned_stem_dict[k] + "/bam/" + SAMPLE_NAME + ".bam"
mwconf['ids'][bam_suffix] = mw_bam_path
ln_bam_path = "out/ln/alias/" + bam_suffix
bai_suffix = aligned_stem_dict[k] + "/bam/" + SAMPLE_NAME + ".bam.bai"
mwconf['ids'][bai_suffix] = mw_bai_path
ln_bai_path = "out/ln/alias/" + bai_suffix
idxstat_suffix = aligned_stem_dict[k] + "/logs/samtools_idxstats/" + SAMPLE_NAME + ".idxstat.tsv"
mwconf['ids'][idxstat_suffix] = mw_idxstat_path
ln_idxstat_path = "out/ln/alias/" + idxstat_suffix
bw_suffix = aligned_stem_dict[k] + "/bw/" + SAMPLE_NAME + ".bw"
mwconf['ids'][bw_suffix] = mw_bw_path
ln_bw_path = "out/ln/alias/" + bw_suffix
ln_aligned_unspecif_paths = [ln_bam_path, ln_bai_path, ln_idxstat_path, ln_bw_path]
if row['type'] == 'RNA':
bw_fwd_suffix = aligned_stem_dict[k] + "/bw/stranded/" + SAMPLE_NAME + "_fwd.bw"
mwconf['ids'][bw_fwd_suffix] = mw_bw_fwd_path
ln_bw_fwd_path = "out/ln/alias/" + bw_fwd_suffix
bw_rev_suffix = aligned_stem_dict[k] + "/bw/stranded/" + SAMPLE_NAME + "_rev.bw"
mwconf['ids'][bw_rev_suffix] = mw_bw_rev_path
ln_bw_rev_path = "out/ln/alias/" + bw_rev_suffix
ln_aligned_unspecif_paths.append(ln_bw_fwd_path)
ln_aligned_unspecif_paths.append(ln_bw_rev_path)
#rseqc_suffix = aligned_stem_dict[k] + "/pdf/" + SAMPLE_NAME + ".pdf"
#mwconf['ids'][rseqc_suffix] = mw_rseqc_path
#ln_rseqc_path = "out/ln/alias/" + rseqc_suffix
#ln_aligned_unspecif_paths.append(ln_rseqc_path)
if PROCESS == 'yes' and row['type'] not in ['RNA_fq_only', 'ChIP_fq_only', 'scRNA', 'scRNA_HTO', 'Demultiplexage_Concatenation_QC']:
mwconf['targets'].append(ln_aligned_unspecif_paths)
# (7
# Targets for files specific of ChIP-like approaches.
if row['type'] in ['ChIP','ATAC','FAIRE','DNASE','MNase'] and row['analysis_type'] not in ['Demultiplexage_Concatenation_QC', 'Concatenation_QC']:
mw_chip_qc_fingerprint_prefix = "out/deepTools/plotFingerprint/" + aligned_stem
mw_chip_qc_fingerprint_metrics = mw_chip_qc_fingerprint_prefix + ".metrics.tsv"
mw_chip_qc_fingerprint_counts = mw_chip_qc_fingerprint_prefix + ".counts.tsv"
mw_chip_qc_phantompeakqualtools = "out/phantompeakqualtools/bam_noctrl_-savp/" + aligned_stem + '.spp.out'
mw_bed_broad = "out/macs2/noctrl_callpeak_--broad_--gsize_" + gsize + "/" + aligned_stem + "_peaks.bed"
                mw_xls_broad = "out/macs2/noctrl_callpeak_--broad_--gsize_"
_get_times(energy) + r / v0 - (st.sonset - st.origintime)
if smooth:
plot(t, energy.data_unsmoothed, color='0.7')
plot(t, energy.data, color=c1l)
G_ = smooth_func(lambda t_: G_func(r, t_, v0, g0),
t, smooth, window=smooth_window)
Emod = get_Emod(G_, t)
index = np.argwhere(Emod < 1e-30)[-1]
Emod[index] = 1e-30
plot(t, Emod, color=c2l)
plot(tcoda[i], Ecoda[i], color=c1)
Emodcoda = get_Emod(Gcoda[i], tcoda[i])
plot(tcoda[i], Emodcoda, color=c2)
if tbulk and len(tbulk) > 0:
plot(tbulk[i], Ebulk[i], 'o', color=c1, mec=c1, ms=MS)
Emodbulk = get_Emod(Gbulk[i], tbulk[i])
plot(tbulk[i], Emodbulk, 'o', ms=MS - 1,
color=c2, mec=c2)
l = '%s\n%s' % (evid, station)
l = l + '\nr=%dkm' % (r / 1000)
ax.annotate(l, (1, 1), (-5, -5), 'axes fraction',
'offset points', ha='right', va='top', size='x-small')
_set_gridlabels(ax, i, n, n, N, xlabel='time (s)',
ylabel=r'E (Jm$^{-3}$Hz$^{-1}$)')
tmaxs.append(t[-1])
ymaxs.append(max(np.max(Emod), np.max(energy.data)))
ymins.append(min(np.min(Emodcoda), np.max(Ecoda[i])))
if share is None:
share = ax
# if True:
# save = {'t': t, 'data': energy.stats.orig_data,
# 'Eobs': energy.data, 'Emod': Emod,
# 'tcoda': tcoda[i], 'Eobscoda': Ecoda[i],
# 'Emodcoda': Emodcoda}
# if tbulk:
# save.update({'tbulk': tbulk[i], 'Eobsbulk': Ebulk[i],
# 'Emodbulk': Emodbulk})
# np.savez(fname.replace('png', '') + station + '.npz', **save)
ax.locator_params(axis='x', nbins=5, prune='upper')
loglocator = mpl.ticker.LogLocator(base=100)
ax.yaxis.set_major_locator(loglocator)
ax.yaxis.set_minor_locator(mpl.ticker.NullLocator())
ax.set_xlim(xlim or (0, max(tmaxs)))
ax.set_ylim(ylim or (0.1 * min(ymins), 1.5 * max(ymaxs)))
_savefig(fig, **kwargs)
def plot_sds(freq, result, ax=None,
annotate=False, va='bottom',
seismic_moment_method=None, seismic_moment_options={},
**kwargs):
"""Plot source displacement spectrum and fitted source model"""
freq = np.array(freq)
    omM = np.array(result['sds'], dtype=float)
if all(np.isnan(omM)):
return
fig = None
obs = ('M0', 'fc', 'n', 'gamma')
smo = seismic_moment_options
def _get(k): return smo.get(k) or result.get(k)
smo = {k: _get(k) for k in obs if _get(k) is not None}
M0 = smo.get('M0')
fc = smo.get('fc')
if ax is None:
fig = plt.figure()
ax = fig.add_subplot(111)
if seismic_moment_method == 'mean':
ax.loglog(freq, omM, 'o-', color='gray', mec='gray')
ax.loglog(freq[freq < fc], omM[freq < fc], 'o-k')
elif seismic_moment_method in ('fit', 'robust_fit'):
ax.loglog(freq, omM, 'ok')
if M0 and fc:
f = np.linspace(freq[0] / 1.5, freq[-1] * 1.5, 100)
omM2 = source_model(f, **smo)
ax.loglog(f, omM2, '-k')
else:
ax.loglog(freq, omM, 'o-k')
if M0:
ax.axhline(M0, ls='--', color='k')
labels = OrderedDict((('M0', r'M$_0$=%.1e Nm'),
('fc', r'f$_{\rm{c}}$=%.1f Hz'),
('n', 'n=%.1f'),
('gamma', r'$\gamma$=%.2f'),
('fit_error', 'err=%.2f')))
labels = [labels[key] % np.float32(result[key])
for key in labels if key in result]
if len(labels) > 0 and annotate:
ypos = 1 if va == 'top' else 0
ax.annotate('\n'.join(labels), (1, ypos), (-5, 5 - 10 * ypos),
'axes fraction', 'offset points',
ha='right', va=va, size='x-small')
if fig:
_savefig(fig, **kwargs)
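# Minimal call sketch (assumed inputs, for illustration only): plot_sds expects the
# per-event result dict used throughout this module, e.g.
#   plot_sds([1.5, 3.0, 6.0, 12.0],
#            {'sds': [2e13, 1.5e13, 6e12, 1e12], 'M0': 2e13, 'fc': 4.0, 'n': 2.0},
#            annotate=True, fname='sds.png')
# where 'sds' holds the observed source displacement spectrum and M0/fc/n
# parametrize the fitted source model (fname is assumed to be handled by _savefig).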
def plot_eventresult(result, v0=None, quantities=QUANTITIES_EVENT,
seismic_moment_method=None, seismic_moment_options={},
**kwargs):
"""Plot all results of one `~qopen.core.invert()` call"""
v0 = v0 or result.get('v0') or result.get('config', {}).get('v0')
freq = np.array(result['freq'])
res = copy(result)
_values_view = res.pop('events').values()
res.update((list(_values_view))[0])
N = len(quantities)
n = int(np.ceil(np.sqrt(N)))
fig = plt.figure()
gs = gridspec.GridSpec(n, n)
share = None
for i, q in enumerate(quantities):
ax = plt.subplot(gs[i // n, i % n], sharex=share)
if q == 'sds':
plot_sds(freq, res, ax=ax,
seismic_moment_method=seismic_moment_method,
seismic_moment_options=seismic_moment_options)
else:
vals = calc_dependent(q, res[DEPMAP[q]], freq, v0)
ax.loglog(freq, vals, 'o-k')
if q == 'error':
ax.set_yscale('linear')
ax.annotate(QLABELS[q], (1, 1), (-5, -5), 'axes fraction',
'offset points', ha='right', va='top')
_set_gridlabels(ax, i, n, n, N)
if share is None:
share = ax
ax.set_xlim(freq[0], freq[-1])
_savefig(fig, **kwargs)
def plot_eventsites(result, **kwargs):
"""Plot site amplification factors of one `~qopen.core.invert()` call"""
freq = np.array(result['freq'])
R = result['R']
N = len(R)
n = int(np.ceil(np.sqrt(N)))
fig = plt.figure()
gs = gridspec.GridSpec(n, n)
share = None
allR = []
for i, station in enumerate(sorted(R)):
allR.extend(R[station])
ax = plt.subplot(gs[i // n, i % n], sharex=share, sharey=share)
        Rs = np.array(R[station], dtype=float)
if not np.all(np.isnan(Rs)):
ax.loglog(freq, Rs, 'o-k')
l = station
ax.annotate(l, (1, 1), (-5, -5), 'axes fraction',
'offset points', ha='right', va='top', size='small')
_set_gridlabels(ax, i, n, n, N, ylabel='site correction')
if share is None:
share = ax
    allR = np.array(allR, dtype=float)
allR = allR[~np.isnan(allR)]
if np.min(allR) != np.max(allR):
ax.set_ylim(np.min(allR), np.max(allR))
ax.set_xlim(freq[0], freq[-1])
_savefig(fig, **kwargs)
def plot_results(result, v0=None, quantities=QUANTITIES, mean=None,
llim=None, Qlim=None, **kwargs):
"""Plot results"""
freq = np.array(result['freq'])
N = len(quantities)
n = int(np.ceil(np.sqrt(N)))
fig = plt.figure()
gs = gridspec.GridSpec(n, n)
share = None
# True for invert_events_simultaneously
single_inversion = 'g0' not in list(result['events'].values())[0]
if single_inversion:
colres = result
else:
colres = collect_results(result, only=('g0', 'b', 'error', 'v0'))
v0 = v0 or result['config'].get('v0') or colres['v0']
colres.pop('v0', None)
weights = 1 / np.array(colres['error']) if mean == 'weighted' else None
robust = mean == 'robust'
for i, q in enumerate(quantities):
ax = plt.subplot(gs[i // n, i % n], sharex=share)
if q == 'nobs':
if single_inversion:
nobs = 1
else:
nobs = np.sum(~np.isnan(colres['g0']), axis=0)
ax.bar(freq, nobs, width=0.1 * freq, color='gray')
else:
value = colres[DEPMAP[q]]
value = calc_dependent(q, value, freq, v0)
if not single_inversion:
freqs = np.repeat(freq[np.newaxis, :], value.shape[0], axis=0)
ax.plot(freqs, value, 'o', ms=MS, color='gray', mec='gray')
means, err1, err2 = gerr(
value, axis=0, weights=weights, robust=robust)
errs = (err1, err2)
ax.errorbar(freq, means, yerr=errs, marker='o',
mfc='k', mec='k', color='m', ecolor='m')
if q != 'error':
ax.set_yscale('log')
ax.set_xscale('log')
ax.annotate(QLABELS[q], (1, 1), (-5, -5), 'axes fraction',
'offset points', ha='right', va='top')
_set_gridlabels(ax, i, n, n, N, ylabel=None)
if share is None:
share = ax
if q in ('Qsc', 'Qi') and Qlim:
ax.set_ylim(Qlim)
if q in ('lsc', 'li') and llim:
ax.set_ylim(llim)
ax.set_xlim(freqlim(freq))
_savefig(fig, **kwargs)
def plot_sites(result, mean=None,
xlim=None, ylim=(1e-2, 1e2), nx=None,
**kwargs):
"""Plot site amplification factors"""
freq = np.array(result['freq'])
# True for invert_events_simultaneously
single_inversion = 'R' not in list(result['events'].values())[0]
if single_inversion:
colres = result
R = copy(colres['R'])
for sta in R:
R[sta] = np.array(R[sta], dtype=float)
max_nobs = 1
else:
colres = collect_results(result, only=['R', 'error'])
R = colres['R']
max_nobs = np.max([np.sum(~np.isnan(r), axis=0) for r in R.values()])
weights = 1 / np.array(colres['error']) if mean == 'weighted' else None
robust = mean == 'robust'
N = max_nobs > 1
for station in sorted(R):
if not np.all(np.isnan(R[station])):
N = N + 1
# N = len(R) + (max_nobs > 1)
fig = plt.figure()
nx, ny, gs = _get_grid(N, nx=nx)
cmap = plt.get_cmap('hot_r', max_nobs)
norm = mpl.colors.Normalize(vmin=0.5, vmax=max_nobs + 0.5)
share = None
i = 0
for station in sorted(R):
if np.all(np.isnan(R[station])):
continue
ax = plt.subplot(gs[i // nx, i % nx], sharex=share, sharey=share)
means, err1, err2 = gerr(R[station], axis=0, weights=weights,
robust=robust)
errs = (err1, err2)
# if not np.all(np.isnan(R[station])):
if max_nobs == 1:
kw = {'c': 'k'}
else:
nobs = 1. * np.sum(~np.isnan(R[station]), axis=0)
kw = {'c': nobs, 'norm': norm, 'cmap': cmap}
if not single_inversion:
freqs = np.repeat(freq[np.newaxis, :], R[station].shape[0], axis=0)
ax.plot(freqs, R[station], 'o', ms=MS, color='gray', mec='gray')
ax.errorbar(freq, means, yerr=errs, marker=None,
color='m', ecolor='m')
sc = ax.scatter(freq, means, s=4 * MS ** 2,
marker='o', zorder=10,
linewidth=0.5,
**kw)
ax.set_xscale('log')
ax.set_yscale('log')
ax.annotate(station, (1, 1), (-5, -5), 'axes fraction',
'offset points', ha='right', va='top', size='x-small')
_set_gridlabels(ax, i, nx, ny, N, ylabel='amplification factor')
if share is None:
share = ax
i += 1
ax.set_xlim(xlim or freqlim(freq))
if ylim:
ax.set_ylim(ylim)
if max_nobs != 1:
ax = plt.subplot(gs[(N - 1) // nx, (N - 1) % nx])
ax.set_axis_off()
fig.colorbar(sc, ax=ax, shrink=0.9, format='%d', label='nobs',
ticks=np.arange(0, max_nobs + 1, max(1, max_nobs // 5)))
_savefig(fig, **kwargs)
def _get_grid(N, nx=None):
if nx is None:
nx = ny = int(np.ceil(np.sqrt(N)))
else:
ny = 1 + (N-1) // nx
gs = gridspec.GridSpec(ny, nx)
return nx, ny, gs
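# Example (for clarity): _get_grid(5) yields a 3x3 grid (nx = ny = ceil(sqrt(5))),
# while _get_grid(5, nx=2) yields 2 columns and ny = 1 + (5 - 1) // 2 = 3 rows.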
def plot_all_sds(result, seismic_moment_method=None,
seismic_moment_options=None,
xlim=None, ylim=None, nx=None,
annotate=None, va='top',
plot_only_ids=None, **kwargs):
"""Plot all source displacement spectra with fitted source models"""
freq = np.array(result['freq'])
conf = result.get('config', {})
smm = seismic_moment_method or conf.get('seismic_moment_method')
smo = seismic_moment_options or conf.get('seismic_moment_options', {})
# fc = seismic_moment_options.pop('fc', None)
result = result['events']
if plot_only_ids:
result = {id_: r for id_, r in result.items() if id_ in plot_only_ids}
N = len(result)
# n = int(np.ceil(np.sqrt(N)))
fig = plt.figure()
# gs = gridspec.GridSpec(n, n)
nx, ny, gs = _get_grid(N, nx=nx)
share = None
if annotate is None:
annotate = nx < 7
for i, evid in enumerate(sorted(result)):
ax = plt.subplot(gs[i // nx, i % nx], sharex=share, sharey=share)
plot_sds(freq, result[evid], seismic_moment_method=smm, va=va,
seismic_moment_options=smo, ax=ax, annotate=annotate)
ax.annotate(evid, (0, 0), (5, 5), 'axes fraction',
'offset points', ha='left', va='bottom', size='x-small')
_set_gridlabels(ax, i, nx, ny, N, ylabel=r'$\omega$M (Nm)')
if share is None:
share = ax
ax.autoscale()
ax.set_xlim(xlim or freqlim(freq))
if ylim:
ax.set_ylim(ylim)
_savefig(fig, **kwargs)
def plot_mags(result, xlim=None, ylim=None, plot_only_ids=None, **kwargs):
"""Plot Qopen moment magnitudes versus catalogue magnitudes"""
fig = plt.figure()
ax = fig.add_subplot(111)
temp = [(r['Mcat'], r['Mw']) for id_, r in result['events'].items()
            if r.get('Mcat') is not
"""
Copyright 2020 <NAME>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import inspect
import typeguard
from bunch import Bunch
from typing import Optional
import tensorflow as tf
import numpy as np
import logging
from .base_funnel import Funnel
from ..augment import augment
from ..register.register import FUNNEL
__all__ = ["BboxFunnel", "CategoricalTensorFunnel"]
"""Bbox Funnel for bounding box dataset."""
class TFDecoderMixin:
"""Tensorflow decoder."""
KEYS_TO_FEATURES = {
"image/encoded": tf.io.FixedLenFeature((), tf.string),
"image/source_id": tf.io.FixedLenFeature((), tf.string, ""),
"image/height": tf.io.FixedLenFeature((), tf.int64, -1),
"image/width": tf.io.FixedLenFeature((), tf.int64, -1),
"image/object/bbox/xmin": tf.io.VarLenFeature(tf.float32),
"image/object/bbox/xmax": tf.io.VarLenFeature(tf.float32),
"image/object/bbox/ymin": tf.io.VarLenFeature(tf.float32),
"image/object/bbox/ymax": tf.io.VarLenFeature(tf.float32),
"image/object/class/label": tf.io.VarLenFeature(tf.int64),
"image/object/area": tf.io.VarLenFeature(tf.float32),
"image/object/is_crowd": tf.io.VarLenFeature(tf.int64),
}
def _decode_image(self, parsed_tensors):
"""Decodes the image"""
image = tf.io.decode_image(parsed_tensors["image/encoded"], channels=3)
image.set_shape([None, None, 3])
return image
def _decode_boxes(self, parsed_tensors):
"""Concat box coordinates in the format of [ymin, xmin, ymax, xmax]."""
xmin = parsed_tensors["image/object/bbox/xmin"]
xmax = parsed_tensors["image/object/bbox/xmax"]
ymin = parsed_tensors["image/object/bbox/ymin"]
ymax = parsed_tensors["image/object/bbox/ymax"]
return tf.stack([ymin, xmin, ymax, xmax], axis=-1)
def decode(self, serialized_example):
"""Decode the serialized example."""
parsed_tensors = tf.io.parse_single_example(
serialized_example, self.KEYS_TO_FEATURES
)
for k in parsed_tensors:
if isinstance(parsed_tensors[k], tf.SparseTensor):
if parsed_tensors[k].dtype == tf.string:
parsed_tensors[k] = tf.sparse.to_dense(
parsed_tensors[k], default_value=""
)
else:
parsed_tensors[k] = tf.sparse.to_dense(
parsed_tensors[k], default_value=0
)
image = self._decode_image(parsed_tensors)
boxes = self._decode_boxes(parsed_tensors)
decode_image_shape = tf.logical_or(
tf.equal(parsed_tensors["image/height"], -1),
tf.equal(parsed_tensors["image/width"], -1),
)
image_shape = tf.cast(tf.shape(image), dtype=tf.int64)
parsed_tensors["image/height"] = tf.where(
decode_image_shape, image_shape[0], parsed_tensors["image/height"]
)
parsed_tensors["image/width"] = tf.where(
decode_image_shape, image_shape[1], parsed_tensors["image/width"]
)
decoded_tensors = {
"image": image,
"height": parsed_tensors["image/height"],
"width": parsed_tensors["image/width"],
"groundtruth_classes": parsed_tensors["image/object/class/label"],
"groundtruth_boxes": boxes,
}
return decoded_tensors
@FUNNEL.register_module(name="bbox")
class BboxFunnel(Funnel, TFDecoderMixin):
"""BboxFunnel.
    Funnel class for bounding-box datasets. It provides a data iterable of
    (images, bboxes) or (images, targets) with the required augmentations
    applied.
"""
def __init__(
self,
data_path: str,
config: Optional[dict] = None,
datatype="bbox",
training=True,
):
"""__init__.
Args:
            data_path: Dataset path; the data must follow the expected structure,
                please see the readme file for more details on structuring.
            config: Config file setting the required configuration of the data pipeline.
            training: Training mode on or not?
Example::
e.g 1
>> funnel = Funnel(config=config, datatype = "bbox")
>> data = next(iter(funnel.from_tfrecords('tfrecord_data/' , type="train")))
e.g 2:
class CustomFunnel(BboxFunnel):
def __init__(self, *args):
super().__init__(*args)
def encoder(self,args):
# should be overridden if there is a need for anchors in the model.
image_id, image, bbox, classes = args
# make custom anchors and encode the image and bboxes as per
# the model needs.
return image, custom_anchors, classes
funnel = CustomFunnel()
"""
# bunch the config dict.
config = Bunch(config)
if not isinstance(data_path, str):
msg = f"datapath should be str but pass {type(data_path)}."
logging.error(msg)
raise TypeError("Only str allowed")
if not os.path.exists(data_path):
msg = f"path doesnt exists"
logging.error(msg)
raise TypeError("Path doesnt exists")
self._datatype = "bbox"
self._data_path = data_path
self.config = config
self._training = training
self._drop_remainder = self.config.get("drop_remainder", True)
self.augmenter = augment.Augment(self.config, datatype)
self.numpy_function = self.config.get("numpy_function", None)
self._per_shard = self.config.get("shard", 10) # hardcoded shard size
self.max_instances_per_image = self.config.get("max_instances_per_image", 100)
if self.numpy_function:
assert callable(self.numpy_function), "numpy_function should be a callable."
assert len(
inspect.getfullargspec(self.numpy_function).args
), "py_function should be having two arguments."
@property
def datatype(self):
return self._datatype
@property
def classes(self):
return self._classes
def parser(self, dataset_folder):
"""parser for reading images and bbox from tensor records."""
dataset = tf.data.Dataset.list_files(
self.tf_path_pattern(os.path.join(self.data_path, dataset_folder)),
shuffle=self._training,
)
if self._training:
dataset = dataset.repeat()
dataset = dataset.interleave(
self._fetch_records, num_parallel_calls=self.AUTOTUNE
)
dataset = dataset.with_options(self.optimized_options)
if self._training:
dataset = dataset.shuffle(self._per_shard)
return dataset
def encoder(self):
"""Method expected to be overidden by the user. """
raise NotImplementedError()
def decoder(self, value):
"""helper decoder, a wrapper around tfrecorde decoder."""
data = self.decode(value)
image_id = 1.0
image = data["image"]
boxes = data["groundtruth_boxes"]
classes = data["groundtruth_classes"]
classes = tf.reshape(tf.cast(classes, dtype=tf.float32), [-1, 1])
return (image_id, image, boxes, classes)
def from_tfrecords(self, type="train"):
"""tf_records.
Returns a iterable tf.data dataset ,which is configured
with the config file passed with require augmentations.
"""
dataset = self.parser(type)
decode_rawdata = lambda input_records: self.decoder(
input_records
) # pylint: enable=g-long-lambda
dataset = dataset.map(decode_rawdata, num_parallel_calls=self.AUTOTUNE)
dataset = dataset.prefetch(self.config.batch_size)
# custom numpy function to inject in datapipeline.
def _numpy_function(img_id, img, bbox, classes):
_output = tf.numpy_function(
func=self.numpy_function,
inp=[img, bbox],
Tout=(tf.float32, tf.float32),
)
return img_id, _output[0], _output[1], classes
if self._training:
dataset = dataset.map(
lambda image_id, image, bbox, classes: self.augmenter(
image, bbox, image_id, classes, return_image_label=False
)
)
if self.numpy_function:
dataset = dataset.map(_numpy_function, num_parallel_calls=self.AUTOTUNE)
# pad to fixed length.
dataset = dataset.map(
lambda *args: self.pad_to_fixed_len(*args),
num_parallel_calls=self.AUTOTUNE,
)
# try if encoder is implemented.
try:
self.encoder()
except NotImplementedError:
logging.info("Encoder is not implemented,giving raw output.")
else:
dataset = dataset.map(lambda *args: self.encoder(*args))
# make batches.
dataset = dataset.batch(
self.config.batch_size, drop_remainder=self._drop_remainder
)
dataset = self.pretraining(dataset)
return dataset
@property
def data_path(self):
return self._data_path
def from_dataset(self, tfrecord_path: str = None):
# TODO(kartik4949) : write me
# fetch raw data
raise NotImplementedError
def from_remote(self, remote_path: str = None):
# TODO(kartik4949) : write me
# fetch remote files
raise NotImplementedError
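# Illustrative sketch (an assumption, not part of the original module): the
# shape of a `numpy_function` that can be injected through the config. It is
# invoked via tf.numpy_function with (image, bbox) and must return two float32
# arrays, matching the Tout declared in from_tfrecords.
def _example_numpy_function(image, bbox):
    """Toy numpy_function: scale the image to [0, 1] and pass boxes through."""
    return image.astype(np.float32) / 255.0, bbox.astype(np.float32)
# Usage sketch: config = {"batch_size": 2, "numpy_function": _example_numpy_function, ...}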
@FUNNEL.register_module(name="categorical")
class CategoricalTensorFunnel(Funnel):
# pylint: disable=line-too-long
"""CategoricalTensorFunnel
TensorFunnel for Categorical Data provides DataPipeline according
to config passed with required augmentations.
Example:
funnel = CategoricalTensorFunnel('testdata', config=config, datatype='categorical')
iterable = funnel.dataset(type='train')
Note: This class can only be used for categorical datasets,
i.e. either multiclass or binary.
"""
# pylint: enable=line-too-long
@typeguard.typechecked
def __init__(
self,
data_path: str,
config: Optional[dict] = None,
datatype="categorical",
training=True,
):
"""__init__.
Args:
data_path: Dataset path, which should be structured in the required way;
please see readme file for more details on structuring.
config: Config file , a dict file contains all required attributes
to configure.
datatype: Dataset type, i.e. (bbox, labels, segmentation). A bbox
dataset is an object detection dataset which will be provided in the
form of [image, bboxes] or [image, class_targets, bbox_targets].
training: Is the pipeline in training mode or not?
"""
# bunch the config dict.
config = Bunch(config)
if not isinstance(data_path, str):
msg = f"datapath should be str but pass {type(data_path)}."
logging.error(msg)
raise TypeError("Only str allowed")
self._datatype = datatype
self._data_path = data_path
self.config = config
self._training = training
self._shuffle_buffer = None
self._batch_size = self.config.get("batch_size", 32)
self._image_size = self.config.get("image_size", [512, 512])
self._drop_remainder = self.config.get("drop_remainder", True)
self.augmenter = augment.Augment(self.config, datatype)
self.numpy_function = self.config.get("numpy_function", None)
if self.numpy_function:
assert callable(self.numpy_function), "numpy_function should be a callable."
assert len(
inspect.getfullargspec(self.numpy_function).args
), "py_function should be having two arguments."
def categorical_encoding(self, labels):
"""categorical_encoding.
Encodes the labels with given encoding in config file.
Args:
labels: Labels to encode
"""
encoding = (
self.config.categorical_encoding
if self.config.categorical_encoding
else "onehot"
)
if encoding == "onehot":
from sklearn.preprocessing import (
OneHotEncoder,
) # pylint: disable=g-import-not-at-top
encoding = OneHotEncoder(drop="if_binary", sparse=False)
else:
from sklearn.preprocessing import (
LabelEncoder,
) # pylint: disable=g-import-not-at-top
encoding = LabelEncoder()
labels = encoding.fit_transform(labels)
return labels
@property
def get_id_to_imagefile(self):
return self._get_id_to_imagefile
@property
def classes(self):
return self._classes
@property
def data_path(self):
return self._data_path
@property
def datatype(self):
return self._datatype
def resize(self, image):
return tf.image.resize(
image,
self._image_size,
method=tf.image.ResizeMethod.BILINEAR,
preserve_aspect_ratio=False,
antialias=False,
name=None,
)
@get_id_to_imagefile.setter
def get_id_to_imagefile(self, value):
if not isinstance(value, dict):
msg = "Only dict assign is allowed"
logging.error(msg)
raise TypeError(msg)
self._get_id_to_imagefile = value
def _generate_ids(self, image_files):
"""_generate_ids.
Generate ids for the image files, which will be further used
to look up the image file to read.
Args:
image_files: images files list containing filename of images.
"""
# TODO: (HIGH) make get_id_to_imagefile as dataclass.
self._get_id_to_imagefile = {}
_ = [
self._get_id_to_imagefile.update({id: image_file})
for id, image_file in enumerate(image_files)
]
return list(self._get_id_to_imagefile.keys())
def _get_file_labels(self, subset):
"""_get_file_labels.
returns files, labels which will be further used for reading images.
"""
_images = []
_labels = []
for label_folder in os.listdir(self.data_path + "/" + subset):
for images in os.listdir(
self.data_path + "/" + subset + "/" + label_folder
):
_images.append(
self.data_path + "/" + subset + "/" + label_folder + "/" + images
)
_labels.append(label_folder)
self._classes = set(_labels)
_labels = np.reshape(np.asarray(_labels), (-1, 1))
_labels = self.categorical_encoding(_labels)
_labels = np.reshape(np.asarray(_labels), (-1, 1))
self._size = len(_images)
assert len(_images) == len(_labels), "Length of images and labels didn't match"
return _images, _labels
@typeguard.typechecked
def | |
# Audio processing tools
#
# <NAME> 2020
#
# Some code modified from original MATLAB rastamat package.
#
import numpy as np
from scipy.signal import hanning, spectrogram, resample, hilbert, butter, filtfilt, boxcar, convolve
from scipy.io import wavfile
# import spectools
# from .fbtools import fft2melmx
from matplotlib import pyplot as plt
import parselmouth as pm
# from soundsig import sound
def get_meanF0s_v2(fileName, steps=1/128.0, f0min=50, f0max=300):
"""
Uses parselmouth Sound and Pitch objects to extract the pitch (F0) contour
of the wav file 'fileName'. Returns the frame-wise F0 values in Hz (0 for
unvoiced frames), sampled every 'steps' seconds and restricted to the range
[f0min, f0max]. Mean F0 per phoneme can then be computed by averaging the
non-zero values that fall inside each phoneme's time segment.
"""
#fileName = wav_dirs + wav_name
sound_obj = pm.Sound(fileName)
pitch = sound_obj.to_pitch(steps, f0min, f0max) #create a praat pitch object
pitch_values = pitch.selected_array['frequency']
return pitch_values
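# Illustrative sketch (not part of the original module): averaging the voiced
# (non-zero) frames of the pitch track returned above. The helper name and the
# example path are assumptions for demonstration only.
def _example_mean_f0(fileName="speech.wav"):
    pitch_values = get_meanF0s_v2(fileName)
    voiced = pitch_values[pitch_values > 0]
    return voiced.mean() if voiced.size else 0.0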
def fft2melmx(nfft, sr=8000, nfilts=0, bwidth=1.0, minfreq=0, maxfreq=4000, htkmel=False, constamp=0):
'''
# Generate a matrix of weights to combine FFT bins into Mel
# bins. nfft defines the source FFT size at sampling rate sr.
# Optional nfilts specifies the number of output bands required
# (else one per "mel/width"), and width is the constant width of each
# band relative to standard Mel (default 1).
# While wts has nfft columns, the second half are all zero.
# Hence, Mel spectrum is fft2melmx(nfft,sr)*abs(fft(xincols,nfft));
# minfreq is the frequency (in Hz) of the lowest band edge;
# default is 0, but 133.33 is a common standard (to skip LF).
# maxfreq is frequency in Hz of upper edge; default sr/2.
# You can exactly duplicate the mel matrix in Slaney's mfcc.m
# as fft2melmx(512, 8000, 40, 1, 133.33, 6855.5, 0);
# htkmel=1 means use HTK's version of the mel curve, not Slaney's.
# constamp=1 means make integration windows peak at 1, not sum to 1.
# frqs returns bin center frqs.
# 2004-09-05 <EMAIL> based on fft2barkmx
'''
if nfilts == 0:
nfilts = int(np.ceil(hz2mel(maxfreq, htkmel) / 2))
wts = np.zeros((nfilts, nfft))
# Center freqs of each FFT bin
fftfrqs = np.arange(0,nfft/2.)/nfft*sr
# 'Center freqs' of mel bands - uniformly spaced between limits
minmel = hz2mel(minfreq, htkmel)
maxmel = hz2mel(maxfreq, htkmel)
binfrqs = mel2hz(minmel+np.arange(0.,nfilts+2)/(nfilts+2.)*(maxmel-minmel), htkmel);
binbin = np.round(binfrqs/sr*(nfft-1.))
for i in np.arange(nfilts):
fs = binfrqs[i+[0, 1, 2]]
#print fs
# scale by width
fs = fs[1]+bwidth*(fs - fs[1]);
# lower and upper slopes for all bins
loslope = (fftfrqs - fs[0])/(fs[1] - fs[0])
hislope = (fs[2] - fftfrqs)/(fs[2] - fs[1])
w = np.min((loslope, hislope), axis=0)
w[w<0] = 0
# .. then intersect them with each other and zero
wts[i, 0:np.int(nfft/2)] = w
if constamp == 0:
# Slaney-style mel is scaled to be approx constant E per channel
wts = np.dot(np.diag(2./(binfrqs[2+np.arange(nfilts)]-binfrqs[np.arange(nfilts)])),wts)
#wts = np.atleast_2d(2/(binfrqs[2+np.arange(nfilts)]-binfrqs[np.arange(nfilts)]))*wts
#wts = np.dot(2/(binfrqs[2+np.arange(nfilts)]-binfrqs[np.arange(nfilts)]),wts)
# Make sure 2nd half of FFT is zero
wts[:,np.int(nfft/2+2):np.int(nfft)] = 0
# seems like a good idea to avoid aliasing
return wts, binfrqs
def hz2mel(f,htk=False):
'''
# z = hz2mel(f,htk)
# Convert frequencies f (in Hz) to mel 'scale'.
# Optional htk = 1 uses the mel axis defined in the HTKBook
# otherwise use Slaney's formula
# 2005-04-19 <EMAIL>
'''
if htk:
z = 2595. * np.log10(1. + f / 700.)
else:
# Mel fn to match Slaney's Auditory Toolbox mfcc.m
f_0 = 0.; # 133.33333;
f_sp = 200./3; # 66.66667;
brkfrq = 1000.;
brkpt = (brkfrq - f_0)/f_sp; # starting mel value for log region
logstep = np.exp(np.log(6.4)/27.); # the magic 1.0711703 which is the ratio needed to get from 1000 Hz to 6400 Hz in 27 steps, and is *almost* the ratio between 1000 Hz and the preceding linear filter center at 933.33333 Hz (actually 1000/933.33333 = 1.07142857142857 and exp(log(6.4)/27) = 1.07117028749447)
linpts = [f < brkfrq];
z = 0*f;
# fill in parts separately
if len(linpts) == 1:
if linpts[0]:
z = (f - f_0) / f_sp
else:
z = brkpt+(np.log(f/brkfrq))/np.log(logstep)
else:
z[linpts==True] = (f[linpts==True] - f_0)/f_sp
z[linpts==False] = brkpt+(np.log(f[linpts==False]/brkfrq))/np.log(logstep)
return z
def mel2hz(z, htk=False):
# f = mel2hz(z, htk)
# Convert 'mel scale' frequencies into Hz
# Optional htk = 1 means use the HTK formula
# else use the formula from Slaney's mfcc.m
# 2005-04-19 <EMAIL>
if htk:
f = 700.*(10**(z/2595.)-1);
else:
f_0 = 0.; # 133.33333;
f_sp = 200./3.; # 66.66667;
brkfrq = 1000.;
brkpt = (brkfrq - f_0)/f_sp; # starting mel value for log region
logstep = np.exp(np.log(6.4)/27.); # the magic 1.0711703 which is the ratio needed to get from 1000 Hz to 6400 Hz in 27 steps, and is *almost* the ratio between 1000 Hz and the preceding linear filter center at 933.33333 Hz (actually 1000/933.33333 = 1.07142857142857 and exp(log(6.4)/27) = 1.07117028749447)
linpts = [z < brkpt]
nonlinpts = [z >= brkpt]
f = 0*z;
# fill in parts separately
f[linpts] = f_0 + f_sp*z[linpts];
f[nonlinpts] = brkfrq*np.exp(np.log(logstep)*(z[nonlinpts]-brkpt));
return f
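# Worked example (sketch, not part of the original module): with the Slaney
# formula the linear region runs up to 1000 Hz with a slope of 3/200 mel per Hz,
# so 500 Hz maps to mel 7.5 and mel2hz maps it back. Note mel2hz expects array
# input because of its boolean-mask indexing.
def _example_mel_roundtrip():
    m = hz2mel(500.0)              # 7.5 = 500 / (200/3), linear region
    hz = mel2hz(np.array([m]))     # array([500.])
    return m, hz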
def get_envelope(audio, audio_fs, new_fs, cof=25, bef_aft=[0, 0], pad_next_pow2=False):
''' Get the envelope of a sound file
Inputs:
w [float] : audio signal vector
fs [int] : sampling rate of audio signal
new_fs [int] : desired sampling rate of the envelope (same as your EEG, for example)
Outputs:
envelope [array-like] : returns the envelope of the sound as an array
'''
if pad_next_pow2:
print("Padding the signal to the nearest power of two...this should speed things up")
orig_len = len(audio)
sound_pad = np.hstack((audio, np.zeros((2**np.int(np.ceil(np.log2(len(audio))))-len(audio),))))
audio = sound_pad
print("calculating hilbert transform")
env_hilb = np.abs(hilbert(audio))
nyq = audio_fs/2. #Nyquist frequency
b, a = butter(3, cof/nyq, 'low'); #this designs a 3-pole low-pass filter
print("Low-pass filtering hilbert transform to get audio envelope")
envelope_long = np.atleast_2d(filtfilt(b, a, env_hilb, axis=0)) #filtfilt makes it non-causal (fwd/backward)
envelope = resample(envelope_long.T, np.int(np.floor(envelope_long.shape[1]/(audio_fs/new_fs))))
if pad_next_pow2:
print("Removing padding")
final_len = np.int((orig_len/audio_fs)*new_fs)
envelope = envelope[:final_len,:]
print(envelope.shape)
if bef_aft[0] < 0:
print("Adding %.2f seconds of silence before"%bef_aft[0])
envelope = np.vstack(( np.zeros((np.int(np.abs(bef_aft[0])*new_fs), 1)), envelope ))
if bef_aft[1] > 0:
print("Adding %.2f seconds of silence after"%bef_aft[1])
envelope = np.vstack(( envelope, np.zeros((np.int(bef_aft[1]*new_fs), 1)) ))
return envelope
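# Usage sketch (an assumption, not part of the original module): extract a
# 100 Hz broadband envelope from a wav file; the path is hypothetical.
def _example_envelope(path="stimulus.wav"):
    fs, audio = wavfile.read(path)
    if audio.ndim > 1:             # keep a single channel if the file is stereo
        audio = audio[:, 0]
    return get_envelope(audio.astype(float), fs, new_fs=100, cof=25)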
def get_cse_onset(audio, audio_fs, wins = [0.04], nfilts=80, pos_deriv=True, spec_noise_thresh=1.04):
"""
Get the onset based on cochlear scaled entropy
Inputs:
audio [np.array] : your audio
audio_fs [float] : audio sampling rate
wins [list] : list of windows to use in the boxcar convolution
pos_deriv [bool] : whether to detect onsets only (True) or onsets and offsets (False)
Outputs:
cse [np.array] : rectified cochlear scaled entropy over window [wins]
auddiff [np.array] : instantaneous derivative of spectrogram
"""
new_fs = 100 # Sampling frequency of spectrogram
specgram, _freqs = get_mel_spectrogram(audio, audio_fs, nfilts=nfilts)
specgram[specgram<spec_noise_thresh] = 0
nfilts, ntimes = specgram.shape
if pos_deriv is False:
auddiff= np.sum(np.diff(np.hstack((np.atleast_2d(specgram[:,0]).T, specgram)))**2, axis=0)
else:
all_diff = np.diff(np.hstack((np.atleast_2d(specgram[:,0]).T, specgram)))
all_diff[all_diff<0] = 0
auddiff = np.sum(all_diff**2, axis=0)
cse = np.zeros((len(wins), ntimes))
# Get the windows over which we are summing as bins, not times
win_segments = [np.int(w*new_fs) for w in wins]
for wi, w in enumerate(win_segments):
box = np.hstack((np.atleast_2d(boxcar(w)), -np.ones((1, np.int(0.15*new_fs))))).ravel()
cse[wi,:] = convolve(auddiff, box, 'full')[:ntimes]
cse[cse<0] = 0
cse = cse/cse.max()
return cse, auddiff
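# Sketch (assumption, not part of the original module): thresholding the
# cochlear-scaled-entropy trace to get candidate acoustic onset frames at the
# 100 Hz spectrogram frame rate.
def _example_onset_frames(audio, audio_fs, threshold=0.5):
    cse, _ = get_cse_onset(audio, audio_fs)
    return np.where(cse[0] > threshold)[0]    # frame indices at 100 Hz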
def get_peak_rate(envelope):
env_diff = np.diff(np.concatenate((0, envelope), axis=None))
env_diff[env_diff<0] = 0
return env_diff
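# Sketch (assumption): the peak-rate feature is just the positive first
# difference of the envelope, so it can be stacked next to the envelope itself.
def _example_envelope_and_peak_rate(envelope):
    env = np.asarray(envelope).ravel()
    return np.column_stack([env, get_peak_rate(env)])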
def get_mel_spectrogram(w, fs, wintime=0.025, steptime=0.010, nfilts=80, minfreq=0, maxfreq=None):
''' Make mel-band spectrogram
Inputs:
w [float] : audio signal vector
fs [int] : sampling rate of audio signal
wintime [float] : window size
steptime [float] : step size (time resolution)
nfilts [int] : number of mel-band filters
minfreq [int] : Minimum frequency to analyze (in Hz)
maxfreq [int] : Maximum frequency to analyze (in Hz). If none, defaults to fs/2
Outputs:
mel_spectrogram [array]: mel-band spectrogram
freqs [array] : array of floats, bin edges of spectrogram
'''
if maxfreq is None:
maxfreq = np.int(fs/2)
pspec, e = powspec(w, sr=fs, wintime=wintime, steptime=steptime, dither=1)
aspectrum, wts, freqs = audspec(pspec, sr=fs, nfilts=nfilts, fbtype='mel', minfreq=minfreq, maxfreq=maxfreq, sumpower=True, bwidth=1.0)
mel_spectrogram = aspectrum**0.001
return mel_spectrogram, freqs
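# Usage sketch (assumption, file path hypothetical): compute an 80-band mel
# spectrogram with a 10 ms hop from a wav file and inspect its shape.
def _example_melspec(path="stimulus.wav"):
    fs, audio = wavfile.read(path)
    mel, freqs = get_mel_spectrogram(audio.astype(float), fs)
    return mel.shape, freqs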
def powspec(x, sr=8000, wintime=0.025, steptime=0.010, dither=1):
'''
# compute the powerspectrum and frame energy of the input signal.
# basically outputs a power spectrogram
#
# each column | |
<reponame>sun1638650145/text
# coding=utf-8
# Copyright 2022 TF.Text Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# encoding=utf-8
"""Tests for ngram ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
import tensorflow_text as tf_text
from tensorflow.lite.python import interpreter
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import errors
from tensorflow.python.framework import test_util
from tensorflow.python.ops.ragged import ragged_factory_ops
from tensorflow.python.platform import test
from tensorflow_text.core.pybinds import tflite_registrar
from tensorflow_text.python.ops import ngrams_op
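# Quick orientation (comment added for clarity, not part of the original test
# file): ngrams_op.ngrams slides a window of `width` along `axis` and reduces
# each window. For example, SUM over [1.0, 2.0, 3.0] with width=2 yields
# [3.0, 5.0], and STRING_JOIN over ["a", "b", "c"] with separator "|" yields
# ["a|b", "b|c"], as the tests below verify.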
@test_util.run_all_in_graph_and_eager_modes
class NgramsOpTest(test_util.TensorFlowTestCase):
def testSumReduction(self):
test_data = constant_op.constant([[1.0, 2.0, 3.0], [10.0, 20.0, 30.0]])
op = ngrams_op.ngrams(
test_data, width=2, axis=1, reduction_type=ngrams_op.Reduction.SUM)
expected_values = [[3.0, 5.0], [30.0, 50.0]]
self.assertAllEqual(expected_values, op)
def testRaggedSumReduction(self):
test_data = ragged_factory_ops.constant([[1.0, 2.0, 3.0, 4.0],
[10.0, 20.0, 30.0]])
op = ngrams_op.ngrams(
test_data, width=2, axis=1, reduction_type=ngrams_op.Reduction.SUM)
expected_values = [[3.0, 5.0, 7.0], [30.0, 50.0]]
self.assertAllEqual(expected_values, op)
def testRaggedSumReductionAxisZero(self):
test_data = ragged_factory_ops.constant([[1.0, 2.0, 3.0, 4.0],
[10.0, 20.0, 30.0, 40.0]])
op = ngrams_op.ngrams(
test_data, width=2, axis=0, reduction_type=ngrams_op.Reduction.SUM)
expected_values = [[11.0, 22.0, 33.0, 44.0]]
self.assertAllEqual(expected_values, op)
def testMeanReduction(self):
test_data = constant_op.constant([[1.0, 2.0, 3.0], [10.0, 20.0, 30.0]])
op = ngrams_op.ngrams(
test_data, width=2, axis=1, reduction_type=ngrams_op.Reduction.MEAN)
expected_values = [[1.5, 2.5], [15.0, 25.0]]
self.assertAllEqual(expected_values, op)
def testRaggedMeanReduction(self):
test_data = ragged_factory_ops.constant([[1.0, 2.0, 3.0, 4.0],
[10.0, 20.0, 30.0]])
op = ngrams_op.ngrams(
test_data, width=2, axis=-1, reduction_type=ngrams_op.Reduction.MEAN)
expected_values = [[1.5, 2.5, 3.5], [15.0, 25.0]]
self.assertAllEqual(expected_values, op)
def testStringJoinReduction(self):
test_data = constant_op.constant([[b"a", b"b", b"c"],
[b"dd", b"ee", b"ff"]])
op = ngrams_op.ngrams(
test_data,
width=2,
axis=-1,
reduction_type=ngrams_op.Reduction.STRING_JOIN,
string_separator=b"|")
expected_values = [[b"a|b", b"b|c"], [b"dd|ee", b"ee|ff"]]
self.assertAllEqual(expected_values, op)
def testStringJoinReductionAxisZero(self):
test_data = constant_op.constant([b"a", b"b", b"c"])
op = ngrams_op.ngrams(
test_data,
width=2,
axis=-1, # The -1 axis is the zero axis here.
reduction_type=ngrams_op.Reduction.STRING_JOIN,
string_separator=b"|")
expected_values = [b"a|b", b"b|c"]
self.assertAllEqual(expected_values, op)
def testRaggedStringJoinReduction(self):
test_data = ragged_factory_ops.constant([[b"a", b"b", b"c"],
[b"dd", b"ee"]])
op = ngrams_op.ngrams(
test_data,
width=2,
axis=-1,
reduction_type=ngrams_op.Reduction.STRING_JOIN,
string_separator=b"|")
expected_values = [[b"a|b", b"b|c"], [b"dd|ee"]]
self.assertAllEqual(expected_values, op)
def testReductionWithNegativeAxis(self):
test_data = constant_op.constant([[1.0, 2.0, 3.0], [10.0, 20.0, 30.0]])
op = ngrams_op.ngrams(
test_data, width=2, axis=-1, reduction_type=ngrams_op.Reduction.SUM)
expected_values = [[3.0, 5.0], [30.0, 50.0]]
self.assertAllEqual(expected_values, op)
def testReductionOnInnerAxis(self):
test_data = constant_op.constant([[[1.0, 2.0, 3.0], [10.0, 20.0, 30.0]],
[[4.0, 5.0, 6.0], [40.0, 50.0, 60.0]]])
op = ngrams_op.ngrams(
test_data, width=2, axis=-2, reduction_type=ngrams_op.Reduction.SUM)
expected_values = [[[11.0, 22.0, 33.0]], [[44.0, 55.0, 66.0]]]
self.assertAllEqual(expected_values, op)
def testRaggedReductionOnInnerAxis(self):
test_data = ragged_factory_ops.constant([[[1.0, 2.0, 3.0, 4.0],
[10.0, 20.0, 30.0, 40.0]],
[[100.0, 200.0], [300.0, 400.0]]])
op = ngrams_op.ngrams(
test_data, width=2, axis=-2, reduction_type=ngrams_op.Reduction.SUM)
expected_values = [[[11.0, 22.0, 33.0, 44.0]], [[400.0, 600.0]]]
self.assertAllEqual(expected_values, op)
def testReductionOnAxisWithInsufficientValuesReturnsEmptySet(self):
test_data = constant_op.constant([[1.0, 2.0, 3.0], [10.0, 20.0, 30.0]])
op = ngrams_op.ngrams(
test_data, width=4, axis=-1, reduction_type=ngrams_op.Reduction.SUM)
expected_values = [[], []]
self.assertAllEqual(expected_values, op)
def testRaggedReductionOnAxisWithInsufficientValuesReturnsEmptySet(self):
test_data = ragged_factory_ops.constant([[1.0, 2.0, 3.0],
[10.0, 20.0, 30.0, 40.0]])
op = ngrams_op.ngrams(
test_data, width=4, axis=1, reduction_type=ngrams_op.Reduction.SUM)
expected_values = [[], [100.0]]
self.assertAllEqual(expected_values, op)
def testStringJoinReductionFailsWithImproperAxis(self):
with self.assertRaisesRegex(
errors.InvalidArgumentError,
r".*requires that ngrams' 'axis' parameter be -1."):
_ = ngrams_op.ngrams(
data=[],
width=2,
axis=0,
reduction_type=ngrams_op.Reduction.STRING_JOIN)
def testUnspecifiedReductionTypeFails(self):
with self.assertRaisesRegex(errors.InvalidArgumentError,
r"reduction_type must be specified."):
_ = ngrams_op.ngrams(data=[], width=2, axis=0)
def testBadReductionTypeFails(self):
with self.assertRaisesRegex(errors.InvalidArgumentError,
r"reduction_type must be a Reduction."):
_ = ngrams_op.ngrams(data=[], width=2, axis=0, reduction_type="SUM")
class NgramsV2OpTest(test_util.TensorFlowTestCase):
@test_util.with_forward_compatibility_horizons([2022, 11, 30])
def testSumReduction(self):
test_data = constant_op.constant([[1.0, 2.0, 3.0], [10.0, 20.0, 30.0]])
op = ngrams_op.ngrams(
test_data, width=2, axis=1, reduction_type=ngrams_op.Reduction.SUM)
expected_values = [[3.0, 5.0], [30.0, 50.0]]
self.assertAllEqual(expected_values, op)
@test_util.with_forward_compatibility_horizons([2022, 11, 30])
def testRaggedSumReduction(self):
test_data = ragged_factory_ops.constant([[1.0, 2.0, 3.0, 4.0],
[10.0, 20.0, 30.0]])
op = ngrams_op.ngrams(
test_data, width=2, axis=1, reduction_type=ngrams_op.Reduction.SUM)
expected_values = [[3.0, 5.0, 7.0], [30.0, 50.0]]
self.assertAllEqual(expected_values, op)
@test_util.with_forward_compatibility_horizons([2022, 11, 30])
def testRaggedSumReductionAxisZero(self):
test_data = ragged_factory_ops.constant([[1.0, 2.0, 3.0, 4.0],
[10.0, 20.0, 30.0, 40.0]])
op = ngrams_op.ngrams(
test_data, width=2, axis=0, reduction_type=ngrams_op.Reduction.SUM)
expected_values = [[11.0, 22.0, 33.0, 44.0]]
self.assertAllEqual(expected_values, op)
@test_util.with_forward_compatibility_horizons([2022, 11, 30])
def testMeanReduction(self):
test_data = constant_op.constant([[1.0, 2.0, 3.0], [10.0, 20.0, 30.0]])
op = ngrams_op.ngrams(
test_data, width=2, axis=1, reduction_type=ngrams_op.Reduction.MEAN)
expected_values = [[1.5, 2.5], [15.0, 25.0]]
self.assertAllEqual(expected_values, op)
@test_util.with_forward_compatibility_horizons([2022, 11, 30])
def testRaggedMeanReduction(self):
test_data = ragged_factory_ops.constant([[1.0, 2.0, 3.0, 4.0],
[10.0, 20.0, 30.0]])
op = ngrams_op.ngrams(
test_data, width=2, axis=-1, reduction_type=ngrams_op.Reduction.MEAN)
expected_values = [[1.5, 2.5, 3.5], [15.0, 25.0]]
self.assertAllEqual(expected_values, op)
@test_util.with_forward_compatibility_horizons([2022, 11, 30])
def testStringJoinReduction(self):
test_data = constant_op.constant([["a", "b", "c"], ["dd", "ee", "ff"]])
op = ngrams_op.ngrams(
test_data,
width=2,
axis=-1,
reduction_type=ngrams_op.Reduction.STRING_JOIN,
string_separator="|")
expected_values = [[b"a|b", b"b|c"], [b"dd|ee", b"ee|ff"]]
self.assertAllEqual(expected_values, op)
@test_util.with_forward_compatibility_horizons([2022, 11, 30])
def testStringJoinReductionRank3(self):
test_data = constant_op.constant([[["a", "b", "c"], ["z", "y", "x"]],
[["dd", "ee", "ff"], ["zz", "yy", "xx"]]])
op = ngrams_op.ngrams(
test_data,
width=2,
axis=-1,
reduction_type=ngrams_op.Reduction.STRING_JOIN,
string_separator="|")
expected_values = [[[b"a|b", b"b|c"], [b"z|y", b"y|x"]],
[[b"dd|ee", b"ee|ff"], [b"zz|yy", b"yy|xx"]]]
self.assertAllEqual(expected_values, op)
@test_util.with_forward_compatibility_horizons([2022, 11, 30])
def testStringJoinReductionAxisZero(self):
test_data = constant_op.constant(["a", "b", "c"])
op = ngrams_op.ngrams(
test_data,
width=2,
axis=-1, # The -1 axis is the zero axis here.
reduction_type=ngrams_op.Reduction.STRING_JOIN,
string_separator="|")
expected_values = [b"a|b", b"b|c"]
self.assertAllEqual(expected_values, op)
@test_util.with_forward_compatibility_horizons([2022, 11, 30])
def testRaggedStringJoinReduction(self):
test_data = ragged_factory_ops.constant([["a", "b", "c"], ["dd", "ee"]])
op = ngrams_op.ngrams(
test_data,
width=2,
axis=-1,
reduction_type=ngrams_op.Reduction.STRING_JOIN,
string_separator="|")
expected_values = [[b"a|b", b"b|c"], [b"dd|ee"]]
self.assertAllEqual(expected_values, op)
@test_util.with_forward_compatibility_horizons([2022, 11, 30])
def testRaggedDeepStringJoinReduction(self):
test_data = ragged_factory_ops.constant([[[["a", "b", "c"]],
[["dd", "ee"]]],
[[["f", "g"], ["h", "i", "j"]],
[["k", "l"]]]])
op = ngrams_op.ngrams(
test_data,
width=2,
axis=-1,
reduction_type=ngrams_op.Reduction.STRING_JOIN,
string_separator="|")
expected_values = [[[[b"a|b", b"b|c"]], [[b"dd|ee"]]],
[[[b"f|g"], [b"h|i", b"i|j"]], [[b"k|l"]]]]
self.assertAllEqual(expected_values, op)
@test_util.with_forward_compatibility_horizons([2022, 11, 30])
def testDoubleRaggedStringJoinReduction(self):
test_data = tf.constant([["a b c"], ["d e"]])
t = tf_text.WhitespaceTokenizer()
test_data = t.tokenize(test_data)
op = ngrams_op.ngrams(
test_data,
width=2,
axis=-1,
reduction_type=ngrams_op.Reduction.STRING_JOIN,
string_separator="|")
expected_values = [[[b"a|b", b"b|c"]], [[b"d|e"]]]
self.assertAllEqual(expected_values, op)
@test_util.with_forward_compatibility_horizons([2022, 11, 30])
def testReductionWithNegativeAxis(self):
test_data = constant_op.constant([[1.0, 2.0, 3.0], [10.0, 20.0, 30.0]])
op = ngrams_op.ngrams(
test_data, width=2, axis=-1, reduction_type=ngrams_op.Reduction.SUM)
expected_values = [[3.0, 5.0], [30.0, 50.0]]
self.assertAllEqual(expected_values, op)
@test_util.with_forward_compatibility_horizons([2022, 11, 30])
def testReductionOnInnerAxis(self):
test_data = constant_op.constant([[[1.0, 2.0, 3.0], [10.0, 20.0, 30.0]],
[[4.0, 5.0, 6.0], [40.0, 50.0, 60.0]]])
op = ngrams_op.ngrams(
test_data, width=2, axis=-2, reduction_type=ngrams_op.Reduction.SUM)
expected_values = [[[11.0, 22.0, 33.0]], [[44.0, 55.0, 66.0]]]
self.assertAllEqual(expected_values, op)
@test_util.with_forward_compatibility_horizons([2022, 11, 30])
def testRaggedReductionOnInnerAxis(self):
test_data = ragged_factory_ops.constant([[[1.0, 2.0, 3.0, 4.0],
[10.0, 20.0, 30.0, 40.0]],
[[100.0, 200.0], [300.0, 400.0]]])
op = ngrams_op.ngrams(
test_data, width=2, axis=-2, reduction_type=ngrams_op.Reduction.SUM)
expected_values = [[[11.0, 22.0, 33.0, 44.0]], [[400.0, 600.0]]]
self.assertAllEqual(expected_values, op)
@test_util.with_forward_compatibility_horizons([2022, 11, 30])
def testReductionOnAxisWithInsufficientValuesReturnsEmptySet(self):
test_data = constant_op.constant([[1.0, 2.0, 3.0], [10.0, 20.0, 30.0]])
op = ngrams_op.ngrams(
test_data, width=4, axis=-1, reduction_type=ngrams_op.Reduction.SUM)
expected_values = [[], []]
self.assertAllEqual(expected_values, op)
@test_util.with_forward_compatibility_horizons([2022, 11, 30])
def testRaggedReductionOnAxisWithInsufficientValuesReturnsEmptySet(self):
test_data = ragged_factory_ops.constant([[1.0, 2.0, 3.0],
[10.0, 20.0, 30.0, 40.0]])
op = ngrams_op.ngrams(
test_data, width=4, axis=1, reduction_type=ngrams_op.Reduction.SUM)
expected_values = [[], [100.0]]
self.assertAllEqual(expected_values, op)
@test_util.with_forward_compatibility_horizons([2022, 11, 30])
def testStringJoinReductionFailsWithImproperAxis(self):
with self.assertRaisesRegex(
errors.InvalidArgumentError,
r".*requires that ngrams' 'axis' parameter be -1."):
_ = ngrams_op.ngrams(
data=[],
width=2,
axis=0,
reduction_type=ngrams_op.Reduction.STRING_JOIN)
@test_util.with_forward_compatibility_horizons([2022, 11, 30])
def testUnspecifiedReductionTypeFails(self):
with self.assertRaisesRegex(errors.InvalidArgumentError,
r"reduction_type must be specified."):
_ = ngrams_op.ngrams(data=[], width=2, axis=0)
@test_util.with_forward_compatibility_horizons([2022, 11, 30])
def testBadReductionTypeFails(self):
with self.assertRaisesRegex(errors.InvalidArgumentError,
r"reduction_type must be a Reduction."):
_ = ngrams_op.ngrams(data=[], width=2, axis=0, reduction_type="SUM")
@test_util.with_forward_compatibility_horizons([2022, 11, 30])
def testTfLite(self):
"""Checks TFLite conversion and inference."""
class NgramModel(tf.keras.Model):
def call(self, input_tensor, **kwargs):
return ngrams_op.ngrams(input_tensor, width=2, axis=-1,
reduction_type=ngrams_op.Reduction.STRING_JOIN,
string_separator="|")
# Test input data.
input_data = np.array(["a", "b", "c"])
# Define a Keras model.
model = NgramModel()
# Do TF.Text inference.
tf_result = model(tf.constant(input_data))
# Convert to TFLite.
converter = tf.lite.TFLiteConverter.from_keras_model(model)
converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS]
converter.allow_custom_ops = True
tflite_model = converter.convert()
# Do TFLite inference.
op = tflite_registrar.AddNgramsStringJoin
interp = interpreter.InterpreterWithCustomOps(
model_content=tflite_model,
custom_op_registerers=[op])
input_details = interp.get_input_details()
interp.resize_tensor_input(input_details[0]["index"], tf.shape(input_data))
interp.allocate_tensors()
interp.set_tensor(input_details[0]["index"], input_data)
interp.invoke()
output_details = interp.get_output_details()
tflite_result = interp.get_tensor(output_details[0]["index"])
# Assert the results are identical.
self.assertAllEqual(tflite_result, tf_result)
@test_util.with_forward_compatibility_horizons([2022, 11, 30])
def testTfLiteRagged(self):
"""Checks TFLite conversion and inference."""
class NgramModel(tf.keras.Model):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.tokenizer = tf_text.WhitespaceTokenizer()
@tf.function(input_signature=[
tf.TensorSpec(shape=[1], dtype=tf.string, name="input")
])
def call(self, input_tensor):
input_tensor = self.tokenizer.tokenize(input_tensor)
x = ngrams_op.ngrams(input_tensor, width=2, axis=-1,
reduction_type=ngrams_op.Reduction.STRING_JOIN,
string_separator="|")
return {"result": x.flat_values}
# Test input data.
input_data = np.array(["foo bar"])
# Define a Keras model.
model = NgramModel()
# Do TF.Text inference.
tf_result = model(tf.constant(input_data))["result"]
print(tf_result.shape)
# Convert to TFLite.
converter = tf.lite.TFLiteConverter.from_keras_model(model)
converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS]
converter.allow_custom_ops = True
tflite_model = converter.convert()
# Do TFLite inference.
interp = interpreter.InterpreterWithCustomOps(
model_content=tflite_model,
custom_op_registerers=tf_text.tflite_registrar.SELECT_TFTEXT_OPS)
print(interp.get_signature_list())
tn = interp.get_signature_runner("serving_default")
output = tn(input=input_data)
if tf.executing_eagerly():
tflite_result = output["result"]
| |
origins.
"""
return pulumi.get(self, "web_origins")
@web_origins.setter
def web_origins(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "web_origins", value)
class Client(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
access_token_lifespan: Optional[pulumi.Input[str]] = None,
access_type: Optional[pulumi.Input[str]] = None,
admin_url: Optional[pulumi.Input[str]] = None,
authentication_flow_binding_overrides: Optional[pulumi.Input[pulumi.InputType['ClientAuthenticationFlowBindingOverridesArgs']]] = None,
authorization: Optional[pulumi.Input[pulumi.InputType['ClientAuthorizationArgs']]] = None,
backchannel_logout_revoke_offline_sessions: Optional[pulumi.Input[bool]] = None,
backchannel_logout_session_required: Optional[pulumi.Input[bool]] = None,
backchannel_logout_url: Optional[pulumi.Input[str]] = None,
base_url: Optional[pulumi.Input[str]] = None,
client_id: Optional[pulumi.Input[str]] = None,
client_offline_session_idle_timeout: Optional[pulumi.Input[str]] = None,
client_offline_session_max_lifespan: Optional[pulumi.Input[str]] = None,
client_secret: Optional[pulumi.Input[str]] = None,
client_session_idle_timeout: Optional[pulumi.Input[str]] = None,
client_session_max_lifespan: Optional[pulumi.Input[str]] = None,
consent_required: Optional[pulumi.Input[bool]] = None,
description: Optional[pulumi.Input[str]] = None,
direct_access_grants_enabled: Optional[pulumi.Input[bool]] = None,
enabled: Optional[pulumi.Input[bool]] = None,
exclude_session_state_from_auth_response: Optional[pulumi.Input[bool]] = None,
extra_config: Optional[pulumi.Input[Mapping[str, Any]]] = None,
full_scope_allowed: Optional[pulumi.Input[bool]] = None,
implicit_flow_enabled: Optional[pulumi.Input[bool]] = None,
login_theme: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
pkce_code_challenge_method: Optional[pulumi.Input[str]] = None,
realm_id: Optional[pulumi.Input[str]] = None,
root_url: Optional[pulumi.Input[str]] = None,
service_accounts_enabled: Optional[pulumi.Input[bool]] = None,
standard_flow_enabled: Optional[pulumi.Input[bool]] = None,
use_refresh_tokens: Optional[pulumi.Input[bool]] = None,
valid_redirect_uris: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
web_origins: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
__props__=None):
"""
Allows for creating and managing Keycloak clients that use the OpenID Connect protocol.
Clients are entities that can use Keycloak for user authentication. Typically,
clients are applications that redirect users to Keycloak for authentication
in order to take advantage of Keycloak's user sessions for SSO.
## Example Usage
```python
import pulumi
import pulumi_keycloak as keycloak
realm = keycloak.Realm("realm",
realm="my-realm",
enabled=True)
openid_client = keycloak.openid.Client("openidClient",
realm_id=realm.id,
client_id="test-client",
enabled=True,
access_type="CONFIDENTIAL",
valid_redirect_uris=["http://localhost:8080/openid-callback"],
login_theme="keycloak",
extra_config={
"key1": "value1",
"key2": "value2",
})
```
## Import
Clients can be imported using the format `{{realm_id}}/{{client_keycloak_id}}`, where `client_keycloak_id` is the unique ID that Keycloak assigns to the client upon creation. This value can be found in the URI when editing this client in the GUI, and is typically a GUID. Example:
```sh
$ pulumi import keycloak:openid/client:Client openid_client my-realm/dcbc4c73-e478-4928-ae2e-d5e420223352
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] access_token_lifespan: The amount of time in seconds before an access token expires. This will override the default for the realm.
:param pulumi.Input[str] access_type: Specifies the type of client, which can be one of the following:
:param pulumi.Input[str] admin_url: URL to the admin interface of the client.
:param pulumi.Input[pulumi.InputType['ClientAuthenticationFlowBindingOverridesArgs']] authentication_flow_binding_overrides: Override realm authentication flow bindings
:param pulumi.Input[pulumi.InputType['ClientAuthorizationArgs']] authorization: When this block is present, fine-grained authorization will be enabled for this client. The client's `access_type` must be `CONFIDENTIAL`, and `service_accounts_enabled` must be `true`. This block has the following arguments:
:param pulumi.Input[bool] backchannel_logout_revoke_offline_sessions: Specifying whether a "revoke_offline_access" event is included in the Logout Token when the Backchannel Logout URL is used. Keycloak will revoke offline sessions when receiving a Logout Token with this event.
:param pulumi.Input[bool] backchannel_logout_session_required: When `true`, a sid (session ID) claim will be included in the logout token when the backchannel logout URL is used. Defaults to `true`.
:param pulumi.Input[str] backchannel_logout_url: The URL that will cause the client to log itself out when a logout request is sent to this realm. If omitted, no logout request will be sent to the client in this case.
:param pulumi.Input[str] base_url: Default URL to use when the auth server needs to redirect or link back to the client.
:param pulumi.Input[str] client_id: The Client ID for this client, referenced in the URI during authentication and in issued tokens.
:param pulumi.Input[str] client_offline_session_idle_timeout: Time a client session is allowed to be idle before it expires. Tokens are invalidated when a client session is expired. If not set it uses the standard SSO Session Idle value.
:param pulumi.Input[str] client_offline_session_max_lifespan: Max time before a client session is expired. Tokens are invalidated when a client session is expired. If not set, it uses the standard SSO Session Max value.
:param pulumi.Input[str] client_secret: The secret for clients with an `access_type` of `CONFIDENTIAL` or `BEARER-ONLY`. This value is sensitive and should be treated with the same care as a password. If omitted, this will be generated by Keycloak.
:param pulumi.Input[str] client_session_idle_timeout: Time a client offline session is allowed to be idle before it expires. Offline tokens are invalidated when a client offline session is expired. If not set it uses the Offline Session Idle value.
:param pulumi.Input[str] client_session_max_lifespan: Max time before a client offline session is expired. Offline tokens are invalidated when a client offline session is expired. If not set, it uses the Offline Session Max value.
:param pulumi.Input[bool] consent_required: When `true`, users have to consent to client access.
:param pulumi.Input[str] description: The description of this client in the GUI.
:param pulumi.Input[bool] direct_access_grants_enabled: When `true`, the OAuth2 Resource Owner Password Grant will be enabled for this client. Defaults to `false`.
:param pulumi.Input[bool] enabled: When `false`, this client will not be able to initiate a login or obtain access tokens. Defaults to `true`.
:param pulumi.Input[bool] exclude_session_state_from_auth_response: When `true`, the parameter `session_state` will not be included in OpenID Connect Authentication Response.
:param pulumi.Input[bool] full_scope_allowed: Allow to include all roles mappings in the access token.
:param pulumi.Input[bool] implicit_flow_enabled: When `true`, the OAuth2 Implicit Grant will be enabled for this client. Defaults to `false`.
:param pulumi.Input[str] login_theme: The client login theme. This will override the default theme for the realm.
:param pulumi.Input[str] name: The display name of this client in the GUI.
:param pulumi.Input[str] pkce_code_challenge_method: The challenge method to use for Proof Key for Code Exchange. Can be either `plain` or `S256` or set to empty value ``.
:param pulumi.Input[str] realm_id: The realm this client is attached to.
:param pulumi.Input[str] root_url: When specified, this URL is prepended to any relative URLs found within `valid_redirect_uris`, `web_origins`, and `admin_url`. NOTE: Due to limitations in the Keycloak API, when the `root_url` attribute is used, the `valid_redirect_uris`, `web_origins`, and `admin_url` attributes will be required.
:param pulumi.Input[bool] service_accounts_enabled: When `true`, the OAuth2 Client Credentials grant will be enabled for this client. Defaults to `false`.
:param pulumi.Input[bool] standard_flow_enabled: When `true`, the OAuth2 Authorization Code Grant will be enabled for this client. Defaults to `false`.
:param pulumi.Input[bool] use_refresh_tokens: If this is `true`, a refresh_token will be created and added to the token response. If this is `false` then no refresh_token will be generated. Defaults to `true`.
:param pulumi.Input[Sequence[pulumi.Input[str]]] valid_redirect_uris: A list of valid URIs a browser is permitted to redirect to after a successful login or logout. Simple
wildcards in the form of an asterisk can be used here. This attribute must be set if either `standard_flow_enabled` or `implicit_flow_enabled`
is set to `true`.
:param pulumi.Input[Sequence[pulumi.Input[str]]] web_origins: A list of allowed CORS origins. `+` can be used to permit all valid redirect URIs, and `*` can be used to permit all origins.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: ClientArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Allows for creating and managing Keycloak clients that use the OpenID Connect protocol.
Clients are entities that can use Keycloak for user authentication. Typically,
clients are applications that redirect users to Keycloak for authentication
in order to take advantage of Keycloak's user sessions for SSO.
## Example Usage
```python
import pulumi
import pulumi_keycloak as keycloak
realm = keycloak.Realm("realm",
realm="my-realm",
enabled=True)
openid_client = keycloak.openid.Client("openidClient",
realm_id=realm.id,
client_id="test-client",
enabled=True,
access_type="CONFIDENTIAL",
valid_redirect_uris=["http://localhost:8080/openid-callback"],
login_theme="keycloak",
extra_config={
"key1": "value1",
"key2": "value2",
})
```
## Import
Clients can be imported using the format `{{realm_id}}/{{client_keycloak_id}}`, where `client_keycloak_id` is the unique ID that Keycloak assigns to the client upon creation. This value can be found in the URI when editing this client in the GUI, and is typically a GUID. Example:
```sh
$ pulumi import keycloak:openid/client:Client openid_client my-realm/dcbc4c73-e478-4928-ae2e-d5e420223352
```
:param str resource_name: The name of the resource.
:param ClientArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(ClientArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
access_token_lifespan: Optional[pulumi.Input[str]] = None,
access_type: Optional[pulumi.Input[str]] = None,
admin_url: Optional[pulumi.Input[str]] = | |
***" % interaction.action.utterance)
super().add_interaction(interaction)
interaction.action.success = 1
return # noqa R502
elif action_type == "Audio":
if interaction.agent_id == 0: # Commander
self.logger.info("*** Commander - Audio: %s ***" % interaction.action.utterance)
else:
self.logger.info("*** Driver - Audio: %s ***" % interaction.action.utterance)
super().add_interaction(interaction)
interaction.action.success = 1
return # noqa R502
else:
logger.warning("%s: Not supported" % interaction.action.action_type)
interaction.action.success = 0
return # noqa R502
def __update_custom_coffee_prop(self, event, objs_before_event=None):
"""
Check whether coffee has been made and update custom property - this uses get_parent_receptacles() for extra
reliability and checks that a container just got placed in a coffee maker and the coffee maker was on
"""
cur_objects = self.get_objects(event)
coffee_maker_ids = set(
[obj["objectId"] for obj in cur_objects if "CoffeeMachine" in obj["objectType"] and obj["isToggled"]]
)
for obj in cur_objects:
prev_filled_with_liquid = False
if objs_before_event is not None:
prev_state = self.__get_object_by_id(objs_before_event, obj["objectId"])
if prev_state:
prev_filled_with_liquid = prev_state["isFilledWithLiquid"]
parent_receptacles = self.get_parent_receptacles(obj, cur_objects)
placed_in_toggled_coffee_maker = False
if parent_receptacles is not None and len(set(parent_receptacles).intersection(coffee_maker_ids)) > 0:
placed_in_toggled_coffee_maker = True
if (
placed_in_toggled_coffee_maker
and obj["canFillWithLiquid"]
and obj["isFilledWithLiquid"]
and not prev_filled_with_liquid
):
self.__update_custom_object_metadata(obj["objectId"], "simbotIsFilledWithCoffee", True)
def __update_sink_interaction_outcomes(self, event):
"""
Force sink behaviour to be deterministic - if a faucet is turned on, clean all objects in the sink and
fill objects that can be filled with water
"""
cur_objects = self.get_objects(event)
sink_objects = list()
for obj in cur_objects:
# Check whether the faucet over each sink/bathtub basin is turned on; objects inside will then be cleaned and filled.
if (
"SinkBasin" in obj["objectType"]
or "Sink" in obj["objectType"]
or "BathtubBasin" in obj["objectType"]
or "Bathtub" in obj["objectType"]
):
# Fetch the faucet near the sink
faucet_obj = self.__get_object_by_position(self.get_objects(event), obj["position"], obj_type="Faucet")
if faucet_obj["isToggled"]:
sink_objects.append(obj)
sink_obj_ids = set([obj["objectId"] for obj in sink_objects])
objs_in_sink = list()
for obj in cur_objects:
parent_receptacles = self.get_parent_receptacles(obj, cur_objects)
if parent_receptacles is not None:
if len(set(parent_receptacles).intersection(sink_obj_ids)) > 0:
objs_in_sink.append(obj)
for child_obj in objs_in_sink:
if child_obj["isDirty"]:
ac = dict(action="CleanObject", objectId=child_obj["objectId"], forceAction=True)
if debug_print_all_sim_steps:
logger.info("step %s", ac)
self.controller.step(ac)
if child_obj["canFillWithLiquid"]:
ac = dict(
action="FillObjectWithLiquid", objectId=child_obj["objectId"], fillLiquid="water", forceAction=True
)
if debug_print_all_sim_steps:
logger.info("step %s", ac)
self.controller.step(ac)
self.__update_custom_object_metadata(child_obj["objectId"], "simbotIsFilledWithWater", 1)
def __thor_error_to_help_message(self, msg):
"""
Translate AI2-THOR errorMessage field into something that can be shown as prompts to annotators for TEACh data
collection
"""
# Example: "Floor|+00.00|+00.00|+00.00 must have the property CanPickup to be picked up." # noqa: E800
if "CanPickup to be" in msg:
return 'Object "%s" can\'t be picked up.' % msg.split()[0].split("|")[0]
# Example: "Object ID appears to be invalid." # noqa: E800
if ("Object ID" in msg and "invalid" in msg) or "Could not retrieve object" in msg:
return "Could not determine what object was clicked."
# Example "Can't place an object if Agent isn't holding anything # noqa: E800
if "if Agent isn't holding" in msg:
return "Must be holding an object first."
# Example: "Slice: ObjectInteraction only supported for held object Knife" # noqa: E800
if "Slice: ObjectInteraction" in msg:
return "Must be holding a knife."
# Example: "object is not toggleable" # noqa: E800
if "not toggleable" in msg:
return "Object cannot be turned on or off."
# Example: "can't toggle object off if it's already off!" # noqa: E800
if "toggle object off if" in msg:
return "Object is already turned off."
# Example: "can't toggle object on if it's already on!" # noqa: E800
if "toggle object on if" in msg:
return "Object is already turned on."
# Example: "CounterTop|-00.08|+01.15|00.00 is not an Openable object" # noqa: E800
if "is not an Openable object" in msg:
return 'Object "%s" can\'t be opened.' % msg.split()[0].split("|")[0]
# Example: "CounterTop_d7cc8dfe Does not have the CanBeSliced property!" # noqa: E800
if "Does not have the CanBeSliced" in msg:
return "Object cannot be sliced."
# Example: "Object failed to open/close successfully." # noqa: E800
if "failed to open/close" in msg:
return "Something is blocking the object from opening or closing. Move farther away or remove obstruction."
# Example: "StandardIslandHeight is blocking Agent 0 from moving 0" # noqa: E800
if "is blocking" in msg:
return "Something is blocking the robot from moving in that direction."
# Example: "a held item: Book_3d15d052 with something if agent rotates Right 90 degrees" # noqa: E800
if "a held item" in msg and "if agent rotates" in msg:
return "The held item will collide with something if the robot turns that direction."
# Example: "No valid positions to place object found" # noqa: E800
if "No valid positions to place" in msg:
return "The receptacle is too full or too small to contain the held item."
# Example: "This target object is NOT a receptacle!" # noqa: E800
if "NOT a receptacle" in msg:
return "Object is not a receptacle the robot can place items in."
# Example: "Target must be OFF to open!" # noqa: E800
if "OFF to open!" in msg:
return "Object must be turned off before it can be opened."
# Example: "cracked_egg_5(Clone) is not a valid Object Type to be placed in StoveBurner_58b674c4" # noqa: E800
if "not a valid Object Type to be placed" in msg:
return "Held object cannot be placed there."
# Example: "No target found" # noqa: E800
if "No target found" in msg:
return "No reachable object at that location."
# Example: "Knife|-01.70|+01.71|+04.01 is not interactable and (perhaps it is occluded by something)." # noqa: E800
if "it is occluded by something" in msg:
return "An object is blocking you from interacting with the selected object."
# "Could not find a target object at the specified location" # noqa: E800
if "Could not find a target object" in msg:
return "No valid object at that location."
# "another object's collision is blocking held object from being placed" # noqa: E800
if "another object's collision is blocking" in msg:
return "The target area is too cluttered or the held object is already colliding with something."
# "CounterTop|+00.69|+00.95|-02.48 is too far away to be interacted with" # noqa: E800
if "too far away to" in msg:
return "That object is too far away to interact with."
# "Your partner is too far away for a handoff." # noqa: E800
if "too far away for" in msg:
return "Your partner is too far away for a handoff."
# "Place: ObjectInteraction only supported when holding an object" # noqa: E800
if "only supported when holding" in msg:
return "You are not holding an object."
# "Picking up object would cause it to collide and clip into something!" # noqa: E800
if "would cause it to collide and" in msg:
return "Cannot grab object from here without colliding with something."
# "You cannot slice something while your partner is holding it." # noqa: E800
if "cannot slice something while" in msg:
return msg
# If msg couldn't be handled, don't create a readable system message
return None
def get_hotspots(
self,
agent_id,
hotspot_pixel_width=None,
action_str=None,
object_id=None,
camera_id=None,
return_full_seg_mask=False,
):
"""
Return a segmentation mask highlighting object(s) in an egocentric image
:param agent_id: the agent whose image needs to be highlighted; 0 for Commander and 1 for Driver/ Follower
:param hotspot_pixel_width: Minimum hotspot size
:param action_str: Highlight objects on which this action can be performed
:param object_id: Specify object to be highlighted using object ID
:param camera_id: Generate segmentation mask for a disembodied camera with this ID instead of for an agent
:param return_full_seg_mask: additional flag to highlight a single object specified by object_id
"""
assert not return_full_seg_mask or object_id is not None
assert (action_str is None or object_id is None) and not (action_str is not None and object_id is not None)
assert agent_id is None or camera_id is None
if hotspot_pixel_width is None:
hotspot_pixel_width = self.hotspot_pixel_width
if agent_id is not None:
sim_agent_id = agent_id if self.commander_embodied else 0
le = (
self.controller.last_event.events[sim_agent_id]
if self.commander_embodied
else self.controller.last_event
)
# Take a no-op step to render the object segmentation frame for hotspots.
if | |
<reponame>aledelmo/3DSlicer_Plugins
import os
import string
import time
import unittest
from builtins import range
try:
from itertools import izip as zip
except ImportError:
pass
import RegistrationLib
from __main__ import vtk, qt, ctk, slicer
import MyRegistrationLib
class BonesSegmentation:
def __init__(self, parent):
parent.title = "Bones"
parent.categories = ['IMAG2', "Pelvic Segmentation"]
parent.dependencies = []
parent.contributors = ["<NAME>, <NAME>' (IMAG2)"]
parent.helpText = string.Template("""
This module performs the semiautomatic segmentation of the pelvic bones on T2w MRI.
""").substitute({'a': parent.slicerWikiUrl, 'b': slicer.app.majorVersion, 'c': slicer.app.minorVersion})
parent.acknowledgementText = """
.....
""" # replace with organization, grant and thanks.
self.parent = parent
# IMAG2: Add the corresponding icon to the module
self.moduleName = self.__class__.__name__
moduleDir = os.path.dirname(self.parent.path)
iconPath = os.path.join(moduleDir, 'Resources', 'icon.jpg')
if os.path.isfile(iconPath):
parent.icon = qt.QIcon(iconPath)
try:
slicer.selfTests
except AttributeError:
slicer.selfTests = {}
slicer.selfTests['BonesSegmentation'] = self.runTest
def runTest(self):
tester = BonesSegmentationTest()
tester.runTest()
class BonesSegmentationWidget:
"""The module GUI widget"""
def __init__(self, parent=None):
settings = qt.QSettings()
try:
self.developerMode = settings.value('Developer/DeveloperMode').lower() == 'true'
except AttributeError:
self.developerMode = settings.value('Developer/DeveloperMode') is True
self.logic = BonesSegmentationLogic()
self.logic.registationState = self.registationState
self.sliceNodesByViewName = {}
self.sliceNodesByVolumeID = {}
self.observerTags = []
self.viewNames = ("Fixed", "Moving", "Transformed")
self.volumeSelectDialog = None
self.currentRegistrationInterface = None
self.currentLocalRefinementInterface = None
if not parent:
self.parent = slicer.qMRMLWidget()
self.parent.setLayout(qt.QVBoxLayout())
self.parent.setMRMLScene(slicer.mrmlScene)
else:
self.parent = parent
self.layout = self.parent.layout()
if not parent:
self.setup()
self.parent.show()
def setup(self):
"""Instantiate and connect widgets ..."""
self.selectVolumesButton = qt.QPushButton("Show Pop-Up Selector")
self.selectVolumesButton.connect('clicked(bool)', self.enter)
self.layout.addWidget(self.selectVolumesButton)
#
# IMAG2: Apply Button (harden transform + image resampling)
#
self.applyButton = qt.QPushButton("Segment")
self.applyButton.toolTip = "Segment!"
self.applyButton.enabled = True
self.layout.addWidget(self.applyButton)
self.applyButton.connect('clicked(bool)', self.onApplyButton)
self.interfaceFrame = qt.QWidget(self.parent)
self.interfaceFrame.setLayout(qt.QVBoxLayout())
self.layout.addWidget(self.interfaceFrame)
#
# Parameters Area
#
parametersCollapsibleButton = ctk.ctkCollapsibleButton()
parametersCollapsibleButton.text = "Hips Segmentation"
self.interfaceFrame.layout().addWidget(parametersCollapsibleButton)
parametersFormLayout = qt.QFormLayout(parametersCollapsibleButton)
self.volumeSelectors = {}
for viewName in self.viewNames:
self.volumeSelectors[viewName] = slicer.qMRMLNodeComboBox()
self.volumeSelectors[viewName].nodeTypes = (("vtkMRMLScalarVolumeNode"), "")
self.volumeSelectors[viewName].selectNodeUponCreation = False
self.volumeSelectors[viewName].addEnabled = False
self.volumeSelectors[viewName].removeEnabled = True
self.volumeSelectors[viewName].noneEnabled = True
self.volumeSelectors[viewName].showHidden = False
self.volumeSelectors[viewName].showChildNodeTypes = True
self.volumeSelectors[viewName].setMRMLScene(slicer.mrmlScene)
self.volumeSelectors[viewName].setToolTip("Pick the %s volume." % viewName.lower())
self.volumeSelectors[viewName].enabled = False
# parametersFormLayout.addRow("%s Volume " % viewName, self.volumeSelectors[viewName])
self.volumeSelectors["Transformed"].addEnabled = True
self.volumeSelectors["Transformed"].selectNodeUponCreation = True
self.volumeSelectors["Transformed"].setToolTip(
"Pick the transformed volume, which is the target for the registration.")
self.transformSelector = slicer.qMRMLNodeComboBox()
self.transformSelector.nodeTypes = (("vtkMRMLTransformNode"), "")
self.transformSelector.selectNodeUponCreation = True
self.transformSelector.addEnabled = True
self.transformSelector.removeEnabled = True
self.transformSelector.noneEnabled = True
self.transformSelector.showHidden = False
self.transformSelector.showChildNodeTypes = False
self.transformSelector.setMRMLScene(slicer.mrmlScene)
self.transformSelector.setToolTip("The transform for linear registration")
self.transformSelector.enabled = False
# parametersFormLayout.addRow("Target Transform ", self.transformSelector)
self.visualizationWidget = RegistrationLib.VisualizationWidget(self.logic)
self.visualizationWidget.connect("layoutRequested(mode,volumesToShow)", self.onLayout)
parametersFormLayout.addRow(self.visualizationWidget.widget)
#
# IMAG2: Landmarks Widget - just the add button changing the RegistrationLib into MyRegistrationLib
# - manages landmarks
#
# self.landmarksWidget = MyRegistrationLib.myLandmarksWidget(self.logic)
# self.landmarksWidget.connect("landmarkPicked(landmarkName)", self.onLandmarkPicked)
# self.landmarksWidget.connect("landmarkMoved(landmarkName)", self.onLandmarkMoved)
# self.landmarksWidget.connect("landmarkEndMoving(landmarkName)", self.onLandmarkEndMoving)
# parametersFormLayout.addRow(self.landmarksWidget.widget)
self.landmarksWidget = RegistrationLib.LandmarksWidget(self.logic)
self.landmarksWidget.connect("landmarkPicked(landmarkName)", self.onLandmarkPicked)
self.landmarksWidget.connect("landmarkMoved(landmarkName)", self.onLandmarkMoved)
self.landmarksWidget.connect("landmarkEndMoving(landmarkName)", self.onLandmarkEndMoving)
parametersFormLayout.addRow(self.landmarksWidget.widget)
#
# Registration Options
#
self.registrationCollapsibleButton = ctk.ctkCollapsibleButton()
self.registrationCollapsibleButton.text = "Registration"
# self.interfaceFrame.layout().addWidget(self.registrationCollapsibleButton)
registrationFormLayout = qt.QFormLayout(self.registrationCollapsibleButton)
#
# registration type selection
# - allows selection of the active registration type to display
#
try:
slicer.modules.registrationPlugins
except AttributeError:
slicer.modules.registrationPlugins = {}
self.registrationTypeBox = qt.QGroupBox("Registration Type")
self.registrationTypeBox.setLayout(qt.QFormLayout())
self.registrationTypeButtons = {}
self.registrationTypes = sorted(slicer.modules.registrationPlugins.keys())
for registrationType in self.registrationTypes:
plugin = slicer.modules.registrationPlugins[registrationType]
if plugin.name == "ThinPlate Registration":
self.onRegistrationType(registrationType)
# connections
for selector in self.volumeSelectors.values():
selector.connect("currentNodeChanged(vtkMRMLNode*)", self.onVolumeNodeSelect)
# listen to the scene
self.addObservers()
# Add vertical spacer
self.layout.addStretch(1)
if self.developerMode:
def create_hor_layout(elements):
widget = qt.QWidget()
row_layout = qt.QHBoxLayout()
widget.setLayout(row_layout)
for element in elements:
row_layout.addWidget(element)
return widget
"""Developer interface"""
reload_collapsible_button = ctk.ctkCollapsibleButton()
reload_collapsible_button.text = "Reload && Test"
self.layout.addWidget(reload_collapsible_button)
reload_form_layout = qt.QFormLayout(reload_collapsible_button)
reload_button = qt.QPushButton("Reload")
reload_button.toolTip = "Reload this module."
reload_button.name = "ScriptedLoadableModuleTemplate Reload"
reload_button.connect('clicked()', self.onReload)
reload_and_test_button = qt.QPushButton("Reload and Test")
reload_and_test_button.toolTip = "Reload this module and then run the self tests."
reload_and_test_button.connect('clicked()', self.onReloadAndTest)
edit_source_button = qt.QPushButton("Edit")
edit_source_button.toolTip = "Edit the module's source code."
edit_source_button.connect('clicked()', self.on_edit_source)
restart_button = qt.QPushButton("Restart Slicer")
restart_button.toolTip = "Restart Slicer"
restart_button.name = "ScriptedLoadableModuleTemplate Restart"
restart_button.connect('clicked()', slicer.app.restart)
reload_form_layout.addWidget(
create_hor_layout([reload_button, reload_and_test_button, edit_source_button, restart_button]))
def onApplyButton(self):
fix = self.volumeSelectors['Fixed'].currentNode() # fixed image
mov = self.volumeSelectors['Moving'].currentNode() # moving image
trans = self.volumeSelectors['Transformed'].currentNode() # transformed image
transName = "%s-transformed" % mov.GetName()
try:
transNode = slicer.util.getNode(transName)
except slicer.util.MRMLNodeNotFoundException:
transNode = None
fixName = fix.GetName()
fixNode = slicer.util.getNode(fixName)
slicer.util.showStatusMessage("Processing...", 2000)
# IMAG2: harden transform
transLogic = slicer.modules.transforms.logic()
transLogic.hardenTransform(transNode)
# IMAG2: resample image
resample = slicer.modules.brainsresample
parametersRes = {}
parametersRes['inputVolume'] = trans.GetID()
parametersRes['referenceVolume'] = fix.GetID()
parametersRes['outputVolume'] = trans.GetID()
parametersRes['pixelType'] = 'uint'
parametersRes['interpolationMode'] = 'NearestNeighbor'
slicer.cli.run(resample, None, parametersRes, wait_for_completion=True)
# IMAG2: reset the correct origins of the images (they were previously changed during the registration procedure)
fix.SetOrigin(self.FixOrigin)
trans.SetOrigin(self.FixOrigin)
# IMAG2: segmentation step (partial - just thresholding)
SlicerModule = slicer.modules.thresholdscalarvolume
ModelLogic = SlicerModule.cliModuleLogic()
CreateLogic = ModelLogic.CreateNode()
# inputNode=slicer.util.getNode('scanner15ansM')
inputNode = transNode
inputNodeMRI = fixNode
outputNode = slicer.vtkMRMLLabelMapVolumeNode()
# outputNode.SetName("%s-Bones-label" % inputNode.GetName())
outputNode.SetName("%s-Bones-label" % fix.GetName())
outputNode.SetScene(slicer.mrmlScene)
slicer.mrmlScene.AddNode(outputNode)
parametersTh = {}
parametersTh['InputVolume'] = inputNode.GetID()
parametersTh['OutputVolume'] = outputNode.GetID()
parametersTh['ThresholdType'] = 'Above'
parametersTh['ThresholdValue'] = 0.9
parametersTh['Lower'] = -200
parametersTh['Upper'] = 200
parametersTh['OutsideValue'] = 2
CLINode = None
CLINode = slicer.cli.run(slicer.modules.thresholdscalarvolume, None, parametersTh, wait_for_completion=True)
bonesParams = {}
bonesParams['inputvolumeMR'] = inputNodeMRI.GetID()
bonesParams['inputvolumePRE'] = inputNode.GetID()
bonesParams['outputvolume'] = outputNode.GetID()
CLINode_b = None
CLINode_b = slicer.cli.run(slicer.modules.bonesseg, None, bonesParams, wait_for_completion=True)
display = outputNode.GetDisplayNode()
try:
    labelColorTable = slicer.util.getNode('GenericAnatomyColors')
except slicer.util.MRMLNodeNotFoundException:
    labelColorTable = None
if labelColorTable is not None:
    display.SetAndObserveColorNodeID(labelColorTable.GetID())
# IMAG2: model maker
modelParams = {}
modelParams['Name'] = fix.GetName() + '-Bones'
modelParams["InputVolume"] = outputNode.GetID()
modelParams['FilterType'] = "Sinc"
modelParams['Labels'] = 2
modelParams["StartLabel"] = -1
modelParams["EndLabel"] = -1
modelParams['GenerateAll'] = False
modelParams["JointSmoothing"] = False
modelParams["SplitNormals"] = True
modelParams["PointNormals"] = True
modelParams["SkipUnNamed"] = True
modelParams["Decimate"] = 0.25
modelParams["Smooth"] = 30
# - make a new hierarchy node if needed
#
numNodes = slicer.mrmlScene.GetNumberOfNodesByClass("vtkMRMLModelHierarchyNode")
outHierarchy = None
for n in range(numNodes):
node = slicer.mrmlScene.GetNthNodeByClass(n, "vtkMRMLModelHierarchyNode")
if node.GetName() == "Bones Models":
outHierarchy = node
break
if not outHierarchy:
outHierarchy = slicer.vtkMRMLModelHierarchyNode()
outHierarchy.SetScene(slicer.mrmlScene)
outHierarchy.SetName("Bones Models")
slicer.mrmlScene.AddNode(outHierarchy)
modelParams["ModelSceneFile"] = outHierarchy
modelMaker = slicer.modules.modelmaker
slicer.cli.run(modelMaker, None, modelParams)
slicer.util.showStatusMessage("3D Model Making Started...", 2000)
# IMAG2: switch to the four up view
LayoutWidget = slicer.qMRMLLayoutWidget()
LayoutWidget.setMRMLScene(slicer.mrmlScene)
LayoutWidget.setLayout(slicer.vtkMRMLLayoutNode.SlicerLayoutFourUpView)
# IMAG2: center 3D View
layoutManager = slicer.app.layoutManager()
threeDWidget = layoutManager.threeDWidget(0)
threeDView = threeDWidget.threeDView()
threeDView.resetFocalPoint()
# IMAG2: assign background and label volumes
red_logic = layoutManager.sliceWidget("Red").sliceLogic()
red_cn = red_logic.GetSliceCompositeNode()
red_cn.SetBackgroundVolumeID(fix.GetID())
red_cn.SetLabelVolumeID(outputNode.GetID())
red_cn.SetLabelOpacity(0.8)
green_logic = layoutManager.sliceWidget("Green").sliceLogic()
green_cn = green_logic.GetSliceCompositeNode()
green_cn.SetBackgroundVolumeID(fix.GetID())
green_cn.SetLabelVolumeID(outputNode.GetID())
green_cn.SetLabelOpacity(0.8)
yellow_logic = layoutManager.sliceWidget("Yellow").sliceLogic()
yellow_cn = yellow_logic.GetSliceCompositeNode()
yellow_cn.SetBackgroundVolumeID(fix.GetID())
yellow_cn.SetLabelVolumeID(outputNode.GetID())
yellow_cn.SetLabelOpacity(0.8)
# IMAG2: center slice views
red_logic.FitSliceToAll()
green_logic.FitSliceToAll()
yellow_logic.FitSliceToAll()
mark_nodes = slicer.mrmlScene.GetNodesByClass('vtkMRMLMarkupsFiducialNode')
mark_nodes.UnRegister(slicer.mrmlScene)
mark_nodes.InitTraversal()
mark_node = mark_nodes.GetNextItemAsObject()
while mark_node:
if mov.GetName() in mark_node.GetName() or fix.GetName() in mark_node.GetName() or mark_node.GetName() == 'F':
slicer.mrmlScene.RemoveNode(mark_node)
mark_node = mark_nodes.GetNextItemAsObject()
vol_nodes = slicer.mrmlScene.GetNodesByClass('vtkMRMLVolumeNode')
vol_nodes.UnRegister(slicer.mrmlScene)
vol_nodes.InitTraversal()
vol_node = vol_nodes.GetNextItemAsObject()
while vol_node:
if mov.GetName() in vol_node.GetName():
slicer.mrmlScene.RemoveNode(vol_node)
vol_node = vol_nodes.GetNextItemAsObject()
def enter(self):
self.interfaceFrame.enabled = False
self.setupDialog()
def setupDialog(self):
"""setup dialog"""
if not self.volumeSelectDialog:
self.volumeSelectDialog = qt.QDialog(slicer.util.mainWindow())
self.volumeSelectDialog.objectName = 'BonesSegmentationVolumeSelect'
self.volumeSelectDialog.setLayout(qt.QVBoxLayout())
self.volumeSelectLabel = qt.QLabel()
self.volumeSelectDialog.layout().addWidget(self.volumeSelectLabel)
self.volumeSelectorFrame = qt.QFrame()
self.volumeSelectorFrame.objectName = 'VolumeSelectorFrame'
self.volumeSelectorFrame.setLayout(qt.QFormLayout())
self.volumeSelectDialog.layout().addWidget(self.volumeSelectorFrame)
self.volumeDialogSelectors = {}
# #IMAG2:
self.volumeDialogSelectors['Fixed'] = slicer.qMRMLNodeComboBox()
self.volumeDialogSelectors['Fixed'].nodeTypes = (("vtkMRMLScalarVolumeNode"), "")
self.volumeDialogSelectors['Fixed'].selectNodeUponCreation = False
self.volumeDialogSelectors['Fixed'].addEnabled = False
self.volumeDialogSelectors['Fixed'].removeEnabled = True
self.volumeDialogSelectors['Fixed'].noneEnabled = True
self.volumeDialogSelectors['Fixed'].showHidden = False
self.volumeDialogSelectors['Fixed'].showChildNodeTypes = True
self.volumeDialogSelectors['Fixed'].setMRMLScene(slicer.mrmlScene)
self.volumeDialogSelectors['Fixed'].setToolTip("Pick the MRI volume of the patient.")
self.volumeSelectorFrame.layout().addRow("Patient MRI", self.volumeDialogSelectors['Fixed'])
self.volumeButtonFrame = qt.QFrame()
self.volumeButtonFrame.objectName = 'VolumeButtonFrame'
self.volumeButtonFrame.setLayout(qt.QHBoxLayout())
self.volumeSelectDialog.layout().addWidget(self.volumeButtonFrame)
# IMAG2: buttons for the age and the sex selection
# Define the sex button
self.SexButton = qt.QComboBox()
self.SexButton.addItem('Male')
self.SexButton.addItem('Female')
self.volumeSelectorFrame.layout().addRow("Sex", self.SexButton)
# Define the age button
self.AgeButton = qt.QSpinBox()
self.AgeButton.setRange(0, 20)
self.volumeSelectorFrame.layout().addRow("Age", self.AgeButton)
self.volumeDialogApply = qt.QPushButton("Apply", self.volumeButtonFrame)
self.volumeDialogApply.objectName = 'VolumeDialogApply'
self.volumeDialogApply.setToolTip("Use currently selected volume nodes.")
self.volumeButtonFrame.layout().addWidget(self.volumeDialogApply)
self.volumeDialogCancel = qt.QPushButton("Cancel", self.volumeButtonFrame)
self.volumeDialogCancel.objectName = 'VolumeDialogCancel'
self.volumeDialogCancel.setToolTip("Cancel current operation.")
self.volumeButtonFrame.layout().addWidget(self.volumeDialogCancel)
self.volumeDialogApply.connect("clicked()", self.onVolumeDialogApply)
self.volumeDialogCancel.connect("clicked()", self.volumeSelectDialog.hide)
self.volumeSelectLabel.setText("Insert the patient information")
self.volumeSelectDialog.show()
def onVolumeDialogApply(self):
self.volumeSelectDialog.hide()
sexLabel = self.SexButton.currentIndex
# ageLabel=self.AgeButton.currentIndex
ageLabel = self.AgeButton.value
moduleName = 'BonesSegmentation'
filePath = eval('slicer.modules.%s.path' % moduleName.lower())
ModuleDir = os.path.dirname(filePath)
# Loading of the correct Template Volume
if sexLabel == 0: # Male patient
if 0 <= ageLabel <= 1:
VolumePath = os.path.join(ModuleDir, 'Templates', 'M_M7.nrrd')
slicer.util.loadVolume(VolumePath)
volume_nodes = slicer.mrmlScene.GetNodesByClass('vtkMRMLVolumeNode')
volume_nodes.UnRegister(slicer.mrmlScene)
volume_nodes.InitTraversal()
volume_node = volume_nodes.GetNextItemAsObject()
while volume_node:
if 'M_M7' in volume_node.GetName():
VolumeNode = volume_node
break
volume_node = volume_nodes.GetNextItemAsObject()
elif 1 < ageLabel <= 3:
VolumePath = os.path.join(ModuleDir, 'Templates', 'M_Y2.nrrd')
slicer.util.loadVolume(VolumePath)
volume_nodes = slicer.mrmlScene.GetNodesByClass('vtkMRMLVolumeNode')
volume_nodes.UnRegister(slicer.mrmlScene)
volume_nodes.InitTraversal()
volume_node = volume_nodes.GetNextItemAsObject()
while volume_node:
if 'M_Y2' in volume_node.GetName():
VolumeNode = volume_node
break
volume_node = volume_nodes.GetNextItemAsObject()
elif 3 < ageLabel <= 8:
VolumePath = os.path.join(ModuleDir, 'Templates', 'M_Y4.nrrd')
slicer.util.loadVolume(VolumePath)
volume_nodes = slicer.mrmlScene.GetNodesByClass('vtkMRMLVolumeNode')
volume_nodes.UnRegister(slicer.mrmlScene)
volume_nodes.InitTraversal()
volume_node =
import time
import os
import sys
from pathlib import Path
import numpy as nump
import pandas as panda
import uuid
import csv
import inspect
import re
import platform
import requests
import json
from datetime import datetime
from tir.technologies.core.config import ConfigLoader
from tir.technologies.core.logging_config import logger
class Log:
"""
This class is instantiated to create the log file and to append the results and failures to it.
Usage:
>>> # Instanted inside base.py:
>>> self.log = Log()
"""
def __init__(self, suite_datetime="", user="", station="", program="", program_date=("19800101"), version="", release="", database="", issue="", execution_id="", country="", folder="", test_type="TIR", config_path=""):
self.timestamp = time.strftime("%Y%m%d%H%M%S")
today = datetime.today()
self.config = ConfigLoader(config_path)
self.user = user
self.station = station
self.program = program
self.program_date = program_date
self.version = version
self.release = release
self.database = database
self.initial_time = datetime.today()
self.testcase_initial_time = datetime.today()
self.seconds = 0
self.testcase_seconds = 0
self.suite_datetime = suite_datetime
self.table_rows = []
self.test_case_log = []
self.csv_log = []
self.invalid_fields = []
self.table_rows.append(self.generate_header())
self.folder = folder
self.test_type = test_type
self.issue = self.config.issue
self.execution_id = self.config.execution_id
self.country = country
self.start_time = None
self.end_time = None
self.ct_method = ""
self.ct_number = ""
self.so_type = platform.system()
self.so_version = f"{self.so_type} {platform.release()}"
self.build_version = ""
self.lib_version = ""
self.webapp_version = ""
self.date = today.strftime('%Y%m%d')
self.hour = today.strftime('%H:%M:%S')
self.last_exec = today.strftime('%Y%m%d%H%M%S%f')[:-3]
self.hash_exec = ""
self.test_case = self.list_of_testcases()
self.finish_testcase = []
def generate_header(self):
"""
Generates the header line on the log file.
Usage:
>>> # Calling the method:
>>> self.log.generate_header()
"""
return ['Data','Usuário','Estação','Programa','Data Programa','Total CTs','Passou','Falhou', 'Segundos','Versão','Release', 'CTs Falhou', 'Banco de dados','Chamado','ID Execução','Pais', "Tipo de Teste"]
def new_line(self, result, message):
"""
Appends a new line with data to the log file.
:param result: The result of the case.
:type result: bool
:param message: The message to be logged.
:type message: str
Usage:
>>> # Calling the method:
>>> self.log.new_line(True, "Success")
"""
line = []
total_cts = 1
passed = 1 if result else 0
failed = 0 if result else 1
printable_message = self.printable_message(message)
if not self.suite_datetime:
self.suite_datetime = time.strftime("%d/%m/%Y %X")
if self.get_testcase_stack() not in self.test_case_log:
line.extend([self.suite_datetime, self.user, self.station, self.program, self.program_date, total_cts, passed, failed, self.seconds, self.version, self.release, printable_message, self.database, self.issue, self.execution_id, self.country, self.test_type])
self.table_rows.append(line)
self.test_case_log.append(self.get_testcase_stack())
def save_file(self):
"""
Writes the log file to the file system.
Usage:
>>> # Calling the method:
>>> self.log.save_file()
"""
log_file = f"{self.user}_{uuid.uuid4().hex}_auto.csv"
if len(self.table_rows) > 0:
try:
if self.folder:
path = Path(self.folder, self.station+"_v6")
os.makedirs(path)
else:
path = Path("Log", self.station)
os.makedirs(path)
except OSError:
pass
if self.config.smart_test:
open("log_exec_file.txt", "w")
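# Write the CSV only when every test case in the suite has produced a row and this test case has
# not been written yet, or when running from setUpClass with the mandatory columns validated by
# checks_empty_line().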
if ((len(self.table_rows[1:]) == len(self.test_case) and self.get_testcase_stack() not in self.csv_log) or (self.get_testcase_stack() == "setUpClass") and self.checks_empty_line()) :
with open( Path(path, log_file), mode="w", newline="", encoding="windows-1252") as csv_file:
csv_writer_header = csv.writer(csv_file, delimiter=';', quoting=csv.QUOTE_NONE)
csv_writer_header.writerow(self.table_rows[0])
csv_writer = csv.writer(csv_file, delimiter=';', quotechar='"', quoting=csv.QUOTE_NONNUMERIC)
for line in self.table_rows[1:]:
csv_writer.writerow(line)
logger().debug(f"Log file created successfully: {os.path.join(path, log_file)}")
self.csv_log.append(self.get_testcase_stack())
def set_seconds(self, initial_time):
"""
Sets the seconds variable through a calculation of current time minus the execution start time.
Usage:
>>> # Calling the method:
>>> self.log.set_seconds()
"""
delta = datetime.today() - initial_time
return round(delta.total_seconds(), 2)
def list_of_testcases(self):
"""
Returns a list of test cases from suite
"""
runner = next(iter(list(filter(lambda x: "runner.py" in x.filename, inspect.stack()))), None)
if runner:
try:
return list(filter(lambda x: x is not None, list(runner.frame.f_locals['test']._tests)))
except KeyError:
return []
else:
return []
def get_testcase_stack(self):
"""
Returns a string with the current testcase name
[Internal]
"""
return next(iter(list(map(lambda x: x.function, filter(lambda x: re.search('setUpClass', x.function) or re.search('test_', x.function), inspect.stack())))), None)
def checks_empty_line(self):
"""
Checks if the log file is not empty.
03 - 'Programa' 10 - 'Release' 14 - 'ID Execução' 15 - 'Pais'
[Internal]
"""
table_rows_has_line = False
if self.table_rows[1][3] == '':
self.table_rows[1][3] = 'NO PROGRAM'
if self.table_rows[1][10] == '':
self.table_rows[1][10] = '12.1.27'
if self.table_rows[1][15] == '':
self.table_rows[1][15] = 'BRA'
if self.table_rows[1][11] == '':
self.table_rows[1][11] = 'TIMEOUT'
if len(self.table_rows) > 1:
for x in [ 3, 10, 15 ]:
if (self.table_rows[1][x]):
table_rows_has_line = True
else:
table_rows_has_line = False
break
if self.config.smart_test and self.table_rows[1][14] and table_rows_has_line:
table_rows_has_line = True
elif self.config.smart_test:
table_rows_has_line = False
return table_rows_has_line
def generate_result(self, result, message):
"""
Generate the result of a test case and export it to JSON.
:param result: The result of the case.
:type result: bool
:param message: The message to be logged.
:type message: str
Usage:
>>> # Calling the method:
>>> self.log.generate_result(True, "Success")
"""
printable_message = self.printable_message(message)
if not self.suite_datetime:
self.suite_datetime = time.strftime("%d/%m/%Y %X")
self.generate_json(self.generate_dict(result, printable_message))
def get_file_name(self, file_name):
"""
Returns a Testsuite name
"""
testsuite_stack = next(iter(list(filter(lambda x: file_name in x.filename.lower(), inspect.stack()))), None)
if testsuite_stack:
if '/' in testsuite_stack.filename:
split_character = '/'
else:
split_character = '\\'
return testsuite_stack.filename.split(split_character)[-1].split(".")[0]
else:
return ""
def generate_dict(self, result, message):
"""
Returns a dictionary with the log information
"""
log_version = "20200814"
dict_key = {
"APPVERSION": self.build_version,
"CLIVERSION": self.webapp_version,
"COUNTRY": self.country,
"CTMETHOD": self.ct_method,
"CTNUMBER": self.ct_number,
"DBACCESS": "",
"DBTYPE": self.database,
"DBVERSION": "",
"EXECDATE": self.date,
"EXECTIME": self.hour,
"FAIL": 0 if result else 1,
"FAILMSG": message,
"IDENTI": self.issue,
"IDEXEC": self.config.execution_id,
"LASTEXEC": self.last_exec,
"LIBVERSION": self.lib_version,
"OBSERV": "",
"PASS": 1 if result else 0,
"PROGDATE": self.program_date,
"PROGRAM": self.program,
"PROGTIME": "00:00:00",
"RELEASE": self.release,
"SECONDSCT": self.testcase_seconds,
"SOTYPE": self.so_type,
"SOVERSION": self.so_version,
"STATION": self.station,
"STATUS": "", # ???
"TESTCASE": self.get_file_name('testcase'),
"TESTSUITE": self.get_file_name('testsuite'),
"TESTTYPE": "1",
"TOKEN": "TI<PASSWORD>", # ???
"TOOL": self.test_type,
"USRNAME": self.user,
"VERSION": self.version
}
return dict_key
def generate_json(self, dictionary):
"""
"""
server_address1 = self.config.logurl1
server_address2 = self.config.logurl2
success = False
data = dictionary
json_data = json.dumps(data)
endtime = time.time() + 120
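# Retry both configured endpoints for up to two minutes, waiting 10 seconds between attempts;
# if neither accepts the payload, persist it locally via save_json_file().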
while (time.time() < endtime and not success):
success = self.send_request(server_address1, json_data)
if not success:
success = self.send_request(server_address2, json_data)
time.sleep(10)
if not success:
self.save_json_file(json_data)
def send_request(self, server_address, json_data):
"""
Send a POST request with the JSON payload to the given server address; return True on HTTP 200, 201 or 204.
"""
success = False
response = None
headers = {'content-type': 'application/json'}
try:
response = requests.post(server_address.strip(), data=json_data, headers=headers)
except:
pass
if response is not None:
if response.status_code == 200:
logger().debug("Log de execucao enviado com sucesso!")
success = True
elif response.status_code == 201 or response.status_code == 204:
logger().debug("Log de execucao enviado com sucesso!")
success = True
else:
self.save_response_log(response, server_address, json_data)
return False
else:
return False
return success
def save_response_log(self, response, server_address, json_data):
"""
"""
today = datetime.today()
try:
path = Path(self.folder, "new_log", self.station)
os.makedirs(path)
except OSError:
pass
try:
with open( Path(path, "response_log.csv"), mode="a", encoding="utf-8", newline='') as response_log:
csv_write = csv.writer(response_log, delimiter=';', quotechar='"', quoting=csv.QUOTE_MINIMAL)
csv_write.writerow([f"Time: {today.strftime('%Y%m%d%H%M%S%f')[:-3]}", f"URL: {server_address}", f"CT: {json.loads(json_data)['CTMETHOD']}",
f"Status Code: {response.status_code}", f"Message: {response.text}"])
except:
pass
def save_json_file(self, json_data):
"""
Writes the log file to the file system.
Usage:
>>> # Calling the method:
>>> self.log.save_json_file()
"""
try:
if self.folder:
path = Path(self.folder, "new_log", self.station)
os.makedirs(path)
else:
path = Path("Log", self.station)
os.makedirs(path)
except OSError:
pass
log_file = f"{self.user}_{uuid.uuid4().hex}.json"
if self.config.smart_test:
open("log_exec_file.txt", "w")
with open( Path(path, log_file), mode="w", encoding="utf-8") as json_file:
json_file.write(json_data)
logger().debug(f"Log file created successfully: {Path(path, log_file)}")
def ident_test(self):
"""
:return:
"""
ct_method = self.get_testcase_stack()
ct_number = ''.join(list(filter(str.isdigit, f"{ct_method.split('_')[-1]}"))) if ct_method else ""
return (ct_method, ct_number)
def take_screenshot_log(self, driver, stack_item="", test_number=""):
"""
[Internal]
Takes a screenshot and saves it in the log screenshot folder defined in the config.
:param driver: The selenium driver.
:type: Selenium Driver
:param stack_item: test case stack
:type: str
:param test_number: test case number
:type: str
Usage:
>>> # Calling the method:
>>> self.log.take_screenshot_log()
"""
if not stack_item:
stack_item = self.get_testcase_stack()
if stack_item == "setUpClass":
stack_item = self.get_file_name("testsuite")
if not test_number:
test_number = f"{stack_item.split('_')[-1]} -" if stack_item else ""
if not self.release:
self.release = self.config.release
testsuite = self.get_file_name("testsuite")
today = datetime.today()
if self.search_stack("log_error"):
screenshot_file = self.screenshot_file_name("error", stack_item)
elif self.search_stack("CheckResult"):
screenshot_file = self.screenshot_file_name("CheckResult_result_divergence", stack_item)
else:
screenshot_file = self.screenshot_file_name(stack_item)
if self.config.debug_log:
logger().debug(f"take_screenshot_log in:{datetime.now()}\n")
try:
if self.config.log_http:
folder_path = Path(self.config.log_http, self.config.country, self.release, self.config.issue, self.config.execution_id, testsuite)
path = Path(folder_path, screenshot_file)
os.makedirs(Path(folder_path))
else:
path = Path("Log", self.station, screenshot_file)
os.makedirs(Path("Log", self.station))
except OSError:
pass
try:
driver.save_screenshot(str(path))
logger().debug(f"Screenshot file created successfully: {path}")
except Exception as e:
logger().exception(f"Warning Log Error save_screenshot exception {str(e)}")
def screenshot_file_name(self, description="", stack_item=""):
"""
:param name:
:return:
"""
today = datetime.today()
if description:
return f"{self.user}_{today.strftime('%Y%m%d%H%M%S%f')[:-3]}_{stack_item}_{description}.png"
else:
return f"{self.user}_{today.strftime('%Y%m%d%H%M%S%f')[:-3]}_{stack_item}.png"
def printable_message(self, string):
"""
:param string:
:return:
"""
return re.sub(';', ',', ''.join(filter(lambda x: x.isprintable(), string))[:600])
def search_stack(self, function):
"""
Returns True if passed function is present in the call stack.
:param function: Name of the function to look for in the call stack
<reponame>TAGC-Brun/RAINET-RNA
import shutil
import os
from os.path import basename
from fr.tagc.rainet.core.data import DataConstants
from fr.tagc.rainet.core.data.TableStatus import TableStatus
from fr.tagc.rainet.core.execution.ExecutionStrategy import ExecutionStrategy
from fr.tagc.rainet.core.util import Constants
from fr.tagc.rainet.core.util.exception.RainetException import RainetException
from fr.tagc.rainet.core.util.log.Logger import Logger
from fr.tagc.rainet.core.util.option import OptionConstants
from fr.tagc.rainet.core.util.option.OptionManager import OptionManager
from fr.tagc.rainet.core.util.parser.FastaParser import FastaParser
from fr.tagc.rainet.core.util.parser.NetworkModuleAnnotationParser import NetworkModuleAnnotationParser
from fr.tagc.rainet.core.util.parser.NetworkModuleParser import NetworkModuleParser
from fr.tagc.rainet.core.util.parser.OboParser import OboParser
from fr.tagc.rainet.core.util.parser.TSVParser import TSVParser
from fr.tagc.rainet.core.util.sql.SQLManager import SQLManager
from fr.tagc.rainet.core.util.sql.SQLUtil import SQLUtil
from fr.tagc.rainet.core.util.time.Timer import Timer
from fr.tagc.rainet.core.util.property.PropertyManager import PropertyManager
from fr.tagc.rainet.core.util.data.DataManager import DataManager
from fr.tagc.rainet.core.data.PPINetwork import PPINetwork
from fr.tagc.rainet.core.data.PPINetworkInteraction import PPINetworkInteraction
from fr.tagc.rainet.core.data.RNA import RNA
from fr.tagc.rainet.core.data.RNACrossReference import RNACrossReference
from fr.tagc.rainet.core.data.MRNA import MRNA
from fr.tagc.rainet.core.data.LncRNA import LncRNA
from fr.tagc.rainet.core.data.OtherRNA import OtherRNA
from fr.tagc.rainet.core.data.ProteinRNAInteractionCatRAPID import ProteinRNAInteractionCatRAPID
from fr.tagc.rainet.core.data.ProteinCrossReference import ProteinCrossReference
from fr.tagc.rainet.core.data.RNATissueExpression import RNATissueExpression
from fr.tagc.rainet.core.data.Tissue import Tissue
from fr.tagc.rainet.core.data.InteractingRNA import InteractingRNA
from fr.tagc.rainet.core.data.InteractingProtein import InteractingProtein
from fr.tagc.rainet.core.data.BioplexCluster import BioplexCluster
from fr.tagc.rainet.core.data.ProteinBioplexAnnotation import ProteinBioplexAnnotation
from fr.tagc.rainet.core.data.WanCluster import WanCluster
from fr.tagc.rainet.core.data.ProteinWanAnnotation import ProteinWanAnnotation
from fr.tagc.rainet.core.data.CorumCluster import CorumCluster
from fr.tagc.rainet.core.data.ProteinCorumAnnotation import ProteinCorumAnnotation
from fr.tagc.rainet.core.data.CustomCluster import CustomCluster
from fr.tagc.rainet.core.data.ProteinCustomAnnotation import ProteinCustomAnnotation
# #
# This class defines the Strategy that executes the insertion of the various data into the database
class InsertionStrategy( ExecutionStrategy ):
# #
# The Strategy execution method
def execute( self ):
self.DBPath = OptionManager.get_instance().get_option( OptionConstants.OPTION_DB_NAME )
self.forceOverride = OptionManager.get_instance().get_option( OptionConstants.OPTION_INSERTION_FORCE_OVERRIDE )
self.insert_data()
# #
# Insert the various data into the database using the files defined in the configuration file
#
def insert_data( self):
# Create Logger instance by using the first log action.
Logger.get_instance().info( "InsertionStrategy.insert_data: Starting..." )
# # Backup the database file
# try:
# Logger.get_instance().info( "InsertionStrategy.insert_data: Backuping DB file..." )
# shutil.copyfile(self.DBPath, self.DBPath + ".back")
# except IOError as ioe:
# Logger.get_instance().info( " warning : Unable to backup database file : " + self.DBPath + " : " + str( ioe))
# Create database sqlite file at the provided path
SQLManager.get_instance().build_database( self.DBPath, self.forceOverride )
self.check_database_tables()
# Retrieve the insertion properties
PropertyManager.get_instance().read_properties( OptionManager.get_instance().get_option( OptionConstants.OPTION_INSERTION_PROPERTIES_PATH, True))
# Start chrono
Timer.get_instance().start_chrono()
# Indicate insertion mode
if self.forceOverride:
Logger.get_instance().info( " -- MODE FORCE OVERRIDE -- " )
else:
Logger.get_instance().info( " -- MODE RESUME -- " )
#=======================================================================
# INSERTION OF DATA
#=======================================================================
try:
#===================================================================
# PROTEIN DEFINITION
#===================================================================
# Parse the protein file
input_file = PropertyManager.get_instance().get_property( DataConstants.PROTEIN_UNIPROT_DEFINITION_PROPERTY, True)
self.launch_insertion_TSV( input_file, True, DataConstants.PROTEIN_HEADERS, DataConstants.PROTEIN_CLASS, DataConstants.PROTEIN_PARAMS, None, DataConstants.PROTEIN_COMMENT_CHAR )
# Parse the protein cross references file
input_file = PropertyManager.get_instance().get_property( DataConstants.PROTEIN_CROSSREFERENCES_PROPERTY, True)
self.launch_insertion_TSV( input_file, False, DataConstants.PROTEIN_CROSS_REFERENCE_HEADERS, DataConstants.PROTEIN_CROSS_REFERENCE_CLASS, DataConstants.PROTEIN_CROSS_REFERENCE_PARAMS, None, DataConstants.PROTEIN_CROSS_REFERENCE_COMMENT_CHAR )
# Parse the protein isoform file
input_file = PropertyManager.get_instance().get_property( DataConstants.PROTEIN_ISOFORMS_PROPERTY, True)
self.launch_insertion_Fasta( input_file, DataConstants.ISOFORM_CLASS, DataConstants.ISOFORM_REGULAR_EXPRESSION, DataConstants.ISOFORM_GROUPS, DataConstants.ISOFORM_PARAMS, DataConstants.ISOFORM_PARAMS_VALUE_ALTERNATIVE, DataConstants.ISOFORM_COMMENT_CHAR )
# Parse the protein domain file of SMART DB
input_file = PropertyManager.get_instance().get_property( DataConstants.PROTEIN_DOMAIN_SMART_PROPERTY, True)
self.launch_insertion_TSV( input_file, True, DataConstants.PROTEIN_DOMAIN_HEADERS_SMART, DataConstants.PROTEIN_DOMAIN_CLASS, DataConstants.PROTEIN_DOMAIN_PARAM_SMART, DataConstants.PROTEIN_DOMAIN_VALUE_SMART, DataConstants.PROTEIN_DOMAIN_COMMENT_CHAR, "SMART" , False)
# Parse the protein domain file of PFAM DB
input_file = PropertyManager.get_instance().get_property( DataConstants.PROTEIN_DOMAIN_PFAM_PROPERTY, True)
self.launch_insertion_TSV( input_file, False, DataConstants.PROTEIN_DOMAIN_HEADERS_PFAM, DataConstants.PROTEIN_DOMAIN_CLASS, DataConstants.PROTEIN_DOMAIN_PARAM_PFAM, DataConstants.PROTEIN_DOMAIN_VALUE_PFAM, DataConstants.PROTEIN_DOMAIN_COMMENT_CHAR, "PFAM", False )
#===================================================================
# FUNCTION AND PATHWAY ANNOTATIONS
#===================================================================
# Parse the Gene Ontology file
input_file = PropertyManager.get_instance().get_property( DataConstants.GENE_ONTOLOGY_DEFINITION_PROPERTY, True)
self.launch_insertion_Obo( input_file, DataConstants.GENE_ONTOLOGY_CLASS, DataConstants.GENE_ONTOLOGY_ID_TAG, DataConstants.GENE_ONTOLOGY_NAME_TAG, DataConstants.GENE_ONTOLOGY_NAMESPACE_TAG )
# Parse the Protein Gene Ontology annotation file
input_file = PropertyManager.get_instance().get_property( DataConstants.GENE_ONTOLOGY_ANNOTATION_PROPERTY, True)
self.launch_insertion_TSV( input_file, False, DataConstants.PROTEIN_GO_ANNOTATION_HEADERS, DataConstants.PROTEIN_GO_ANNOTATION_CLASS, DataConstants.PROTEIN_GO_ANNOTATION_PARAMS, None, DataConstants.PROTEIN_GO_ANNOTATION_COMMENT_CHAR )
# Parse the KEGG pathway file
input_file = PropertyManager.get_instance().get_property( DataConstants.KEGG_PATHWAY_DEFINITION_PROPERTY, True)
self.launch_insertion_TSV( input_file, True, DataConstants.KEGG_PATHWAY_HEADERS, DataConstants.KEGG_PATHWAY_CLASS, DataConstants.KEGG_PATHWAY_PARAMS, None, DataConstants.KEGG_PATHWAY_COMMENT_CHAR )
# Parse the Protein KEGG Pathway annotation file
input_file = PropertyManager.get_instance().get_property( DataConstants.KEGG_PATHWAY_ANNOTATION_PROPERTY, True)
self.launch_insertion_TSV( input_file, True, DataConstants.KEGG_PATHWAY_ANNOTATION_HEADERS, DataConstants.KEGG_PATHWAY_ANNOTATION_CLASS, DataConstants.KEGG_PATHWAY_ANNOTATION_PARAMS, None, DataConstants.KEGG_PATHWAY_ANNOTATION_COMMENT_CHAR )
#===================================================================
# REACTOME
#===================================================================
# Parse the Reactome pathway file
input_file = PropertyManager.get_instance().get_property( DataConstants.REACTOME_PATHWAY_DEFINITION_PROPERTY, True)
self.launch_insertion_TSV( input_file, False, DataConstants.REACTOME_PATHWAY_HEADERS, DataConstants.REACTOME_PATHWAY_CLASS, DataConstants.REACTOME_PATHWAY_PARAMS, None, DataConstants.REACTOME_PATHWAY_COMMENT_CHAR )
# Parse the Protein Reactome Pathway annotation file
input_file = PropertyManager.get_instance().get_property( DataConstants.REACTOME_PATHWAY_ANNOTATION_PROPERTY, True)
self.launch_insertion_TSV( input_file, False, DataConstants.REACTOME_PATHWAY_ANNOTATION_HEADERS, DataConstants.REACTOME_PATHWAY_ANNOTATION_CLASS, DataConstants.REACTOME_PATHWAY_ANNOTATION_PARAMS, None, DataConstants.REACTOME_PATHWAY_ANNOTATION_COMMENT_CHAR )
#===================================================================
# BIOPLEX
#===================================================================
# Parse the file listing Bioplex clusters
input_file = PropertyManager.get_instance().get_property( DataConstants.BIOPLEX_CLUSTER_DEFINITION_PROPERTY, True)
self.launch_insertion_TSV( input_file, False, DataConstants.BIOPLEX_CLUSTER_HEADERS,
DataConstants.BIOPLEX_CLUSTER_CLASS, DataConstants.BIOPLEX_CLUSTER_PARAMS,
None, DataConstants.BIOPLEX_CLUSTER_COMMENT_CHAR )
# Parse the file with Bioplex annotations
input_file = PropertyManager.get_instance().get_property( DataConstants.BIOPLEX_ANNOTATION_PROPERTY, True)
self.launch_insertion_TSV( input_file, False, DataConstants.BIOPLEX_ANNOTATION_HEADERS,
DataConstants.BIOPLEX_ANNOTATION_CLASS, DataConstants.BIOPLEX_ANNOTATION_PARAMS,
None, DataConstants.BIOPLEX_ANNOTATION_COMMENT_CHAR )
#===================================================================
# WAN CLUSTERS
#===================================================================
# Parse the file listing Wan clusters
input_file = PropertyManager.get_instance().get_property( DataConstants.WAN_CLUSTER_DEFINITION_PROPERTY, True)
self.launch_insertion_TSV( input_file, False, DataConstants.WAN_CLUSTER_HEADERS,
DataConstants.WAN_CLUSTER_CLASS, DataConstants.WAN_CLUSTER_PARAMS,
None, DataConstants.WAN_CLUSTER_COMMENT_CHAR )
# Parse the file with Wan annotations
input_file = PropertyManager.get_instance().get_property( DataConstants.WAN_ANNOTATION_PROPERTY, True)
self.launch_insertion_TSV( input_file, False, DataConstants.WAN_ANNOTATION_HEADERS,
DataConstants.WAN_ANNOTATION_CLASS, DataConstants.WAN_ANNOTATION_PARAMS,
None, DataConstants.WAN_ANNOTATION_COMMENT_CHAR )
#===================================================================
# CORUM CLUSTERS
#===================================================================
# Parse the file listing Corum clusters
input_file = PropertyManager.get_instance().get_property( DataConstants.CORUM_CLUSTER_DEFINITION_PROPERTY, True)
self.launch_insertion_TSV( input_file, False, DataConstants.CORUM_CLUSTER_HEADERS,
DataConstants.CORUM_CLUSTER_CLASS, DataConstants.CORUM_CLUSTER_PARAMS,
None, DataConstants.CORUM_CLUSTER_COMMENT_CHAR )
# Parse the file with Corum annotations
input_file = PropertyManager.get_instance().get_property( DataConstants.CORUM_ANNOTATION_PROPERTY, True)
self.launch_insertion_TSV( input_file, False, DataConstants.CORUM_ANNOTATION_HEADERS,
DataConstants.CORUM_ANNOTATION_CLASS, DataConstants.CORUM_ANNOTATION_PARAMS,
None, DataConstants.CORUM_ANNOTATION_COMMENT_CHAR )
#===================================================================
# CUSTOM CLUSTERS
#===================================================================
# Parse the file listing Custom clusters
input_file = PropertyManager.get_instance().get_property( DataConstants.CUSTOM_CLUSTER_DEFINITION_PROPERTY, True)
self.launch_insertion_TSV( input_file, False, DataConstants.CUSTOM_CLUSTER_HEADERS,
DataConstants.CUSTOM_CLUSTER_CLASS, DataConstants.CUSTOM_CLUSTER_PARAMS,
None, DataConstants.CUSTOM_CLUSTER_COMMENT_CHAR )
# Parse the file with Custom annotations
input_file = PropertyManager.get_instance().get_property( DataConstants.CUSTOM_ANNOTATION_PROPERTY, True)
self.launch_insertion_TSV( input_file, False, DataConstants.CUSTOM_ANNOTATION_HEADERS,
DataConstants.CUSTOM_ANNOTATION_CLASS, DataConstants.CUSTOM_ANNOTATION_PARAMS,
None, DataConstants.CUSTOM_ANNOTATION_COMMENT_CHAR )
#===================================================================
# INTERACTOME
#===================================================================
# Parse the protein interaction file
input_file = PropertyManager.get_instance().get_property( DataConstants.INTERACTOME_DEFINITION_PROPERTY, True)
self.launch_insertion_TSV( input_file, False, DataConstants.INTERACTOME_HEADER, DataConstants.INTERACTOME_CLASS, DataConstants.INTERACTOME_PARAMS, None, DataConstants.INTERACTOME_COMMENT_CHAR )
# Parse the protein interaction network file
input_file = PropertyManager.get_instance().get_property( DataConstants.INTERACTOME_NETWORK_DEFINITION_PROPERTY, True)
ppi_default_values = [None, None, os.path.basename( input_file)]
self.launch_insertion_TSV( input_file, False, DataConstants.INTERACTOME_NETWORK_HEADER, DataConstants.INTERACTOME_NETWORK_CLASS, DataConstants.INTERACTOME_NETWORK_PARAMS, ppi_default_values, DataConstants.INTERACTOME_NETWORK_COMMENT_CHAR )
# Parse the Network Module file
input_file = PropertyManager.get_instance().get_property( DataConstants.INTERACTOME_NETWORK_PARTITION_DEFINITION_PROPERTY, True)
self.launch_insertion_NetworkModule( input_file, DataConstants.INTERACTOME_NETWORK_PARTITION_CLASS, DataConstants.INTERACTOME_NETWORK_PARTITION_CLASS_TAG, DataConstants.INTERACTOME_NETWORK_PARTITION_COMMENT_CHAR )
# Parse the Network Module Annotation file
input_file = PropertyManager.get_instance().get_property( DataConstants.INTERACTOME_NETWORK_PARTITION_ANNOTATION_PROPERTY, True)
self.launch_insertion_NetworkModuleAnnotation( input_file, DataConstants.INTERACTOME_NETWORK_PARTITION_ANNOTATION_CLASS, DataConstants.INTERACTOME_NETWORK_PARTITION_ANNOTATION_CLASS_TAG, DataConstants.INTERACTOME_NETWORK_PARTITION_ANNOTATION_CLASS_REGEX, DataConstants.INTERACTOME_NETWORK_PARTITION_ANNOTATION_PROTEIN_TAG, DataConstants.INTERACTOME_NETWORK_PARTITION_ANNOTATION_ANNOTATION_TAG, DataConstants.INTERACTOME_NETWORK_PARTITION_COMMENT_CHAR )
# Parse the protein redundancy file
input_file = PropertyManager.get_instance().get_property( DataConstants.INTERACTOME_NETWORK_REDUNDANCY_DEFINITION_PROPERTY, True)
interactome_network_redundancy_definition_value = [None, basename( input_file), None]
self.launch_insertion_TSV( input_file, False, DataConstants.INTERACTOME_NETWORK_REDUNDANCY_DEFINITION_HEADERS, DataConstants.INTERACTOME_NETWORK_REDUNDANCY_DEFINITION_CLASS, DataConstants.INTERACTOME_NETWORK_REDUNDANCY_DEFINITION_PARAMS, interactome_network_redundancy_definition_value, DataConstants.INTERACTOME_NETWORK_REDUNDANCY_DEFINITION_COMMENT_CHAR, "Redundancy", False )
#===================================================================
# RNA DEFINITION
#===================================================================
# Make query of specific type of protein cross references to speed up insertion
DataManager.get_instance().perform_query( DataConstants.PROTEIN_ENSP_XREF_KW,
"query( ProteinCrossReference.protein_id,ProteinCrossReference.crossReferenceID ).filter(ProteinCrossReference.sourceDB == DataConstants.PROTEIN_ENSP_XREF_DB).all()")
# Convert query into a dictionary
DataManager.get_instance().query_to_dict( DataConstants.PROTEIN_ENSP_XREF_KW, 1, 0)
# Parse the RNA file
input_file = PropertyManager.get_instance().get_property( DataConstants.RNA_DEFINITION_PROPERTY, True)
self.launch_insertion_TSV( input_file, True, DataConstants.RNA_HEADERS, DataConstants.RNA_CLASS, DataConstants.RNA_PARAMS, None, DataConstants.RNA_COMMENT_CHAR )
# Parse the RNA cross references file
input_file = PropertyManager.get_instance().get_property( DataConstants.RNA_CROSS_REFERENCE_PROPERTY, True)
self.launch_insertion_TSV( input_file, False, DataConstants.RNA_CROSS_REFERENCE_HEADERS, DataConstants.RNA_CROSS_REFERENCE_CLASS, DataConstants.RNA_CROSS_REFERENCE_PARAMS, None, DataConstants.RNA_CROSS_REFERENCE_COMMENT_CHAR )
#===================================================================
# RNA TISSUE EXPRESSION
#===================================================================
# Make query of all RNA IDs to speed up insertion
DataManager.get_instance().perform_query( DataConstants.RNA_ALL_KW, "query( RNA ).all()")
# Format query into dict data structure
DataManager.get_instance().query_to_object_dict( DataConstants.RNA_ALL_KW, "transcriptID")
# Make query of all Protein IDs (uniprotAC) to speed up insertion
DataManager.get_instance().perform_query( DataConstants.PROT_ALL_KW, "query( Protein ).all()")
# Format query into dict data structure
DataManager.get_instance().query_to_object_dict( DataConstants.PROT_ALL_KW, "uniprotAC")
# Parse the RNA tissue expression file
input_file = PropertyManager.get_instance().get_property( DataConstants.RNA_TISSUE_EXPRESSION_PROPERTY, True)
self.launch_insertion_TSV( input_file, True, DataConstants.RNA_TISSUE_EXPRESSION_HEADERS, DataConstants.RNA_TISSUE_EXPRESSION_CLASS,
DataConstants.RNA_TISSUE_EXPRESSION_PARAMS, DataConstants.RNA_TISSUE_EXPRESSION_VALUE,
DataConstants.RNA_TISSUE_EXPRESSION_COMMENT_CHAR )
#===================================================================
# PROTEIN RNA INTERACTION
#===================================================================
self.forceOverride = 1
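# (forceOverride is enabled only for this interaction block, so these tables are re-inserted
# even in resume mode; it is restored to 0 after the catRAPID parsing below.)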
# Parse the file listing RNA with catRAPID data
input_file = PropertyManager.get_instance().get_property( DataConstants.INTERACTING_RNA_DEFINITION_PROPERTY, True)
self.launch_insertion_TSV( input_file, True, DataConstants.INTERACTING_RNA_DEFINITION_HEADERS,
DataConstants.INTERACTING_RNA_DEFINITION_CLASS, DataConstants.INTERACTING_RNA_DEFINITION_PARAMS,
None, DataConstants.INTERACTING_RNA_DEFINITION_COMMENT_CHAR )
# Parse the file listing Proteins with catRAPID data
input_file = PropertyManager.get_instance().get_property( DataConstants.INTERACTING_PROTEIN_DEFINITION_PROPERTY, True)
self.launch_insertion_TSV( input_file, True, DataConstants.INTERACTING_PROTEIN_DEFINITION_HEADERS,
DataConstants.INTERACTING_PROTEIN_DEFINITION_CLASS, DataConstants.INTERACTING_PROTEIN_DEFINITION_PARAMS,
None, DataConstants.INTERACTING_PROTEIN_DEFINITION_COMMENT_CHAR )
# Initialize data items to store missing interactions
if DataConstants.PROTEIN_RNA_INTERACTION_CATRAPID_MISSING_RNA_KW not in DataManager.get_instance().data:
DataManager.get_instance().store_data(DataConstants.PROTEIN_RNA_INTERACTION_CATRAPID_MISSING_RNA_KW,[])
if DataConstants.PROTEIN_RNA_INTERACTION_CATRAPID_MISSING_PROT_KW not in DataManager.get_instance().data:
DataManager.get_instance().store_data(DataConstants.PROTEIN_RNA_INTERACTION_CATRAPID_MISSING_PROT_KW,[])
# Parse the ProteinRNAInteractionCatRAPID file
input_file = PropertyManager.get_instance().get_property( DataConstants.PROTEIN_RNA_INTERACTION_CATRAPID_DEFINITION_PROPERTY, True)
self.launch_insertion_TSV( input_file, False, DataConstants.PROTEIN_RNA_INTERACTION_CATRAPID_HEADERS,
DataConstants.PROTEIN_RNA_INTERACTION_CATRAPID_CLASS, DataConstants.PROTEIN_RNA_INTERACTION_CATRAPID_PARAMS,
None, DataConstants.PROTEIN_RNA_INTERACTION_CATRAPID_COMMENT_CHAR )
self.forceOverride = 0
# Remove data that will no longer be used to reduce memory usage
DataManager.get_instance().delete_data(DataConstants.PROTEIN_ENSP_XREF_KW)
DataManager.get_instance().delete_data(DataConstants.RNA_ALL_KW)
DataManager.get_instance().delete_data(DataConstants.PROT_ALL_KW)
except RainetException as re:
Logger.get_instance().error( re.to_string() )
Timer.get_instance().stop_chrono( "ERROR : Data insertion FAILED" )
return
# # Report on potential missing data
# self.check_missing_data()
# Stop the chrono
Timer.get_instance().stop_chrono( "Data insertion finished" )
# #
# Insert data linked to a TSV (tab separated) file
#
# @param file_path : string - The path to the data file
# @param has_headers : boolean - Indicates if the file has a header line as first line
# @param headers : list<string> - the list of header names
# @param class_name : string - The name of the class to instantiate for the inserted entries
Criteria:** Includes only relevant concepts associated with diagnoses that represent frailty. This is a grouping of ICD10CM and SNOMEDCT codes.
**Exclusion Criteria:** No exclusions.
"""
OID = '2.16.840.1.113883.3.464.1003.113.12.1074'
VALUE_SET_NAME = 'Frailty Diagnosis'
EXPANSION_VERSION = 'eCQM Update 2020-05-07'
ICD10CM = {
'L89119',
'L89139',
'L89149',
'L89159',
'L89209',
'L89309',
'L89899',
'L8990',
'M6250',
'M6281',
'M6284',
'W010XXA',
'W010XXD',
'W010XXS',
'W0110XA',
'W0110XD',
'W0110XS',
'W01110A',
'W01110D',
'W01110S',
'W01111A',
'W01111D',
'W01111S',
'W01118A',
'W01118D',
'W01118S',
'W01119A',
'W01119D',
'W01119S',
'W01190A',
'W01190D',
'W01190S',
'W01198A',
'W01198D',
'W01198S',
'W06XXXA',
'W06XXXD',
'W06XXXS',
'W07XXXA',
'W07XXXD',
'W07XXXS',
'W08XXXA',
'W08XXXD',
'W08XXXS',
'W100XXA',
'W100XXD',
'W100XXS',
'W101XXA',
'W101XXD',
'W101XXS',
'W102XXA',
'W102XXD',
'W102XXS',
'W108XXA',
'W108XXD',
'W108XXS',
'W109XXA',
'W109XXD',
'W109XXS',
'W1800XA',
'W1800XD',
'W1800XS',
'W1802XA',
'W1802XD',
'W1802XS',
'W1809XA',
'W1809XD',
'W1809XS',
'W1811XA',
'W1811XD',
'W1811XS',
'W1812XA',
'W1812XD',
'W1812XS',
'W182XXA',
'W182XXD',
'W182XXS',
'W1830XA',
'W1830XD',
'W1830XS',
'W1831XA',
'W1831XD',
'W1831XS',
'W1839XA',
'W1839XD',
'W1839XS',
'W19XXXA',
'W19XXXD',
'W19XXXS',
'Y92199',
'Z593',
'Z736',
'Z7401',
'Z7409',
'Z741',
'Z742',
'Z743',
'Z748',
'Z749',
'Z9181',
'Z9911',
'Z993',
'Z9981',
'Z9989'
}
SNOMEDCT = {
'10637031000119106',
'10637071000119109',
'10637111000119102',
'10637151000119101',
'129588001',
'138371000119104',
'162845004',
'17886000',
'20902002',
'217082002',
'217083007',
'217084001',
'217086004',
'217088003',
'217090002',
'217092005',
'217093000',
'217094006',
'217142006',
'217154006',
'217155007',
'217156008',
'217157004',
'217158009',
'217173005',
'225558004',
'225562005',
'225563000',
'242388006',
'242389003',
'242390007',
'242391006',
'242392004',
'242393009',
'242394003',
'242395002',
'242396001',
'242402009',
'242404005',
'242405006',
'242406007',
'242407003',
'242413007',
'242414001',
'242419006',
'250054005',
'269699007',
'274918000',
'274919008',
'288296009',
'33036003',
'40104005',
'414188008',
'414189000',
'414190009',
'427849003',
'428484005',
'429621003',
'44188002',
'54840006',
'56307009',
'67223001',
'699214007',
'699215008',
'699216009',
'699218005',
'715504003',
'736313002',
'74541001',
'83468000',
'8960001000004106',
'90619006',
'92341000119107'
}
class GenitalHerpes(ValueSet):
"""
**Clinical Focus:** This value set contains concepts that represent genital herpes infections.
**Data Element Scope:** This value set may use the Quality Data Model (QDM) category related to Diagnosis.
**Inclusion Criteria:** Includes only relevant concepts associated with female genital herpes infections. This is a grouping of ICD-10-CM and SNOMED CT codes.
**Exclusion Criteria:** Excludes codes that indicate male genital herpes infections.
"""
OID = '2.16.840.1.113883.3.464.1003.110.12.1049'
VALUE_SET_NAME = '<NAME>'
EXPANSION_VERSION = 'eCQM Update 2020-05-07'
ICD10CM = {
'A6000',
'A6003',
'A6004',
'A6009'
}
SNOMEDCT = {
'129670002',
'27420004',
'278068003',
'33839006',
'402888002',
'402890001',
'402894005',
'402896007',
'423391007',
'427578006',
'439912007',
'439913002',
'440714005',
'59819007'
}
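# Illustrative membership check (a sketch; the ValueSet base class API is not shown here, so only
# the code-set attributes defined above are assumed):
# >>> 'A6000' in GenitalHerpes.ICD10CM
# True
# >>> '27420004' in GenitalHerpes.SNOMEDCT
# True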
class Glaucoma(ValueSet):
"""
**Clinical Focus:** This value set contains concepts that represent a diagnosis of glaucoma.
**Data Element Scope:** This value set may use the Quality Data Model (QDM) category related to Diagnosis.
**Inclusion Criteria:** Includes only relevant concepts associated with a diagnosis of glaucoma.
**Exclusion Criteria:** Excludes concepts that pertain to 'unspecified eye.'
"""
OID = '2.16.840.1.113883.3.526.3.1423'
VALUE_SET_NAME = 'Glaucoma'
EXPANSION_VERSION = 'eCQM Update 2020-05-07'
ICD10CM = {
'H4010X0',
'H4010X1',
'H4010X2',
'H4010X3',
'H4010X4',
'H401110',
'H401111',
'H401112',
'H401113',
'H401114',
'H401120',
'H401121',
'H401122',
'H401123',
'H401124',
'H401130',
'H401131',
'H401132',
'H401133',
'H401134',
'H401210',
'H401211',
'H401212',
'H401213',
'H401214',
'H401220',
'H401221',
'H401222',
'H401223',
'H401224',
'H401230',
'H401231',
'H401232',
'H401233',
'H401234',
'H401310',
'H401311',
'H401312',
'H401313',
'H401314',
'H401320',
'H401321',
'H401322',
'H401323',
'H401324',
'H401330',
'H401331',
'H401332',
'H401333',
'H401334',
'H401410',
'H401411',
'H401412',
'H401413',
'H401414',
'H401420',
'H401421',
'H401422',
'H401423',
'H401424',
'H401430',
'H401431',
'H401432',
'H401433',
'H401434',
'H40151',
'H40152',
'H40153',
'H4020X0',
'H4020X1',
'H4020X2',
'H4020X3',
'H4020X4',
'H40211',
'H40212',
'H40213',
'H402210',
'H402211',
'H402212',
'H402213',
'H402214',
'H402220',
'H402221',
'H402222',
'H402223',
'H402224',
'H402230',
'H402231',
'H402232',
'H402233',
'H402234',
'H40231',
'H40232',
'H40233',
'H40241',
'H40242',
'H40243',
'H4031X0',
'H4031X1',
'H4031X2',
'H4031X3',
'H4031X4',
'H4032X0',
'H4032X1',
'H4032X2',
'H4032X3',
'H4032X4',
'H4033X0',
'H4033X1',
'H4033X2',
'H4033X3',
'H4033X4',
'H4041X0',
'H4041X1',
'H4041X2',
'H4041X3',
'H4041X4',
'H4042X0',
'H4042X1',
'H4042X2',
'H4042X3',
'H4042X4',
'H4043X0',
'H4043X1',
'H4043X2',
'H4043X3',
'H4043X4',
'H4051X0',
'H4051X1',
'H4051X2',
'H4051X3',
'H4051X4',
'H4052X0',
'H4052X1',
'H4052X2',
'H4052X3',
'H4052X4',
'H4053X0',
'H4053X1',
'H4053X2',
'H4053X3',
'H4053X4',
'H4061X0',
'H4061X1',
'H4061X2',
'H4061X3',
'H4061X4',
'H4062X0',
'H4062X1',
'H4062X2',
'H4062X3',
'H4062X4',
'H4063X0',
'H4063X1',
'H4063X2',
'H4063X3',
'H4063X4',
'H40811',
'H40812',
'H40813',
'H40821',
'H40822',
'H40823',
'H40831',
'H40832',
'H40833',
'H4089',
'Q150'
}
SNOMEDCT = {
'10100008',
'111513000',
'111514006',
'12239301000119102',
'12239421000119101',
'12239461000119106',
'12239501000119106',
'15374009',
'15633281000119103',
'15633321000119108',
'15640441000119104',
'15673001000119103',
'15673081000119106',
'15736441000119108',
'15736481000119103',
'15736521000119103',
'15736561000119108',
'15736601000119108',
'15736641000119105',
'15736681000119100',
'15736721000119106',
'15736761000119101',
'15738841000119105',
'15738881000119100',
'15738921000119107',
'15739041000119106',
'15739081000119101',
'15739121000119104',
'15739161000119109',
'15739201000119104',
'15739241000119102',
'15739281000119107',
'15739321000119102',
'15739361000119107',
'15739401000119103',
'15739441000119101',
'15739481000119106',
'15739561000119101',
'15739641000119104',
'15739681000119109',
'15739721000119103',
'15739761000119108',
'15993671000119108',
'15993711000119107',
'15993751000119108',
'15996831000119101',
'1654001',
'19144002',
'19309007',
'193546005',
'193548006',
'193549003',
'193552006',
'193553001',
'193555008',
'193561006',
'193562004',
'204113001',
'206248004',
'21571006',
'21928008',
'232081005',
'232082003',
'232083008',
'232086000',
'232087009',
'232088004',
'232090003',
'23986001',
'24151000119106',
'267625001',
'275477002',
'27735002',
'29369005',
'30041005',
'314017009',
'314033007',
'314784002',
'32893002',
'33647009',
'336611000119109',
'336631000119104',
'342221000119104',
'342241000119105',
'34623005',
'347381000119106',
'347401000119106',
'367360002',
'37155002',
'392030001',
'392288006',
'392291006',
'392300000',
'392352004',
'404634005',
'404648005',
'415176004',
'41911000119107',
'444863008',
'45623002',
'46168003',
'50485007',
'53667005',
'55129006',
'60981000119103',
'65460003',
'66725002',
'66747002',
'66990007',
'68241007',
'698840003',
'713457002',
'715144004',
'716166002',
'721898008',
'722321001',
'722329004',
'77075001',
'787051000',
'787052007',
'84333006',
'84494001',
'89215000',
'92829008',
'95213001',
'95250000',
'95717004'
}
class GlaucomaAssociatedWithCongenitalAnomaliesDystrophiesAndSystemicSyndromes(ValueSet):
"""
**Clinical Focus:** This value set contains concepts that represent a diagnosis of glaucoma that is associated with congenital anomalies, dystrophies, and systemic syndromes.
**Data Element Scope:** This value set may use the Quality Data Model (QDM) category related to Diagnosis.
**Inclusion Criteria:** Includes only relevant concepts associated with a diagnosis of glaucoma that may be due to congenital anomalies, dystrophies, or other systemic syndromes, including trauma, or a secondary diagnosis.
**Exclusion Criteria:** Excludes concepts that pertain to 'unspecified eye.'
"""
OID = '2.16.840.1.113883.3.526.3.1461'
VALUE_SET_NAME = 'Glaucoma Associated with Congenital Anomalies, Dystrophies, and Systemic Syndromes'
EXPANSION_VERSION = 'eCQM Update 2020-05-07'
ICD10CM = {
'H4031X0',
'H4031X1',
'H4031X2',
'H4031X3',
'H4031X4',
'H4032X0',
'H4032X1',
'H4032X2',
'H4032X3',
'H4032X4',
'H4033X0',
'H4033X1',
'H4033X2',
'H4033X3',
'H4033X4',
'H4041X0',
'H4041X1',
'H4041X2',
'H4041X3',
'H4041X4',
'H4042X0',
'H4042X1',
'H4042X2',
'H4042X3',
'H4042X4',
'H4043X0',
'H4043X1',
'H4043X2',
'H4043X3',
'H4043X4',
'H4051X0',
'H4051X1',
'H4051X2',
'H4051X3',
'H4051X4',
'H4052X0',
'H4052X1',
'H4052X2',
'H4052X3',
'H4052X4',
'H4053X0',
'H4053X1',
'H4053X2',
'H4053X3',
'H4053X4',
'H40811',
'H40812',
'H40813',
'H40821',
'H40822',
'H40823',
'H40831',
'H40832',
'H40833',
'H4089',
'H409',
'H42'
}
SNOMEDCT = {
'15736561000119108',
'15736601000119108',
'15736641000119105',
'15739041000119106',
'15739081000119101',
'15739121000119104',
'15739161000119109',
'15739321000119102',
'15739361000119107',
'15739401000119103',
'15739441000119101',
'15739481000119106',
'15739561000119101',
'15739681000119109',
'15739721000119103',
'15739761000119108',
'19309007',
'193552006',
'193553001',
'193555008',
'193556009',
'193561006',
'206248004',
'232083008',
'232086000',
'29538005',
'314033007',
'32893002',
'37155002',
'392300000',
'392352004',
'404634005',
'404648005',
'418435001',
'45623002',
'46168003',
'66725002',
'66747002',
'68241007',
'84333006',
'95717004'
}
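# Illustrative usage (a sketch, not part of the generated value-set definitions): the
# code-system sets are plain Python sets of code strings, so membership checks are simple
# `in` tests, e.g. for the class above:
#   >>> 'H4051X1' in GlaucomaAssociatedWithCongenitalAnomaliesDystrophiesAndSystemicSyndromes.ICD10CM
#   True
#   >>> '15736561000119108' in GlaucomaAssociatedWithCongenitalAnomaliesDystrophiesAndSystemicSyndromes.SNOMEDCT
#   True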
class GlomerulonephritisAndNephroticSyndrome(ValueSet):
"""
**Clinical Focus:** This value set contains concepts that represent a diagnosis of glomerulonephritis and nephrotic syndrome.
**Data Element Scope:** This value set may use the Quality Data Model (QDM) category related to Diagnosis.
**Inclusion Criteria:** Includes only relevant concepts associated with identifying patients who are diagnosed with glomerulonephritis and nephrotic syndrome.
**Exclusion Criteria:** No exclusions.
"""
OID = '2.16.840.1.113883.3.464.1003.109.12.1018'
VALUE_SET_NAME = 'Glomerulonephritis and Nephrotic Syndrome'
EXPANSION_VERSION = 'eCQM Update 2020-05-07'
ICD10CM = {
'N000',
'N001',
'N002',
'N003',
'N004',
'N005',
'N006',
'N007',
'N008',
'N009',
'N010',
'N011',
'N012',
'N013',
'N014',
'N015',
'N016',
'N017',
'N018',
'N019',
'N020',
'N021',
'N022',
'N023',
'N024',
'N025',
'N026',
'N027',
'N028',
'N029',
'N030',
'N031',
'N032',
'N033',
'N034',
'N035',
'N036',
'N037',
'N038',
'N039',
'N040',
'N041',
'N042',
'N043',
'N044',
'N045',
'N046',
'N047',
'N048',
'N049',
'N050',
'N051',
'N052',
'N053',
'N054',
'N055',
'N056',
'N057',
'N058',
'N059',
'N060',
'N061',
'N062',
'N063',
'N064',
'N065',
'N066',
'N067',
'N068',
'N069',
'N070',
'N071',
'N072',
'N073',
'N074',
'N075',
'N076',
'N077',
'N078',
'N079',
'N08',
'N140',
'N141',
'N142',
'N143',
'N144',
'N250',
'N251',
'N2581',
'N2589',
'N259',
'N261',
'N262',
'N269',
'Q600',
'Q601',
'Q602',
'Q603',
'Q604',
'Q605',
'Q606',
'Q6100',
'Q6101',
'Q6102',
'Q6111',
'Q6119',
'Q612',
'Q613',
'Q614',
'Q615',
'Q618',
'Q619'
}
ICD9CM = {
'5804',
'58081',
'58089',
'5809',
'5810',
'5811',
'5812',
'5813',
'58181',
'58189',
'5819',
'5820',
'5821',
'5822',
'5824',
'58281',
'58289',
'5829',
'5830',
'5831',
'5832',
'5834',
'5836',
'5837',
'58381',
'58389',
'5839',
'5880',
'5881',
'58881',
'58889',
'5889'
}
SNOMEDCT = {
'101711000119105',
'110996009',
'111395007',
'118951003',
'123609007',
'123610002',
'123752003',
'12491000132101',
'12511000132108',
'13335004',
'1426004',
'16726004',
'1776003',
'19351000',
'194909006',
'195353004',
'197579006',
'197580009',
'197582001',
'197584000',
'197585004',
'197589005',
'197590001',
'197591002',
'197593004',
'197594005',
'197595006',
'197596007',
'197597003',
'197598008',
'197599000',
'197600002',
'197601003',
'197603000',
'197605007',
'197607004',
'197613008',
'197616000',
'197617009',
'197618004',
'197619007',
'197626007',
'197628008',
'197629000',
'197632002',
'197661001',
'197663003',
'197664009',
'197671004',
'197679002',
'197681000',
'197682007',
'197683002',
'197684008',
'197685009',
'197686005',
'197687001',
'197688006',
'197689003',
'197690007',
'197691006',
'197692004',
'197693009',
'197694003',
'197695002',
'197696001',
'197697005',
'197707007',
'197708002',
'197709005',
'197712008',
'197713003',
'197714009',
'197715005',
'197716006',
'197717002',
'197719004',
'197720005',
'197721009',
'197722002',
'197723007',
'197724001',
'197725000',
'197738008',
'197739000',
'197769007',
'20917003',
'22702000',
'232369001',
'232460001',
'234485006',
'235000001',
'236380004',
'236381000',
'236382007',
'236383002',
'236384008',
'236385009',
'236392004',
'236393009',
'236394003',
'236395002',
'236397005',
'236398000',
'236399008',
'236400001',
'236401002',
'236402009',
'236403004',
'236404005',
'236405006',
'236407003',
'236409000',
'236410005',
'236411009',
'236412002',
'236413007',
'236414001',
'236415000',
'236416004',
'236417008',
'236418003',
'236419006',
'236422008',
'236460004',
'236461000',
'236463002',
'236479001',
'236480003',
'236508005',
'236534002',
'236535001',
'236587002',
'236588007',
'236589004',
'236590008',
'239932005',
'239935007',
'239936008',
'24790002',
'253864004',
'25821008',
'266549004',
'276585000',
'282364005',
'309426007',
'35546006',
'359694003',
'36171008',
'363234001',
'3704008',
'370494002',
'37085009',
'38046004',
'399190000',
lagged_regressor_df],
axis=1,
sort=False)
if value_col in features_df_fut.columns:
# This is to remove duplicate ``value_col`` generated by building features.
# The duplicates happen during calculating extended fitted values
# when we intentionally include ``value_col``.
del features_df_fut[value_col]
features_df_fut[value_col] = 0.0
if trained_model["uncertainty_dict"] is None:
# predictions are stored to ``value_col``
pred_res = predict_ml(
fut_df=features_df_fut,
trained_model=trained_model)
fut_df = pred_res["fut_df"]
x_mat = pred_res["x_mat"]
else:
# predictions are stored to ``value_col``
# quantiles are stored to ``f"{value_col}_quantile_summary"``
pred_res = predict_ml_with_uncertainty(
fut_df=features_df_fut,
trained_model=trained_model)
fut_df = pred_res["fut_df"]
x_mat = pred_res["x_mat"]
# Makes sure to return only necessary columns
potential_forecast_cols = [time_col, value_col, f"{value_col}_quantile_summary", ERR_STD_COL]
existing_forecast_cols = [col for col in potential_forecast_cols if col in fut_df.columns]
fut_df = fut_df[existing_forecast_cols]
return {
"fut_df": fut_df,
"x_mat": x_mat}
def predict_n_no_sim(
self,
fut_time_num,
trained_model,
freq,
new_external_regressor_df=None,
time_features_ready=False,
regressors_ready=False):
"""This is the forecast function which can be used to forecast.
It accepts extra regressors (``extra_pred_cols``) originally in
``df`` via ``new_external_regressor_df``.
Parameters
----------
fut_time_num : `int`
number of needed future values
trained_model : `dict`
A fitted silverkite model which is the output of ``self.forecast``
freq : `str`
Frequency of future predictions.
Accepts any valid frequency for ``pd.date_range``.
new_external_regressor_df : `pandas.DataFrame` or None
Contains the extra regressors if specified.
time_features_ready : `bool`
Boolean to denote if time features are already given in df or not.
regressors_ready : `bool`
Boolean to denote if regressors are already added to data (``fut_df``).
Returns
-------
result: `dict`
A dictionary with following items
- "fut_df": `pandas.DataFrame`
The same as input dataframe with an added column for the response.
If value_col already appears in ``fut_df``, it will be over-written.
If ``uncertainty_dict`` is provided as input,
it will also contain a ``{value_col}_quantile_summary`` column.
- "x_mat": `pandas.DataFrame`
Design matrix of the predictive machine-learning model
"""
# creates the future time grid
dates = pd.date_range(
start=trained_model["last_date_for_fit"],
periods=fut_time_num + 1,
freq=freq)
dates = dates[dates > trained_model["last_date_for_fit"]] # drops values up to last_date_for_fit
fut_df = pd.DataFrame({trained_model["time_col"]: dates.tolist()})
return self.predict_no_sim(
fut_df=fut_df,
trained_model=trained_model,
past_df=trained_model["df"].copy(), # observed data used for training the model
new_external_regressor_df=new_external_regressor_df,
time_features_ready=time_features_ready,
regressors_ready=regressors_ready)
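# Worked example of the date-grid logic above (a sketch; the timestamp and frequency are
# illustrative, not taken from a real trained model):
#   >>> last = pd.Timestamp("2021-01-31")          # plays the role of trained_model["last_date_for_fit"]
#   >>> dates = pd.date_range(start=last, periods=3 + 1, freq="D")
#   >>> list(dates[dates > last])                  # exactly fut_time_num=3 future timestamps
#   [Timestamp('2021-02-01 00:00:00'), Timestamp('2021-02-02 00:00:00'), Timestamp('2021-02-03 00:00:00')]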
def simulate(
self,
fut_df,
trained_model,
past_df=None,
new_external_regressor_df=None,
include_err=True,
time_features_ready=False,
regressors_ready=False):
"""A function to simulate future series.
If the fitted model supports uncertainty e.g. via ``uncertainty_dict``,
errors are incorporated into the simulations.
Parameters
----------
fut_df : `pandas.DataFrame`
The data frame which includes the timestamps
for prediction and any regressors.
trained_model : `dict`
A fitted silverkite model which is the output of ``self.forecast``.
past_df : `pandas.DataFrame`, optional
A data frame with past values if autoregressive methods are called
via ``autoreg_dict`` parameter of ``greykite.algo.forecast.silverkite.SilverkiteForecast.py``
new_external_regressor_df: `pandas.DataFrame`, optional
Contains the regressors not already included in ``fut_df``.
include_err : `bool`
Boolean to determine if errors are to be incorporated in the simulations.
time_features_ready : `bool`
Boolean to denote if time features are already given in df or not.
regressors_ready : `bool`
Boolean to denote if regressors are already added to data (``fut_df``).
Returns
-------
result: `dict`
A dictionary with the following items
- "fut_df": `pandas.DataFrame`
The same as input dataframe with an added column for the response.
If value_col already appears in ``fut_df``, it will be over-written.
If ``uncertainty_dict`` is provided as input,
it will also contain a ``{value_col}_quantile_summary`` column.
Here are the expected columns:
(1) A time column with the column name being ``trained_model["time_col"]``
(2) The predicted response in ``value_col`` column.
(3) Quantile summary response in ``f"{value_col}_quantile_summary"`` column.
This column only appears if the model includes uncertainty.
(4) Error std in `ERR_STD_COL` column.
This column only appears if the model includes uncertainty.
- "x_mat": `pandas.DataFrame`
Design matrix of the predictive machine-learning model
"""
n = fut_df.shape[0]
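# The loop below simulates one timestamp at a time: each prediction may receive Gaussian
# noise drawn with the per-row error std (ERR_STD_COL), and the simulated value is appended
# to ``past_df`` so autoregressive lags for later timestamps see simulated history.
# A minimal sketch of the noise step (values illustrative):
#   >>> prediction, scale = 10.0, 2.0
#   >>> simulated = prediction + np.random.normal(loc=0.0, scale=scale)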
past_df_sim = None if past_df is None else past_df.copy()
fut_df = fut_df.copy()
fut_df_sim = fut_df.copy()
time_col = trained_model["time_col"]
value_col = trained_model["value_col"]
fut_df_sim[value_col] = np.nan
fut_df_sim = fut_df_sim.astype({value_col: "float64"})
# adds the other features
if time_features_ready is not True:
fut_df = self.__build_silverkite_features(
df=fut_df,
time_col=time_col,
origin_for_time_vars=trained_model["origin_for_time_vars"],
daily_event_df_dict=trained_model["daily_event_df_dict"],
changepoint_values=trained_model["changepoint_values"],
continuous_time_col=trained_model["continuous_time_col"],
growth_func=trained_model["growth_func"],
fs_func=trained_model["fs_func"],
seasonality_changepoint_result=trained_model["seasonality_changepoint_result"],
changepoint_dates=trained_model["trend_changepoint_dates"])
if new_external_regressor_df is not None and not regressors_ready:
new_external_regressor_df = new_external_regressor_df.reset_index(
drop=True)
fut_df = pd.concat(
[fut_df, new_external_regressor_df],
axis=1,
sort=False)
x_mat_list = []
for i in range(n):
fut_df_sub = fut_df.iloc[[i]].reset_index(drop=True)
pred_res = self.predict_no_sim(
fut_df=fut_df_sub,
trained_model=trained_model,
past_df=past_df_sim,
new_external_regressor_df=None,
time_features_ready=True,
regressors_ready=True)
fut_df_sub = pred_res["fut_df"]
x_mat = pred_res["x_mat"]
x_mat_list.append(x_mat)
fut_df_sim.at[i, value_col] = fut_df_sub[value_col].values[0]
if include_err:
if ERR_STD_COL in list(fut_df_sub.columns):
scale = fut_df_sub[ERR_STD_COL].values[0]
err = np.random.normal(
loc=0.0,
scale=scale)
fut_df_sim.at[i, value_col] = (
fut_df_sub[value_col].values[0]
+ err)
else:
raise ValueError(
"Error is requested via ``include_err = True``. "
f"However the std column ({ERR_STD_COL}) "
"does not appear in the prediction")
past_df_increment = fut_df_sim[[value_col]]
if past_df_sim is None:
past_df_sim = past_df_increment
else:
past_df_sim = pd.concat(
[past_df_sim, past_df_increment],
axis=0,
sort=False)
x_mat = pd.concat(
x_mat_list,
axis=0,
ignore_index=True, # The index does not matter as we simply want to stack up the data
sort=False)
return {
"sim_df": fut_df_sim[[time_col, value_col]],
"x_mat": x_mat}
def simulate_multi(
self,
fut_df,
trained_model,
simulation_num=10,
past_df=None,
new_external_regressor_df=None,
include_err=None):
"""A function to simulate future series.
If the fitted model supports uncertainty e.g. via ``uncertainty_dict``,
errors are incorporated into the simulations.
Parameters
----------
fut_df : `pandas.DataFrame`
The data frame which includes the timestamps
for prediction and any regressors.
trained_model : `dict`
A fitted silverkite model which is the output of ``self.forecast``.
simulation_num : `int`
The number of simulated series,
(each of which have the same number of rows as ``fut_df``)
to be stacked up row-wise.
past_df : `pandas.DataFrame`, optional
A data frame with past values if autoregressive methods are called
via ``autoreg_dict`` parameter of ``greykite.algo.forecast.silverkite.SilverkiteForecast.py``.
new_external_regressor_df: `pandas.DataFrame`, optional
Contains the regressors not already included in ``fut_df``.
include_err : `bool`, optional, default None
Boolean to determine if errors are to be incorporated in the simulations.
If None, it will be set to True if uncertainty is passed to the model and
otherwise will be set to False.
Returns
-------
result: `dict`
A dictionary with the following items
- "fut_df_sim" : `pandas.DataFrame`
Row-wise concatenation of dataframes each being the same as
input dataframe (``fut_df``) with an added column for the response
and a new column: "sim_label" to differentiate various simulations.
The row number of the returned dataframe is:
``simulation_num`` times the row number of ``fut_df``.
If ``value_col`` already appears in ``fut_df``, it will be over-written.
- "x_mat": `pandas.DataFrame`
``simulation_num`` copies of design matrix of the predictive machine-learning model
concatenated. An extra index column ("original_row_index") is also added
for aggregation when needed.
Note that all copies will be the same except in the case where
auto-regression is utilized.
"""
if include_err is None:
include_err = trained_model["uncertainty_dict"] is not None
if trained_model["uncertainty_dict"] is None and include_err:
raise ValueError(
"`include_err=True` was passed. "
"However model does not support uncertainty. "
"To support uncertainty pass `uncertainty_dict` to the model.")
value_col = trained_model["value_col"]
fut_df = fut_df.reset_index(drop=True) # reset_index returns a copy
fut_df = self.__build_silverkite_features(
df=fut_df,
time_col=trained_model["time_col"],
origin_for_time_vars=trained_model["origin_for_time_vars"],
daily_event_df_dict=trained_model["daily_event_df_dict"],
changepoint_values=trained_model["changepoint_values"],
continuous_time_col=trained_model["continuous_time_col"],
growth_func=trained_model["growth_func"],
fs_func=trained_model["fs_func"],
seasonality_changepoint_result=trained_model["seasonality_changepoint_result"],
changepoint_dates=trained_model["trend_changepoint_dates"])
if new_external_regressor_df is not None:
new_external_regressor_df = new_external_regressor_df.reset_index(
drop=True)
fut_df = pd.concat(
[fut_df, new_external_regressor_df],
axis=1)
def one_sim_func(label):
"""Creates one simulation and labels it with ``label`` in an added
column : "sim_label"
"""
sim_res = self.simulate(
fut_df=fut_df,
trained_model=trained_model,
past_df=past_df,
new_external_regressor_df=None,
include_err=include_err,
time_features_ready=True,
regressors_ready=True)
sim_df = sim_res["sim_df"]
x_mat = sim_res["x_mat"]
sim_df["sim_label"] = label
# ``x_mat`` does not necessarily have an index column
# we keep track of the original index, to be able to aggregate
# across simulations later
x_mat["original_row_index"] = range(len(fut_df))
return {
"sim_df": sim_df,
"x_mat": x_mat}
sim_res_list = [one_sim_func(i) for i in range(simulation_num)]
sim_df_list = [sim_res_list[i]["sim_df"] for i in range(simulation_num)]
x_mat_list = [sim_res_list[i]["x_mat"] for i in range(simulation_num)]
sim_df = pd.concat(
sim_df_list,
axis=0,
ignore_index=True, # The index does not matter as we simply want to stack up the data
sort=False)
sim_df[value_col] = sim_df[value_col].astype(float)
x_mat = pd.concat(
x_mat_list,
axis=0,
ignore_index=True, # The index does not matter as we simply want to stack up the data
sort=False)
sim_df[value_col] = sim_df[value_col].astype(float)
assert len(sim_df) == len(fut_df) * simulation_num
assert len(x_mat) == len(fut_df) * simulation_num
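# Typical downstream aggregation of the stacked output (a sketch; ``silverkite`` stands for
# an instance of this class, and the aggregation itself is not performed here):
#   >>> res = silverkite.simulate_multi(fut_df=fut_df, trained_model=trained_model, simulation_num=50)
#   >>> per_ts = res["sim_df"][value_col].values.reshape(50, len(fut_df))  # rows: simulations
#   >>> point_forecast = per_ts.mean(axis=0)                               # one value per timestamp
#   >>> x_mat_mean = res["x_mat"].groupby("original_row_index").mean()     # per-timestamp design matrix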
return {
"sim_df": sim_df,
"x_mat": x_mat}
def predict_via_sim(
self,
fut_df,
trained_model,
past_df=None,
new_external_regressor_df=None,
simulation_num=10,
include_err=None):
"""Performs | |
import sys, os, random, scipy
import numpy as np
import pandas as pd
from numba.typed import List
import statsmodels.api as sm
from statsmodels.stats.multitest import multipletests
from tqdm import tqdm
from sklearn.cluster import AgglomerativeClustering
from anndata import AnnData
from .base import lr, calc_neighbours, get_spot_lrs, get_lrs_scores, get_scores
from .merge import merge
from .perm_utils import get_lr_features, get_lr_bg
# Newest method #
def perform_spot_testing(
adata: AnnData,
lr_scores: np.ndarray,
lrs: np.array,
n_pairs: int,
neighbours: List,
het_vals: np.array,
min_expr: float,
adj_method: str = "fdr_bh",
pval_adj_cutoff: float = 0.05,
verbose: bool = True,
save_bg=False,
neg_binom=False,
quantiles=(0.5, 0.75, 0.85, 0.9, 0.95, 0.97, 0.98, 0.99, 0.995, 0.9975, 0.999, 1),
):
"""Calls significant spots by creating random gene pairs with similar
expression to the given LR pair; backgrounds are only generated for spots
which have a score for the given LR.
"""
quantiles = np.array(quantiles)
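# The per-spot test below reduces to an empirical one-sided p-value against an LR-specific
# background of random gene pairs; a minimal sketch of that computation for one spot:
#   >>> bg = np.array([0.1, 0.4, 0.7, 1.2, 2.0])        # background scores from random pairs
#   >>> observed = 1.2
#   >>> max(int((bg >= observed).sum()), 1) / len(bg)   # pseudocount of 1, mirroring the code below
#   0.4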
lr_genes = np.unique([lr_.split("_") for lr_ in lrs])
genes = np.array([gene for gene in adata.var_names if gene not in lr_genes])
candidate_expr = adata[:, genes].to_df().values
n_genes = round(np.sqrt(n_pairs) * 2)
if len(genes) < n_genes:
print(
"Exiting since at least "
f"{n_genes} genes are needed to generate {n_pairs} pairs."
)
return
if n_pairs < 100:
print(
"Exiting since n_pairs<100, need much larger number of pairs to "
"get accurate backgrounds (e.g. 1000)."
)
return
####### Quantiles to select similar gene to LRs to gen. rand-pairs #######
lr_expr = adata[:, lr_genes].to_df()
lr_feats = get_lr_features(adata, lr_expr, lrs, quantiles)
l_quants = lr_feats.loc[
lrs, [col for col in lr_feats.columns if "L_" in col]
].values
r_quants = lr_feats.loc[
lrs, [col for col in lr_feats.columns if "R_" in col]
].values
candidate_quants = np.apply_along_axis(
np.quantile, 0, candidate_expr, q=quantiles, interpolation="nearest"
)
# Ensuring consistent typing to prevent numba errors #
l_quants = l_quants.astype("<f4")
r_quants = r_quants.astype("<f4")
candidate_quants = candidate_quants.astype("<f4")
######## Background per LR, but only for spots where LR has a score ########
# Determine the indices of the spots where each LR has a score #
cols = ["n_spots", "n_spots_sig", "n_spots_sig_pval"]
lr_summary = np.zeros((lr_scores.shape[1], 3), dtype=int)
pvals = np.ones(lr_scores.shape, dtype=np.float64)
pvals_adj = np.ones(lr_scores.shape, dtype=np.float64)
log10pvals_adj = np.zeros(lr_scores.shape, dtype=np.float64)
lr_sig_scores = lr_scores.copy()
# If we are saving the backgrounds #
if save_bg:
adata.uns["lrs_to_bg"] = {}
adata.uns["lr_spot_indices"] = {}
with tqdm(
total=lr_scores.shape[1],
desc="Generating backgrounds & testing each LR pair...",
bar_format="{l_bar}{bar} [ time left: {remaining} ]",
disable=verbose == False,
) as pbar:
gene_bg_genes = {} # Keep track of genes which can be used to gen. rand-pairs.
spot_lr_indices = [
[] for i in range(lr_scores.shape[0])
] # tracks the LRs tested in a given spot, for the MHT correction
for lr_j in range(lr_scores.shape[1]):
lr_ = lrs[lr_j]
background, spot_indices = get_lr_bg(
adata,
neighbours,
het_vals,
min_expr,
lr_,
lr_scores[:, lr_j],
l_quants[lr_j, :],
r_quants[lr_j, :],
genes,
candidate_quants,
gene_bg_genes,
n_genes,
n_pairs,
)
if save_bg:
adata.uns["lrs_to_bg"][lr_] = background
adata.uns["lr_spot_indices"][lr_] = spot_indices
if not neg_binom: # Calculate empirical p-values per-spot
for spot_i, spot_index in enumerate(spot_indices):
n_greater = len(
np.where(background[spot_i, :] >= lr_scores[spot_index, lr_j])[
0
]
)
n_greater = n_greater if n_greater != 0 else 1 # pseudocount
pvals[spot_index, lr_j] = n_greater / background.shape[1]
spot_lr_indices[spot_index].append(lr_j)
else: # Fitting NB per LR
lr_j_scores = lr_scores[spot_indices, lr_j]
bg_ = background.ravel()
bg_wScore = np.array(list(lr_j_scores) + list(bg_))
##### 1) rounding discretisation
# First multiply so that the minimum non-zero value becomes one before rounding #
bg_1 = bg_wScore * (1 / min(bg_wScore[bg_wScore != 0]))
bg_1 = np.round(bg_1)
lr_j_scores_1 = bg_1[0 : len(lr_j_scores)]
bg_1 = bg_1[len(lr_j_scores) : len(bg_1)]
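# Worked example of the discretisation above (a sketch): with combined scores+background
# values [0.2, 0.6, 1.2], the smallest non-zero value is 0.2, so everything is scaled by
# 1/0.2 = 5 and rounded, giving integer-valued counts [1, 3, 6] that the negative-binomial
# fit in get_stats can work with.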
###### Getting the pvalue from negative binomial approach
round_pvals, _, _, _ = get_stats(
lr_j_scores_1,
bg_1,
len(bg_1),
neg_binom=True,
return_negbinom_params=False,
)
pvals[spot_indices, lr_j] = round_pvals
for spot_index in spot_indices:
spot_lr_indices[spot_index].append(lr_j)
pbar.update(1)
# MHT correction # filling in other stats #
for spot_i in range(lr_scores.shape[0]):
lr_indices = spot_lr_indices[spot_i]
if len(lr_indices) != 0:
pvals_adj[spot_i, lr_indices] = multipletests(
pvals[spot_i, lr_indices], method=adj_method
)[1]
log10pvals_adj[spot_i, :] = -np.log10(pvals_adj[spot_i, :])
# Recording per lr results for this LR #
lrs_in_spot = lr_scores[spot_i] > min_expr
sig_lrs_in_spot = pvals_adj[spot_i, :] < pval_adj_cutoff
sigpval_lrs_in_spot = pvals[spot_i, :] < pval_adj_cutoff
lr_summary[lrs_in_spot, 0] += 1
lr_summary[sig_lrs_in_spot, 1] += 1
lr_summary[sigpval_lrs_in_spot, 2] += 1
lr_sig_scores[spot_i, sig_lrs_in_spot == False] = 0
# Ordering the results according to number of significant spots per LR#
order = np.argsort(-lr_summary[:, 1])
lrs_ordered = lrs[order]
lr_summary = lr_summary[order, :]
lr_summary = pd.DataFrame(lr_summary, index=lrs_ordered, columns=cols)
lr_scores = lr_scores[:, order]
pvals = pvals[:, order]
pvals_adj = pvals_adj[:, order]
log10pvals_adj = log10pvals_adj[:, order]
lr_sig_scores = lr_sig_scores[:, order]
# Saving the results in AnnData #
if verbose:
print("\nStoring results:\n")
adata.uns["lr_summary"] = lr_summary
res_info = ["lr_scores", "p_vals", "p_adjs", "-log10(p_adjs)", "lr_sig_scores"]
mats = [lr_scores, pvals, pvals_adj, log10pvals_adj, lr_sig_scores]
for i, info_name in enumerate(res_info):
adata.obsm[info_name] = mats[i]
if verbose:
print(f"{info_name} stored in adata.obsm['{info_name}'].")
if verbose:
print(
"\nPer-spot results in adata.obsm have columns in same order as "
"rows in adata.uns['lr_summary']."
)
print("Summary of LR results in adata.uns['lr_summary'].")
# Version 2, no longer in use, see above for newest method #
def perform_perm_testing(
adata: AnnData,
lr_scores: np.ndarray,
n_pairs: int,
lrs: np.array,
lr_mid_dist: int,
verbose: float,
neighbours: List,
het_vals: np.array,
min_expr: float,
neg_binom: bool,
adj_method: str,
pval_adj_cutoff: float,
):
"""Performs the grouped permutation testing when taking the stats approach."""
if n_pairs != 0: # Perform permutation testing
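# In outline (a sketch of this branch): LR pairs are grouped by the index of their median
# expression level, using single-linkage AgglomerativeClustering with a manhattan distance
# threshold of ``lr_mid_dist``; one random-pair background is then generated per group and
# shared by every LR pair in that group. Mirroring the call below on toy midpoints:
#   >>> ims_toy = np.array([[10], [12], [300], [305]])
#   >>> AgglomerativeClustering(n_clusters=None, distance_threshold=150,
#   ...                         affinity="manhattan", linkage="single").fit_predict(ims_toy)
#   # -> two groups: {10, 12} together and {300, 305} together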
# Grouping spots with similar mean expression point #
genes = get_valid_genes(adata, n_pairs)
means_ordered, genes_ordered = get_ordered(adata, genes)
ims = np.array(
[
get_median_index(
lr_.split("_")[0],
lr_.split("_")[1],
means_ordered.values,
genes_ordered,
)
for lr_ in lrs
]
).reshape(-1, 1)
if len(lrs) > 1: # Multi-LR pair mode, group LRs to generate backgrounds
clusterer = AgglomerativeClustering(
n_clusters=None,
distance_threshold=lr_mid_dist,
affinity="manhattan",
linkage="single",
)
lr_groups = clusterer.fit_predict(ims)
lr_group_set = np.unique(lr_groups)
if verbose:
print(f"{len(lr_group_set)} lr groups with similar expression levels.")
else: # Single LR pair mode, generate background for the LR.
lr_groups = np.array([0])
lr_group_set = lr_groups
res_info = ["lr_scores", "p_val", "p_adj", "-log10(p_adj)", "lr_sig_scores"]
n_, n_sigs = np.array([0] * len(lrs)), np.array([0] * len(lrs))
per_lr_results = {}
with tqdm(
total=len(lr_group_set),
desc="Generating background distributions for the LR pair groups..",
bar_format="{l_bar}{bar} [ time left: {remaining} ]",
) as pbar:
for group in lr_group_set:
# Determining common mid-point for each group #
group_bool = lr_groups == group
group_im = int(np.median(ims[group_bool, 0]))
# Calculating the background #
rand_pairs = get_rand_pairs(adata, genes, n_pairs, lrs=lrs, im=group_im)
background = get_lrs_scores(
adata,
rand_pairs,
neighbours,
het_vals,
min_expr,
filter_pairs=False,
).ravel()
total_bg = len(background)
background = background[background != 0] # Filtering for increase speed
# Getting stats for each lr in group #
group_lr_indices = np.where(group_bool)[0]
for lr_i in group_lr_indices:
lr_ = lrs[lr_i]
lr_results = pd.DataFrame(index=adata.obs_names, columns=res_info)
scores = lr_scores[:, lr_i]
stats = get_stats(
scores,
background,
total_bg,
neg_binom,
adj_method,
pval_adj_cutoff=pval_adj_cutoff,
)
full_stats = [scores] + list(stats)
for vals, colname in zip(full_stats, res_info):
lr_results[colname] = vals
n_[lr_i] = len(np.where(scores > 0)[0])
n_sigs[lr_i] = len(
np.where(lr_results["p_adj"].values < pval_adj_cutoff)[0]
)
if n_sigs[lr_i] > 0:
per_lr_results[lr_] = lr_results
pbar.update(1)
print(f"{len(per_lr_results)} LR pairs with significant interactions.")
lr_summary = pd.DataFrame(index=lrs, columns=["n_spots", "n_spots_sig"])
lr_summary["n_spots"] = n_
lr_summary["n_spots_sig"] = n_sigs
lr_summary = lr_summary.iloc[np.argsort(-n_sigs)]
else: # Simply store the scores
per_lr_results = {}
lr_summary = pd.DataFrame(index=lrs, columns=["n_spots"])
for i, lr_ in enumerate(lrs):
lr_results = pd.DataFrame(index=adata.obs_names, columns=["lr_scores"])
lr_results["lr_scores"] = lr_scores[:, i]
per_lr_results[lr_] = lr_results
lr_summary.loc[lr_, "n_spots"] = len(np.where(lr_scores[:, i] > 0)[0])
lr_summary = lr_summary.iloc[np.argsort(-lr_summary.values[:, 0]), :]
adata.uns["lr_summary"] = lr_summary
adata.uns["per_lr_results"] = per_lr_results
if verbose:
print(
"Summary of significant spots for each lr pair in adata.uns['lr_summary']."
)
print(
"Spot enrichment statistics of LR interactions in adata.uns['per_lr_results']"
)
# No longer in use #
def permutation(
adata: AnnData,
n_pairs: int = 200,
distance: int = None,
use_lr: str = "cci_lr",
use_het: str = None,
neg_binom: bool = False,
adj_method: str = "fdr",
neighbours: list = None,
run_fast: bool = True,
bg_pairs: list = None,
background: np.array = None,
**kwargs,
) -> AnnData:
"""Permutation test for merged result
Parameters
----------
adata: AnnData The data object including the cell types to count
n_pairs: int Number of gene pairs to run permutation test (default: 200)
distance: int Distance between spots (default: None)
use_lr: str LR cluster used for permutation test (default: 'cci_lr')
use_het: str cell type diversity counts used for permutation test (default: None)
neg_binom: bool Whether to fit negative binomial parameters to the background distribution for p-value estimation.
adj_method: str Method used by statsmodels.stats.multitest.multipletests for MHT correction.
neighbours:
f.read() == "hello"
@pytest.mark.datafiles(DATA_DIR)
def test_open_force_different_workspace(cli, tmpdir, datafiles):
_, project, workspace = open_workspace(cli, tmpdir, datafiles, "git", "-alpha")
# Assert the workspace dir exists
assert os.path.exists(workspace)
hello_path = os.path.join(workspace, "usr", "bin", "hello")
hello1_path = os.path.join(workspace, "usr", "bin", "hello1")
tmpdir = os.path.join(str(tmpdir), "-beta")
shutil.move(hello_path, hello1_path)
element_name2, _, workspace2 = open_workspace(cli, tmpdir, datafiles, "git", "-beta")
# Assert the workspace dir exists
assert os.path.exists(workspace2)
# Assert that workspace 1 contains the modified file
assert os.path.exists(hello1_path)
# Assert that workspace 2 contains the unmodified file
assert os.path.exists(os.path.join(workspace2, "usr", "bin", "hello"))
result = cli.run(project=project, args=["workspace", "close", element_name2])
result.assert_success()
# Now open the workspace again with --force, this should happily succeed
result = cli.run(project=project, args=["workspace", "open", "--force", "--directory", workspace, element_name2])
result.assert_success()
# Assert that the file in workspace 1 has been replaced
# With the file from workspace 2
assert os.path.exists(hello_path)
assert not os.path.exists(hello1_path)
@pytest.mark.datafiles(DATA_DIR)
def test_close(cli, tmpdir, datafiles):
element_name, project, workspace = open_workspace(cli, tmpdir, datafiles, "git")
# Close the workspace
result = cli.run(project=project, args=["workspace", "close", "--remove-dir", element_name])
result.assert_success()
# Assert the workspace dir has been deleted
assert not os.path.exists(workspace)
@pytest.mark.datafiles(DATA_DIR)
def test_close_external_after_move_project(cli, tmpdir, datafiles):
workspace_dir = os.path.join(str(tmpdir), "workspace")
project_path = os.path.join(str(tmpdir), "initial_project")
element_name, _, _ = open_workspace(cli, tmpdir, datafiles, "git", "", workspace_dir, project_path)
assert os.path.exists(workspace_dir)
moved_dir = os.path.join(str(tmpdir), "external_project")
shutil.move(project_path, moved_dir)
assert os.path.exists(moved_dir)
# Close the workspace
result = cli.run(project=moved_dir, args=["workspace", "close", "--remove-dir", element_name])
result.assert_success()
# Assert the workspace dir has been deleted
assert not os.path.exists(workspace_dir)
@pytest.mark.datafiles(DATA_DIR)
def test_close_internal_after_move_project(cli, tmpdir, datafiles):
initial_dir = os.path.join(str(tmpdir), "initial_project")
initial_workspace = os.path.join(initial_dir, "workspace")
element_name, _, _ = open_workspace(
cli, tmpdir, datafiles, "git", workspace_dir=initial_workspace, project_path=initial_dir
)
moved_dir = os.path.join(str(tmpdir), "internal_project")
shutil.move(initial_dir, moved_dir)
assert os.path.exists(moved_dir)
# Close the workspace
result = cli.run(project=moved_dir, args=["workspace", "close", "--remove-dir", element_name])
result.assert_success()
# Assert the workspace dir has been deleted
workspace = os.path.join(moved_dir, "workspace")
assert not os.path.exists(workspace)
@pytest.mark.datafiles(DATA_DIR)
def test_close_removed(cli, tmpdir, datafiles):
element_name, project, workspace = open_workspace(cli, tmpdir, datafiles, "git")
# Remove it first, closing the workspace should work
shutil.rmtree(workspace)
# Close the workspace
result = cli.run(project=project, args=["workspace", "close", element_name])
result.assert_success()
# Assert the workspace dir has been deleted
assert not os.path.exists(workspace)
@pytest.mark.datafiles(DATA_DIR)
def test_close_nonexistant_element(cli, tmpdir, datafiles):
element_name, project, workspace = open_workspace(cli, tmpdir, datafiles, "git")
element_path = os.path.join(datafiles.dirname, datafiles.basename, "elements", element_name)
# First brutally remove the element.bst file, ensuring that
# the element does not exist anymore in the project where
# we want to close the workspace.
os.remove(element_path)
# Close the workspace
result = cli.run(project=project, args=["workspace", "close", "--remove-dir", element_name])
result.assert_success()
# Assert the workspace dir has been deleted
assert not os.path.exists(workspace)
@pytest.mark.datafiles(DATA_DIR)
def test_close_multiple(cli, tmpdir, datafiles):
tmpdir_alpha = os.path.join(str(tmpdir), "alpha")
tmpdir_beta = os.path.join(str(tmpdir), "beta")
alpha, project, workspace_alpha = open_workspace(cli, tmpdir_alpha, datafiles, "git", suffix="-alpha")
beta, project, workspace_beta = open_workspace(cli, tmpdir_beta, datafiles, "git", suffix="-beta")
# Close the workspaces
result = cli.run(project=project, args=["workspace", "close", "--remove-dir", alpha, beta])
result.assert_success()
# Assert the workspace dirs have been deleted
assert not os.path.exists(workspace_alpha)
assert not os.path.exists(workspace_beta)
@pytest.mark.datafiles(DATA_DIR)
def test_close_all(cli, tmpdir, datafiles):
tmpdir_alpha = os.path.join(str(tmpdir), "alpha")
tmpdir_beta = os.path.join(str(tmpdir), "beta")
_, project, workspace_alpha = open_workspace(cli, tmpdir_alpha, datafiles, "git", suffix="-alpha")
_, project, workspace_beta = open_workspace(cli, tmpdir_beta, datafiles, "git", suffix="-beta")
# Close the workspaces
result = cli.run(project=project, args=["workspace", "close", "--remove-dir", "--all"])
result.assert_success()
# Assert the workspace dirs have been deleted
assert not os.path.exists(workspace_alpha)
assert not os.path.exists(workspace_beta)
@pytest.mark.datafiles(DATA_DIR)
def test_reset(cli, tmpdir, datafiles):
# Open the workspace
element_name, project, workspace = open_workspace(cli, tmpdir, datafiles, "git")
# Modify workspace
shutil.rmtree(os.path.join(workspace, "usr", "bin"))
os.makedirs(os.path.join(workspace, "etc"))
with open(os.path.join(workspace, "etc", "pony.conf"), "w", encoding="utf-8") as f:
f.write("PONY='pink'")
# Now reset the open workspace, this should have the
# effect of reverting our changes.
result = cli.run(project=project, args=["workspace", "reset", element_name])
result.assert_success()
assert os.path.exists(os.path.join(workspace, "usr", "bin", "hello"))
assert not os.path.exists(os.path.join(workspace, "etc", "pony.conf"))
@pytest.mark.datafiles(DATA_DIR)
def test_reset_soft(cli, tmpdir, datafiles):
# Open the workspace
element_name, project, workspace = open_workspace(cli, tmpdir, datafiles, "git")
assert cli.get_element_state(project, element_name) == "buildable"
hello_path = os.path.join(workspace, "usr", "bin", "hello")
pony_path = os.path.join(workspace, "etc", "pony.conf")
assert os.path.exists(os.path.join(workspace, "usr", "bin"))
assert os.path.exists(hello_path)
assert not os.path.exists(pony_path)
key_1 = cli.get_element_key(project, element_name)
assert key_1 != "{:?<64}".format("")
result = cli.run(project=project, args=["build", element_name])
result.assert_success()
assert cli.get_element_state(project, element_name) == "cached"
key_2 = cli.get_element_key(project, element_name)
assert key_2 != "{:?<64}".format("")
# workspace keys are not recalculated
assert key_1 == key_2
wait_for_cache_granularity()
# Modify workspace
shutil.rmtree(os.path.join(workspace, "usr", "bin"))
os.makedirs(os.path.join(workspace, "etc"))
with open(os.path.join(workspace, "etc", "pony.conf"), "w", encoding="utf-8") as f:
f.write("PONY='pink'")
assert not os.path.exists(os.path.join(workspace, "usr", "bin"))
assert os.path.exists(pony_path)
# Now soft-reset the open workspace, this should not revert the changes
result = cli.run(project=project, args=["workspace", "reset", "--soft", element_name])
result.assert_success()
# we removed this dir
assert not os.path.exists(os.path.join(workspace, "usr", "bin"))
# and added this one
assert os.path.exists(os.path.join(workspace, "etc", "pony.conf"))
assert cli.get_element_state(project, element_name) == "buildable"
key_3 = cli.get_element_key(project, element_name)
assert key_3 != "{:?<64}".format("")
assert key_1 != key_3
@pytest.mark.datafiles(DATA_DIR)
def test_reset_multiple(cli, tmpdir, datafiles):
# Open the workspaces
tmpdir_alpha = os.path.join(str(tmpdir), "alpha")
tmpdir_beta = os.path.join(str(tmpdir), "beta")
alpha, project, workspace_alpha = open_workspace(cli, tmpdir_alpha, datafiles, "git", suffix="-alpha")
beta, project, workspace_beta = open_workspace(cli, tmpdir_beta, datafiles, "git", suffix="-beta")
# Modify workspaces
shutil.rmtree(os.path.join(workspace_alpha, "usr", "bin"))
os.makedirs(os.path.join(workspace_beta, "etc"))
with open(os.path.join(workspace_beta, "etc", "pony.conf"), "w", encoding="utf-8") as f:
f.write("PONY='pink'")
# Now reset the open workspaces, this should have the
# effect of reverting our changes.
result = cli.run(project=project, args=["workspace", "reset", alpha, beta,])
result.assert_success()
assert os.path.exists(os.path.join(workspace_alpha, "usr", "bin", "hello"))
assert not os.path.exists(os.path.join(workspace_beta, "etc", "pony.conf"))
@pytest.mark.datafiles(DATA_DIR)
def test_reset_all(cli, tmpdir, datafiles):
# Open the workspaces
tmpdir_alpha = os.path.join(str(tmpdir), "alpha")
tmpdir_beta = os.path.join(str(tmpdir), "beta")
_, project, workspace_alpha = open_workspace(cli, tmpdir_alpha, datafiles, "git", suffix="-alpha")
_, project, workspace_beta = open_workspace(cli, tmpdir_beta, datafiles, "git", suffix="-beta")
# Modify workspaces
shutil.rmtree(os.path.join(workspace_alpha, "usr", "bin"))
os.makedirs(os.path.join(workspace_beta, "etc"))
with open(os.path.join(workspace_beta, "etc", "pony.conf"), "w", encoding="utf-8") as f:
f.write("PONY='pink'")
# Now reset the open workspace, this should have the
# effect of reverting our changes.
result = cli.run(project=project, args=["workspace", "reset", "--all"])
result.assert_success()
assert os.path.exists(os.path.join(workspace_alpha, "usr", "bin", "hello"))
assert not os.path.exists(os.path.join(workspace_beta, "etc", "pony.conf"))
@pytest.mark.datafiles(DATA_DIR)
def test_list(cli, tmpdir, datafiles):
element_name, project, workspace = open_workspace(cli, tmpdir, datafiles, "git")
# Now list the workspaces
result = cli.run(project=project, args=["workspace", "list"])
result.assert_success()
loaded = _yaml.load_data(result.output)
workspaces = loaded.get_sequence("workspaces")
assert len(workspaces) == 1
space = workspaces.mapping_at(0)
assert space.get_str("element") == element_name
assert space.get_str("directory") == workspace
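# For reference, the assertions above expect `bst workspace list` to emit YAML shaped
# roughly like the following (element name and directory are illustrative):
#
#   workspaces:
#   - element: workspace-test-git.bst
#     directory: /path/to/tmpdir/workspace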
@pytest.mark.datafiles(DATA_DIR)
@pytest.mark.parametrize("kind", repo_kinds)
@pytest.mark.parametrize("strict", [("strict"), ("non-strict")])
@pytest.mark.parametrize(
"from_workspace,guess_element",
[(False, False), (True, True), (True, False)],
ids=["project-no-guess", "workspace-guess", "workspace-no-guess"],
)
def test_build(cli, tmpdir_factory, datafiles, kind, strict, from_workspace, guess_element):
tmpdir = tmpdir_factory.mktemp(BASE_FILENAME)
element_name, project, workspace = open_workspace(cli, tmpdir, datafiles, kind, False)
checkout = os.path.join(str(tmpdir), "checkout")
args_dir = ["-C", workspace] if from_workspace else []
args_elm = [element_name] if not guess_element else []
# Modify workspace
shutil.rmtree(os.path.join(workspace, "usr", "bin"))
os.makedirs(os.path.join(workspace, "etc"))
with open(os.path.join(workspace, "etc", "pony.conf"), "w", encoding="utf-8") as f:
f.write("PONY='pink'")
# Configure strict mode
strict_mode = True
if strict != "strict":
strict_mode = False
cli.configure({"projects": {"test": {"strict": strict_mode}}})
# Build modified workspace
assert cli.get_element_state(project, element_name) == "buildable"
key_1 = cli.get_element_key(project, element_name)
assert key_1 != "{:?<64}".format("")
result = cli.run(project=project, args=args_dir + ["build", *args_elm])
result.assert_success()
assert cli.get_element_state(project, element_name) == "cached"
key_2 = cli.get_element_key(project, element_name)
assert key_2 != "{:?<64}".format("")
# workspace keys are not recalculated
assert key_1 == key_2
# Checkout the result
result = cli.run(project=project, args=args_dir + ["artifact", "checkout", "--directory", checkout, *args_elm])
result.assert_success()
# Check that the pony.conf from the modified workspace exists
filename = os.path.join(checkout, "etc", "pony.conf")
assert os.path.exists(filename)
# Check that the original /usr/bin/hello is not in the checkout
assert not os.path.exists(os.path.join(checkout, "usr", "bin", "hello"))
@pytest.mark.datafiles(DATA_DIR)
def test_buildable_no_ref(cli, tmpdir, datafiles):
project = str(datafiles)
element_name = "workspace-test-no-ref.bst"
element_path = os.path.join(project, "elements")
# Write out our test target without any source ref
repo = create_repo("git", str(tmpdir))
element = {"kind": "import", "sources": [repo.source_config()]}
_yaml.roundtrip_dump(element, os.path.join(element_path, element_name))
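# The element written above corresponds, roughly, to YAML of this shape (the source config
# comes from `repo.source_config()`, so the exact keys depend on the git plugin; note that
# no `ref` is present, which is the point of this test):
#
#   kind: import
#   sources:
#   - kind: git
#     url: file:///path/to/repo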
# Assert that this target is not buildable when no workspace is associated.
assert cli.get_element_state(project, element_name) == "no reference"
# Now open the workspace. We don't need to checkout the source though.
workspace = os.path.join(str(tmpdir), "workspace-no-ref")
os.makedirs(workspace)
args = ["workspace", "open", "--no-checkout", "--directory", workspace, element_name]
result = cli.run(project=project, args=args)
result.assert_success()
# Assert that the target is now buildable.
assert cli.get_element_state(project, element_name) == "buildable"
@pytest.mark.datafiles(DATA_DIR)
@pytest.mark.parametrize("modification", [("addfile"), ("removefile"), ("modifyfile")])
@pytest.mark.parametrize("strict", [("strict"), ("non-strict")])
def test_detect_modifications(cli, tmpdir, datafiles, modification, strict):
element_name, project, workspace = open_workspace(cli, tmpdir, datafiles, "git")
checkout = os.path.join(str(tmpdir), "checkout")
# Configure strict mode
strict_mode = True
if strict != "strict":
strict_mode = False
cli.configure({"projects": {"test": {"strict": strict_mode}}})
# Build clean workspace
assert cli.get_element_state(project, element_name) == "buildable"
key_1 = cli.get_element_key(project, element_name)
assert key_1 != "{:?<64}".format("")
result = cli.run(project=project,
<filename>awxkit/test/test_dependency_resolver.py
import pytest
from awxkit.utils import filter_by_class
from awxkit.utils.toposort import CircularDependencyError
from awxkit.api.mixins import has_create
class MockHasCreate(has_create.HasCreate):
connection = None
def __str__(self):
return "instance of {0.__class__.__name__} ({1})".format(self, hex(id(self)))
def __init__(self, *a, **kw):
self.cleaned = False
super(MockHasCreate, self).__init__()
def silent_cleanup(self):
self.cleaned = True
class A(MockHasCreate):
def create(self, **kw):
return self
class B(MockHasCreate):
optional_dependencies = [A]
def create(self, a=None, **kw):
self.create_and_update_dependencies(*filter_by_class((a, A)))
return self
class C(MockHasCreate):
dependencies = [A, B]
def create(self, a=A, b=B, **kw):
self.create_and_update_dependencies(b, a)
return self
class D(MockHasCreate):
dependencies = [A]
optional_dependencies = [B]
def create(self, a=A, b=None, **kw):
self.create_and_update_dependencies(*filter_by_class((a, A), (b, B)))
return self
class E(MockHasCreate):
dependencies = [D, C]
def create(self, c=C, d=D, **kw):
self.create_and_update_dependencies(d, c)
return self
class F(MockHasCreate):
dependencies = [B]
optional_dependencies = [E]
def create(self, b=B, e=None, **kw):
self.create_and_update_dependencies(*filter_by_class((b, B), (e, E)))
return self
class G(MockHasCreate):
dependencies = [D]
optional_dependencies = [F, E]
def create(self, d=D, f=None, e=None, **kw):
self.create_and_update_dependencies(*filter_by_class((d, D), (f, F), (e, E)))
return self
class H(MockHasCreate):
optional_dependencies = [E, A]
def create(self, a=None, e=None, **kw):
self.create_and_update_dependencies(*filter_by_class((a, A), (e, E)))
return self
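# For orientation (a summary derived from the declarations above), the fixture classes
# encode this DAG of *hard* dependencies, which is what the dependency_graph tests below
# assert against:
#   >>> {A: set(), B: set(), C: {A, B}, D: {A}, E: {D, C}, F: {B}, G: {D}, H: set()}
# Optional dependencies (B->A, D->B, F->E, G->{F, E}, H->{E, A}) only enter the graph when
# explicitly requested, which is what the optional_dependency_graph tests exercise.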
class MultipleWordClassName(MockHasCreate):
def create(self, **kw):
return self
class AnotherMultipleWordClassName(MockHasCreate):
optional_dependencies = [MultipleWordClassName]
def create(self, multiple_word_class_name=None, **kw):
self.create_and_update_dependencies(*filter_by_class((multiple_word_class_name, MultipleWordClassName)))
return self
def test_dependency_graph_single_page():
"""confirms that `dependency_graph(Base)` will return a dependency graph
consisting of only dependencies and dependencies of dependencies (if any)
"""
desired = {}
desired[G] = set([D])
desired[D] = set([A])
desired[A] = set()
assert has_create.dependency_graph(G) == desired
def test_dependency_graph_page_with_optional():
"""confirms that `dependency_graph(Base, OptionalBase)` will return a dependency
graph consisting of only dependencies and dependencies of dependencies (if any)
with the exception that the OptionalBase and its dependencies are included as well.
"""
desired = {}
desired[G] = set([D])
desired[E] = set([D, C])
desired[C] = set([A, B])
desired[D] = set([A])
desired[B] = set()
desired[A] = set()
assert has_create.dependency_graph(G, E) == desired
def test_dependency_graph_page_with_additionals():
"""confirms that `dependency_graph(Base, AdditionalBaseOne, AdditionalBaseTwo)`
will return a dependency graph consisting of only dependencies and dependencies
of dependencies (if any) with the exception that the AdditionalBases
are treated as dependencies of Base (when they aren't) and their dependencies
are included as well.
"""
desired = {}
desired[E] = set([D, C])
desired[D] = set([A])
desired[C] = set([A, B])
desired[F] = set([B])
desired[G] = set([D])
desired[A] = set()
desired[B] = set()
assert has_create.dependency_graph(E, F, G) == desired
def test_optional_dependency_graph_single_page():
"""confirms that has_create._optional_dependency_graph(Base) returns a complete dependency tree
including all optional_dependencies
"""
desired = {}
desired[H] = set([E, A])
desired[E] = set([D, C])
desired[D] = set([A, B])
desired[C] = set([A, B])
desired[B] = set([A])
desired[A] = set()
assert has_create.optional_dependency_graph(H) == desired
def test_optional_dependency_graph_with_additional():
"""confirms that has_create._optional_dependency_graph(Base) returns a complete dependency tree
including all optional_dependencies with the AdditionalBases treated as dependencies
of Base (when they aren't) and their dependencies and optional_dependencies included as well.
"""
desired = {}
desired[F] = set([B, E])
desired[H] = set([E, A])
desired[E] = set([D, C])
desired[D] = set([A, B])
desired[C] = set([A, B])
desired[B] = set([A])
desired[A] = set()
assert has_create.optional_dependency_graph(F, H, A) == desired
def test_creation_order():
"""confirms that `has_create.creation_order()` returns a valid creation order in the desired list of sets format"""
dependency_graph = dict(eight=set(['seven', 'six']),
seven=set(['five']),
six=set(),
five=set(['two', 'one']),
four=set(['one']),
three=set(['two']),
two=set(['one']),
one=set())
desired = [set(['one', 'six']),
set(['two', 'four']),
set(['three', 'five']),
set(['seven']),
set(['eight'])]
assert has_create.creation_order(dependency_graph) == desired
def test_creation_order_with_loop():
"""confirms that `has_create.creation_order()` raises toposort.CircularDependencyError when evaluating
a cyclic dependency graph
"""
dependency_graph = dict(eight=set(['seven', 'six']),
seven=set(['five']),
six=set(),
five=set(['two', 'one']),
four=set(['one']),
three=set(['two']),
two=set(['one']),
one=set(['eight']))
with pytest.raises(CircularDependencyError):
assert has_create.creation_order(dependency_graph)
class One(MockHasCreate):
pass
class Two(MockHasCreate):
dependencies = [One]
class Three(MockHasCreate):
dependencies = [Two, One]
class Four(MockHasCreate):
optional_dependencies = [Two]
class Five(MockHasCreate):
dependencies = [Two]
optional_dependencies = [One]
class IsntAHasCreate(object):
pass
class Six(MockHasCreate, IsntAHasCreate):
dependencies = [Two]
class Seven(MockHasCreate):
dependencies = [IsntAHasCreate]
def test_separate_async_optionals_none_exist():
"""confirms that when creation group classes have no async optional dependencies the order is unchanged"""
order = has_create.creation_order(has_create.optional_dependency_graph(Three, Two, One))
assert has_create.separate_async_optionals(order) == order
def test_separate_async_optionals_two_exist():
"""confirms that when two creation group classes have async dependencies
the class that has the shared item as a dependency occurs first in a separate creation group
"""
order = has_create.creation_order(has_create.optional_dependency_graph(Four, Three, Two))
assert has_create.separate_async_optionals(order) == [set([One]), set([Two]), set([Three]), set([Four])]
def test_separate_async_optionals_three_exist():
"""confirms that when three creation group classes have async dependencies
the class that has the shared item as a dependency occurs first in a separate creation group
"""
order = has_create.creation_order(has_create.optional_dependency_graph(Five, Four, Three))
assert has_create.separate_async_optionals(order) == [set([One]), set([Two]), set([Three]),
set([Five]), set([Four])]
def test_separate_async_optionals_not_has_create():
"""confirms that when a dependency isn't a HasCreate, has_create.separate_async_optionals doesn't
unnecessarily move it from the initial creation group
"""
order = has_create.creation_order(has_create.optional_dependency_graph(Seven, Six))
assert has_create.separate_async_optionals(order) == [set([One, IsntAHasCreate]), set([Two, Seven]), set([Six])]
def test_page_creation_order_single_page():
"""confirms that `has_create.page_creation_order()` returns a valid creation order"""
desired = [set([A]), set([D]), set([G])]
assert has_create.page_creation_order(G) == desired
def test_page_creation_order_optionals_provided():
"""confirms that `has_create.page_creation_order()` returns a valid creation order
when optional_dependencies are included
"""
desired = [set([A]), set([B]), set([C]), set([D]), set([E]), set([H])]
assert has_create.page_creation_order(H, A, E) == desired
def test_page_creation_order_additionals_provided():
"""confirms that `has_create.page_creation_order()` returns a valid creation order
when additional pages are included
"""
desired = [set([A]), set([B]), set([D]), set([F, H]), set([G])]
assert has_create.page_creation_order(F, H, G) == desired
def test_all_instantiated_dependencies_single_page():
f = F().create()
b = f._dependency_store[B]
desired = set([b, f])
assert set(has_create.all_instantiated_dependencies(f, A, B, C, D, E, F, G, H)) == desired
def test_all_instantiated_dependencies_single_page_are_ordered():
f = F().create()
b = f._dependency_store[B]
desired = [b, f]
assert has_create.all_instantiated_dependencies(f, A, B, C, D, E, F, G, H) == desired
def test_all_instantiated_dependencies_optionals():
a = A().create()
b = B().create(a=a)
c = C().create(a=a, b=b)
d = D().create(a=a, b=b)
e = E().create(c=c, d=d)
h = H().create(a=a, e=e)
desired = set([a, b, c, d, e, h])
assert set(has_create.all_instantiated_dependencies(h, A, B, C, D, E, F, G, H)) == desired
def test_all_instantiated_dependencies_optionals_are_ordered():
a = A().create()
b = B().create(a=a)
c = C().create(a=a, b=b)
d = D().create(a=a, b=b)
e = E().create(c=c, d=d)
h = H().create(a=a, e=e)
desired = [a, b, c, d, e, h]
assert has_create.all_instantiated_dependencies(h, A, B, C, D, E, F, G, H) == desired
def test_dependency_resolution_complete():
h = H().create(a=True, e=True)
a = h._dependency_store[A]
e = h._dependency_store[E]
c = e._dependency_store[C]
d = e._dependency_store[D]
b = c._dependency_store[B]
for item in (h, a, e, d, c, b):
if item._dependency_store:
assert all(item._dependency_store.values()
), "{0} missing dependency: {0._dependency_store}".format(item)
assert a == b._dependency_store[A], "Duplicate dependency detected"
assert a == c._dependency_store[A], "Duplicate dependency detected"
assert a == d._dependency_store[A], "Duplicate dependency detected"
assert b == c._dependency_store[B], "Duplicate dependency detected"
assert b == d._dependency_store[B], "Duplicate dependency detected"
def test_ds_mapping():
h = H().create(a=True, e=True)
a = h._dependency_store[A]
e = h._dependency_store[E]
c = e._dependency_store[C]
d = e._dependency_store[D]
b = c._dependency_store[B]
assert a == h.ds.a
assert e == h.ds.e
assert c == e.ds.c
assert d == e.ds.d
assert b == c.ds.b
def test_ds_multiple_word_class_and_attribute_name():
amwcn = AnotherMultipleWordClassName().create(multiple_word_class_name=True)
mwcn = amwcn._dependency_store[MultipleWordClassName]
assert amwcn.ds.multiple_word_class_name == mwcn
def test_ds_missing_dependency():
a = A().create()
with pytest.raises(AttributeError):
a.ds.b
def test_teardown_calls_silent_cleanup():
g = G().create(f=True, e=True)
f = g._dependency_store[F]
e = g._dependency_store[E]
b = f._dependency_store[B]
d = e._dependency_store[D]
c = e._dependency_store[C]
a = c._dependency_store[A]
instances = [g, f, e, b, d, c, a]
for instance in instances:
assert not instance.cleaned
g.teardown()
for instance in instances:
assert instance.cleaned
def test_teardown_dependency_store_cleared():
g = G().create(f=True, e=True)
f = g._dependency_store[F]
e = g._dependency_store[E]
b = f._dependency_store[B]
d = e._dependency_store[D]
c = e._dependency_store[C]
a = c._dependency_store[A]
g.teardown()
assert not g._dependency_store[F]
assert not g._dependency_store[E]
assert not f._dependency_store[B]
assert not e._dependency_store[D]
assert not e._dependency_store[C]
assert not c._dependency_store[A]
def test_idempotent_teardown_dependency_store_cleared():
g = G().create(f=True, e=True)
f = g._dependency_store[F]
e = g._dependency_store[E]
b = f._dependency_store[B]
d = e._dependency_store[D]
c = e._dependency_store[C]
a = c._dependency_store[A]
for item in (g, f, e, b, d, c, a):
item.teardown()
item.teardown()
assert not g._dependency_store[F]
assert not g._dependency_store[E]
assert not f._dependency_store[B]
assert not e._dependency_store[D]
assert not e._dependency_store[C]
assert not c._dependency_store[A]
def test_teardown_ds_cleared():
g = G().create(f=True, e=True)
f = g._dependency_store[F]
e = g._dependency_store[E]
b = f._dependency_store[B]
d = e._dependency_store[D]
c = e._dependency_store[C]
a = c._dependency_store[A]
g.teardown()
for former_dep in ('f', 'e'):
with pytest.raises(AttributeError):
getattr(g.ds, former_dep)
with pytest.raises(AttributeError):
getattr(f.ds, 'b')
for former_dep in ('d', 'c'):
with pytest.raises(AttributeError):
getattr(e.ds, former_dep)
with pytest.raises(AttributeError):
getattr(c.ds, 'a')
class OneWithArgs(MockHasCreate):
def create(self, **kw):
self.kw = kw
return self
class TwoWithArgs(MockHasCreate):
dependencies = [OneWithArgs]
def create(self, one_with_args=OneWithArgs, **kw):
if not one_with_args and kw.pop('make_one_with_args', False):
one_with_args = (OneWithArgs, dict(a='a', b='b', c='c'))
self.create_and_update_dependencies(one_with_args)
self.kw = kw
return self
class ThreeWithArgs(MockHasCreate):
dependencies = [OneWithArgs]
optional_dependencies = [TwoWithArgs]
def create(self, one_with_args=OneWithArgs, two_with_args=None, **kw):
self.create_and_update_dependencies(*filter_by_class((one_with_args, OneWithArgs),
| |
The size of the image to generate in [Nx, Ny, Nz] where N is
the number of voxels in each direction. For a 2D image, use
[Nx, Ny].
r : int
The radius of spheres (circles) in the packing.
spacing : int or List[int]
The spacing between unit cells. If the spacing is too small then
spheres may overlap. If an ``int`` is given it will be applied
in all directions, while a list of ``int`` will be interpreted
to apply along each axis.
offset : int or List[int]
The amount offset to add between sphere centers and the edges of
the image. A single ``int`` will be applied in all directions,
while a list of ``int`` will be interpreted to apply along each
axis.
smooth : bool, default=True
If ``True`` (default) the outer extremities of the sphere will
not have the little bumps on each face.
lattice : str
Specifies the type of lattice to create. Options are:
'sc'
Simple Cubic (default)
'fcc'
Face Centered Cubic
'bcc'
Body Centered Cubic
Returns
-------
image : ndarray
A boolean array with ``True`` values denoting the pore space.
Notes
-----
For 2D images, 'sc' gives a square lattice and both 'fcc' and
'bcc' give a triangular lattice.
Examples
--------
`Click here
<https://porespy.org/examples/generators/howtos/lattice_spheres.html>`_
to view online example.
"""
logger.debug(f"Generating {lattice} lattice")
shape = np.array(shape)
im = np.zeros(shape, dtype=bool)
# Parse lattice type
lattice = lattice.lower()
if im.ndim == 2:
if lattice in ['sc', 'square', 'cubic', 'simple cubic']:
lattice = 'sq'
elif lattice in ['tri', 'triangular']:
lattice = 'tri'
else:
raise Exception(f'Unrecognized mode: {lattice}')
else:
if lattice in ['sc', 'cubic', 'simple cubic']:
lattice = 'sc'
elif lattice in ['bcc', 'body centered cubic']:
lattice = 'bcc'
elif lattice in ['fcc', 'face centered cubic']:
lattice = 'fcc'
else:
raise Exception(f'Unrecognized mode: {lattice}')
# Parse offset and spacing args
if spacing is None:
spacing = 2*r
if isinstance(spacing, int):
spacing = [spacing]*im.ndim
if offset is None:
offset = r
if isinstance(offset, int):
offset = [offset]*im.ndim
if lattice == 'sq':
im[offset[0]::spacing[0],
offset[1]::spacing[1]] = True
elif lattice == 'tri':
im[offset[0]::spacing[0],
offset[1]::spacing[1]] = True
im[offset[0]+int(spacing[0]/2)::spacing[0],
offset[1]+int(spacing[1]/2)::spacing[1]] = True
elif lattice == 'sc':
im[offset[0]::spacing[0],
offset[1]::spacing[1],
offset[2]::spacing[2]] = True
elif lattice == 'bcc':
im[offset[0]::spacing[0],
offset[1]::spacing[1],
offset[2]::spacing[2]] = True
im[offset[0]+int(spacing[0]/2)::spacing[0],
offset[1]+int(spacing[1]/2)::spacing[1],
offset[2]+int(spacing[2]/2)::spacing[2]] = True
elif lattice == 'fcc':
im[offset[0]::spacing[0],
offset[1]::spacing[1],
offset[2]::spacing[2]] = True
# xy-plane
im[offset[0]+int(spacing[0]/2)::spacing[0],
offset[1]+int(spacing[1]/2)::spacing[1],
offset[2]::spacing[2]] = True
# xz-plane
im[offset[0]+int(spacing[0]/2)::spacing[0],
offset[1]::spacing[1],
offset[2]+int(spacing[2]/2)::spacing[2]] = True
# yz-plane
im[offset[0]::spacing[0],
offset[1]+int(spacing[1]/2)::spacing[1],
offset[2]+int(spacing[2]/2)::spacing[2]] = True
if smooth:
im = ~(edt(~im) < r)
else:
im = ~(edt(~im) <= r)
return im
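def _demo_lattice_spheres():  # pragma: no cover
    # Illustrative usage sketch only. The generator defined above is assumed
    # to be this module's ``lattice_spheres(shape, r, spacing=None,
    # offset=None, smooth=True, lattice='sc')`` (the name is taken from the
    # example URL in its docstring).
    # 2D square lattice: circles of radius 5 with centers 15 voxels apart.
    im_sq = lattice_spheres(shape=[100, 100], r=5, spacing=15, lattice='sc')
    # 3D face-centered-cubic packing; smooth=True removes the small bumps on
    # the outer faces of the image.
    im_fcc = lattice_spheres(shape=[80, 80, 80], r=6, spacing=24,
                             lattice='fcc')
    print('square lattice porosity:', im_sq.sum() / im_sq.size)
    print('fcc lattice porosity:', im_fcc.sum() / im_fcc.size)
    return im_sq, im_fcc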
def overlapping_spheres(shape: List[int],
r: int,
porosity: float,
maxiter: int = 10,
tol: float = 0.01):
r"""
Generate a packing of overlapping mono-disperse spheres
Parameters
----------
shape : list
The size of the image to generate in [Nx, Ny, Nz] where Ni is the
number of voxels in the i-th direction.
r : scalar
The radius of spheres in the packing.
porosity : scalar
The porosity of the final image, accurate to the given tolerance.
maxiter : int
Maximum number of iterations for the iterative algorithm that improves
the porosity of the final image to match the given value.
tol : float
Tolerance for porosity of the final image compared to the given value.
Returns
-------
image : ndarray
A boolean array with ``True`` values denoting the pore space
Notes
-----
This method can also be used to generate a dispersion of hollows by
treating ``porosity`` as solid volume fraction and inverting the
returned image.
Examples
--------
`Click here
<https://porespy.org/examples/generators/howtos/overlapping_spheres.html>`_
to view online example.
"""
shape = np.array(shape)
if np.size(shape) == 1:
shape = np.full((3, ), int(shape))
ndim = (shape != 1).sum()
s_vol = ps_disk(r).sum() if ndim == 2 else ps_ball(r).sum()
bulk_vol = np.prod(shape)
N = int(np.ceil((1 - porosity) * bulk_vol / s_vol))
im = np.random.random(size=shape)
# Helper functions for calculating porosity: phi = g(f(N))
def f(N):
return edt(im > N / bulk_vol) < r
def g(im):
r"""Returns fraction of 0s, given a binary image"""
return 1 - im.sum() / np.prod(shape)
# # Newton's method for getting the image porosity to match the given value
# w = 1.0 # Damping factor
# dN = 5 if ndim == 2 else 25 # Perturbation
# for i in range(maxiter):
# err = g(f(N)) - porosity
# d_err = (g(f(N+dN)) - g(f(N))) / dN
# if d_err == 0:
# break
# if abs(err) <= tol:
# break
# N2 = N - int(err/d_err) # xnew = xold - f/df
# N = w * N2 + (1-w) * N
# Bisection search: the initial N always undershoots (because the spheres overlap)
N_low, N_high = N, 4 * N
for i in range(maxiter):
N = np.mean([N_high, N_low], dtype=int)
err = g(f(N)) - porosity
if err > 0:
N_low = N
else:
N_high = N
if abs(err) <= tol:
break
return ~f(N)
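def _demo_overlapping_spheres():  # pragma: no cover
    # Illustrative usage sketch only: request a 2D packing of overlapping
    # disks at 60 % porosity and report how close the bisection search above
    # gets to the target (it should land within ``tol`` unless ``maxiter``
    # is exhausted first).
    im = overlapping_spheres(shape=[200, 200], r=8, porosity=0.6)
    achieved = im.sum() / im.size
    print(f'target porosity 0.600, achieved {achieved:.3f}')
    return im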
def blobs(shape: List[int], porosity: float = 0.5, blobiness: int = 1,
divs: int = 1):
"""
Generates an image containing amorphous blobs
Parameters
----------
shape : list
The size of the image to generate in [Nx, Ny, Nz] where N is the
number of voxels in each direction.
porosity : float
If specified, this will threshold the image to the specified value
prior to returning. If ``None`` is specified, then the scalar
noise field is converted to a uniform distribution and returned
without thresholding.
blobiness : int or list of ints (default = 1)
Controls the morphology of the blobs. A higher number results in
a larger number of small blobs. If a list is supplied then the
blobs are anisotropic.
divs : int or array_like
The number of times to divide the image for parallel processing.
If ``1`` then parallel processing does not occur. ``2`` is
equivalent to ``[2, 2, 2]`` for a 3D image. The number of cores
used is specified in ``porespy.settings.ncores`` and defaults to
all cores.
Returns
-------
image : ndarray
A boolean array with ``True`` values denoting the pore space
See Also
--------
norm_to_uniform
Notes
-----
This function generates random noise, then applies a Gaussian blur to
the noise with a sigma controlled by the blobiness argument as:
$$ np.mean(shape) / (40 * blobiness) $$
The value of 40 was chosen so that a ``blobiness`` of 1 gave a
reasonable result.
Examples
--------
`Click here
<https://porespy.org/examples/generators/howtos/blobs.html>`_
to view online example.
"""
if isinstance(shape, int):
shape = [shape]*3
if len(shape) == 1:
shape = [shape[0]]*3
shape = np.array(shape)
if isinstance(blobiness, int):
blobiness = [blobiness]*len(shape)
blobiness = np.array(blobiness)
parallel = False
if isinstance(divs, int):
divs = [divs]*len(shape)
if max(divs) > 1:
parallel = True
logger.info(f'Performing {insp.currentframe().f_code.co_name} in parallel')
sigma = np.mean(shape) / (40 * blobiness)
im = np.random.random(shape)
if parallel:
overlap = max([int(s*4) for s in sigma])
im = ps.filters.chunked_func(func=spim.gaussian_filter,
input=im, sigma=sigma,
divs=divs, overlap=overlap)
else:
im = spim.gaussian_filter(im, sigma=sigma)
im = norm_to_uniform(im, scale=[0, 1])
if porosity:
im = im < porosity
return im
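def _demo_blobs():  # pragma: no cover
    # Illustrative usage sketch only: at a fixed porosity, a larger
    # ``blobiness`` shrinks sigma = mean(shape) / (40 * blobiness) and so
    # produces a larger number of smaller blobs.
    coarse = blobs(shape=[128, 128], porosity=0.5, blobiness=1)
    fine = blobs(shape=[128, 128], porosity=0.5, blobiness=3)
    print('coarse porosity:', coarse.sum() / coarse.size)
    print('fine porosity:', fine.sum() / fine.size)
    return coarse, fine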
def _cylinders(shape: List[int],
r: int,
ncylinders: int,
phi_max: float = 0,
theta_max: float = 90,
length: float = None,
verbose: bool = True):
r"""
Generates a binary image of overlapping cylinders.
This is a good approximation of a fibrous mat.
Parameters
----------
shape : list
The size of the image to generate in [Nx, Ny, Nz] where N is the
number of voxels. 2D images are not permitted.
r : int
The radius of the cylinders in voxels
ncylinders : int
The number of cylinders to add to the domain. Adjust this value to
control the final porosity, which is not easily specified since
cylinders overlap and intersect different fractions of the domain.
phi_max : float
A value between 0 and 90 that controls the amount that the
cylinders lie *out of* the XY plane, with 0 meaning all cylinders
lie in the XY plane, and 90 meaning that cylinders are randomly
oriented out of the plane by as much as | |
Specific pending changes are identified by subelements.
- **MasterUserPassword** *(string) --*
The pending or in-progress change of the master user password for the cluster.
- **NodeType** *(string) --*
The pending or in-progress change of the cluster's node type.
- **NumberOfNodes** *(integer) --*
The pending or in-progress change of the number of nodes in the cluster.
- **ClusterType** *(string) --*
The pending or in-progress change of the cluster type.
- **ClusterVersion** *(string) --*
The pending or in-progress change of the service version.
- **AutomatedSnapshotRetentionPeriod** *(integer) --*
The pending or in-progress change of the automated snapshot retention period.
- **ClusterIdentifier** *(string) --*
The pending or in-progress change of the new identifier for the cluster.
- **PubliclyAccessible** *(boolean) --*
The pending or in-progress change of the ability to connect to the cluster from the public network.
- **EnhancedVpcRouting** *(boolean) --*
An option that specifies whether to create the cluster with enhanced VPC routing enabled. To create a cluster that uses enhanced VPC routing, the cluster must be in a VPC. For more information, see `Enhanced VPC Routing <https://docs.aws.amazon.com/redshift/latest/mgmt/enhanced-vpc-routing.html>`__ in the Amazon Redshift Cluster Management Guide.
If this option is ``true`` , enhanced VPC routing is enabled.
Default: false
- **MaintenanceTrackName** *(string) --*
The name of the maintenance track that the cluster will change to during the next maintenance window.
- **EncryptionType** *(string) --*
The encryption type for a cluster. Possible values are: KMS and None. For the China region, the possible values are None and Legacy.
- **ClusterVersion** *(string) --*
The version ID of the Amazon Redshift engine that is running on the cluster.
- **AllowVersionUpgrade** *(boolean) --*
A boolean value that, if ``true`` , indicates that major version upgrades will be applied automatically to the cluster during the maintenance window.
- **NumberOfNodes** *(integer) --*
The number of compute nodes in the cluster.
- **PubliclyAccessible** *(boolean) --*
A boolean value that, if ``true`` , indicates that the cluster can be accessed from a public network.
- **Encrypted** *(boolean) --*
A boolean value that, if ``true`` , indicates that data in the cluster is encrypted at rest.
- **RestoreStatus** *(dict) --*
A value that describes the status of a cluster restore action. This parameter returns null if the cluster was not created by restoring a snapshot.
- **Status** *(string) --*
The status of the restore action. Returns starting, restoring, completed, or failed.
- **CurrentRestoreRateInMegaBytesPerSecond** *(float) --*
The number of megabytes per second being transferred from the backup storage. Returns the average rate for a completed backup.
- **SnapshotSizeInMegaBytes** *(integer) --*
The size of the set of snapshot data used to restore the cluster.
- **ProgressInMegaBytes** *(integer) --*
The number of megabytes that have been transferred from snapshot storage.
- **ElapsedTimeInSeconds** *(integer) --*
The amount of time an in-progress restore has been running, or the amount of time it took a completed restore to finish.
- **EstimatedTimeToCompletionInSeconds** *(integer) --*
The estimate of the time remaining before the restore will complete. Returns 0 for a completed restore.
- **DataTransferProgress** *(dict) --*
- **Status** *(string) --*
Describes the status of the cluster. While the transfer is in progress the status is ``transferringdata`` .
- **CurrentRateInMegaBytesPerSecond** *(float) --*
Describes the data transfer rate in megabytes per second.
- **TotalDataInMegaBytes** *(integer) --*
Describes the total amount of data to be transferred in megabytes.
- **DataTransferredInMegaBytes** *(integer) --*
Describes the total amount of data that has been transferred in megabytes.
- **EstimatedTimeToCompletionInSeconds** *(integer) --*
Describes the estimated number of seconds remaining to complete the transfer.
- **ElapsedTimeInSeconds** *(integer) --*
Describes the number of seconds that have elapsed during the data transfer.
- **HsmStatus** *(dict) --*
A value that reports whether the Amazon Redshift cluster has finished applying any hardware security module (HSM) settings changes specified in a modify cluster command.
Values: active, applying
- **HsmClientCertificateIdentifier** *(string) --*
Specifies the name of the HSM client certificate the Amazon Redshift cluster uses to retrieve the data encryption keys stored in an HSM.
- **HsmConfigurationIdentifier** *(string) --*
Specifies the name of the HSM configuration that contains the information the Amazon Redshift cluster can use to retrieve and store keys in an HSM.
- **Status** *(string) --*
Reports whether the Amazon Redshift cluster has finished applying any HSM settings changes specified in a modify cluster command.
Values: active, applying
- **ClusterSnapshotCopyStatus** *(dict) --*
A value that returns the destination region and retention period that are configured for cross-region snapshot copy.
- **DestinationRegion** *(string) --*
The destination region that snapshots are automatically copied to when cross-region snapshot copy is enabled.
- **RetentionPeriod** *(integer) --*
The number of days that automated snapshots are retained in the destination region after they are copied from a source region.
- **ManualSnapshotRetentionPeriod** *(integer) --*
The number of days that automated snapshots are retained in the destination region after they are copied from a source region. If the value is -1, the manual snapshot is retained indefinitely.
The value must be either -1 or an integer between 1 and 3,653.
- **SnapshotCopyGrantName** *(string) --*
The name of the snapshot copy grant.
- **ClusterPublicKey** *(string) --*
The public key for the cluster.
- **ClusterNodes** *(list) --*
The nodes in the cluster.
- *(dict) --*
The identifier of a node in a cluster.
- **NodeRole** *(string) --*
Whether the node is a leader node or a compute node.
- **PrivateIPAddress** *(string) --*
The private IP address of a node within a cluster.
- **PublicIPAddress** *(string) --*
The public IP address of a node within a cluster.
- **ElasticIpStatus** *(dict) --*
The status of the elastic IP (EIP) address.
- **ElasticIp** *(string) --*
The elastic IP (EIP) address for the cluster.
- **Status** *(string) --*
The status of the elastic IP (EIP) address.
- **ClusterRevisionNumber** *(string) --*
The specific revision number of the database in the cluster.
- **Tags** *(list) --*
The list of tags for the cluster.
- *(dict) --*
A tag consisting of a name/value pair for a resource.
- **Key** *(string) --*
The key, or name, for the resource tag.
- **Value** *(string) --*
The value for the resource tag.
- **KmsKeyId** *(string) --*
The AWS Key Management Service (AWS KMS) key ID of the encryption key used to encrypt data in the cluster.
- **EnhancedVpcRouting** *(boolean) --*
An option that specifies whether to create the cluster with enhanced VPC routing enabled. To create a cluster that uses enhanced VPC routing, the cluster must be in a VPC. For more information, see `Enhanced VPC Routing <https://docs.aws.amazon.com/redshift/latest/mgmt/enhanced-vpc-routing.html>`__ in the Amazon Redshift Cluster Management Guide.
If this option is ``true`` , enhanced VPC routing is enabled.
Default: false
- **IamRoles** *(list) --*
A list of AWS Identity and Access Management (IAM) roles that can be used by the cluster to access other AWS services.
- *(dict) --*
An AWS Identity and Access Management (IAM) role that can be used by the associated Amazon Redshift cluster to access other AWS services.
- **IamRoleArn** *(string) --*
The Amazon Resource Name (ARN) of the IAM role, for example, ``arn:aws:iam::123456789012:role/RedshiftCopyUnload`` .
- **ApplyStatus** *(string) --*
A value that describes the status of the IAM role's association with an Amazon Redshift cluster.
The following are possible statuses and descriptions.
* ``in-sync`` : The role is available for use by the cluster.
* ``adding`` : The role is in the process of being associated with the cluster.
* ``removing`` : The role |