to cont')
return img_arr
def fadeout(img_arr, bb_x1y1x2y2, gc_img):
    '''
    take img, gc img, and bb; build a float mask that is 1 inside the bb and fades out above it.
    (stub - the full fade logic is inlined in grabcut_bb below, gc_img is not used here yet)
    :param img_arr:
    :param bb_x1y1x2y2:
    :param gc_img:
    :return: float fade mask with the same h/w as img_arr
    '''
    fade = np.zeros(img_arr.shape[:2], dtype=np.float64)
    fade[bb_x1y1x2y2[1]:bb_x1y1x2y2[3], bb_x1y1x2y2[0]:bb_x1y1x2y2[2]] = 1.0
    if bb_x1y1x2y2[1] > 0:  #linear ramp from 0 at the top edge of the image to 1 at the top of the bb
        ramp = np.linspace(0.0, 1.0, num=bb_x1y1x2y2[1], endpoint=False)
        fade[0:bb_x1y1x2y2[1], :] = ramp[:, np.newaxis]
    return fade
def grabcut_bb(img_arr,bb_x1y1x2y2,visual_output=False,clothing_type=None):
'''
    grabcut with a subsection of the bb as fg, the outer border of the image as bg,
    pr_bg outside the bb and pr_fg from the bb down to the subsection,
    then kill anything outside of the bb.
    Anything that is pure white or black also gets marked as pr_bg.
    Returns the mask and the grabcut image.
:param img_arr:
:param bb_x1y1x2y2:
:return:
'''
orig_arr = copy.copy(img_arr)
    labels = ['bg','fg','prbg','prfg'] #order matches the cv2 values GC_BGD, GC_FGD, GC_PR_BGD, GC_PR_FGD
bgdmodel = np.zeros((1, 65), np.float64)
fgdmodel = np.zeros((1, 65), np.float64)
mask = np.zeros(img_arr.shape[:2], np.uint8)
h,w = img_arr.shape[0:2]
#start with everything bg
mask[:,:] = cv2.GC_BGD
#big box (except for outer margin ) is pr_bg
pr_bg_frac = 0.05
pr_bg_margin_ud= int(pr_bg_frac*(h))
pr_bg_margin_lr= int(pr_bg_frac*(w))
mask[pr_bg_margin_ud:h-pr_bg_margin_ud,pr_bg_margin_lr:w-pr_bg_margin_lr] = cv2.GC_PR_BGD
    #prevent masks from adding together by doing boolean or
nprbgd = np.sum(mask==cv2.GC_PR_BGD)
print('after bigbox '+str(nprbgd))
# cv2.imwrite('perimeter.jpg',img_arr)
# imutils.count_values(mask,labels=labels)
# imutils.show_mask_with_labels(mask,labels,visual_output=True)
#everything in bb+margin is pr_fgd
    pr_fg_frac = 0.0
    pr_fg_margin_ud = int(pr_fg_frac*(bb_x1y1x2y2[3]-bb_x1y1x2y2[1]))
    pr_fg_margin_lr = int(pr_fg_frac*(bb_x1y1x2y2[2]-bb_x1y1x2y2[0]))  #margins around the bb (currently unused since the fraction is 0)
mask[bb_x1y1x2y2[1]:bb_x1y1x2y2[3],bb_x1y1x2y2[0]:bb_x1y1x2y2[2]] = cv2.GC_PR_FGD
# print('after middlebox '+str(nprbgd))
# imutils.count_values(mask,labels)
# imutils.show_mask_with_labels(mask,labels,visual_output=True)
    #everything in a small box within the bb is fg (unless it's an upper_cover, in which case it's only probably fg -
    #maybe it's a coat over a shirt and the shirt is visible)
center_frac=0.1
side_frac = 0.1
side_margin= int(side_frac*(bb_x1y1x2y2[3]-bb_x1y1x2y2[1]))
upper_margin=int(center_frac*(bb_x1y1x2y2[3]-bb_x1y1x2y2[1]))
lower_margin=int(center_frac*(bb_x1y1x2y2[3]-bb_x1y1x2y2[1]))
    center_y=(bb_x1y1x2y2[1]+bb_x1y1x2y2[3])//2  #integer division so the slice indices below stay ints
    center_x=(bb_x1y1x2y2[0]+bb_x1y1x2y2[2])//2
top=max(0,center_y-upper_margin)
bottom=min(h,center_y+lower_margin)
left = max(0,center_x-side_margin)
right = min(w,center_x+side_margin)
print('fg box t {} b {} l {} r {}'.format(top,bottom,left,right))
    if top>bottom:
        top, bottom = bottom, top
if clothing_type == 'upper_cover':
mask[top:bottom,left:right] = cv2.GC_PR_FGD
else:
mask[top:bottom,left:right] = cv2.GC_FGD
# print('after innerbox ')
# imutils.count_values(mask,labels)
# imutils.show_mask_with_labels(mask,['bg','fg','prbg','prfg'],visual_output=True)
# print('unqies '+str(np.unique(mask)))
#add white and black vals as pr bgd
whitevals = cv2.inRange(img_arr,np.array([254,254,254]),np.array([255,255,255]))
mask[np.array(whitevals)!=0]=cv2.GC_PR_BGD
#fmi this could also be done with whitevals= (img_arr==[255,255,255]).all(-1))
blackvals = cv2.inRange(img_arr,np.array([0,0,0]),np.array([1,1,1]))
mask[np.array(blackvals)!=0]=cv2.GC_PR_BGD
nprbgd = np.sum(mask==cv2.GC_PR_BGD)
# print('after blackwhite ')
# imutils.count_values(mask,labels)
# imutils.show_mask_with_labels(mask,labels,visual_output=True)
logging.debug('imgarr shape b4r gc '+str(img_arr.shape))
rect = (bb_x1y1x2y2[0],bb_x1y1x2y2[1],bb_x1y1x2y2[2],bb_x1y1x2y2[3])
try:
#TODO - try more than 1 grabcut call in itr
itr = 1
cv2.grabCut(img=img_arr,mask=mask, rect=rect,bgdModel= bgdmodel,fgdModel= fgdmodel,iterCount= itr, mode=cv2.GC_INIT_WITH_MASK)
except:
print('grabcut exception ')
return img_arr
    #kill anything not in the grabcut foreground
mask2 = np.where((mask==2)|(mask==0),0,1).astype('uint8') ##0 and 2 are bgd and pr_bgd
#kill anything out of bb (except head)
# mask2[:bb_x1y1x2y2[1],0:w]=0 #top
mask2[bb_x1y1x2y2[3]:,0:w]=0 #bottom
mask2[0:h,0:bb_x1y1x2y2[0]]=0 #left
mask2[0:h,bb_x1y1x2y2[2]:w]=0 #right
img_arr = img_arr*mask2[:,:,np.newaxis]
    fadeout = np.zeros([h,w],dtype=np.float64)  #np.float was deprecated/removed in newer numpy
fadeout[bb_x1y1x2y2[1]:bb_x1y1x2y2[3],bb_x1y1x2y2[0]:bb_x1y1x2y2[2]]=1.0
# fadeout[0:bb_x1y1x2y2[3],bb_x1y1x2y2[0]:bb_x1y1x2y2[2]]=1.0
fadefrac = 0.1
fade_dist_ud = int(fadefrac*(bb_x1y1x2y2[3]-bb_x1y1x2y2[1]))
fade_dist_rl = int(fadefrac*(bb_x1y1x2y2[2]-bb_x1y1x2y2[0]))
fadevec = np.arange(start=0,stop=1,step=1.0/fade_dist_ud)
fademat = np.tile(fadevec,(bb_x1y1x2y2[2]-bb_x1y1x2y2[0],1))
fademat=fademat.transpose()
fadeout[bb_x1y1x2y2[1]:bb_x1y1x2y2[1]+fade_dist_ud,bb_x1y1x2y2[0]:bb_x1y1x2y2[2]]=fademat #top
fadeout[bb_x1y1x2y2[3]-fade_dist_ud:bb_x1y1x2y2[3],bb_x1y1x2y2[0]:bb_x1y1x2y2[2]]=(1-fademat) #bottom
fadevec = np.arange(start=0,stop=1,step=1.0/fade_dist_rl)
fademat = np.tile(fadevec,(bb_x1y1x2y2[3]-bb_x1y1x2y2[1],1))
fadeout[bb_x1y1x2y2[1]:bb_x1y1x2y2[3],bb_x1y1x2y2[0]:bb_x1y1x2y2[0]+fade_dist_rl]=fadeout[bb_x1y1x2y2[1]:bb_x1y1x2y2[3],bb_x1y1x2y2[0]:bb_x1y1x2y2[0]+fade_dist_rl]*fademat
#np.maximum(fadeout[bb_x1y1x2y2[1]:bb_x1y1x2y2[3],bb_x1y1x2y2[0]-fade_dist_rl:bb_x1y1x2y2[0]],fademat)
fadeout[bb_x1y1x2y2[1]:bb_x1y1x2y2[3],bb_x1y1x2y2[2]-fade_dist_rl:bb_x1y1x2y2[2]]= fadeout[bb_x1y1x2y2[1]:bb_x1y1x2y2[3],bb_x1y1x2y2[2]-fade_dist_rl:bb_x1y1x2y2[2]] * (1-fademat)
#=np.maximum(fadeout[bb_x1y1x2y2[1]:bb_x1y1x2y2[3],bb_x1y1x2y2[0]-fade_dist_rl:bb_x1y1x2y2[0]],(1-fademat))
skin_index = constants.pixlevel_categories_v3.index('skin')
skin_mask = kassper.skin_detection_fast(orig_arr) * 255
if visual_output:
cv2.imshow('skin',skin_mask)
cv2.waitKey(0)
fadeout = np.where(skin_mask!=0,skin_mask,fadeout)
# mask2 = np.where(skin_mask!=0,constants.pixlevel_categories_v3.index('skin'),mask2)
# cv2.imshow('fade',fadeout)
# cv2.waitKey(0)
# mask2[:bb_x1y1x2y2[1],0:w]=0 #top
# mask2[bb_x1y1x2y2[3]:,0:w]=0 #bottom
# mask2[0:h,0:bb_x1y1x2y2[0]]=0 #left
# mask2[0:h,bb_x1y1x2y2[2]:w]=0 #right
# img_arr = img_arr*mask2[:,:,np.newaxis]
#can use img_arr (after gc) here instead of orig_arr
dofade=False
if dofade:
img_arr = (orig_arr*fadeout[:,:,np.newaxis]).astype('uint8')
# cv2.imshow('after orig*fadeout',img_arr)
img_arr = np.where(skin_mask[:,:,np.newaxis]!=0,orig_arr,img_arr)
# cv2.imshow('after skin add',img_arr)
# cv2.waitKey(0)
# negmask = np.where(mask2==0,1,0).astype('uint8')
# imutils.show_mask_with_labels(negmask,['0','1','2','3'])
# # fadeout = fadeout/255.0 #this was defined as float so its ok
fillval = np.mean(orig_arr[0:20,0:20],axis=(0,1))
print('fillval '+str(fillval))
bgnd_arr = np.zeros_like(orig_arr).astype('uint8')
bgnd_arr[:,:]=fillval
# bgnd_arr = np.where(fadeout!=0,(fadeout[:,:,np.newaxis]*bgnd_arr),bgnd_arr) #+orig_arr*(fadeout[:,:,np.newaxis]).astype('uint8')
img_arr = np.where(img_arr==0,bgnd_arr,img_arr)
# cv2.imshow('bgnd arr',bgnd_arr)
# cv2.waitKey(0)
if(visual_output):
# plt.imshow(img),plt.colorbar(),plt.show()
cv2.imshow('after gc',img_arr)
cv2.waitKey(0)
logging.debug('imgarr shape after gc '+str(img_arr.shape))
return mask2,img_arr
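# Illustrative usage sketch for grabcut_bb() above (added for clarity; the file name
# and box coordinates are assumptions, not values from this repo, and cv2/np come from
# this module's imports). The function expects a BGR image array and an (x1, y1, x2, y2)
# box that lies inside it.
def _example_grabcut_bb_usage(image_path='person.jpg', bb=(50, 30, 250, 400)):
    img = cv2.imread(image_path)
    if img is None:
        return None
    # normal path returns (mask, image); the exception path returns only the image
    result = grabcut_bb(img, bb, visual_output=False, clothing_type=None)
    return result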
def dir_of_catalog_images_to_pixlevel(catalog_images_dir='/media/jeremy/9FBD-1B00/data/jeremy/image_dbs/mongo/amazon_us_female/dress',
swatch_bgnds_dir='/media/jeremy/9FBD-1B00/data/jeremy/image_dbs/tg/backgrounds/textures/kept',
person_bgnds_dir='/media/jeremy/9FBD-1B00/data/jeremy/image_dbs/tg/backgrounds/street_scenes/kept',
destination_img_dir = '/media/jeremy/9FBD-1B00/data/jeremy/image_dbs/mongo/amazon_us_female/dress_images',
destination_label_dir = '/media/jeremy/9FBD-1B00/data/jeremy/image_dbs/mongo/amazon_us_female/dress_labels',
manual_oversight=False):
files = [os.path.join(catalog_images_dir,f) for f in os.listdir(catalog_images_dir)]
human_bgnds = [os.path.join(person_bgnds_dir,f) for f in os.listdir(person_bgnds_dir)]
inhuman_bgnds = [os.path.join(swatch_bgnds_dir,f) for f in os.listdir(swatch_bgnds_dir)]
dress_index = constants.pixlevel_categories_v3.index('whole_body_items')
Utils.ensure_dir(destination_img_dir)
Utils.ensure_dir(destination_label_dir)
n=0
n_tot = len(files)
for f in files:
n=n+1
print('doing {}/{} {}'.format(n,n_tot,f))
img_arr = cv2.imread(f)
if img_arr is None:
print('got none for {}'.format(f))
continue
human_bgnd = Utils.get_cv2_img_array(random.choice(human_bgnds))
inhuman_bgnd = Utils.get_cv2_img_array(random.choice(inhuman_bgnds))
logging.debug('sizes: {} human bgnd {} inbgnd {}'.format(img_arr.shape,human_bgnd.shape,inhuman_bgnd.shape))
dest_imagesize=(300,300) #chosen to get figures to fit into bgnd - bgnd resized, figure not (margin added instead)
human_bgnd = cv2.resize(human_bgnd,dest_imagesize) #dont worry about warping just fill all image
inhuman_bgnd = cv2.resize(inhuman_bgnd,dest_imagesize)
img_arr = imutils.resize_by_adding_border(img_arr,output_size=dest_imagesize)
mask,img = image_to_pixlevel_no_bb(img_arr,clothing_indices=[dress_index],human_bgd = human_bgnd,inhuman_bgnd = inhuman_bgnd,visual_output=False)
save = True
if manual_oversight:
imutils.show_mask_with_labels(mask,labels=constants.pixlevel_categories_v3,original_image=img,visual_output=True)
k=cv2.waitKey(0)
print('k='+str(k))
if save:
dest_imgname = os.path.join(destination_img_dir,os.path.basename(f))
cv2.imwrite(dest_imgname,img)
dest_lblname = os.path.join(destination_label_dir,os.path.basename(f)).replace('.jpg','.png')
cv2.imwrite(dest_lblname,mask)
print('wrote img to {} and label to {}'.format(dest_imgname,dest_lblname))
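# Hedged usage note (added; the directory names below are placeholders, not the author's
# paths): the batch converter above only needs a directory of catalog images plus two
# background directories, and writes image/label pairs to the destination dirs, e.g.
#
#   dir_of_catalog_images_to_pixlevel(
#       catalog_images_dir='/data/catalog/dress',
#       swatch_bgnds_dir='/data/backgrounds/textures',
#       person_bgnds_dir='/data/backgrounds/street_scenes',
#       destination_img_dir='/data/out/images',
#       destination_label_dir='/data/out/labels')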
def image_to_pixlevel_no_bb(img_arr,clothing_indices,visual_output=True,labels=constants.pixlevel_categories_v3,human_bgd=None,inhuman_bgnd=None):
'''
    grabcut using a likely-foreground box (face-derived if a face is found, otherwise a central box)
    as pr_fg, the outer border of the image as bg, and everything else as pr_bg.
    Anything that is pure white or black also gets marked as pr_bg.
    Returns the mask and the grabcut image.
    :param img_arr:
    :param clothing_indices: first is top, second is bottom, or the only entry is wholebody
:return:
'''
orig_arr = copy.copy(img_arr)
    gc_mask_labels = ['bg','fg','prbg','prfg'] #order matches the cv2 values GC_BGD, GC_FGD, GC_PR_BGD, GC_PR_FGD
bgdmodel = np.zeros((1, 65), np.float64)
fgdmodel = np.zeros((1, 65), np.float64)
gc_mask = np.zeros(img_arr.shape[:2], np.uint8) #gc_mask for gc with prFg etc
mask= np.zeros(img_arr.shape[:2],np.uint8) #mask(also a gc_mask) with item numbers
h,w = img_arr.shape[0:2]
#start with everything pr_bg
gc_mask[:,:] = cv2.GC_PR_BGD
#outermost box is _bg
bg_frac = 0.05
bg_margin_ud= int(bg_frac*(h))
bg_margin_lr= int(bg_frac*(w))
gc_mask[0:bg_margin_ud,:] = cv2.GC_BGD
gc_mask[h-bg_margin_ud:h,:] = cv2.GC_BGD
gc_mask[:,0:bg_margin_lr] = cv2.GC_BGD
gc_mask[:,w-bg_margin_lr:w] = cv2.GC_BGD
if visual_output:
imutils.show_mask_with_labels(gc_mask,labels,visual_output=True,original_image=img_arr)
    #prevent gc_masks from adding together by doing boolean or
nprbgd = np.sum(gc_mask==cv2.GC_PR_BGD)
logging.debug('after bigbox '+str(nprbgd))
    #see if there's a face (refno 1-501510371)
ff_cascade = background_removal.find_face_cascade(img_arr, max_num_of_faces=10)
likely_fg_bb = None
face = None
if ff_cascade['are_faces'] :
faces = ff_cascade['faces']
if faces == []:
            print('ff_cascade reported faces but gave none')
else:
face = background_removal.choose_faces(img_arr,faces,1)[0]
print('got a face: {}'.format(face))
extra_height=8#as measured in faces
extra_width=3
head_extra = face[2]/1.5
likely_fg_bb = [face[0]+face[2]/2-face[2]*extra_width/2,face[1]-head_extra,face[2]*extra_width,face[3]*extra_height]
if likely_fg_bb is None: #assume middle of image
top_margin=.10 #as measured in % of image height
bottom_margin=0.1
left_margin= 0.3
right_margin= 0.3
likely_fg_bb = [int(left_margin*w),int(top_margin*h),w*(1-(left_margin+right_margin)),h*(1-(top_margin+bottom_margin))]
logging.debug('pre-check likely fg bb:{} h {} w {} shape {} '.format(likely_fg_bb,h,w,img_arr.shape))
#make sure nothing out of bounds
likely_fg_bb=[max(likely_fg_bb[0],0),max(likely_fg_bb[1],0),max(likely_fg_bb[2],0),max(likely_fg_bb[3],0)]
likely_fg_bb=[min(likely_fg_bb[0],w),min(likely_fg_bb[1],h),min(likely_fg_bb[2],w-likely_fg_bb[0]),min(likely_fg_bb[3],h-likely_fg_bb[1])]
likely_fg_bb=[int(likely_fg_bb[0]),int(likely_fg_bb[1]),int(likely_fg_bb[2]),int(likely_fg_bb[3])]
logging.debug('likely fg bb:{}'.format(likely_fg_bb))
gc_mask[likely_fg_bb[1]:likely_fg_bb[1]+likely_fg_bb[3],likely_fg_bb[0]:likely_fg_bb[0]+likely_fg_bb[2]] = cv2.GC_PR_FGD
# print('after face/margins ')
# imutils.count_values(gc_mask,labels)
# imutils.show_mask_with_labels(gc_mask,gc_mask_labels,visual_output=True,original_image=img_arr)
# if clothing_type == 'upper_cover':
# gc_mask[top:bottom,left:right] = cv2.GC_PR_FGD
# else:
# gc_mask[top:bottom,left:right] = cv2.GC_FGD
logging.debug('after mainbox b4 blackwhite ')
# imutils.count_values(gc_mask,gc_mask_labels)
#add white and black vals as pr bgd
white_tolerance = 5 #anything from 255-this to 255 is called white bgnd
    black_tolerance = 5 #anything from 0 to this is called black bgnd
whitevals = cv2.inRange(img_arr,np.array([255-white_tolerance,255-white_tolerance,255-white_tolerance]),np.array([255,255,255]))
gc_mask[np.array(whitevals)!=0]=cv2.GC_PR_BGD
#fmi this could also be done with whitevals= (img_arr==[255,255,255]).all(-1))
blackvals = cv2.inRange(img_arr,np.array([0,0,0]),np.array([black_tolerance,black_tolerance,black_tolerance]))
gc_mask[np.array(blackvals)!=0]=cv2.GC_PR_BGD
# print('after blackwhite w {} b {}'.format(np.count_nonzero(whitevals),np.count_nonzero(blackvals)))
# imutils.count_values(gc_mask,gc_mask_labels)
# imutils.show_mask_with_labels(gc_mask,gc_mask_labels,visual_output=True,original_image=img_arr)
logging.debug('imgarr shape b4r gc '+str(img_arr.shape))
rect = (0,0,1,1)
try:
#TODO - try more than 1 grabcut call in itr
itr = 1
cv2.grabCut(img=img_arr,mask=gc_mask, rect=rect,bgdModel= bgdmodel,fgdModel= fgdmodel,iterCount= itr, mode=cv2.GC_INIT_WITH_MASK)
except:
print('grabcut exception '+str( sys.exc_info()[0]))
print(sys.exc_info())
print(sys.exc_info()[1])
return img_arr
gc_mask2 = np.where((gc_mask==2)|(gc_mask==0),0,1).astype('uint8') ##0 and 2 are bgd and pr_bgd
#kill anything out of bb (except head)
# gc_mask2[:bb_x1y1x2y2[1],0:w]=0 #top
# gc_mask2[bb_x1y1x2y2[3]:,0:w]=0 #bottom
# gc_mask2[0:h,0:bb_x1y1x2y2[0]]=0 #left
# gc_mask2[0:h,bb_x1y1x2y2[2]:w]=0 #right
img_arr = img_arr*gc_mask2[:,:,np.newaxis]
if visual_output:
cv2.imshow('right after gc',img_arr)
cv2.waitKey(0)
skin_index = constants.pixlevel_categories_v3.index('skin')
skin_tolerance = 1.0
if face is not None:
# skin_mask = kassper.skin_detection_fast(orig_arr) * 255 #sdfdsf
skin_mask = kassper.skin_detection_fast(orig_arr,face=face,tol=skin_tolerance) * 255
else:
skin_mask = kassper.skin_detection_fast(orig_arr,tol=skin_tolerance) * 255
# if visual_output:
# cv2.imshow('skin',skin_mask)
# cv2.waitKey(0)
#erode skin to eliminate 1x1 edges detected as skin
kernel = np.ones((2,2),np.uint8)
skin_mask = cv2.erode(skin_mask,kernel,iterations = 1)
skin_mask = cv2.dilate(skin_mask,kernel,iterations = 1)
if visual_output:
cv2.imshow('skin after erode/dilate',skin_mask)
cv2.waitKey(0)
gc_mask = np.where(skin_mask!=0,cv2.GC_FGD,gc_mask)
if visual_output:
imutils.show_mask_with_labels(gc_mask,gc_mask_labels,visual_output=True,original_image=img_arr)
img_arr = np.where(skin_mask[:,:,np.newaxis]!=0,orig_arr,img_arr)
    #take out white and black after gc too, since gc sometimes includes these
    #find pure-white and pure-black pixels and zero them out below
white_tolerance = 5 #anything from 255-this to 255 is called white bgnd
    black_tolerance = 5 #anything from 0 to this is called black bgnd
whitevals = cv2.inRange(img_arr,np.array([255-white_tolerance,255-white_tolerance,255-white_tolerance]),np.array([255,255,255]))
#fmi this could also be done with whitevals= (img_arr==[255,255,255]).all(-1))
blackvals = cv2.inRange(img_arr,np.array([0,0,0]),np.array([black_tolerance,black_tolerance,black_tolerance]))
img_arr = np.where(whitevals[:,:,np.newaxis]!=0 ,0,img_arr)
img_arr = np.where(blackvals[:,:,np.newaxis]!=0 ,0,img_arr)
if visual_output:
cv2.imshow('img after skin',img_arr)
cv2.waitKey(0)
    #get rid of the outermost pixels, they seem to wind up white a lot of the time
kernel = np.ones((1,1),np.uint8)
    current_nonzero = np.where(img_arr!=0,1,0)[:,:,0].astype(dtype=np.uint8) #maybe there's a better way but this works and is easy to remember - 1st chan of the nonzero arr
logging.debug('n before erode:{} mask {} size {}'.format(np.count_nonzero(img_arr),np.count_nonzero(current_nonzero),current_nonzero.shape))
current_nonzero = cv2.erode(current_nonzero,kernel,iterations = 1)
img_arr | |
Axe+5",
702800: "Fire Crescent Axe",
702801: "Fire Crescent Axe+1",
702802: "Fire Crescent Axe+2",
702803: "Fire Crescent Axe+3",
702804: "Fire Crescent Axe+4",
702805: "Fire Crescent Axe+5",
702806: "Fire Crescent Axe+6",
702807: "Fire Crescent Axe+7",
702808: "Fire Crescent Axe+8",
702809: "Fire Crescent Axe+9",
702810: "Fire Crescent Axe+10",
702900: "Chaos Crescent Axe",
702901: "Chaos Crescent Axe+1",
702902: "Chaos Crescent Axe+2",
702903: "Chaos Crescent Axe+3",
702904: "Chaos Crescent Axe+4",
702905: "Chaos Crescent Axe+5",
703000: "Butcher Knife",
703001: "Butcher Knife+1",
703002: "Butcher Knife+2",
703003: "Butcher Knife+3",
703004: "Butcher Knife+4",
703005: "Butcher Knife+5",
703006: "Butcher Knife+6",
703007: "Butcher Knife+7",
703008: "Butcher Knife+8",
703009: "Butcher Knife+9",
703010: "Butcher Knife+10",
703011: "Butcher Knife+11",
703012: "Butcher Knife+12",
703013: "Butcher Knife+13",
703014: "Butcher Knife+14",
703015: "Butcher Knife+15",
703100: "Crystal Butcher Knife",
703101: "Crystal Butcher Knife+1",
703102: "Crystal Butcher Knife+2",
703103: "Crystal Butcher Knife+3",
703104: "Crystal Butcher Knife+4",
703105: "Crystal Butcher Knife+5",
703200: "Lightning Butcher Knife",
703201: "Lightning Butcher Knife+1",
703202: "Lightning Butcher Knife+2",
703203: "Lightning Butcher Knife+3",
703204: "Lightning Butcher Knife+4",
703205: "Lightning Butcher Knife+5",
703300: "Raw Butcher Knife",
703301: "Raw Butcher Knife+1",
703302: "Raw Butcher Knife+2",
703303: "Raw Butcher Knife+3",
703304: "Raw Butcher Knife+4",
703305: "Raw Butcher Knife+5",
703400: "Magic Butcher Knife",
703401: "Magic Butcher Knife+1",
703402: "Magic Butcher Knife+2",
703403: "Magic Butcher Knife+3",
703404: "Magic Butcher Knife+4",
703405: "Magic Butcher Knife+5",
703406: "Magic Butcher Knife+6",
703407: "Magic Butcher Knife+7",
703408: "Magic Butcher Knife+8",
703409: "Magic Butcher Knife+9",
703410: "Magic Butcher Knife+10",
703500: "Enchanted Butcher Knife",
703501: "Enchanted Butcher Knife+1",
703502: "Enchanted Butcher Knife+2",
703503: "Enchanted Butcher Knife+3",
703504: "Enchanted Butcher Knife+4",
703505: "Enchanted Butcher Knife+5",
703600: "Divine Butcher Knife",
703601: "Divine Butcher Knife+1",
703602: "Divine Butcher Knife+2",
703603: "Divine Butcher Knife+3",
703604: "Divine Butcher Knife+4",
703605: "Divine Butcher Knife+5",
703606: "Divine Butcher Knife+6",
703607: "Divine Butcher Knife+7",
703608: "Divine Butcher Knife+8",
703609: "Divine Butcher Knife+9",
703610: "Divine Butcher Knife+10",
703700: "Occult Butcher Knife",
703701: "Occult Butcher Knife+1",
703702: "Occult Butcher Knife+2",
703703: "Occult Butcher Knife+3",
703704: "Occult Butcher Knife+4",
703705: "Occult Butcher Knife+5",
703800: "Fire Butcher Knife",
703801: "Fire Butcher Knife+1",
703802: "Fire Butcher Knife+2",
703803: "Fire Butcher Knife+3",
703804: "Fire Butcher Knife+4",
703805: "Fire Butcher Knife+5",
703806: "Fire Butcher Knife+6",
703807: "Fire Butcher Knife+7",
703808: "Fire Butcher Knife+8",
703809: "Fire Butcher Knife+9",
703810: "Fire Butcher Knife+10",
703900: "Chaos Butcher Knife",
703901: "Chaos Butcher Knife+1",
703902: "Chaos Butcher Knife+2",
703903: "Chaos Butcher Knife+3",
703904: "Chaos Butcher Knife+4",
703905: "Chaos Butcher Knife+5",
704000: "Golem Axe",
704001: "Golem Axe+1",
704002: "Golem Axe+2",
704003: "Golem Axe+3",
704004: "Golem Axe+4",
704005: "Golem Axe+5",
704100: "Golem Axe",
704101: "Golem Axe+1",
704102: "Golem Axe+2",
704103: "Golem Axe+3",
704104: "Golem Axe+4",
704105: "Golem Axe+5",
704200: "Golem Axe",
704201: "Golem Axe+1",
704202: "Golem Axe+2",
704203: "Golem Axe+3",
704204: "Golem Axe+4",
704205: "Golem Axe+5",
704300: "Golem Axe",
704301: "Golem Axe+1",
704302: "Golem Axe+2",
704303: "Golem Axe+3",
704304: "Golem Axe+4",
704305: "Golem Axe+5",
704400: "Golem Axe",
704401: "Golem Axe+1",
704402: "Golem Axe+2",
704403: "Golem Axe+3",
704404: "Golem Axe+4",
704405: "Golem Axe+5",
704500: "Golem Axe",
704501: "Golem Axe+1",
704502: "Golem Axe+2",
704503: "Golem Axe+3",
704504: "Golem Axe+4",
704505: "Golem Axe+5",
704600: "Golem Axe",
704601: "Golem Axe+1",
704602: "Golem Axe+2",
704603: "Golem Axe+3",
704604: "Golem Axe+4",
704605: "Golem Axe+5",
705000: "Gargoyle Tail Axe",
705001: "Gargoyle Tail Axe+1",
705002: "Gargoyle Tail Axe+2",
705003: "Gargoyle Tail Axe+3",
705004: "Gargoyle Tail Axe+4",
705005: "Gargoyle Tail Axe+5",
705006: "Gargoyle Tail Axe+6",
705007: "Gargoyle Tail Axe+7",
705008: "Gargoyle Tail Axe+8",
705009: "Gargoyle Tail Axe+9",
705010: "Gargoyle Tail Axe+10",
705011: "Gargoyle Tail Axe+11",
705012: "Gargoyle Tail Axe+12",
705013: "Gargoyle Tail Axe+13",
705014: "Gargoyle Tail Axe+14",
705015: "Gargoyle Tail Axe+15",
705100: "Crystal Gargoyle Tail Axe",
705101: "Crystal Gargoyle Tail Axe+1",
705102: "Crystal Gargoyle Tail Axe+2",
705103: "Crystal Gargoyle Tail Axe+3",
705104: "Crystal Gargoyle Tail Axe+4",
705105: "Crystal Gargoyle Tail Axe+5",
705200: "Lightning Gargoyle Tail Axe",
705201: "Ltng. Gargoyle Tail Axe+1",
705202: "Ltng. Gargoyle Tail Axe+2",
705203: "Ltng. Gargoyle Tail Axe+3",
705204: "Ltng. Gargoyle Tail Axe+4",
705205: "Ltng. Gargoyle Tail Axe+5",
705300: "Raw Gargoyle Tail Axe",
705301: "Raw Gargoyle Tail Axe+1",
705302: "Raw Gargoyle Tail Axe+2",
705303: "Raw Gargoyle Tail Axe+3",
705304: "Raw Gargoyle Tail Axe+4",
705305: "Raw Gargoyle Tail Axe+5",
705400: "Magic Gargoyle Tail Axe",
705401: "Magic Gargoyle Tail Axe+1",
705402: "Magic Gargoyle Tail Axe+2",
705403: "Magic Gargoyle Tail Axe+3",
705404: "Magic Gargoyle Tail Axe+4",
705405: "Magic Gargoyle Tail Axe+5",
705406: "Magic Gargoyle Tail Axe+6",
705407: "Magic Gargoyle Tail Axe+7",
705408: "Magic Gargoyle Tail Axe+8",
705409: "Magic Gargoyle Tail Axe+9",
705410: "Magic Gargoyle Tail Axe+10",
705500: "Enchanted Gargoyle Tail Axe",
705501: "Ench. Gargoyle Tail Axe+1",
705502: "Ench. Gargoyle Tail Axe+2",
705503: "Ench. Gargoyle Tail Axe+3",
705504: "Ench. Gargoyle Tail Axe+4",
705505: "Ench. Gargoyle Tail Axe+5",
705600: "Divine Gargoyle Tail Axe",
705601: "Divine Gargoyle Tail Axe+1",
705602: "Divine Gargoyle Tail Axe+2",
705603: "Divine Gargoyle Tail Axe+3",
705604: "Divine Gargoyle Tail Axe+4",
705605: "Divine Gargoyle Tail Axe+5",
705606: "Divine Gargoyle Tail Axe+6",
705607: "Divine Gargoyle Tail Axe+7",
705608: "Divine Gargoyle Tail Axe+8",
705609: "Divine Gargoyle Tail Axe+9",
705610: "Divine Gargoyle Tail Axe+10",
705700: "Occult Gargoyle Tail Axe",
705701: "Occult Gargoyle Tail Axe+1",
705702: "Occult Gargoyle Tail Axe+2",
705703: "Occult Gargoyle Tail Axe+3",
705704: "Occult Gargoyle Tail Axe+4",
705705: "Occult Gargoyle Tail Axe+5",
705800: "Fire Gargoyle Tail Axe",
705801: "Fire Gargoyle Tail Axe+1",
705802: "Fire Gargoyle Tail Axe+2",
705803: "Fire Gargoyle Tail Axe+3",
705804: "Fire Gargoyle Tail Axe+4",
705805: "Fire Gargoyle Tail Axe+5",
705806: "Fire Gargoyle Tail Axe+6",
705807: "Fire Gargoyle Tail Axe+7",
705808: "Fire Gargoyle Tail Axe+8",
705809: "Fire Gargoyle Tail Axe+9",
705810: "Fire Gargoyle Tail Axe+10",
705900: "Chaos Gargoyle Tail Axe",
705901: "Chaos Gargoyle Tail Axe+1",
705902: "Chaos Gargoyle Tail Axe+2",
705903: "Chaos Gargoyle Tail Axe+3",
705904: "Chaos Gargoyle Tail Axe+4",
705905: "Chaos Gargoyle Tail Axe+5",
750000: "Greataxe",
750001: "Greataxe+1",
750002: "Greataxe+2",
750003: "Greataxe+3",
750004: "Greataxe+4",
750005: "Greataxe+5",
750006: "Greataxe+6",
750007: "Greataxe+7",
750008: "Greataxe+8",
750009: "Greataxe+9",
750010: "Greataxe+10",
750011: "Greataxe+11",
750012: "Greataxe+12",
750013: "Greataxe+13",
750014: "Greataxe+14",
750015: "Greataxe+15",
750100: "Crystal Greataxe",
750101: "Crystal Greataxe+1",
750102: "Crystal Greataxe+2",
750103: "Crystal Greataxe+3",
750104: "Crystal Greataxe+4",
750105: "Crystal Greataxe+5",
750200: "Lightning Greataxe",
750201: "Lightning Greataxe+1",
750202: "Lightning Greataxe+2",
750203: "Lightning Greataxe+3",
750204: "Lightning Greataxe+4",
750205: "Lightning Greataxe+5",
750300: "Raw Greataxe",
750301: "Raw Greataxe+1",
750302: "Raw Greataxe+2",
750303: "Raw Greataxe+3",
750304: "Raw Greataxe+4",
750305: "Raw Greataxe+5",
750400: "Magic | |
# Repository: chenwenxiao/DOI -- file: ood_regularizer/experiment/models/likelihood/vib.py
# -*- coding: utf-8 -*-
import functools
import sys
from argparse import ArgumentParser
from contextlib import contextmanager
import tensorflow as tf
from pprint import pformat
from matplotlib import pyplot
from tensorflow.contrib.framework import arg_scope, add_arg_scope
import tfsnippet as spt
from tfsnippet import DiscretizedLogistic
from tfsnippet.examples.utils import (MLResults,
                                      save_images_collection,
                                      bernoulli_as_pixel,
                                      bernoulli_flow,
                                      print_with_title)
import numpy as np
from tfsnippet.preprocessing import UniformNoiseSampler
from ood_regularizer.experiment.datasets.celeba import load_celeba
from ood_regularizer.experiment.datasets.overall import load_overall, load_complexity
from ood_regularizer.experiment.datasets.svhn import load_svhn
from ood_regularizer.experiment.models.utils import get_mixed_array
from ood_regularizer.experiment.utils import make_diagram, get_ele, plot_fig
import os
class ExpConfig(spt.Config):
# model parameters
z_dim = 256
act_norm = False
weight_norm = False
batch_norm = False
l2_reg = 0.0002
kernel_size = 3
shortcut_kernel_size = 1
nf_layers = 20
# training parameters
result_dir = None
write_summary = True
max_epoch = 200
warm_up_start = 100
initial_beta = -3.0
uniform_scale = False
use_transductive = True
mixed_train = False
self_ood = False
mixed_ratio = 1.0
mutation_rate = 0.1
noise_type = "mutation" # or unit
in_dataset_test_ratio = 1.0
pretrain = False
max_beta = 0.01
in_dataset = 'cifar10'
out_dataset = 'svhn'
compressor = 2 # 0 for jpeg, 1 for png, 2 for flif
max_step = None
batch_size = 128
smallest_step = 5e-5
initial_lr = 0.0002
lr_anneal_factor = 0.5
lr_anneal_epoch_freq = []
lr_anneal_step_freq = None
n_critical = 5
# evaluation parameters
train_n_qz = 1
test_n_qz = 10
test_batch_size = 64
test_epoch_freq = 200
plot_epoch_freq = 20
distill_ratio = 1.0
distill_epoch = 5000
mcmc_times = 5
epsilon = -20.0
min_logstd_of_q = -10.0
sample_n_z = 100
x_shape = (32, 32, 3)
x_shape_multiple = 3072
extra_stride = 2
class_num = 10
count_experiment = False
config = ExpConfig()
@add_arg_scope
def batch_norm(inputs, training=False, scope=None):
return tf.layers.batch_normalization(inputs, training=training, name=scope)
@add_arg_scope
def dropout(inputs, training=False, scope=None):
print(inputs, training)
return spt.layers.dropout(inputs, rate=0.2, training=training, name=scope)
@add_arg_scope
@spt.global_reuse
def q_net(x, observed=None, n_z=None):
net = spt.BayesianNet(observed=observed)
normalizer_fn = None
shape = (1,) + config.x_shape[:-1] + (5,)
print(shape)
extend_x = tf.get_variable(name='extend_x', shape=shape, dtype=tf.float32,
trainable=True)
print(extend_x)
batch_size = spt.utils.get_shape(x)[0]
extend_x = tf.tile(extend_x, tf.concat([[batch_size], [1] * len(config.x_shape)], axis=0))
print(extend_x)
x = tf.concat([x, extend_x], axis=-1)
print(x)
# compute the hidden features
with arg_scope([spt.layers.resnet_conv2d_block],
kernel_size=config.kernel_size,
shortcut_kernel_size=config.shortcut_kernel_size,
activation_fn=tf.nn.leaky_relu,
normalizer_fn=normalizer_fn,
kernel_regularizer=spt.layers.l2_regularizer(config.l2_reg), ):
h_x = tf.to_float(x)
h_x = spt.layers.resnet_conv2d_block(h_x, 16, scope='level_0') # output: (28, 28, 16)
h_x = spt.layers.resnet_conv2d_block(h_x, 32, scope='level_2') # output: (14, 14, 32)
h_x = spt.layers.resnet_conv2d_block(h_x, 64, scope='level_3') # output: (14, 14, 32)
h_x = spt.layers.resnet_conv2d_block(h_x, 128, strides=config.extra_stride,
scope='level_4') # output: (14, 14, 32)
h_x = spt.layers.resnet_conv2d_block(h_x, 128, strides=2, scope='level_6') # output: (7, 7, 64)
h_x = spt.layers.resnet_conv2d_block(h_x, 128, strides=2, scope='level_8') # output: (7, 7, 64)
h_x = spt.ops.reshape_tail(h_x, ndims=3, shape=[-1])
z_mean = spt.layers.dense(h_x, config.z_dim, scope='z_mean')
z_logstd = spt.layers.dense(h_x, config.z_dim, scope='z_logstd')
# sample z ~ q(z|x)
z = net.add('z', spt.Normal(mean=z_mean, logstd=spt.ops.maybe_clip_value(z_logstd, min_val=config.min_logstd_of_q)),
n_samples=n_z, group_ndims=1)
return net
@add_arg_scope
@spt.global_reuse
def p_net(observed=None, n_z=None):
net = spt.BayesianNet(observed=observed)
# sample z ~ p(z)
normal = spt.Normal(mean=tf.zeros([1, config.z_dim]),
logstd=tf.zeros([1, config.z_dim]))
z = net.add('z', normal, n_samples=n_z, group_ndims=1)
normalizer_fn = None
# compute the hidden features
with arg_scope([spt.layers.dense],
activation_fn=tf.nn.leaky_relu,
normalizer_fn=normalizer_fn,
weight_norm=True,
kernel_regularizer=spt.layers.l2_regularizer(config.l2_reg)):
h_z = spt.layers.dense(z, 500)
h_z = spt.layers.dense(h_z, 500)
h_z = spt.layers.dense(h_z, 500)
h_z = spt.layers.dense(h_z, 500)
h_z = spt.layers.dense(h_z, 500)
logits = spt.layers.dense(h_z, config.class_num)
y = net.add('y', spt.Categorical(logits=logits))
return net
def get_all_loss(q_net, p_net, warm_up):
with tf.name_scope('vib_loss'):
train_recon = p_net['y'].log_prob()
train_kl = tf.reduce_mean(
-p_net['z'].log_prob() + q_net['z'].log_prob()
)
VAE_loss = -train_recon + warm_up * train_kl
return VAE_loss
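# Note added for clarity: get_all_loss() above is the usual VIB / beta-VAE style objective,
#     L = -E_{q(z|x)}[log p(y|z)] + beta * KL(q(z|x) || p(z)),
# where `warm_up` plays the role of beta (it is fed from main() scaled by config.max_beta)
# and the KL term is estimated from samples as E[log q(z|x) - log p(z)] rather than in
# closed form.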
class MyIterator(object):
def __init__(self, iterator):
self._iterator = iter(iterator)
self._next = None
self._has_next = True
self.next()
@property
def has_next(self):
return self._has_next
def next(self):
if not self._has_next:
raise StopIteration()
ret = self._next
try:
self._next = next(self._iterator)
except StopIteration:
self._next = None
self._has_next = False
else:
self._has_next = True
return ret
def __iter__(self):
return self
def __next__(self):
return self.next()
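# Illustrative use of the look-ahead wrapper above (added for clarity): MyIterator
# pre-fetches one element so callers can test `has_next` before pulling the next item.
def _example_my_iterator():
    it = MyIterator(range(3))
    out = []
    while it.has_next:
        out.append(it.next())
    return out  # -> [0, 1, 2]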
def limited(iterator, n):
i = 0
try:
while i < n:
yield next(iterator)
i += 1
except StopIteration:
pass
def get_var(name):
pfx = name.rsplit('/', 1)
if len(pfx) == 2:
vars = tf.global_variables(pfx[0] + '/')
else:
vars = tf.global_variables()
for var in vars:
if var.name.split(':', 1)[0] == name:
return var
    raise NameError('Variable {} does not exist.'.format(name))
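# Hedged usage note: get_var() looks up a TensorFlow global variable by its full name
# without the ':0' suffix, e.g. get_var('q_net/z_mean/kernel') -- that exact variable
# name is hypothetical and depends on how the layers above were scoped.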
def main():
# parse the arguments
arg_parser = ArgumentParser()
spt.register_config_arguments(config, arg_parser, title='Model options')
spt.register_config_arguments(spt.settings, arg_parser, prefix='tfsnippet',
title='TFSnippet options')
arg_parser.parse_args(sys.argv[1:])
# print the config
print_with_title('Configurations', pformat(config.to_dict()), after='\n')
# open the result object and prepare for result directories
results = MLResults(config.result_dir)
results.save_config(config) # save experiment settings for review
while True:
try:
results.make_dirs('plotting/sample', exist_ok=True)
results.make_dirs('plotting/z_plot', exist_ok=True)
results.make_dirs('plotting/train.reconstruct', exist_ok=True)
results.make_dirs('plotting/test.reconstruct', exist_ok=True)
results.make_dirs('train_summary', exist_ok=True)
results.make_dirs('checkpoint/checkpoint', exist_ok=True)
break
except Exception:
pass
if config.count_experiment:
with open('/home/cwx17/research/ml-workspace/projects/wasserstein-ood-regularizer/count_experiments', 'a') as f:
f.write(results.system_path("") + '\n')
f.close()
# prepare for training and testing data
(x_train, y_train, x_test, y_test) = load_overall(config.in_dataset)
(svhn_train, svhn_train_y, svhn_test, svhn_test_y) = load_overall(config.out_dataset)
def normalize(x, y):
return (x - 127.5) / 256.0 * 2, y
config.class_num = np.max(y_train) + 1
config.x_shape = x_train.shape[1:]
config.x_shape_multiple = 1
for x in config.x_shape:
config.x_shape_multiple *= x
if config.x_shape == (28, 28, 1):
config.extra_stride = 1
# input placeholders
input_x = tf.placeholder(
dtype=tf.float32, shape=(None,) + config.x_shape, name='input_x')
input_complexity = tf.placeholder(
dtype=tf.float32, shape=(None,), name='input_complexity')
input_y = tf.placeholder(
dtype=tf.int32, shape=(None,), name='input_y')
warm_up = tf.placeholder(
dtype=tf.float32, shape=(), name='warm_up')
learning_rate = spt.AnnealingVariable(
'learning_rate', config.initial_lr, config.lr_anneal_factor)
# derive the loss and lower-bound for training
with tf.name_scope('training'), \
arg_scope([batch_norm], training=True):
train_q_net = q_net(input_x, n_z=config.train_n_qz)
train_p_net = p_net(observed={'x': input_x, 'z': train_q_net['z'], 'y': input_y},
n_z=config.train_n_qz)
train_recon = train_p_net['y'].log_prob()
VAE_loss = get_all_loss(train_q_net, train_p_net, warm_up)
VAE_loss += tf.losses.get_regularization_loss()
# derive the nll and logits output for testing
with tf.name_scope('testing'):
test_q_net = q_net(input_x, n_z=config.test_n_qz)
print(test_q_net['z'])
test_chain = test_q_net.chain(p_net, observed={'y': input_y}, n_z=config.test_n_qz, latent_axis=0)
print(test_chain.model['y'].log_prob())
ele_test_recon = tf.reduce_mean(test_chain.model['y'].log_prob(), axis=0) / config.x_shape_multiple / np.log(2)
print(ele_test_recon)
ele_test_entropy = []
for i in range(config.class_num):
fake_y = tf.ones_like(input_y, dtype=tf.int32) * i
ele_test_entropy.append(
tf.reduce_mean(tf.exp(test_chain.model['y'].distribution.log_prob(given=fake_y)), axis=0))
ele_test_entropy = tf.stack(ele_test_entropy, axis=-1) # [batch_size, class_num]
print(ele_test_entropy)
ele_test_predict = tf.argmax(ele_test_entropy, axis=-1)
ele_test_predict_value = tf.reduce_max(ele_test_entropy, axis=-1)
ele_test_entropy = tf.reduce_sum(-tf.log(ele_test_entropy) * ele_test_entropy, axis=-1)
test_recon = tf.reduce_mean(
ele_test_recon
)
ele_test_ll = test_chain.vi.evaluation.is_loglikelihood() / config.x_shape_multiple / np.log(2)
print(ele_test_ll)
test_nll = -tf.reduce_mean(
ele_test_ll
)
ele_test_lb = test_chain.vi.lower_bound.elbo() / config.x_shape_multiple / np.log(2)
print(ele_test_lb)
test_lb = tf.reduce_mean(ele_test_lb)
# derive the optimizer
with tf.name_scope('optimizing'):
VAE_params = tf.trainable_variables('q_net') + tf.trainable_variables('p_net')
with tf.variable_scope('theta_optimizer'):
VAE_optimizer = tf.train.AdamOptimizer(learning_rate)
VAE_grads = VAE_optimizer.compute_gradients(VAE_loss, VAE_params)
with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)):
VAE_train_op = VAE_optimizer.apply_gradients(VAE_grads)
cifar_train_flow = spt.DataFlow.arrays([x_train, y_train], config.test_batch_size).map(normalize)
cifar_test_flow = spt.DataFlow.arrays([x_test, y_test], config.test_batch_size).map(normalize)
svhn_train_flow = spt.DataFlow.arrays([svhn_train, svhn_train_y], config.test_batch_size).map(normalize)
svhn_test_flow = spt.DataFlow.arrays([svhn_test, svhn_test_y], config.test_batch_size).map(normalize)
train_flow = spt.DataFlow.arrays([x_train, y_train], config.batch_size, shuffle=True,
skip_incomplete=True).map(normalize)
with spt.utils.create_session().as_default() as session, \
train_flow.threaded(5) as train_flow:
spt.utils.ensure_variables_initialized()
experiment_dict = {
'kmnist': '/mnt/mfs/mlstorage-experiments/cwx17/a8/e5/02279d802d3ada3a62f5',
'celeba': '/mnt/mfs/mlstorage-experiments/cwx17/00/e5/02732c28dc8de8d962f5',
'tinyimagenet': '/mnt/mfs/mlstorage-experiments/cwx17/ff/d5/02732c28dc8d02d962f5',
'not_mnist': '/mnt/mfs/mlstorage-experiments/cwx17/98/e5/02279d802d3a8f4962f5',
'cifar10': '/mnt/mfs/mlstorage-experiments/cwx17/ef/d5/02732c28dc8d2ad862f5',
'cifar100': '/mnt/mfs/mlstorage-experiments/cwx17/df/d5/02732c28dc8d87d862f5',
'svhn': '/mnt/mfs/mlstorage-experiments/cwx17/08/e5/02c52d867e431fc862f5',
'noise': '/mnt/mfs/mlstorage-experiments/cwx17/f7/e5/02c52d867e43403862f5',
'constant': '/mnt/mfs/mlstorage-experiments/cwx17/dd/d5/02812baa4f7014b762f5',
'fashion_mnist': '/mnt/mfs/mlstorage-experiments/cwx17/cf/d5/02732c28dc8d14b762f5',
'mnist': '/mnt/mfs/mlstorage-experiments/cwx17/cd/d5/02812baa4f7014b762f5',
'omniglot': '/mnt/mfs/mlstorage-experiments/cwx17/78/e5/02279d802d3a14b762f5',
'fashion_mnist28': '/mnt/mfs/mlstorage-experiments/cwx17/d0/f5/02279d802d3a3fdad2f5',
'mnist28': '/mnt/mfs/mlstorage-experiments/cwx17/e0/f5/02279d802d3ab75cd2f5',
'kmnist28': '/mnt/mfs/mlstorage-experiments/cwx17/01/f5/02279d802d3acecdd2f5',
'omniglot28': '/mnt/mfs/mlstorage-experiments/cwx17/11/f5/02279d802d3a6b7ed2f5',
'noise28': '/mnt/mfs/mlstorage-experiments/cwx17/21/f5/02279d802d3a7c7ed2f5',
'constant28': '/mnt/mfs/mlstorage-experiments/cwx17/31/f5/02279d802d3a71fed2f5',
'not_mnist28': '/mnt/mfs/mlstorage-experiments/cwx17/d0/f5/02c52d867e4350bdf2f5',
}
print(experiment_dict)
if config.in_dataset in experiment_dict:
restore_dir = experiment_dict[config.in_dataset] + '/checkpoint'
restore_checkpoint = os.path.join(
restore_dir, 'checkpoint',
'checkpoint.dat-{}'.format(config.max_epoch))
else:
restore_dir = results.system_path('checkpoint')
restore_checkpoint = None
# train the network
with spt.TrainLoop(tf.trainable_variables(),
var_groups=['q_net', 'p_net', 'posterior_flow', 'G_theta', 'D_psi', 'G_omega', 'D_kappa'],
max_epoch=config.max_epoch + 1,
max_step=config.max_step,
summary_dir=(results.system_path('train_summary')
if config.write_summary else None),
summary_graph=tf.get_default_graph(),
early_stopping=False,
checkpoint_dir=results.system_path('checkpoint'),
restore_checkpoint=restore_checkpoint
) as loop:
loop.print_training_summary()
spt.utils.ensure_variables_initialized()
epoch_iterator = loop.iter_epochs()
# adversarial training
for epoch in epoch_iterator:
if epoch == config.max_epoch + 1:
cifar_train_predict = get_ele(ele_test_predict, cifar_train_flow, [input_x, input_y])
cifar_test_predict = get_ele(ele_test_predict, cifar_test_flow, [input_x, input_y])
get_ele(ele_test_predict_value, cifar_train_flow, [input_x, input_y])
get_ele(ele_test_predict_value, cifar_test_flow, [input_x, input_y])
print('Correct number in cifar test is {}'.format(
np.sum(cifar_test_predict == y_test)))
print('Correct number in cifar train is {}'.format(
np.sum(cifar_train_predict == y_train)))
make_diagram(loop,
ele_test_entropy,
[cifar_test_flow, svhn_test_flow],
[input_x, input_y],
names=[config.in_dataset + ' Test', config.out_dataset + ' Test'],
fig_name='H_histogram'
)
make_diagram(loop,
ele_test_lb - ele_test_recon,
[cifar_test_flow, svhn_test_flow],
[input_x, input_y],
names=[config.in_dataset + ' Test', config.out_dataset + ' Test'],
fig_name='R_histogram'
)
make_diagram(loop,
-ele_test_entropy,
[cifar_test_flow, svhn_test_flow],
[input_x, input_y],
names=[config.in_dataset + ' Test', config.out_dataset + ' Test'],
fig_name='nH_histogram'
)
make_diagram(loop,
-ele_test_lb + ele_test_recon,
[cifar_test_flow, svhn_test_flow],
[input_x, input_y],
names=[config.in_dataset + ' Test', config.out_dataset + ' Test'],
fig_name='nR_histogram'
)
make_diagram(loop,
ele_test_recon,
[cifar_test_flow, svhn_test_flow],
[input_x, input_y],
names=[config.in_dataset + ' Test', config.out_dataset + ' Test'],
fig_name='recon_histogram'
)
make_diagram(loop,
ele_test_lb,
[cifar_test_flow, svhn_test_flow],
[input_x, input_y],
names=[config.in_dataset + ' Test', config.out_dataset + ' Test'],
fig_name='elbo_histogram'
)
make_diagram(loop,
ele_test_ll,
[cifar_test_flow, svhn_test_flow],
[input_x, input_y],
names=[config.in_dataset + ' Test', config.out_dataset + ' Test'],
fig_name='log_prob_histogram'
)
loop.print_logs()
break
for step, [x, y] in loop.iter_steps(train_flow):
_, batch_VAE_loss, batch_recon = session.run([VAE_train_op, VAE_loss, train_recon], feed_dict={
input_x: x, input_y: y, warm_up: max(1.0 * epoch / config.warm_up_start, 1.0) * config.max_beta
})
loop.collect_metrics(VAE_loss=batch_VAE_loss)
loop.collect_metrics(recon=batch_recon)
if epoch in config.lr_anneal_epoch_freq:
learning_rate.anneal()
if epoch | |
# Repository: sgibson91/magprop
import os
import numpy as np
import matplotlib.pyplot as plt
from scipy.integrate import odeint
# Global constants
G = 6.674e-8 # Gravitational constant - cgs units
c = 3.0e10 # Light speed - cm/s
R = 1.0e6 # Magnetar radius - cm
Rkm = 10.0 # Magnetar radius - km
omass = 1.4 # Magnetar mass - Msol
Msol = 1.99e33 # Solar mass - grams
M = omass * Msol # Magnetar mass - grams
I = (4.0 / 5.0) * M * (R ** 2.0) # Moment of inertia
alpha = 0.1 # Sound speed prescription
cs7 = 1.0 # Sound speed in disc - 10^7 cm/s
k = 0.9 # Capping fraction
j = 1.0e6 # Duration of plot
propeff = 1.0 # Propeller energy-to-luminosity conversion efficiency
dipeff = 1.0 # Dipole energy-to-luminosity conversion efficiency
GM = G * M
tarr = np.logspace(0.0, 6.0, num=10001, base=10.0)
# Calculate initial conditions to pass to odeint
def init_conds(MdiscI, P):
"""
Function to convert a disc mass from solar masses to grams and an initial spin
period in milliseconds into an angular frequency.
:param MdiscI: disc mass - solar masses
:param P: initial spin period - milliseconds
:return: an array containing the disc mass in grams and the angular freq.
"""
Mdisc0 = MdiscI * Msol # Disc mass
omega0 = (2.0 * np.pi) / (1.0e-3 * P) # Angular frequency
return np.array([Mdisc0, omega0])
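# Worked example (added for clarity): for P = 5.0 ms and MdiscI = 1.0e-3 Msol,
# init_conds returns Mdisc0 = 1.0e-3 * 1.99e33 g ~ 2.0e30 g and
# omega0 = 2*pi / 5.0e-3 s ~ 1256.6 rad/s.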
# Model to be passed to odeint to calculate Mdisc and omega
def odes(y, t, B, MdiscI, RdiscI, epsilon, delta, n=1.0, alpha=0.1, cs7=1.0,
k=0.9):
"""
Function to be passed to ODEINT to calculate the disc mass and angular frequency
over time.
:param y: output from init_conds
:param t: time points to solve equations for
:param B: magnetic field strength - 10^15 G
:param MdiscI: initial disc mass - solar masses
:param RdiscI: disc radius - km
:param epsilon: timescale ratio
:param delta: mass ratio
:param n: propeller "switch-on"
:param alpha: sound speed prescription
:param cs7: sound speed in disc - 10^7 cm/s
:param k: capping fraction
:return: time derivatives of disc mass and angular frequency to be integrated
by ODEINT
"""
# Initial conditions
Mdisc, omega = y
# Constants
Rdisc = RdiscI * 1.0e5 # Disc radius
tvisc = Rdisc / (alpha * cs7 * 1.0e7) # Viscous timescale
mu = 1.0e15 * B * (R ** 3.0) # Magnetic Dipole Moment
M0 = delta * MdiscI * Msol # Global Mass Budget
tfb = epsilon * tvisc # Fallback timescale
# Radii -- Alfven, Corotation, Light Cylinder
Rm = ((mu ** (4.0 / 7.0)) * (GM ** (-1.0 / 7.0)) * (((3.0 * Mdisc) / tvisc)
** (-2.0 / 7.0)))
Rc = (GM / (omega ** 2.0)) ** (1.0 / 3.0)
Rlc = c / omega
# Cap Alfven radius
if Rm >= (k * Rlc):
Rm = k * Rlc
w = (Rm / Rc) ** (3.0 / 2.0) # Fastness Parameter
bigT = 0.5 * I * (omega ** 2.0) # Rotational energy
modW = (0.6 * M * (c ** 2.0) * ((GM / (R * (c ** 2.0))) / (1.0 - 0.5 * (GM /
(R * (c ** 2.0)))))) # Binding energy
rot_param = bigT / modW # Rotation parameter
# Dipole torque
Ndip = (-1.0 * (mu ** 2.0) * (omega ** 3.0)) / (6.0 * (c ** 3.0))
# Mass flow rates
eta2 = 0.5 * (1.0 + np.tanh(n * (w - 1.0)))
eta1 = 1.0 - eta2
Mdotprop = eta2 * (Mdisc / tvisc) # Propelled
Mdotacc = eta1 * (Mdisc / tvisc) # Accretion
Mdotfb = (M0 / tfb) * (((t + tfb) / tfb) ** (-5.0 / 3.0))
Mdotdisc = Mdotfb - Mdotprop - Mdotacc
if rot_param > 0.27:
Nacc = 0.0 # Prevents magnetar break-up
else:
# Accretion torque
if Rm >= R:
Nacc = ((GM * Rm) ** 0.5) * (Mdotacc - Mdotprop)
else:
Nacc = ((GM * R) ** 0.5) * (Mdotacc - Mdotprop)
omegadot = (Nacc + Ndip) / I # Angular frequency time derivative
return np.array([Mdotdisc, omegadot])
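# Minimal integration sketch (added for clarity; the parameter values mirror the
# "Humped" entry defined further down). This is essentially what model_lc() below
# does before converting the solution into luminosities.
def _example_integrate_odes():
    B, P, MdiscI, RdiscI, epsilon, delta = 1.0, 5.0, 1.0e-3, 100.0, 1.0, 1.0e-6
    y0 = init_conds(MdiscI, P)
    soln = odeint(odes, y0, tarr, args=(B, MdiscI, RdiscI, epsilon, delta))
    Mdisc, omega = soln[:, 0], soln[:, 1]
    return Mdisc, omega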
def model_lc(pars, dipeff=1.0, propeff=1.0, f_beam=1.0, n=1.0, alpha=0.1,
cs7=1.0, k=0.9):
"""
Function to calculate the model light curve for a given set of parameters.
:param pars: list of input parameters including:
        * B: magnetic field strength - 10^15 G
* P: initial spin period - milliseconds
* MdiscI: initial disc mass - solar masses
* RdiscI: disc radius - km
* epsilon: timescale ratio
* delta: mass ratio
:param dipeff: dipole energy-to-luminosity conversion efficiency
:param propeff: propeller energy-to-luminosity conversion efficiency
:param f_beam: beaming factor
:param n: propeller "switch-on"
:param alpha: sound speed prescription
:param cs7: sound speed in disc - 10^7 cm/s
:param k: capping fraction
:return: an array containing total, dipole and propeller luminosities in
units of 10^50 erg/s
"""
B, P, MdiscI, RdiscI, epsilon, delta = pars # Separate out variables
y0 = init_conds(MdiscI, P) # Calculate initial conditions
# Solve equations
soln, info = odeint(odes, y0, tarr, args=(B, MdiscI, RdiscI, epsilon, delta),
full_output=True)
if info["message"] != "Integration successful.":
return "flag"
# Split solution
Mdisc = np.array(soln[:, 0])
omega = np.array(soln[:, 1])
# Constants
Rdisc = RdiscI * 1.0e5 # Disc radius - cm
tvisc = Rdisc / (alpha * cs7 * 1.0e7) # Viscous timescale - s
mu = 1.0e15 * B * (R ** 3.0) # Magnetic dipole moment
# Radii -- Alfven, Corotation and Light Cylinder
Rm = ((mu ** (4.0 / 7.0)) * (GM ** (-1.0 / 7.0)) * (((3.0 * Mdisc) / tvisc)
** (-2.0 / 7.0)))
Rc = (GM / (omega ** 2.0)) ** (1.0/ 3.0)
Rlc = c / omega
Rm = np.where(Rm >= (k * Rlc), (k * Rlc), Rm)
w = (Rm / Rc) ** (3.0 / 2.0) # Fastness parameter
bigT = 0.5 * I * (omega ** 2.0) # Rotational energy
modW = (0.6 * M * (c ** 2.0) * ((GM / (R * (c ** 2.0))) / (1.0 - 0.5 * (GM /
(R * (c ** 2.0)))))) # Binding energy
rot_param = bigT / modW # Rotational parameter
# Efficiencies and Mass Flow Rates
eta2 = 0.5 * (1.0 + np.tanh(n * (w - 1.0)))
eta1 = 1.0 - eta2
Mdotprop = eta2 * (Mdisc / tvisc) # Propelled
Mdotacc = eta1 * (Mdisc / tvisc) # Accreted
Nacc = np.zeros_like(Mdisc)
for i in range(len(Nacc)):
if rot_param[i] > 0.27:
Nacc[i] = 0.0
else:
if Rm[i] >= R:
Nacc[i] = ((GM * Rm[i]) ** 0.5) * (Mdotacc[i] - Mdotprop[i])
else:
Nacc[i] = ((GM * R) ** 0.5) * (Mdotacc[i] - Mdotprop[i])
# Dipole luminosity
Ldip = (mu ** 2.0 * omega ** 4.0) / (6.0 * (c ** 3.0))
Ldip = np.where(Ldip <= 0.0, 0.0, Ldip)
Ldip = np.where(np.isfinite(Ldip), Ldip, 0.0)
# Propeller luminosity
Lprop = (-1.0 * Nacc * omega) - ((GM / Rm) * eta2 * (Mdisc / tvisc))
Lprop = np.where(Lprop <= 0.0, 0.0, Lprop)
Lprop = np.where(np.isfinite(Lprop), Lprop, 0.0)
# Total luminosity
Ltot = f_beam * ((dipeff * Ldip) + (propeff * Lprop))
return np.array([Ltot, Lprop, Ldip]) / 1.0e50
# Check if plots folder exists
if not (os.path.exists("plots")):
os.mkdir("plots")
grbs = {"Humped": [1.0, 5.0, 1.0e-3, 100.0, 1.0, 1.0e-6],
"Classic": [1.0, 5.0, 1.0e-4, 1000.0, 1.0, 1.0e-6],
"Sloped": [10.0, 5.0, 1.0e-4, 1000.0, 1.0, 1.0e-6],
"Stuttering": [5.0, 5.0, 1.0e-2, 500.0, 1.0, 1.0e-6]}
grbs_list = ["Humped", "Classic", "Sloped", "Stuttering"]
fig, axes = plt.subplots(2, 2, sharex=True, sharey=True, figsize=(5, 4.5))
pltx = 0
plty = 0
for z, grb in enumerate(grbs_list):
B, P, MdiscI, RdiscI, epsilon, delta = grbs[grb]
ax = axes[pltx, plty]
# === My model === #
Ltot_sg, Lprop_sg, Ldip_sg = model_lc(grbs[grb])
# === Ben's model === #
# Define constants and convert units
spin = P * 1.0e-3 # Convert to seconds
Rdisc = RdiscI * 1.0e5 # Convert to cm
visc = alpha * cs7 * 1.0e7 * Rdisc # Viscosity
tvisc = (Rdisc ** 2.0) / visc # Viscous timescale
mu = 1.0e15 | |
per image side.
* If a tuple of four entries, then the entries represent top, right,
bottom, left. Each entry may be a single integer (always crop by
exactly that value), a tuple of two ints ``a`` and ``b`` (crop by
an amount ``a <= x <= b``), a list of ints (crop by a random
value that is contained in the list) or a StochasticParameter
(sample the amount to crop from that parameter).
percent : None or int or float or imgaug.parameters.StochasticParameter \
or tuple, optional
The number of pixels to crop away (cut off) on each side of the image
given *in percent* of the image height/width.
E.g. if this is set to 0.1, the augmenter will always crop away
10 percent of the image's height at the top, 10 percent of the width
on the right, 10 percent of the height at the bottom and 10 percent
of the width on the left.
Either this or the parameter `px` may be set, not both at the same time.
* If None, then percent-based cropping will not be used.
* If int, then expected to be 0 (no cropping).
* If float, then that percentage will always be cropped away.
* If StochasticParameter, then that parameter will be used for each
image. Four samples will be drawn per image (top, right, bottom,
left).
* If a tuple of two floats with values ``a`` and ``b``, then each
side will be cropped by a random percentage in the range
``a <= x <= b``. ``x`` is sampled per image side.
* If a tuple of four entries, then the entries represent top, right,
bottom, left. Each entry may be a single float (always crop by
exactly that percent value), a tuple of two floats a and ``b``
(crop by a percentage ``a <= x <= b``), a list of floats (crop by
a random value that is contained in the list) or a
StochasticParameter (sample the percentage to crop from that
parameter).
keep_size : bool, optional
After cropping, the result image has a different height/width than
the input image. If this parameter is set to True, then the cropped
image will be resized to the input image's size, i.e. the image size
is then not changed by the augmenter.
sample_independently : bool, optional
If False AND the values for `px`/`percent` result in exactly one
probability distribution for the amount to crop, only one
single value will be sampled from that probability distribution
and used for all sides. I.e. the crop amount then is the same
for all sides.
name : None or str, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
deterministic : bool, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
random_state : None or int or numpy.random.RandomState, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
Examples
--------
>>> aug = iaa.Crop(px=(0, 10))
crops each side by a random value from the range 0px to 10px (the value
is sampled per side).
>>> aug = iaa.Crop(px=(0, 10), sample_independently=False)
samples one value ``v`` from the discrete range ``[0..10]`` and crops all
sides by ``v`` pixels.
>>> aug = iaa.Crop(px=(0, 10), keep_size=False)
crops each side by a random value from the range 0px to 10px (the value
is sampled per side). After cropping, the images are NOT resized to their
original size (i.e. the images may end up having different heights/widths).
>>> aug = iaa.Crop(px=((0, 10), (0, 5), (0, 10), (0, 5)))
crops the top and bottom by a random value from the range 0px to 10px
and the left and right by a random value in the range 0px to 5px.
>>> aug = iaa.Crop(percent=(0, 0.1))
crops each side by a random value from the range 0 percent to
10 percent. (Percent with respect to the side's size, e.g. for the
top side it uses the image's height.)
>>> aug = iaa.Crop(percent=([0.05, 0.1], [0.05, 0.1], [0.05, 0.1], [0.05, 0.1]))
crops each side by either 5 percent or 10 percent.
"""
def recursive_negate(v):
if v is None:
return v
elif ia.is_single_number(v):
ia.do_assert(v >= 0)
return -v
elif isinstance(v, iap.StochasticParameter):
return iap.Multiply(v, -1)
elif isinstance(v, tuple):
return tuple([recursive_negate(v_) for v_ in v])
elif isinstance(v, list):
return [recursive_negate(v_) for v_ in v]
else:
raise Exception("Expected None or int or float or StochasticParameter or list or tuple, got %s." % (
type(v),))
px = recursive_negate(px)
percent = recursive_negate(percent)
if name is None:
name = "Unnamed%s" % (ia.caller_name(),)
aug = CropAndPad(
px=px, percent=percent,
keep_size=keep_size, sample_independently=sample_independently,
name=name, deterministic=deterministic, random_state=random_state
)
return aug
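# Note added for clarity: Crop is a thin wrapper that negates `px`/`percent` via
# recursive_negate() above and delegates to CropAndPad, so e.g. iaa.Crop(px=10)
# behaves like iaa.CropAndPad(px=-10) with the same keep_size/sample_independently
# settings.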
# TODO maybe rename this to PadToMinimumSize?
# TODO this is very similar to CropAndPad, maybe add a way to generate crop values imagewise via a callback in
# in CropAndPad?
# TODO why is padding mode and cval here called pad_mode, pad_cval but in other
# cases mode/cval?
class PadToFixedSize(meta.Augmenter):
"""
Pad images to minimum width/height.
If images are already at the minimum width/height or are larger, they will
not be padded. Note that this also means that images will not be cropped if
they exceed the required width/height.
The augmenter randomly decides per image how to distribute the required
padding amounts over the image axis. E.g. if 2px have to be padded on the
left or right to reach the required width, the augmenter will sometimes
add 2px to the left and 0px to the right, sometimes add 2px to the right
and 0px to the left and sometimes add 1px to both sides. Set `position`
to ``center`` to prevent that.
dtype support::
See :func:`imgaug.imgaug.pad`.
Parameters
----------
width : int
Minimum width of new images.
height : int
Minimum height of new images.
pad_mode : imgaug.ALL or str or list of str or imgaug.parameters.StochasticParameter, optional
See :func:`imgaug.augmenters.size.CropAndPad.__init__`.
pad_cval : number or tuple of number or list of number or imgaug.parameters.StochasticParameter, optional
See :func:`imgaug.augmenters.size.CropAndPad.__init__`.
position : {'uniform', 'normal', 'center', 'left-top', 'left-center', 'left-bottom', 'center-top', 'center-center',\
'center-bottom', 'right-top', 'right-center', 'right-bottom'} or tuple of float or StochasticParameter\
or tuple of StochasticParameter, optional
Sets the center point of the padding, which determines how the required padding amounts are distributed
to each side. For a tuple ``(a, b)``, both ``a`` and ``b`` are expected to be in range ``[0.0, 1.0]``
and describe the fraction of padding applied to the left/right (low/high values for ``a``) and the fraction
of padding applied to the top/bottom (low/high values for ``b``). A padding position at ``(0.5, 0.5)``
would be the center of the image and distribute the padding equally to all sides. A padding position
at ``(0.0, 1.0)`` would be the left-bottom and would apply 100% of the required padding to the bottom and
left sides of the image so that the bottom left corner becomes more and more the new image center (depending on
how much is padded).
* If string ``uniform`` then the share of padding is randomly and uniformly distributed over each side.
Equivalent to ``(Uniform(0.0, 1.0), Uniform(0.0, 1.0))``.
* If string ``normal`` then the share of padding is distributed based on a normal distribution,
leading to a focus on the center of the images.
Equivalent to ``(Clip(Normal(0.5, 0.45/2), 0, 1), Clip(Normal(0.5, 0.45/2), 0, 1))``.
* If string ``center`` then center point of the padding is identical to the image center.
Equivalent to ``(0.5, 0.5)``.
* If a string matching regex ``^(left|center|right)-(top|center|bottom)$``, e.g. ``left-top`` or
``center-bottom`` then sets the center point of the padding to the X-Y position matching that
description.
        * If a tuple of float, then expected to have exactly two entries between ``0.0`` and ``1.0``, which will
          always be used as the (x, y) position.
        * If a StochasticParameter, then that parameter will be queried once per call to ``augment_*()`` to get
          ``Nx2`` center positions matching (x, y) form.
        * If a tuple of StochasticParameter, then expected to have exactly two entries that will both be queried
per call to ``augment_*()``, each | |
# from wbia.algo.graph import nx_utils as nxu
infr = pblm.infr
infr.status()
pair_sample = pblm.hyper_params.pair_sample
n_pos = sum(ut.take(pair_sample, ['top_gt', 'mid_gt', 'bot_gt', 'rand_gt']))
n_neg = sum(ut.take(pair_sample, ['top_gf', 'mid_gf', 'bot_gf', 'rand_gf']))
logger.info('n_neg = {!r}'.format(n_neg))
logger.info('n_pos = {!r}'.format(n_pos))
cfgstr = pair_sample.get_cfgstr()
ibs = pblm.infr.ibs
cfgstr += ibs.get_annot_hashid_semantic_uuid(pblm.infr.aids)
cacher = ub.Cacher(
'pairsample_1_v6' + ibs.get_dbname(),
cfgstr=cfgstr,
appname=pblm.appname,
verbose=pblm.verbose,
)
data = cacher.tryload()
if data is None:
# LNBNN makes 48729 given a set of 6474, so about 8 examples per annot
multipler = (n_pos + n_neg) // 2
n_target = len(infr.aids) * multipler
def edgeset(iterable):
return set(it.starmap(infr.e_, iterable))
pos_edges = edgeset(infr.pos_graph.edges())
neg_edges = edgeset(infr.neg_graph.edges())
aid_pairs = pos_edges.union(neg_edges)
n_need = n_target - len(aid_pairs)
per_cc = int(n_need / infr.pos_graph.number_of_components() / 2)
per_cc = max(2, per_cc)
logger.info('per_cc = {!r}'.format(per_cc))
rng = ut.ensure_rng(2039141610)
            # Use previous explicit reviews
pccs = list(map(frozenset, infr.positive_components()))
for cc in ut.ProgIter(pccs, label='pos sample'):
pos_pairs = edgeset(ut.random_combinations(cc, 2, per_cc, rng=rng))
aid_pairs.update(pos_pairs)
n_need = n_target - len(aid_pairs)
rng = ut.ensure_rng(282695095)
per_pair = 1
for cc1, cc2 in ut.ProgIter(
ut.random_combinations(pccs, 2, rng=rng), label='neg sample'
):
neg_pairs = edgeset(ut.random_product((cc1, cc2), num=per_pair, rng=rng))
aid_pairs.update(neg_pairs)
if len(aid_pairs) >= n_target:
break
n_need = n_target - len(aid_pairs)
data = aid_pairs
cacher.save(data)
aid_pairs = data
return aid_pairs
def make_graph_based_bootstrap_pairs(pblm):
"""
Sampling method for when you want to bootstrap VAMP after several
reviews.
Samples pairs for VAMP training using manually reviewed edges and mines
other (random) pairs as needed.
We first sample a base set via:
(1) take all manually reviewed positive edges (not in an inconsistent PCC)
(2) take all manually reviewed negative edges (not touching an inconsistent PCC)
(3) take all manually reviewed incomparable edges.
Note: it is important to ignore any PCC currently in an
inconsistent state.
We can then generate additional positive samples by sampling
automatically reviewed positive edges within PCCs.
We can do the same for negatives.
"""
from networkx.algorithms.connectivity import k_edge_subgraphs
# from wbia.algo.graph import nx_utils as nxu
import itertools as it
infr = pblm.infr
def edgeset(iterable):
return set(it.starmap(infr.e_, iterable))
decision_to_samples = ub.ddict(set)
# Loop over all known edges in the graph
for aid1, aid2, data in infr.graph.edges(data=True):
nid1, nid2 = infr.pos_graph.node_labels(aid1, aid2)
# Check if this edge is touching an inconsistent PCC
is_touching_inconsistent_pcc = (
nid1 in infr.nid_to_errors or nid2 in infr.nid_to_errors
)
if not is_touching_inconsistent_pcc:
decision = data['evidence_decision']
user_id = data['user_id']
if user_id.startswith('user:'):
decision_to_samples[decision].add((aid1, aid2))
elif decision == NEGTV:
# If the decision is negative, just put it in:
# it is between two PCCs that are consistent, so we will
# just trust the decision.
decision_to_samples[decision].add((aid1, aid2))
# We have all of the user data. Can we add in anything else?
# Loop through all the consistent data and add any automatically
# reviewed edges between k-edge-connected subgraphs
pccs = list(map(frozenset, infr.consistent_components()))
for cc in ut.ProgIter(pccs, label='pos sample'):
pos_subgraph = infr.pos_graph.subgraph(cc)
for ksub in k_edge_subgraphs(pos_subgraph, k=2):
ksub_g = pos_subgraph.subgraph(ksub)
decision_to_samples[POSTV].update(set(ksub_g.edges()))
#
decision_to_samples[POSTV] = edgeset(decision_to_samples[POSTV])
decision_to_samples[NEGTV] = edgeset(decision_to_samples[NEGTV])
decision_to_samples[INCMP] = edgeset(decision_to_samples[INCMP])
balance = int(
1.2 * min(len(decision_to_samples[POSTV]), len(decision_to_samples[NEGTV]))
)
decision_to_samples[POSTV] = ut.shuffle(list(decision_to_samples[POSTV]))[
0:balance
]
decision_to_samples[NEGTV] = ut.shuffle(list(decision_to_samples[NEGTV]))[
0:balance
]
decision_to_samples[INCMP] = ut.shuffle(list(decision_to_samples[INCMP]))[
0:balance
]
# Union all edges together and return
aid_pairs = sorted(
edgeset(
ub.flatten(
[
decision_to_samples[POSTV],
decision_to_samples[NEGTV],
decision_to_samples[INCMP],
]
)
)
)
return aid_pairs
@profile
def make_training_pairs(pblm):
"""
CommandLine:
python -m wbia.algo.verif.vsone make_training_pairs --db PZ_Master1
Example:
>>> # DISABLE_DOCTEST
>>> from wbia.algo.verif.vsone import * # NOQA
>>> pblm = OneVsOneProblem.from_empty('PZ_MTEST')
>>> pblm.make_training_pairs()
"""
infr = pblm.infr
if pblm.verbose > 0:
logger.info('[pblm] gathering training pairs')
sample_method = pblm.hyper_params['sample_method']
aid_pairs_ = []
if sample_method == 'lnbnn':
aid_pairs_.append(pblm.make_lnbnn_training_pairs())
aid_pairs_.append(infr.photobomb_samples())
aid_pairs_.append(list(infr.incomp_graph.edges()))
elif sample_method == 'random':
aid_pairs_.append(pblm.make_randomized_training_pairs())
aid_pairs_.append(infr.photobomb_samples())
aid_pairs_.append(list(infr.incomp_graph.edges()))
elif sample_method == 'lnbnn+random':
aid_pairs_.append(pblm.make_lnbnn_training_pairs())
aid_pairs_.append(pblm.make_randomized_training_pairs())
aid_pairs_.append(infr.photobomb_samples())
aid_pairs_.append(list(infr.incomp_graph.edges()))
elif sample_method == 'bootstrap':
aid_pairs_.append(pblm.make_graph_based_bootstrap_pairs())
else:
raise KeyError('Unknown sample_method={}'.format(sample_method))
# Simplify life by using sorted undirected pairs
aid_pairs = sorted(set(it.starmap(infr.e_, ub.flatten(aid_pairs_))))
return aid_pairs
@profile
def load_samples(pblm):
r"""
CommandLine:
python -m wbia.algo.verif.vsone load_samples --profile
Example:
>>> # DISABLE_DOCTEST
>>> from wbia.algo.verif.vsone import * # NOQA
>>> #pblm = OneVsOneProblem.from_empty('PZ_MTEST')
>>> #pblm = OneVsOneProblem.from_empty('PZ_PB_RF_TRAIN')
>>> pblm = OneVsOneProblem.from_empty('PZ_Master1')
>>> pblm.load_samples()
>>> samples = pblm.samples
>>> samples.print_info()
"""
# Get a set of training pairs
if pblm.verbose > 0:
ut.cprint('[pblm] load_samples', color='blue')
if pblm.samples is not None:
ut.cprint('[pblm] WARNING CLOBBERING OLD SAMPLES', color='yellow')
aid_pairs = pblm.make_training_pairs()
pblm.samples = AnnotPairSamples(pblm.infr.ibs, aid_pairs, pblm.infr)
if pblm.verbose > 0:
ut.cprint('[pblm] apply_multi_task_multi_label', color='blue')
pblm.samples.apply_multi_task_multi_label()
@profile
def load_features(pblm, use_cache=True, with_simple=False):
"""
CommandLine:
python -m wbia.algo.verif.vsone load_features --profile
Example:
>>> # DISABLE_DOCTEST
>>> from wbia.algo.verif.vsone import * # NOQA
>>> #pblm = OneVsOneProblem.from_empty('GZ_Master1')
>>> pblm = OneVsOneProblem.from_empty('PZ_PB_RF_TRAIN')
>>> pblm.load_samples()
>>> pblm.load_features(with_simple=False)
"""
if pblm.verbose > 0:
ut.cprint('[pblm] load_features', color='blue')
ibs = pblm.infr.ibs
edges = ut.emap(tuple, pblm.samples.aid_pairs.tolist())
feat_extract_config = pblm.feat_extract_config
extr = pairfeat.PairwiseFeatureExtractor(
ibs, verbose=10, config=feat_extract_config
)
X_all = extr.transform(edges)
pblm.raw_X_dict = {'learn(all)': X_all}
pblm.samples.set_feats(copy.deepcopy(pblm.raw_X_dict))
if with_simple:
pblm.load_simple_scores()
def load_simple_scores(pblm):
if pblm.verbose > 0:
ut.cprint('[pblm] load_simple_scores', color='blue')
infr = pblm.infr
ibs = infr.ibs
aid_pairs = ut.emap(tuple, pblm.samples.aid_pairs.tolist())
hyper_params = pblm.hyper_params
sample_hashid = pblm.samples.sample_hashid()
feat_cfgstr = hyper_params.get_cfgstr()
feat_hashid = ut.hashstr27(sample_hashid + feat_cfgstr)
# logger.info('features_hashid = %r' % (features_hashid,))
cfgstr = '_'.join(['devcache', str(ibs.dbname), feat_hashid])
cacher = ub.Cacher(
'simple_scores_' + ibs.dbname,
cfgstr=cfgstr,
appname=pblm.appname,
enabled=0,
verbose=pblm.verbose,
)
data = cacher.tryload()
if data is None:
# ---------------
X_all = pblm.raw_X_dict['learn(all)']
featinfo = vt.AnnotPairFeatInfo(X_all)
simple_cols = featinfo.find('summary_op', '==', 'sum')
simple_cols += featinfo.find('summary_op', '==', 'len', hack=False)
# Select simple scores out of the full feat vectors
simple_scores = X_all[simple_cols]
if True:
# The main idea here is to load lnbnn scores for the pairwise
# matches so we can compare them to the outputs of the pairwise
# classifier.
# TODO: separate this into different cache
# Add vsmany_lnbnn to simple scores
# Only query the aids in the sampled set
aids = sorted(set(ut.flatten(aid_pairs)))
qreq_ = pblm._make_lnbnn_qreq(aids)
cm_list = qreq_.execute()
edge_to_data = infr._get_cm_edge_data(aid_pairs, cm_list=cm_list)
edge_data = ut.take(edge_to_data, aid_pairs)
lnbnn_score_list = [d.get('score', 0) for d in edge_data]
lnbnn_rank_list = [d.get('rank', np.inf) for d in edge_data]
lnbnn_score_list = [0 if s is None else s for s in lnbnn_score_list]
simple_scores = simple_scores.assign(
score_lnbnn_1vM=lnbnn_score_list, rank_lnbnn_1vM=lnbnn_rank_list
)
simple_scores[pd.isnull(simple_scores)] = 0
data = simple_scores
cacher.save(data)
simple_scores = data
pblm.raw_simple_scores = simple_scores
pblm.samples.set_simple_scores(copy.deepcopy(pblm.raw_simple_scores))
def ensure_deploy_classifiers(pblm, dpath='.'):
classifiers = {}
task_keys = list(pblm.samples.supported_tasks())
for task_key in task_keys:
verif = deploy.Deployer(dpath, pblm).ensure(task_key)
classifiers[task_key] = verif
return classifiers
def deploy_all(pblm, dpath='.', publish=False):
task_keys = list(pblm.samples.supported_tasks())
for task_key in task_keys:
pblm.deploy(dpath, task_key=task_key, publish=publish)
def deploy(pblm, dpath='.', task_key=None, publish=False):
"""
Trains and saves a classifier for deployment
Args:
dpath (str): where to save the deployable model
task_key (str): task to train for (default match_state)
publish (bool): if True will try to rsync the model and metadata to
the publication server.
Example:
>>> # DISABLE_DOCTEST
>>> from wbia.algo.verif.vsone import * # NOQA
>>> pblm = OneVsOneProblem.from_empty(defaultdb='PZ_MTEST',
>>> sample_method='random')
>>> task_key = ut.get_argval('--task', default='match_state')
>>> publish = ut.get_argflag('--publish')
>>> pblm.deploy(task_key=task_key, publish=publish)
Notes:
A deployment consists of the following information
* The classifier itself
* Information needed to construct the input to the classifier
- TODO: can this be encoded as an sklearn pipeline?
* Metadata concerning what data the classifier was trained with
* PUBLISH TO /media/hdd/PUBLIC/models/pairclf
Ignore:
pblm.evaluate_classifiers(with_simple=False)
res = pblm.task_combo_res[pblm.primary_task_key]['RF']['learn(sum,glob)']
"""
return deploy.Deployer(dpath=dpath, pblm=pblm).deploy(task_key, publish)
def setup(pblm, with_simple=False):
pblm.set_pandas_options()
ut.cprint('\n[pblm] --- LOADING DATA ---', 'blue')
pblm.load_samples()
# pblm.samples.print_info()
pblm.load_features(with_simple=with_simple)
# pblm.samples.print_info()
ut.cprint('\n[pblm] --- CURATING DATA ---', 'blue')
pblm.samples.print_info()
logger.info('---------------')
ut.cprint('\n[pblm] --- FEATURE INFO ---', 'blue')
pblm.build_feature_subsets()
pblm.samples.print_featinfo()
task_keys = pblm.eval_task_keys
clf_keys = pblm.eval_clf_keys
data_keys = pblm.eval_data_keys
if task_keys is None:
task_keys = list(pblm.samples.subtasks.keys())
if clf_keys is None:
clf_keys = ['RF']
if data_keys is None:
data_keys = list(pblm.samples.X_dict.keys())
pblm.eval_task_keys = task_keys
pblm.eval_clf_keys = clf_keys
pblm.eval_data_keys = data_keys
# Remove any tasks that cant be done
unsupported = set(task_keys) - set(pblm.samples.supported_tasks())
for task_key in unsupported:
logger.info('No data to train task_key = %r' % (task_key,))
task_keys.remove(task_key)
def setup_evaluation(pblm, with_simple=False):
pblm.setup(with_simple=with_simple)
task_keys = pblm.eval_task_keys
clf_keys = pblm.eval_clf_keys
data_keys = pblm.eval_data_keys
if pblm.samples.simple_scores is not None:
ut.cprint('\n--- EVALUATE SIMPLE SCORES ---', 'blue')
pblm.evaluate_simple_scores(task_keys)
else:
logger.info('no simple scores')
logger.info('...skipping simple evaluation')
ut.cprint('\n--- LEARN CROSS-VALIDATED RANDOM FORESTS ---', 'blue')
pblm.learn_evaluation_classifiers(task_keys, clf_keys, data_keys)
# pylint: disable=protected-access
import os
import glob
import radical.utils as ru
from .. import states as s
from .session import fetch_json
_debug = os.environ.get('RP_PROF_DEBUG')
# ------------------------------------------------------------------------------
#
# pilot and task activities: core hours are derived by multiplying the
# respective time durations with pilot size / task size. The 'idle'
# utilization and the 'agent' utilization are derived separately.
#
# Note that durations should add up to the `x_total` durations to ensure
# accounting for the complete task/pilot utilization.
#
# An updated list of events is available at docs/source/events.md
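# Illustration only (not part of this module): the core-hour derivation
# described above, i.e. the wallclock duration of an activity multiplied by
# the number of cores held during that activity. The helper name and the
# numbers below are illustrative assumptions.
def _example_core_hours(duration_sec, n_cores):
    # e.g. _example_core_hours(7200, 128) -> 256.0 core hours
    return (duration_sec / 3600.0) * n_cores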
PILOT_DURATIONS = {
'provide' : {
'total' : [{ru.EVENT: 'bootstrap_0_start'},
{ru.EVENT: 'bootstrap_0_stop' }]
},
# times between PMGR_ACTIVE and the termination command are not
# considered pilot specific consumptions. If some resources remain
# unused during that time, it is either due to inefficiencies of
# workload management (accounted for in the task consumption metrics),
# or the pilot is starving for workload.
'consume' : {
'boot' : [{ru.EVENT: 'bootstrap_0_start'},
{ru.EVENT: 'bootstrap_0_ok' }],
'setup_1' : [{ru.EVENT: 'bootstrap_0_ok' },
{ru.STATE: s.PMGR_ACTIVE }],
'idle' : [{ru.STATE: s.PMGR_ACTIVE },
{ru.EVENT: 'cmd' ,
ru.MSG : 'cancel_pilot' }],
'term' : [{ru.EVENT: 'cmd' ,
ru.MSG : 'cancel_pilot' },
{ru.EVENT: 'bootstrap_0_stop' }],
'agent' : [{ru.EVENT: 'sub_agent_start' },
{ru.EVENT: 'sub_agent_stop' }],
},
# FIXME: separate out DVM startup time
# 'rte' : [{ru.STATE: s.PMGR_ACTIVE },
# {ru.STATE: s.PMGR_ACTIVE }],
# 'setup_2' : [{ru.STATE: s.PMGR_ACTIVE },
# {ru.STATE: s.PMGR_ACTIVE }],
#
# resources on agent nodes are consumed for all of the pilot's lifetime
'agent' : {
'total' : [{ru.EVENT: 'bootstrap_0_start'},
{ru.EVENT: 'bootstrap_0_stop' }]
}
}
# The set of default task durations that are available for every task
# description, default resource configuration, and default scheduler and
# launcher.
TASK_DURATIONS_DEFAULT = {
'consume' : {
'exec_queue' : [{ru.EVENT: 'schedule_ok' },
{ru.STATE: s.AGENT_EXECUTING }],
'exec_prep' : [{ru.STATE: s.AGENT_EXECUTING },
{ru.EVENT: 'exec_start' }],
'exec_rp' : [{ru.EVENT: 'exec_start' },
{ru.EVENT: 'task_start' }],
'exec_sh' : [{ru.EVENT: 'task_start' },
{ru.EVENT: 'task_exec_start' }],
'exec_cmd' : [{ru.EVENT: 'task_exec_start' },
{ru.EVENT: 'task_exec_stop' }],
'term_sh' : [{ru.EVENT: 'task_exec_stop' },
{ru.EVENT: 'task_stop' }],
'term_rp' : [{ru.EVENT: 'task_stop' },
{ru.EVENT: 'exec_stop' }],
'unschedule' : [{ru.EVENT: 'exec_stop' },
{ru.EVENT: 'unschedule_stop' }]
# # if we have cmd_start / cmd_stop:
# 'exec_sh' : [{ru.EVENT: 'task_start' },
# {ru.EVENT: 'cmd_start' }],
# 'exec_cmd' : [{ru.EVENT: 'cmd_start' },
# {ru.EVENT: 'cmd_stop' }],
# 'term_sh' : [{ru.EVENT: 'cmd_stop' },
# {ru.EVENT: 'task_stop' }],
}
}
# The set of default task durations augmented with the durations of the app
# events. App events are generated by RADICAL Synapse and by `hello_rp.sh`. The
# latter is useful for testing as a sleep command drop-in.
TASK_DURATIONS_APP = {
'consume' : {
'exec_queue' : [{ru.EVENT: 'schedule_ok' },
{ru.STATE: s.AGENT_EXECUTING }],
'exec_prep' : [{ru.STATE: s.AGENT_EXECUTING },
{ru.EVENT: 'exec_start' }],
'exec_rp' : [{ru.EVENT: 'exec_start' },
{ru.EVENT: 'task_start' }],
'exec_sh' : [{ru.EVENT: 'task_start' },
{ru.EVENT: 'task_exec_start' }],
'init_app' : [{ru.EVENT: 'task_exec_start' },
{ru.EVENT: 'cmd_start' }],
'exec_cmd' : [{ru.EVENT: 'cmd_start' },
{ru.EVENT: 'cmd_stop' }],
'term_app' : [{ru.EVENT: 'cmd_stop' },
{ru.EVENT: 'task_exec_stop' }],
'term_sh' : [{ru.EVENT: 'task_exec_stop' },
{ru.EVENT: 'task_stop' }],
'term_rp' : [{ru.EVENT: 'task_stop' },
{ru.EVENT: 'exec_stop' }],
'unschedule' : [{ru.EVENT: 'exec_stop' },
{ru.EVENT: 'unschedule_stop' }]
}
}
# The set of default task durations with the durations generated when using
# PRRTE as launch method.
TASK_DURATIONS_PRTE = {
'consume' : {
'exec_queue' : [{ru.EVENT: 'schedule_ok' },
{ru.STATE: s.AGENT_EXECUTING }],
'exec_prep' : [{ru.STATE: s.AGENT_EXECUTING },
{ru.EVENT: 'exec_start' }],
'exec_rp' : [{ru.EVENT: 'exec_start' },
{ru.EVENT: 'task_start' }],
'exec_sh' : [{ru.EVENT: 'task_start' },
{ru.EVENT: 'task_exec_start' }],
'prte_phase_1': [{ru.EVENT: 'task_exec_start' },
{ru.EVENT: 'prte_init_complete' }],
'prte_phase_2': [{ru.EVENT: 'prte_init_complete' },
{ru.EVENT: 'prte_sending_launch_msg'}],
'exec_cmd' : [{ru.EVENT: 'prte_sending_launch_msg'},
{ru.EVENT: 'prte_iof_complete' }],
'prte_phase_3': [{ru.EVENT: 'prte_iof_complete' },
{ru.EVENT: 'prte_notify_completed' }],
'prte_phase_4': [{ru.EVENT: 'prte_notify_completed' },
{ru.EVENT: 'task_exec_stop' }],
'term_sh' : [{ru.EVENT: 'task_exec_stop' },
{ru.EVENT: 'task_stop' }],
'term_rp' : [{ru.EVENT: 'task_stop' },
{ru.EVENT: 'exec_stop' }],
'unschedule' : [{ru.EVENT: 'exec_stop' },
{ru.EVENT: 'unschedule_stop' }],
# # if we have cmd_start / cmd_stop:
# 'prte_phase_2': [{ru.EVENT: 'prte_init_complete' },
# {ru.EVENT: 'cmd_start' }],
# 'exec_cmd' : [{ru.EVENT: 'cmd_start' },
# {ru.EVENT: 'cmd_stop' }],
# 'prte_phase_3': [{ru.EVENT: 'cmd_stop' },
# {ru.EVENT: 'prte_notify_completed' }],
}
}
# The set of default task durations with the durations generated when using
# PRRTE as launch method and an app that records app events (e.g., RADICAL
# Synapse and `hello_rp.sh`).
TASK_DURATIONS_PRTE_APP = {
'consume' : {
'exec_queue' : [{ru.EVENT: 'schedule_ok' },
{ru.STATE: s.AGENT_EXECUTING }],
'exec_prep' : [{ru.STATE: s.AGENT_EXECUTING },
{ru.EVENT: 'exec_start' }],
'exec_rp' : [{ru.EVENT: 'exec_start' },
{ru.EVENT: 'task_start' }],
'exec_sh' : [{ru.EVENT: 'task_start' },
{ru.EVENT: 'task_exec_start' }],
'prte_phase_1': [{ru.EVENT: 'task_exec_start' },
{ru.EVENT: 'prte_init_complete' }],
'prte_phase_2': [{ru.EVENT: 'prte_init_complete' },
{ru.EVENT: 'prte_sending_launch_msg'}],
'init_app' : [{ru.EVENT: 'prte_sending_launch_msg'},
{ru.EVENT: 'cmd_start' }],
'exec_cmd' : [{ru.EVENT: 'cmd_start' },
{ru.EVENT: 'cmd_stop' }],
'term_app' : [{ru.EVENT: 'cmd_stop' },
{ru.EVENT: 'prte_iof_complete' }],
'prte_phase_3': [{ru.EVENT: 'prte_iof_complete' },
{ru.EVENT: 'prte_notify_completed' }],
'prte_phase_4': [{ru.EVENT: 'prte_notify_completed' },
{ru.EVENT: 'task_exec_stop' }],
'term_sh' : [{ru.EVENT: 'task_exec_stop' },
{ru.EVENT: 'task_stop' }],
'term_rp' : [{ru.EVENT: 'task_stop' },
{ru.EVENT: 'exec_stop' }],
'unschedule' : [{ru.EVENT: 'exec_stop' },
{ru.EVENT: 'unschedule_stop' }]
}
}
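# Illustration only (not part of this module): how a [start, stop] pair from
# the tables above maps to a duration, given profile events stored as
# (timestamp, event_dict) tuples. The helper and the 'profile' layout are
# illustrative assumptions, not the RADICAL-Analytics API.
def _example_duration(profile, pair):
    start_spec, stop_spec = pair
    def _matches(event, spec):
        # an event matches a spec if all spec fields (ru.EVENT, ru.STATE,
        # ru.MSG, ...) agree with the corresponding event fields
        return all(event.get(k) == v for k, v in spec.items())
    t_start = min(t for t, event in profile if _matches(event, start_spec))
    t_stop = max(t for t, event in profile if _matches(event, stop_spec))
    return t_stop - t_start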
# ----------------------------------------------------------------------------
#
def _convert_sdurations(sdurations):
'''
Converts a collection of durations expressed in short form to the same
collection of durations expressed in long form.
Definitions:
- Short form collection: one dictionary of short form durations
- Long form: one dictionary of long form durations.
Args:
sdurations (dict): a collections of durations in short form
Return:
ldurations (dict): a collection of long form durations
Example:
sdurations = {'name_of_duration': [{'STATE': s.STATE_NAME},
{'EVENT': 'event_name'}]}
ldurations = {'name_of_duration': [{ru.EVENT: 'state',
ru.STATE: s.STATE_NAME},
{ru.EVENT: 'event_name',
ru.STATE: None}]}
sdurations = {'name_of_duration': [{'STATE': s.STATE_NAME},
[{'EVENT': 'event_name'},
{'STATE': s.STATE_NAME}]]}
ldurations = {'name_of_duration': [{ru.EVENT: 'state',
ru.STATE: s.STATE_NAME},
[{ru.EVENT: 'event_name',
ru.STATE: None},
{ru.EVENT: 'state',
ru.STATE: s.STATE_NAME}]]}
sdurations = {'name_of_duration': [{'STATE': s.STATE_NAME},
{'MSG': 'message_name'}]}
ldurations = {'name_of_duration': [{ru.EVENT: 'state',
ru.STATE: s.STATE_NAME},
{ru.EVENT: 'cmd',
ru.MSG: 'message_name'}]}
'''
ldurations = dict()
for k,v in sdurations.items():
ldurations[k] = list()
for ts in v:
if isinstance(ts, dict):
ldurations[k].append(_expand_sduration(ts))
if isinstance(ts, list):
lds = list()
for i in ts:
lds.append(_expand_sduration(i))
ldurations[k].append(lds)
return ldurations
# ----------------------------------------------------------------------------
#
def _expand_sduration(sduration):
'''
Expands a duration expressed in short form to its long form for the
timestamp types `ru.STATE`, `ru.EVENT` and `ru.MSG`.
Definitions:
- Short form duration: one dictionary containing a state or event name.
- Long form duration: one dictionary containing two keys, one of type
`ru.EVENT` and one of type `ru.STATE`. The `ru.EVENT` key has a string
value while the `ru.STATE` key has a `s.STATE_NAME` object as its value.
Args:
sduration (dict): a duration in short form
Return:
lduration (dict): sduration in long form
Example:
sduration = {'STATE': s.STATE_NAME}
lduration = {ru.EVENT: 'state', ru.STATE: s.STATE_NAME}
sduration = {'EVENT': 'event_name'}
lduration = {ru.EVENT: 'event_name', ru.STATE: None}
sduration = {'MSG': 'message_name'}
lduration = {ru.EVENT: 'cmd', ru.MSG: 'message_name'}
'''
# Allow durations with both ru.EVENT and ru.STATE.
tt = list(sduration.keys())
if len(tt) == 1 and tt[0] not in ['STATE', 'EVENT', 'MSG']:
raise Exception('unknown timestamp type: %s' % tt)
if len(tt) == 2:
return sduration
if len(tt) > 2:
raise Exception('invalid duration: too many timestamps (%s)' % tt)
# Expand known timestamps.
lduration = None
for k,v in sduration.items():
if k == 'STATE':
lduration = {ru.EVENT: 'state', ru.STATE: v}
elif k == 'EVENT':
lduration = {ru.EVENT: v, ru.STATE: None}
elif k == 'MSG':
lduration = {ru.EVENT: 'cmd', ru.MSG: v}
return lduration
# Set of default pilot durations for RADICAL-Analytics. All the durations
# are contiguous.
# NOTE: _init durations are most often 0.
PILOT_DURATIONS_DEBUG_SHORT = {
'p_pmgr_create' : [{'STATE': s.NEW },
{'STATE': s.PMGR_LAUNCHING_PENDING}],
'p_pmgr_launching_init' : [{'STATE': s.PMGR_LAUNCHING_PENDING},
{'STATE': s.PMGR_LAUNCHING }],
'p_pmgr_launching' : [{'STATE': s.PMGR_LAUNCHING },
{'EVENT': 'staging_in_start' }],
'p_pmgr_stage_in' : [{'EVENT': 'staging_in_start' },
{'EVENT': 'staging_in_stop' }],
'p_pmgr_submission_init' : [{'EVENT': 'staging_in_stop' },
{'EVENT': 'submission_start' }],
'p_pmgr_submission' : [{'EVENT': 'submission_start' },
{'EVENT': 'submission_stop' }],
'p_pmgr_scheduling_init' : [{'EVENT': 'submission_stop' },
{'STATE': s.PMGR_ACTIVE_PENDING }],
# batch system queue time
'p_pmgr_scheduling' : [{'STATE': s.PMGR_ACTIVE_PENDING },
{'EVENT': 'bootstrap_0_start' }],
'p_agent_ve_setup_init' : [{'EVENT': 'bootstrap_0_start' },
{'EVENT': 've_setup_start' }],
'p_agent_ve_setup' : [{'EVENT': 've_setup_start' },
{'EVENT': 've_setup_stop' }],
'p_agent_ve_activate_init': [{'EVENT': 've_setup_stop' },
{'EVENT': 've_activate_start' }],
'p_agent_ve_activate' : [{'EVENT': 've_activate_start' },
{'EVENT': 've_activate_stop' }],
'p_agent_install_init' : [{'EVENT': 've_activate_stop' },
{'EVENT': 'rp_install_start' }],
'p_agent_install' : [{'EVENT': 'rp_install_start' },
{'EVENT': 'rp_install_stop' }],
'p_agent_launching' :
# Enter a parse tree produced by CSharp4Parser#attribute_argument_expression.
def enterAttribute_argument_expression(self, ctx):
pass
# Exit a parse tree produced by CSharp4Parser#attribute_argument_expression.
def exitAttribute_argument_expression(self, ctx):
pass
# Enter a parse tree produced by CSharp4Parser#class_modifier_unsafe.
def enterClass_modifier_unsafe(self, ctx):
pass
# Exit a parse tree produced by CSharp4Parser#class_modifier_unsafe.
def exitClass_modifier_unsafe(self, ctx):
pass
# Enter a parse tree produced by CSharp4Parser#struct_modifier_unsafe.
def enterStruct_modifier_unsafe(self, ctx):
pass
# Exit a parse tree produced by CSharp4Parser#struct_modifier_unsafe.
def exitStruct_modifier_unsafe(self, ctx):
pass
# Enter a parse tree produced by CSharp4Parser#interface_modifier_unsafe.
def enterInterface_modifier_unsafe(self, ctx):
pass
# Exit a parse tree produced by CSharp4Parser#interface_modifier_unsafe.
def exitInterface_modifier_unsafe(self, ctx):
pass
# Enter a parse tree produced by CSharp4Parser#delegate_modifier_unsafe.
def enterDelegate_modifier_unsafe(self, ctx):
pass
# Exit a parse tree produced by CSharp4Parser#delegate_modifier_unsafe.
def exitDelegate_modifier_unsafe(self, ctx):
pass
# Enter a parse tree produced by CSharp4Parser#field_modifier_unsafe.
def enterField_modifier_unsafe(self, ctx):
pass
# Exit a parse tree produced by CSharp4Parser#field_modifier_unsafe.
def exitField_modifier_unsafe(self, ctx):
pass
# Enter a parse tree produced by CSharp4Parser#method_modifier_unsafe.
def enterMethod_modifier_unsafe(self, ctx):
pass
# Exit a parse tree produced by CSharp4Parser#method_modifier_unsafe.
def exitMethod_modifier_unsafe(self, ctx):
pass
# Enter a parse tree produced by CSharp4Parser#property_modifier_unsafe.
def enterProperty_modifier_unsafe(self, ctx):
pass
# Exit a parse tree produced by CSharp4Parser#property_modifier_unsafe.
def exitProperty_modifier_unsafe(self, ctx):
pass
# Enter a parse tree produced by CSharp4Parser#event_modifier_unsafe.
def enterEvent_modifier_unsafe(self, ctx):
pass
# Exit a parse tree produced by CSharp4Parser#event_modifier_unsafe.
def exitEvent_modifier_unsafe(self, ctx):
pass
# Enter a parse tree produced by CSharp4Parser#indexer_modifier_unsafe.
def enterIndexer_modifier_unsafe(self, ctx):
pass
# Exit a parse tree produced by CSharp4Parser#indexer_modifier_unsafe.
def exitIndexer_modifier_unsafe(self, ctx):
pass
# Enter a parse tree produced by CSharp4Parser#operator_modifier_unsafe.
def enterOperator_modifier_unsafe(self, ctx):
pass
# Exit a parse tree produced by CSharp4Parser#operator_modifier_unsafe.
def exitOperator_modifier_unsafe(self, ctx):
pass
# Enter a parse tree produced by CSharp4Parser#constructor_modifier_unsafe.
def enterConstructor_modifier_unsafe(self, ctx):
pass
# Exit a parse tree produced by CSharp4Parser#constructor_modifier_unsafe.
def exitConstructor_modifier_unsafe(self, ctx):
pass
# Enter a parse tree produced by CSharp4Parser#destructor_declaration_unsafe.
def enterDestructor_declaration_unsafe(self, ctx):
pass
# Exit a parse tree produced by CSharp4Parser#destructor_declaration_unsafe.
def exitDestructor_declaration_unsafe(self, ctx):
pass
# Enter a parse tree produced by CSharp4Parser#static_constructor_modifiers_unsafe.
def enterStatic_constructor_modifiers_unsafe(self, ctx):
pass
# Exit a parse tree produced by CSharp4Parser#static_constructor_modifiers_unsafe.
def exitStatic_constructor_modifiers_unsafe(self, ctx):
pass
# Enter a parse tree produced by CSharp4Parser#embedded_statement_unsafe.
def enterEmbedded_statement_unsafe(self, ctx):
pass
# Exit a parse tree produced by CSharp4Parser#embedded_statement_unsafe.
def exitEmbedded_statement_unsafe(self, ctx):
pass
# Enter a parse tree produced by CSharp4Parser#unsafe_statement.
def enterUnsafe_statement(self, ctx):
pass
# Exit a parse tree produced by CSharp4Parser#unsafe_statement.
def exitUnsafe_statement(self, ctx):
pass
# Enter a parse tree produced by CSharp4Parser#type121_unsafe.
def enterType121_unsafe(self, ctx):
pass
# Exit a parse tree produced by CSharp4Parser#type121_unsafe.
def exitType121_unsafe(self, ctx):
pass
# Enter a parse tree produced by CSharp4Parser#pointer_type121.
def enterPointer_type121(self, ctx):
pass
# Exit a parse tree produced by CSharp4Parser#pointer_type121.
def exitPointer_type121(self, ctx):
pass
# Enter a parse tree produced by CSharp4Parser#unmanaged_type121.
def enterUnmanaged_type121(self, ctx):
pass
# Exit a parse tree produced by CSharp4Parser#unmanaged_type121.
def exitUnmanaged_type121(self, ctx):
pass
# Enter a parse tree produced by CSharp4Parser#primary_no_array_creation_expression_unsafe.
def enterPrimary_no_array_creation_expression_unsafe(self, ctx):
pass
# Exit a parse tree produced by CSharp4Parser#primary_no_array_creation_expression_unsafe.
def exitPrimary_no_array_creation_expression_unsafe(self, ctx):
pass
# Enter a parse tree produced by CSharp4Parser#unary_expression_unsafe.
def enterUnary_expression_unsafe(self, ctx):
pass
# Exit a parse tree produced by CSharp4Parser#unary_expression_unsafe.
def exitUnary_expression_unsafe(self, ctx):
pass
# Enter a parse tree produced by CSharp4Parser#pointer_indirection_expression.
def enterPointer_indirection_expression(self, ctx):
pass
# Exit a parse tree produced by CSharp4Parser#pointer_indirection_expression.
def exitPointer_indirection_expression(self, ctx):
pass
# Enter a parse tree produced by CSharp4Parser#addressof_expression.
def enterAddressof_expression(self, ctx):
pass
# Exit a parse tree produced by CSharp4Parser#addressof_expression.
def exitAddressof_expression(self, ctx):
pass
# Enter a parse tree produced by CSharp4Parser#sizeof_expression.
def enterSizeof_expression(self, ctx):
pass
# Exit a parse tree produced by CSharp4Parser#sizeof_expression.
def exitSizeof_expression(self, ctx):
pass
# Enter a parse tree produced by CSharp4Parser#fixed_statement.
def enterFixed_statement(self, ctx):
pass
# Exit a parse tree produced by CSharp4Parser#fixed_statement.
def exitFixed_statement(self, ctx):
pass
# Enter a parse tree produced by CSharp4Parser#fixed_pointer_declarators.
def enterFixed_pointer_declarators(self, ctx):
pass
# Exit a parse tree produced by CSharp4Parser#fixed_pointer_declarators.
def exitFixed_pointer_declarators(self, ctx):
pass
# Enter a parse tree produced by CSharp4Parser#fixed_pointer_declarator.
def enterFixed_pointer_declarator(self, ctx):
pass
# Exit a parse tree produced by CSharp4Parser#fixed_pointer_declarator.
def exitFixed_pointer_declarator(self, ctx):
pass
# Enter a parse tree produced by CSharp4Parser#fixed_pointer_initializer.
def enterFixed_pointer_initializer(self, ctx):
pass
# Exit a parse tree produced by CSharp4Parser#fixed_pointer_initializer.
def exitFixed_pointer_initializer(self, ctx):
pass
# Enter a parse tree produced by CSharp4Parser#struct_member_declaration_unsafe.
def enterStruct_member_declaration_unsafe(self, ctx):
pass
# Exit a parse tree produced by CSharp4Parser#struct_member_declaration_unsafe.
def exitStruct_member_declaration_unsafe(self, ctx):
pass
# Enter a parse tree produced by CSharp4Parser#fixed_size_buffer_declaration.
def enterFixed_size_buffer_declaration(self, ctx):
pass
# Exit a parse tree produced by CSharp4Parser#fixed_size_buffer_declaration.
def exitFixed_size_buffer_declaration(self, ctx):
pass
# Enter a parse tree produced by CSharp4Parser#fixed_size_buffer_modifiers.
def enterFixed_size_buffer_modifiers(self, ctx):
pass
# Exit a parse tree produced by CSharp4Parser#fixed_size_buffer_modifiers.
def exitFixed_size_buffer_modifiers(self, ctx):
pass
# Enter a parse tree produced by CSharp4Parser#fixed_size_buffer_modifier.
def enterFixed_size_buffer_modifier(self, ctx):
pass
# Exit a parse tree produced by CSharp4Parser#fixed_size_buffer_modifier.
def exitFixed_size_buffer_modifier(self, ctx):
pass
# Enter a parse tree produced by CSharp4Parser#buffer_element_type121.
def enterBuffer_element_type121(self, ctx):
pass
# Exit a parse tree produced by CSharp4Parser#buffer_element_type121.
def exitBuffer_element_type121(self, ctx):
pass
# Enter a parse tree produced by CSharp4Parser#fixed_size_buffer_declarators.
def enterFixed_size_buffer_declarators(self, ctx):
pass
# Exit a parse tree produced by CSharp4Parser#fixed_size_buffer_declarators.
def exitFixed_size_buffer_declarators(self, ctx):
pass
# Enter a parse tree produced by CSharp4Parser#fixed_size_buffer_declarator.
def enterFixed_size_buffer_declarator(self, ctx):
pass
# Exit a parse tree produced by CSharp4Parser#fixed_size_buffer_declarator.
def exitFixed_size_buffer_declarator(self, ctx):
pass
# Enter a parse tree produced by CSharp4Parser#local_variable_initializer_unsafe.
def enterLocal_variable_initializer_unsafe(self, ctx):
pass
# Exit a parse tree produced by CSharp4Parser#local_variable_initializer_unsafe.
def exitLocal_variable_initializer_unsafe(self, ctx):
pass
# Enter a parse tree produced by CSharp4Parser#stackalloc_initializer.
def enterStackalloc_initializer(self, ctx):
pass
# Exit a parse tree produced by CSharp4Parser#stackalloc_initializer.
def exitStackalloc_initializer(self, ctx):
pass
# Enter a parse tree produced by CSharp4Parser#from_contextual_keyword.
def enterFrom_contextual_keyword(self, ctx):
pass
# Exit a parse tree produced by CSharp4Parser#from_contextual_keyword.
def exitFrom_contextual_keyword(self, ctx):
pass
# Enter a parse tree produced by CSharp4Parser#let_contextual_keyword.
def enterLet_contextual_keyword(self, ctx):
pass
# Exit a parse tree produced by CSharp4Parser#let_contextual_keyword.
def exitLet_contextual_keyword(self, ctx):
pass
# Enter a parse tree produced by CSharp4Parser#where_contextual_keyword.
def enterWhere_contextual_keyword(self, ctx):
pass
# Exit a parse tree produced by CSharp4Parser#where_contextual_keyword.
def exitWhere_contextual_keyword(self, ctx):
pass
# Enter a parse tree produced by CSharp4Parser#join_contextual_keyword.
def enterJoin_contextual_keyword(self, ctx):
pass
# Exit a parse tree produced by CSharp4Parser#join_contextual_keyword.
def exitJoin_contextual_keyword(self, ctx):
pass
# Enter a parse tree produced by CSharp4Parser#on_contextual_keyword.
def enterOn_contextual_keyword(self, ctx):
pass
# Exit a parse tree produced by CSharp4Parser#on_contextual_keyword.
def exitOn_contextual_keyword(self, ctx):
pass
# Enter a parse tree produced by CSharp4Parser#equals_contextual_keyword.
def enterEquals_contextual_keyword(self, ctx):
pass
# Exit a parse tree produced by CSharp4Parser#equals_contextual_keyword.
def exitEquals_contextual_keyword(self, ctx):
pass
# Enter a parse tree produced by CSharp4Parser#into_contextual_keyword.
def enterInto_contextual_keyword(self, ctx):
pass
# Exit a parse tree produced by CSharp4Parser#into_contextual_keyword.
def exitInto_contextual_keyword(self, ctx):
pass
# Enter a parse tree produced by CSharp4Parser#orderby_contextual_keyword.
def enterOrderby_contextual_keyword(self, ctx):
pass
# Exit a parse tree produced by CSharp4Parser#orderby_contextual_keyword.
def exitOrderby_contextual_keyword(self, ctx):
pass
# Enter a parse tree produced by CSharp4Parser#ascending_contextual_keyword.
def enterAscending_contextual_keyword(self, ctx):
pass
# Exit a parse tree produced by CSharp4Parser#ascending_contextual_keyword.
def exitAscending_contextual_keyword(self, ctx):
pass
# Enter a parse tree produced by CSharp4Parser#descending_contextual_keyword.
def enterDescending_contextual_keyword(self, ctx):
pass
# Exit a parse tree produced by CSharp4Parser#descending_contextual_keyword.
def exitDescending_contextual_keyword(self, ctx):
pass
# Enter a parse tree produced by CSharp4Parser#select_contextual_keyword.
def enterSelect_contextual_keyword(self, ctx):
pass
# Exit a parse tree produced by CSharp4Parser#select_contextual_keyword.
def exitSelect_contextual_keyword(self, ctx):
pass
# Enter a parse tree produced by CSharp4Parser#group_contextual_keyword.
def enterGroup_contextual_keyword(self, ctx):
pass
# Exit a parse tree produced by CSharp4Parser#group_contextual_keyword.
def exitGroup_contextual_keyword(self, ctx):
pass
# Enter a parse tree produced by CSharp4Parser#by_contextual_keyword.
def enterBy_contextual_keyword(self, ctx):
pass
# Exit a parse tree produced by CSharp4Parser#by_contextual_keyword.
def exitBy_contextual_keyword(self, ctx):
pass
# Enter a parse tree produced by CSharp4Parser#partial_contextual_keyword.
def enterPartial_contextual_keyword(self, ctx):
pass
# Exit a parse tree produced by CSharp4Parser#partial_contextual_keyword.
def exitPartial_contextual_keyword(self, ctx):
pass
# Enter a parse tree produced by CSharp4Parser#alias_contextual_keyword.
def enterAlias_contextual_keyword(self, ctx):
pass
# Exit a parse tree produced by CSharp4Parser#alias_contextual_keyword.
def exitAlias_contextual_keyword(self, ctx):
pass
# Enter a parse tree produced by CSharp4Parser#yield_contextual_keyword.
def enterYield_contextual_keyword(self, ctx):
pass
# Exit a parse tree produced by CSharp4Parser#yield_contextual_keyword.
def exitYield_contextual_keyword(self, ctx):
pass
# Enter a parse tree produced by CSharp4Parser#get_contextual_keyword.
uses it. [lb] thinks the best
# it could do is to help a historical trail for a user, e.g.,
# in the client, highlight all split-intos with the same
# split-from, and maybe show the original, split-from line.
if first_ref and (not target_item.split_from_stack_id):
# FIXME: Add split-from to export Shapefile?
# FIXME: Only if the other ID > ours?
target_item.split_from_stack_id = source_item.stack_id
updated_item |= 0x0010
self.stats['split_split_from_sid'] += 1
g.assurt(target_item.split_from_stack_id > 0)
elif target_item.split_from_stack_id != source_item.split_from_stack_id:
self.stats['diffs_split_from_sid'] += 1
# else, they're equal.
if updated_item:
target_item.dirty = item_base.One.dirty_reason_item_auto
#
# E.g., "MSAS 103".
#RE_mndot_msas = re.compile(r'^\s*MSAS\s+\d+\s*$')
# E.g., "M-1179". MN Stk Ids: 1359673
#RE_mndot_mdash = re.compile(r'^\s*M-\d+\s*$')
# There are a few named UT-... but they're in the woods.
RE_mndot_names = re.compile(r'^\s*(M-|MSAS |T-)\d+\s*$')
# Python 2.7's re.sub accepts flags, but not 2.6's, so compile them first.
RE_mndot_usth = re.compile(r'(\s|^)USTH\s+(\d+)', re.IGNORECASE)
RE_mndot_mnth1 = re.compile(r'(\s|^)MN TH\s+(\d+)', re.IGNORECASE)
RE_mndot_mnth2 = re.compile(r'(\s|^)MNTH\s+(\d+)', re.IGNORECASE)
RE_mndot_csah1 = re.compile(r'(\s|^)CSAH\s+(\d+)', re.IGNORECASE)
RE_mndot_csah2 = re.compile(r'(\s|^)Co Rd\s+(\d+)', re.IGNORECASE)
RE_mndot_cr = re.compile(r'(\s|^)CR-(\d+)', re.IGNORECASE)
RE_mndot_isth = re.compile(r'(\s|^)ISTH\s+(\d+)', re.IGNORECASE)
# These are no longer needed now that we capitalize() route names...
RE_mndot_sp = re.compile(r'(\s|^)STATE PARK RD\s+(\d+)', re.IGNORECASE)
RE_mndot_nf = re.compile(r'(\s|^)NATIONAL FOREST RD\s+(\d+)', re.IGNORECASE)
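# Illustration only (not part of the original class): the substitutions these
# REs drive in cleanup_byway_name() below, e.g.
#   re.sub(Hausdorff_Import.RE_mndot_usth, '\\1US Hwy \\2', 'USTH 8')      -> 'US Hwy 8'
#   re.sub(Hausdorff_Import.RE_mndot_csah1, '\\1County Rd \\2', 'CSAH 61') -> 'County Rd 61'
#   re.sub(Hausdorff_Import.RE_mndot_isth, '\\1I-\\2', 'ISTH 94')          -> 'I-94'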
#
def cleanup_byway_name(self, byway_name, ref_item, delimiter='/'):
if byway_name:
route_names = [x for x in
[x.strip() for x in byway_name.split(delimiter)]
if x]
else:
route_names = []
parsed_addrs = []
for route_name in route_names:
# Normalize certain names, like DOT road classification-type names.
if self.cli_opts.friendly_names:
# Some roads from MnDOT are ugly connector identifiers. We can
# whack these.
if Hausdorff_Import.RE_mndot_names.match(route_name) is not None:
if ((ref_item is not None)
and (ref_item.geofeature_layer_id not in (
byway.Geofeature_Layer.Expressway_Ramp,
byway.Geofeature_Layer.Other_Ramp,))):
#log.debug('clnup_by_nom: MSAS/M- named item not ramp: %s'
# % (ref_item,))
pass
# Use a blank name instead.
route_name = ''
else:
# Fix MNTH, etc. E.g.s,
# USTH 8 => US Hwy 8
# MNTH 50 => State Hwy 50
# CSAH 61 => County Rd 61
# ISTH 94 => I-94 or I 94
fname = route_name
fname = re.sub(Hausdorff_Import.RE_mndot_usth,
'\\1US Hwy \\2', fname)
fname = re.sub(Hausdorff_Import.RE_mndot_mnth1,
'\\1State Hwy \\2', fname)
fname = re.sub(Hausdorff_Import.RE_mndot_mnth2,
'\\1State Hwy \\2', fname)
fname = re.sub(Hausdorff_Import.RE_mndot_csah1,
'\\1County Rd \\2', fname)
fname = re.sub(Hausdorff_Import.RE_mndot_csah2,
'\\1County Rd \\2', fname)
fname = re.sub(Hausdorff_Import.RE_mndot_cr,
'\\1County Rd \\2', fname)
fname = re.sub(Hausdorff_Import.RE_mndot_isth,
'\\1I-\\2', fname)
fname = re.sub(Hausdorff_Import.RE_mndot_sp,
'\\1State Park Rd \\2', fname)
fname = re.sub(Hausdorff_Import.RE_mndot_nf,
'\\1National Forest Rd \\2', fname)
if fname != route_name:
#log.debug(
# 'clnup_by_nom: friendly name: %s / fr: %s / in: %s'
# % (fname, route_name, byway_name,))
route_name = fname
# Deek out the parser. Supply a house number and citystate.
addr_parsed = self.parse_streetaddy(route_name)
# Add address to the ordered list we'll cull when we fish out dupls.
parsed_addrs.append((route_name, addr_parsed,))
# 2014.04.07: Maybe: Cleanup periods in names. There are 417 names with
# periods in them. Some are abbreviations, e.g., "<NAME>". Others are
# extraneous marks that we could/should remove, e.g., "2419 S. 9th St.".
# And others are not names, e.g., "4/10/11 path ends here.".
# SELECT DISTINCT(name) FROM item_versioned WHERE name like '%.%'
# ORDER BY name;
# You can uncomment this too poke around with the address object. The
# parse_streetaddy call removes periods in the prefix, suffix, and
# street type, so we could just
#
# try:
# if byway_name and (byway_name.index('Ave.') != -1):
# conf.break_here('ccpv3')
# except ValueError:
# pass
unparsable = set()
full_names = {}
for paddr in parsed_addrs:
route_name, addr_parsed = paddr
if not addr_parsed:
unparsable.add(route_name)
else:
new_name = ' '.join(
[x for x in [
addr_parsed['prefix'],
addr_parsed['street'],
addr_parsed['street_type'],
addr_parsed['suffix'],] if x])
if route_name != new_name:
log.debug('clnup_by_nom: normalized name: "%s" ==> "%s"'
% (route_name, new_name,))
route_name = new_name
exact_match = False
for match_list in full_names.values():
for match_tup in match_list:
other_name, other_addr = match_tup
if route_name == other_name:
exact_match = True
break
if not exact_match:
if addr_parsed['street'] not in full_names:
misc.dict_list_append(full_names,
addr_parsed['street'],
(route_name, addr_parsed,))
else:
# We've seen a route with this street name already, but the
# prefixes or suffixes or street types differ.
new_list = []
merged = False
match_list = full_names[addr_parsed['street']]
for match_tup in match_list:
other_name, other_addr = match_tup
if not merged:
mergeable = True
for component in ('prefix',
'suffix',
'street_type',):
if (addr_parsed[component]
and other_addr[component]
and (addr_parsed[component]
!= other_addr[component])):
mergeable = False
if mergeable:
for component in ('prefix',
'suffix',
'street_type',):
if not addr_parsed[component]:
addr_parsed[component] = other_addr[component]
new_name = ' '.join(
[x for x in [
addr_parsed['prefix'],
addr_parsed['street'],
addr_parsed['street_type'],
addr_parsed['suffix'],] if x])
log.debug(
'clnup_by_nom: merged names "%s" + "%s" ==> "%s"'
% (full_names[addr_parsed['street']],
route_name,
new_name,))
new_list.append((new_name, addr_parsed,))
merged = True
if not merged:
# Not mergeable, so keep the existing match.
new_list.append((other_name, other_addr,))
# end: for match_tup in match_list
if not merged:
# Not merged, so add to list.
new_list.append((route_name, addr_parsed,))
full_names[addr_parsed['street']] = new_list
# else, exact_match, so ignore the duplicate.
# FIXME: Sort by street type. E.g., for highways in small towns, put local
# street name before highway name, e.g., "Main St / US Hwy 11",
# rather than "US Hwy 11 / Main St".
route_names = list(unparsable)
for match_tup in full_names.values():
route_names += [x[0] for x in match_tup]
new_name = ' / '.join(route_names)
#if new_name != byway_name:
# log.debug('clnup_by_nom: changing: "%s" ==> "%s"'
# % (byway_name, new_name,))
return new_name
#
def merge_non_user_lvals(self, shpfeat, target_item, source_item):
# Make any links in the target that are found in the source.
# Note that we've already added and deleted tags and attributes
# according to the feature fields, so just be sure not to re-add
# tags we deleted or attributes to unset.
del_tag_sids = set()
add_tag_names, del_tag_names = self.tag_list_assemble(shpfeat)
for tag_name in del_tag_names:
tag_item = self.qb.item_mgr.cache_tag_lookup_by_name(tag_name)
if tag_item is not None:
del_tag_sids.add(tag_item.stack_id)
tag_names = []
for lval in source_item.link_values.itervalues():
add_ok = True
if lval.link_lhs_type_id == Item_Type.TAG:
# Check not scrubbing 'gravel road' and 'unpaved', or not
# deleted by user via feature field.
if (((self.cli_opts.fix_gravel_unpaved_issue)
and (lval.lhs_stack_id in self.bad_tag_sids))
or (lval.lhs_stack_id in del_tag_sids)):
add_ok = False
if add_ok:
try:
the_tag = self.qb.item_mgr.cache_tags[lval.lhs_stack_id]
tag_names.append(the_tag.name)
except KeyError:
log.warning('Missing tag? No tag found with stack ID: %s'
% (lval.lhs_stack_id,))
if lval.lhs_stack_id not in target_item.link_values:
# The tag isn't already set or wasn't deleted; add it.
self.add_new_lval_lval(target_item, lval)
self.stats['split_lval_add'] += 1
misc.dict_count_inc(self.stats['edit_tag_add_name'],
the_tag.name)
elif lval.link_lhs_type_id == Item_Type.ATTRIBUTE:
try:
target_lval = target_item.link_values[lval.lhs_stack_id]
except KeyError:
target_lval = None
try:
ccp_attr = self.field_attr_cache_sid[lval.lhs_stack_id]
fieldn = self.attr_to_field[ccp_attr.value_internal_name]
except KeyError:
ccp_attr = None
if ccp_attr is not None:
g.assurt(not ccp_attr.multiple_allowed) # Am I right?
if target_lval is None:
self.add_new_lval_attc(target_item, ccp_attr,
value_integer=lval.value_integer,
value_text=lval.value_text)
shpfeat['properties'][fieldn] = lval.value_integer
self.stats['split_lval_add'] += 1
elif ccp_attr.value_type == 'integer':
if not target_lval.value_integer:
#self.update_lval_attc(target_lval, ccp_attr,
# value_integer=lval.value_integer, value_text=None)
self.update_lval_lval(target_lval,
value_integer=lval.value_integer,
value_text=lval.value_text)
shpfeat['properties'][fieldn] = lval.value_integer
self.stats['split_lval_edit'] += 1
elif (target_lval.value_integer != lval.value_integer):
self.stats['diffs_lval_edit'] += 1
else:
# The attribute values are equal.
self.stats['split_lval_skip'] += 1
elif ccp_attr.value_type == 'text':
if not target_lval.value_text:
#self.update_lval_attc(target_lval, ccp_attr,
# value_integer=None, value_text=lval.value_text)
self.update_lval_lval(target_lval,
value_integer=lval.value_integer,
value_text=lval.value_text)
shpfeat['properties'][fieldn] = lval.value_text
self.stats['split_lval_edit'] += 1
elif (target_lval.value_text != lval.value_text):
self.stats['diffs_lval_edit'] += 1
else:
# The attribute values are equal.
self.stats['split_lval_skip'] += 1
# else, this is some other attribute we don't care about.
else:
# ANNOTATION, POST, or DISCUSSION.
if lval.lhs_stack_id not in target_item.link_values:
self.add_new_lval_lval(target_item, lval)
# Skipping shpfeat['properties'], which doesn't show notes/posts
self.stats['split_lval_add'] += 1
else:
# A similar link_value already exists for the target.
self.stats['split_lval_skip'] += 1
shpfeat['properties']['item_tags'] = ','.join(tag_names)
#
def merge_byway_aadt(self, shpfeat, target_item, source_item):
# Skipping: We don't show or allow editing of AADT via Shpfile.
aadt_fetch_sql = target_item.aadt_fetch_sql(aadt_type='',
all_records=True)
target_rows = self.qb.db.sql(aadt_fetch_sql)
#
aadt_fetch_sql = source_item.aadt_fetch_sql(aadt_type='',
all_records=True)
source_rows = self.qb.db.sql(aadt_fetch_sql)
#
if (not target_rows) and source_rows:
for source_row in source_rows:
# See we might try merging to the same item from more than one
# other item, we have to check for duplicates.
try:
add_row = source_row['aadt_type'] not in self.aadts_dbldict[
target_item.stack_id][source_row['aadt_year']]
except KeyError:
add_row = True
if add_row:
try:
self.aadts_dbldict[target_item.stack_id][
source_row['aadt_year']].add(
source_row['aadt_type'])
except KeyError:
misc.dict_dict_update(self.aadts_dbldict,
target_item.stack_id,
source_row['aadt_year'],
set([source_row['aadt_type'],]))
insert_aadt = (
"(%d, %d, %d, | |
PhoneC.
Swap active call on PhoneA. (N times)
Args:
num_swaps: do swap for 'num_swaps' times.
This value can be 0 (no swap operation).
Returns:
call_ab_id, call_ac_id if succeed;
None, None if failed.
"""
ads = self.android_devices
# make sure PhoneA is GSM phone before proceed.
if (ads[0].droid.telephonyGetPhoneType() != PHONE_TYPE_GSM):
self.log.error("{} not GSM phone, abort wcdma swap test.".format(
ads[0].serial))
return None, None
call_ab_id = self._three_phone_call_mo_add_mo(
[ads[0], ads[1], ads[2]],
[phone_setup_voice_3g, phone_setup_voice_general,
phone_setup_voice_general], [is_phone_in_call_3g, None, None])
if call_ab_id is None:
self.log.error("Failed to get call_ab_id")
return None, None
calls = ads[0].droid.telecomCallGetCallIds()
self.log.info("Calls in PhoneA{}".format(calls))
if num_active_calls(self.log, ads[0]) != 2:
return None, None
if calls[0] == call_ab_id:
call_ac_id = calls[1]
else:
call_ac_id = calls[0]
if num_swaps > 0:
self.log.info("Step3: Begin Swap x{} test.".format(num_swaps))
if not swap_calls(self.log, ads, call_ab_id, call_ac_id,
num_swaps):
self.log.error("Swap test failed.")
return None, None
return call_ab_id, call_ac_id
def _test_wcdma_mt_mt_add_swap_x(self, num_swaps):
"""Test swap feature in WCDMA call.
PhoneB call PhoneA (WCDMA), accept on PhoneA.
PhoneC call PhoneA (WCDMA), accept on PhoneA.
Swap active call on PhoneA. (N times)
Args:
num_swaps: do swap for 'num_swaps' times.
This value can be 0 (no swap operation).
Returns:
call_ab_id, call_ac_id if succeed;
None, None if failed.
"""
ads = self.android_devices
# make sure PhoneA is GSM phone before proceed.
if (ads[0].droid.telephonyGetPhoneType() != PHONE_TYPE_GSM):
self.log.error("{} not GSM phone, abort wcdma swap test.".format(
ads[0].serial))
return None, None
call_ab_id = self._three_phone_call_mt_add_mt(
[ads[0], ads[1], ads[2]],
[phone_setup_voice_3g, phone_setup_voice_general,
phone_setup_voice_general], [is_phone_in_call_3g, None, None])
if call_ab_id is None:
self.log.error("Failed to get call_ab_id")
return None, None
calls = ads[0].droid.telecomCallGetCallIds()
self.log.info("Calls in PhoneA{}".format(calls))
if num_active_calls(self.log, ads[0]) != 2:
return None, None
if calls[0] == call_ab_id:
call_ac_id = calls[1]
else:
call_ac_id = calls[0]
if num_swaps > 0:
self.log.info("Step3: Begin Swap x{} test.".format(num_swaps))
if not swap_calls(self.log, ads, call_ab_id, call_ac_id,
num_swaps):
self.log.error("Swap test failed.")
return None, None
return call_ab_id, call_ac_id
def _test_wcdma_mo_mt_add_swap_x(self, num_swaps):
"""Test swap feature in WCDMA call.
PhoneA (WCDMA) call PhoneB, accept on PhoneB.
PhoneC call PhoneA (WCDMA), accept on PhoneA.
Swap active call on PhoneA. (N times)
Args:
num_swaps: do swap for 'num_swaps' times.
This value can be 0 (no swap operation).
Returns:
call_ab_id, call_ac_id if succeed;
None, None if failed.
"""
ads = self.android_devices
# make sure PhoneA is GSM phone before proceed.
if (ads[0].droid.telephonyGetPhoneType() != PHONE_TYPE_GSM):
self.log.error("{} not GSM phone, abort wcdma swap test.".format(
ads[0].serial))
return None, None
call_ab_id = self._three_phone_call_mo_add_mt(
[ads[0], ads[1], ads[2]],
[phone_setup_voice_3g, phone_setup_voice_general,
phone_setup_voice_general], [is_phone_in_call_wcdma, None, None])
if call_ab_id is None:
self.log.error("Failed to get call_ab_id")
return None, None
calls = ads[0].droid.telecomCallGetCallIds()
self.log.info("Calls in PhoneA{}".format(calls))
if num_active_calls(self.log, ads[0]) != 2:
return None, None
if calls[0] == call_ab_id:
call_ac_id = calls[1]
else:
call_ac_id = calls[0]
if num_swaps > 0:
self.log.info("Step3: Begin Swap x{} test.".format(num_swaps))
if not swap_calls(self.log, ads, call_ab_id, call_ac_id,
num_swaps):
self.log.error("Swap test failed.")
return None, None
return call_ab_id, call_ac_id
def _test_csfb_wcdma_mo_mo_add_swap_x(self, num_swaps):
"""Test swap feature in CSFB WCDMA call.
PhoneA (CSFB WCDMA) call PhoneB, accept on PhoneB.
PhoneA (CSFB WCDMA) call PhoneC, accept on PhoneC.
Swap active call on PhoneA. (N times)
Args:
num_swaps: do swap for 'num_swaps' times.
This value can be 0 (no swap operation).
Returns:
call_ab_id, call_ac_id if succeed;
None, None if failed.
"""
ads = self.android_devices
# make sure PhoneA is GSM phone before proceed.
if (ads[0].droid.telephonyGetPhoneType() != PHONE_TYPE_GSM):
self.log.error("{} not GSM phone, abort wcdma swap test.".format(
ads[0].serial))
return None, None
call_ab_id = self._three_phone_call_mo_add_mo(
[ads[0], ads[1], ads[2]],
[phone_setup_csfb, phone_setup_voice_general,
phone_setup_voice_general], [is_phone_in_call_csfb, None, None])
if call_ab_id is None:
self.log.error("Failed to get call_ab_id")
return None, None
calls = ads[0].droid.telecomCallGetCallIds()
self.log.info("Calls in PhoneA{}".format(calls))
if num_active_calls(self.log, ads[0]) != 2:
return None, None
if calls[0] == call_ab_id:
call_ac_id = calls[1]
else:
call_ac_id = calls[0]
if num_swaps > 0:
self.log.info("Step3: Begin Swap x{} test.".format(num_swaps))
if not swap_calls(self.log, ads, call_ab_id, call_ac_id,
num_swaps):
self.log.error("Swap test failed.")
return None, None
return call_ab_id, call_ac_id
def _test_csfb_wcdma_mo_mt_add_swap_x(self, num_swaps):
"""Test swap feature in CSFB WCDMA call.
PhoneA (CSFB WCDMA) call PhoneB, accept on PhoneB.
PhoneC call PhoneA (CSFB WCDMA), accept on PhoneA.
Swap active call on PhoneA. (N times)
Args:
num_swaps: do swap for 'num_swaps' times.
This value can be 0 (no swap operation).
Returns:
call_ab_id, call_ac_id if succeed;
None, None if failed.
"""
ads = self.android_devices
# make sure PhoneA is GSM phone before proceed.
if (ads[0].droid.telephonyGetPhoneType() != PHONE_TYPE_GSM):
self.log.error("{} not GSM phone, abort wcdma swap test.".format(
ads[0].serial))
return None, None
call_ab_id = self._three_phone_call_mo_add_mt(
[ads[0], ads[1], ads[2]],
[phone_setup_csfb, phone_setup_voice_general,
phone_setup_voice_general], [is_phone_in_call_csfb, None, None])
if call_ab_id is None:
self.log.error("Failed to get call_ab_id")
return None, None
calls = ads[0].droid.telecomCallGetCallIds()
self.log.info("Calls in PhoneA{}".format(calls))
if num_active_calls(self.log, ads[0]) != 2:
return None, None
if calls[0] == call_ab_id:
call_ac_id = calls[1]
else:
call_ac_id = calls[0]
if num_swaps > 0:
self.log.info("Step3: Begin Swap x{} test.".format(num_swaps))
if not swap_calls(self.log, ads, call_ab_id, call_ac_id,
num_swaps):
self.log.error("Swap test failed.")
return None, None
return call_ab_id, call_ac_id
def _test_ims_conference_merge_drop_second_call_no_cep(self, call_ab_id,
call_ac_id):
"""Test conference merge and drop in VoLTE call.
PhoneA in IMS (VoLTE or WiFi Calling) call with PhoneB.
PhoneA in IMS (VoLTE or WiFi Calling) call with PhoneC.
Merge calls to conference on PhoneA.
Hangup on PhoneC, check call continues between AB.
Hangup on PhoneB, check A ends.
Args:
call_ab_id: call id for call_AB on PhoneA.
call_ac_id: call id for call_AC on PhoneA.
Returns:
True if succeed;
False if failed.
"""
ads = self.android_devices
self.log.info("Step4: Merge to Conf Call and verify Conf Call.")
ads[0].droid.telecomCallJoinCallsInConf(call_ab_id, call_ac_id)
time.sleep(WAIT_TIME_IN_CALL)
calls = ads[0].droid.telecomCallGetCallIds()
self.log.info("Calls in PhoneA{}".format(calls))
if num_active_calls(self.log, ads[0]) != 1:
self.log.error("Total number of call ids in {} is not 1.".format(
ads[0].serial))
if get_cep_conference_call_id(ads[0]) is not None:
self.log.error("CEP enabled.")
else:
self.log.error("Merge failed.")
return False
call_conf_id = None
for call_id in calls:
if call_id != call_ab_id and call_id != call_ac_id:
call_conf_id = call_id
if not call_conf_id:
self.log.error("Merge call fail, no new conference call id.")
return False
if not verify_incall_state(self.log, [ads[0], ads[1], ads[2]], True):
return False
# Check if Conf Call is currently active
if ads[0].droid.telecomCallGetCallState(
call_conf_id) != CALL_STATE_ACTIVE:
self.log.error(
"Call_id:{}, state:{}, expected: STATE_ACTIVE".format(
call_conf_id, ads[0].droid.telecomCallGetCallState(
call_conf_id)))
return False
self.log.info("Step5: End call on PhoneC and verify call continues.")
ads[2].droid.telecomEndCall()
time.sleep(WAIT_TIME_IN_CALL)
calls = ads[0].droid.telecomCallGetCallIds()
self.log.info("Calls in PhoneA{}".format(calls))
if not verify_incall_state(self.log, [ads[0], ads[1]], True):
return False
if not verify_incall_state(self.log, [ads[2]], False):
return False
# Because of b/18413009, VZW VoLTE conference host will not drop call
# even if all participants drop. The reason is VZW network is not
# providing such information to DUT.
# So this test probably will fail on the last step for VZW.
self.log.info("Step6: End call on PhoneB and verify PhoneA end.")
ads[1].droid.telecomEndCall()
time.sleep(WAIT_TIME_IN_CALL)
if not verify_incall_state(self.log, [ads[0], ads[1], ads[2]], False):
return False
return True
def _merge_cep_conference_call(self, call_ab_id, call_ac_id):
"""Merge CEP conference call.
PhoneA in IMS (VoLTE or WiFi Calling) call with PhoneB.
PhoneA in IMS (VoLTE or WiFi Calling) call with PhoneC.
Merge calls to conference on PhoneA (CEP enabled IMS conference).
Args:
call_ab_id: call id for call_AB on PhoneA.
call_ac_id: call id for call_AC on PhoneA.
Returns:
call_id for conference
"""
ads = self.android_devices
self.log.info("Step4: Merge to Conf Call and verify Conf Call.")
ads[0].droid.telecomCallJoinCallsInConf(call_ab_id, call_ac_id)
time.sleep(WAIT_TIME_IN_CALL)
calls = ads[0].droid.telecomCallGetCallIds()
self.log.info("Calls in PhoneA{}".format(calls))
call_conf_id = get_cep_conference_call_id(ads[0])
if call_conf_id is None:
self.log.error(
"No call with children. Probably CEP not enabled or merge failed.")
return None
calls.remove(call_conf_id)
if (set(ads[0].droid.telecomCallGetCallChildren(call_conf_id)) !=
set(calls)):
self.log.error(
"Children list<{}> for conference call is not correct.".format(
ads[0].droid.telecomCallGetCallChildren(call_conf_id)))
return None
if (CALL_PROPERTY_CONFERENCE not in
ads[0].droid.telecomCallGetProperties(call_conf_id)):
self.log.error("Conf call id properties wrong: {}".format(ads[
0].droid.telecomCallGetProperties(call_conf_id)))
return None
if (CALL_CAPABILITY_MANAGE_CONFERENCE not in
ads[0].droid.telecomCallGetCapabilities(call_conf_id)):
self.log.error("Conf call id capabilities wrong: {}".format(ads[
0].droid.telecomCallGetCapabilities(call_conf_id)))
return None
if (call_ab_id in calls) or (call_ac_id in calls):
self.log.error(
"Previous call ids should not in new call list after merge.")
return None
if not verify_incall_state(self.log, [ads[0], ads[1], ads[2]], True):
return None
# Check if Conf Call is currently active
if ads[0].droid.telecomCallGetCallState(
call_conf_id) != CALL_STATE_ACTIVE:
self.log.error(
"Call_id:{}, state:{}, expected: STATE_ACTIVE".format(
call_conf_id, ads[0].droid.telecomCallGetCallState(
call_conf_id)))
return None
return call_conf_id
def _test_ims_conference_merge_drop_second_call_from_participant_cep(
self, call_ab_id, call_ac_id):
"""Test conference merge and drop in IMS (VoLTE or WiFi Calling) call.
(CEP enabled).
PhoneA in IMS (VoLTE or WiFi Calling) call with PhoneB.
PhoneA | |
JSONSchemaValidatorD53842E83F0538CAb91E097Aa6800Ce_v3_0_0
from .validators.v3_0_0.jsd_f403dda9440503191536993f569cc6f \
import JSONSchemaValidatorF403Dda9440503191536993F569Cc6F \
as JSONSchemaValidatorF403Dda9440503191536993F569Cc6F_v3_0_0
from .validators.v3_0_0.jsd_e6734850fabb2097fa969948cb \
import JSONSchemaValidatorE6734850FaBb2097Fa969948Cb \
as JSONSchemaValidatorE6734850FaBb2097Fa969948Cb_v3_0_0
from .validators.v3_0_0.jsd_e84541805d1da1fa3d4d581102a9 \
import JSONSchemaValidatorE84541805D1DA1Fa3D4D581102A9 \
as JSONSchemaValidatorE84541805D1DA1Fa3D4D581102A9_v3_0_0
from .validators.v3_0_0.jsd_c137cad852579f4b810ff8adf661 \
import JSONSchemaValidatorC137Cad852579F4B810Ff8Adf661 \
as JSONSchemaValidatorC137Cad852579F4B810Ff8Adf661_v3_0_0
from .validators.v3_0_0.jsd_fd707ac0454be8fecc73a918a27b6 \
import JSONSchemaValidatorFd707Ac0454Be8FecC73A918A27B6 \
as JSONSchemaValidatorFd707Ac0454Be8FecC73A918A27B6_v3_0_0
from .validators.v3_0_0.jsd_fff985b5159a0aa52bfe9e62ba7 \
import JSONSchemaValidatorFff985B5159A0Aa52Bfe9E62Ba7 \
as JSONSchemaValidatorFff985B5159A0Aa52Bfe9E62Ba7_v3_0_0
from .validators.v3_0_0.jsd_d51ebdbbc75c0f8ed6161ae070a276 \
import JSONSchemaValidatorD51EbdBbc75C0F8Ed6161Ae070A276 \
as JSONSchemaValidatorD51EbdBbc75C0F8Ed6161Ae070A276_v3_0_0
from .validators.v3_0_0.jsd_a5b160a5675039b7ddf3dc960c7968 \
import JSONSchemaValidatorA5B160A5675039B7DdF3Dc960C7968 \
as JSONSchemaValidatorA5B160A5675039B7DdF3Dc960C7968_v3_0_0
from .validators.v3_0_0.jsd_a57687cef65891a6f48dd17f456c4e \
import JSONSchemaValidatorA57687Cef65891A6F48Dd17F456C4E \
as JSONSchemaValidatorA57687Cef65891A6F48Dd17F456C4E_v3_0_0
from .validators.v3_0_0.jsd_c785067a5a5e3283f96dd5006c7865 \
import JSONSchemaValidatorC785067A5A5E3283F96Dd5006C7865 \
as JSONSchemaValidatorC785067A5A5E3283F96Dd5006C7865_v3_0_0
from .validators.v3_0_0.jsd_af104d12b5c5e668af1504feca5c9b1 \
import JSONSchemaValidatorAf104D12B5C5E668Af1504Feca5C9B1 \
as JSONSchemaValidatorAf104D12B5C5E668Af1504Feca5C9B1_v3_0_0
from .validators.v3_0_0.jsd_b9eb9547216547cab8b9e686eee674b \
import JSONSchemaValidatorB9Eb9547216547CAb8B9E686Eee674B \
as JSONSchemaValidatorB9Eb9547216547CAb8B9E686Eee674B_v3_0_0
from .validators.v3_0_0.jsd_c6c2a4908ee5f48b7e9cae7572f6a94 \
import JSONSchemaValidatorC6C2A4908Ee5F48B7E9Cae7572F6A94 \
as JSONSchemaValidatorC6C2A4908Ee5F48B7E9Cae7572F6A94_v3_0_0
from .validators.v3_0_0.jsd_ea7e01261355dcfae6412e0615ba1f5 \
import JSONSchemaValidatorEa7E01261355DcfAe6412E0615Ba1F5 \
as JSONSchemaValidatorEa7E01261355DcfAe6412E0615Ba1F5_v3_0_0
from .validators.v3_0_0.jsd_f1a8ae602c95ac08676391c374274f2 \
import JSONSchemaValidatorF1A8Ae602C95Ac08676391C374274F2 \
as JSONSchemaValidatorF1A8Ae602C95Ac08676391C374274F2_v3_0_0
from .validators.v3_0_0.jsd_f9081a48e3c5f4fae5aa00f889216dd \
import JSONSchemaValidatorF9081A48E3C5F4FAe5AA00F889216Dd \
as JSONSchemaValidatorF9081A48E3C5F4FAe5AA00F889216Dd_v3_0_0
from .validators.v3_0_0.jsd_a71ccf29f05ee29af909b07bb9c754 \
import JSONSchemaValidatorA71Ccf29F05Ee29Af909B07Bb9C754 \
as JSONSchemaValidatorA71Ccf29F05Ee29Af909B07Bb9C754_v3_0_0
from .validators.v3_0_0.jsd_bc200af85d598885a990ff9bcbf8 \
import JSONSchemaValidatorBc200Af85D598885A990Ff9Bcbf8 \
as JSONSchemaValidatorBc200Af85D598885A990Ff9Bcbf8_v3_0_0
from .validators.v3_0_0.jsd_f845bd746a5c00967fe66178c5edbf \
import JSONSchemaValidatorF845Bd746A5C00967FE66178C5Edbf \
as JSONSchemaValidatorF845Bd746A5C00967FE66178C5Edbf_v3_0_0
from .validators.v3_0_0.jsd_e92c6e47625711b9ce06f92bd4d219 \
import JSONSchemaValidatorE92C6E47625711B9Ce06F92Bd4D219 \
as JSONSchemaValidatorE92C6E47625711B9Ce06F92Bd4D219_v3_0_0
from .validators.v3_0_0.jsd_bdae59219027b4d40b94fa3d \
import JSONSchemaValidatorBdae59219027B4D40B94Fa3D \
as JSONSchemaValidatorBdae59219027B4D40B94Fa3D_v3_0_0
from .validators.v3_0_0.jsd_a160f293375ae9924d8240c4efdc6a \
import JSONSchemaValidatorA160F293375Ae9924D8240C4Efdc6A \
as JSONSchemaValidatorA160F293375Ae9924D8240C4Efdc6A_v3_0_0
from .validators.v3_0_0.jsd_a250e5e46850384fa5cb10a5f \
import JSONSchemaValidatorA250E5E46850384Fa5Cb10A5F \
as JSONSchemaValidatorA250E5E46850384Fa5Cb10A5F_v3_0_0
from .validators.v3_0_0.jsd_a095b061f564ebba331f66505b0e3 \
import JSONSchemaValidatorA095B061F564EBba331F66505B0E3 \
as JSONSchemaValidatorA095B061F564EBba331F66505B0E3_v3_0_0
from .validators.v3_0_0.jsd_b22d6ad9f595ab7e3eee5cf44de8a \
import JSONSchemaValidatorB22D6Ad9F595AB7E3Eee5Cf44De8A \
as JSONSchemaValidatorB22D6Ad9F595AB7E3Eee5Cf44De8A_v3_0_0
from .validators.v3_0_0.jsd_a4cccea3c9567498f6f688e0cf86e7 \
import JSONSchemaValidatorA4CcceA3C9567498F6F688E0Cf86E7 \
as JSONSchemaValidatorA4CcceA3C9567498F6F688E0Cf86E7_v3_0_0
from .validators.v3_0_0.jsd_d87a24994c514d955149d33e1a99fb \
import JSONSchemaValidatorD87A24994C514D955149D33E1A99Fb \
as JSONSchemaValidatorD87A24994C514D955149D33E1A99Fb_v3_0_0
from .validators.v3_0_0.jsd_a207a157244508c99bf3e9abb26aab8 \
import JSONSchemaValidatorA207A157244508C99Bf3E9Abb26Aab8 \
as JSONSchemaValidatorA207A157244508C99Bf3E9Abb26Aab8_v3_0_0
from .validators.v3_0_0.jsd_afa6d7527045ebc928ee7e30ad3092a \
import JSONSchemaValidatorAfa6D7527045Ebc928EE7E30Ad3092A \
as JSONSchemaValidatorAfa6D7527045Ebc928EE7E30Ad3092A_v3_0_0
from .validators.v3_0_0.jsd_b641825a9555ecba105cabbdf50fc78 \
import JSONSchemaValidatorB641825A9555EcbA105Cabbdf50Fc78 \
as JSONSchemaValidatorB641825A9555EcbA105Cabbdf50Fc78_v3_0_0
from .validators.v3_0_0.jsd_d904c521059563490c4a93871b33d51 \
import JSONSchemaValidatorD904C521059563490C4A93871B33D51 \
as JSONSchemaValidatorD904C521059563490C4A93871B33D51_v3_0_0
from .validators.v3_0_0.jsd_dfe1db8729d541fb3a17d31d47d1881 \
import JSONSchemaValidatorDfe1Db8729D541FB3A17D31D47D1881 \
as JSONSchemaValidatorDfe1Db8729D541FB3A17D31D47D1881_v3_0_0
from .validators.v3_0_0.jsd_ed5bf99062d5dee87fe5cd96e360ec2 \
import JSONSchemaValidatorEd5Bf99062D5Dee87Fe5Cd96E360Ec2 \
as JSONSchemaValidatorEd5Bf99062D5Dee87Fe5Cd96E360Ec2_v3_0_0
from .validators.v3_0_0.jsd_a0824f9a589c58cd8df522524cb550ad \
import JSONSchemaValidatorA0824F9A589C58Cd8Df522524Cb550Ad \
as JSONSchemaValidatorA0824F9A589C58Cd8Df522524Cb550Ad_v3_0_0
from .validators.v3_0_0.jsd_a0fdb67d95475cd39382171dec96d6c1 \
import JSONSchemaValidatorA0Fdb67D95475Cd39382171Dec96D6C1 \
as JSONSchemaValidatorA0Fdb67D95475Cd39382171Dec96D6C1_v3_0_0
from .validators.v3_0_0.jsd_a14b837d975e5666860b664edde58837 \
import JSONSchemaValidatorA14B837D975E5666860B664Edde58837 \
as JSONSchemaValidatorA14B837D975E5666860B664Edde58837_v3_0_0
from .validators.v3_0_0.jsd_a1e3cde0c3f254b39caaaf7c907ae67e \
import JSONSchemaValidatorA1E3Cde0C3F254B39CaaAf7C907Ae67E \
as JSONSchemaValidatorA1E3Cde0C3F254B39CaaAf7C907Ae67E_v3_0_0
from .validators.v3_0_0.jsd_a22b2304dcc855abb2a298de6ecddb65 \
import JSONSchemaValidatorA22B2304Dcc855AbB2A298De6Ecddb65 \
as JSONSchemaValidatorA22B2304Dcc855AbB2A298De6Ecddb65_v3_0_0
from .validators.v3_0_0.jsd_a4ab683ce53052e089626a096abaf451 \
import JSONSchemaValidatorA4Ab683CE53052E089626A096Abaf451 \
as JSONSchemaValidatorA4Ab683CE53052E089626A096Abaf451_v3_0_0
from .validators.v3_0_0.jsd_a50d1bd34d5f593aadf8eb02083c67b0 \
import JSONSchemaValidatorA50D1Bd34D5F593AAdf8Eb02083C67B0 \
as JSONSchemaValidatorA50D1Bd34D5F593AAdf8Eb02083C67B0_v3_0_0
from .validators.v3_0_0.jsd_a60b29bfe2b055299e4360d84380ddd4 \
import JSONSchemaValidatorA60B29BfE2B055299E4360D84380Ddd4 \
as JSONSchemaValidatorA60B29BfE2B055299E4360D84380Ddd4_v3_0_0
from .validators.v3_0_0.jsd_a69c7f1ad54e5e9cae1f871e19eed61b \
import JSONSchemaValidatorA69C7F1AD54E5E9CAe1F871E19Eed61B \
as JSONSchemaValidatorA69C7F1AD54E5E9CAe1F871E19Eed61B_v3_0_0
from .validators.v3_0_0.jsd_a87d60d590485830aed781bfb15b5c95 \
import JSONSchemaValidatorA87D60D590485830Aed781Bfb15B5C95 \
as JSONSchemaValidatorA87D60D590485830Aed781Bfb15B5C95_v3_0_0
from .validators.v3_0_0.jsd_aa4daefaa3b95ecca521188a43eacbd9 \
import JSONSchemaValidatorAa4DaefaA3B95EccA521188A43Eacbd9 \
as JSONSchemaValidatorAa4DaefaA3B95EccA521188A43Eacbd9_v3_0_0
from .validators.v3_0_0.jsd_ab225d0b2a6c52a99df1f1d8fb6a4dac \
import JSONSchemaValidatorAb225D0B2A6C52A99Df1F1D8Fb6A4Dac \
as JSONSchemaValidatorAb225D0B2A6C52A99Df1F1D8Fb6A4Dac_v3_0_0
from .validators.v3_0_0.jsd_ab48268c76aa598788a5ebc370226f3a \
import JSONSchemaValidatorAb48268C76Aa598788A5Ebc370226F3A \
as JSONSchemaValidatorAb48268C76Aa598788A5Ebc370226F3A_v3_0_0
from .validators.v3_0_0.jsd_ab916b19789c59b79dddbc2d0a3c57fc \
import JSONSchemaValidatorAb916B19789C59B79DddBc2D0A3C57Fc \
as JSONSchemaValidatorAb916B19789C59B79DddBc2D0A3C57Fc_v3_0_0
from .validators.v3_0_0.jsd_ac171b8ccf79502fbc4b35909970a1cb \
import JSONSchemaValidatorAc171B8CCf79502FBc4B35909970A1Cb \
as JSONSchemaValidatorAc171B8CCf79502FBc4B35909970A1Cb_v3_0_0
from .validators.v3_0_0.jsd_acf0372068885036baee3c4524638f31 \
import JSONSchemaValidatorAcf0372068885036Baee3C4524638F31 \
as JSONSchemaValidatorAcf0372068885036Baee3C4524638F31_v3_0_0
from .validators.v3_0_0.jsd_adac9b81d9235be3b656acf9436583dd \
import JSONSchemaValidatorAdac9B81D9235Be3B656Acf9436583Dd \
as JSONSchemaValidatorAdac9B81D9235Be3B656Acf9436583Dd_v3_0_0
from .validators.v3_0_0.jsd_ae8d7c8f33bb52ceb04880845f2f45ba \
import JSONSchemaValidatorAe8D7C8F33Bb52CeB04880845F2F45Ba \
as JSONSchemaValidatorAe8D7C8F33Bb52CeB04880845F2F45Ba_v3_0_0
from .validators.v3_0_0.jsd_af14464cc6a05f6f87bbe7c174b6d5f6 \
import JSONSchemaValidatorAf14464CC6A05F6F87BbE7C174B6D5F6 \
as JSONSchemaValidatorAf14464CC6A05F6F87BbE7C174B6D5F6_v3_0_0
from .validators.v3_0_0.jsd_afe1108b1a59539ebe3de3e5652c9653 \
import JSONSchemaValidatorAfe1108B1A59539EBe3DE3E5652C9653 \
as JSONSchemaValidatorAfe1108B1A59539EBe3DE3E5652C9653_v3_0_0
from .validators.v3_0_0.jsd_b09ea91f72885e05b6aa73e89546f969 \
import JSONSchemaValidatorB09Ea91F72885E05B6Aa73E89546F969 \
as JSONSchemaValidatorB09Ea91F72885E05B6Aa73E89546F969_v3_0_0
from .validators.v3_0_0.jsd_b227e1b5bbac556a9f577d3a3ea407af \
import JSONSchemaValidatorB227E1B5Bbac556A9F577D3A3Ea407Af \
as JSONSchemaValidatorB227E1B5Bbac556A9F577D3A3Ea407Af_v3_0_0
from .validators.v3_0_0.jsd_b3284240745e5b929c51495fe80bc1c4 \
import JSONSchemaValidatorB3284240745E5B929C51495Fe80Bc1C4 \
as JSONSchemaValidatorB3284240745E5B929C51495Fe80Bc1C4_v3_0_0
from .validators.v3_0_0.jsd_b3c356cfc48a5da4b13b8ecbae5748b7 \
import JSONSchemaValidatorB3C356CfC48A5Da4B13B8Ecbae5748B7 \
as JSONSchemaValidatorB3C356CfC48A5Da4B13B8Ecbae5748B7_v3_0_0
from .validators.v3_0_0.jsd_b3d905ee2883501281de916733b4025c \
import JSONSchemaValidatorB3D905Ee2883501281De916733B4025C \
as JSONSchemaValidatorB3D905Ee2883501281De916733B4025C_v3_0_0
from .validators.v3_0_0.jsd_b4ceac9ee830523ca5ddbfdf3e1b44be \
import JSONSchemaValidatorB4Ceac9EE830523CA5DdBfdf3E1B44Be \
as JSONSchemaValidatorB4Ceac9EE830523CA5DdBfdf3E1B44Be_v3_0_0
from .validators.v3_0_0.jsd_b5c6ed4306f059cc963895a04f219d5d \
import JSONSchemaValidatorB5C6Ed4306F059Cc963895A04F219D5D \
as JSONSchemaValidatorB5C6Ed4306F059Cc963895A04F219D5D_v3_0_0
from .validators.v3_0_0.jsd_b8104a50fc565ae9a756d6d0152e0e5b \
import JSONSchemaValidatorB8104A50Fc565Ae9A756D6D0152E0E5B \
as JSONSchemaValidatorB8104A50Fc565Ae9A756D6D0152E0E5B_v3_0_0
from .validators.v3_0_0.jsd_b8319a8b5d195348a8763acd95ca2967 \
import JSONSchemaValidatorB8319A8B5D195348A8763Acd95Ca2967 \
as JSONSchemaValidatorB8319A8B5D195348A8763Acd95Ca2967_v3_0_0
from .validators.v3_0_0.jsd_b839d4dee9b958e48ccef056603e253f \
import JSONSchemaValidatorB839D4DeE9B958E48CceF056603E253F \
as JSONSchemaValidatorB839D4DeE9B958E48CceF056603E253F_v3_0_0
from .validators.v3_0_0.jsd_b95cf8c9aed95518b38be1fa4b514b67 \
import JSONSchemaValidatorB95Cf8C9Aed95518B38BE1Fa4B514B67 \
as JSONSchemaValidatorB95Cf8C9Aed95518B38BE1Fa4B514B67_v3_0_0
from .validators.v3_0_0.jsd_bacf1abfc35e509183c9a7f055cbbfec \
import JSONSchemaValidatorBacf1AbfC35E509183C9A7F055Cbbfec \
as JSONSchemaValidatorBacf1AbfC35E509183C9A7F055Cbbfec_v3_0_0
from .validators.v3_0_0.jsd_bb165bd00a6653ac9da440f23ee62ecc \
import JSONSchemaValidatorBb165Bd00A6653Ac9Da440F23Ee62Ecc \
as JSONSchemaValidatorBb165Bd00A6653Ac9Da440F23Ee62Ecc_v3_0_0
from .validators.v3_0_0.jsd_bba3187f0be4563aa8b6ff5931a123e7 \
import JSONSchemaValidatorBba3187F0Be4563AA8B6Ff5931A123E7 \
as JSONSchemaValidatorBba3187F0Be4563AA8B6Ff5931A123E7_v3_0_0
from .validators.v3_0_0.jsd_bcb7ec29968e5d5899df4a90d94ed659 \
import JSONSchemaValidatorBcb7Ec29968E5D5899Df4A90D94Ed659 \
as JSONSchemaValidatorBcb7Ec29968E5D5899Df4A90D94Ed659_v3_0_0
from .validators.v3_0_0.jsd_bcee1c9523a45056ab79dc64bdf827fe \
import JSONSchemaValidatorBcee1C9523A45056Ab79Dc64Bdf827Fe \
as JSONSchemaValidatorBcee1C9523A45056Ab79Dc64Bdf827Fe_v3_0_0
from .validators.v3_0_0.jsd_bdea52558473565c9963ec14c65727b8 \
import JSONSchemaValidatorBdea52558473565C9963Ec14C65727B8 \
as JSONSchemaValidatorBdea52558473565C9963Ec14C65727B8_v3_0_0
from .validators.v3_0_0.jsd_beebf3641335579e99c08f038303601e \
import JSONSchemaValidatorBeebf3641335579E99C08F038303601E \
as JSONSchemaValidatorBeebf3641335579E99C08F038303601E_v3_0_0
from .validators.v3_0_0.jsd_bf792ec664fa5202beb776556908b0c1 \
import JSONSchemaValidatorBf792Ec664Fa5202Beb776556908B0C1 \
as JSONSchemaValidatorBf792Ec664Fa5202Beb776556908B0C1_v3_0_0
from .validators.v3_0_0.jsd_bf95f099207a5b6599e04c47c22789c0 \
import JSONSchemaValidatorBf95F099207A5B6599E04C47C22789C0 \
as JSONSchemaValidatorBf95F099207A5B6599E04C47C22789C0_v3_0_0
from .validators.v3_0_0.jsd_c0984cde5e925c209ab87472ab905476 \
import JSONSchemaValidatorC0984Cde5E925C209Ab87472Ab905476 \
as JSONSchemaValidatorC0984Cde5E925C209Ab87472Ab905476_v3_0_0
from .validators.v3_0_0.jsd_c1052ac49dd35088a9874a4350182015 \
import JSONSchemaValidatorC1052Ac49Dd35088A9874A4350182015 \
as JSONSchemaValidatorC1052Ac49Dd35088A9874A4350182015_v3_0_0
from .validators.v3_0_0.jsd_c14128e5729b55e9b1feb638a8295e10 \
import JSONSchemaValidatorC14128E5729B55E9B1FeB638A8295E10 \
as JSONSchemaValidatorC14128E5729B55E9B1FeB638A8295E10_v3_0_0
from .validators.v3_0_0.jsd_c37778a2faa5552894cc60cec13c56c7 \
import JSONSchemaValidatorC37778A2Faa5552894Cc60Cec13C56C7 \
as JSONSchemaValidatorC37778A2Faa5552894Cc60Cec13C56C7_v3_0_0
from .validators.v3_0_0.jsd_c578ef80918b5d038024d126cd6e3b8d \
import JSONSchemaValidatorC578Ef80918B5D038024D126Cd6E3B8D \
as JSONSchemaValidatorC578Ef80918B5D038024D126Cd6E3B8D_v3_0_0
from .validators.v3_0_0.jsd_c5e52706e7095a81b8d32f3024e01cf6 \
import JSONSchemaValidatorC5E52706E7095A81B8D32F3024E01Cf6 \
as JSONSchemaValidatorC5E52706E7095A81B8D32F3024E01Cf6_v3_0_0
from .validators.v3_0_0.jsd_c654a18faf1b5571ac5ba61145d298c4 \
import JSONSchemaValidatorC654A18FAf1B5571Ac5BA61145D298C4 \
as JSONSchemaValidatorC654A18FAf1B5571Ac5BA61145D298C4_v3_0_0
from .validators.v3_0_0.jsd_c6c330dace185a548f70f4e5d67776ea \
import JSONSchemaValidatorC6C330DaCe185A548F70F4E5D67776Ea \
as JSONSchemaValidatorC6C330DaCe185A548F70F4E5D67776Ea_v3_0_0
from .validators.v3_0_0.jsd_c77600d349fc5c259dbd22d65b3ffa1d \
import JSONSchemaValidatorC77600D349Fc5C259Dbd22D65B3Ffa1D \
as JSONSchemaValidatorC77600D349Fc5C259Dbd22D65B3Ffa1D_v3_0_0
from .validators.v3_0_0.jsd_c7aa2a6cac155a6cb7ace3fd76a81e0f \
import JSONSchemaValidatorC7Aa2A6CAc155A6CB7AcE3Fd76A81E0F \
as JSONSchemaValidatorC7Aa2A6CAc155A6CB7AcE3Fd76A81E0F_v3_0_0
from .validators.v3_0_0.jsd_c87977b21b8f5d64852a8b6a5527928d \
import JSONSchemaValidatorC87977B21B8F5D64852A8B6A5527928D \
as JSONSchemaValidatorC87977B21B8F5D64852A8B6A5527928D_v3_0_0
from .validators.v3_0_0.jsd_c8cd2f618b655d988ce626e579486596 \
import JSONSchemaValidatorC8Cd2F618B655D988Ce626E579486596 \
as JSONSchemaValidatorC8Cd2F618B655D988Ce626E579486596_v3_0_0
from .validators.v3_0_0.jsd_c8dbec9679d453f78cb47d894c507a7b \
import JSONSchemaValidatorC8Dbec9679D453F78Cb47D894C507A7B \
as JSONSchemaValidatorC8Dbec9679D453F78Cb47D894C507A7B_v3_0_0
from .validators.v3_0_0.jsd_c941303330bc5615b3eb8d4d2702b874 \
import JSONSchemaValidatorC941303330Bc5615B3Eb8D4D2702B874 \
as JSONSchemaValidatorC941303330Bc5615B3Eb8D4D2702B874_v3_0_0
from .validators.v3_0_0.jsd_c97e7851003e5a63a2a8005ac8807dc7 \
import JSONSchemaValidatorC97E7851003E5A63A2A8005Ac8807Dc7 \
as JSONSchemaValidatorC97E7851003E5A63A2A8005Ac8807Dc7_v3_0_0
from .validators.v3_0_0.jsd_c988bb742d055294b74b4d6916ca1ada \
import JSONSchemaValidatorC988Bb742D055294B74B4D6916Ca1Ada \
as JSONSchemaValidatorC988Bb742D055294B74B4D6916Ca1Ada_v3_0_0
from .validators.v3_0_0.jsd_c9a67d3e9015580f93a52627f19e9916 \
import JSONSchemaValidatorC9A67D3E9015580F93A52627F19E9916 \
as JSONSchemaValidatorC9A67D3E9015580F93A52627F19E9916_v3_0_0
from .validators.v3_0_0.jsd_c9dea644f40453fead2b003b06c4c52b \
import JSONSchemaValidatorC9Dea644F40453FeAd2B003B06C4C52B \
as JSONSchemaValidatorC9Dea644F40453FeAd2B003B06C4C52B_v3_0_0
from .validators.v3_0_0.jsd_ca28129793d1569bb50de9f43b0d0ee8 \
import JSONSchemaValidatorCa28129793D1569BB50DE9F43B0D0Ee8 \
as JSONSchemaValidatorCa28129793D1569BB50DE9F43B0D0Ee8_v3_0_0
from .validators.v3_0_0.jsd_ca3df31c13b857e6b5dbc8357a8ab010 \
import JSONSchemaValidatorCa3Df31C13B857E6B5DbC8357A8Ab010 \
as JSONSchemaValidatorCa3Df31C13B857E6B5DbC8357A8Ab010_v3_0_0
from .validators.v3_0_0.jsd_cc909c2717cf55f1863a04a785166fe0 \
import JSONSchemaValidatorCc909C2717Cf55F1863A04A785166Fe0 \
as JSONSchemaValidatorCc909C2717Cf55F1863A04A785166Fe0_v3_0_0
from .validators.v3_0_0.jsd_cd429bb8ff3556a796570480f742028b \
import JSONSchemaValidatorCd429Bb8Ff3556A796570480F742028B \
as JSONSchemaValidatorCd429Bb8Ff3556A796570480F742028B_v3_0_0
from .validators.v3_0_0.jsd_cd59f40aa9305587b69944a9c819f7a9 \
import JSONSchemaValidatorCd59F40AA9305587B69944A9C819F7A9 \
as JSONSchemaValidatorCd59F40AA9305587B69944A9C819F7A9_v3_0_0
from .validators.v3_0_0.jsd_cd6793a4a8e7576c8b290bdc88001f6f \
import JSONSchemaValidatorCd6793A4A8E7576C8B290Bdc88001F6F \
as JSONSchemaValidatorCd6793A4A8E7576C8B290Bdc88001F6F_v3_0_0
from .validators.v3_0_0.jsd_cec7dc317e875ff0a315a7c0556f9c51 \
import JSONSchemaValidatorCec7Dc317E875Ff0A315A7C0556F9C51 \
as JSONSchemaValidatorCec7Dc317E875Ff0A315A7C0556F9C51_v3_0_0
from .validators.v3_0_0.jsd_d0e432f52e2a5863858c7dc0c3eda277 \
import JSONSchemaValidatorD0E432F52E2A5863858C7Dc0C3Eda277 \
as JSONSchemaValidatorD0E432F52E2A5863858C7Dc0C3Eda277_v3_0_0
from .validators.v3_0_0.jsd_d24a3f485ff758d099b1e4713f18f1c1 \
import JSONSchemaValidatorD24A3F485Ff758D099B1E4713F18F1C1 \
as JSONSchemaValidatorD24A3F485Ff758D099B1E4713F18F1C1_v3_0_0
from .validators.v3_0_0.jsd_d388e26255a15233ac682c0406880cfb \
import JSONSchemaValidatorD388E26255A15233Ac682C0406880Cfb \
as JSONSchemaValidatorD388E26255A15233Ac682C0406880Cfb_v3_0_0
from .validators.v3_0_0.jsd_d43fec9e7dc556cbb9bf0ebd1dcd6aad \
import JSONSchemaValidatorD43Fec9E7Dc556CbB9Bf0Ebd1Dcd6Aad \
as JSONSchemaValidatorD43Fec9E7Dc556CbB9Bf0Ebd1Dcd6Aad_v3_0_0
from .validators.v3_0_0.jsd_d5572c56526151cb8ea42de44b2db52c \
import JSONSchemaValidatorD5572C56526151Cb8Ea42De44B2Db52C \
as JSONSchemaValidatorD5572C56526151Cb8Ea42De44B2Db52C_v3_0_0
from .validators.v3_0_0.jsd_d810359e31e453ac8145981b7d5bb7e4 \
import JSONSchemaValidatorD810359E31E453Ac8145981B7D5Bb7E4 \
as JSONSchemaValidatorD810359E31E453Ac8145981B7D5Bb7E4_v3_0_0
from .validators.v3_0_0.jsd_d82fe0f9e4635b74af809beaaf98bd07 \
import JSONSchemaValidatorD82Fe0F9E4635B74Af809Beaaf98Bd07 \
as JSONSchemaValidatorD82Fe0F9E4635B74Af809Beaaf98Bd07_v3_0_0
from .validators.v3_0_0.jsd_d8e470a4ef6a58b3b21f9adbbdcc7a46 \
import JSONSchemaValidatorD8E470A4Ef6A58B3B21F9Adbbdcc7A46 \
as JSONSchemaValidatorD8E470A4Ef6A58B3B21F9Adbbdcc7A46_v3_0_0
from .validators.v3_0_0.jsd_d912b1c21e2b5dca8b56332d3a8ad13d \
import JSONSchemaValidatorD912B1C21E2B5Dca8B56332D3A8Ad13D \
as JSONSchemaValidatorD912B1C21E2B5Dca8B56332D3A8Ad13D_v3_0_0
from .validators.v3_0_0.jsd_d9ddc2557a495493bca08b8b973601aa \
import JSONSchemaValidatorD9Ddc2557A495493Bca08B8B973601Aa \
as JSONSchemaValidatorD9Ddc2557A495493Bca08B8B973601Aa_v3_0_0
from .validators.v3_0_0.jsd_db686413cf4558188ea60ccc05c3e920 \
import JSONSchemaValidatorDb686413Cf4558188Ea60Ccc05C3E920 \
as JSONSchemaValidatorDb686413Cf4558188Ea60Ccc05C3E920_v3_0_0
from .validators.v3_0_0.jsd_dd6c2553ae0053c1bbbdbd46c1df0ef9 \
import JSONSchemaValidatorDd6C2553Ae0053C1BbbdBd46C1Df0Ef9 \
as JSONSchemaValidatorDd6C2553Ae0053C1BbbdBd46C1Df0Ef9_v3_0_0
from .validators.v3_0_0.jsd_ded7f8573c255c318bb1f04bfdbf01e1 \
import JSONSchemaValidatorDed7F8573C255C318Bb1F04Bfdbf01E1 \
as JSONSchemaValidatorDed7F8573C255C318Bb1F04Bfdbf01E1_v3_0_0
from .validators.v3_0_0.jsd_dfa8f48210e85715beebb44e62fac408 \
import JSONSchemaValidatorDfa8F48210E85715BeebB44E62Fac408 \
as JSONSchemaValidatorDfa8F48210E85715BeebB44E62Fac408_v3_0_0
from .validators.v3_0_0.jsd_dfae2409eecc551298e9fa31d14f43d0 \
import JSONSchemaValidatorDfae2409Eecc551298E9Fa31D14F43D0 \
as JSONSchemaValidatorDfae2409Eecc551298E9Fa31D14F43D0_v3_0_0
from .validators.v3_0_0.jsd_e1d938f110e059a5abcb9cc8fb3cbd7c \
import JSONSchemaValidatorE1D938F110E059A5Abcb9Cc8Fb3Cbd7C \
as JSONSchemaValidatorE1D938F110E059A5Abcb9Cc8Fb3Cbd7C_v3_0_0
from .validators.v3_0_0.jsd_e2a697abfe2058d3adc7ad9922f5a5d6 \
import JSONSchemaValidatorE2A697AbFe2058D3Adc7Ad9922F5A5D6 \
as JSONSchemaValidatorE2A697AbFe2058D3Adc7Ad9922F5A5D6_v3_0_0
from .validators.v3_0_0.jsd_e2c930d3d75859b8b7d30e79f3eab084 \
import JSONSchemaValidatorE2C930D3D75859B8B7D30E79F3Eab084 \
as JSONSchemaValidatorE2C930D3D75859B8B7D30E79F3Eab084_v3_0_0
from .validators.v3_0_0.jsd_e39868ea7aec5efcaaf55009699eda5d \
import JSONSchemaValidatorE39868Ea7Aec5EfcAaf55009699Eda5D \
as JSONSchemaValidatorE39868Ea7Aec5EfcAaf55009699Eda5D_v3_0_0
from .validators.v3_0_0.jsd_e405a20316825460a1f37a2f161e7ac5 \
import JSONSchemaValidatorE405A20316825460A1F37A2F161E7Ac5 \
as JSONSchemaValidatorE405A20316825460A1F37A2F161E7Ac5_v3_0_0
from .validators.v3_0_0.jsd_e51b6e745cdb5bdda4de26a27b8d92bb \
import JSONSchemaValidatorE51B6E745Cdb5BddA4De26A27B8D92Bb \
as JSONSchemaValidatorE51B6E745Cdb5BddA4De26A27B8D92Bb_v3_0_0
from .validators.v3_0_0.jsd_e56b94dafa5652228fd71abd2b4d6df3 \
import JSONSchemaValidatorE56B94DaFa5652228Fd71Abd2B4D6Df3 \
as JSONSchemaValidatorE56B94DaFa5652228Fd71Abd2B4D6Df3_v3_0_0
from .validators.v3_0_0.jsd_e56bea5248a25f799b02fcb6098a7b10 \
import JSONSchemaValidatorE56Bea5248A25F799B02Fcb6098A7B10 \
as JSONSchemaValidatorE56Bea5248A25F799B02Fcb6098A7B10_v3_0_0
from .validators.v3_0_0.jsd_e56dd3caaf62589f9e827d03e8427467 \
import JSONSchemaValidatorE56Dd3CaAf62589F9E827D03E8427467 \
as JSONSchemaValidatorE56Dd3CaAf62589F9E827D03E8427467_v3_0_0
from .validators.v3_0_0.jsd_e623dba049b5569c83e13ccf4360e369 \
import JSONSchemaValidatorE623Dba049B5569C83E13Ccf4360E369 \
as JSONSchemaValidatorE623Dba049B5569C83E13Ccf4360E369_v3_0_0
from .validators.v3_0_0.jsd_e75d766151e85011870229f30e4f5ec3 \
import JSONSchemaValidatorE75D766151E85011870229F30E4F5Ec3 \
as JSONSchemaValidatorE75D766151E85011870229F30E4F5Ec3_v3_0_0
from .validators.v3_0_0.jsd_e7bd468ee94f53869e52e84454efd0e6 \
import JSONSchemaValidatorE7Bd468EE94F53869E52E84454Efd0E6 \
as JSONSchemaValidatorE7Bd468EE94F53869E52E84454Efd0E6_v3_0_0
from .validators.v3_0_0.jsd_e82e46732de25832a543c4640312588c \
import JSONSchemaValidatorE82E46732De25832A543C4640312588C \
as JSONSchemaValidatorE82E46732De25832A543C4640312588C_v3_0_0
from .validators.v3_0_0.jsd_e9ce4a1e1cf955f098343646760e9d58 \
import JSONSchemaValidatorE9Ce4A1E1Cf955F098343646760E9D58 \
as JSONSchemaValidatorE9Ce4A1E1Cf955F098343646760E9D58_v3_0_0
from .validators.v3_0_0.jsd_e9e38cdf5bcb5c018b7f10f1d0864215 \
import JSONSchemaValidatorE9E38Cdf5Bcb5C018B7F10F1D0864215 \
as JSONSchemaValidatorE9E38Cdf5Bcb5C018B7F10F1D0864215_v3_0_0
from .validators.v3_0_0.jsd_ea5b356b4bc053068a0052b6c807d286 \
import JSONSchemaValidatorEa5B356B4Bc053068A0052B6C807D286 \
as JSONSchemaValidatorEa5B356B4Bc053068A0052B6C807D286_v3_0_0
from .validators.v3_0_0.jsd_ea5e5a095d05598db7b99ddfd1d7f7fa \
import JSONSchemaValidatorEa5E5A095D05598DB7B99Ddfd1D7F7Fa \
as JSONSchemaValidatorEa5E5A095D05598DB7B99Ddfd1D7F7Fa_v3_0_0
from .validators.v3_0_0.jsd_ea658190e73c5ce1b27e7def4aea28e3 \
import JSONSchemaValidatorEa658190E73C5Ce1B27E7Def4Aea28E3 \
as JSONSchemaValidatorEa658190E73C5Ce1B27E7Def4Aea28E3_v3_0_0
from .validators.v3_0_0.jsd_eaa0d7c339d152b688876c2e10f51fe7 \
import JSONSchemaValidatorEaa0D7C339D152B688876C2E10F51Fe7 \
as JSONSchemaValidatorEaa0D7C339D152B688876C2E10F51Fe7_v3_0_0
from .validators.v3_0_0.jsd_eb8e0ce63376573995a49178435f7747 \
import JSONSchemaValidatorEb8E0Ce63376573995A49178435F7747 \
as JSONSchemaValidatorEb8E0Ce63376573995A49178435F7747_v3_0_0
from .validators.v3_0_0.jsd_ec26ec11d92356a594a6efa55ccb9be7 \
import JSONSchemaValidatorEc26Ec11D92356A594A6Efa55Ccb9Be7 \
as JSONSchemaValidatorEc26Ec11D92356A594A6Efa55Ccb9Be7_v3_0_0
from .validators.v3_0_0.jsd_ecff2eb67fe5591f8d9026f928a0d8aa \
import JSONSchemaValidatorEcff2Eb67Fe5591F8D9026F928A0D8Aa \
as JSONSchemaValidatorEcff2Eb67Fe5591F8D9026F928A0D8Aa_v3_0_0
from .validators.v3_0_0.jsd_ed1ef503c091506aa8e446182e625365 \
import JSONSchemaValidatorEd1Ef503C091506AA8E446182E625365 \
as JSONSchemaValidatorEd1Ef503C091506AA8E446182E625365_v3_0_0
from .validators.v3_0_0.jsd_edea91f35e90539f87a80eb107e02fff \
import JSONSchemaValidatorEdea91F35E90539F87A80Eb107E02Fff \
as JSONSchemaValidatorEdea91F35E90539F87A80Eb107E02Fff_v3_0_0
from .validators.v3_0_0.jsd_effdf30a3e3a5781ba1f5cf833395359 \
import JSONSchemaValidatorEffdf30A3E3A5781Ba1F5Cf833395359 \
as JSONSchemaValidatorEffdf30A3E3A5781Ba1F5Cf833395359_v3_0_0
from .validators.v3_0_0.jsd_f1196f1f6fde5978b0522f096926d443 \
import JSONSchemaValidatorF1196F1F6Fde5978B0522F096926D443 \
as JSONSchemaValidatorF1196F1F6Fde5978B0522F096926D443_v3_0_0
from .validators.v3_0_0.jsd_f16d14057660520dba53cc0df60db4a8 \
import JSONSchemaValidatorF16D14057660520DBa53Cc0Df60Db4A8 \
as JSONSchemaValidatorF16D14057660520DBa53Cc0Df60Db4A8_v3_0_0
from .validators.v3_0_0.jsd_f1b8eaf23e795f1a8525eb5905187aa9 \
import JSONSchemaValidatorF1B8Eaf23E795F1A8525Eb5905187Aa9 \
as JSONSchemaValidatorF1B8Eaf23E795F1A8525Eb5905187Aa9_v3_0_0
from .validators.v3_0_0.jsd_f1ff2b82953f5131884f0779db37190c \
import JSONSchemaValidatorF1Ff2B82953F5131884F0779Db37190C \
as JSONSchemaValidatorF1Ff2B82953F5131884F0779Db37190C_v3_0_0
from .validators.v3_0_0.jsd_f2b0a67d389a592dba005895594b77cc \
import JSONSchemaValidatorF2B0A67D389A592DBa005895594B77Cc \
as JSONSchemaValidatorF2B0A67D389A592DBa005895594B77Cc_v3_0_0
from .validators.v3_0_0.jsd_f3b45b8e4089574c9912407f88b1a5d2 \
import JSONSchemaValidatorF3B45B8E4089574C9912407F88B1A5D2 \
as JSONSchemaValidatorF3B45B8E4089574C9912407F88B1A5D2_v3_0_0
from .validators.v3_0_0.jsd_f41d844dbee15f7680920652004f69b6 \
import JSONSchemaValidatorF41D844DBee15F7680920652004F69B6 \
as JSONSchemaValidatorF41D844DBee15F7680920652004F69B6_v3_0_0
from .validators.v3_0_0.jsd_f41f77362663580d8cc3e6e88623889d \
import JSONSchemaValidatorF41F77362663580D8Cc3E6E88623889D \
as JSONSchemaValidatorF41F77362663580D8Cc3E6E88623889D_v3_0_0
from .validators.v3_0_0.jsd_f4dbfb874b3b56d7a651d6732f1bd55e \
import JSONSchemaValidatorF4Dbfb874B3B56D7A651D6732F1Bd55E \
as JSONSchemaValidatorF4Dbfb874B3B56D7A651D6732F1Bd55E_v3_0_0
from .validators.v3_0_0.jsd_f68aee0cdb425390b3ca90b0b46e6e2c \
import JSONSchemaValidatorF68Aee0CDb425390B3Ca90B0B46E6E2C \
as JSONSchemaValidatorF68Aee0CDb425390B3Ca90B0B46E6E2C_v3_0_0
from .validators.v3_0_0.jsd_f79ab23563d857e58e01a74e37333572 \
import JSONSchemaValidatorF79Ab23563D857E58E01A74E37333572 \
as JSONSchemaValidatorF79Ab23563D857E58E01A74E37333572_v3_0_0
from .validators.v3_0_0.jsd_f831d9ed2beb5c2b967aa10db8c22046 \
import JSONSchemaValidatorF831D9Ed2Beb5C2B967AA10Db8C22046 \
as JSONSchemaValidatorF831D9Ed2Beb5C2B967AA10Db8C22046_v3_0_0
from .validators.v3_0_0.jsd_f8a2f0834e625822bed1cb4cf34fde5e \
import JSONSchemaValidatorF8A2F0834E625822Bed1Cb4Cf34Fde5E \
as JSONSchemaValidatorF8A2F0834E625822Bed1Cb4Cf34Fde5E_v3_0_0
from .validators.v3_0_0.jsd_f9159c9f9a1951568daee7080e1dda47 \
import JSONSchemaValidatorF9159C9F9A1951568DaeE7080E1Dda47 \
as JSONSchemaValidatorF9159C9F9A1951568DaeE7080E1Dda47_v3_0_0
from .validators.v3_0_0.jsd_f92e61297eb05379bd9b92bc60735912 \
import JSONSchemaValidatorF92E61297Eb05379Bd9B92Bc60735912 \
as JSONSchemaValidatorF92E61297Eb05379Bd9B92Bc60735912_v3_0_0
from .validators.v3_0_0.jsd_f9c9a5e917af53dbbb91733e82e72ebe \
import JSONSchemaValidatorF9C9A5E917Af53DbBb91733E82E72Ebe \
as JSONSchemaValidatorF9C9A5E917Af53DbBb91733E82E72Ebe_v3_0_0
from .validators.v3_0_0.jsd_fa838e78175e51b4bcfb0821c19b81b7 \
import JSONSchemaValidatorFa838E78175E51B4Bcfb0821C19B81B7 \
as JSONSchemaValidatorFa838E78175E51B4Bcfb0821C19B81B7_v3_0_0
from .validators.v3_0_0.jsd_fc354ec4d361514a8e949f628f8e5f89 \
import JSONSchemaValidatorFc354Ec4D361514A8E949F628F8E5F89 \
as JSONSchemaValidatorFc354Ec4D361514A8E949F628F8E5F89_v3_0_0
from .validators.v3_0_0.jsd_fc9a4ee495785518bd2251b6b4fb41f4 \
import JSONSchemaValidatorFc9A4Ee495785518Bd2251B6B4Fb41F4 \
as JSONSchemaValidatorFc9A4Ee495785518Bd2251B6B4Fb41F4_v3_0_0
from .validators.v3_0_0.jsd_fc9ecf1e469154ae845236dbed070904 \
import JSONSchemaValidatorFc9Ecf1E469154Ae845236Dbed070904 \
as JSONSchemaValidatorFc9Ecf1E469154Ae845236Dbed070904_v3_0_0
from .validators.v3_0_0.jsd_fcf7754d5b45523a8227d37c476a1880 \
import JSONSchemaValidatorFcf7754D5B45523A8227D37C476A1880 \
as JSONSchemaValidatorFcf7754D5B45523A8227D37C476A1880_v3_0_0
from .validators.v3_0_0.jsd_fd4b5a56f8bd5f8f919e9fffc172e72f \
import JSONSchemaValidatorFd4B5A56F8Bd5F8F919E9Fffc172E72F \
as JSONSchemaValidatorFd4B5A56F8Bd5F8F919E9Fffc172E72F_v3_0_0
from .validators.v3_0_0.jsd_fe54c96ccba65af1abe3cd08f4fc69cb \
import JSONSchemaValidatorFe54C96CCba65Af1Abe3Cd08F4Fc69Cb \
as JSONSchemaValidatorFe54C96CCba65Af1Abe3Cd08F4Fc69Cb_v3_0_0
from .validators.v3_0_0.jsd_feb30ca768795eed82c118d009d7bcd4 \
import JSONSchemaValidatorFeb30Ca768795Eed82C118D009D7Bcd4 \
as JSONSchemaValidatorFeb30Ca768795Eed82C118D009D7Bcd4_v3_0_0
from .validators.v3_0_0.jsd_ff0055f9ef115a42bea6ffdd8e57d41b \
import JSONSchemaValidatorFf0055F9Ef115A42Bea6Ffdd8E57D41B \
as JSONSchemaValidatorFf0055F9Ef115A42Bea6Ffdd8E57D41B_v3_0_0
from .validators.v3_0_0.jsd_ffff1c792bf559ebb39b789421be6966 \
import JSONSchemaValidatorFfff1C792Bf559EbB39B789421Be6966 \
as JSONSchemaValidatorFfff1C792Bf559EbB39B789421Be6966_v3_0_0
class JSONSchemaValidator(object):
"""Validates a Identity Services Engine JSON request."""
def __init__(self):
super(JSONSchemaValidator, self).__init__()
self._validator = fastjsonschema.compile({})
def validate(self, request):
try:
self._validator(request)
except fastjsonschema.exceptions.JsonSchemaException as e:
raise MalformedRequest('{} is invalid. Reason: {}'.format(
request, e.message
))
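# A minimal illustration of the validator contract (a sketch, not part of the generated
# code): the base class compiles an empty schema and therefore accepts any JSON object,
# while the generated subclasses above compile endpoint-specific schemas and are expected
# to raise MalformedRequest for payloads that violate them. `payload` below is a
# hypothetical request dict.
#
#   base = JSONSchemaValidator()
#   base.validate({'any': 'payload'})   # empty schema: no exception raised
#
#   strict = JSONSchemaValidatorFfff1C792Bf559EbB39B789421Be6966_v3_0_0()
#   strict.validate(payload)            # raises MalformedRequest if payload is invalid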
class SchemaValidator:
def __init__(self, version):
self.json_schema_validators = {}
self.load_validators(version)
def load_validators(self, version):
if version == '3.0.0':
self.json_schema_validators['jsd_f2fcf04554db9ea4cdc3a7024322_v3_0_0'] =\
JSONSchemaValidatorF2FcF04554Db9Ea4Cdc3A7024322_v3_0_0()
self.json_schema_validators['jsd_ac8c8cb9b5007a1e1a6434a20a881_v3_0_0'] =\
JSONSchemaValidatorAc8C8Cb9B5007A1E1A6434A20A881_v3_0_0()
self.json_schema_validators['jsd_d6b1385f4cb9381c13a1fa4356_v3_0_0'] =\
JSONSchemaValidatorD6B1385F4CB9381C13A1Fa4356_v3_0_0()
self.json_schema_validators['jsd_daa171ab765a02a714c48376b3790d_v3_0_0'] =\
JSONSchemaValidatorDaa171Ab765A02A714C48376B3790D_v3_0_0()
self.json_schema_validators['jsd_fde0cbd2de50f680d0b0f681771829_v3_0_0'] =\
JSONSchemaValidatorFde0CbD2De50F680D0B0F681771829_v3_0_0()
self.json_schema_validators['jsd_bb2e9d6651c7bf18c1b60ff7eb14_v3_0_0'] =\
JSONSchemaValidatorBb2E9D6651C7Bf18C1B60Ff7Eb14_v3_0_0()
self.json_schema_validators['jsd_ab7717877a539b9b87f499817aee15_v3_0_0'] =\
JSONSchemaValidatorAb7717877A539B9B87F499817Aee15_v3_0_0()
self.json_schema_validators['jsd_db1d9dda53369e35d33138b29c16_v3_0_0'] =\
JSONSchemaValidatorDb1D9Dda53369E35D33138B29C16_v3_0_0()
self.json_schema_validators['jsd_ab015a9eb6d5f2b91002af068cb4ce2_v3_0_0'] =\
JSONSchemaValidatorAb015A9Eb6D5F2B91002Af068Cb4Ce2_v3_0_0()
self.json_schema_validators['jsd_ac243ecb8c157658a4bcfe77a102c14_v3_0_0'] =\
JSONSchemaValidatorAc243EcB8C157658A4BCfe77A102C14_v3_0_0()
self.json_schema_validators['jsd_b3fe0f3ea8a5368aea79a847288993e_v3_0_0'] =\
JSONSchemaValidatorB3Fe0F3Ea8A5368Aea79A847288993E_v3_0_0()
self.json_schema_validators['jsd_cdc971b23285b87945021bd5983d1cd_v3_0_0'] =\
JSONSchemaValidatorCdc971B23285B87945021Bd5983D1Cd_v3_0_0()
self.json_schema_validators['jsd_d1df0e230765104863b8d63d5beb68e_v3_0_0'] =\
JSONSchemaValidatorD1Df0E230765104863B8D63D5Beb68E_v3_0_0()
self.json_schema_validators['jsd_dedf09f59e754c6ae5212d43b1c8fb2_v3_0_0'] =\
JSONSchemaValidatorDedf09F59E754C6Ae5212D43B1C8Fb2_v3_0_0()
self.json_schema_validators['jsd_e176356698b5ec49609504a530c1d8a_v3_0_0'] =\
JSONSchemaValidatorE176356698B5Ec49609504A530C1D8A_v3_0_0()
self.json_schema_validators['jsd_e629f554fa652d980ff08988c788c57_v3_0_0'] =\
JSONSchemaValidatorE629F554Fa652D980Ff08988C788C57_v3_0_0()
self.json_schema_validators['jsd_f41a1e47105581fabf212f259626903_v3_0_0'] =\
JSONSchemaValidatorF41A1E47105581FAbf212F259626903_v3_0_0()
self.json_schema_validators['jsd_e34177d675622acd0a532f5b7c41b_v3_0_0'] =\
JSONSchemaValidatorE34177D675622Acd0A532F5B7C41B_v3_0_0()
self.json_schema_validators['jsd_f8f4956d29b821fa9bbf23266_v3_0_0'] =\
JSONSchemaValidatorF8F4956D29B821Fa9Bbf23266_v3_0_0()
self.json_schema_validators['jsd_cd9e91565f5c74b9f32ff0e5be6f17_v3_0_0'] =\
JSONSchemaValidatorCd9E91565F5C74B9F32Ff0E5Be6F17_v3_0_0()
self.json_schema_validators['jsd_a518d5655f69e8687c9c98740c6_v3_0_0'] =\
JSONSchemaValidatorA518D5655F69E8687C9C98740C6_v3_0_0()
self.json_schema_validators['jsd_c45ba035019803dacdbf15cf193_v3_0_0'] =\
JSONSchemaValidatorC45Ba035019803DAcdbf15Cf193_v3_0_0()
self.json_schema_validators['jsd_ca61ff725fedb94fba602d7afe46_v3_0_0'] =\
JSONSchemaValidatorCa61Ff725FedB94FBa602D7Afe46_v3_0_0()
self.json_schema_validators['jsd_ebcdc835e9b8d6844c1da6cf252_v3_0_0'] =\
JSONSchemaValidatorEbcDc835E9B8D6844C1Da6Cf252_v3_0_0()
self.json_schema_validators['jsd_f52605b5f6481f6a99ec8a7e8e6_v3_0_0'] =\
JSONSchemaValidatorF52605B5F6481F6A99Ec8A7E8E6_v3_0_0()
self.json_schema_validators['jsd_ea10f18c3655db84657ad855bf6972_v3_0_0'] =\
JSONSchemaValidatorEa10F18C3655Db84657Ad855Bf6972_v3_0_0()
self.json_schema_validators['jsd_b9e8541f25c4ea29944f659f68994_v3_0_0'] =\
JSONSchemaValidatorB9E8541F25C4EA29944F659F68994_v3_0_0()
self.json_schema_validators['jsd_c8aec23a55399a175acf105dbe1c2_v3_0_0'] =\
| |
the full name of the service without
the 'fabric:' URI scheme. Starting from version 6.0, hierarchical
names are delimited with the '~' character. For example, if the
service name is 'fabric:/myapp/app1/svc1', the service identity
would be 'myapp~app1~svc1' in 6.0+ and 'myapp/app1/svc1' in
previous versions."
- name: --source-id
type: string
short-summary: The source name which identifies the
client/watchdog/system component which generated the health
information.
- name: --health-property
type: string
short-summary: The property of the health information.
long-summary: An entity can have health reports for different
properties. The property is a string and not a fixed enumeration to
allow the reporter flexibility to categorize the state condition that
triggers the report. For example, a reporter with SourceId
"LocalWatchdog" can monitor the state of the available disk on a
node, so it can report "AvailableDisk" property on that node. The
same reporter can monitor the node connectivity, so it can report a
property "Connectivity" on the same node. In the health store,
these reports are treated as separate health events for the
specified node. Together with the SourceId, the property uniquely
identifies the health information.
- name: --health-state
type: string
short-summary: "Possible values include: 'Invalid', 'Ok', 'Warning',
'Error', 'Unknown'"
- name: --ttl
type: string
short-summary: The duration for which this health report is valid.
This field is using ISO8601 format for specifying the duration.
long-summary: When clients report periodically, they should send
reports with higher frequency than time to live. If clients report
on transition, they can set the time to live to infinite. When
time to live expires, the health event that contains the health
information is either removed from the health store, if
RemoveWhenExpired is true, or evaluated at error, if
RemoveWhenExpired is false. If not specified, time to live defaults
to an infinite value.
- name: --description
type: string
short-summary: The description of the health information.
long-summary: It represents free text used to add human readable
information about the report. The maximum string length for the
description is 4096 characters. If the provided string is longer,
it will be automatically truncated. When truncated, the last
characters of the description contain a marker "[Truncated]", and
total string size is 4096 characters. The presence of the marker
indicates to users that truncation occurred. Note that when
truncated, the description has less than 4096 characters from the
original string.
- name: --sequence-number
type: string
short-summary: The sequence number for this health report as a
numeric string.
long-summary: The report sequence number is used by the health store
to detect stale reports. If not specified, a sequence number is
auto-generated by the health client when a report is added.
- name: --remove-when-expired
type: bool
short-summary: Value that indicates whether the report is removed
from health store when it expires.
long-summary: If set to true, the report is removed from the health
store after it expires. If set to false, the report is treated as
an error when expired. The value of this property is false by
default. When clients report periodically, they should set
RemoveWhenExpired to false (the default). This way, if the reporter has
issues (e.g. a deadlock) and can't report, the entity is evaluated at
error when the health report expires. This flags the entity as
being in the Error health state.
- name: --immediate
type: bool
short-summary: A flag which indicates whether the report should be
sent immediately.
long-summary: A health report is sent to a Service Fabric gateway
Application, which forwards it to the health store. If Immediate is
set to true, the report is sent immediately from HTTP Gateway to
the health store, regardless of the fabric client settings that
the HTTP Gateway Application is using. This is useful for critical
reports that should be sent as soon as possible. Depending on
timing and other conditions, sending the report may still fail,
for example if the HTTP Gateway is closed or the message doesn't
reach the Gateway. If Immediate is set to false, the report is sent
based on the health client settings from the HTTP Gateway.
Therefore, it will be batched according to the
HealthReportSendInterval configuration. This is the recommended
setting because it allows the health client to optimize health
reporting messages to health store as well as health report
processing. By default, reports are not sent immediately.
"""
helps['partition report-health'] = """
type: command
short-summary: Sends a health report on the Service Fabric partition.
long-summary: Reports health state of the specified Service Fabric
partition. The report must contain the information about the source of
the health report and property on which it is reported. The report is
sent to a Service Fabric gateway Partition, which forwards it to the
health store. The report may be accepted by the gateway, but rejected
by the health store after extra validation. For example, the health
store may reject the report because of an invalid parameter, like a
stale sequence number. To see whether the report was applied in the
health store, check that the report appears in the events section.
parameters:
- name: --partition-id
type: string
short-summary: The identity of the partition
- name: --source-id
type: string
short-summary: The source name which identifies the
client/watchdog/system component which generated the health
information.
- name: --health-property
type: string
short-summary: The property of the health information.
long-summary: An entity can have health reports for different
properties. The property is a string and not a fixed enumeration to
allow the reporter flexibility to categorize the state condition that
triggers the report. For example, a reporter with SourceId
"LocalWatchdog" can monitor the state of the available disk on a
node, so it can report "AvailableDisk" property on that node. The
same reporter can monitor the node connectivity, so it can report a
property "Connectivity" on the same node. In the health store,
these reports are treated as separate health events for the
specified node. Together with the SourceId, the property uniquely
identifies the health information.
- name: --health-state
type: string
short-summary: "Possible values include: 'Invalid', 'Ok', 'Warning',
'Error', 'Unknown'"
- name: --ttl
type: string
short-summary: The duration for which this health report is valid.
This field is using ISO8601 format for specifying the duration.
long-summary: When clients report periodically, they should send
reports with higher frequency than time to live. If clients report
on transition, they can set the time to live to infinite. When
time to live expires, the health event that contains the health
information is either removed from the health store, if
RemoveWhenExpired is true, or evaluated at error, if
RemoveWhenExpired is false. If not specified, time to live defaults
to an infinite value.
- name: --description
type: string
short-summary: The description of the health information.
long-summary: It represents free text used to add human readable
information about the report. The maximum string length for the
description is 4096 characters. If the provided string is longer,
it will be automatically truncated. When truncated, the last
characters of the description contain a marker "[Truncated]", and
total string size is 4096 characters. The presence of the marker
indicates to users that truncation occurred. Note that when
truncated, the description has less than 4096 characters from the
original string.
- name: --sequence-number
type: string
short-summary: The sequence number for this health report as a
numeric string.
long-summary: The report sequence number is used by the health store
to detect stale reports. If not specified, a sequence number is
auto-generated by the health client when a report is added.
- name: --remove-when-expired
type: bool
short-summary: Value that indicates whether the report is removed
from health store when it expires.
long-summary: If set to true, the report is removed from the health
store after it expires. If set to false, the report is treated as
an error when expired. The value of this property is false by
default. When clients report | |
"""
isicarchive.font (Font)
This module provides the Font helper class and doesn't have to be
imported from outside the main package functionality (IsicApi).
To instantiate a Font object, simply call it with the font name:
>>> font = isicarchive.font.Font('calibri')
At this time, only calibri is available as a font file!
The format is similar to that used by
https://github.com/neuroelf/neuroelf-matlab/
in the image_font function of the @neuroelf class. It uses an
image with all available extended ASCII characters, and computes
a kerning value between all combinations of letters for the type
setting.
To then create an image with a set text, you can call either the
```font.set_line(TEXT)``` method for a one-channel uint8 image,
or the ```font.set_text(TEXT, ...)``` method for an RGB and alpha
image with additional options.
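A minimal usage sketch (the argument values are illustrative; set_line
returns a list with one uint8 array per line of text, set_text a
3-tuple of numpy arrays as annotated on the method):
>>> line_images = font.set_line('Hello', 24.0)
>>> text_arrays = font.set_text('Hello', fsize=24.0)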
"""
# in large parts, code was translated from
# https://github.com/neuroelf/neuroelf-matlab/blob/master/%40neuroelf/private/image_font.m
# specific version for file
__version__ = '0.4.8'
# imports (needed for majority of functions)
import os
from typing import Tuple, Union
from imageio import imread
import numpy
class Font(object):
"""
Font
"""
__fonts = {}
def __init__(self, fontname:str):
# setup object
self._image = None
self._kerning = None
self._letters = None
self._lmap = 0 - numpy.ones(1024, dtype=numpy.int32)
self._num_letters = 0
self._size = 0
self._xktab = None
self._xstart = None
self._xstop = None
self._ybase = 0
self._yret = None
self.name = ''
# parse input (name = filename)
if fontname is None or fontname == '':
fontname = 'calibri'
else:
fontname = fontname.lower()
fontfolder = os.path.dirname(__file__) + os.sep + 'etc' + os.sep
if os.path.exists(fontfolder + 'font_' + fontname + '.npz'):
self.name = fontname
else:
self.name = 'calibri'
if self.name in self.__fonts:
f = self.__fonts[self.name]
self._image = f['image']
self._kerning = f['kerning']
self._letters = f['letters']
self._lmap = f['lmap']
self._num_letters = f['num_letters']
self._size = f['size']
self._xktab = f['xktab']
self._xstart = f['xstart']
self._xstop = f['xstop']
self._ybase = f['ybase']
self._yret = f['yret']
return
# load font file and set in object
fontfile = fontfolder + 'font_' + self.name + '.npz'
fontdata = numpy.load(fontfile)
fontdict = {k:v for (k,v) in fontdata.items()}
self._image = imread(fontdict['fimage'].tobytes())
self._letters = fontdict['letters']
self._num_letters = fontdict['flen']
self._lmap[self._letters] = numpy.asarray(range(self._num_letters))
self._size = fontdict['size']
nl = self._num_letters
self._xktab = numpy.zeros(nl * nl, dtype=numpy.float32).reshape((nl,nl,))
self._xstart = numpy.concatenate((numpy.zeros(1, dtype=numpy.int32),
fontdict['xstop'][0:-1]))
self._xstop = fontdict['xstop']
self._ybase = fontdict['ybase']
for (d0,d1,v) in zip(fontdict['xk0'], fontdict['xk1'], fontdict['xkv']):
self._xktab[d0-1,d1-1] = v
self._yret = self._size - self._image.shape[0]
self._add_kerning()
self.__fonts[self.name] = {
'image': self._image,
'kerning': self._kerning,
'letters': self._letters,
'lmap': self._lmap,
'num_letters': self._num_letters,
'size': self._size,
'xktab': self._xktab,
'xstart': self._xstart,
'xstop': self._xstop,
'ybase': self._ybase,
'yret': self._yret,
}
def __repr__(self):
return 'isicarchive.font.Font(\'' + self.name + '\')'
# font kerning
def _add_kerning(self):
# for each letter
nl = self._num_letters
fh = self._image.shape[0]
fhp = numpy.asarray(range(-1,2)).reshape((1,3,))
fhi = numpy.asarray(range(0,fh)).reshape((fh,1,)) + fhp
fhi[0,0] = 0
fhi[-1,-1] = fh - 1
fhi.shape = (3 * fh,)
lsf = numpy.zeros(fh * nl, dtype=numpy.int32).reshape((fh,nl,))
lsf.fill(-65536)
rsf = numpy.copy(lsf)
for lc in range(1, nl):
# get the letter image (masked)
lmi = (self._image[:, self._xstart[lc]:self._xstop[lc]] >= 128)
lms = self._xstop[lc] - self._xstart[lc]
lmi = numpy.any(lmi[fhi,:].reshape((fh,3,lms,)), axis=1).reshape((fh,lms,))
lms -= 1
# find the first pixel that is not background
cpix = numpy.where(numpy.sum(lmi, axis=1) > 1)[0]
for cc in range(cpix.size):
cpc = cpix[cc]
cpw = numpy.where(lmi[cpc,:])[0]
lsf[cpc,lc] = cpw[0]
rsf[cpc,lc] = lms - cpw[-1]
# next compute the median for each pair
ktab = numpy.zeros(nl * nl, dtype=numpy.float32).reshape((nl, nl,))
ktab.fill(numpy.nan)
for rc in range(1,nl):
rsfc = rsf[:,rc]
if numpy.all(rsfc == -65536):
continue
nrf = numpy.sum(rsfc > -65536)
for lc in range(1,nl):
rsflsf = rsfc + lsf[:,lc]
if all(rsflsf <= -32768):
continue
nlf = float(numpy.sum(lsf[:,lc] > -65536))
rsflsf = rsflsf[rsflsf > -32768]
nrl = float(rsflsf.size)
rlmin = float(numpy.amin(rsflsf))
rlmed = float(1 + numpy.median(rsflsf))
minw = (rlmed - rlmin) / rlmed
ktab[rc,lc] = (minw * rlmin + (1 - minw) * rlmed) * (nrl * nrl / (nrf * nlf))
# add "kerning additions"
ktab = ktab - self._xktab
# overall median
ktmed = numpy.median(ktab[numpy.isfinite(ktab)]) + 0.1 * (
float(self._image.shape[1]) / float(nl))
ktab = ktmed - ktab
ktab[numpy.isnan(ktab)] = 0.0
# adjust "space" kerning
ktabsp = numpy.ceil(numpy.mean(ktab[1:,1:]))
ktab[0,:] = -ktabsp
ktab[:,0] = -ktabsp
# store table
self._kerning = numpy.trunc(ktab).astype(numpy.int32)
# set single line into images
def set_line(self,
line:Union[str,list],
fsize:float,
spkern:int = 0,
xkern:int = 0,
) -> numpy.ndarray:
# IMPORTS DONE HERE TO SAVE TIME AT MODULE INIT
from .sampler import Sampler
sampler = Sampler()
if isinstance(line, str):
line = [line]
elif not isinstance(line, list):
raise ValueError('Invalid line(s) to set.')
out = [None] * len(line)
if fsize < 1.0:
raise ValueError('Invalid fsize parameter.')
ffact = fsize / float(self._size)
ifsize = self._image.shape
for lc in range(len(line)):
letters = [ord(l) for l in line[lc]]
nletters = len(letters)
if nletters == 0:
out[lc] = numpy.zeros(0, dtype=numpy.uint8).reshape((int(fsize), 0,))
continue
let_ims = [None] * nletters
let_spc = numpy.zeros(nletters, dtype=numpy.int32)
for letc in range(nletters):
leti = self._lmap[letters[letc]]
if leti < 0:
leti = self._lmap[63]
let_ims[letc] = self._image[:, self._xstart[leti]:self._xstop[leti]]
if letc < (nletters - 1):
nleti = self._lmap[letters[letc+1]]
if nleti < 0:
nleti = self._lmap[63]
if nleti == 0:
let_spc[letc] = self._kerning[leti,nleti] + spkern
else:
let_spc[letc] = self._kerning[leti,nleti] + xkern
# element and total size
xsims = numpy.asarray([l.shape[1] for l in let_ims])
xstot = numpy.sum(xsims) + numpy.sum(let_spc)
lineimage = numpy.zeros(ifsize[0] * xstot, dtype=numpy.uint8).reshape(
(ifsize[0], xstot,))
lii = 0
for letc in range(nletters):
lineimage[:,lii:lii+xsims[letc]] = numpy.maximum(
lineimage[:,lii:lii+xsims[letc]], let_ims[letc])
lii += xsims[letc] + let_spc[letc]
# resize
if ffact != 1.0:
lineimage = sampler.sample_grid(lineimage, ffact, 'resample', 'uint8')
# store
out[lc] = lineimage
return out
# set text into image
def set_text(self,
text:str,
fsize:float = 24.0,
color:list = [0, 0, 0],
bcolor:list = [255, 255, 255],
align:str = 'left',
invert:bool = False,
outsize_x:int = 0,
outsize_y:int = 0,
padding:int = 4,
spkern:int = 0,
xkern:int = 0,
) -> Tuple[numpy.ndarray, numpy.ndarray, numpy.ndarray]:
if not isinstance(text, str):
raise ValueError('Invalid text.')
text = text.split('\n')
if not isinstance(fsize, float) or fsize <= 1.0:
fsize = 24.0
fsize = numpy.ceil(fsize)
if not isinstance(bcolor, list) or len(bcolor) != 3:
bcolor = [255, 255, 255]
try:
bcolor = numpy.asarray(bcolor).astype(numpy.uint8)
except:
raise
if not isinstance(color, list) or len(color) != 3:
color = [0, 0, 0]
try:
color = numpy.asarray(color).astype(numpy.uint8)
except:
raise
if not isinstance(invert, bool):
invert = False
if not isinstance(outsize_x, int) or outsize_x < 0:
outsize_x = 0
if not isinstance(outsize_y, int) or outsize_y < 0:
outsize_y = 0
if not isinstance(padding, int) or padding < 0:
padding = 0
if not isinstance(spkern, int) or spkern < -8 or spkern > 48:
spkern = 0
if not isinstance(xkern, int) or xkern < -16 or xkern > 16:
xkern = 0
# set each line with current settings
lines = self.set_line(text, fsize, spkern, xkern)
padsz = numpy.round_([2 * padding])[0]
fsize0 = numpy.asarray([line.shape[0] for line in lines])
fstot0 = numpy.sum(fsize0) + padsz
fsize1 = numpy.asarray([line.shape[1] for line in lines])
fstot1 = numpy.amax(fsize1) + padsz
# outsize needs to be determined
if outsize_y == 0:
outsize_y = fstot0
ypad = padding
else:
ypad = 0
if outsize_x == 0:
outsize_x = fstot1
xpad = padding
else:
xpad = 0
# create image
if outsize_y >= fstot0:
if outsize_x >= fstot1:
ima = numpy.zeros(outsize_y * outsize_x,
dtype=numpy.float32).reshape((outsize_y, outsize_x,))
else:
ima = numpy.zeros(outsize_y * fstot1,
dtype=numpy.float32).reshape((outsize_y, fstot1,))
else:
if outsize_x >= fstot1:
ima = numpy.zeros(fstot0 * outsize_x,
dtype=numpy.float32).reshape((fstot0, outsize_x,))
else:
ima = numpy.zeros(fstot0 * fstot1,
dtype=numpy.float32).reshape((fstot0, fstot1,))
imash = ima.shape
im = numpy.zeros(imash[0] * imash[1] * 3,
dtype=numpy.uint8).reshape((imash[0], imash[1], 3,))
for pc in range(3):
im[:,:,pc] = bcolor[pc]
# store font pieces into
yfrom = ypad
yfroms = numpy.zeros(len(lines), dtype=numpy.int32)
for lc in range(len(lines)):
lim = (1.0 / 255.0) * lines[lc].astype(numpy.float32)
if lim.shape[1] > outsize_x:
lim = lim[:, 0:outsize_x]
if invert:
lim = 1.0 - lim
tsize = lim.shape[1]
if align == 'left':
xfrom = xpad
else:
xleft = imash[1] - 2 * xpad
if align == 'right':
xfrom = xpad + xleft - tsize
else:
xfrom = xpad + ((xleft - tsize) // 2)
ima[yfrom:yfrom+fsize0[lc], xfrom:xfrom+tsize] = lim
lim = (lim > 0.0).astype(numpy.float32)
for pc in range(3):
cim = im[yfrom:yfrom+fsize0[lc], xfrom:xfrom+tsize, pc]
im[yfrom:yfrom+fsize0[lc], xfrom:xfrom+tsize, pc] = numpy.trunc(
0.5 | |
= "パスワードを入力してください。"
Ary['MOSJA10005'] = "ログインID、パスワードが正しくありません。入力し直してください。<br />ユーザ未登録の場合は、システム管理者にユーザ登録の依頼をしてください。"
Ary['MOSJA10006'] = "ログインID、パスワードが正しくありません。入力し直してください。<br />[パスワードをお忘れの場合]をクリックすると再設定できます。"
Ary['MOSJA10007'] = "パスワード誤り上限回数に達したため、ユーザをロックしました。<br />しばらくしてから再入力をしてください。ロックが解除されない場合や緊急の場合は、システム管理者にロック解除の依頼をしてください。"
Ary['MOSJA10008'] = "ユーザはロックされています。しばらくしてから再入力をしてください。<br />ロックが解除されない場合や緊急の場合は、システム管理者にロック解除の依頼をしてください。"
Ary['MOSJA10009'] = "ワンタイムパスワードの期限を超過しました。<br />[パスワードをお忘れの場合]をクリックすると再設定できます。"
Ary['MOSJA10010'] = "認証に失敗しました。"
Ary['MOSJA10011'] = "ログインできませんでした。"
Ary['MOSJA10012'] = "ログインID、パスワードが正しくありません。入力し直してください。"
Ary['MOSJA10013'] = "ユーザはロックされています。システム管理者にロック解除の依頼をしてください。"
Ary['MOSJA10014'] = "認証に失敗しました。"
Ary['MOSJA10015'] = "パスワード設定取得に失敗しました。"
Ary['MOSJA10016'] = "ご利用されているユーザは存在しないため、ログイン画面に遷移しました。"
Ary['MOSJA10017'] = "アカウントロック上限回数に達したため、ユーザをロックしました。<br/>ロックを解除したい場合はシステム管理者に解除の依頼をしてください。"
Ary['MOSJA10018'] = "ユーザはロックされています。<br />ロックを解除したい場合はシステム管理者に解除の依頼をしてください。"
Ary['MOSJA10019'] = "解除してよろしいですか?"
Ary['MOSJA10020'] = "解除されました。"
Ary['MOSJA10021'] = "DashBoard"
Ary['MOSJA10022'] = "OASEにようこそ!"
Ary['MOSJA10023'] = " Exastro"
Ary['MOSJA10024'] = " Operation Autonomy Support Engine"
Ary['MOSJA10025'] = "でログインしています。"
Ary['MOSJA10026'] = "個人設定"
Ary['MOSJA10027'] = "ログアウトしました。"
Ary['MOSJA10028'] = "403 Forbidden"
Ary['MOSJA10029'] = "このページを表示する権限がありません。"
Ary['MOSJA10030'] = "Exastro Operation Autonomy Support Engine"
Ary['MOSJA10031'] = "Forbidden"
Ary['MOSJA10032'] = "You don't have permission to access on this server."
Ary['MOSJA10033'] = "404 Not Found"
Ary['MOSJA10034'] = "要求されたURLはこのサーバで見つかりませんでした。"
Ary['MOSJA10035'] = "メールアドレス変更完了"
Ary['MOSJA10036'] = "メールアドレスを変更しました。"
Ary['MOSJA10037'] = "ログイン画面へ"
Ary['MOSJA10038'] = "ルール"
Ary['MOSJA10039'] = "システム"
Ary['MOSJA10040'] = "管理"
Ary['MOSJA10041'] = "お問い合わせ"
Ary['MOSJA10042'] = "ログアウト"
Ary['MOSJA11000'] = "ディシジョンテーブルが存在しません。<br>権限がある場合は、[新規追加]をクリックするとディシジョンテーブルを作成できます。"
Ary['MOSJA11001'] = "条件名は、32文字以内で入力してください。"
Ary['MOSJA11002'] = "Labelは、32文字以内で入力してください。"
Ary['MOSJA11003'] = "ルール種別は、64文字以内で入力してください。"
Ary['MOSJA11004'] = "概要は、4000文字以内で入力してください。"
Ary['MOSJA11005'] = "RuleTableは、64文字以内で入力してください。"
Ary['MOSJA11006'] = "RuleTableは、半角英数字(ただし、頭文字は半角英字)で入力してください。"
Ary['MOSJA11007'] = "はLabelで使用できません。"
Ary['MOSJA11008'] = "labelは、半角小文字英数字(ただし、頭文字は半角小文字英字)で入力してください。"
Ary['MOSJA11009'] = "他の行と重複しています。修正してください。"
Ary['MOSJA11010'] = "不正なリクエストです。"
Ary['MOSJA11011'] = "入力値が正しくありません。\n入力内容を確認してください。"
Ary['MOSJA11012'] = "編集内容を保存します。\nよろしいですか?"
Ary['MOSJA11013'] = "データ取得に失敗しました。"
Ary['MOSJA11014'] = "のディシジョンテーブルが削除されますがよろしいですか?\n削除しない場合は、[キャンセル]をクリックしてください。"
Ary['MOSJA11015'] = "登録済みのディシジョンテーブルと同一のRuleTable名は使用できません。"
Ary['MOSJA11016'] = "一部キーワードは使用できません。詳細はマニュアルを参照してください。"
Ary['MOSJA11017'] = "予期せぬエラーが発生しました。"
Ary['MOSJA11018'] = "ルール条件を追加してください。"
Ary['MOSJA11019'] = "同一項目で条件式の型が違います。型を揃えてください。"
Ary['MOSJA11020'] = "はRuleTable名で使用できません。"
Ary['MOSJA11021'] = "条件名は、32文字以内で入力してください。\n受信するリクエストの条件に合致した条件名を入力してください。\n条件に対し、複数の条件式を紐付たい場合は、同一の条件名を設定してください。\n記号を使用することはできません。"
Ary['MOSJA11022'] = "%(opename)sの権限がありません。\nディシジョンテーブルの一覧に本ルールが表示されていることをご確認ください。\nrule_type_name=%(rule_type_name)s\n"
Ary['MOSJA11023'] = "新規追加画面に遷移し、ディシジョンテーブルの基本情報、権限、ルール条件を登録します。"
Ary['MOSJA11024'] = "新規追加画面を閉じてディシジョンテーブル画面へ遷移します。"
Ary['MOSJA11025'] = "基本情報と権限の設定を行います。"
Ary['MOSJA11026'] = "条件式の設定を行います。"
Ary['MOSJA11027'] = "基本情報・権限タブの情報を保持したまま条件式タブへ遷移します。"
Ary['MOSJA11028'] = "条件式入力欄を1行ずつ追加します。"
Ary['MOSJA11029'] = "新規追加画面を閉じてディシジョンテーブル画面へ遷移します。"
Ary['MOSJA11030'] = "新規追加情報を保存してディシジョンテーブル画面へ遷移します。"
Ary['MOSJA11031'] = "ディシジョンテーブル詳細画面を閉じてディシジョンテーブル画面へ遷移します。"
Ary['MOSJA11032'] = "権限の設定と条件式をコピーして新規追加を行います。"
Ary['MOSJA11033'] = "ディシジョンテーブルの削除を行います。"
Ary['MOSJA11034'] = "ディシジョンテーブルの編集を行います。"
Ary['MOSJA11035'] = "ディシジョンテーブルファイルのダウンロードを行います。"
Ary['MOSJA11036'] = "ディシジョンテーブル編集画面を閉じてディシジョンテーブル画面へ遷移します。"
Ary['MOSJA11037'] = "更新情報を保存してディシジョンテーブル画面へ遷移します。"
Ary['MOSJA11038'] = "ディシジョンテーブル詳細画面へ遷移します。"
Ary['MOSJA11039'] = "追加行を削除します。"
Ary['MOSJA11040'] = "ルール種別に使用できない文字が含まれています。"
Ary['MOSJA11041'] = "概要に使用できない文字が含まれています。"
Ary['MOSJA11042'] = "RuleTableに使用できない文字が含まれています。"
Ary['MOSJA11043'] = "ルール種別が重複しています。"
Ary['MOSJA11044'] = "ルール種別が削除されているものと重複しています。"
Ary['MOSJA11045'] = "RuleTableが重複しています。"
Ary['MOSJA11046'] = "RuleTableが削除されているものと重複しています。"
Ary['MOSJA12000'] = "データの取得に失敗しました。"
Ary['MOSJA12001'] = "エラーが発生しました。"
Ary['MOSJA12002'] = "不正なリクエストです。"
Ary['MOSJA12003'] = "ルール種別が設定されていません。"
Ary['MOSJA12004'] = "イベント発生日時が設定されていません。"
Ary['MOSJA12005'] = "イベント情報が設定されていません。"
Ary['MOSJA12006'] = "イベント情報の要素数が %s 個ではありません。"
Ary['MOSJA12007'] = "ステージング実行をリクエストしました。"
Ary['MOSJA12008'] = "運用ステータスを更新しました。"
Ary['MOSJA12009'] = "ルールが存在しません。"
Ary['MOSJA12010'] = "運用ステータスを更新できません。"
Ary['MOSJA12011'] = "プロダクション適用済みのため、運用ステータスを更新できません。"
Ary['MOSJA12012'] = "テストリクエストが実行できませんでした。"
Ary['MOSJA12013'] = "一括テストリクエストが実行できませんでした。"
Ary['MOSJA12014'] = "必須項目が入力されていません。入力をしてから、再度アップロードしてください。 cell=%(cellname)s"
Ary['MOSJA12015'] = "%(colname)sには、半角数字を入力してください。 cell=%(cellname)s"
Ary['MOSJA12016'] = "時刻条件には、時刻(HH:mm)を入力してください。 cell=%(cellname)s"
Ary['MOSJA12017'] = "イベント発生日時には、年月日(%%Y-%%m-%%d %%H:%%M:%%S)を入力してください。 cell=%(cellname)s"
Ary['MOSJA12018'] = "フォーマットチェックに失敗しました。\nファイルの記載内容を修正してから、再度アップロードしてください。"
Ary['MOSJA12019'] = "空ファイルです。必須項目を入力してから再度アップロードしてください。 filename=%(filename)s"
Ary['MOSJA12020'] = "%(colname)sには、半角数字を[num,num,num]のような形式で入力してください。cell=%(cellname)s"
Ary['MOSJA12021'] = "%(colname)sには、[\"hoge\",\"hoge\",\"hoge\"]のような形式で入力してください。cell=%(cellname)s"
Ary['MOSJA12022'] = "イベント発生日時には、年月日(%Y-%m-%d %H:%M:%S)を入力してください。"
Ary['MOSJA12023'] = "フォーマットチェックに失敗しました。\n下記のエラー内容を確認してください。"
Ary['MOSJA12024'] = "テストリクエストの送信が完了しました。"
Ary['MOSJA12025'] = "%(colname)sには、半角数字を入力してください。条件名: %(conditional_name)s"
Ary['MOSJA12026'] = "時刻条件には、時刻(HH:mm)を入力してください。条件名: %(conditional_name)s"
Ary['MOSJA12027'] = "%(colname)sには、[\"hoge\",\"hoge\",\"hoge\"]のような形式で入力してください。条件名: %(conditional_name)s"
Ary['MOSJA12028'] = "%(colname)sに使用できない文字が使われています。 cell=%(cellname)s"
Ary['MOSJA12029'] = "%(colname)sに使用できない文字が使われています。条件名: %(conditional_name)s"
Ary['MOSJA12030'] = "必須項目が入力されていません。 条件名: %(conditional_name)s"
Ary['MOSJA12031'] = "%(opename)sの権限がありません。\nディシジョンテーブル画面から該当ルールの権限をご確認ください。\nrule_type_name=%(rule_type_name)s\n"
Ary['MOSJA12032'] = "ルールのマッチングは正常に行われましたが、ドライバー情報の取得に失敗しました。ドライバーのインストール状態をご確認のうえ、再度テストリクエストを試みてください。"
Ary['MOSJA12033'] = "切り戻し可能な運用ステータス/作業ステータスではありません。"
Ary['MOSJA12034'] = "ステージング適用ルール"
Ary['MOSJA12035'] = "テストリクエスト"
Ary['MOSJA12036'] = "ファイルを選択:"
Ary['MOSJA12037'] = "ファイルが選択されていません。"
Ary['MOSJA12038'] = "アップロード"
Ary['MOSJA12039'] = "過去を含め表示"
Ary['MOSJA12040'] = "対象データがありません。"
Ary['MOSJA12041'] = "プロダクション適用ルール"
Ary['MOSJA12042'] = "ルール管理ID"
Ary['MOSJA12043'] = "DL"
Ary['MOSJA12044'] = "(プロダクション適用済み)"
Ary['MOSJA12045'] = "ファイル名"
Ary['MOSJA12046'] = "切り戻し"
Ary['MOSJA12047'] = "ルールの切り戻し"
Ary['MOSJA12048'] = "過去のルールをプロダクションへ切り戻します。"
Ary['MOSJA12049'] = "ルールファイル"
Ary['MOSJA12050'] = "適用ステータス"
Ary['MOSJA12051'] = "作業ステータス"
Ary['MOSJA12052'] = "縦に分割"
Ary['MOSJA12053'] = "横に分割"
Ary['MOSJA12054'] = "画面の自動更新"
Ary['MOSJA12055'] = "運用ステータス"
Ary['MOSJA12056'] = "ステージング環境へテストリクエストを発行し、ルールの正常性を確認します。\nテストリクエストを実行し正常性が確認できたルールは、プロダクション環境へ適用することができます。"
Ary['MOSJA12057'] = "種別"
Ary['MOSJA12058'] = "設定"
Ary['MOSJA12059'] = "ログ"
Ary['MOSJA12060'] = "ルール種別選択"
Ary['MOSJA12061'] = "ルール種別を選択してください。"
Ary['MOSJA12062'] = "テストリクエスト設定へ"
Ary['MOSJA12063'] = "テストリクエスト設定"
Ary['MOSJA12064'] = "単発テスト"
Ary['MOSJA12065'] = "一括テスト"
Ary['MOSJA12066'] = "イベント発生日時"
Ary['MOSJA12067'] = "クリア"
Ary['MOSJA12068'] = "一括テスト用Excel"
Ary['MOSJA12069'] = "一括テスト用Excelファイルがダウンロードできます。\nダウンロード後ルールの設定をしてください。"
Ary['MOSJA12070'] = "一括テスト用Excelファイルのダウンロード"
Ary['MOSJA12071'] = "ファイル選択"
Ary['MOSJA12072'] = "入力済みの一括テスト用Excelファイルを選択してください。"
Ary['MOSJA12073'] = "ファイルを選択する"
Ary['MOSJA12074'] = "実行"
Ary['MOSJA12075'] = "実行ログ"
Ary['MOSJA12076'] = "ログクリア"
Ary['MOSJA12077'] = "ログダウンロード"
Ary['MOSJA12078'] = "再実行"
Ary['MOSJA12079'] = "検証未実施"
Ary['MOSJA12080'] = "検証実施中"
Ary['MOSJA12081'] = "正常処理済"
Ary['MOSJA12082'] = "強制処理済"
Ary['MOSJA12083'] = "異常終了"
Ary['MOSJA12084'] = "ルール未検出"
Ary['MOSJA12085'] = "ルール実行エラー"
Ary['MOSJA12086'] = "アクション中断"
Ary['MOSJA12087'] = "ステージング実行リクエスト中です"
Ary['MOSJA12088'] = "ステージング実行中です"
Ary['MOSJA12089'] = "正常に処理されました"
Ary['MOSJA12090'] = "強制処理済にされました"
Ary['MOSJA12091'] = "ルール種別が存在しない、実行順序の設定が不正、もしくは、ルールに設定されたアクション種別が不正です"
Ary['MOSJA12092'] = "ルールが検出できませんでした"
Ary['MOSJA12093'] = "ルール実行でエラーが発生しました。再実行を試みてください。何度も同じエラーとなる場合、ディシジョンテーブル画面から不要なルール種別を削除することで、エラーが解消されることもあります。削除可能なルールがない、または、エラーが解消されない場合は、OASEシステムの管理者へご連絡ください。"
Ary['MOSJA12094'] = "アクションが中断されました"
Ary['MOSJA12095'] = "未適用"
Ary['MOSJA12096'] = "検証NG"
Ary['MOSJA12097'] = "検証完了"
Ary['MOSJA12098'] = "適用終了"
Ary['MOSJA12099'] = "プロダクション未適用"
Ary['MOSJA12100'] = "プロダクション適用"
Ary['MOSJA12101'] = "プロダクション適用終了"
Ary['MOSJA12102'] = "アップロード中"
Ary['MOSJA12103'] = "アップロード異常終了"
Ary['MOSJA12104'] = "アップロード完了"
Ary['MOSJA12105'] = "ビルド中"
Ary['MOSJA12106'] = "ビルド異常終了"
Ary['MOSJA12107'] = "ビルド完了"
Ary['MOSJA12108'] = "ステージング適用処理中"
Ary['MOSJA12109'] = "ステージング適用異常終了"
Ary['MOSJA12110'] = "ステージング適用完了"
Ary['MOSJA12111'] = "プロダクション適用処理中"
Ary['MOSJA12112'] = "プロダクション適用異常終了"
Ary['MOSJA12113'] = "プロダクション適用完了"
Ary['MOSJA12114'] = "不明なステータスです"
Ary['MOSJA12115'] = "ステージング実行開始"
Ary['MOSJA12116'] = "日時"
Ary['MOSJA12117'] = "処理件数"
Ary['MOSJA12118'] = "運用ステータス変更"
Ary['MOSJA12119'] = "年"
Ary['MOSJA12120'] = "月"
Ary['MOSJA12121'] = "日"
Ary['MOSJA12122'] = " ルール名:"
Ary['MOSJA12123'] = " アクション実行順:"
Ary['MOSJA12124'] = " アクション種別:"
Ary['MOSJA12125'] = " アクションパラメータ情報:"
Ary['MOSJA12126'] = " アクション実行前パラメータ情報:"
Ary['MOSJA12127'] = "がマッチングされました"
Ary['MOSJA12128'] = "運用ステータスを変更します。\nよろしいですか?"
Ary['MOSJA12129'] = "運用ステータスを「検証完了」にしてよろしいですか?"
Ary['MOSJA12130'] = "ルール種別を変更します。\nよろしいですか?"
Ary['MOSJA12131'] = "選択されたファイル形式は.{}です。\n"
Ary['MOSJA12132'] = "選択できるファイル形式は.{0}及び.{1}です。\n"
Ary['MOSJA12133'] = "もう一度ファイルを選択してください"
Ary['MOSJA12134'] = "ファイルをアップロードします。\nよろしいですか?"
Ary['MOSJA12135'] = "リクエストを送信します。\nよろしいですか?"
Ary['MOSJA12136'] = "一括テストリクエスト実行します。\nよろしいですか?"
Ary['MOSJA12137'] = "一括実行_"
Ary['MOSJA12138'] = "{}を切り戻します。\nよろしいですか?"
Ary['MOSJA12139'] = "プロダクション適用します。\nよろしいですか?"
Ary['MOSJA12140'] = "自動リロードに失敗しました。"
Ary['MOSJA12141'] = "マッチング結果詳細情報"
Ary['MOSJA12142'] = " マッチング件数 :"
Ary['MOSJA12143'] = "件"
Ary['MOSJA12144'] = "件目"
Ary['MOSJA12145'] = " ルール名:"
Ary['MOSJA12146'] = " アクション実行順:"
Ary['MOSJA12147'] = " アクション種別:"
Ary['MOSJA12148'] = " アクションパラメータ情報:"
Ary['MOSJA12149'] = " アクション実行前パラメータ情報:"
Ary['MOSJA12150'] = "行目"
Ary['MOSJA12151'] = "数値条件"
Ary['MOSJA12152'] = "文字列条件"
Ary['MOSJA12153'] = "含む/含まない"
Ary['MOSJA12154'] = "無"
Ary['MOSJA12155'] = "有"
Ary['MOSJA12156'] = "不明"
Ary['MOSJA12157'] = "テストリクエスト画面へ遷移します。"
Ary['MOSJA12158'] = "アップロードするディシジョンテーブルファイルを設定します。"
Ary['MOSJA12159'] = "ディシジョンテーブルファイルをアップロードします。"
Ary['MOSJA12160'] = "ONにすると過去のレコードを含めて表示します。"
Ary['MOSJA12161'] = "アップロードしたディシジョンテーブルファイルをダウンロードします。"
Ary['MOSJA12162'] = "プロダクション環境へ適用します。"
Ary['MOSJA12163'] = "テストリクエスト画面を閉じてルール画面に遷移します。"
Ary['MOSJA12164'] = "ルール種別を選択します。"
Ary['MOSJA12165'] = "テストリクエストの設定を行います。"
Ary['MOSJA12166'] = "実行結果を確認できます。"
Ary['MOSJA12167'] = "テストリクエスト設定タブに遷移します。"
Ary['MOSJA12168'] = "単発テストを実施します。"
Ary['MOSJA12169'] = "一括テストを実施します。"
Ary['MOSJA12170'] = "「イベント発生日時」を除き、入力内容をクリアします。"
Ary['MOSJA12171'] = "一括テストリクエストファイルをダウンロードします。"
Ary['MOSJA12172'] = "投入する一括テストリクエストファイルを設定します。"
Ary['MOSJA12173'] = "選択したファイルをクリアします。"
Ary['MOSJA12174'] = "ルール種別選択タブに遷移します。"
Ary['MOSJA12175'] = "テストリクエストの実行を行います。"
Ary['MOSJA12176'] = "実行ログのクリアを行います。"
Ary['MOSJA12177'] = "実行ログの内容をテキストに出力します。"
Ary['MOSJA12178'] = "ルールの切り戻し画面に遷移します。"
Ary['MOSJA12179'] = "ルールの切り戻し画面を閉じてルール画面に遷移します。"
Ary['MOSJA12180'] = "切り戻しを実行します。"
Ary['MOSJA13000'] = "データの取得に失敗しました。\nしばらく経ってからもう一度お試しください。\n再度同じエラーが発生している場合はOASEシステムの管理者へご連絡ください。"
Ary['MOSJA13001'] = "リクエスト情報"
Ary['MOSJA13002'] = "イベント発生日時"
Ary['MOSJA13003'] = "トレースID"
Ary['MOSJA13004'] = "イベント情報"
Ary['MOSJA13005'] = "アクション情報"
Ary['MOSJA13006'] = "アクション日時"
Ary['MOSJA13007'] = "ルール種別"
Ary['MOSJA13008'] = "ルール名"
Ary['MOSJA13009'] = "アクションサーバリスト"
Ary['MOSJA13010'] = "アクションパラメータ情報"
Ary['MOSJA13011'] = "ログ"
Ary['MOSJA13012'] = "アクションを再実行しますがよろしいですか?"
Ary['MOSJA13013'] = "現在実行中のため再実行できません。"
Ary['MOSJA13014'] = "エラーが発生しました。"
Ary['MOSJA13015'] = "アクションを承認して、\n処理を再開しますがよろしいですか?"
Ary['MOSJA13016'] = "承認待ち状態ではないため、アクションを再開できません。"
Ary['MOSJA13017'] = "アクションを再実行する権限がありません。\nディシジョンテーブル画面から該当ルールの権限をご確認ください。\nrule_type_name=%(rule_type_name)s\n"
Ary['MOSJA13018'] = "アクションを承認する権限がありません。\nディシジョンテーブル画面から該当ルールの権限をご確認ください。\nrule_type_name=%(rule_type_name)s\n"
Ary['MOSJA13019'] = "条件名が重複しています。重複して指定する場合は以下の組合せで条件式を選択してください。\n①[等しい(数値),等しくない(数値),より大きい,より小さい,以上,以下]\n②[等しい(文字列),等しくない(文字列),正規表現に一致する,正規表現に一致しない,時間]\n③[含む,含まない]"
Ary['MOSJA13020'] = "アクションを停止しますがよろしいですか?"
Ary['MOSJA13021'] = "アクションを停止する権限がありません。\nディシジョンテーブル画面から該当ルールの権限をご確認ください。\nrule_type_name=%(rule_type_name)s\n"
Ary['MOSJA13022'] = "停止するアクションが見つかりませんでした。\n"
Ary['MOSJA13023'] = "ITA表示名"
Ary['MOSJA13024'] = "Symphonyインスタンス番号"
Ary['MOSJA13025'] = "SymphonyクラスID"
Ary['MOSJA13026'] = "オペレーションID"
Ary['MOSJA13027'] = "Symphony作業確認URL"
Ary['MOSJA13028'] = "RESTAPI異常時の詳細内容"
Ary['MOSJA13029'] = "メールテンプレート名"
Ary['MOSJA13030'] = "送信先メールアドレス"
Ary['MOSJA13031'] = "アクション履歴"
Ary['MOSJA13032'] = "状態"
Ary['MOSJA13033'] = "アクション種別"
Ary['MOSJA13034'] = "最終実行日時"
Ary['MOSJA13035'] = "最終実行者"
Ary['MOSJA13036'] = "承認"
Ary['MOSJA13037'] = "停止"
Ary['MOSJA13038'] = "アクション再実行"
Ary['MOSJA13039'] = "アクション実行"
Ary['MOSJA13040'] = "アクション履歴はありません。"
Ary['MOSJA13041'] = "ログ詳細"
Ary['MOSJA13042'] = "保留中アクション再開"
Ary['MOSJA13043'] = "保留中のアクションを再開するか、再開せずに停止できます。"
Ary['MOSJA13044'] = "再開"
Ary['MOSJA13045'] = "ログ詳細画面へ遷移します。"
Ary['MOSJA13046'] = "詳細情報の内容をテキストに出力します。"
Ary['MOSJA13047'] = "保留中のアクションを実行または停止を行います。"
Ary['MOSJA13048'] = "アクションの再実行を行います。"
Ary['MOSJA13049'] = "アクション履歴画面へ遷移します。"
Ary['MOSJA13050'] = "保留中のアクションを実行します。"
Ary['MOSJA13051'] = "保留中のアクションを停止します。"
Ary['MOSJA13052'] = "ログ詳細画面を閉じてアクション履歴画面へ遷移します。"
Ary['MOSJA13053'] = "処理中"
Ary['MOSJA13054'] = "完了(正常終了)"
Ary['MOSJA13055'] = "強制終了"
Ary['MOSJA13056'] = "承認待ち"
Ary['MOSJA13057'] = "処理済み"
Ary['MOSJA13058'] = "Exastro実行中"
Ary['MOSJA13059'] = "Exastro異常終了"
Ary['MOSJA13060'] = "Exastro実行停止"
Ary['MOSJA13061'] = "Exastro実行状態取得エラー"
Ary['MOSJA13062'] = "抑止済"
Ary['MOSJA13063'] = "アクション実行エラー"
Ary['MOSJA13064'] = "未処理"
Ary['MOSJA13065'] = "処理中(データを取得開始)"
Ary['MOSJA13066'] = "処理済み(正常終了)"
Ary['MOSJA13067'] = "強制処理済み"
Ary['MOSJA13068'] = "異常終了(サーバーエラー)"
Ary['MOSJA13069'] = "処理済み(ルール未検出)"
Ary['MOSJA13070'] = "処理済み(ルール実行エラー)"
Ary['MOSJA13071'] = "ルールマッチ"
Ary['MOSJA13072'] = "アクション中断"
Ary['MOSJA13073'] = "アクション実行前エラー"
Ary['MOSJA13074'] = "Exastroリクエスト"
Ary['MOSJA13075'] = "処理中(リトライ実行)"
Ary['MOSJA13076'] = "未実行"
Ary['MOSJA13077'] = "実行中"
Ary['MOSJA13078'] = "異常"
Ary['MOSJA13079'] = "取消"
Ary['MOSJA13080'] = "状態取得失敗"
Ary['MOSJA13081'] = "抑止済"
Ary['MOSJA13082'] = "すでにこのルールは削除されています"
Ary['MOSJA13083'] = "代入値管理登録中"
Ary['MOSJA14000'] = "編集可能なグループが存在しません。"
Ary['MOSJA14001'] = "基本情報・権限"
Ary['MOSJA14002'] = "条件式"
Ary['MOSJA14003'] = "条件名"
Ary['MOSJA14004'] = "移動"
Ary['MOSJA14005'] = "条件式を追加する"
Ary['MOSJA14006'] = "条件式の設定へ"
Ary['MOSJA14007'] = "基本情報"
Ary['MOSJA14008'] = "ディシジョンテーブルを新規追加します。"
Ary['MOSJA14009'] = "ディシジョンテーブル詳細"
Ary['MOSJA14010'] = "複製"
Ary['MOSJA14011'] = "64文字以内、半角英数字(ただし、頭文字は半角英字)で入力してください。登録済みのディシジョンテーブルと同一のRuleTableは使用できません。"
Ary['MOSJA14012'] = "ディシジョンテーブル編集"
Ary['MOSJA14013'] = "ルール種別、概要、権限の編集ができます。\nRuleTableおよび、条件式は変更できません。"
Ary['MOSJA14014'] = "ディシジョンテーブル 編集・削除"
Ary['MOSJA14015'] = "ルール ステージング環境"
Ary['MOSJA14016'] = "ルール プロダクション環境"
Ary['MOSJA14017'] = "編集・削除"
Ary['MOSJA14018'] = "ディシジョンテーブル複製"
Ary['MOSJA22000'] = "更新します。よろしいですか?"
Ary['MOSJA22001'] = "設定項目:%(strConName)s \n1から7の数値を入力してください。"
Ary['MOSJA22002'] = "設定項目:%(strConName)s \n1から60の数値を入力してください。"
Ary['MOSJA22003'] = "必須項目が入力されていません。\n項目名: %(strConName)s"
Ary['MOSJA22004'] = "設定項目:%(strConName)s \n1から5の数値を入力してください。"
Ary['MOSJA22005'] = "設定項目:%(strConName)s \n新パスワードが一致していません。"
Ary['MOSJA22006'] = "設定項目:%(strConName)s \n0または1を入力してください。"
Ary['MOSJA22007'] = "%(strConName)sは64文字以内で入力してください。"
Ary['MOSJA22008'] = "%(strConName)sは32文字以内で入力してください。"
Ary['MOSJA22009'] = "%(strConName)sは512文字以内で入力してください。"
Ary['MOSJA22010'] = "%(strConName)sは数字5桁以内で入力してください。"
Ary['MOSJA22011'] = "%(strConName)sは256文字以内で入力してください。"
Ary['MOSJA22012'] = "%(strConName)sは各入力欄40文字以内で入力してください。"
Ary['MOSJA22013'] = "更新対象のデータがありません。"
Ary['MOSJA22014'] = "不正なリクエストです。"
Ary['MOSJA22015'] = "DBの更新に失敗しました。"
Ary['MOSJA22016'] = "データの取得に失敗しました。"
Ary['MOSJA22017'] = "%(msgDetail)s"
Ary['MOSJA22018'] = "保存されました。"
Ary['MOSJA22019'] = "エラーが発生しました。"
Ary['MOSJA22020'] = "設定項目:%(strConName)s \n1から180の数値を入力してください。"
Ary['MOSJA22021'] = "設定項目:%(strConName)s \n1から10の数値を入力してください。"
Ary['MOSJA22022'] = "設定項目:%(strConName)s \n1から120の数値を入力してください。"
Ary['MOSJA22023'] = "設定項目:%(strConName)s \n1から72の数値を入力してください。"
Ary['MOSJA22024'] = "対象グループリストの属性値が重複しています。 %(key)s"
Ary['MOSJA22025'] = "対象グループリストの所属部署名が重複しています。 %(value)s"
Ary['MOSJA22026'] = "AD連携解除を行います。\nADから取得したグループ・ユーザ情報が全て削除されます。\nよろしいですか?"
Ary['MOSJA22027'] = "設定項目:%(strConName)s \n1から10の数値を入力してください。"
Ary['MOSJA22028'] = "設定項目:%(strConName)s \n1から1000の数値を入力してください。"
Ary['MOSJA22029'] = "設定項目:%(strConName)s \n00から23の数値を入力してください。"
Ary['MOSJA22030'] = "AD連携タイマーの設定に失敗しました。"
Ary['MOSJA22031'] = "メール通知種別を選択してください。"
Ary['MOSJA22032'] = "ログインIDが不正です。修正してください。"
| |
import os
import shutil
import unittest
import tempfile
import struct
from pyoram.storage.block_storage import \
BlockStorageTypeFactory
from pyoram.storage.block_storage_file import \
BlockStorageFile
from pyoram.storage.block_storage_mmap import \
BlockStorageMMap
from pyoram.storage.block_storage_ram import \
BlockStorageRAM
from pyoram.storage.block_storage_sftp import \
BlockStorageSFTP
from pyoram.storage.block_storage_s3 import \
BlockStorageS3
from pyoram.storage.boto3_s3_wrapper import \
(Boto3S3Wrapper,
MockBoto3S3Wrapper)
import six
from six.moves import xrange
from six import BytesIO
thisdir = os.path.dirname(os.path.abspath(__file__))
try:
import boto3
has_boto3 = True
except ImportError: # pragma: no cover
    has_boto3 = False # pragma: no cover
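# Factory tests: each registered storage name should map to its class;
# unknown names and invalid registrations are rejected.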
class TestBlockStorageTypeFactory(unittest.TestCase):
def test_file(self):
self.assertIs(BlockStorageTypeFactory('file'),
BlockStorageFile)
def test_mmap(self):
self.assertIs(BlockStorageTypeFactory('mmap'),
BlockStorageMMap)
def test_ram(self):
self.assertIs(BlockStorageTypeFactory('ram'),
BlockStorageRAM)
def test_sftp(self):
self.assertIs(BlockStorageTypeFactory('sftp'),
BlockStorageSFTP)
def test_s3(self):
self.assertIs(BlockStorageTypeFactory('s3'),
BlockStorageS3)
def test_invalid(self):
with self.assertRaises(ValueError):
BlockStorageTypeFactory(None)
def test_register_invalid_name(self):
with self.assertRaises(ValueError):
BlockStorageTypeFactory.register_device(
's3', BlockStorageFile)
def test_register_invalid_type(self):
with self.assertRaises(TypeError):
BlockStorageTypeFactory.register_device(
'new_str_type', str)
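# Shared storage test mixin: concrete subclasses set _type to a BlockStorage
# class and _type_kwds to its constructor keywords; setUpClass builds a small
# test store of _block_count blocks that the tests below read and write.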
class _TestBlockStorage(object):
_type = None
_type_kwds = None
@classmethod
def _read_storage(cls, storage):
with open(storage.storage_name, 'rb') as f:
return f.read()
@classmethod
def _remove_storage(cls, name):
if os.path.exists(name):
if os.path.isdir(name):
shutil.rmtree(name, ignore_errors=True)
else:
os.remove(name)
@classmethod
def _check_exists(cls, name):
return os.path.exists(name)
@classmethod
def _get_empty_existing(cls):
return os.path.join(thisdir,
"baselines",
"exists.empty")
@classmethod
def _get_dummy_noexist(cls):
fd, name = tempfile.mkstemp(dir=os.getcwd())
os.close(fd)
return name
def _open_teststorage(self, **kwds):
kwds.update(self._type_kwds)
return self._type(self._testfname, **kwds)
def _reopen_storage(self, storage):
return self._type(storage.storage_name, **self._type_kwds)
@classmethod
def setUpClass(cls):
assert cls._type is not None
assert cls._type_kwds is not None
cls._dummy_name = cls._get_dummy_noexist()
if cls._check_exists(cls._dummy_name):
cls._remove_storage(cls._dummy_name)
if os.path.exists(cls._dummy_name):
_TestBlockStorage.\
_remove_storage(cls._dummy_name) # pragma: no cover
cls._block_size = 25
cls._block_count = 5
cls._testfname = cls.__name__ + "_testfile.bin"
cls._blocks = []
f = cls._type.setup(
cls._testfname,
block_size=cls._block_size,
block_count=cls._block_count,
initialize=lambda i: bytes(bytearray([i])*cls._block_size),
ignore_existing=True,
**cls._type_kwds)
f.close()
cls._original_f = f
for i in range(cls._block_count):
data = bytearray([i])*cls._block_size
cls._blocks.append(data)
@classmethod
def tearDownClass(cls):
cls._remove_storage(cls._testfname)
cls._remove_storage(cls._dummy_name)
def test_setup_fails(self):
self.assertEqual(self._check_exists(self._dummy_name), False)
with self.assertRaises(IOError):
self._type.setup(
self._get_empty_existing(),
block_size=10,
block_count=10,
**self._type_kwds)
self.assertEqual(self._check_exists(self._dummy_name), False)
with self.assertRaises(IOError):
self._type.setup(
self._get_empty_existing(),
block_size=10,
block_count=10,
ignore_existing=False,
**self._type_kwds)
self.assertEqual(self._check_exists(self._dummy_name), False)
with self.assertRaises(ValueError):
self._type.setup(self._dummy_name,
block_size=0,
block_count=1,
**self._type_kwds)
self.assertEqual(self._check_exists(self._dummy_name), False)
with self.assertRaises(ValueError):
self._type.setup(self._dummy_name,
block_size=1,
block_count=0,
**self._type_kwds)
self.assertEqual(self._check_exists(self._dummy_name), False)
with self.assertRaises(TypeError):
self._type.setup(self._dummy_name,
block_size=1,
block_count=1,
header_data=2,
**self._type_kwds)
self.assertEqual(self._check_exists(self._dummy_name), False)
# TODO: The multiprocessing module is bad
# about handling exceptions raised on the
# thread's stack.
#with self.assertRaises(ValueError):
# def _init(i):
# raise ValueError
# self._type.setup(self._dummy_name,
# block_size=1,
# block_count=1,
# initialize=_init,
# **self._type_kwds)
#self.assertEqual(self._check_exists(self._dummy_name), False)
def test_setup(self):
fname = ".".join(self.id().split(".")[1:])
fname += ".bin"
fname = os.path.join(thisdir, fname)
self._remove_storage(fname)
bsize = 10
bcount = 11
fsetup = self._type.setup(fname, bsize, bcount, **self._type_kwds)
fsetup.close()
flen = len(self._read_storage(fsetup))
self.assertEqual(
flen,
self._type.compute_storage_size(bsize,
bcount))
self.assertEqual(
flen >
self._type.compute_storage_size(bsize,
bcount,
ignore_header=True),
True)
with self._reopen_storage(fsetup) as f:
self.assertEqual(f.header_data, bytes())
self.assertEqual(fsetup.header_data, bytes())
self.assertEqual(f.block_size, bsize)
self.assertEqual(fsetup.block_size, bsize)
self.assertEqual(f.block_count, bcount)
self.assertEqual(fsetup.block_count, bcount)
self.assertEqual(f.storage_name, fsetup.storage_name)
self.assertEqual(fsetup.storage_name, fsetup.storage_name)
if self._type is not BlockStorageRAM:
self.assertEqual(fsetup.storage_name, fname)
else:
self.assertEqual(fsetup.storage_name, None)
self._remove_storage(fname)
def test_setup_withdata(self):
fname = ".".join(self.id().split(".")[1:])
fname += ".bin"
fname = os.path.join(thisdir, fname)
self._remove_storage(fname)
bsize = 10
bcount = 11
header_data = bytes(bytearray([0,1,2]))
fsetup = self._type.setup(fname,
bsize,
bcount,
header_data=header_data,
**self._type_kwds)
fsetup.close()
flen = len(self._read_storage(fsetup))
self.assertEqual(
flen,
self._type.compute_storage_size(bsize,
bcount,
header_data=header_data))
self.assertTrue(len(header_data) > 0)
self.assertEqual(
self._type.compute_storage_size(bsize,
bcount) <
self._type.compute_storage_size(bsize,
bcount,
header_data=header_data),
True)
self.assertEqual(
flen >
self._type.compute_storage_size(bsize,
bcount,
header_data=header_data,
ignore_header=True),
True)
with self._reopen_storage(fsetup) as f:
self.assertEqual(f.header_data, header_data)
self.assertEqual(fsetup.header_data, header_data)
self.assertEqual(f.block_size, bsize)
self.assertEqual(fsetup.block_size, bsize)
self.assertEqual(f.block_count, bcount)
self.assertEqual(fsetup.block_count, bcount)
self.assertEqual(f.storage_name, fsetup.storage_name)
self.assertEqual(fsetup.storage_name, fsetup.storage_name)
if self._type is not BlockStorageRAM:
self.assertEqual(fsetup.storage_name, fname)
else:
self.assertEqual(fsetup.storage_name, None)
self._remove_storage(fname)
def test_init_noexists(self):
self.assertEqual(self._check_exists(self._dummy_name), False)
with self.assertRaises(IOError):
with self._type(self._dummy_name, **self._type_kwds) as f:
pass # pragma: no cover
def test_init_exists(self):
self.assertEqual(self._check_exists(self._testfname), True)
databefore = self._read_storage(self._original_f)
with self._open_teststorage() as f:
self.assertEqual(f.block_size, self._block_size)
self.assertEqual(f.block_count, self._block_count)
self.assertEqual(f.storage_name, self._testfname)
self.assertEqual(f.header_data, bytes())
self.assertEqual(self._check_exists(self._testfname), True)
dataafter = self._read_storage(self._original_f)
self.assertEqual(databefore, dataafter)
def test_read_block(self):
with self._open_teststorage() as f:
self.assertEqual(f.bytes_sent, 0)
self.assertEqual(f.bytes_received, 0)
for i, data in enumerate(self._blocks):
self.assertEqual(list(bytearray(f.read_block(i))),
list(self._blocks[i]))
for i, data in enumerate(self._blocks):
self.assertEqual(list(bytearray(f.read_block(i))),
list(self._blocks[i]))
for i, data in reversed(list(enumerate(self._blocks))):
self.assertEqual(list(bytearray(f.read_block(i))),
list(self._blocks[i]))
for i, data in reversed(list(enumerate(self._blocks))):
self.assertEqual(list(bytearray(f.read_block(i))),
list(self._blocks[i]))
self.assertEqual(f.bytes_sent, 0)
self.assertEqual(f.bytes_received,
self._block_count*self._block_size*4)
with self._open_teststorage() as f:
self.assertEqual(f.bytes_sent, 0)
self.assertEqual(f.bytes_received, 0)
self.assertEqual(list(bytearray(f.read_block(0))),
list(self._blocks[0]))
self.assertEqual(list(bytearray(f.read_block(self._block_count-1))),
list(self._blocks[-1]))
self.assertEqual(f.bytes_sent, 0)
self.assertEqual(f.bytes_received,
self._block_size*2)
def test_write_block(self):
data = bytearray([self._block_count])*self._block_size
self.assertEqual(len(data) > 0, True)
with self._open_teststorage() as f:
self.assertEqual(f.bytes_sent, 0)
self.assertEqual(f.bytes_received, 0)
for i in xrange(self._block_count):
self.assertNotEqual(list(bytearray(f.read_block(i))),
list(data))
for i in xrange(self._block_count):
f.write_block(i, bytes(data))
for i in xrange(self._block_count):
self.assertEqual(list(bytearray(f.read_block(i))),
list(data))
for i, block in enumerate(self._blocks):
f.write_block(i, bytes(block))
self.assertEqual(f.bytes_sent,
self._block_count*self._block_size*2)
self.assertEqual(f.bytes_received,
self._block_count*self._block_size*2)
def test_read_blocks(self):
with self._open_teststorage() as f:
self.assertEqual(f.bytes_sent, 0)
self.assertEqual(f.bytes_received, 0)
data = f.read_blocks(list(xrange(self._block_count)))
self.assertEqual(len(data), self._block_count)
for i, block in enumerate(data):
self.assertEqual(list(bytearray(block)),
list(self._blocks[i]))
data = f.read_blocks([0])
self.assertEqual(len(data), 1)
self.assertEqual(list(bytearray(data[0])),
list(self._blocks[0]))
self.assertEqual(len(self._blocks) > 1, True)
data = f.read_blocks(list(xrange(1, self._block_count)) + [0])
self.assertEqual(len(data), self._block_count)
for i, block in enumerate(data[:-1], 1):
self.assertEqual(list(bytearray(block)),
list(self._blocks[i]))
self.assertEqual(list(bytearray(data[-1])),
list(self._blocks[0]))
self.assertEqual(f.bytes_sent, 0)
self.assertEqual(f.bytes_received,
(2*self._block_count+1)*self._block_size)
def test_yield_blocks(self):
with self._open_teststorage() as f:
self.assertEqual(f.bytes_sent, 0)
self.assertEqual(f.bytes_received, 0)
data = list(f.yield_blocks(list(xrange(self._block_count))))
self.assertEqual(len(data), self._block_count)
for i, block in enumerate(data):
self.assertEqual(list(bytearray(block)),
list(self._blocks[i]))
data = list(f.yield_blocks([0]))
self.assertEqual(len(data), 1)
self.assertEqual(list(bytearray(data[0])),
list(self._blocks[0]))
self.assertEqual(len(self._blocks) > 1, True)
data = list(f.yield_blocks(list(xrange(1, self._block_count)) + [0]))
self.assertEqual(len(data), self._block_count)
for i, block in enumerate(data[:-1], 1):
self.assertEqual(list(bytearray(block)),
list(self._blocks[i]))
self.assertEqual(list(bytearray(data[-1])),
list(self._blocks[0]))
self.assertEqual(f.bytes_sent, 0)
self.assertEqual(f.bytes_received,
(2*self._block_count+1)*self._block_size)
def test_write_blocks(self):
data = [bytearray([self._block_count])*self._block_size
for i in xrange(self._block_count)]
with self._open_teststorage() as f:
self.assertEqual(f.bytes_sent, 0)
self.assertEqual(f.bytes_received, 0)
orig = f.read_blocks(list(xrange(self._block_count)))
self.assertEqual(len(orig), self._block_count)
for i, block in enumerate(orig):
self.assertEqual(list(bytearray(block)),
list(self._blocks[i]))
f.write_blocks(list(xrange(self._block_count)),
[bytes(b) for b in data])
new = f.read_blocks(list(xrange(self._block_count)))
self.assertEqual(len(new), self._block_count)
for i, block in enumerate(new):
self.assertEqual(list(bytearray(block)),
list(data[i]))
f.write_blocks(list(xrange(self._block_count)),
[bytes(b) for b in self._blocks])
orig = f.read_blocks(list(xrange(self._block_count)))
self.assertEqual(len(orig), self._block_count)
for i, block in enumerate(orig):
self.assertEqual(list(bytearray(block)),
list(self._blocks[i]))
self.assertEqual(f.bytes_sent,
self._block_count*self._block_size*2)
self.assertEqual(f.bytes_received,
self._block_count*self._block_size*3)
def test_update_header_data(self):
fname = ".".join(self.id().split(".")[1:])
fname += ".bin"
fname = os.path.join(thisdir, fname)
self._remove_storage(fname)
bsize = 10
bcount = 11
header_data = bytes(bytearray([0,1,2]))
fsetup = self._type.setup(fname,
block_size=bsize,
block_count=bcount,
header_data=header_data,
**self._type_kwds)
fsetup.close()
new_header_data = bytes(bytearray([1,1,1]))
with self._reopen_storage(fsetup) as f:
self.assertEqual(f.header_data, header_data)
f.update_header_data(new_header_data)
self.assertEqual(f.header_data, new_header_data)
with self._reopen_storage(fsetup) as f:
self.assertEqual(f.header_data, new_header_data)
with self.assertRaises(ValueError):
with self._reopen_storage(fsetup) as f:
f.update_header_data(bytes(bytearray([1,1])))
with self.assertRaises(ValueError):
with self._reopen_storage(fsetup) as f:
f.update_header_data(bytes(bytearray([1,1,1,1])))
with self._reopen_storage(fsetup) as f:
self.assertEqual(f.header_data, new_header_data)
self._remove_storage(fname)
def test_locked_flag(self):
with self._open_teststorage() as f:
with self.assertRaises(IOError):
with self._open_teststorage() as f1:
pass # pragma: no cover
with self.assertRaises(IOError):
with self._open_teststorage() as f1:
pass # pragma: no cover
with self._open_teststorage(ignore_lock=True) as f1:
pass
with self.assertRaises(IOError):
with self._open_teststorage() as f1:
pass # pragma: no cover
with self._open_teststorage(ignore_lock=True) as f1:
pass
with self._open_teststorage(ignore_lock=True) as f1:
pass
with self._open_teststorage(ignore_lock=True) as f:
pass
def test_read_block_cloned(self):
with self._open_teststorage() as forig:
self.assertEqual(forig.bytes_sent, 0)
self.assertEqual(forig.bytes_received, 0)
with forig.clone_device() as f:
self.assertEqual(forig.bytes_sent, 0)
self.assertEqual(forig.bytes_received, 0)
self.assertEqual(f.bytes_sent, 0)
self.assertEqual(f.bytes_received, 0)
for i, data in enumerate(self._blocks):
self.assertEqual(list(bytearray(f.read_block(i))),
list(self._blocks[i]))
for i, data in enumerate(self._blocks):
self.assertEqual(list(bytearray(f.read_block(i))),
list(self._blocks[i]))
for i, data in reversed(list(enumerate(self._blocks))):
self.assertEqual(list(bytearray(f.read_block(i))),
list(self._blocks[i]))
for i, data in reversed(list(enumerate(self._blocks))):
self.assertEqual(list(bytearray(f.read_block(i))),
list(self._blocks[i]))
self.assertEqual(f.bytes_sent, 0)
self.assertEqual(f.bytes_received,
self._block_count*self._block_size*4)
with forig.clone_device() as f:
self.assertEqual(forig.bytes_sent, 0)
self.assertEqual(forig.bytes_received, 0)
self.assertEqual(f.bytes_sent, 0)
self.assertEqual(f.bytes_received, 0)
self.assertEqual(list(bytearray(f.read_block(0))),
list(self._blocks[0]))
self.assertEqual(list(bytearray(f.read_block(self._block_count-1))),
list(self._blocks[-1]))
self.assertEqual(f.bytes_sent, 0)
self.assertEqual(f.bytes_received,
self._block_size*2)
self.assertEqual(forig.bytes_sent, 0)
self.assertEqual(forig.bytes_received, 0)
def test_write_block_cloned(self):
data = bytearray([self._block_count])*self._block_size
self.assertEqual(len(data) > 0, True)
with self._open_teststorage() as forig:
self.assertEqual(forig.bytes_sent, 0)
self.assertEqual(forig.bytes_received, 0)
with forig.clone_device() as f:
self.assertEqual(forig.bytes_sent, 0)
self.assertEqual(forig.bytes_received, 0)
self.assertEqual(f.bytes_sent, 0)
self.assertEqual(f.bytes_received, 0)
for i in xrange(self._block_count):
self.assertNotEqual(list(bytearray(f.read_block(i))),
list(data))
for i in xrange(self._block_count):
f.write_block(i, bytes(data))
for i in xrange(self._block_count):
self.assertEqual(list(bytearray(f.read_block(i))),
list(data))
for i, block in enumerate(self._blocks):
f.write_block(i, bytes(block))
self.assertEqual(f.bytes_sent,
self._block_count*self._block_size*2)
self.assertEqual(f.bytes_received,
self._block_count*self._block_size*2)
self.assertEqual(forig.bytes_sent, 0)
self.assertEqual(forig.bytes_received, 0)
def test_read_blocks_cloned(self):
with self._open_teststorage() as forig:
self.assertEqual(forig.bytes_sent, 0)
self.assertEqual(forig.bytes_received, 0)
with forig.clone_device() as f:
self.assertEqual(forig.bytes_sent, 0)
self.assertEqual(forig.bytes_received, 0)
self.assertEqual(f.bytes_sent, 0)
self.assertEqual(f.bytes_received, 0)
data = f.read_blocks(list(xrange(self._block_count)))
self.assertEqual(len(data), self._block_count)
for i, block in enumerate(data):
self.assertEqual(list(bytearray(block)),
list(self._blocks[i]))
data = f.read_blocks([0])
self.assertEqual(len(data), 1)
self.assertEqual(list(bytearray(data[0])),
list(self._blocks[0]))
self.assertEqual(len(self._blocks) > 1, True)
data = f.read_blocks(list(xrange(1, self._block_count)) + [0])
self.assertEqual(len(data), self._block_count)
for i, block in enumerate(data[:-1], 1):
self.assertEqual(list(bytearray(block)),
list(self._blocks[i]))
self.assertEqual(list(bytearray(data[-1])),
list(self._blocks[0]))
self.assertEqual(f.bytes_sent, 0)
self.assertEqual(f.bytes_received,
(2*self._block_count + 1)*self._block_size)
self.assertEqual(forig.bytes_sent, 0)
self.assertEqual(forig.bytes_received, 0)
def test_yield_blocks_cloned(self):
with self._open_teststorage() as forig:
self.assertEqual(forig.bytes_sent, 0)
self.assertEqual(forig.bytes_received, 0)
with forig.clone_device() as f:
self.assertEqual(forig.bytes_sent, 0)
self.assertEqual(forig.bytes_received, 0)
self.assertEqual(f.bytes_sent, 0)
self.assertEqual(f.bytes_received, 0)
data = list(f.yield_blocks(list(xrange(self._block_count))))
self.assertEqual(len(data), self._block_count)
for i, block in enumerate(data):
self.assertEqual(list(bytearray(block)),
list(self._blocks[i]))
data = list(f.yield_blocks([0]))
self.assertEqual(len(data), 1)
self.assertEqual(list(bytearray(data[0])),
list(self._blocks[0]))
self.assertEqual(len(self._blocks) > 1, True)
data = list(f.yield_blocks(list(xrange(1, self._block_count)) + [0]))
self.assertEqual(len(data), self._block_count)
for i, block in enumerate(data[:-1], 1):
self.assertEqual(list(bytearray(block)),
list(self._blocks[i]))
self.assertEqual(list(bytearray(data[-1])),
list(self._blocks[0]))
self.assertEqual(f.bytes_sent, 0)
self.assertEqual(f.bytes_received,
(2*self._block_count + 1)*self._block_size)
self.assertEqual(forig.bytes_sent, 0)
self.assertEqual(forig.bytes_received, 0)
def test_write_blocks_cloned(self):
data = [bytearray([self._block_count])*self._block_size
for i in xrange(self._block_count)]
with self._open_teststorage() as forig:
self.assertEqual(forig.bytes_sent, 0)
self.assertEqual(forig.bytes_received, 0)
with forig.clone_device() as f:
self.assertEqual(forig.bytes_sent, 0)
self.assertEqual(forig.bytes_received, 0)
self.assertEqual(f.bytes_sent, 0)
self.assertEqual(f.bytes_received, 0)
orig = f.read_blocks(list(xrange(self._block_count)))
self.assertEqual(len(orig), self._block_count)
for i, block in enumerate(orig):
self.assertEqual(list(bytearray(block)),
list(self._blocks[i]))
f.write_blocks(list(xrange(self._block_count)),
[bytes(b) for b in data])
new = f.read_blocks(list(xrange(self._block_count)))
self.assertEqual(len(new), self._block_count)
for i, block in enumerate(new):
self.assertEqual(list(bytearray(block)),
list(data[i]))
f.write_blocks(list(xrange(self._block_count)),
[bytes(b) for | |
"Only the owner can upload objects.")
return dict(
head=_generate_presigned_url(S3_HEAD_OBJECT, owner, blob_hash),
get=_generate_presigned_url(S3_GET_OBJECT, owner, blob_hash),
put=_generate_presigned_url(S3_PUT_OBJECT, owner, blob_hash),
)
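# Fetch up to MAX_PREVIEW_SIZE bytes of the gzip'ed README object, ungzip and
# decode it; truncation may split a UTF-8 character, hence errors='ignore'.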
def download_object_preview_impl(owner, obj_hash):
resp = s3_client.get_object(
Bucket=PACKAGE_BUCKET_NAME,
Key='%s/%s/%s' % (OBJ_DIR, owner, obj_hash),
Range='bytes=-%d' % MAX_PREVIEW_SIZE # Limit the size of the gzip'ed content.
)
body = resp['Body']
with gzip.GzipFile(fileobj=body, mode='rb') as fd:
data = fd.read(MAX_PREVIEW_SIZE)
return data.decode(errors='ignore') # Data may be truncated in the middle of a UTF-8 character.
def download_object_preview(owner, obj_hash):
try:
return download_object_preview_impl(owner, obj_hash)
except ClientError as ex:
_mp_track(
type="download_exception",
obj_owner=owner,
obj_hash=obj_hash,
error=str(ex),
)
if ex.response['ResponseMetadata']['HTTPStatusCode'] == requests.codes.not_found:
# The client somehow failed to upload the README.
raise ApiException(
requests.codes.forbidden,
"Failed to download the README; make sure it has been uploaded correctly."
)
else:
# Something unexpected happened.
raise
except OSError as ex:
# Failed to ungzip: either the contents is not actually gzipped,
# or the response was truncated because it was too big.
_mp_track(
type="download_exception",
obj_owner=owner,
obj_hash=obj_hash,
error=str(ex),
)
raise ApiException(
requests.codes.forbidden,
"Failed to ungzip the README; make sure it has been uploaded correctly."
)
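# package_put: validates ownership, name, hash and sizes, creates the Package,
# Instance and Access rows as needed, stores a README preview, and records a
# log and push event. Dry runs roll back and return presigned upload URLs.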
@app.route('/api/package/<owner>/<package_name>/<package_hash>', methods=['PUT'])
@api(schema=PACKAGE_SCHEMA)
@as_json
def package_put(owner, package_name, package_hash):
# TODO: Write access for collaborators.
if g.auth.user != owner:
raise ApiException(requests.codes.forbidden,
"Only the package owner can push packages.")
if not VALID_NAME_RE.match(package_name):
raise ApiException(requests.codes.bad_request, "Invalid package name")
# TODO: Description.
data = json.loads(request.data.decode('utf-8'), object_hook=decode_node)
dry_run = data.get('dry_run', False)
public = data.get('is_public', data.get('public', False))
team = data.get('is_team', False)
contents = data['contents']
sizes = data.get('sizes', {})
if public and not ALLOW_ANONYMOUS_ACCESS:
raise ApiException(requests.codes.forbidden, "Public access not allowed")
if team and not ALLOW_TEAM_ACCESS:
raise ApiException(requests.codes.forbidden, "Team access not allowed")
if hash_contents(contents) != package_hash:
raise ApiException(requests.codes.bad_request, "Wrong contents hash")
all_hashes = set(find_object_hashes(contents))
# Old clients don't send sizes. But if sizes are present, make sure they match the hashes.
if sizes and set(sizes) != all_hashes:
raise ApiException(requests.codes.bad_request, "Sizes don't match the hashes")
# Insert a package if it doesn't already exist.
# TODO: Separate endpoint for just creating a package with no versions?
package = (
Package.query
.with_for_update()
.filter_by(owner=owner, name=package_name)
.one_or_none()
)
if package is None:
# Check for case-insensitive matches, and reject the push.
package_ci = (
Package.query
.filter(
sa.and_(
sa.func.lower(Package.owner) == sa.func.lower(owner),
sa.func.lower(Package.name) == sa.func.lower(package_name)
)
)
.one_or_none()
)
if package_ci is not None:
raise ApiException(
requests.codes.forbidden,
"Package already exists: %s/%s" % (package_ci.owner, package_ci.name)
)
if not public and not _private_packages_allowed():
raise ApiException(
requests.codes.payment_required,
("Insufficient permissions. Run `quilt push --public %s/%s` to make " +
"this package public, or upgrade your service plan to create " +
"private packages: https://quiltdata.com/profile.") %
(owner, package_name)
)
package = Package(owner=owner, name=package_name)
db.session.add(package)
owner_access = Access(package=package, user=owner)
db.session.add(owner_access)
if public:
public_access = Access(package=package, user=PUBLIC)
db.session.add(public_access)
if team:
team_access = Access(package=package, user=TEAM)
db.session.add(team_access)
else:
if public:
public_access = (
Access.query
.filter(sa.and_(
Access.package == package,
Access.user == PUBLIC
))
.one_or_none()
)
if public_access is None:
raise ApiException(
requests.codes.forbidden,
("%(user)s/%(pkg)s is private. To make it public, " +
"run `quilt access add %(user)s/%(pkg)s public`.") %
dict(user=owner, pkg=package_name)
)
if team:
team_access = (
Access.query
.filter(sa.and_(
Access.package == package,
Access.user == TEAM
))
.one_or_none()
)
if team_access is None:
raise ApiException(
requests.codes.forbidden,
("%(team)s:%(user)s/%(pkg)s is private. To share it with the team, " +
"run `quilt access add %(team)s:%(user)s/%(pkg)s team`.") %
dict(team=app.config['TEAM_ID'], user=owner, pkg=package_name)
)
# Insert an instance if it doesn't already exist.
instance = (
Instance.query
.with_for_update()
.filter_by(package=package, hash=package_hash)
.one_or_none()
)
# No more error checking at this point, so return from dry-run early.
if dry_run:
db.session.rollback()
# List of signed URLs is potentially huge, so stream it.
def _generate():
yield '{"upload_urls":{'
for idx, blob_hash in enumerate(all_hashes):
comma = ('' if idx == 0 else ',')
value = dict(
head=_generate_presigned_url(S3_HEAD_OBJECT, owner, blob_hash),
put=_generate_presigned_url(S3_PUT_OBJECT, owner, blob_hash)
)
yield '%s%s:%s' % (comma, json.dumps(blob_hash), json.dumps(value))
yield '}}'
return Response(_generate(), content_type='application/json')
keywords_tsv = keywords_tsvector(owner, package_name, contents)
if instance is None:
readme_hash = None
readme_preview = None
readme = contents.children.get(README)
if isinstance(readme, FileNode):
assert len(readme.hashes) == 1
readme_hash = readme.hashes[0]
# Download the README if necessary. We want to do this early, before we call
# with_for_update() on S3Blob, since it's potentially expensive.
have_readme = (
db.session.query(sa.func.count(S3Blob.id))
.filter_by(owner=owner, hash=readme_hash)
.filter(S3Blob.preview.isnot(None))
).one()[0] == 1
if not have_readme:
readme_preview = download_object_preview(owner, readme_hash)
# Add all the hashes that don't exist yet.
blobs = (
S3Blob.query
.with_for_update()
.filter(
sa.and_(
S3Blob.owner == owner,
S3Blob.hash.in_(all_hashes)
)
)
.all()
) if all_hashes else []
# Create the instance after querying the blobs - otherwise, SQLAlchemy
# will issue an INSERT followed by UPDATE instead of a single INSERT.
instance = Instance(
package=package,
contents=contents,
hash=package_hash,
created_by=g.auth.user,
updated_by=g.auth.user,
keywords_tsv=keywords_tsv,
)
blob_by_hash = { blob.hash: blob for blob in blobs }
for blob_hash in all_hashes:
blob_size = sizes.get(blob_hash)
blob = blob_by_hash.get(blob_hash)
if blob is None:
blob = S3Blob(owner=owner, hash=blob_hash, size=blob_size)
if blob_hash == readme_hash:
if readme_preview is not None:
# If we've just downloaded the README, save it in the blob.
# Otherwise, it was already set.
blob.preview = readme_preview
blob_preview_expr = readme_preview
else:
# README already exists in the DB; use a subquery to avoid fetching it
# only to insert it into the instance.
blob_preview_expr = sa.select([S3Blob.preview]).where(S3Blob.id == blob.id)
instance.readme_blob = blob
instance.blobs_tsv = sa.func.to_tsvector(FTS_LANGUAGE, blob_preview_expr)
instance.blobs.append(blob)
else:
# Just update the contents dictionary.
# Nothing else could've changed without invalidating the hash.
instance.contents = contents
instance.updated_at = sa.func.now()
instance.updated_by = g.auth.user
instance.keywords_tsv = keywords_tsv
db.session.add(instance)
# Insert a log.
log = Log(
package=package,
instance=instance,
author=owner,
)
db.session.add(log)
# Insert an event.
event = Event(
user=g.auth.user,
type=Event.Type.PUSH,
package_owner=owner,
package_name=package_name,
package_hash=package_hash,
extra=dict(
public=public
)
)
db.session.add(event)
db.session.commit()
_mp_track(
type="push",
package_owner=owner,
package_name=package_name,
public=public,
)
return dict(
package_url='%s/package/%s/%s' % (CATALOG_URL, owner, package_name)
)
@app.route('/api/package/<owner>/<package_name>/<package_hash>', methods=['GET'])
@api(require_login=False)
@as_json
def package_get(owner, package_name, package_hash):
subpath = request.args.get('subpath')
instance = _get_instance(g.auth, owner, package_name, package_hash)
assert isinstance(instance.contents, RootNode)
subnode = instance.contents
for component in subpath.split('/') if subpath else []:
try:
subnode = subnode.children[component]
except (AttributeError, KeyError):
raise ApiException(requests.codes.not_found, "Invalid subpath: %r" % component)
all_hashes = set(find_object_hashes(subnode))
blobs = (
S3Blob.query
.filter(
sa.and_(
S3Blob.owner == owner,
S3Blob.hash.in_(all_hashes)
)
)
.all()
) if all_hashes else []
urls = {
blob_hash: _generate_presigned_url(S3_GET_OBJECT, owner, blob_hash)
for blob_hash in all_hashes
}
# Insert an event.
event = Event(
user=g.auth.user,
type=Event.Type.INSTALL,
package_owner=owner,
package_name=package_name,
package_hash=package_hash,
extra=dict(
subpath=subpath
)
)
db.session.add(event)
db.session.commit()
_mp_track(
type="install",
package_owner=owner,
package_name=package_name,
subpath=subpath,
)
return dict(
contents=instance.contents,
urls=urls,
sizes={blob.hash: blob.size for blob in blobs},
created_by=instance.created_by,
created_at=instance.created_at.timestamp(),
updated_by=instance.updated_by,
updated_at=instance.updated_at.timestamp(),
)
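# _generate_preview: build a truncated tree of the package contents, limited
# to PREVIEW_MAX_DEPTH levels and PREVIEW_MAX_CHILDREN entries per group.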
def _generate_preview(node, max_depth=PREVIEW_MAX_DEPTH):
if isinstance(node, GroupNode):
max_children = PREVIEW_MAX_CHILDREN if max_depth else 0
children_preview = [
(name, _generate_preview(child, max_depth - 1))
for name, child in sorted(node.children.items())[:max_children]
]
if len(node.children) > max_children:
children_preview.append(('...', None))
return children_preview
else:
return None
def _iterate_data_nodes(node):
# TODO: Merge into core.py
if isinstance(node, (TableNode, FileNode)):
yield node
elif isinstance(node, GroupNode):
for child in node.children.values():
yield from _iterate_data_nodes(child)
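# package_preview: gathers the README URL and preview, a truncated contents
# preview, the push log count, total blob size and per-extension file counts
# for one package instance.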
@app.route('/api/package_preview/<owner>/<package_name>/<package_hash>', methods=['GET'])
@api(require_login=False)
@as_json
def package_preview(owner, package_name, package_hash):
result = (
db.session.query(
Instance,
sa.func.bool_or(Access.user == PUBLIC).label('is_public'),
sa.func.bool_or(Access.user == TEAM).label('is_team')
)
.filter_by(hash=package_hash)
.join(Instance.package)
.filter_by(owner=owner, name=package_name)
.join(Package.access)
.filter(_access_filter(g.auth))
.group_by(Package.id, Instance.id)
.one_or_none()
)
if result is None:
raise ApiException(
requests.codes.not_found,
"Package hash does not exist"
)
(instance, is_public, is_team) = result
assert isinstance(instance.contents, RootNode)
log_count = (
db.session.query(
sa.func.count(Log.package_id)
)
.filter(Log.package_id == instance.package_id)
).one()
readme = instance.contents.children.get(README)
if isinstance(readme, FileNode):
assert len(readme.hashes) == 1
readme_hash = readme.hashes[0]
readme_url = _generate_presigned_url(S3_GET_OBJECT, owner, readme_hash)
readme_blob = (
S3Blob.query
.filter_by(owner=owner, hash=readme_hash)
.options(undefer('preview'))
.one_or_none() # Should be one() once READMEs are backfilled.
)
readme_preview = readme_blob.preview if readme_blob is not None else None
else:
readme_url = None
readme_preview = None
contents_preview = _generate_preview(instance.contents)
total_size = int((
db.session.query(sa.func.coalesce(sa.func.sum(S3Blob.size), 0))
# We could do a join on S3Blob.instances - but that results in two joins instead of one.
# So do a completely manual join to make it efficient.
.join(InstanceBlobAssoc, sa.and_(
InstanceBlobAssoc.c.blob_id == S3Blob.id,
InstanceBlobAssoc.c.instance_id == instance.id
))
).one()[0])
file_types = defaultdict(int)
for node in _iterate_data_nodes(instance.contents):
path = node.metadata.get('q_path')
if not isinstance(path, str):
path = ''
# We don't know if it's a UNIX or a Windows path, so let's treat both \ and / as separators.
# PureWindowsPath will do that for us, since / is legal on Windows.
ext = pathlib.PureWindowsPath(path).suffix.lower()
| |
# pymedphys/labs/pedromartinez/qc-graticule.py
#############################START LICENSE##########################################
# Copyright (C) 2019 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#############################END LICENSE##########################################
###########################################################################################
#
# Script name: qc-graticule
#
# Description: Tool for calculating graticule centre at different gantry angles.
#
# Example usage: python qc-graticule "/folder/"
#
# The folder can contain:
# 1/2 image(s) at g=0
# 1/2 image(s) at g=90
# 1/2 image(s) at g=180
# 1/2 image(s) at g=270
#
# Author: <NAME>
# <EMAIL>
# 5877000722
# Date:2020-05-12
#
###########################################################################################
import argparse
import os
import sys
from datetime import datetime
from operator import itemgetter
from sys import platform
from tqdm import tqdm
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
from PIL import Image
from skimage.feature import blob_log
import pydicom
from pymedphys.labs.pedromartinez.utils import utils as u
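# running_mean: centered moving average of window N; the window is clamped at
# the array edges, so the output keeps the input's length.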
def running_mean(x, N):
out = np.zeros_like(x, dtype=np.float64)
dim_len = x.shape[0]
for i in range(dim_len):
if N % 2 == 0:
a, b = i - (N - 1) // 2, i + (N - 1) // 2 + 2
else:
a, b = i - (N - 1) // 2, i + (N - 1) // 2 + 1
# cap indices to min and max indices
a = max(0, a)
b = min(dim_len, b)
out[i] = np.mean(x[a:b])
return out
# axial visualization and scrolling of the center points
def viewer(volume, dx, dy, center, title, textstr):
print("center=", center)
# remove_keymap_conflicts({'j', 'k'})
fig = plt.figure(figsize=(12, 7))
ax = fig.subplots()
ax.volume = volume
width = volume.shape[1]
height = volume.shape[0]
extent = (0, 0 + (volume.shape[1] * dx), 0, 0 + (volume.shape[0] * dy))
img = ax.imshow(volume, extent=extent)
# img=ax.imshow(volume)
ax.set_xlabel("x distance [mm]")
ax.set_ylabel("y distance [mm]")
# ax.set_xlabel('x pixel')
# ax.set_ylabel('y pixel')
ax.set_xlim(width * dx / 2 - 10, width * dx / 2 + 10)
ax.set_ylim(height * dy / 2 - 10, height * dy / 2 + 10)
# fig.suptitle('Image', fontsize=16)
print(title[0])
ax.set_title(title[0] + "\n" + title[1], fontsize=16)
ax.text((volume.shape[1] + 250) * dx, (volume.shape[0]) * dy, textstr)
fig.subplots_adjust(right=0.75)
fig.colorbar(img, ax=ax, orientation="vertical")
# fig.canvas.mpl_connect('key_press_event', process_key_axial)
return fig, ax
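# scalingAnalysis: detect the phantom dots with a Laplacian-of-Gaussian blob
# detector, keep the ones lying on the central row, and report the separation
# of the outermost pair in cm.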
def scalingAnalysis(ArrayDicom_o, dx, dy): # determine scaling
ArrayDicom = u.norm01(ArrayDicom_o)
blobs_log = blob_log(
ArrayDicom, min_sigma=1, max_sigma=5, num_sigma=20, threshold=0.15
    )  # exclude_border is omitted because some scikit-image versions do not recognize it
point_det = []
for blob in blobs_log:
y, x, r = blob
point_det.append((x, y, r))
point_det = sorted(
point_det, key=itemgetter(2), reverse=True
    )  # sort by dot radius; the bigger dots sit around the centre and the edges
point_det = np.asarray(point_det)
# now we need to select the most extreme left and right point
print(np.shape(ArrayDicom)[0] // 2)
print(abs(point_det[:6, 1] - np.shape(ArrayDicom)[0] // 2) < 10)
point_sel = []
for i in range(0, 6):
if abs(point_det[i, 1] - np.shape(ArrayDicom)[0] // 2) < 10:
point_sel.append(abs(point_det[i, :]))
point_sel = np.asarray(point_sel)
imax = np.argmax(point_sel[:, 0])
imin = np.argmin(point_sel[:, 0])
print(point_sel[imax, :], point_sel[imin, :])
distance = (
np.sqrt(
(point_sel[imax, 0] - point_sel[imin, 0])
* (point_sel[imax, 0] - point_sel[imin, 0])
* dx
* dx
+ (point_sel[imax, 1] - point_sel[imin, 1])
* (point_sel[imax, 1] - point_sel[imin, 1])
* dy
* dy
)
/ 10.0
)
print("distance=", distance, "cm") # distance is reported in cm
# plotting the figure of scaling results
fig = plt.figure(figsize=(12, 7))
ax = fig.subplots()
ax.volume = ArrayDicom_o
width = ArrayDicom_o.shape[1]
height = ArrayDicom_o.shape[0]
extent = (0, 0 + (width * dx), 0, 0 + (height * dy))
img = ax.imshow(ArrayDicom_o, extent=extent, origin="lower")
# img = ax.imshow(ArrayDicom_o)
ax.set_xlabel("x distance [mm]")
ax.set_ylabel("y distance [mm]")
ax.scatter(point_sel[imax, 0] * dx, point_sel[imax, 1] * dy)
ax.scatter(point_sel[imin, 0] * dx, point_sel[imin, 1] * dy)
fig.colorbar(img, ax=ax, orientation="vertical")
# adding a horizontal arrow
ax.annotate(
s="",
xy=(point_sel[imax, 0] * dx, point_sel[imax, 1] * dy),
xytext=(point_sel[imin, 0] * dx, point_sel[imin, 1] * dy),
arrowprops=dict(arrowstyle="<->", color="r"),
) # example on how to plot a double headed arrow
ax.text(
(width // 2.8) * dx,
(height // 2 + 10) * dy,
"Distance=" + str(round(distance, 4)) + " cm",
rotation=0,
fontsize=14,
color="r",
)
return distance, fig
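# full_imageProcess: locate the graticule centre, then refine it on a 10x
# LANCZOS-upsampled 40x40 crop around the image centre, and plot the result.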
def full_imageProcess(ArrayDicom_o, dx, dy, title): # process a full image
ArrayDicom = u.norm01(ArrayDicom_o)
height = np.shape(ArrayDicom)[0]
width = np.shape(ArrayDicom)[1]
blobs_log = blob_log(
ArrayDicom, min_sigma=1, max_sigma=5, num_sigma=20, threshold=0.15
    )  # exclude_border is omitted because some scikit-image versions do not recognize it
center = []
point_det = []
for blob in blobs_log:
y, x, r = blob
point_det.append((x, y, r))
point_det = sorted(
point_det, key=itemgetter(2), reverse=True
    )  # sort by dot radius; the bigger dots sit around the centre and the edges
# we need to find the centre dot as well as the larger dots on the sides of the image
# for j in range(0, len(point_det)):
# x, y, r = point_det[j]
# center.append((int(round(x)), int(round(y))))
# now that we have detected the centre we are going to increase the precision of the detected point
im_centre = Image.fromarray(
255
* ArrayDicom[
height // 2 - 20 : height // 2 + 20, width // 2 - 20 : width // 2 + 20
]
)
im_centre = im_centre.resize(
(im_centre.width * 10, im_centre.height * 10), Image.LANCZOS
)
xdet_int, ydet_int = point_detect_singleImage(im_centre)
xdet = int(width // 2 - 20) + xdet_int / 10
ydet = int(height // 2 - 20) + ydet_int / 10
center.append((xdet, ydet))
textstr = ""
print("center=", center)
fig, ax = viewer(u.range_invert(ArrayDicom_o), dx, dy, center, title, textstr)
return fig, ax, center
def full_imageProcess_noGraph(ArrayDicom_o): # process a full image
ArrayDicom = u.norm01(ArrayDicom_o)
height = np.shape(ArrayDicom)[0]
width = np.shape(ArrayDicom)[1]
blobs_log = blob_log(
ArrayDicom, min_sigma=1, max_sigma=5, num_sigma=20, threshold=0.15
    )  # exclude_border is omitted because some scikit-image versions do not recognize it
center = []
point_det = []
for blob in blobs_log:
y, x, r = blob
point_det.append((x, y, r))
point_det = sorted(
point_det, key=itemgetter(2), reverse=True
    )  # sort by dot radius; the bigger dots sit around the centre and the edges
# we need to find the centre dot as well as the larger dots on the sides of the image
# for j in range(0, len(point_det)):
# x, y, r = point_det[j]
# center.append((int(round(x)), int(round(y))))
# now that we have detected the centre we are going to increase the precision of the detected point
im_centre = Image.fromarray(
255
* ArrayDicom[
height // 2 - 20 : height // 2 + 20, width // 2 - 20 : width // 2 + 20
]
)
im_centre = im_centre.resize(
(im_centre.width * 10, im_centre.height * 10), Image.LANCZOS
)
xdet_int, ydet_int = point_detect_singleImage(im_centre)
xdet = int(width // 2 - 20) + xdet_int / 10
ydet = int(height // 2 - 20) + ydet_int / 10
center.append((xdet, ydet))
# fig, ax=viewer(u.range_invert(ArrayDicom_o), dx, dy, center, title, textstr)
return center
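# point_detect_singleImage: run blob detection on the upsampled crop and
# return the pixel coordinates of the darkest blob (the bib).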
def point_detect_singleImage(imcirclist):
detCenterXRegion = []
detCenterYRegion = []
print("Finding bibs in phantom...")
    grey_img = np.array(imcirclist, dtype=np.uint8)  # convert the PIL image to an 8-bit numpy array
blobs_log = blob_log(
grey_img, min_sigma=15, max_sigma=50, num_sigma=10, threshold=0.05
)
centerXRegion = []
centerYRegion = []
centerRRegion = []
grey_ampRegion = []
for blob in blobs_log:
y, x, r = blob
# center = (int(x), int(y))
centerXRegion.append(x)
centerYRegion.append(y)
centerRRegion.append(r)
grey_ampRegion.append(grey_img[int(y), int(x)])
xindx = int(centerXRegion[np.argmin(grey_ampRegion)])
yindx = int(centerYRegion[np.argmin(grey_ampRegion)])
# rindx = int(centerRRegion[np.argmin(grey_ampRegion)])
detCenterXRegion = xindx
detCenterYRegion = yindx
return detCenterXRegion, detCenterYRegion
# def read_dicom(filename1,filename2,ioption):
def read_dicom(directory):
now = datetime.now()
for subdir, dirs, files in os.walk(directory): # pylint: disable = unused-variable
dirs.clear()
list_title = []
list_gantry_angle = []
list_collimator_angle = []
list_figs = []
center = []
center_g0 = [(0, 0)]
center_g90 = [(0, 0)]
center_g180 = [(0, 0)]
center_g270 = [(0, 0)]
dx = 0
dy = 0
        k = 0  # we collect all the images in ArrayDicom
for file in tqdm(sorted(files)):
| |
return ["\v"]
elif c == "a":
return ["\a"]
elif c == "b":
return ["\b"]
elif c == "e":
return ["\x1b"]
elif c == "s":
return [" "]
elif c in "\r\n":
self.newline(c)
return ["\n"]
elif c == "u":
ch = self.peek()
brace_seen = (ch == "{")
if brace_seen:
self.read()
return self.read_utf_escape(brace_seen=brace_seen, character_escape=character_escape)
elif c == "x":
hex_escape = self.read()
if hex_escape not in string.hexdigits:
self.error()
if self.peek() in string.hexdigits:
hex_escape += self.read()
return [chr(int(hex_escape, 16))]
elif c in string.octdigits:
buf = c
octal = True
while self.peek() in string.digits:
ch2 = self.read()
if ch2 in string.octdigits:
buf += ch2
elif character_escape:
self.error()
else:
octal = False
buf += ch2
if len(buf) > 3 and character_escape:
self.error()
if octal:
codepoint = int(buf, 8) & 255
return [chr(codepoint)]
else:
buf = "0" * (len(buf) - 3) + buf
prefix_idx = 3
for i in xrange(3):
if buf[i] not in string.octdigits:
prefix_idx = i
break
codepoint = int(buf[:prefix_idx], 8) & 255
unicode_chars = [chr(codepoint)]
unicode_chars += buf[prefix_idx:]
return unicode_chars
elif c == "M":
if self.read() != "-":
self.error()
c = self.read()
if c == "\\":
c = self.read_escape()
if len(c) != 1:
self.error()
return [chr(ord(c[0]) | 0x80)]
elif c == self.EOF:
self.error()
else:
return [chr(ord(c) & 0xff | 0x80)]
elif c == "C" or c == "c":
if c == "C" and self.read() != "-":
self.error()
c = self.read()
if c == "?":
return ["\177"]
elif c == self.EOF:
self.error()
else:
if c == "\\":
c = self.read_escape()
if len(c) != 1:
self.error()
[c] = c
return [chr(ord(c) & 0x9f)]
return [c]
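    # Summary of the escape handling above (descriptive note, not from the
    # original source): simple escapes map to their control characters,
    # \xNN / octal / \uNNNN are parsed into codepoints, \M- sets the high
    # bit of the following character (ord(c) | 0x80), and \C- / \c masks
    # the character down to a control code (ord(c) & 0x9f).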
def read_utf_escape(self, brace_seen=False, character_escape=False):
if not brace_seen:
utf_escape = []
for i in xrange(4):
ch = self.read()
if ch not in string.hexdigits:
self.error("invalid Unicode escape")
utf_escape.append(ch)
return self.encode_utf_escape(utf_escape)
elif character_escape:
ch = self.read()
if ch not in string.hexdigits:
self.error("invalid Unicode escape")
res = self.read_delimited_utf_escape(ch)
ch = self.read()
if ch != "}":
self.error("unterminated Unicode escape")
return res
else:
chars = []
ch = self.read()
while ch in string.hexdigits:
chars += self.read_delimited_utf_escape(ch)
ch = self.read()
if ch.isspace():
ch = self.read()
else:
break
if not chars:
self.error("invalid Unicode escape")
if ch != "}":
self.error("unterminated Unicode escape")
return chars
def read_delimited_utf_escape(self, ch):
utf_escape = [ch]
ch = self.read()
while ch in string.hexdigits:
utf_escape.append(ch)
ch = self.read()
self.unread()
return self.encode_utf_escape(utf_escape)
def encode_utf_escape(self, utf_escape):
utf_codepoint = int("".join(utf_escape), 16)
        if utf_codepoint > 0x10FFFF:  # 0x10FFFF is the largest valid Unicode codepoint
self.error("invalid Unicode codepoint (too large)")
return [c for c in unicode_encode_utf_8(unichr(utf_codepoint), 1, "ignore")]
def colon(self, ch, space_seen):
ch2 = self.read()
self.add(ch)
if ch2 == ":":
self.add(ch2)
            if self.is_beg() or self.state == self.EXPR_CLASS or (self.is_arg() and space_seen):
self.state = self.EXPR_BEG
yield self.emit("COLON3")
else:
self.state = self.EXPR_DOT
yield self.emit("COLON2")
elif self.is_end() or ch2.isspace():
self.unread()
self.state = self.EXPR_BEG
yield self.emit("LITERAL_COLON")
else:
if ch2 == "'":
self.str_term = StringTerm(self, "\0", ch2, expand=False)
elif ch2 == '"':
self.str_term = StringTerm(self, "\0", ch2)
else:
self.unread()
self.state = self.EXPR_FNAME
yield self.emit("SYMBEG")
def left_paren(self, ch, space_seen):
self.add(ch)
tok_name = "LPAREN2"
if self.is_beg():
tok_name = "LPAREN"
elif space_seen:
if self.is_arg():
tok_name = "LPAREN_ARG"
self.paren_nest += 1
self.condition_state.stop()
self.cmd_argument_state.stop()
self.state = self.EXPR_BEG
self.label_state = self.EXPR_LABEL
yield self.emit(tok_name)
def right_paren(self, ch):
self.paren_nest -= 1
self.condition_state.restart()
self.cmd_argument_state.restart()
self.state = self.EXPR_ENDFN
yield self.emit("RPAREN")
def left_bracket(self, ch, space_seen):
self.paren_nest += 1
if self.state in [self.EXPR_FNAME, self.EXPR_DOT]: # IS_AFTER_OPERATOR
self.state = self.EXPR_ARG
ch2 = self.read()
if ch2 == "]":
self.add(ch)
self.add(ch2)
ch3 = self.read()
if ch3 == "=":
self.add(ch3)
yield self.emit("ASET")
else:
self.unread()
yield self.emit("AREF")
else:
self.unread()
self.label_state = self.EXPR_LABEL
yield self.emit("LITERAL_LBRACKET")
else:
if (self.is_beg() or (self.is_arg() and space_seen)):
tok = "LBRACK"
else:
tok = "LITERAL_LBRACKET"
self.state = self.EXPR_BEG
self.label_state = self.EXPR_LABEL
self.condition_state.stop()
self.cmd_argument_state.stop()
yield self.emit(tok)
def right_bracket(self, ch):
self.add(ch)
self.paren_nest -= 1
self.condition_state.restart()
self.cmd_argument_state.restart()
self.state = self.EXPR_ENDARG
yield self.emit("RBRACK")
def left_brace(self, ch):
self.add(ch)
if self.left_paren_begin > 0 and self.left_paren_begin == self.paren_nest:
self.state = self.EXPR_BEG
self.left_paren_begin = 0
self.paren_nest -= 1
self.condition_state.stop()
self.cmd_argument_state.stop()
yield self.emit("LAMBEG")
else:
if self.label_state == self.EXPR_LABELED:
tok = "LBRACE"
elif self.is_arg() or self.state in [self.EXPR_END, self.EXPR_ENDFN]:
tok = "LCURLY"
self.command_start = True
elif self.state == self.EXPR_ENDARG:
tok = "LBRACE_ARG"
self.command_start = True
else:
tok = "LBRACE"
self.condition_state.stop()
self.cmd_argument_state.stop()
self.state = self.EXPR_BEG
if tok != "LBRACE_ARG":
self.label_state = self.EXPR_LABEL
yield self.emit(tok)
def right_brace(self, ch):
self.add(ch)
self.condition_state.restart()
self.cmd_argument_state.restart()
self.state = self.EXPR_ENDARG
yield self.emit("RCURLY")
def backtick(self, ch, command_state):
self.add(ch)
if self.state == self.EXPR_FNAME:
self.state = self.EXPR_ENDFN
yield self.emit("BACK_REF2")
elif self.state == self.EXPR_DOT:
self.state = self.EXPR_CMDARG if command_state else self.EXPR_ARG
yield self.emit("BACK_REF2")
else:
self.str_term = StringTerm(self, "\0", "`")
yield self.emit("XSTRING_BEG")
def percent(self, ch, space_seen):
c = self.read()
if self.is_beg():
for token in self.quote(c):
yield token
elif c == "=":
self.add(ch)
self.add(c)
self.state = self.EXPR_BEG
yield self.emit("OP_ASGN")
elif self.is_arg() and space_seen and not c.isspace():
for token in self.quote(c):
yield token
else:
self.unread()
self.add(ch)
self.set_expression_state()
yield self.emit("PERCENT")
def quote(self, ch):
if not ch.isalnum():
begin = ch
ch = "Q"
else:
begin = self.read()
if begin.isalnum():
self.error()
if begin == "(":
end = ")"
elif begin == "[":
end = "]"
elif begin == "{":
end = "}"
elif begin == "<":
end = ">"
else:
end = begin
begin = "\0"
if ch == "Q":
self.str_term = StringTerm(self, begin, end)
yield self.emit("STRING_BEG")
elif ch == "q":
self.str_term = StringTerm(self, begin, end, expand=False)
yield self.emit("STRING_BEG")
elif ch == "x":
self.str_term = StringTerm(self, begin, end)
yield self.emit("XSTRING_BEG")
elif ch == "W":
self.str_term = StringTerm(self, begin, end, expand=True, is_qwords=True)
while True:
ch = self.read()
if not ch.isspace():
break
self.unread()
yield self.emit("WORDS_BEG")
elif ch == "w":
self.str_term = StringTerm(self, begin, end, expand=False, is_qwords=True)
while True:
ch = self.read()
if not ch.isspace():
break
self.unread()
yield self.emit("QWORDS_BEG")
elif ch == "I":
self.str_term = StringTerm(self, begin, end, expand=True, is_qwords=True)
while True:
ch = self.read()
if not ch.isspace():
break
self.unread()
yield self.emit("SYMBOLS_BEG")
elif ch == "i":
self.str_term = StringTerm(self, begin, end, expand=False, is_qwords=True)
while True:
ch = self.read()
if not ch.isspace():
break
self.unread()
yield self.emit("QSYMBOLS_BEG")
elif ch == "r":
self.str_term = StringTerm(self, begin, end, is_regexp=True)
yield self.emit("REGEXP_BEG")
elif ch == "s":
self.str_term = StringTerm(self, begin, end, expand=False)
self.state = self.EXPR_FNAME
yield self.emit("SYMBEG")
else:
raise NotImplementedError("%" + ch)
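    # Quick reference for the %-literal dispatch above (descriptive note, not
    # from the original source): %Q and a bare %(...) -> STRING_BEG
    # (interpolating), %q -> STRING_BEG (non-interpolating), %x -> XSTRING_BEG,
    # %W/%w -> WORDS_BEG/QWORDS_BEG, %I/%i -> SYMBOLS_BEG/QSYMBOLS_BEG,
    # %r -> REGEXP_BEG, %s -> SYMBEG.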
class BaseStringTerm(object):
def __init__(self, lexer, expand):
self.lexer = lexer
self.expand = expand
self.is_end = False
class StringTerm(BaseStringTerm):
def __init__(self, lexer, begin, end_char, expand=True, is_regexp=False, is_qwords=False):
BaseStringTerm.__init__(self, lexer, expand=expand)
self.begin = begin
self.end_char = end_char
self.is_regexp = is_regexp
self.is_qwords = is_qwords
self.nest = 0
def next(self):
if self.is_end:
return self.lexer.emit("STRING_END")
ch = self.lexer.read()
if ch == self.lexer.EOF:
self.lexer.error()
space_seen = False
if self.is_qwords and ch.isspace():
while ch.isspace():
if ch in "\r\n":
self.lexer.newline(ch)
ch = self.lexer.read()
space_seen = True
if ch == self.end_char and self.nest == 0:
return self.end_found()
if space_seen:
self.lexer.unread()
return self.lexer.emit("LITERAL_SPACE")
if self.expand and ch == "#":
self.lexer.add(ch)
ch = self.lexer.read()
if ch in ["$", "@"]:
self.lexer.unread()
return self.lexer.emit("STRING_DVAR")
elif ch == "{":
self.lexer.add(ch)
return self.lexer.emit("STRING_DBEG")
self.lexer.unread()
while True:
ch = self.lexer.read()
if ch == self.lexer.EOF:
break
if self.begin != "\0" and ch == self.begin:
self.lexer.add(ch)
self.nest += 1
elif ch == self.end_char:
if self.nest == 0:
self.lexer.unread()
break
self.lexer.add(ch)
self.nest -= 1
elif self.expand and ch == "#" and self.lexer.peek() not in "\r\n":
ch2 = self.lexer.read()
if ch2 in ["$", "@", "{"]:
self.lexer.unread()
self.lexer.unread()
break
self.lexer.add(ch)
self.lexer.unread()
elif ch == "\\":
escaped_char = self.lexer.read_escape()
if (self.is_regexp and len(escaped_char) == 1 and
escaped_char[0] in string.printable):
self.lexer.add(ch)
self.lexer.add(escaped_char[0])
else:
for ch in escaped_char:
self.lexer.add(ch)
elif self.is_qwords and ch.isspace():
self.lexer.unread()
break
elif ch == self.lexer.EOF:
self.lexer.error()
else:
self.lexer.add(ch)
return self.lexer.emit("STRING_CONTENT")
def end_found(self):
if self.is_qwords:
self.is_end = True
return self.lexer.emit("LITERAL_SPACE")
if self.is_regexp:
flags = ""
while True:
ch = self.lexer.read()
if ch == self.lexer.EOF or not ch.isalpha():
self.lexer.unread()
break
elif ch in "ixmouesn":
if ch not in flags:
flags += ch
self.lexer.add(ch)
else:
self.lexer.error()
return self.lexer.emit("REGEXP_END")
return self.lexer.emit("STRING_END")
class HeredocTerm(BaseStringTerm):
def __init__(self, lexer, marker, last_line, indent, expand=True):
BaseStringTerm.__init__(self, lexer, expand=expand)
self.marker = marker
self.last_line = last_line
self.indent = indent
self.start_of_line = True
def next(self):
if self.is_end:
if self.last_line:
# TODO: there should be a real API for this.
| |
#!/usr/bin/env python
import responses
import requests
import pytest
import time
import copy
from unittest.mock import Mock, patch
from freezegun import freeze_time
from owlet_api.owletapi import OwletAPI
from owlet_api.owlet import Owlet
from owlet_api.owletexceptions import OwletPermanentCommunicationException
from owlet_api.owletexceptions import OwletTemporaryCommunicationException
from owlet_api.owletexceptions import OwletNotInitializedException
LOGIN_PAYLOAD = {
'access_token': '<PASSWORD>',
'expires_in': 86400
}
DEVICE_PAYLOAD = {
'product_name': 'a',
'model': 'b',
'dsn': 'c',
'oem_model': 'd',
'sw_version': 'e',
'template_id': 1,
'mac': 'g',
'unique_hardware_id': None,
'hwsig': 'h',
'lan_ip': 'i',
'connected_at': 'j',
'key': 1,
'lan_enabled': False,
'has_properties': True,
'product_class': None,
'connection_status': 'k',
'lat': '1.0',
'lng': '2.0',
'locality': 'l',
'device_type': 'm'
}
DEVICE_ATTRIBUTES = [
{
'property':{
'type':'Property',
'name':'AGE_MONTHS_OLD',
'base_type':'integer',
'read_only':False,
'direction':'input',
'scope':'user',
'data_updated_at':'2018-12-30T09:43:23Z',
'key':42738116,
'device_key':24826059,
'product_name':'Owlet Baby Monitors',
'track_only_changes':True,
'display_name':'Age (Months)',
'host_sw_version':False,
'time_series':False,
'derived':False,
'app_type':None,
'recipe':None,
'value':None,
'denied_roles':[
],
'ack_enabled':False,
'retention_days':30
}
},
{
'property':{
'type':'Property',
'name':'ALRTS_DISABLED',
'base_type':'boolean',
'read_only':False,
'direction':'input',
'scope':'user',
'data_updated_at':'2018-12-30T09:43:23Z',
'key':42738165,
'device_key':24826059,
'product_name':'Owlet Baby Monitors',
'track_only_changes':True,
'display_name':'Disable Alerts',
'host_sw_version':False,
'time_series':False,
'derived':False,
'app_type':None,
'recipe':None,
'value':None,
'denied_roles':[
],
'ack_enabled':False,
'retention_days':30
}
},{
'property':{
'type':'Property',
'name':'APP_ACTIVE',
'base_type':'boolean',
'read_only':False,
'direction':'input',
'scope':'user',
'data_updated_at':'2018-12-30T09:43:23Z',
'key':42738119,
'device_key':24826059,
'product_name':'Owlet Baby Monitors',
'track_only_changes':False,
'display_name':'App Active',
'host_sw_version':False,
'time_series':False,
'derived':False,
'app_type':None,
'recipe':None,
'value':0,
'denied_roles':[
],
'ack_enabled':False,
'retention_days':30,
'ack_status':None,
'ack_message':None,
'acked_at':None
}
},{
'property':{
'type':'Property',
'name':'LOGGED_DATA_CACHE',
'base_type':'boolean',
'read_only':False,
'direction':'input',
'scope':'user',
'data_updated_at':'2018-12-30T09:43:23Z',
'key':42738119,
'device_key':24826059,
'product_name':'Owlet Baby Monitors',
'track_only_changes':False,
'display_name':'App Active',
'host_sw_version':False,
'time_series':False,
'derived':False,
'app_type':None,
'recipe':None,
'value':'http://de.mo/file',
'denied_roles':[
],
'ack_enabled':False,
'retention_days':30,
'ack_status':None,
'ack_message':None,
'acked_at':None
}
}
]
DOWNLOAD_DATA = {
'datapoint':{
'updated_at':'2018-05-09T10:41:00Z',
'created_at':'2018-05-09T10:41:00Z',
'echo':False,
'closed':True,
'metadata':{
},
'value':'https://ads-field.aylanetworks.com/apiv1/devices/24826059/properties/LOGGED_DATA_CACHE/datapoints/76ce9810-5375-11e8-e7a5-6450803806ca.json',
'created_at_from_device':None,
'file':'https://ayla-device-field-production-1a2039d9.s3.amazonaws.com/X?AWSAccessKeyId=Y&Expires=1234&Signature=Z'
}
}
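# A hedged, minimal sketch (not one of the real test cases in this file) of how
# the `responses` library is used below: inside a function decorated with
# @responses.activate, registered URLs are intercepted and the canned payload
# is returned instead of a real HTTP call. The URL here is a placeholder.
@responses.activate
def _example_mocked_get():
    responses.add(responses.GET, 'https://example.invalid/ping',
                  json={'ok': True}, status=200)
    assert requests.get('https://example.invalid/ping').json() == {'ok': True}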
@responses.activate
def test_owlet_ok():
# Initialize OwletAPI
api = OwletAPI()
# Instantiate the device
device = Owlet(api, DEVICE_PAYLOAD)
assert device.product_name == "a"
assert device.model == "b"
assert device.dsn == "c"
assert device.sw_version == "e"
assert device.mac == "g"
assert device.hwsig == "h"
assert device.lan_ip == "i"
assert device.connected_at == "j"
assert device.connection_status == "k"
assert device.lat == 1.0
assert device.lon == 2.0
assert device.device_type == "m"
# and so on and so forth
@responses.activate
def test_update_ok():
responses.add(responses.POST, 'https://user-field.aylanetworks.com/users/sign_in.json',
json=LOGIN_PAYLOAD, status=200)
# Owlet will pull the properties of this particular device
responses.add(responses.GET, 'https://ads-field.aylanetworks.com/apiv1/dsns/c/properties',
json=DEVICE_ATTRIBUTES, status=200)
# Initialize OwletAPI
api = OwletAPI("<EMAIL>", "moped")
api.login()
# Instantiate the device
device = Owlet(api, DEVICE_PAYLOAD)
    # Update the device
device.update()
assert device.get_property('AGE_MONTHS_OLD').value == None
assert device.get_property('ALRTS_DISABLED').value == None
assert device.get_property('APP_ACTIVE').value == 0
@responses.activate
def test_update_noresponse():
responses.add(responses.POST, 'https://user-field.aylanetworks.com/users/sign_in.json',
json=LOGIN_PAYLOAD, status=200)
# Initialize OwletAPI
api = OwletAPI("<EMAIL>", "moped")
api.login()
# Instantiate the device
device = Owlet(api, DEVICE_PAYLOAD)
    # Update the device
with pytest.raises(OwletTemporaryCommunicationException) as info:
device.update()
assert 'Server Request failed - no response' in str(info.value)
@responses.activate
def test_update_return_code():
# Owlet will pull the properties of this particular device
responses.add(responses.GET, 'https://ads-field.aylanetworks.com/apiv1/dsns/c/properties',
json=DEVICE_ATTRIBUTES, status=500)
responses.add(responses.POST, 'https://user-field.aylanetworks.com/users/sign_in.json',
json=LOGIN_PAYLOAD, status=200)
# Initialize OwletAPI
api = OwletAPI("<EMAIL>", "moped")
api.login()
# Instantiate the device
device = Owlet(api, DEVICE_PAYLOAD)
    # Update the device
with pytest.raises(OwletTemporaryCommunicationException) as info:
device.update()
assert 'Server Request failed - status code' in str(info.value)
@responses.activate
def test_update_invalid_json():
# Owlet will pull the properties of this particular device
responses.add(responses.GET, 'https://ads-field.aylanetworks.com/apiv1/dsns/c/properties',
body="INVALID", status=200)
responses.add(responses.POST, 'https://user-field.aylanetworks.com/users/sign_in.json',
json=LOGIN_PAYLOAD, status=200)
# Initialize OwletAPI
api = OwletAPI("<EMAIL>", "moped")
api.login()
# Instantiate the device
device = Owlet(api, DEVICE_PAYLOAD)
    # Update the device
with pytest.raises(OwletTemporaryCommunicationException) as info:
device.update()
assert 'Update failed - JSON error' in str(info.value)
@responses.activate
def test_update_repeat():
my_device_attributes = copy.deepcopy(DEVICE_ATTRIBUTES)
my_device_attributes[0]['property']['value'] = 'DEADBEEF'
my_device_attributes[0]['property']['data_updated_at'] = '2018-12-30T09:43:28Z'
# Owlet will pull the properties of this particular device
responses.add(responses.GET, 'https://ads-field.aylanetworks.com/apiv1/dsns/c/properties',
json=DEVICE_ATTRIBUTES, status=200)
responses.add(responses.GET, 'https://ads-field.aylanetworks.com/apiv1/dsns/c/properties',
json=my_device_attributes, status=200)
responses.add(responses.POST, 'https://user-field.aylanetworks.com/users/sign_in.json',
json=LOGIN_PAYLOAD, status=200)
# Initialize OwletAPI
api = OwletAPI("<EMAIL>", "moped")
api.login()
# Instantiate the device
device = Owlet(api, DEVICE_PAYLOAD)
    # Update the device
device.update()
device.update()
assert device.get_property('AGE_MONTHS_OLD').value == 'DEADBEEF'
assert device.get_property('DOES_NOT_EXIST') == None
properties = device.get_properties()
assert properties['AGE_MONTHS_OLD'].value == 'DEADBEEF'
assert device.get_update_interval() == 5
@responses.activate
def test_reactivate_ok():
responses.add(responses.POST, 'https://user-field.aylanetworks.com/users/sign_in.json',
json=LOGIN_PAYLOAD, status=200)
responses.add(responses.GET, 'https://ads-field.aylanetworks.com/apiv1/dsns/c/properties',
json=DEVICE_ATTRIBUTES, status=200)
responses.add(responses.POST, 'https://ads-field.aylanetworks.com/apiv1/properties/42738119/datapoints',
status=201)
# Initialize OwletAPI
api = OwletAPI("<EMAIL>", "moped")
api.login()
# Instantiate the device
device = Owlet(api, DEVICE_PAYLOAD)
    # Update the device
device.update()
device.reactivate()
@responses.activate
def test_reactivate_fail_noattributes():
responses.add(responses.POST, 'https://user-field.aylanetworks.com/users/sign_in.json',
json=LOGIN_PAYLOAD, status=200)
# Initialize OwletAPI
api = OwletAPI("<EMAIL>", "moped")
api.login()
# Instantiate the device
device = Owlet(api, DEVICE_PAYLOAD)
with pytest.raises(OwletNotInitializedException) as info:
device.reactivate()
assert 'Initialize first - no properties' in str(info.value)
@responses.activate
def test_reactivate_fail_wrongattributes():
responses.add(responses.POST, 'https://user-field.aylanetworks.com/users/sign_in.json',
json=LOGIN_PAYLOAD, status=200)
my_device_attributes = copy.deepcopy(DEVICE_ATTRIBUTES)
my_device_attributes[0]['property']['name'] = 'DEADBEEF1'
my_device_attributes[1]['property']['name'] = 'DEADBEEF2'
my_device_attributes[2]['property']['name'] = 'DEADBEEF3'
responses.add(responses.GET, 'https://ads-field.aylanetworks.com/apiv1/dsns/c/properties',
json=my_device_attributes, status=200)
# Initialize OwletAPI
api = OwletAPI("<EMAIL>", "moped")
api.login()
# Instantiate the device
device = Owlet(api, DEVICE_PAYLOAD)
    # Update the device
device.update()
with pytest.raises(OwletNotInitializedException) as info:
device.reactivate()
assert 'Initialize first - missing property' in str(info.value)
@responses.activate
def test_reactivate_fail_noconnection():
responses.add(responses.POST, 'https://user-field.aylanetworks.com/users/sign_in.json',
json=LOGIN_PAYLOAD, status=200)
responses.add(responses.GET, 'https://ads-field.aylanetworks.com/apiv1/dsns/c/properties',
json=DEVICE_ATTRIBUTES, status=200)
# Initialize OwletAPI
api = OwletAPI("<EMAIL>", "moped")
api.login()
# Instantiate the device
device = Owlet(api, DEVICE_PAYLOAD)
    # Update the device
device.update()
with pytest.raises(OwletTemporaryCommunicationException) as info:
device.reactivate()
assert 'Server Request failed - no response' in str(info.value)
@responses.activate
def test_reactivate_fail_statuscode():
responses.add(responses.POST, 'https://user-field.aylanetworks.com/users/sign_in.json',
json=LOGIN_PAYLOAD, status=200)
responses.add(responses.GET, 'https://ads-field.aylanetworks.com/apiv1/dsns/c/properties',
json=DEVICE_ATTRIBUTES, status=200)
responses.add(responses.POST, 'https://ads-field.aylanetworks.com/apiv1/properties/42738119/datapoints',
status=500)
# Initialize OwletAPI
api = OwletAPI("<EMAIL>", "moped")
api.login()
# Instantiate the device
device = Owlet(api, DEVICE_PAYLOAD)
    # Update the device
device.update()
with pytest.raises(OwletTemporaryCommunicationException) as info:
device.reactivate()
assert 'Server Request failed, return code' in str(info.value)
@responses.activate
def test_download_logged_data_ok():
responses.add(responses.POST, 'https://user-field.aylanetworks.com/users/sign_in.json',
json=LOGIN_PAYLOAD, status=200)
responses.add(responses.GET, 'https://ads-field.aylanetworks.com/apiv1/dsns/c/properties',
json=DEVICE_ATTRIBUTES, status=200)
responses.add(responses.GET, 'http://de.mo/file',
json=DOWNLOAD_DATA, status=200)
responses.add(responses.GET, 'https://ayla-device-field-production-1a2039d9.s3.amazonaws.com/X?AWSAccessKeyId=Y&Expires=1234&Signature=Z',
status=200)
# Initialize OwletAPI
api = OwletAPI("<EMAIL>", "moped")
api.login()
# Instantiate the device
device = Owlet(api, DEVICE_PAYLOAD)
    # Update the device
device.update()
device.download_logged_data()
@responses.activate
def test_download_logged_data_fail_noinit():
responses.add(responses.POST, 'https://user-field.aylanetworks.com/users/sign_in.json',
json=LOGIN_PAYLOAD, status=200)
# Initialize OwletAPI
api = OwletAPI("<EMAIL>", "moped")
api.login()
# Instantiate the device
device = Owlet(api, DEVICE_PAYLOAD)
print(device)
with pytest.raises(OwletNotInitializedException) as info:
device.download_logged_data()
assert 'Initialize first - no properties' in str(info.value)
@responses.activate
def test_download_logged_data_fail_noattribute():
responses.add(responses.POST, 'https://user-field.aylanetworks.com/users/sign_in.json',
json=LOGIN_PAYLOAD, status=200)
my_device_attributes = copy.deepcopy(DEVICE_ATTRIBUTES)
my_device_attributes[0]['property']['name'] = 'DEADBEEF3'
my_device_attributes[1]['property']['name'] = 'DEADBEEF3'
my_device_attributes[2]['property']['name'] = 'DEADBEEF3'
my_device_attributes[3]['property']['name'] = 'DEADBEEF3'
responses.add(responses.GET, 'https://ads-field.aylanetworks.com/apiv1/dsns/c/properties',
json=my_device_attributes, status=200)
# Initialize OwletAPI
api = OwletAPI("<EMAIL>", "moped")
api.login()
# Instantiate the device
device = Owlet(api, DEVICE_PAYLOAD)
    # Update the device
device.update()
with pytest.raises(OwletNotInitializedException) as info:
device.download_logged_data()
assert 'Initialize first - missing property' in str(info.value)
@responses.activate
def test_download_logged_data_fail_noconnection():
responses.add(responses.POST, 'https://user-field.aylanetworks.com/users/sign_in.json',
json=LOGIN_PAYLOAD, status=200)
responses.add(responses.GET, 'https://ads-field.aylanetworks.com/apiv1/dsns/c/properties',
json=DEVICE_ATTRIBUTES, status=200)
# Initialize OwletAPI
api = OwletAPI("<EMAIL>", "moped")
api.login()
# Instantiate the device
device = Owlet(api, DEVICE_PAYLOAD)
    # Update the device
device.update()
with pytest.raises(OwletTemporaryCommunicationException) as info:
device.download_logged_data()
assert 'Server Request failed - no answer' in str(info.value)
@responses.activate
def test_download_logged_data_fail_statuscode():
responses.add(responses.POST, 'https://user-field.aylanetworks.com/users/sign_in.json',
json=LOGIN_PAYLOAD, status=200)
responses.add(responses.GET, 'https://ads-field.aylanetworks.com/apiv1/dsns/c/properties',
json=DEVICE_ATTRIBUTES, status=200)
responses.add(responses.GET, 'http://de.mo/file',
json=DOWNLOAD_DATA, status=500)
# Initialize OwletAPI
api = OwletAPI("<EMAIL>", "moped")
api.login()
# Instantiate the device
device = Owlet(api, DEVICE_PAYLOAD)
    # Update the device
device.update()
with pytest.raises(OwletTemporaryCommunicationException) as info:
device.download_logged_data()
assert 'Server Request failed - return code' in str(info.value)
@responses.activate
def test_download_logged_data_fail_invalidjson():
responses.add(responses.POST, 'https://user-field.aylanetworks.com/users/sign_in.json',
json=LOGIN_PAYLOAD, status=200)
responses.add(responses.GET, 'https://ads-field.aylanetworks.com/apiv1/dsns/c/properties',
json=DEVICE_ATTRIBUTES, status=200)
responses.add(responses.GET, 'http://de.mo/file',
body="INVALID", status=200)
# Initialize OwletAPI
api = OwletAPI("<EMAIL>", "moped")
api.login()
# Instantiate the device
device = Owlet(api, DEVICE_PAYLOAD)
    # Update the device
device.update()
with pytest.raises(OwletTemporaryCommunicationException) as info:
device.download_logged_data()
assert 'Request failed - JSON invalid' in str(info.value)
@responses.activate
def test_download_logged_data_fail_incompletejson():
responses.add(responses.POST, 'https://user-field.aylanetworks.com/users/sign_in.json',
json=LOGIN_PAYLOAD, status=200)
responses.add(responses.GET, 'https://ads-field.aylanetworks.com/apiv1/dsns/c/properties',
json=DEVICE_ATTRIBUTES, status=200)
my_download_data = copy.deepcopy(DOWNLOAD_DATA)
my_download_data['datapoint'] = {}
responses.add(responses.GET, 'http://de.mo/file',
json=my_download_data, status=200)
# Initialize OwletAPI
api = OwletAPI("<EMAIL>", "moped")
api.login()
# Instantiate the device
device = Owlet(api, DEVICE_PAYLOAD)
    # Update the device
device.update()
with pytest.raises(OwletTemporaryCommunicationException) as info:
device.download_logged_data()
assert 'Request failed - JSON incomplete' in str(info.value)
@responses.activate
def test_download_logged_data_fail_nodownload():
responses.add(responses.POST, 'https://user-field.aylanetworks.com/users/sign_in.json',
json=LOGIN_PAYLOAD, status=200)
responses.add(responses.GET, 'https://ads-field.aylanetworks.com/apiv1/dsns/c/properties',
json=DEVICE_ATTRIBUTES, status=200)
responses.add(responses.GET, 'http://de.mo/file',
json=DOWNLOAD_DATA, status=200)
# Initialize OwletAPI
api = OwletAPI("<EMAIL>", "moped")
api.login()
# Instantiate the device
device = Owlet(api, DEVICE_PAYLOAD)
    # Update the device
device.update()
with pytest.raises(OwletTemporaryCommunicationException) as info:
device.download_logged_data()
assert 'Download Request failed - no answer' in str(info.value)
@responses.activate
def test_download_logged_data_fail_nodownloadcode():
responses.add(responses.POST, 'https://user-field.aylanetworks.com/users/sign_in.json',
json=LOGIN_PAYLOAD, status=200)
responses.add(responses.GET, 'https://ads-field.aylanetworks.com/apiv1/dsns/c/properties',
json=DEVICE_ATTRIBUTES, status=200)
responses.add(responses.GET, 'http://de.mo/file',
json=DOWNLOAD_DATA, status=200)
responses.add(responses.GET, 'https://ayla-device-field-production-1a2039d9.s3.amazonaws.com/X?AWSAccessKeyId=Y&Expires=1234&Signature=Z',
status=500)
# Initialize OwletAPI
api = OwletAPI("<EMAIL>", "moped")
api.login()
# Instantiate the device
device = Owlet(api, DEVICE_PAYLOAD)
    # Update the device
device.update()
with pytest.raises(OwletTemporaryCommunicationException) as info:
device.download_logged_data()
assert 'Download Request failed - | |
'dir', None)
>>> split_component_path(dst1)
>>> ('myrepo1', 'dir/subdir', None)
>>> split_component_path(dst2)
>>> ('myrepo2', 'dir/subdir', 'file')
>>> split_component_path(dst3)
>>> ('myrepo3', 'dir/subdir/etc', 'file.ext')
:param component_path: the Nexus component path, as described above.
:type component_path: str
:return: tuple of (repository_name, directory, filename). If the given
component_path doesn't represent a file, filename is set to None.
:rtype: tuple
"""
repository, path_fragments = self._pop_repository(component_path)
filename = self._pop_filename(component_path, path_fragments)
directory = self._pop_directory(path_fragments)
return repository, directory, filename
def _upload_file_raw(self, src_file, dst_repo, dst_dir, dst_file):
"""Process upload_file() for raw repositories"""
if dst_dir is None or dst_dir.startswith(self._remote_sep):
raise exception.NexusClientInvalidRepositoryPath(
'Destination path does not contain a directory, which is '
'required by raw repositories')
params = {'repository': dst_repo}
# FIXME me
if not isinstance(src_file, io.BytesIO):
src_file = open(src_file, 'rb')
files = {'raw.asset1': src_file.read()}
data = {
'raw.directory': dst_dir,
'raw.asset1.filename': dst_file,
}
response = self._post(
'components', files=files, data=data, params=params)
if response.status_code != 204:
raise exception.NexusClientAPIError(
'Uploading to {dst_repo}. '
'Reason: {response.reason}'.format(**locals()))
def _upload_file_yum(self, src_file, dst_repo, dst_dir, dst_file):
"""Process upload_file() for yum repositories"""
dst_dir = dst_dir or self._remote_sep
repository_path = self._remote_sep.join(
['repository', dst_repo, dst_dir, dst_file])
with open(src_file, 'rb') as fh:
response = self._put(
repository_path, data=fh, service_url=self.base_url)
if response.status_code != 200:
raise exception.NexusClientAPIError(
'Uploading to {repository_path}. '
'Reason: {response.reason}'.format(**locals()))
def upload_file(self, src_file, dst_repo, dst_dir, dst_file=None):
"""
        Uploads a single file to a Nexus repository under the directory and
file name specified. If the destination file name isn't given, the
source file name is used.
:param src_file: path to the local file to be uploaded.
:param dst_repo: name of the Nexus repository.
:param dst_dir: directory under dst_repo to place file in.
:param dst_file: destination file name.
"""
try:
repository = self.repositories.get_raw_by_name(dst_repo)
except IndexError:
raise exception.NexusClientInvalidRepository(dst_repo)
# TODO: support all repository formats
repo_format = repository['format']
if repo_format not in SUPPORTED_FORMATS_FOR_UPLOAD:
raise NotImplementedError(
'Upload to {} repository not supported'.format(repo_format))
if dst_file is None:
dst_file = os.path.basename(src_file)
_upload = getattr(self, '_upload_file_' + repo_format)
_upload(src_file, dst_repo, dst_dir, dst_file)
def _get_upload_fileset(self, src_dir, recurse=True):
"""
Walks the given directory and collects files to be uploaded. If
recurse option is False, only the files on the root of the directory
will be returned.
:param src_dir: location of files
:param recurse: If false, only the files on the root of src_dir
are returned
:return: file set to be used with upload_directory
:rtype: set
"""
source_files = set()
for dirname, dirnames, filenames in os.walk(src_dir):
if not recurse:
del dirnames[:]
source_files.update(
os.path.relpath(os.path.join(dirname, f), src_dir)
for f in filenames)
return source_files
def _get_upload_subdirectory(self, dst_dir, file_path, flatten=False):
# empty dst_dir because most repo formats, aside from raw, allow it
sub_directory = dst_dir or ''
sep = self._remote_sep
if not flatten:
dirname = os.path.dirname(file_path)
if sub_directory.endswith(sep) or dirname.startswith(sep):
sep = ''
sub_directory += '{sep}{dirname}'.format(**locals())
return sub_directory
def upload_directory(self, src_dir, dst_repo, dst_dir, **kwargs):
"""
Uploads all files in a directory, honouring options flatten and
recurse.
:param src_dir: path to local directory to be uploaded
:param dst_repo: destination repository
:param dst_dir: destination directory in dst_repo
:return: number of files uploaded
:rtype: int
"""
file_set = self._get_upload_fileset(
src_dir, kwargs.get('recurse', True))
file_count = len(file_set)
file_set = progress.bar(file_set, expected_size=file_count)
for relative_filepath in file_set:
file_path = os.path.join(src_dir, relative_filepath)
sub_directory = self._get_upload_subdirectory(
dst_dir, file_path, kwargs.get('flatten', False))
self.upload_file(file_path, dst_repo, sub_directory)
return file_count
def _upload_dir_or_file(self, file_or_dir, dst_repo, dst_dir, dst_file,
**kwargs):
"""
Helper for self.upload() to call the correct upload method according to
the source given by the user.
:param file_or_dir: location or file or directory to be uploaded.
:param dst_repo: destination repository in Nexus.
:param dst_dir: destination directory in dst_repo.
:param dst_file: destination file name.
:return: number of files uploaded.
"""
if os.path.isdir(file_or_dir):
if dst_file is None:
return self.upload_directory(file_or_dir, dst_repo, dst_dir,
**kwargs)
else:
raise exception.NexusClientInvalidRepositoryPath(
'Not allowed to upload a directory to a file')
self.upload_file(file_or_dir, dst_repo, dst_dir, dst_file)
return 1
def upload(self, source, destination, **kwargs):
"""
Process an upload. The source must be either a local file name or
directory. The flatten and recurse options are honoured for
directory uploads.
The destination must be a valid Nexus 3 repository path, including the
repository name as the first component of the path.
:param source: location of file or directory to be uploaded.
:param destination: destination path in Nexus, including repository
name and, if required, directory name (e.g. raw repos require a
directory).
:param recurse: do not process sub directories for uploads to remote
:param flatten: Flatten directory structure by not reproducing local
directory structure remotely
:return: number of files uploaded.
"""
repo, directory, filename = self.split_component_path(destination)
upload_count = self._upload_dir_or_file(
source, repo, directory, filename, **kwargs)
return upload_count
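    # Hedged usage sketch (not in the original source); `client` is a
    # hypothetical instance of this class and the repository name and paths
    # are placeholders:
    #   client.upload('dist/app.tar.gz', 'myraw/releases/v1/app.tar.gz')
    #   client.upload('./build', 'myraw/releases/v1', flatten=True)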
def upload_stream(self, source, destination):
"""
Process an upload. The source must be a file-like object.
The destination must be a valid Nexus 3 repository path, including the
repository name as the first component of the path.
:param source: file-like object to be uploaded.
:param destination: destination path in Nexus, including repository
name and, if required, directory name (e.g. raw repos require a
directory).
:return: number of files uploaded.
"""
repo, directory, filename = self.split_component_path(destination)
upload_count = self.upload_file(
source, repo, directory, filename)
return upload_count
def _remote_path_to_local(
self, remote_src, local_dst, flatten, create=True):
"""
Takes the remote path of an asset (without the repository name), the
desired destination in the local file system, and creates the fully
qualified path according to the instance settings.
        If flatten is True, the remote directory structure isn't reproduced locally.
If the remote is a directory, we'll always assume the destination is
also a directory, even if it doesn't end with a /.
:param remote_src: path to the artefact as reported by the artefact
service (i.e.: the `path` attribute of an asset object).
:param local_dst: desired location in the local filesystem for the
remote_path.
:param create: whether or not to create the local destination file or
directory.
:return: the local path to be used.
"""
# FIXME: use of multiple .. in the local_dst isn't resolved correctly
remote_isdir = remote_src.endswith(self._remote_sep)
# force destination to be a directory if the remote is a directory
destination_isdir = (remote_isdir or
local_dst.endswith('.') or
local_dst.endswith('..') or
local_dst.endswith(self._local_sep))
local_relative = remote_src.replace(self._remote_sep, self._local_sep)
if flatten:
local_relative = os.path.basename(local_relative)
# remote=file, destination=file
if not (remote_isdir or destination_isdir):
# if files are given, rename the source to match destination
local_relative_dir = os.path.dirname(local_relative)
dst_file_name = os.path.basename(local_dst)
local_dst = os.path.dirname(local_dst)
if flatten:
local_relative = dst_file_name
else:
local_relative = os.path.join(
local_relative_dir, dst_file_name)
destination_path = py.path.local(local_dst)
local_absolute_path = destination_path.join(local_relative)
if create:
local_absolute_path.ensure(dir=remote_isdir)
return str(local_absolute_path)
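    # Hedged illustration (not in the original source) of the mapping above,
    # assuming '/' separators on both sides:
    #   remote_src='dir/sub/file.txt', local_dst='out/'          -> out/dir/sub/file.txt
    #   the same remote_src with flatten=True                    -> out/file.txt
    #   remote_src='dir/sub/file.txt', local_dst='out/new.txt'   -> out/dir/sub/new.txt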
def _local_hash_matches_remote(
self, file_path, remote_hash, hash_name='sha1'):
"""
True if the hash for file_path matches remote_hash for the given
algorithm
"""
local_hash = nexus_util.calculate_hash(hash_name, file_path)
return local_hash == remote_hash
def _should_skip_download(
self, download_url, download_path, artefact, nocache):
"""False when nocache is set or local file is out-of-date"""
if nocache:
try:
LOG.debug('Removing {} because nocache is set\n'.format(
download_path))
os.remove(download_path)
except FileNotFoundError:
pass
return False
for hash_name in ['sha1', 'md5']:
h = artefact.get('checksum', {}).get(hash_name)
if h is None:
continue
if self._local_hash_matches_remote(download_path, h, hash_name):
LOG.debug('Skipping {download_url} because local copy '
'{download_path} is up-to-date\n'.format(**locals()))
return True
return False
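    # Hedged illustration (not in the original source): the up-to-date check
    # above delegates to nexus_util.calculate_hash; an equivalent stand-alone
    # sha1 comparison would be
    #   import hashlib
    #   with open(download_path, 'rb') as f:
    #       is_current = hashlib.sha1(f.read()).hexdigest() == artefact['checksum']['sha1']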
def _request_file(self, download_url):
response = self._get(download_url)
if response.status_code != 200:
            sys.stderr.write(str(response.__dict__))
raise exception.DownloadError(
'Downloading from {download_url}. '
'Reason: {response.reason}'.format(**locals()))
return response
def download_file(self, download_url, destination):
"""Download an asset from Nexus artefact repository to local
file system.
:param download_url: fully-qualified URL to asset being downloaded.
        :param destination: local file path to write the downloaded asset
            to; any existing file at this location will be overwritten.
:return:
"""
response = self._request_file(download_url)
with open(destination, 'wb') as fd:
LOG.debug('Writing {download_url} to {destination}\n'.format(
**locals()))
for chunk in response.iter_content():
fd.write(chunk)
def _download_stream(self, download_url):
"""Download an asset from Nexus artefact repository to in-memory stream.
:param download_url: fully-qualified URL to asset being downloaded.
:return:
"""
response = self._request_file(download_url)
return io.BytesIO(response.content) # FIXME me
def download(self, source, destination, **kwargs):
"""Process a download. The source must be a valid Nexus 3
repository path, including the repository name as the first component
of the path.
The destination must be a local file name or directory.
If a | |
:type publisherID: string
:param name: the publisher name to be set
:type name: string
:rtype: ``True``, if successful, ``False`` otherwise
"""
data = {}
if name is not None:
data.update(name=name)
# https://api.relayr.io/publishers/<id>
url = '{0}/publishers/{1}'.format(self.host, publisherID)
_, data = self.perform_request('PATCH', url, data=data, headers=self.headers)
return data
# ..............................................................................
# Devices
# ..............................................................................
def get_device_configuration(self, deviceID):
"""
Get configuration, default values and schema of a specific device.
Example result::
{
"version": "1.0.0",
"configuration": {
"defaultValues": {
"frequency": 1000
},
"schema": {
"required": [
"frequency"
],
"type": "object",
"properties": {
"frequency": {
"minimum": 5,
"type": "integer",
"description": "Frequency of the sensor updates in milliseconds"
}
},
"title": "Relayr configuration schema"
}
}
}
:param deviceID: the device UUID
:type deviceID: string
"""
# https://api.relayr.io/devices/<deviceID>/firmware
url = '{0}/devices/{1}/firmware'.format(self.host, deviceID)
_, data = self.perform_request('GET', url, headers=self.headers)
return data
def post_device_configuration(self, deviceID, frequency):
"""
        Modify the configuration of a specific device, facilitated by a schema.
:param deviceID: the device UUID
:type deviceID: string
:param frequency: the number of milliseconds between two sensor transmissions
:type frequency: integer
"""
data = {'frequency': frequency}
# https://api.relayr.io/devices/<deviceID>/configuration
url = '{0}/devices/{1}/configuration'.format(self.host, deviceID)
_, data = self.perform_request('POST', url, data=data, headers=self.headers)
return data
def get_public_devices(self, meaning=''):
"""
Get list of all public devices on the relayr platform filtered by meaning.
:param meaning: required meaning in the device model's ``readings`` attribute
:type meaning: string
:rtype: list of dicts, each representing a relayr device
"""
# https://api.relayr.io/devices/public
url = '{0}/devices/public'.format(self.host)
if meaning:
url += '?meaning={0}'.format(meaning)
_, data = self.perform_request('GET', url)
return data
def post_device(self, name, ownerID, modelID, firmwareVersion):
"""
Register a new device on the relayr platform.
:param name: the device name
:type name: string
:param ownerID: the device owner's UUID
:type ownerID: string
:param modelID: the device model's UUID
:type modelID: string
:param firmwareVersion: the device's firmware version
:type firmwareVersion: string
        :rtype: dict representing the newly registered device
"""
data = {
"name": name,
"owner": ownerID,
"model": modelID,
"firmwareVersion": firmwareVersion
}
# https://api.relayr.io/devices
url = '{0}/devices'.format(self.host)
_, data = self.perform_request('POST', url, data=data, headers=self.headers)
return data
def post_device_wb2(self, name, ownerID, modelID, firmwareVersion, mac, transmitterId):
"""
Register a new device on the relayr platform.
:param name: the device name
:type name: string
:param ownerID: the device owner's UUID
:type ownerID: string
:param modelID: the device model's UUID
:type modelID: string
:param firmwareVersion: the device's firmware version
:type firmwareVersion: string
        :rtype: dict representing the newly registered device
"""
data = {
"name": name,
"owner": ownerID,
"model": modelID,
"firmwareVersion": firmwareVersion,
"integrationType": "wunderbar2",
"mac": mac,
"transmitterId": transmitterId
}
# https://api.relayr.io/devices
url = '{0}/devices'.format(self.host)
_, data = self.perform_request('POST', url, data=data, headers=self.headers)
return data
def get_device(self, deviceID):
"""
Get information about a specific device.
:param deviceID: the device UUID
:type deviceID: string
:rtype: a dict with fields containing information about the device
Raises ``exceptions.RelayrApiException`` for invalid UUIDs or missing
credentials.
"""
# https://api.relayr.io/devices/%s
url = '{0}/devices/{1}'.format(self.host, deviceID)
_, data = self.perform_request('GET', url, headers=self.headers)
return data
def patch_device(self, deviceID, name=None, description=None, modelID=None, public=None):
"""
Update one or more attributes of a specific device.
:param deviceID: the device UUID
:type deviceID: string
:param name: the device name
:type name: string
:param description: the device description
:type description: string
:param modelID: the device model UUID
:type modelID: string
:param public: the device state (public or not)
:type public: boolean
:rtype: a dict with fields containing information about the device
Raises ``exceptions.RelayrApiException`` for invalid UUIDs or missing
credentials.
"""
data = {
"name": name,
"description": description,
"model": modelID,
"public": public
}
# filter data (compatible with Python 2.6)
data1 = {}
for k, v in data.items():
if v != None:
data1[k] = v
data = data1
# https://api.relayr.io/devices/%s
url = '{0}/devices/{1}'.format(self.host, deviceID)
_, data = self.perform_request('PATCH', url, data=data, headers=self.headers)
return data
def delete_device(self, deviceID):
"""
Delete a specific device from the relayr platform.
:param deviceID: the device UUID
:type deviceID: string
:rtype: a dict with fields containing information about the device
"""
# https://api.relayr.io/devices/%s
url = '{0}/devices/{1}'.format(self.host, deviceID)
_, data = self.perform_request('DELETE', url, headers=self.headers)
return data
def get_device_apps(self, deviceID):
"""
Get all the apps connected to a specific device.
:param deviceID: the device UUID
:type deviceID: string
:rtype: a list of dicts with information about apps
"""
# https://api.relayr.io/devices/<deviceID>/apps
url = '{0}/devices/{1}/apps'.format(self.host, deviceID)
_, data = self.perform_request('GET', url, headers=self.headers)
return data
def post_channel(self, deviceID, transport):
"""
Create a new channel to let the current user receive device data.
The return value is a channel UUID plus credentials to connect to it.
:param deviceID: the device UUID
:type deviceID: string
:param transport: transport for channel (mqtt, websockets, etc.)
:type transport: string
:rtype: dict with channel credentials to connect to the device
Example result (for transport='mqtt')::
{
'channelId': u'62e2ceb8-a63f-11e4-8792-6c400890724a',
'credentials': {
'password': '<PASSWORD>',
'topic': '/v1/62e2ceb8-a63f-11e4-8792-6c400890724a',
'user': '6<PASSWORD>-6c<PASSWORD>'
}
}
"""
url = '{0}/channels'.format(self.host)
data = {'deviceId': deviceID, 'transport': transport}
_, res = self.perform_request('POST', url,
data=data, headers=self.headers)
return res
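    # Hedged usage sketch (not in the original source); `api` is a hypothetical
    # instance of this class:
    #   creds = api.post_channel(deviceID, 'mqtt')
    #   topic = creds['credentials']['topic']        # e.g. '/v1/<channelId>'
    #   user = creds['credentials']['user']
    #   password = creds['credentials']['password']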
def delete_channel_id(self, channelID):
"""
Delete an existing specific channel.
:param channelID: the UUID of the channel
:type channelID: string
:rtype: None
Raises ``exceptions.RelayrApiException`` for non-existing channelID.
"""
url = '{0}/channels/{1}'.format(self.host, channelID)
_, res = self.perform_request('DELETE', url, headers=self.headers)
return res
def delete_channels_device_transport(self, deviceID=None, transport=None):
"""
Delete all existing channels for the given device ID and/or transport.
:param deviceID: the device UUID
:type deviceID: string
:param transport: transport for channel (mqtt, websockets, etc.)
:type transport: string
:rtype: list of deleted channelIDs
"""
url = '{0}/channels'.format(self.host)
data = {}
if deviceID is not None:
data['deviceId'] = deviceID
if transport is not None:
data['transport'] = transport
        _, res = self.perform_request('DELETE', url,
            data=data, headers=self.headers)
        return res
def get_device_channels(self, deviceID):
"""
Get all existing channels for a specific device.
:param deviceID: the device UUID
:type deviceID: string
:rtype: dict with a list of attributes for each existing channel
Example output::
{
'deviceId': '...',
'channels': [
{
'channelId': '...',
'transport': 'mqtt',
'appId': '...'
},
{
'channelId': '...',
'transport': 'mqtt',
'appId': '...'
}
]
}
"""
url = '{0}/devices/{1}/channels'.format(self.host, deviceID)
_, res = self.perform_request('GET', url, headers=self.headers)
return res
def post_device_command_led(self, deviceID, data):
"""
Send a command to a specific device's LED.
:param deviceID: the device's UUID
:type deviceID: string
:param data: the data to be sent, here {'cmd': true/false}
:type data: dict
:rtype: dict with connection credentials
"""
# https://api.relayr.io/devices/<deviceID>/cmd/led
url = '{0}/devices/{1}/cmd/led'.format(self.host, deviceID)
_, data = self.perform_request('POST', url, data=data, headers=self.headers)
return data
def post_device_command(self, deviceID, command):
"""
Send a command to a specific device.
:param deviceID: the device's UUID
:type deviceID: string
:param command: the command to be sent
:type command: dict
:rtype: dict with connection credentials
"""
# https://api.relayr.io/devices/<deviceID>/cmd
url = '{0}/devices/{1}/cmd'.format(self.host, deviceID)
_, data = self.perform_request('POST', url, data=command, headers=self.headers)
return data
def post_device_data(self, deviceID, data):
"""
Send JSON formatted data to a device (eg. temperature readings).
:param deviceID: the device's UUID
:type deviceID: string
:param data: the command data
:type data: anything serializable as JSON
:rtype: string
"""
# https://api.relayr.io/devices/<device_id>/data
url = '{0}/devices/{1}/data'.format(self.host, deviceID)
_, data = self.perform_request('POST', url, data=data, headers=self.headers)
return data
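    # Hedged usage sketch (not in the original source); the payload layout is a
    # hypothetical example, not a documented schema:
    #   api.post_device_data(deviceID, {'meaning': 'temperature', 'value': 21.5})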
def post_device_app(self, deviceID, appID):
"""
Connect a specific device to a specific app.
:param deviceID: the device's UUID
:type deviceID: string
:param appID: the app's UUID
:type appID: string
:rtype: dict
Credentials for data reception are returned as part of the response.
"""
# {{relayrAPI}}/devices/{{deviceID}}/apps/{{appID}}
url = '{0}/devices/{1}/apps/{2}'.format(self.host, deviceID, appID)
_, data = self.perform_request('POST', url, headers=self.headers)
return data
def delete_device_app(self, deviceID, appID):
"""
Disconnect a specific device from a specific app.
:param deviceID: the device's UUID
:type deviceID: string
:param appID: the app's UUID
:type appID: string
"""
# {{relayrAPI}}/devices/{{deviceID}}/apps/{{appID}}
url = '{0}/devices/{1}/apps/{2}'.format(self.host, deviceID, appID)
_, data = self.perform_request('DELETE', url, headers=self.headers)
return data
# ..............................................................................
# Device models
# ..............................................................................
def get_public_device_models(self):
"""
Get list of all device models available on the relayr platform.
:rtype: list of dicts, each representing a relayr device model
"""
# https://api.relayr.io/device-models
url = '{0}/device-models'.format(self.host)
_, data = self.perform_request('GET', url)
return data
def get_device_model(self, | |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
SELFIES: a robust representation of semantically constrained graphs with an
example application in chemistry (https://arxiv.org/abs/1905.13741)
by <NAME>, <NAME>, <NAME>, <NAME>, <NAME>
Variational Auto Encoder (VAE) for chemistry
comparing SMILES and SELFIES representation using reconstruction
quality, diversity and latent space validity as metrics of
interest
v0.1.0 -- 04. August 2019
information:
ML framework: pytorch
chemistry framework: RDKit
settings.yml
contains link to data file containing SMILES encoded molecule, and
hyperparameters of neural network model and training
get_selfie_and_smiles_encodings_for_dataset
generate complete encoding (inclusive alphabet) for SMILES and SELFIES given a data file
VAE_encode
fully connection, 3 layer neural network - encodes a one-hot representation
of molecule (in SMILES or SELFIES representation) to latent space
VAE_decode
decodes point in latent space using an RNN
latent_space_quality
samples points from latent space, decodes them into molecules,
calculates chemical validity (using RDKit's MolFromSmiles), calculates
diversity
environment.yml
shows dependencies
Particularly important: RDKit and SELFIES (via 'pip install selfies')
tested at:
- Python 3.7.1
- Python 3.6.8
CPU and GPU supported
Note: semantic validity is only implemented so far for atoms described in
Table 2 of our paper. This corresponds to (non-ionic) QM9. Other chemical
constraints might generate additional mistakes. Syntactical constraints
are always fulfilled
- Aromatic Symbols: they have additional semantic constraints, thus to reduce
invalidity due to aromatic constraints, one can
de-aromatize molecules (aromatic symbols are simplifications
in SMILES). Otherwise, one could add the semantic constraints
(this could be done in an automated way, but is not implemented yet)
For comments, bug reports or feature ideas, please send an email to
<EMAIL> and <EMAIL>
"""
import os, sys, time
import numpy as np
import torch
import pandas as pd
import selfies
import yaml
from torch import nn
from random import shuffle
sys.path.append('VAE_dependencies')
from data_loader import multiple_smile_to_hot, multiple_selfies_to_hot, len_selfie, split_selfie
from rdkit.Chem import MolFromSmiles
from rdkit import rdBase
rdBase.DisableLog('rdApp.error')
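def _example_selfies_roundtrip(smiles='CCO'):
    """Hedged standalone sketch (not part of the original script): encode a
    SMILES string to SELFIES and decode it back, assuming the selfies package
    exposes encoder()/decoder() as the decoder() call further below does."""
    selfies_string = selfies.encoder(smiles)   # SMILES -> SELFIES
    return selfies.decoder(selfies_string)     # SELFIES -> SMILES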
def _make_dir(directory):
os.makedirs(directory)
def save_models(encoder, decoder, epoch):
out_dir = './saved_models/{}'.format(epoch)
_make_dir(out_dir)
torch.save(encoder, '{}/E'.format(out_dir))
torch.save(decoder, '{}/D'.format(out_dir))
class VAE_encode(nn.Module):
def __init__(self, layer_1d, layer_2d, layer_3d, latent_dimension):
"""
Fully Connected layers to encode molecule to latent space
"""
super(VAE_encode, self).__init__()
# Reduce dimension upto second last layer of Encoder
self.encode_nn = nn.Sequential(
nn.Linear(len_max_molec1Hot, layer_1d),
nn.ReLU(),
nn.Linear(layer_1d, layer_2d),
nn.ReLU(),
nn.Linear(layer_2d, layer_3d),
nn.ReLU()
)
# Latent space mean
self.encode_mu = nn.Linear(layer_3d, latent_dimension)
# Latent space variance
self.encode_log_var = nn.Linear(layer_3d, latent_dimension)
def reparameterize(self, mu, log_var):
"""
This trick is explained well here:
https://stats.stackexchange.com/a/16338
"""
std = torch.exp(0.5 * log_var)
eps = torch.randn_like(std)
return eps.mul(std).add_(mu)
def forward(self, x):
"""
        Pass through the encoder.
"""
# Get results of encoder network
h1 = self.encode_nn(x)
# latent space
mu = self.encode_mu(h1)
log_var = self.encode_log_var(h1)
# Reparameterize
z = self.reparameterize(mu, log_var)
return z, mu, log_var
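def _example_reparameterize(mu, log_var):
    """Hedged standalone sketch (not part of the original model): the
    reparameterization trick used in VAE_encode draws z ~ N(mu, sigma^2) as
    z = mu + sigma * eps with eps ~ N(0, I), so the sampling step stays
    differentiable with respect to mu and log_var."""
    std = torch.exp(0.5 * log_var)   # sigma = exp(log_var / 2)
    eps = torch.randn_like(std)      # randomness isolated in eps
    return mu + eps * std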
class VAE_decode(nn.Module):
def __init__(self, latent_dimension, gru_stack_size, gru_neurons_num):
"""
        Decoder: maps points in the latent space back to a molecule string via a GRU.
"""
super(VAE_decode, self).__init__()
self.gru_stack_size = gru_stack_size
self.gru_neurons_num = gru_neurons_num
# Simple Decoder
self.decode_RNN = nn.GRU(
input_size = latent_dimension,
hidden_size = gru_neurons_num,
num_layers = gru_stack_size,
batch_first = False)
self.decode_FC = nn.Sequential(
nn.Linear(gru_neurons_num, len_alphabet),
)
def init_hidden(self, batch_size = 1):
weight = next(self.parameters())
return weight.new_zeros(self.gru_stack_size, batch_size, self.gru_neurons_num)
def forward(self, z, hidden):
"""
        A forward pass through the decoder RNN.
"""
# Decode
l1, hidden = self.decode_RNN(z, hidden)
decoded = self.decode_FC(l1) # fully connected layer
return decoded, hidden
def is_correct_smiles(smiles):
    """
    Using RDKit to calculate whether a molecule is syntactically and semantically valid.
    """
    if smiles == "":
        return 0
    try:
        # MolFromSmiles returns None (it does not raise) for invalid SMILES
        return int(MolFromSmiles(smiles, sanitize=True) is not None)
    except Exception:
        return 0
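# e.g. (hedged) is_correct_smiles('CCO') == 1, while is_correct_smiles('C(C') == 0,
# because MolFromSmiles returns None for an unclosed branch.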
def sample_latent_space(latent_dimension):
model_encode.eval()
model_decode.eval()
fancy_latent_point=torch.normal(torch.zeros(latent_dimension),torch.ones(latent_dimension))
hidden = model_decode.init_hidden()
gathered_atoms = []
for ii in range(len_max_molec): # runs over letters from molecules (len=size of largest molecule)
fancy_latent_point = fancy_latent_point.reshape(1, 1, latent_dimension)
fancy_latent_point=fancy_latent_point.to(device)
decoded_one_hot, hidden = model_decode(fancy_latent_point, hidden)
decoded_one_hot = decoded_one_hot.flatten()
decoded_one_hot = decoded_one_hot.detach()
soft = nn.Softmax(0)
decoded_one_hot = soft(decoded_one_hot)
_,max_index=decoded_one_hot.max(0)
gathered_atoms.append(max_index.data.cpu().numpy().tolist())
model_encode.train()
model_decode.train()
return gathered_atoms
def latent_space_quality(latent_dimension, encoding_alphabet, sample_num):
total_correct = 0
all_correct_molecules = set()
print(f"latent_space_quality:"
f" Take {sample_num} samples from the latent space")
for sample_i in range(1, sample_num + 1):
molecule_pre = ''
for ii in sample_latent_space(latent_dimension):
molecule_pre += encoding_alphabet[ii]
molecule = molecule_pre.replace(' ', '')
if type_of_encoding == 1: # if SELFIES, decode to SMILES
molecule = selfies.decoder(molecule)
if is_correct_smiles(molecule):
total_correct += 1
all_correct_molecules.add(molecule)
return total_correct, len(all_correct_molecules)
def quality_in_validation_set(data_valid):
x = [i for i in range(len(data_valid))] # random shuffle input
shuffle(x)
data_valid = data_valid[x]
quality_list=[]
for batch_iteration in range(min(25,num_batches_valid)): # batch iterator
current_smiles_start, current_smiles_stop = batch_iteration * batch_size, (batch_iteration + 1) * batch_size
inp_smile_hot = data_valid[current_smiles_start : current_smiles_stop]
inp_smile_encode = inp_smile_hot.reshape(inp_smile_hot.shape[0], inp_smile_hot.shape[1] * inp_smile_hot.shape[2])
latent_points, mus, log_vars = model_encode(inp_smile_encode)
latent_points = latent_points.reshape(1, batch_size, latent_points.shape[1])
hidden = model_decode.init_hidden(batch_size = batch_size)
decoded_one_hot = torch.zeros(batch_size, inp_smile_hot.shape[1], inp_smile_hot.shape[2]).to(device)
for seq_index in range(inp_smile_hot.shape[1]):
decoded_one_hot_line, hidden = model_decode(latent_points, hidden)
decoded_one_hot[:, seq_index, :] = decoded_one_hot_line[0]
decoded_one_hot = decoded_one_hot.reshape(batch_size * inp_smile_hot.shape[1], inp_smile_hot.shape[2])
_, label_atoms = inp_smile_hot.max(2)
label_atoms = label_atoms.reshape(batch_size * inp_smile_hot.shape[1])
# assess reconstruction quality
_, decoded_max_indices = decoded_one_hot.max(1)
_, input_max_indices = inp_smile_hot.reshape(batch_size * inp_smile_hot.shape[1], inp_smile_hot.shape[2]).max(1)
differences = 1. - torch.abs(decoded_max_indices - input_max_indices)
differences = torch.clamp(differences, min = 0., max = 1.).double()
quality = 100. * torch.mean(differences)
quality = quality.detach().cpu().numpy()
quality_list.append(quality)
    return np.mean(quality_list)
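# Illustrative sketch (not part of the original script): the "quality" metric
# computed above is simply the percentage of sequence positions whose decoded
# argmax equals the input argmax, i.e. per-character reconstruction accuracy.
# Minimal, self-contained demonstration on toy tensors:
def _reconstruction_quality_demo():
    import torch
    decoded = torch.tensor([[0.1, 0.9], [0.8, 0.2], [0.3, 0.7]])  # 3 positions, 2 symbols
    target = torch.tensor([[0.0, 1.0], [1.0, 0.0], [1.0, 0.0]])
    _, decoded_idx = decoded.max(1)
    _, target_idx = target.max(1)
    differences = 1. - torch.abs(decoded_idx - target_idx)
    differences = torch.clamp(differences, min=0., max=1.).double()
    return 100. * torch.mean(differences)  # -> ~66.67 (2 of 3 characters match)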
def train_model(data_train, data_valid, num_epochs, latent_dimension, lr_enc, lr_dec, KLD_alpha, sample_num, encoding_alphabet):
"""
Train the Variational Auto-Encoder
"""
print('num_epochs: ',num_epochs)
# initialize an instance of the model
optimizer_encoder = torch.optim.Adam(model_encode.parameters(), lr=lr_enc)
optimizer_decoder = torch.optim.Adam(model_decode.parameters(), lr=lr_dec)
data_train = data_train.clone().detach()
data_train=data_train.to(device)
#print(data)
    quality_valid_list = [0, 0, 0, 0]
for epoch in range(num_epochs):
x = [i for i in range(len(data_train))] # random shuffle input
shuffle(x)
data_train = data_train[x]
start = time.time()
for batch_iteration in range(num_batches_train): # batch iterator
loss, recon_loss, kld = 0., 0., 0.
# manual batch iterations
current_smiles_start, current_smiles_stop = batch_iteration * batch_size, (batch_iteration + 1) * batch_size
inp_smile_hot = data_train[current_smiles_start : current_smiles_stop]
# reshaping for efficient parallelization
inp_smile_encode = inp_smile_hot.reshape(inp_smile_hot.shape[0], inp_smile_hot.shape[1] * inp_smile_hot.shape[2])
latent_points, mus, log_vars = model_encode(inp_smile_encode)
latent_points = latent_points.reshape(1, batch_size, latent_points.shape[1])
# standard Kullback–Leibler divergence
kld += -0.5 * torch.mean(1. + log_vars - mus.pow(2) - log_vars.exp())
            # initialize the hidden internal state of the RNN (the RNN has two inputs and two outputs:
            #    input:  latent space & hidden state
            #    output: one-hot encoding of one character of the molecule & hidden state)
            # the hidden state acts as the internal memory
hidden = model_decode.init_hidden(batch_size = batch_size)
# decoding from RNN N times, where N is the length of the largest molecule (all molecules are padded)
decoded_one_hot = torch.zeros(batch_size, inp_smile_hot.shape[1], inp_smile_hot.shape[2]).to(device)
for seq_index in range(inp_smile_hot.shape[1]):
decoded_one_hot_line, hidden = model_decode(latent_points, hidden)
decoded_one_hot[:, seq_index, :] = decoded_one_hot_line[0]
decoded_one_hot = decoded_one_hot.reshape(batch_size * inp_smile_hot.shape[1], inp_smile_hot.shape[2])
_, label_atoms = inp_smile_hot.max(2)
label_atoms = label_atoms.reshape(batch_size * inp_smile_hot.shape[1])
# we use cross entropy of expected symbols and decoded one-hot
criterion = torch.nn.CrossEntropyLoss()
recon_loss += criterion(decoded_one_hot, label_atoms)
loss += recon_loss + KLD_alpha * kld
            # perform backpropagation
optimizer_encoder.zero_grad()
optimizer_decoder.zero_grad()
loss.backward(retain_graph=True)
nn.utils.clip_grad_norm_(model_decode.parameters(), 0.5)
optimizer_encoder.step()
optimizer_decoder.step()
if batch_iteration % 30 == 0:
end = time.time()
# assess reconstruction quality
_, decoded_max_indices = decoded_one_hot.max(1)
_, input_max_indices = inp_smile_hot.reshape(batch_size * inp_smile_hot.shape[1], inp_smile_hot.shape[2]).max(1)
differences = 1. - torch.abs(decoded_max_indices - input_max_indices)
differences = torch.clamp(differences, min = 0., max = 1.).double()
quality = 100. * torch.mean(differences)
quality = quality.detach().cpu().numpy()
qualityValid=quality_in_validation_set(data_valid)
new_line = 'Epoch: %d, Batch: %d / %d,\t(loss: %.4f\t| quality: %.4f | quality_valid: %.4f)\tELAPSED TIME: %.5f' % (epoch, batch_iteration, num_batches_train, loss.item(), quality, qualityValid, end - start)
print(new_line)
start = time.time()
qualityValid = quality_in_validation_set(data_valid)
quality_valid_list.append(qualityValid)
        # only measure molecule validity if reconstruction quality improved
        quality_increase = len(quality_valid_list) - np.argmax(quality_valid_list)  # epochs since the best validation quality
if quality_increase == 1 and quality_valid_list[-1] > 50.:
corr, unique = latent_space_quality(latent_dimension,sample_num = sample_num, encoding_alphabet=encoding_alphabet)
else:
corr, unique = -1., -1.
new_line = 'Validity: %.5f %% | Diversity: %.5f %% | Reconstruction: %.5f %%' % (corr * 100. / sample_num, unique * 100. / sample_num, qualityValid)
print(new_line)
with open('results.dat', 'a') as content:
content.write(new_line + '\n')
if quality_valid_list[-1] < 70. and epoch > 200:
break
if quality_increase > 20:
print('Early stopping criteria')
break
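# Illustrative note (not part of the original script): the early-stopping
# counter above, len(quality_valid_list) - np.argmax(quality_valid_list), is
# the number of epochs since the best validation quality was observed
# (1 means the most recent epoch is the best so far; > 20 triggers the break).
# Toy example, assuming only numpy:
#   history = [0, 0, 0, 0, 55.2, 61.7, 60.9, 60.1]
#   len(history) - np.argmax(history)   # -> 3, i.e. two epochs without improvement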
def get_selfie_and_smiles_encodings_for_dataset(filename_data_set_file_smiles):
"""
Returns encoding, alphabet and length of largest molecule in SMILES and SELFIES, given a file containing SMILES molecules.
input:
csv file with molecules. Column's name must be 'smiles'.
output:
- selfies encoding
- selfies alphabet
- longest selfies string
- smiles encoding (equivalent to file content)
- smiles alphabet (character based)
- longest smiles string
"""
df = pd.read_csv(filename_data_set_file_smiles)
smiles_list = np.asanyarray(df.smiles)
"""
This module provides an implementation of the Paxos algorithm as
a set of composable classes.
Copied, with minor alterations for use with Python 3 and JSON from
https://github.com/cocagne/python-composable-paxos
The MIT License (MIT)
Copyright (c) 2015 <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
# ProposalID
#
# In order for the Paxos algorithm to function, all proposal ids must be
# unique. A simple way to ensure this is to include the proposer's unique
# id in the proposal id.
#
# Python tuples allow the proposal number and the UID to be combined in a
# manner that supports comparison in the expected manner:
#
# (4,'C') > (4,'B') > (3,'Z')
#
# Named tuples from the collections module support all of the regular
# tuple operations but additionally allow access to the contents by
# name so the numeric component of the proposal ID may be referred to
# via 'proposal_id.number' instead of 'proposal_id[0]'.
#
# ProposalID = collections.namedtuple('ProposalID', ['number', 'uid'])
# Replaced the ProposalID namedtuple with a class, because the json package turns namedtuples into lists.
class ProposalID(object):
def __init__(self, number, uid):
self.number = number
self.uid = uid
def __gt__(self, other):
if other is None:
return True
elif isinstance(other, ProposalID):
return (self.number, self.uid) > (other.number, other.uid)
elif isinstance(other, list):
return [self.number, self.uid] > other
def __ge__(self, other):
if other is None:
return True
elif isinstance(other, ProposalID):
return (self.number, self.uid) >= (other.number, other.uid)
elif isinstance(other, list):
return [self.number, self.uid] >= other
def __eq__(self, other):
if other is None:
return False
elif isinstance(other, ProposalID):
return (self.number, self.uid) == (other.number, other.uid)
elif isinstance(other, list):
return [self.number, self.uid] == other
def __repr__(self):
return 'ProposalID(number={}, uid="{}")'.format(self.number, self.uid)
def __hash__(self):
return hash((self.number, self.uid))
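# Illustrative note (not in the original module): the comparison operators above
# preserve the tuple-style ordering described in the comment block, e.g.
#
#   ProposalID(4, 'C') > ProposalID(4, 'B') > ProposalID(3, 'Z')   # True
#   ProposalID(2, 'A') >= [2, 'A']                                 # True (list form after a JSON round-trip)
#
# so proposal ids deserialized from JSON as plain lists still compare correctly
# against live ProposalID instances.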
class PaxosMessage(object):
"""
Base class for all messages defined in this module
"""
from_uid = None # Set by subclass constructor
class Prepare(PaxosMessage):
"""
Prepare messages should be broadcast to all Acceptors.
"""
def __init__(self, from_uid, proposal_id):
self.from_uid = from_uid
self.proposal_id = proposal_id
class Nack(PaxosMessage):
"""
NACKs are technically optional though few practical applications will
want to omit their use. They are used to signal a proposer that their
current proposal number is out of date and that a new one should be
chosen. NACKs may be sent in response to both Prepare and Accept
messages
"""
def __init__(self, from_uid, proposer_uid, proposal_id, promised_proposal_id):
self.from_uid = from_uid
self.proposal_id = proposal_id
self.proposer_uid = proposer_uid
self.promised_proposal_id = promised_proposal_id
class Promise(PaxosMessage):
"""
Promise messages should be sent to at least the Proposer specified in
the proposer_uid field
"""
def __init__(
self, from_uid, proposer_uid, proposal_id, last_accepted_id, last_accepted_value
):
self.from_uid = from_uid
self.proposer_uid = proposer_uid
self.proposal_id = proposal_id
self.last_accepted_id = last_accepted_id
self.last_accepted_value = last_accepted_value
class Accept(PaxosMessage):
"""
Accept messages should be broadcast to all Acceptors
"""
def __init__(self, from_uid, proposal_id, proposal_value):
self.from_uid = from_uid
self.proposal_id = proposal_id
self.proposal_value = proposal_value
class Accepted(PaxosMessage):
"""
Accepted messages should be sent to all Learners
"""
def __init__(self, from_uid, proposal_id, proposal_value):
self.from_uid = from_uid
self.proposal_id = proposal_id
self.proposal_value = proposal_value
class Resolution(PaxosMessage):
"""
Optional message used to indicate that the final value has been selected
"""
def __init__(self, from_uid, value):
self.from_uid = from_uid
self.value = value
class InvalidMessageError(Exception):
"""
Thrown if a PaxosMessage subclass is passed to a class that does not
support it
"""
class MessageHandler(object):
def receive(self, msg):
"""
Message dispatching function. This function accepts any PaxosMessage subclass and calls
the appropriate handler function
"""
handler = getattr(self, "receive_" + msg.__class__.__name__.lower(), None)
if handler is None:
raise InvalidMessageError(
"Receiving class does not support messages of type: "
+ msg.__class__.__name__
)
return handler(msg)
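# Illustrative sketch (not in the original module): receive() dispatches on the
# lowercased message class name, so a handler only needs to define
# receive_<messagename> methods for the messages it supports.
class _LoggingHandler(MessageHandler):  # hypothetical example, not part of the library
    def receive_prepare(self, msg):
        return 'saw Prepare {} from {}'.format(msg.proposal_id, msg.from_uid)
# _LoggingHandler().receive(Prepare('A', ProposalID(1, 'A')))          # calls receive_prepare
# _LoggingHandler().receive(Accept('A', ProposalID(1, 'A'), 'value'))  # raises InvalidMessageError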
class Proposer(MessageHandler):
"""
The 'leader' attribute is a boolean value indicating the Proposer's
belief in whether or not it is the current leader. This is not a reliable
value as multiple nodes may simultaneously believe themselves to be the
leader.
"""
leader = False
proposed_value = None
proposal_id = None
highest_accepted_id = None
promises_received = None
nacks_received = None
current_prepare_msg = None
current_accept_msg = None
def __init__(self, network_uid, quorum_size):
self.network_uid = network_uid
self.quorum_size = quorum_size
self.proposal_id = ProposalID(0, network_uid)
self.highest_proposal_id = ProposalID(0, network_uid)
def propose_value(self, value):
"""
Sets the proposal value for this node iff this node is not already aware of
a previous proposal value. If the node additionally believes itself to be
the current leader, an Accept message will be returned
"""
if self.proposed_value is None:
self.proposed_value = value
if self.leader:
self.current_accept_msg = Accept(
self.network_uid, self.proposal_id, value
)
return self.current_accept_msg
def prepare(self) -> Prepare:
"""
Returns a new Prepare message with a proposal id higher than
that of any observed proposals. A side effect of this method is
to clear the leader flag if it is currently set.
"""
self.leader = False
self.promises_received = set()
self.nacks_received = set()
self.proposal_id = ProposalID(
self.highest_proposal_id.number + 1, self.network_uid
)
self.highest_proposal_id = self.proposal_id
self.current_prepare_msg = Prepare(self.network_uid, self.proposal_id)
return self.current_prepare_msg
def observe_proposal(self, proposal_id):
"""
Optional method used to update the proposal counter as proposals are
seen on the network. When co-located with Acceptors and/or Learners,
this method may be used to avoid a message delay when attempting to
assume leadership (guaranteed NACK if the proposal number is too low).
This method is automatically called for all received Promise and Nack
messages.
"""
if proposal_id > self.highest_proposal_id:
self.highest_proposal_id = proposal_id
def receive_nack(self, msg):
"""
Returns a new Prepare message if the number of Nacks received reaches
a quorum.
"""
self.observe_proposal(msg.promised_proposal_id)
if msg.proposal_id == self.proposal_id and self.nacks_received is not None:
self.nacks_received.add(msg.from_uid)
if len(self.nacks_received) == self.quorum_size:
return self.prepare() # Lost leadership or failed to acquire it
def receive_promise(self, msg):
"""
Returns an Accept messages if a quorum of Promise messages is achieved
"""
self.observe_proposal(msg.proposal_id)
if (
not self.leader
and msg.proposal_id == self.proposal_id
and msg.from_uid not in self.promises_received
):
self.promises_received.add(msg.from_uid)
if (
self.highest_accepted_id is None
or msg.last_accepted_id > self.highest_accepted_id
):
self.highest_accepted_id = msg.last_accepted_id
if msg.last_accepted_value is not None:
self.proposed_value = msg.last_accepted_value
if len(self.promises_received) == self.quorum_size:
self.leader = True
if self.proposed_value is not None:
self.current_accept_msg = Accept(
self.network_uid, self.proposal_id, self.proposed_value
)
return self.current_accept_msg
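# Illustrative sketch (not in the original module): driving a Proposer by hand
# through the Prepare/Promise phase. With quorum_size=2 the second Promise
# grants leadership and, because a value was already proposed, yields an Accept:
#
#   p = Proposer('A', quorum_size=2)
#   p.propose_value('cheese')                                            # None: not leader yet
#   prep = p.prepare()                                                   # Prepare with ProposalID(1, 'A')
#   p.receive_promise(Promise('B', 'A', prep.proposal_id, None, None))   # None: only 1 of 2 promises
#   msg = p.receive_promise(Promise('C', 'A', prep.proposal_id, None, None))
#   # msg is Accept(from_uid='A', proposal_id=ProposalID(1, 'A'), proposal_value='cheese')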
class Acceptor(MessageHandler):
"""
Acceptors act as the fault-tolerant memory for Paxos. To ensure correctness
    in the presence of failure, Acceptors must be able to remember the promises
they've made even in the event of power outages. Consequently, any changes
to the promised_id, accepted_id, and/or accepted_value must be persisted to
stable media prior to sending promise and accepted messages.
When an Acceptor instance is composed alongside a Proposer instance, it
is generally advantageous to call the proposer's observe_proposal()
method when methods of this class are called.
"""
def __init__(
self, network_uid, promised_id=None, accepted_id=None, accepted_value=None
):
"""
promised_id, accepted_id, and accepted_value should be provided if and only if this
instance is recovering from persistent state.
"""
self.network_uid = network_uid
self.promised_id = promised_id
self.accepted_id = accepted_id
self.accepted_value = accepted_value
def receive_prepare(self, msg):
"""
Returns either a Promise or a Nack in response. The Acceptor's state must be persisted to disk
prior to transmitting the Promise message.
"""
if self.promised_id is None or msg.proposal_id >= self.promised_id:
self.promised_id = msg.proposal_id
return Promise(
self.network_uid,
msg.from_uid,
self.promised_id,
self.accepted_id,
self.accepted_value,
)
else:
return Nack(
self.network_uid, msg.from_uid, msg.proposal_id, self.promised_id
)
def receive_accept(self, msg):
"""
Returns either an Accepted or Nack message in response. The Acceptor's state must be persisted
| |
== 0: # voc_size starts with 0 before first training
return 0
vec0 = K.slice(embedding_matrix, [0, 0], [1, em_dims[1]]) # zero vector only,
vecs = K.slice(embedding_matrix, [1, 0], [em_dims[0]-1, em_dims[1]]) # all vectors except zero
# make sure only vec0 is affected, i.e. vecs change only via global loss:
vecs = K.stop_gradient(K.mean(vecs, axis=0))
# scale to make gradients benign:
underspecification = 1 * K.sum(K.square(vec0 - vecs)) # c='\0' ~ mean of others
norms = K.sum(K.square(embedding_matrix), axis=1)
norm0 = K.ones_like(norms) # square of target (non-zero) weight norm
lowrank = 0.01 * K.sum(K.square(norm0 - norms)) # generalization/sparsity
# make sure loss contribution is zero after training/validation
# (so loss corresponds to perplexity in prediction/evaluation):
return K.in_train_phase(lowrank + underspecification, 0.)
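    # Illustrative note (not in the original source): the two penalty terms above are
    #   underspecification ~ || vec0 - mean(vecs) ||^2          (ties the padding vector to the mean embedding)
    #   lowrank            ~ 0.01 * sum_i (1 - ||w_i||^2)^2     (pushes squared embedding norms towards 1)
    # and K.in_train_phase() zeroes their contribution outside training.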
def train(self, data, val_data=None):
'''Train model on text files.
Pass the character sequences in all `data` files to the loop
training model weights with stochastic gradient descent.
Derive meta-data for context variables from file names.
It will open file by file, repeating over the complete set (epoch)
as long as validation error does not increase in between (early stopping).
        Validate on a random fraction of the file set, split off automatically beforehand.
(Data are split by window/file in stateless/stateful mode.)
If `val_data` is given, then do not split, but use those files
for validation instead (regardless of mode).
'''
from keras.callbacks import EarlyStopping, TerminateOnNaN
from .callbacks import StopSignalCallback, ResetStatesCallback
# uncomment the following lines to enter tfdbg during training:
#from keras import backend as K
#from tensorflow.python import debug as tf_debug
#K.set_session(tf_debug.LocalCLIDebugWrapperSession(K.get_session()))
assert self.status > 0 # must be configured already, but incremental training is allowed
assert self.incremental is False # no explicit state transfer
# extract character mapping and calculate epoch size:
training_data, validation_data, split, training_epoch_size, validation_epoch_size, total_size, steps = self._split_data(data, val_data)
self.logger.info('training on %d files / %d batches per epoch / %d character tokens for %d character types',
len(training_data), training_epoch_size, total_size, self.voc_size)
# update mapping-specific layers:
self.reconfigure_for_mapping()
# fit model
earlystopping = EarlyStopping(monitor='val_loss', patience=3, verbose=1, restore_best_weights=True)
callbacks = [earlystopping, TerminateOnNaN(),
StopSignalCallback(logger=self.logger)]
if self.stateful:
self.reset_cb = ResetStatesCallback(logger=self.logger)
callbacks.append(self.reset_cb)
history = self.model.fit_generator(
self._gen_data_from_files(training_data, steps, split=split, train=True, repeat=True),
steps_per_epoch=training_epoch_size, epochs=100,
workers=1, use_multiprocessing=False, # True makes communication with reset callback impossible
validation_data=self._gen_data_from_files(validation_data, steps, split=split, train=False, repeat=True),
validation_steps=validation_epoch_size,
verbose=1, callbacks=callbacks)
# set state
if 'val_loss' in history.history:
self.logger.info('training finished with val_loss %f', min(history.history['val_loss']))
if (np.isnan(history.history['val_loss'][-1]) or
earlystopping.stopped_epoch == 0):
# recover weights (which TerminateOnNaN prevented EarlyStopping from doing)
self.model.set_weights(earlystopping.best_weights)
self.status = 2
else:
self.logger.critical('training failed')
self.status = 1
def _split_data(self, data, val_data):
'''Read text files and split into training vs validation, count batches and update char mapping.'''
assert self.status >= 1
shuffle(data) # random order of files (because generators cannot shuffle within files)
total_size = 0
chars = set(self.mapping[0].keys())
if self.stateful: # we must split file-wise in stateful mode
steps = self.length
if val_data:
training_data = data
validation_data = val_data
else:
split = ceil(len(data)*self.validation_split) # split position in randomized file list
training_data, validation_data = data[:-split], data[-split:] # reserve last files for validation
assert training_data, "stateful mode needs at least one file for training"
assert validation_data, "stateful mode needs at least one file for validation"
for file in validation_data:
self.logger.info('using input %s for validation only', file.name)
training_epoch_size = 0
with click.progressbar(training_data) as pbar:
for file in pbar:
text, size = _read_normalize_file(file)
total_size += size
training_epoch_size += ceil((size-self.length)/steps/self.batch_size)
chars.update(set(text))
validation_epoch_size = 0
with click.progressbar(validation_data) as pbar:
for file in pbar:
text, size = _read_normalize_file(file)
total_size += size
validation_epoch_size += ceil((size-self.length)/steps/self.batch_size)
chars.update(set(text))
split = None
else: # we can split window by window in stateless mode
steps = 3
max_size = 0
with click.progressbar(data) as pbar:
for file in pbar:
text, size = _read_normalize_file(file)
total_size += size - self.length
max_size = max(max_size, size)
chars.update(set(text))
if val_data:
training_epoch_size = ceil(total_size/steps/self.batch_size)
with click.progressbar(val_data) as pbar:
for file in pbar:
text, size = _read_normalize_file(file)
total_size += size - self.length
validation_epoch_size = ceil(total_size/steps/self.batch_size)
training_data = data
validation_data = val_data
split = None
else:
epoch_size = total_size/steps/self.batch_size
training_epoch_size = ceil(epoch_size*(1-self.validation_split))
validation_epoch_size = ceil(epoch_size*self.validation_split)
validation_data, training_data = data, data # same data, different generators (see below)
split = np.random.uniform(0, 1, (ceil(max_size/steps),)) # reserve split fraction at random positions
if self.variable_length:
training_epoch_size *= 1.1 # training data augmented with partial windows (1+subsampling ratio)
chars = sorted(list(chars))
self.voc_size = len(chars) + 1 # reserve 0 for padding
c_i = dict((c, i) for i, c in enumerate(chars, 1))
i_c = dict((i, c) for i, c in enumerate(chars, 1))
self.mapping = (c_i, i_c)
return training_data, validation_data, split, training_epoch_size, validation_epoch_size, total_size, steps
def reconfigure_for_mapping(self):
'''Reconfigure character embedding layer after change of mapping (possibly transferring previous weights).'''
assert self.status >= 1
embedding = self.model.get_layer(name='char_embedding')
if embedding.input_dim < self.voc_size: # more chars than during last training?
if self.status >= 2: # weights exist already (i.e. incremental training)?
self.logger.warning('transferring weights from previous model with only %d character types', embedding.input_dim)
# get old weights:
layer_weights = [layer.get_weights() for layer in self.model.layers]
# reconfigure with new mapping size (and new initializers):
self.configure()
# set old weights:
for layer, weights in zip(self.model.layers, layer_weights):
self.logger.debug('transferring weights for layer %s %s', layer.name, str([w.shape for w in weights]))
if layer.name == 'char_embedding':
# transfer weights from previous Embedding layer to new one:
new_weights = layer.get_weights() # freshly initialised
#new_weights[0][embedding.input_dim:, 0:embedding.output_dim] = weights[0][0,:] # repeat zero vector instead
new_weights[0][0:embedding.input_dim, 0:embedding.output_dim] = weights[0]
layer.set_weights(new_weights)
else:
# use old weights:
layer.set_weights(weights)
else:
self.configure()
def remove_from_mapping(self, char=None, idx=None):
'''Remove one character from mapping and reconfigure embedding layer accordingly (transferring previous weights).'''
assert self.status > 1
assert self.voc_size > 0
if not char and not idx:
return False
if char:
if char in self.mapping[0]:
idx = self.mapping[0][char]
else:
self.logger.error('unmapped character "%s" cannot be removed', char)
return False
else:
if idx in self.mapping[1]:
char = self.mapping[1][idx]
else:
self.logger.error('unmapped index "%d" cannot be removed', idx)
return False
embedding = self.model.get_layer(name='char_embedding').get_weights()[0]
norm = np.linalg.norm(embedding[idx, :])
self.logger.warning('pruning character "%s" [%d] with norm %f', char, idx, norm)
self.mapping[0].pop(char)
self.mapping[1].pop(idx)
for i in range(idx + 1, self.voc_size):
otherchar = self.mapping[1][i]
self.mapping[0][otherchar] -= 1
self.mapping[1][i-1] = otherchar
self.mapping[1].pop(i)
self.voc_size -= 1
embedding = np.delete(embedding, idx, 0)
# get old weights:
layer_weights = [layer.get_weights() for layer in self.model.layers]
# reconfigure with new mapping size (and new initializers):
self.configure()
# set old weights:
for layer, weights in zip(self.model.layers, layer_weights):
if layer.name == 'char_embedding':
# transfer weights from previous Embedding layer to new one:
layer.set_weights([embedding])
else:
# use old weights:
layer.set_weights(weights)
self.status = 2
return True
def test(self, test_data):
'''Evaluate model on text files.
Calculate the perplexity of the character sequences in
all `test_data` files according to the current model.
Derive meta-data for context variables from file names.
Return the overall perplexity.
'''
assert self.status > 1
assert self.incremental is False # no explicit state transfer
if self.stateful:
self.model.reset_states()
# todo: Since Keras does not allow callbacks within evaluate() / evaluate_generator() / test_loop(),
# we cannot reset_states() between input files as we do in train().
# Thus we should evaluate each file individually, reset in between, and accumulate losses.
# But this looks awkward, since we get N progress bars instead of 1, in contrast to training.
# Perhaps the overall error introduced into stateful models by not resetting is not that high
# after all?
epoch_size = 0
steps = self.length if self.stateful else 1
with click.progressbar(test_data) as pbar:
for file in pbar:
_text, size = _read_normalize_file(file)
epoch_size += ceil((size-1)/self.batch_size/steps)
# todo: make iterator thread-safe and then use_multiprocesing=True
loss, _accuracy = self.model.evaluate_generator(self._gen_data_from_files(test_data, steps), steps=epoch_size, verbose=1)
return exp(loss)
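    # Illustrative note (not in the original source): with categorical cross-entropy
    # measured in nats, exp(mean loss) is the per-character perplexity; e.g. a mean
    # loss of ~1.386 nats corresponds to a perplexity of ~4, i.e. the model is as
    # uncertain as a uniform choice among 4 characters.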
def rate(self, text, context=None):
'''Rate a string all at once.
Calculate the probabilities of the character sequence in `text`
according to the current model (predicting all at once).
Use the integer list `context` as time-constant context variables,
or zero-based underspecification.
        Return
import numpy as np
import pytest
import nengo
from nengo.builder import Builder
from nengo.builder.operator import Reset, Copy
from nengo.builder.signal import Signal
from nengo.dists import UniformHypersphere
from nengo.exceptions import ValidationError
from nengo.learning_rules import LearningRuleTypeParam, PES, BCM, Oja, Voja
from nengo.processes import WhiteSignal
from nengo.synapses import Alpha, Lowpass
def best_weights(weight_data):
return np.argmax(np.sum(np.var(weight_data, axis=0), axis=0))
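# Illustrative note (not part of the original test suite): best_weights() picks the
# last-axis index whose weight trace varies most over time, which gives the most
# informative weight to plot. Toy example with shape (timesteps, post, pre):
#   w = np.zeros((3, 1, 2))
#   w[:, 0, 1] = [0.0, 0.5, 1.0]   # only the second weight changes over time
#   best_weights(w)                # -> 1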
def _test_pes(
Simulator,
nl,
plt,
seed,
allclose,
pre_neurons=False,
post_neurons=False,
weight_solver=False,
vin=np.array([0.5, -0.5]),
vout=None,
n=200,
function=None,
transform=np.array(1.0),
rate=1e-3,
):
vout = np.array(vin) if vout is None else vout
with nengo.Network(seed=seed) as model:
model.config[nengo.Ensemble].neuron_type = nl()
stim = nengo.Node(output=vin)
target = nengo.Node(output=vout)
pre = nengo.Ensemble(n, dimensions=stim.size_out)
post = nengo.Ensemble(n, dimensions=stim.size_out)
error = nengo.Ensemble(n, dimensions=target.size_out)
nengo.Connection(stim, pre)
postslice = post[: target.size_out] if target.size_out < stim.size_out else post
pre = pre.neurons if pre_neurons else pre
post = post.neurons if post_neurons else postslice
conn = nengo.Connection(
pre,
post,
function=function,
transform=transform,
learning_rule_type=PES(rate),
)
if weight_solver:
conn.solver = nengo.solvers.LstsqL2(weights=True)
nengo.Connection(target, error, transform=-1)
nengo.Connection(postslice, error)
nengo.Connection(error, conn.learning_rule)
post_p = nengo.Probe(postslice, synapse=0.03)
error_p = nengo.Probe(error, synapse=0.03)
weights_p = nengo.Probe(conn, "weights", sample_every=0.01)
with Simulator(model) as sim:
sim.run(0.5)
t = sim.trange()
weights = sim.data[weights_p]
plt.subplot(211)
plt.plot(t, sim.data[post_p])
plt.ylabel("Post decoded value")
plt.subplot(212)
plt.plot(t, sim.data[error_p])
plt.ylabel("Error decoded value")
plt.xlabel("Time (s)")
tend = t > 0.4
assert allclose(sim.data[post_p][tend], vout, atol=0.05)
assert allclose(sim.data[error_p][tend], 0, atol=0.05)
assert not allclose(weights[0], weights[-1], atol=1e-5, record_rmse=False)
def test_pes_ens_ens(Simulator, nl_nodirect, plt, seed, allclose):
function = lambda x: [x[1], x[0]]
_test_pes(Simulator, nl_nodirect, plt, seed, allclose, function=function)
def test_pes_weight_solver(Simulator, plt, seed, allclose):
function = lambda x: [x[1], x[0]]
_test_pes(
Simulator, nengo.LIF, plt, seed, allclose, function=function, weight_solver=True
)
def test_pes_ens_slice(Simulator, plt, seed, allclose):
vin = [0.5, -0.5]
vout = [vin[0] ** 2 + vin[1] ** 2]
function = lambda x: [x[0] - x[1]]
_test_pes(
Simulator, nengo.LIF, plt, seed, allclose, vin=vin, vout=vout, function=function
)
def test_pes_neuron_neuron(Simulator, plt, seed, rng, allclose):
n = 200
initial_weights = rng.uniform(high=4e-4, size=(n, n))
_test_pes(
Simulator,
nengo.LIF,
plt,
seed,
allclose,
pre_neurons=True,
post_neurons=True,
n=n,
transform=initial_weights,
rate=7e-4,
)
def test_pes_neuron_ens(Simulator, plt, seed, rng, allclose):
n = 200
initial_weights = rng.uniform(high=1e-4, size=(2, n))
_test_pes(
Simulator,
nengo.LIF,
plt,
seed,
allclose,
pre_neurons=True,
post_neurons=False,
n=n,
transform=initial_weights,
)
def test_pes_transform(Simulator, seed, allclose):
"""Test behaviour of PES when function and transform both defined."""
n = 200
# error must be with respect to transformed vector (conn.size_out)
T = np.asarray([[0.5], [-0.5]]) # transform to output
m = nengo.Network(seed=seed)
with m:
u = nengo.Node(output=[1])
a = nengo.Ensemble(n, dimensions=1)
b = nengo.Node(size_in=2)
e = nengo.Node(size_in=1)
nengo.Connection(u, a)
learned_conn = nengo.Connection(
a,
b,
function=lambda x: [0],
transform=T,
learning_rule_type=nengo.PES(learning_rate=1e-3),
)
assert T.shape[0] == learned_conn.size_out
assert T.shape[1] == learned_conn.size_mid
nengo.Connection(b[0], e, synapse=None)
nengo.Connection(nengo.Node(output=-1), e)
nengo.Connection(e, learned_conn.learning_rule, transform=T, synapse=None)
p_b = nengo.Probe(b, synapse=0.05)
with Simulator(m) as sim:
sim.run(1.0)
tend = sim.trange() > 0.7
assert allclose(sim.data[p_b][tend], [1, -1], atol=1e-2)
def test_pes_multidim_error(Simulator, seed):
"""Test that PES works on error connections mapping from N to 1 dims.
Note that the transform is applied before the learning rule, so the error
signal should be 1-dimensional.
"""
with nengo.Network(seed=seed) as net:
err = nengo.Node(output=[0])
ens1 = nengo.Ensemble(20, 3)
ens2 = nengo.Ensemble(10, 1)
# Case 1: ens -> ens, weights=False
conn = nengo.Connection(
ens1,
ens2,
transform=np.ones((1, 3)),
solver=nengo.solvers.LstsqL2(weights=False),
learning_rule_type={"pes": nengo.PES()},
)
nengo.Connection(err, conn.learning_rule["pes"])
# Case 2: ens -> ens, weights=True
conn = nengo.Connection(
ens1,
ens2,
transform=np.ones((1, 3)),
solver=nengo.solvers.LstsqL2(weights=True),
learning_rule_type={"pes": nengo.PES()},
)
nengo.Connection(err, conn.learning_rule["pes"])
# Case 3: neurons -> ens
conn = nengo.Connection(
ens1.neurons,
ens2,
transform=np.ones((1, ens1.n_neurons)),
learning_rule_type={"pes": nengo.PES()},
)
nengo.Connection(err, conn.learning_rule["pes"])
with Simulator(net) as sim:
sim.run(0.01)
@pytest.mark.parametrize("pre_synapse", [0, Lowpass(tau=0.05), Alpha(tau=0.005)])
def test_pes_synapse(Simulator, seed, pre_synapse, allclose):
rule = PES(pre_synapse=pre_synapse)
with nengo.Network(seed=seed) as model:
stim = nengo.Node(output=WhiteSignal(0.5, high=10))
x = nengo.Ensemble(100, 1)
nengo.Connection(stim, x, synapse=None)
conn = nengo.Connection(x, x, learning_rule_type=rule)
p_neurons = nengo.Probe(x.neurons, synapse=pre_synapse)
p_pes = nengo.Probe(conn.learning_rule, "activities")
with Simulator(model) as sim:
sim.run(0.5)
assert allclose(sim.data[p_neurons][1:, :], sim.data[p_pes][:-1, :])
@pytest.mark.parametrize("weights", [False, True])
def test_pes_recurrent_slice(Simulator, seed, weights, allclose):
"""Test that PES works on recurrent connections from N to 1 dims."""
with nengo.Network(seed=seed) as net:
err = nengo.Node(output=[-1])
stim = nengo.Node(output=[0, 0])
post = nengo.Ensemble(50, 2, radius=2)
nengo.Connection(stim, post)
conn = nengo.Connection(
post,
post[1],
function=lambda x: 0.0,
solver=nengo.solvers.LstsqL2(weights=weights),
learning_rule_type=nengo.PES(learning_rate=5e-4),
)
nengo.Connection(err, conn.learning_rule)
p = nengo.Probe(post, synapse=0.025)
with Simulator(net) as sim:
sim.run(0.2)
# Learning rule should drive second dimension high, but not first
assert allclose(sim.data[p][-10:, 0], 0, atol=0.2)
assert np.all(sim.data[p][-10:, 1] > 0.8)
def test_pes_cycle(Simulator):
"""Test that PES works when connection output feeds back into error."""
with nengo.Network() as net:
a = nengo.Ensemble(10, 1)
b = nengo.Node(size_in=1)
c = nengo.Connection(a, b, synapse=None, learning_rule_type=nengo.PES())
nengo.Connection(b, c.learning_rule, synapse=None)
with Simulator(net):
# just checking that this builds without error
pass
@pytest.mark.parametrize(
"rule_type, solver",
[
(BCM(learning_rate=1e-8), False),
(Oja(learning_rate=1e-5), False),
([Oja(learning_rate=1e-5), BCM(learning_rate=1e-8)], False),
([Oja(learning_rate=1e-5), BCM(learning_rate=1e-8)], True),
],
)
def test_unsupervised(Simulator, rule_type, solver, seed, rng, plt, allclose):
n = 200
m = nengo.Network(seed=seed)
with m:
u = nengo.Node(WhiteSignal(0.5, high=10), size_out=2)
a = nengo.Ensemble(n, dimensions=2)
b = nengo.Ensemble(n + 1, dimensions=2)
nengo.Connection(u, a)
if solver:
conn = nengo.Connection(a, b, solver=nengo.solvers.LstsqL2(weights=True))
else:
initial_weights = rng.uniform(high=1e-3, size=(b.n_neurons, a.n_neurons))
conn = nengo.Connection(a.neurons, b.neurons, transform=initial_weights)
conn.learning_rule_type = rule_type
inp_p = nengo.Probe(u)
weights_p = nengo.Probe(conn, "weights", sample_every=0.01)
ap = nengo.Probe(a, synapse=0.03)
up = nengo.Probe(b, synapse=0.03)
with Simulator(m, seed=seed + 1) as sim:
sim.run(0.5)
t = sim.trange()
plt.subplot(2, 1, 1)
plt.plot(t, sim.data[inp_p], label="Input")
plt.plot(t, sim.data[ap], label="Pre")
plt.plot(t, sim.data[up], label="Post")
plt.legend(loc="best", fontsize="x-small")
plt.subplot(2, 1, 2)
best_ix = best_weights(sim.data[weights_p])
plt.plot(sim.trange(sample_every=0.01), sim.data[weights_p][..., best_ix])
plt.xlabel("Time (s)")
plt.ylabel("Weights")
assert not allclose(
sim.data[weights_p][0], sim.data[weights_p][-1], record_rmse=False
)
def learning_net(learning_rule=nengo.PES, net=None, rng=np.random):
net = nengo.Network() if net is None else net
with net:
if learning_rule is nengo.PES:
learning_rule_type = learning_rule(learning_rate=1e-5)
else:
learning_rule_type = learning_rule()
u = nengo.Node(output=1.0)
pre = nengo.Ensemble(10, dimensions=1)
post = nengo.Ensemble(10, dimensions=1)
initial_weights = rng.uniform(high=1e-3, size=(pre.n_neurons, post.n_neurons))
conn = nengo.Connection(
pre.neurons,
post.neurons,
transform=initial_weights,
learning_rule_type=learning_rule_type,
)
if learning_rule is nengo.PES:
err = nengo.Ensemble(10, dimensions=1)
nengo.Connection(u, err)
nengo.Connection(err, conn.learning_rule)
net.activity_p = nengo.Probe(pre.neurons, synapse=0.01)
net.weights_p = nengo.Probe(conn, "weights", synapse=None, sample_every=0.01)
return net
@pytest.mark.parametrize("learning_rule", [nengo.PES, nengo.BCM, nengo.Oja])
def test_dt_dependence(Simulator, plt, learning_rule, seed, rng, allclose):
"""Learning rules should work the same regardless of dt."""
m = learning_net(learning_rule, nengo.Network(seed=seed), rng)
trans_data = []
    # Using dts near or greater than tau_ref (0.002 by default) causes learning to
    # differ due to a lowered presynaptic firing rate
dts = (0.0001, 0.001)
colors = ("b", "g", "r")
ax1 = plt.subplot(2, 1, 1)
ax2 = plt.subplot(2, 1, 2)
for c, dt in zip(colors, dts):
with Simulator(m, dt=dt) as sim:
sim.run(0.1)
trans_data.append(sim.data[m.weights_p])
best_ix = best_weights(sim.data[m.weights_p])
ax1.plot(
sim.trange(sample_every=0.01), sim.data[m.weights_p][..., best_ix], c=c
)
ax2.plot(sim.trange(), sim.data[m.activity_p], c=c)
ax1.set_xlim(right=sim.trange()[-1])
ax1.set_ylabel("Connection weight")
ax2.set_xlim(right=sim.trange()[-1])
ax2.set_ylabel("Presynaptic activity")
assert allclose(trans_data[0], trans_data[1], atol=3e-3)
assert not allclose(
sim.data[m.weights_p][0], sim.data[m.weights_p][-1], record_rmse=False
)
@pytest.mark.parametrize("learning_rule", [nengo.PES, nengo.BCM, nengo.Oja])
def test_reset(Simulator, learning_rule, plt, seed, rng, allclose):
"""Make sure resetting learning rules resets all state."""
m = learning_net(learning_rule, nengo.Network(seed=seed), rng)
with Simulator(m) as sim:
sim.run(0.1)
sim.run(0.2)
first_t = sim.trange()
first_t_trans = sim.trange(sample_every=0.01)
first_activity_p = np.array(sim.data[m.activity_p], copy=True)
first_weights_p = np.array(sim.data[m.weights_p], copy=True)
sim.reset()
sim.run(0.3)
plt.subplot(2, 1, 1)
plt.ylabel("Neural activity")
plt.plot(first_t, first_activity_p, c="b")
plt.plot(sim.trange(), sim.data[m.activity_p], c="g")
plt.subplot(2, 1, 2)
plt.ylabel("Connection weight")
best_ix = best_weights(first_weights_p)
plt.plot(first_t_trans, first_weights_p[..., best_ix], c="b")
plt.plot(sim.trange(sample_every=0.01), sim.data[m.weights_p][..., best_ix], c="g")
assert allclose(sim.trange(), first_t)
assert allclose(sim.trange(sample_every=0.01), first_t_trans)
assert allclose(sim.data[m.activity_p], first_activity_p)
assert allclose(sim.data[m.weights_p], first_weights_p)
def test_learningruletypeparam():
"""LearningRuleTypeParam must be one or many learning rules."""
class Test:
lrp = LearningRuleTypeParam("lrp", default=None)
inst = Test()
assert inst.lrp is None
inst.lrp = Oja()
assert isinstance(inst.lrp, Oja)
inst.lrp = [Oja(), Oja()]
for lr in inst.lrp:
assert isinstance(lr, Oja)
# Non-LR no good
with pytest.raises(ValueError):
inst.lrp = "a"
# All elements in list must be LR
with pytest.raises(ValueError):
inst.lrp = [Oja(), "a", Oja()]
def test_learningrule_attr(seed):
"""Test learning_rule attribute on Connection"""
def check_rule(rule, conn, rule_type):
assert rule.connection is conn and rule.learning_rule_type is rule_type
with nengo.Network(seed=seed):
a, b, e = [nengo.Ensemble(10, 2) for i in range(3)]
T = np.ones((10, 10))
r1 = PES()
c1 = nengo.Connection(a.neurons, b.neurons, learning_rule_type=r1)
check_rule(c1.learning_rule, c1, r1)
r2 = [PES(), BCM()]
c2 = nengo.Connection(a.neurons, b.neurons, learning_rule_type=r2, transform=T)
assert isinstance(c2.learning_rule, list)
for rule, rule_type in zip(c2.learning_rule, r2):
check_rule(rule, c2, rule_type)
r3 = dict(oja=Oja(), bcm=BCM())
c3 = nengo.Connection(a.neurons, b.neurons, learning_rule_type=r3, transform=T)
assert isinstance(c3.learning_rule, dict)
assert set(c3.learning_rule) == set(r3) # assert same keys
for key in r3:
check_rule(c3.learning_rule[key], c3, r3[key])
def test_voja_encoders(Simulator, nl_nodirect, rng, seed, allclose):
"""Tests that voja changes active encoders to the input."""
n = 200
learned_vector = np.asarray([0.3, -0.4, 0.6])
learned_vector /= np.linalg.norm(learned_vector)
    n_change = n // 2  # modify first half of the
13) == 0.0199004975124
assert round(zone_attr.r1_ow, 14) == 0.00688468914141
assert round(zone_attr.c1_ow, 5) == 533938.62338
assert round(zone_attr.r1_iw, 14) == 0.00971956114082
assert round(zone_attr.c1_iw, 5) == 319983.51874
assert round(zone_attr.r_rest_ow, 13) == 0.0399903108586
assert round(zone_attr.area_gf, 1) == 140.0
assert round(zone_attr.ua_value_gf, 16) == 58.351477449455686
assert round(zone_attr.r_conv_inner_gf, 16) == 0.0042016806722689
assert round(zone_attr.r_rad_inner_gf, 16) == 0.0014285714285714
assert round(zone_attr.alpha_conv_inner_gf, 5) == 1.7
assert round(zone_attr.alpha_rad_inner_gf, 1) == 5.0
assert round(zone_attr.r1_gf, 14) == 0.00236046484848
assert round(zone_attr.c1_gf, 5) == 1557320.98487
assert round(zone_attr.r_rest_gf, 13) == 0.0137109637229
assert round(zone_attr.area_rt, 1) == 140.0
assert round(zone_attr.ua_value_rt, 16) == 57.394603194028036
assert round(zone_attr.r_conv_inner_rt, 16) == 0.0042016806722689
assert round(zone_attr.r_rad_inner_rt, 16) == 0.0014285714285714
assert round(zone_attr.r_conv_outer_rt, 9) == 0.000357143
assert round(zone_attr.alpha_conv_inner_rt, 5) == 1.7
assert round(zone_attr.alpha_rad_inner_rt, 1) == 5.0
assert round(zone_attr.r1_rt, 14) == 0.00236046484848
assert round(zone_attr.c1_rt, 5) == 1557320.98487
assert round(zone_attr.r_rest_rt, 13) == 0.0137109637229
def test_volume_zone(self):
"""test of volume_zone"""
prj.buildings[-1].thermal_zones[-1].set_volume_zone()
assert prj.buildings[-1].thermal_zones[-1].volume == 490.0
def test_set_inner_wall_area(self):
"""test of set_inner_wall_area"""
prj.buildings[-1].thermal_zones[-1].set_inner_wall_area()
for wall in prj.buildings[-1].thermal_zones[-1].inner_walls:
assert round(wall.area, 16) == 11.951219512195122
# methods in UseConditions18599()
def test_load_use_conditions(self):
"""test of load_use_conditions, no parameter checking"""
use_cond = prj.buildings[-1].thermal_zones[-1].use_conditions
use_cond.load_use_conditions("Living",
data_class=prj.data)
def test_save_use_conditions(self):
"""test of save_use_conditions, no parameter checking"""
import os
path = os.path.join(utilities.get_default_path(),
'UseCondUT.xml')
prj.data.path_uc = path
prj.data.load_uc_binding()
use_cond = prj.buildings[-1].thermal_zones[-1].use_conditions
use_cond.save_use_conditions(data_class=prj.data)
# methods in BuildingElement
def test_ua_value(self):
"""test of ua_value"""
prj.set_default()
helptest.building_test2(prj)
therm_zone = prj.buildings[-1].thermal_zones[-1]
therm_zone.outer_walls[0].calc_ua_value()
assert round(
therm_zone.outer_walls[0].ua_value,
15) == 4.132453174475393
def test_gather_element_properties(self):
"""test of gather_element_properties"""
outerWalls = prj.buildings[-1].thermal_zones[-1].outer_walls[0]
number_of_layer, density, thermal_conduc, heat_capac, thickness = \
outerWalls.gather_element_properties()
assert number_of_layer == 2
assert (density == [5., 2.]).all()
assert (thermal_conduc == [4., 2.]).all()
assert (heat_capac == [0.48, 0.84]).all()
assert (thickness == [5., 2.]).all()
def test_load_type_element(self):
"""test of load_type_element, no parameter checking"""
# test load function
therm_zone = prj.buildings[-1].thermal_zones[-1]
therm_zone.outer_walls[0].load_type_element(1988, "heavy", prj.data)
therm_zone.inner_walls[0].load_type_element(1988, "light", prj.data)
therm_zone.windows[0].load_type_element(
1988,
"Kunststofffenster, Isolierverglasung",
prj.data)
def test_save_type_element(self):
"""test of save_type_element, no parameter checking"""
import os
# test load function
therm_zone = prj.buildings[-1].thermal_zones[-1]
path = os.path.join(utilities.get_default_path(),
'unitTestTB.xml')
prj.data.path_tb = path
prj.data.load_tb_binding()
therm_zone.outer_walls[0].save_type_element(data_class=prj.data)
therm_zone.inner_walls[0].save_type_element(data_class=prj.data)
therm_zone.windows[0].save_type_element(data_class=prj.data)
def test_delete_type_element(self):
"""test of save_type_element, no parameter checking"""
import os
# test load function
therm_zone = prj.buildings[-1].thermal_zones[-1]
path = os.path.join(utilities.get_default_path(),
'unitTestTB.xml')
prj.data.path_tb = path
prj.data.load_tb_binding()
therm_zone.outer_walls[0].delete_type_element(data_class=prj.data)
therm_zone.inner_walls[0].delete_type_element(data_class=prj.data)
therm_zone.windows[0].delete_type_element(data_class=prj.data)
# methods in Wall
def test_calc_equivalent_res_wall(self):
"""test of calc_equivalent_res, wall"""
prj.set_default()
helptest.building_test2(prj)
therm_zone = prj.buildings[-1].thermal_zones[-1]
therm_zone.outer_walls[0].calc_equivalent_res()
# parameters for outwall
assert round(therm_zone.outer_walls[0].c1, 6) == 111237.213205
assert round(therm_zone.outer_walls[0].c2, 7) == 59455.3856787
assert round(therm_zone.outer_walls[0].r1, 13) == 0.0330465078788
assert round(therm_zone.outer_walls[0].r2, 13) == 0.0549256129353
assert round(therm_zone.outer_walls[0].r3, 12) == 0.137027879186
assert round(therm_zone.outer_walls[0].c1_korr, 6) == 111237.213205
def test_insulate_wall(self):
"""test of insulate_wall"""
therm_zone = prj.buildings[-1].thermal_zones[-1]
therm_zone.outer_walls[0].insulate_wall("EPS_040_15", 0.04)
assert round(therm_zone.outer_walls[0].ua_value, 6) == 2.924088
def test_retrofit_wall(self):
"""test of retrofit_wall"""
prj.set_default()
helptest.building_test2(prj)
therm_zone = prj.buildings[-1].thermal_zones[-1]
therm_zone.outer_walls[0].retrofit_wall(2016, "EPS_040_15")
assert round(therm_zone.outer_walls[0].ua_value, 6) == 2.4
prj.set_default()
helptest.building_test2(prj)
therm_zone = prj.buildings[-1].thermal_zones[-1]
therm_zone.outer_walls[0].retrofit_wall(2010, "EPS_040_15")
assert round(therm_zone.outer_walls[0].ua_value, 6) == 2.4
prj.set_default()
helptest.building_test2(prj)
therm_zone = prj.buildings[-1].thermal_zones[-1]
therm_zone.outer_walls[0].retrofit_wall(2005, "EPS_040_15")
assert round(therm_zone.outer_walls[0].ua_value, 2) == 4.13
prj.set_default()
helptest.building_test2(prj)
therm_zone = prj.buildings[-1].thermal_zones[-1]
therm_zone.outer_walls[0].retrofit_wall(1998, "EPS_040_15")
assert round(therm_zone.outer_walls[0].ua_value, 2) == 4.13
prj.set_default()
helptest.building_test2(prj)
therm_zone = prj.buildings[-1].thermal_zones[-1]
therm_zone.outer_walls[0].retrofit_wall(1990, "EPS_040_15")
assert round(therm_zone.outer_walls[0].ua_value, 2) == 4.13
prj.set_default()
helptest.building_test2(prj)
therm_zone = prj.buildings[-1].thermal_zones[-1]
therm_zone.outer_walls[0].retrofit_wall(1980, "EPS_040_15")
assert round(therm_zone.outer_walls[0].ua_value, 2) == 4.13
def test_calc_equivalent_res_win(self):
"""test of calc_equivalent_res, win"""
prj.set_default()
helptest.building_test2(prj)
therm_zone = prj.buildings[-1].thermal_zones[-1]
therm_zone.windows[0].calc_equivalent_res()
assert round(therm_zone.windows[0].r1, 3) == 0.072
def test_change_infiltration_rate(self):
"""test for change of infiltration_rate"""
prj.set_default(load_data=True)
helptest.building_test2(prj)
therm_zone = prj.buildings[-1].thermal_zones[-1]
assert therm_zone.infiltration_rate == 0.2
therm_zone.infiltration_rate = 0.7
assert therm_zone.infiltration_rate == 0.7
therm_zone.use_conditions.base_ach = 0.5
assert therm_zone.infiltration_rate == 0.5
def test_load_save_material(self):
"""test of load_material_template and save_material_template,
no parameter checking"""
from teaser.logic.buildingobjects.buildingphysics.material import \
Material
path = os.path.join(utilities.get_default_path(),
'MatUT.xml')
mat = Material(parent=None)
mat.load_material_template(mat_name='Tiledroof',
data_class=prj.data)
from teaser.data.dataclass import DataClass
dat = DataClass()
dat.path_mat = path
dat.load_mat_binding()
mat.save_material_template(data_class=dat)
def test_properties_project(self):
"""Tests properties of project class"""
prj.number_of_elements_calc
prj.merge_windows_calc
prj.used_library_calc
prj.name = 123
assert prj.name == "P123"
def test_warnings_prj(self):
"""Tests misc parts in project.py"""
from teaser.logic.buildingobjects.building import Building
from teaser.logic.buildingobjects.thermalzone import ThermalZone
        # warnings for buildings that have not been calculated
bld = Building(parent=prj)
tz = ThermalZone(parent=bld)
prj.calc_all_buildings()
prj.set_default()
# warning if iwu and number_of_apartments is used
prj.add_residential(method='iwu',
usage="single_family_dwelling",
name="test",
year_of_construction=1988,
number_of_floors=1,
height_of_floors=7,
net_leased_area=1988,
number_of_apartments=1)
        # export only the specified building when an internal id is passed over
prj.add_residential(method='iwu',
usage="single_family_dwelling",
name="test1",
year_of_construction=1988,
number_of_floors=15,
height_of_floors=6,
net_leased_area=1988)
prj.calc_all_buildings()
prj.export_aixlib(internal_id=prj.buildings[-1].internal_id)
prj.number_of_elements_calc = 1
prj.merge_windows_calc = True
prj.used_library_calc = 'IBPSA'
prj.calc_all_buildings()
prj.export_ibpsa(internal_id=prj.buildings[-1].internal_id)
prj.set_default(load_data="Test")
def test_v5_bindings(self):
"""
        Tests the old v5 project bindings
"""
prj.set_default()
prj.load_project(
os.path.join(
os.path.dirname(__file__),
'testfiles',
'teaser_v5.teaserXML'))
def test_v4_bindings(self):
"""
Tests the old v4 project bindings
"""
prj.set_default(load_data=True)
prj.load_project(
os.path.join(
os.path.dirname(__file__),
'testfiles',
'teaser_v4.teaserXML'))
prj.data.path_tb = os.path.join(
os.path.dirname(__file__),
'testfiles',
'TypeBuildingElements_v4.xml')
prj.data.path_mat = os.path.join(
os.path.dirname(__file__),
'testfiles',
'MaterialTemplates_v4.xml')
prj.data.path_uc = os.path.join(
os.path.dirname(__file__),
'testfiles',
'UseConditions_v4.xml')
prj.data.load_tb_binding()
prj.data.load_uc_binding()
prj.data.load_mat_binding()
def test_v39_bindings(self):
"""
Tests the old v39 project bindings
"""
prj.set_default()
prj.load_project(
os.path.join(
os.path.dirname(__file__),
'testfiles',
'teaser_v39.teaserXML'))
def test_export_aixlib_only_iw(self):
"""
        Tests AixLib and IBPSA export for a building with inner walls only
"""
from teaser.logic.buildingobjects.building import Building
prj.set_default(load_data=True)
bldg = Building(parent=prj)
bldg.name = "SuperExampleBuilding"
bldg.street_name = "AwesomeAvenue42"
bldg.city = "46325FantasticTown"
bldg.year_of_construction = 2015
bldg.number_of_floors = 1
bldg.height_of_floors = 3.5
from teaser.logic.buildingobjects.thermalzone import ThermalZone
tz = ThermalZone(parent=bldg)
tz.name = "LivingRoom"
tz.area = 140.0
tz.volume = tz.area * bldg.number_of_floors * bldg.height_of_floors
tz.infiltration_rate = 0.5
from teaser.logic.buildingobjects.boundaryconditions.boundaryconditions import BoundaryConditions
tz.use_conditions = BoundaryConditions(parent=tz)
tz.use_conditions.load_use_conditions("Living", prj.data)
from teaser.logic.buildingobjects.buildingphysics.innerwall import InnerWall
in_wall_dict = {"InnerWall1": [10.0],
"InnerWall2": [14.0],
"InnerWall3": [10.0]}
for key, value in in_wall_dict.items():
in_wall = InnerWall(parent=tz)
in_wall.name = key
in_wall.load_type_element(
year=bldg.year_of_construction,
construction='heavy')
in_wall.area = value[0]
prj.number_of_elements_calc = 1
prj.merge_windows_calc = False
prj.used_library_calc = 'AixLib'
prj.calc_all_buildings()
prj.export_aixlib()
prj.number_of_elements_calc = 2
prj.merge_windows_calc = False
prj.used_library_calc = 'AixLib'
prj.calc_all_buildings()
prj.export_aixlib()
prj.number_of_elements_calc = 3
prj.merge_windows_calc = False
prj.used_library_calc = 'AixLib'
prj.calc_all_buildings()
prj.export_aixlib()
prj.number_of_elements_calc = 4
prj.merge_windows_calc = False
prj.used_library_calc = 'AixLib'
prj.calc_all_buildings()
prj.export_aixlib()
prj.number_of_elements_calc = 1
prj.merge_windows_calc = False
prj.used_library_calc = 'IBPSA'
prj.calc_all_buildings()
prj.export_ibpsa()
prj.number_of_elements_calc = 2
prj.merge_windows_calc = False
prj.used_library_calc = 'IBPSA'
prj.calc_all_buildings()
prj.export_ibpsa()
prj.number_of_elements_calc = 3
prj.merge_windows_calc = False
prj.used_library_calc = 'IBPSA'
prj.calc_all_buildings()
prj.export_ibpsa()
prj.number_of_elements_calc = 4
prj.merge_windows_calc = False
prj.used_library_calc = 'IBPSA'
prj.calc_all_buildings()
prj.export_ibpsa()
def test_export_only_ow(self):
"""
        Tests AixLib and IBPSA export for a building with outer walls only
"""
from teaser.logic.buildingobjects.building import Building
bldg = Building(parent=prj)
bldg.name = "SuperExampleBuilding"
bldg.street_name = "AwesomeAvenue42"
bldg.city = "46325FantasticTown"
bldg.year_of_construction = 2015
bldg.number_of_floors = 1
bldg.height_of_floors = 3.5
from teaser.logic.buildingobjects.thermalzone import ThermalZone
tz = ThermalZone(parent=bldg)
tz.name = "LivingRoom"
tz.area = 140.0
tz.volume = tz.area * bldg.number_of_floors * bldg.height_of_floors
tz.infiltration_rate = 0.5
from teaser.logic.buildingobjects.boundaryconditions.boundaryconditions \
import BoundaryConditions
tz.use_conditions = BoundaryConditions(parent=tz)
tz.use_conditions.load_use_conditions("Living", prj.data)
from teaser.logic.buildingobjects.buildingphysics.outerwall import \
OuterWall
out_wall_dict = {"OuterWall_north": [10.0, 90.0, 0.0],
"OuterWall_east": [14.0, 90.0, 90.0],
"OuterWall_south": [10.0, 90.0, 180.0],
"OuterWall_west": [14.0, 90.0, 270.0]}
for key, value in out_wall_dict.items():
out_wall = OuterWall(parent=tz)
out_wall.name = key
out_wall.load_type_element(
year=bldg.year_of_construction,
construction='heavy')
out_wall.area = value[0]
out_wall.tilt = value[1]
out_wall.orientation = value[2]
prj.number_of_elements_calc = 1
prj.merge_windows_calc = False
prj.used_library_calc = 'AixLib'
prj.calc_all_buildings()
prj.export_aixlib()
prj.number_of_elements_calc = 2
prj.merge_windows_calc = False
prj.used_library_calc = 'AixLib'
prj.calc_all_buildings()
prj.export_aixlib()
prj.number_of_elements_calc = 3
prj.merge_windows_calc = False
prj.used_library_calc = 'AixLib'
prj.calc_all_buildings()
prj.export_aixlib()
prj.number_of_elements_calc = 4
prj.merge_windows_calc = False
prj.used_library_calc = 'AixLib'
prj.calc_all_buildings()
prj.export_aixlib()
prj.number_of_elements_calc = 1
prj.merge_windows_calc = False
prj.used_library_calc = 'IBPSA'
prj.calc_all_buildings()
prj.export_ibpsa()
prj.number_of_elements_calc = 2
prj.merge_windows_calc = False
prj.used_library_calc = 'IBPSA'
prj.calc_all_buildings()
prj.export_ibpsa()
prj.number_of_elements_calc = 3
prj.merge_windows_calc = False
prj.used_library_calc = 'IBPSA'
prj.calc_all_buildings()
prj.export_ibpsa()
prj.number_of_elements_calc = 4
prj.merge_windows_calc = False
prj.used_library_calc = 'IBPSA'
prj.calc_all_buildings()
prj.export_ibpsa()
prj.number_of_elements_calc = 1
prj.merge_windows_calc = True
prj.used_library_calc = 'IBPSA'
prj.calc_all_buildings()
prj.export_ibpsa()
prj.number_of_elements_calc = 2
prj.merge_windows_calc = True
prj.used_library_calc = 'IBPSA'
prj.calc_all_buildings()
prj.export_ibpsa()
prj.number_of_elements_calc = 3
prj.merge_windows_calc = True
prj.used_library_calc = 'IBPSA'
prj.calc_all_buildings()
prj.export_ibpsa()
prj.number_of_elements_calc = 4
prj.merge_windows_calc = True
prj.used_library_calc = 'IBPSA'
prj.calc_all_buildings()
prj.export_ibpsa()
def test_export_only_win(self):
"""
Tests AixLib output for a building with windows only
"""
from teaser.logic.buildingobjects.building import Building
bldg = Building(parent=prj)
bldg.name = "SuperExampleBuilding"
bldg.street_name = "AwesomeAvenue42"
bldg.city = "46325FantasticTown"
bldg.year_of_construction = 2015
bldg.number_of_floors = 1
bldg.height_of_floors = 3.5
from teaser.logic.buildingobjects.thermalzone import ThermalZone
tz = ThermalZone(parent=bldg)
tz.name = "LivingRoom"
tz.area = 140.0
tz.volume = tz.area * bldg.number_of_floors * bldg.height_of_floors
tz.infiltration_rate = 0.5
from teaser.logic.buildingobjects.boundaryconditions.boundaryconditions \
import BoundaryConditions
tz.use_conditions = BoundaryConditions(parent=tz)
tz.use_conditions.load_use_conditions("Living", prj.data)
from teaser.logic.buildingobjects.buildingphysics.window import Window
from teaser.logic.buildingobjects.buildingphysics.layer import Layer
from teaser.logic.buildingobjects.buildingphysics.material import \
Material
win_dict = {"Window_east": [5.0, 90.0, 90.0],
"Window_south": [8.0, 90.0, 180.0],
"Window_west": [5.0, 90.0, 270.0]}
for key, value in win_dict.items():
win = Window(parent=tz)
win.name = key
win.area = value[0]
win.tilt = value[1]
win.orientation = value[2]
win.inner_convection = 1.7
win.inner_radiation = 5.0
import os
from collections import defaultdict
from copy import deepcopy
from functools import partial
from pathlib import Path
from typing import Type
import numpy as np
from qtpy.QtCore import QByteArray, Qt, Signal, Slot
from qtpy.QtGui import QCloseEvent, QGuiApplication, QIcon, QKeySequence, QTextOption
from qtpy.QtWidgets import (
QAbstractSpinBox,
QCheckBox,
QDoubleSpinBox,
QFileDialog,
QFormLayout,
QGridLayout,
QHBoxLayout,
QLabel,
QMessageBox,
QProgressBar,
QPushButton,
QSizePolicy,
QSpinBox,
QTabWidget,
QTextEdit,
QVBoxLayout,
QWidget,
)
import PartSegData
from PartSegCore import UNIT_SCALE, Units, state_store
from PartSegCore.io_utils import HistoryElement, HistoryProblem, WrongFileTypeException
from PartSegCore.mask import io_functions
from PartSegCore.mask.algorithm_description import mask_algorithm_dict
from PartSegCore.mask.history_utils import create_history_element_from_segmentation_tuple
from PartSegCore.mask.io_functions import (
LoadSegmentation,
LoadSegmentationParameters,
SaveSegmentation,
SegmentationTuple,
)
from PartSegCore.mask_create import calculate_mask_from_project
from PartSegCore.segmentation.algorithm_base import SegmentationResult
from PartSegImage import Image, TiffImageReader
from ..common_gui.advanced_tabs import AdvancedWindow
from ..common_gui.algorithms_description import AlgorithmChoose, AlgorithmSettingsWidget, EnumComboBox
from ..common_gui.channel_control import ChannelProperty
from ..common_gui.custom_load_dialog import CustomLoadDialog
from ..common_gui.custom_save_dialog import SaveDialog
from ..common_gui.flow_layout import FlowLayout
from ..common_gui.main_window import BaseMainMenu, BaseMainWindow
from ..common_gui.mask_widget import MaskDialogBase
from ..common_gui.multiple_file_widget import MultipleFileWidget
from ..common_gui.napari_image_view import LabelEnum
from ..common_gui.select_multiple_files import AddFiles
from ..common_gui.stack_image_view import ColorBar
from ..common_gui.universal_gui_part import right_label
from ..common_gui.waiting_dialog import ExecuteFunctionDialog
from ..segmentation_mask.segmentation_info_dialog import SegmentationInfoDialog
from .batch_proceed import BatchProceed, BatchTask
from .image_view import StackImageView
from .simple_measurements import SimpleMeasurements
from .stack_settings import StackSettings, get_mask
CONFIG_FOLDER = os.path.join(state_store.save_folder, "mask")
class MaskDialog(MaskDialogBase):
def __init__(self, settings: StackSettings):
super().__init__(settings)
self.settings = settings
def next_mask(self):
project_info: SegmentationTuple = self.settings.get_project_info()
mask_property = self.mask_widget.get_mask_property()
self.settings.set("mask_manager.mask_property", mask_property)
mask = calculate_mask_from_project(mask_description=mask_property, project=project_info)
self.settings.add_history_element(create_history_element_from_segmentation_tuple(project_info, mask_property,))
self.settings.mask = mask
self.settings.chosen_components_widget.un_check_all()
self.close()
def prev_mask(self):
history: HistoryElement = self.settings.history_pop()
history.arrays.seek(0)
seg = np.load(history.arrays)
history.arrays.seek(0)
self.settings.segmentation = seg["segmentation"]
self.settings.set_segmentation(
seg["segmentation"],
False,
history.segmentation_parameters["selected"],
history.segmentation_parameters["parameters"],
)
if "mask" in seg:
self.settings.mask = seg["mask"]
else:
self.settings.mask = None
self.close()
class MainMenu(BaseMainMenu):
image_loaded = Signal()
def __init__(self, settings: StackSettings, main_window):
"""
:type settings: StackSettings
:param settings:
"""
super().__init__(settings, main_window)
self.settings = settings
self.segmentation_cache = None
self.read_thread = None
self.advanced_window = None
self.measurements_window = None
self.load_image_btn = QPushButton("Load image")
self.load_image_btn.clicked.connect(self.load_image)
self.load_segmentation_btn = QPushButton("Load segmentation")
self.load_segmentation_btn.clicked.connect(self.load_segmentation)
self.save_segmentation_btn = QPushButton("Save segmentation")
self.save_segmentation_btn.clicked.connect(self.save_segmentation)
self.save_catted_parts = QPushButton("Save components")
self.save_catted_parts.clicked.connect(self.save_result)
self.advanced_window_btn = QPushButton("Advanced settings")
self.advanced_window_btn.clicked.connect(self.show_advanced_window)
self.mask_manager_btn = QPushButton("Mask manager")
self.mask_manager_btn.clicked.connect(self.mask_manager)
self.measurements_btn = QPushButton("Simple measurements")
self.measurements_btn.clicked.connect(self.simple_measurement)
self.segmentation_dialog = SegmentationInfoDialog(
self.main_window.settings,
self.main_window.options_panel.algorithm_options.algorithm_choose_widget.change_algorithm,
)
self.setContentsMargins(0, 0, 0, 0)
layout = QHBoxLayout()
layout.setContentsMargins(0, 0, 0, 0)
# layout.setSpacing(0)
layout.addWidget(self.load_image_btn)
layout.addWidget(self.load_segmentation_btn)
layout.addWidget(self.save_catted_parts)
layout.addWidget(self.save_segmentation_btn)
layout.addWidget(self.advanced_window_btn)
layout.addWidget(self.mask_manager_btn)
layout.addWidget(self.measurements_btn)
self.setLayout(layout)
def simple_measurement(self):
if self.measurements_window is None:
self.measurements_window = SimpleMeasurements(self.settings)
self.measurements_window.show()
def mask_manager(self):
if self.settings.segmentation is None:
QMessageBox.information(self, "No segmentation", "Cannot create mask without segmentation")
return
if not self.settings.chosen_components():
QMessageBox.information(self, "No selected components", "Mask is created only from selected components")
return
dial = MaskDialog(self.settings)
dial.exec_()
def show_advanced_window(self):
if self.advanced_window is None:
self.advanced_window = AdvancedWindow(self.settings, ["channelcontrol"])
# FIXME temporary workaround
self.advanced_window.reload_list = []
self.advanced_window.show()
def load_image(self):
        # TODO move segmentation with image load to load_segmentation
dial = CustomLoadDialog(io_functions.load_dict)
dial.setDirectory(self.settings.get("io.load_image_directory", str(Path.home())))
dial.selectNameFilter(self.settings.get("io.load_data_filter", next(iter(io_functions.load_dict.keys()))))
dial.setHistory(dial.history() + self.settings.get_path_history())
if not dial.exec_():
return
load_property = dial.get_result()
self.settings.set("io.load_image_directory", os.path.dirname(load_property.load_location[0]))
self.settings.set("io.load_data_filter", load_property.selected_filter)
self.settings.add_path_history(os.path.dirname(load_property.load_location[0]))
def exception_hook(exception):
if isinstance(exception, ValueError) and exception.args[0] == "not a TIFF file":
QMessageBox.warning(self, "Open error", "Image is not proper tiff/lsm image")
elif isinstance(exception, MemoryError):
QMessageBox.warning(self, "Open error", "Not enough memory to read this image")
elif isinstance(exception, IOError):
QMessageBox.warning(self, "Open error", f"Some problem with reading from disc: {exception}")
elif isinstance(exception, WrongFileTypeException):
QMessageBox.warning(
self,
"Open error",
"No needed files inside archive. Most probably you choose file from segmentation analysis",
)
else:
raise exception
execute_dialog = ExecuteFunctionDialog(
load_property.load_class.load,
[load_property.load_location],
{"metadata": {"default_spacing": self.settings.image.spacing}},
text="Load data",
exception_hook=exception_hook,
)
if execute_dialog.exec():
result = execute_dialog.get_result()
if result is None:
return
self.set_data(result)
def set_image(self, image: Image) -> bool:
if image is None:
return False
if image.is_time:
if image.is_stack:
QMessageBox.warning(self, "Not supported", "Data that are time data are currently not supported")
return False
else:
res = QMessageBox.question(
self,
"Not supported",
"Time data are currently not supported. Maybe You would like to treat time as z-stack",
QMessageBox.Yes | QMessageBox.No,
QMessageBox.No,
)
if res == QMessageBox.Yes:
image = image.swap_time_and_stack()
else:
return False
self.settings.image = image
return True
def load_segmentation(self):
dial = CustomLoadDialog(
{
LoadSegmentation.get_name(): LoadSegmentation,
LoadSegmentationParameters.get_name(): LoadSegmentationParameters,
}
)
dial.setDirectory(self.settings.get("io.open_segmentation_directory", str(Path.home())))
dial.setHistory(dial.history() + self.settings.get_path_history())
if not dial.exec_():
return
load_property = dial.get_result()
self.settings.set("io.open_segmentation_directory", os.path.dirname(load_property.load_location[0]))
self.settings.add_path_history(os.path.dirname(load_property.load_location[0]))
def exception_hook(exception):
mess = QMessageBox(self)
if isinstance(exception, ValueError) and exception.args[0] == "Segmentation do not fit to image":
mess.warning(self, "Open error", "Segmentation do not fit to image")
elif isinstance(exception, MemoryError):
mess.warning(self, "Open error", "Not enough memory to read this image")
elif isinstance(exception, IOError):
mess.warning(self, "Open error", "Some problem with reading from disc")
elif isinstance(exception, WrongFileTypeException):
mess.warning(
self,
"Open error",
"No needed files inside archive. Most probably you choose file from segmentation analysis",
)
else:
raise exception
dial = ExecuteFunctionDialog(
load_property.load_class.load,
[load_property.load_location],
text="Load segmentation",
exception_hook=exception_hook,
)
if dial.exec():
result = dial.get_result()
if result is None:
QMessageBox.critical(self, "Data Load fail", "Fail of loading data")
return
if result.segmentation is not None:
try:
self.settings.set_project_info(dial.get_result())
return
except ValueError as e:
if e.args != ("Segmentation do not fit to image",):
raise
self.segmentation_dialog.set_additional_text(
"Segmentation do not fit to image, maybe you would lie to load parameters only."
)
except HistoryProblem:
QMessageBox().warning(
self,
"Load Problem",
"You set to save selected components when loading "
"another segmentation but history is incomatybile",
)
else:
self.segmentation_dialog.set_additional_text("")
self.segmentation_dialog.set_parameters_dict(result.segmentation_parameters)
self.segmentation_dialog.show()
def save_segmentation(self):
if self.settings.segmentation is None:
QMessageBox.warning(self, "No segmentation", "No segmentation to save")
return
dial = SaveDialog(io_functions.save_segmentation_dict, False, history=self.settings.get_path_history())
dial.setDirectory(self.settings.get("io.save_segmentation_directory", str(Path.home())))
dial.selectFile(os.path.splitext(os.path.basename(self.settings.image_path))[0] + ".seg")
if not dial.exec_():
return
save_location, _selected_filter, save_class, values = dial.get_result()
self.settings.set("io.save_segmentation_directory", os.path.dirname(str(save_location)))
self.settings.add_path_history(os.path.dirname(str(save_location)))
# self.settings.save_directory = os.path.dirname(str(file_path))
def exception_hook(exception):
QMessageBox.critical(self, "Save error", f"Error on disc operation. Text: {exception}", QMessageBox.Ok)
raise exception
dial = ExecuteFunctionDialog(
save_class.save,
[save_location, self.settings.get_project_info(), values],
text="Save segmentation",
exception_hook=exception_hook,
)
dial.exec()
def save_result(self):
if self.settings.image_path is not None and QMessageBox.Yes == QMessageBox.question(
self, "Copy", "Copy name to clipboard?", QMessageBox.Yes | QMessageBox.No, QMessageBox.Yes
):
clipboard = QGuiApplication.clipboard()
clipboard.setText(os.path.splitext(os.path.basename(self.settings.image_path))[0])
if self.settings.segmentation is None or len(self.settings.sizes) == 1:
QMessageBox.warning(self, "No components", "No components to save")
return
dial = SaveDialog(
io_functions.save_components_dict,
False,
history=self.settings.get_path_history(),
file_mode=QFileDialog.Directory,
)
dial.setDirectory(self.settings.get("io.save_components_directory", str(Path.home())))
dial.selectFile(os.path.splitext(os.path.basename(self.settings.image_path))[0])
if not dial.exec_():
return
res = dial.get_result()
potential_names = self.settings.get_file_names_for_save_result(res.save_destination)
conflict = []
for el in potential_names:
if os.path.exists(el):
conflict.append(el)
if len(conflict) > 0:
# TODO modify because of long lists
conflict_str = "\n".join(conflict)
if QMessageBox.No == QMessageBox.warning(
self,
"Overwrite",
f"Overwrite files:\n {conflict_str}",
QMessageBox.Yes | QMessageBox.No,
QMessageBox.No,
):
self.save_result()
return
self.settings.set("io.save_components_directory", os.path.dirname(str(res.save_destination)))
self.settings.add_path_history(os.path.dirname(str(res.save_destination)))
def exception_hook(exception):
QMessageBox.critical(self, "Save error", f"Error on disc operation. Text: {exception}", QMessageBox.Ok)
dial = ExecuteFunctionDialog(
res.save_class.save,
[res.save_destination, self.settings.get_project_info(), res.parameters],
text="Save components",
exception_hook=exception_hook,
)
dial.exec()
class ComponentCheckBox(QCheckBox):
mouse_enter = Signal(int)
mouse_leave = Signal(int)
def __init__(self, number: int, parent=None):
super().__init__(str(number), parent)
self.number = number
def enterEvent(self, _event):
self.mouse_enter.emit(self.number)
def leaveEvent(self, _event):
self.mouse_leave.emit(self.number)
class ChosenComponents(QWidget):
"""
:type check_box: dict[int, QCheckBox]
"""
check_change_signal = Signal()
mouse_enter = Signal(int)
mouse_leave = Signal(int)
def __init__(self):
super(ChosenComponents, self).__init__()
# self.setLayout(FlowLayout())
self.check_box = dict()
self.check_all_btn = QPushButton("Select all")
self.check_all_btn.clicked.connect(self.check_all)
self.un_check_all_btn = QPushButton("Unselect all")
self.un_check_all_btn.clicked.connect(self.un_check_all)
main_layout = QVBoxLayout()
btn_layout = QHBoxLayout()
btn_layout.addWidget(self.check_all_btn)
btn_layout.addWidget(self.un_check_all_btn)
self.check_layout = FlowLayout()
main_layout.addLayout(btn_layout)
main_layout.addLayout(self.check_layout)
self.setLayout(main_layout)
def other_component_choose(self, num):
check = self.check_box[num]
check.setChecked(not check.isChecked())
def check_all(self):
for el in self.check_box.values():
el.setChecked(True)
def un_check_all(self):
for el in self.check_box.values():
el.setChecked(False)
def remove_components(self):
self.check_layout.clear()
for el in self.check_box.values():
el.deleteLater()
el.stateChanged.disconnect()
el.mouse_leave.disconnect()
el.mouse_enter.disconnect()
self.check_box.clear()
def new_choose(self, num, chosen_components):
self.set_chose(range(1, num + 1), chosen_components)
def set_chose(self, components_index, chosen_components):
chosen_components = set(chosen_components)
self.blockSignals(True)
self.remove_components()
for el in components_index:
check = ComponentCheckBox(el)
if el in chosen_components:
check.setChecked(True)
check.stateChanged.connect(self.check_change)
check.mouse_enter.connect(self.mouse_enter.emit)
check.mouse_leave.connect(self.mouse_leave.emit)
self.check_box[el] = check
self.check_layout.addWidget(check)
self.blockSignals(False)
self.update()
self.check_change_signal.emit()
def check_change(self):
self.check_change_signal.emit()
def change_state(self, num, val):
self.check_box[num].setChecked(val)
def get_state(self, num: int) -> bool:
        # TODO Check what situation creates the report with ID: af9b57f074264169b4353aa1e61d8bc2
if num >= len(self.check_box):
return False
return self.check_box[num].isChecked()
def get_chosen(self):
res = []
for num, check in self.check_box.items():
if check.isChecked():
res.append(num)
return res
def get_mask(self):
res = [0]
for _, check in sorted(self.check_box.items()):
res.append(check.isChecked())
return np.array(res, dtype=np.uint8)
class AlgorithmOptions(QWidget):
def __init__(self, settings: StackSettings, image_view: StackImageView, component_checker):
control_view = image_view.get_control_view()
super().__init__()
self.settings = settings
self.show_result = EnumComboBox(LabelEnum) # QCheckBox("Show result")
self.show_result.set_value(control_view.show_label)
self.opacity = QDoubleSpinBox()
self.opacity.setRange(0, 1)
self.opacity.setSingleStep(0.1)
self.opacity.setValue(control_view.opacity)
self.only_borders = QCheckBox("Only borders")
self.only_borders.setChecked(control_view.only_borders)
self.borders_thick = QSpinBox()
self.borders_thick.setRange(1, 11)
self.borders_thick.setSingleStep(2)
self.borders_thick.setValue(control_view.borders_thick)
# noinspection PyUnresolvedReferences
self.borders_thick.valueChanged.connect(self.border_value_check)
self.execute_in_background_btn = QPushButton("Execute in background")
self.execute_in_background_btn.setToolTip("Run calculation in background. Put result in multiple files panel")
self.execute_btn = QPushButton("Execute")
self.execute_btn.setStyleSheet("QPushButton{font-weight: bold;}")
self.execute_all_btn = QPushButton("Execute all")
self.execute_all_btn.setToolTip(
"Execute in batch mode segmentation with current parameter. " "File list need to be specified in image tab."
)
self.execute_all_btn.setDisabled(True)
self.save_parameters_btn = QPushButton("Save parameters")
self.block_execute_all_btn = False
self.algorithm_choose_widget = AlgorithmChoose(settings, mask_algorithm_dict)
self.algorithm_choose_widget.result.connect(self.execution_done)
self.algorithm_choose_widget.finished.connect(self.execution_finished)
# Copyright (c) 2016, <NAME> <<EMAIL>>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of ciphrtxt nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import time
import json
import hashlib
import mimetypes
from binascii import hexlify, unhexlify
import base64
from ciphrtxt.message import Message, RawMessageHeader
from tornado.httpclient import AsyncHTTPClient, HTTPClient, HTTPRequest
import tornado.gen
from ecpy.curves import curve_secp256k1
from ecpy.point import Point, Generator
from ecpy.ecdsa import ECDSA
from Crypto.Random import random
from Crypto.Cipher import AES
from Crypto.Util import Counter
from threading import Lock
_C = curve_secp256k1
Point.set_curve(_C)
Generator.set_curve(_C)
_G = Generator.init(_C['G'][0], _C['G'][1])
ECDSA.set_generator(_G)
_ecdsa = ECDSA()
_statusPath = 'api/v2/status/'
_server_time = 'api/v2/time/'
_headers_since = 'api/v2/headers?since='
_download_message = 'api/v2/messages/'
_upload_message = 'api/v2/messages/'
_peer_list = 'api/v2/peers/'
_cache_expire_time = 5 # seconds
_high_water = 50
_low_water = 20
# NOTE: encode_multipart_formdata and get_content_type copied from public
# domain code posted at : http://code.activestate.com/recipes/146306/
def encode_multipart_formdata(fields, files):
"""
fields is a sequence of (name, value) elements for regular form fields.
files is a sequence of (name, filename, value) elements for data to be
uploaded as files.
Return (content_type, body) ready for httplib.HTTP instance
"""
BOUNDARY = '----------ThIs_Is_tHe_bouNdaRY_$'
CRLF = '\r\n'
L = []
for (key, value) in fields:
L.append('--' + BOUNDARY)
L.append('Content-Disposition: form-data; name="%s"' % key)
L.append('')
L.append(value)
for (key, filename, value) in files:
# print ('encoding ' + key + ' ' + filename + ' ' + str(value))
filename = filename.encode("utf8")
L.append('--' + BOUNDARY)
L.append(
'Content-Disposition: form-data; name="%s"; filename="%s"' % (
key, filename
)
)
L.append('Content-Type: %s' % get_content_type(filename))
L.append('')
L.append(value)
L.append('--' + BOUNDARY + '--')
L.append('')
body = CRLF.join(L)
content_type = 'multipart/form-data; boundary=%s' % BOUNDARY
return content_type, body
def get_content_type(filename):
# return mimetypes.guess_type(filename)[0] or 'application/octet-stream'
return 'application/octet-stream'
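# Illustrative sketch (not part of the original module): how the multipart helper
# above might be called by hand. The field and file names here are made up for the
# example; every value must be a str so the CRLF join works.
def _example_encode_multipart_formdata():
    fields = [('nonce', '12345'), ('version', '2')]
    files = [('message', 'message.dat', 'raw message payload as text')]
    content_type, body = encode_multipart_formdata(fields, files)
    # content_type carries the boundary; body is ready to use as an HTTP request body
    return content_type, body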
class CTClient (object):
_aclient = None
_sclient = None
def __init__(self):
pass
def __enter__(self):
CTClient._aclient = AsyncHTTPClient(max_clients=100)
CTClient._sclient = HTTPClient()
def __exit__(self, exc_type, exc_value, traceback):
CTClient._aclient.close()
CTClient._aclient = None
CTClient._sclient.close()
CTClient._sclient = None
class OnionHost(object):
def __init__(self, host, port=7754, Pkey=None):
self.host = host
self.port = port
self.Pkey = Pkey
def _baseurl(self):
return 'http://' + self.host + ':' + str(self.port) + '/'
def refresh(self):
req = HTTPRequest(self._baseurl() + _statusPath, method='GET')
r = CTClient._sclient.fetch(req)
if r.code != 200:
return False
pub = json.loads(r.body.decode('UTF-8'))['pubkey']
self.Pkey = Point.decompress(pub.encode('UTF-8'))
return True
def __str__(self):
return 'CT Onion host @ ' + self._baseurl() + ' key = ' + self.Pkey.compress().decode()
def get(self, path, nak=None, callback=None, headers=None, onions=None):
if onions is None:
if nak is not None:
raise(ValueError, 'Using NAK requires Onions route list is provided')
return OnionRequest().get(self._baseurl(), path, callback=callback, headers=headers)
if nak is None:
raise ValueError('Onion routing requires NAK is provided')
return OnionRequest().get(self, path, nak=nak, callback=callback, headers=headers, onions=onions)
def post(self, path, body, nak=None, callback=None, headers=None, onions=None):
if onions is None:
if nak is not None:
raise(ValueError, 'Using NAK requires Onions route list is provided')
return OnionRequest().post(self._baseurl(), path, body, callback=callback, headers=headers)
if nak is None:
raise ValueError('Onion routing requires NAK is provided')
return OnionRequest().post(self, path, body, nak=nak, callback=callback, headers=headers, onions=onions)
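# Illustrative sketch (assumed host name, not part of the original module): a direct,
# non-onion-routed status fetch. With no route list there is no onion wrapping, so no
# network access key (NAK) is required.
def _example_direct_status_fetch():
    with CTClient():
        host = OnionHost('exampleonionhostname.onion')
        if not host.refresh():                  # fetch and cache the host public key
            return None
        return host.get(_statusPath)            # plain HTTP GET through the synchronous client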
class NestedRequest(object):
def __init__(self):
self.callback = None
self.callback_next = None
def _callback(self, resp):
return self.callback(resp, self.callback_next)
def get(self, ohost, path, callback, callback_next, headers=None, nak=None, onions=None):
self.callback = callback
self.callback_next = callback_next
if onions is None:
if nak is not None:
raise(ValueError, 'Using NAK requires Onions route list is provided')
return OnionRequest().get(ohost._baseurl(), path, nak=nak, callback=self._callback, headers=headers, onions=onions)
if nak is None:
raise ValueError('Onion routing requires NAK is provided')
return OnionRequest().get(ohost, path, nak=nak, callback=self._callback, headers=headers, onions=onions)
def post(self, ohost, path, body, callback, callback_next, headers=None, nak=None, onions=None):
self.callback = callback
self.callback_next = callback_next
if onions is None:
if nak is not None:
raise(ValueError, 'Using NAK requires Onions route list is provided')
return OnionRequest().post(ohost._baseurl(), path, body, nak=nak, callback=self._callback, headers=headers, onions=onions)
if nak is None:
raise ValueError('Onion routing requires NAK is provided')
return OnionRequest().post(ohost, path, body, nak=nak, callback=self._callback, headers=headers, onions=onions)
class OnionRequest(object):
def __init__(self):
self.callback = None
self.reply_pkey = None
self.reply_Pkey = None
self.reply_ohost = None
def _format_get(self, path, headers):
self.reply_pkey = random.randint(1,_C['n']-1)
self.reply_Pkey = _G * self.reply_pkey
r = {}
r['local'] = True
r['url'] = path
r['action'] = 'GET'
r['headers'] = headers
r['replykey'] = self.reply_Pkey.compress().decode()
return r
def _format_post(self, path, body, headers):
self.reply_pkey = random.randint(1,_C['n']-1)
self.reply_Pkey = _G * self.reply_pkey
r = {}
r['local'] = True
r['url'] = path
r['action'] = 'POST'
r['headers'] = headers
r['body'] = str(body)
r['replykey'] = self.reply_Pkey.compress().decode()
return r
def _wrap(self, onion, req):
if not req['local']:
req['body'] = base64.b64encode(req['body']).decode()
session_pkey = random.randint(1,_C['n']-1)
session_Pkey = _G * session_pkey
if onion.Pkey is None:
if not onion.refresh():
return None
ECDH = onion.Pkey * session_pkey
keybin = hashlib.sha256(ECDH.compress()).digest()
iv = random.randint(0,(1 << 128)-1)
ivbin = unhexlify('%032x' % iv)
counter = Counter.new(128, initial_value=iv)
cryptor = AES.new(keybin, AES.MODE_CTR, counter=counter)
# print('req = ' + str(req))
# print('req type = ' + str(type(req)))
ciphertext = cryptor.encrypt(json.dumps(req))
r = {}
r['local'] = False
r['host'] = onion.host
r['port'] = onion.port
r['pubkey'] = session_Pkey.compress().decode()
r['body'] = ivbin+ciphertext
return r
def _decrypt_reply(self, ohost, text):
d_bd = base64.b64decode(text)
sig = (int(hexlify(d_bd[0:32]),16), int(hexlify(d_bd[32:64]),16))
if not _ecdsa.verify(ohost.Pkey, sig, d_bd[64:]):
return None
d_ecdh = ohost.Pkey * self.reply_pkey
d_keybin = hashlib.sha256(d_ecdh.compress()).digest()
d_ivcount = int(hexlify(d_bd[64:80]),16)
d_counter = Counter.new(128,initial_value=d_ivcount)
d_cryptor = AES.new(d_keybin, AES.MODE_CTR, counter=d_counter)
d_plaintext = d_cryptor.decrypt(d_bd[80:])
return d_plaintext.decode('UTF-8')
def _nakit(self, nak, request):
sig = nak.sign(request)
return unhexlify('%064x' % sig[0]) + unhexlify('%064x' % sig[1])
def _callback(self, resp):
self.callback(resp.body)
def _decrypt_callback(self, resp):
if self.callback is None:
raise ValueError('_decrypt_callback called with no chain callback')
d_resp = self._decrypt_reply(self.reply_ohost, resp.body)
self.callback(d_resp)
def _issue(self, ohost, path, body=None, rtype='GET', nak=None, callback=None, onions=None, headers=None):
if isinstance(ohost, OnionHost):
if nak is None:
raise ValueError('Onion routing requires network access key')
if rtype.lower() == 'get':
inner = self._format_get(path, headers)
else:
inner = self._format_post(path, body, headers)
outer = self._wrap(ohost, inner)
if outer is None:
print('wrap failed for host' + str(ohost))
return None
for o in reversed(onions):
inner = outer
outer = self._wrap(o,inner)
if outer is None:
print('wrap failed for host' + str(ohost))
return None
naksig = self._nakit(nak, outer['body'])
body = nak.pubkeybin() + naksig + outer['body']
body = base64.b64encode(body).decode()
url = 'http://' + outer['host'] + ':' + str(outer['port']) + '/onion/' + outer['pubkey']
req = HTTPRequest(url, method='POST', body=body, headers=headers)
if callback is None:
r = CTClient._sclient.fetch(req)
if r.code != 200:
return None
return self._decrypt_reply(ohost,r.body)
else:
self.callback = callback
self.reply_ohost = ohost
return CTClient._aclient.fetch(req, self._decrypt_callback)
else:
if onions is not None:
print('ohost type = ' + str(type(ohost)))
raise ValueError('Cannot onion route to non-onion target')
url = ohost + path
if rtype.lower() == 'get':
# print('sending GET to ' + url)
req = HTTPRequest(url, method='GET', headers=headers)
if callback is None:
r = CTClient._sclient.fetch(req)
if r.code != 200:
return None
# print('return 200')
return r.body
else:
self.callback = callback
# print('url = ' + url + ', callback = ' + str(callback))
return CTClient._aclient.fetch(req, callback=self._callback)
else:
# print('sending POST to ' + url)
req = HTTPRequest(url, method='POST', body=body, headers=headers)
if callback is None:
r = CTClient._sclient.fetch(req)
if r.code != 200:
return None
# print('return 200')
return r.body
else:
self.callback = callback
return CTClient._aclient.fetch(req, self._callback)
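# Illustrative sketch (not part of the original client): the key agreement used by
# _wrap/_decrypt_reply above, shown as a standalone round trip. Both ends derive the
# same AES key from an ECDH shared secret and run AES in CTR mode with a random
# 128-bit counter start, mirroring the calls made elsewhere in this module.
def _example_ecdh_ctr_roundtrip(plaintext=b'hello onion'):
    server_priv = random.randint(1, _C['n'] - 1)
    server_pub = _G * server_priv                   # what OnionHost.refresh() retrieves
    client_priv = random.randint(1, _C['n'] - 1)
    client_pub = _G * client_priv                   # sent as r['pubkey'] by _wrap
    key_client = hashlib.sha256((server_pub * client_priv).compress()).digest()
    key_server = hashlib.sha256((client_pub * server_priv).compress()).digest()
    assert key_client == key_server                 # both sides share the same key
    iv = random.randint(0, (1 << 128) - 1)
    enc = AES.new(key_client, AES.MODE_CTR, counter=Counter.new(128, initial_value=iv))
    dec = AES.new(key_server, AES.MODE_CTR, counter=Counter.new(128, initial_value=iv))
    return dec.decrypt(enc.encrypt(plaintext)) == plaintext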
"""Function that routes symbols to the correct API provider.
"""
import datetime
import random
import re
from logging import critical, debug, error, info, warning
import pandas as pd
import schedule
from cachetools import TTLCache, cached
from cg_Crypto import cg_Crypto
from IEX_Symbol import IEX_Symbol
from Symbol import Coin, Stock, Symbol
class Router:
STOCK_REGEX = "(?:^|[^\\$])\\$([a-zA-Z.]{1,6})"
CRYPTO_REGEX = "[$]{2}([a-zA-Z]{1,20})"
trending_count = {}
def __init__(self):
self.stock = IEX_Symbol()
self.crypto = cg_Crypto()
schedule.every().hour.do(self.trending_decay)
def trending_decay(self, decay=0.5):
"""Decays the value of each trending stock by a multiplier"""
t_copy = {}
dead_keys = []
if self.trending_count:
t_copy = self.trending_count.copy()
for key in t_copy.keys():
if t_copy[key] < 0.01:
                    # This just makes sure we're not keeping around keys that haven't been called in a very long time.
dead_keys.append(key)
else:
t_copy[key] = t_copy[key] * decay
for dead in dead_keys:
t_copy.pop(dead)
self.trending_count = t_copy.copy()
info("Decayed trending symbols.")
def find_symbols(self, text: str) -> list[Symbol]:
"""Finds stock tickers starting with a dollar sign, and cryptocurrencies with two dollar signs
in a blob of text and returns them in a list.
Parameters
----------
text : str
Blob of text.
Returns
-------
list[Symbol]
List of stock symbols as Symbol objects
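        Example
        -------
        Illustrative only; the tickers are assumed to be present in the cached
        symbol lists::
            router = Router()
            router.find_symbols("I bought $tsla and $$btc")
            # -> [Stock("tsla"), Coin("btc")]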
"""
schedule.run_pending()
symbols = []
stocks = set(re.findall(self.STOCK_REGEX, text))
for stock in stocks:
if stock.upper() in self.stock.symbol_list["symbol"].values:
symbols.append(Stock(stock))
else:
info(f"{stock} is not in list of stocks")
coins = set(re.findall(self.CRYPTO_REGEX, text))
for coin in coins:
if coin.lower() in self.crypto.symbol_list["symbol"].values:
symbols.append(Coin(coin.lower()))
else:
info(f"{coin} is not in list of coins")
if symbols:
info(symbols)
for symbol in symbols:
self.trending_count[symbol.tag] = (
self.trending_count.get(symbol.tag, 0) + 1
)
return symbols
def status(self, bot_resp) -> str:
"""Checks for any issues with APIs.
Returns
-------
str
Human readable text on status of the bot and relevant APIs
"""
stats = f"""
Bot Status:
{bot_resp}
Stock Market Data:
{self.stock.status()}
Cryptocurrency Data:
{self.crypto.status()}
"""
warning(stats)
return stats
def inline_search(self, search: str, matches: int = 5) -> pd.DataFrame:
"""Searches based on the shortest symbol that contains the same string as the search.
Should be very fast compared to a fuzzy search.
Parameters
----------
search : str
String used to match against symbols.
Returns
-------
        pd.DataFrame
            DataFrame of the best matching symbols with a `price_reply` column added.
"""
df = pd.concat([self.stock.symbol_list, self.crypto.symbol_list])
df = df[
df["description"].str.contains(search, regex=False, case=False)
].sort_values(by="type_id", key=lambda x: x.str.len())
symbols = df.head(matches)
symbols["price_reply"] = symbols["type_id"].apply(
lambda sym: self.price_reply(self.find_symbols(sym))[0]
)
return symbols
def price_reply(self, symbols: list[Symbol]) -> list[str]:
"""Returns current market price or after hours if its available for a given stock symbol.
Parameters
----------
symbols : list
List of stock symbols.
Returns
-------
        list[str]
            List of human readable, markdown formatted strings with each
            symbol's price and movement.
"""
replies = []
for symbol in symbols:
info(symbol)
if isinstance(symbol, Stock):
replies.append(self.stock.price_reply(symbol))
elif isinstance(symbol, Coin):
replies.append(self.crypto.price_reply(symbol))
else:
info(f"{symbol} is not a Stock or Coin")
return replies
def dividend_reply(self, symbols: list) -> list[str]:
"""Returns the most recent, or next dividend date for a stock symbol.
Parameters
----------
symbols : list
List of stock symbols.
Returns
-------
        list[str]
            List of human readable formatted strings with each symbol's
            dividend dates.
"""
replies = []
for symbol in symbols:
if isinstance(symbol, Stock):
replies.append(self.stock.dividend_reply(symbol))
elif isinstance(symbol, Coin):
replies.append("Cryptocurrencies do no have Dividends.")
else:
debug(f"{symbol} is not a Stock or Coin")
return replies
def news_reply(self, symbols: list) -> list[str]:
"""Gets recent english news on stock symbols.
Parameters
----------
symbols : list
List of stock symbols.
Returns
-------
        list[str]
            List of human readable, markdown formatted strings with each
            symbol's news.
"""
replies = []
for symbol in symbols:
if isinstance(symbol, Stock):
replies.append(self.stock.news_reply(symbol))
elif isinstance(symbol, Coin):
# replies.append(self.crypto.news_reply(symbol))
replies.append(
"News is not yet supported for cryptocurrencies. If you have any suggestions for news sources please contatct @MisterBiggs"
)
else:
debug(f"{symbol} is not a Stock or Coin")
return replies
def info_reply(self, symbols: list) -> list[str]:
"""Gets information on stock symbols.
Parameters
----------
symbols : list[str]
List of stock symbols.
Returns
-------
        list[str]
            List of human readable formatted strings with each symbol's
            information.
"""
replies = []
for symbol in symbols:
if isinstance(symbol, Stock):
replies.append(self.stock.info_reply(symbol))
elif isinstance(symbol, Coin):
replies.append(self.crypto.info_reply(symbol))
else:
debug(f"{symbol} is not a Stock or Coin")
return replies
def intra_reply(self, symbol: Symbol) -> pd.DataFrame:
"""Returns price data for a symbol since the last market open.
Parameters
----------
symbol : str
Stock symbol.
Returns
-------
pd.DataFrame
Returns a timeseries dataframe with high, low, and volume data if its available.
Otherwise returns empty pd.DataFrame.
"""
if isinstance(symbol, Stock):
return self.stock.intra_reply(symbol)
elif isinstance(symbol, Coin):
return self.crypto.intra_reply(symbol)
else:
debug(f"{symbol} is not a Stock or Coin")
return pd.DataFrame()
def chart_reply(self, symbol: Symbol) -> pd.DataFrame:
"""Returns price data for a symbol of the past month up until the previous trading days close.
Also caches multiple requests made in the same day.
Parameters
----------
symbol : str
Stock symbol.
Returns
-------
pd.DataFrame
Returns a timeseries dataframe with high, low, and volume data if its available.
Otherwise returns empty pd.DataFrame.
"""
if isinstance(symbol, Stock):
return self.stock.chart_reply(symbol)
elif isinstance(symbol, Coin):
return self.crypto.chart_reply(symbol)
else:
debug(f"{symbol} is not a Stock or Coin")
return pd.DataFrame()
def stat_reply(self, symbols: list[Symbol]) -> list[str]:
"""Gets key statistics for each symbol in the list
Parameters
----------
symbols : list[str]
List of stock symbols
Returns
-------
        list[str]
            List of human readable formatted strings with each symbol's
            key statistics.
"""
replies = []
for symbol in symbols:
if isinstance(symbol, Stock):
replies.append(self.stock.stat_reply(symbol))
elif isinstance(symbol, Coin):
replies.append(self.crypto.stat_reply(symbol))
else:
debug(f"{symbol} is not a Stock or Coin")
return replies
def cap_reply(self, symbols: list[Symbol]) -> list[str]:
"""Gets market cap for each symbol in the list
Parameters
----------
symbols : list[str]
List of stock symbols
Returns
-------
        list[str]
            List of human readable formatted strings with each symbol's
            market cap.
"""
replies = []
for symbol in symbols:
if isinstance(symbol, Stock):
replies.append(self.stock.cap_reply(symbol))
elif isinstance(symbol, Coin):
replies.append(self.crypto.cap_reply(symbol))
else:
debug(f"{symbol} is not a Stock or Coin")
return replies
def spark_reply(self, symbols: list[Symbol]) -> list[str]:
"""Gets change for each symbol and returns it in a compact format
Parameters
----------
symbols : list[str]
List of stock symbols
Returns
-------
list[str]
List of human readable strings.
"""
replies = []
for symbol in symbols:
if isinstance(symbol, Stock):
replies.append(self.stock.spark_reply(symbol))
elif isinstance(symbol, Coin):
replies.append(self.crypto.spark_reply(symbol))
else:
debug(f"{symbol} is not a Stock or Coin")
return replies
@cached(cache=TTLCache(maxsize=1024, ttl=600))
def trending(self) -> str:
"""Checks APIs for trending symbols.
Returns
-------
        str
            Preformatted string to be sent to the user.
"""
stocks = self.stock.trending()
coins = self.crypto.trending()
reply = ""
if self.trending_count:
reply += "🔥Trending on the Stock Bot:\n`"
reply += "━" * len("Trending on the Stock Bot:") + "`\n"
sorted_trending = [
s[0]
for s in sorted(self.trending_count.items(), key=lambda item: item[1])
][::-1][0:5]
for t in sorted_trending:
reply += self.spark_reply(self.find_symbols(t))[0] + "\n"
if stocks:
reply += "\n\n💵Trending Stocks:\n`"
reply += "━" * len("Trending Stocks:") + "`\n"
for stock in stocks:
reply += stock + "\n"
if coins:
reply += "\n\n🦎Trending Crypto:\n`"
reply += "━" * len("Trending Crypto:") + "`\n"
for coin in coins:
reply += coin + "\n"
if "`$GME" in reply:
reply = reply.replace("🔥", "🦍")
if reply:
return reply
else:
warning("Failed to collect trending data.")
return "Trending data is not currently available."
def random_pick(self) -> str:
choice = random.choice(
list(self.stock.symbol_list["description"])
+ list(self.crypto.symbol_list["description"])
)
hold = (
datetime.date.today() + datetime.timedelta(random.randint(1, 365))
).strftime("%b %d, %Y")
return f"{choice}\nBuy and hold until: {hold}"
def batch_price_reply(self, symbols: list[Symbol]) -> list[str]:
"""Returns current market price or after hours if its available for a given stock symbol.
Parameters
----------
symbols : list
List of stock symbols.
Returns
-------
        list[str]
            List of human readable, markdown formatted strings with each
            symbol's price and movement.
"""
        replies = []
import discord
import asyncio
import random
import steam
from steam.steamid import SteamId
from steam.steamprofile import SteamProfile
from steam.steamaccountuniverse import SteamAccountUniverse
from steam.steamaccounttype import SteamAccountType
from discord.ext import commands
from utils import checks
from mods.cog import Cog
code = "```py\n{0}\n```"
class Verification(Cog):
def __init__(self, bot):
super().__init__(bot)
self.cursor = bot.mysql.cursor
self.escape = bot.escape
self.bot.loop.create_task(self.verification_task())
async def remove_verification(self, server, idk=None):
role = discord.utils.get(server.roles, name='Awaiting Approval')
if role:
try:
await self.bot.delete_role(server, role)
except:
pass
sql = 'DELETE FROM `verification` WHERE server={0}'
sql = sql.format(server.id)
self.cursor.execute(sql)
self.cursor.commit()
sql = 'DELETE FROM `verification_queue` WHERE server={0}'
sql = sql.format(server.id)
self.cursor.execute(sql)
self.cursor.commit()
if idk is None:
try:
await self.bot.send_message(server.owner, ":warning: One of your server administrators (or you) have enabled approval/verification on user join.\n\nAdministrator permission was taken away from me making the feature unusable, I need Administrator permission to make/add a role to mute on join.\n\n`The system has been automatically disabled, re-enable anytime if you please.`")
except:
pass
@commands.group(pass_context=True, aliases=['onjoinverify', 'approval'], invoke_without_command=True, no_pm=True)
@checks.admin_or_perm(manage_server=True)
async def verification(self, ctx, channel:discord.Channel=None, *, mentions:str=None):
perms = ctx.message.server.me.permissions_in(ctx.message.channel)
if perms.manage_roles is False or perms.manage_channels is False:
if perms.administrator is False:
await self.bot.say(":warning: `I need Administrator permission to make/add a role to mute on join`")
return
if channel is None:
channel = ctx.message.channel
sql = 'SELECT * FROM `verification` WHERE server={0}'
sql = sql.format(ctx.message.server.id)
result = self.cursor.execute(sql).fetchall()
if len(result) == 0:
if mentions is None:
sql = "INSERT INTO `verification` (`server`, `channel`) VALUES (%s, %s)"
self.cursor.execute(sql, (ctx.message.server.id, channel.id))
self.cursor.commit()
await self.bot.say(":white_check_mark: Enabled user approval/verification on join, all requests will go to {0} (`verification #<discord_channel>` to change)!".format(channel.mention))
else:
if len(ctx.message.mentions) == 0:
await self.bot.say("invalid mention")
return
sql = "INSERT INTO `verification` (`server`, `channel`, `mentions`) VALUES (%s, %s, %s)"
mention_ids = []
mention_names = []
for mention in ctx.message.mentions:
mention_ids.append(mention.id)
mention_names.append(mention.name)
self.cursor.execute(sql, (ctx.message.server.id, channel.id, ' '.join(mention_ids)))
self.cursor.commit()
await self.bot.say(":white_check_mark: Enabled user approval/verification on join, all requests will go to {0} (`verification <#discord_channel>` to change) and mention `{0}`!".format(channel.mention, ', '.join(mention_names)))
permissions = discord.Permissions()
permissions.read_messages = True
try:
await self.bot.create_role(ctx.message.server, name='Awaiting Approval', color=discord.Colour(int("FF0000", 16)), permissions=permissions)
except Exception as e:
print(e)
await self.bot.say(":warning: For some reason I couldn't create the \"Awaiting Approval\" role and users won't be muted, please create it (same name) and disable all the permissions you don't want unapproved-users to have.\nMake sure I have the administrator permission!")
elif channel is None:
sql = 'UPDATE `verification` SET channel={0} WHERE server={1}'
sql = sql.format(channel.id, ctx.message.server.id)
self.cursor.execute(sql)
self.cursor.commit()
await self.bot.say(":white_check_mark: Set approval/verification channel to {0}".format(channel.mention))
else:
await self.bot.say(':warning: You are about to disable member verification/approval on join, type `yes` to proceed.')
while True:
response = await self.bot.wait_for_message(timeout=15, author=ctx.message.author, channel=ctx.message.channel)
if response is None or response.content != 'yes':
await self.bot.say('**Aborting**')
return
else:
break
await self.remove_verification(ctx.message.server, True)
try:
role = discord.utils.get(ctx.message.server.roles, name='Awaiting Approval')
if role != None:
await self.bot.delete_role(ctx.message.server, role)
except discord.errors.Forbidden:
await self.bot.say("could not remove role, you took my perms away :(")
role2 = discord.utils.get(ctx.message.server.roles, name='Approved')
if role2 != None:
try:
await self.bot.delete_role(ctx.message.server, role2)
except:
pass
await self.bot.say(":negative_squared_cross_mark: **Disabled** user approval on join")
@verification.command(name='mention', aliases=['mentions'], pass_context=True, invoke_without_command=True, no_pm=True)
@checks.admin_or_perm(manage_server=True)
async def verification_mention(self, ctx, *mentions:str):
perms = ctx.message.server.me.permissions_in(ctx.message.channel)
if perms.manage_roles is False or perms.manage_channels is False:
if perms.administrator is False:
await self.bot.say(":warning: `I need Administrator permission to make/add a role to mute on join`")
return
if len(ctx.message.mentions) == 0 and '@everyone' not in mentions and '@here' not in mentions:
await self.bot.say(':no_entry: `Invalid mention(s).`')
return
sql = 'SELECT * FROM `verification` WHERE server={0}'
sql = sql.format(ctx.message.server.id)
result = self.cursor.execute(sql).fetchall()
if len(result) == 0:
await self.bot.say(":no_entry: This server does not have approval/verification turned on (`verification <#discord_channel>` to do so)!!!")
return
if len(mentions) == 0:
sql = 'UPDATE `verification` SET mentions=NULL WHERE server={0}'
sql = sql.format(ctx.message.server.id)
self.cursor.execute(sql)
self.cursor.commit()
await self.bot.say(":negative_squared_cross_mark: Disabled/Removed mentions on user join for approval")
else:
mention_ids = []
mention_names = []
everyone = False
for mention in mentions:
if mention == '@everyone':
mention_ids.append('@everyone')
elif mention == '@here':
mention_ids.append('@here')
for mention in ctx.message.mentions:
mention_ids.append(mention.id)
mention_names.append(mention.name)
sql = 'SELECT mentions FROM `verification` WHERE server={0}'
sql = sql.format(ctx.message.server.id)
mention_results = self.cursor.execute(sql).fetchall()
update = False
if mention_results[0]['mentions'] != None:
update = True
things = mention_results[0]['mentions'].split()
for x in things:
mention_ids.append(x)
sql = "UPDATE `verification` SET mentions={0} WHERE server={1}"
sql = sql.format(self.escape(' '.join(mention_ids)), ctx.message.server.id)
self.cursor.execute(sql)
self.cursor.commit()
if update:
await self.bot.say(":white_check_mark: Updated mentions to include `{0}` on user join for approval".format(', '.join(mention_names)))
else:
await self.bot.say(":white_check_mark: Set `{0}` to be mentioned on user join for approval".format(', '.join(mention_names)))
@commands.group(pass_context=True, invoke_without_command=True, no_pm=True)
@checks.mod_or_perm(manage_server=True)
async def verify(self, ctx, *users:str):
perms = ctx.message.server.me.permissions_in(ctx.message.channel)
if perms.manage_roles is False or perms.manage_channels is False:
if perms.administrator is False:
await self.bot.say(":warning: `I need Administrator permission to make/add a role to mute on join`")
return
if len(users) == 0:
await self.bot.say("pls input users to verify thx")
return
sql = 'SELECT * FROM `verification` WHERE server={0}'
sql = sql.format(ctx.message.server.id)
result = self.cursor.execute(sql).fetchall()
if len(result) == 0:
await self.bot.say(":no_entry: This server does not have approval/verification turned **on** (`verification <#discord_channel>` to do so)!!!")
return
role = discord.utils.get(ctx.message.server.roles, name="Awaiting Approval")
count = 0
count2 = 0
discord_user = None
for user in users:
if user.isdigit():
user = int(user)
sql = 'SELECT * FROM `verification_queue` WHERE server={0} AND id={1}'
sql = sql.format(ctx.message.server.id, user)
result = self.cursor.execute(sql).fetchall()
if len(result) == 0:
await self.bot.say(":warning: `{0}` is not in the verification queue.".format(user))
if len(users) > 1:
continue
else:
return
sql = 'DELETE FROM `verification_queue` WHERE server={0} AND id={1}'
sql = sql.format(ctx.message.server.id, user)
self.cursor.execute(sql)
self.cursor.commit()
discord_user = discord.Server.get_member(ctx.message.server, user_id=str(result[count]['user']))
count += 1
else:
if len(ctx.message.mentions) == 0:
await self.bot.say("If you're not gonna use approval id, atleast mention correctly!")
return
for x in ctx.message.mentions:
if count == len(ctx.message.mentions):
break
sql = 'SELECT * FROM `verification_queue` WHERE server={0} AND user={1}'
sql = sql.format(ctx.message.server.id, x.id)
result = self.cursor.execute(sql).fetchall()
if len(result) == 0:
await self.bot.say(":warning: `{0}` is not in the verification queue.".format(user))
if len(users) > 1:
continue
else:
return
sql = 'DELETE FROM `verification_queue` WHERE server={0} AND user={1}'
sql = sql.format(ctx.message.server.id, x.id)
self.cursor.execute(sql)
self.cursor.commit()
discord_user = discord.Server.get_member(ctx.message.server, user_id=str(result[count2]['user']))
count2 += 1
if discord_user is None:
continue
try:
await self.bot.remove_roles(discord_user, role)
except Exception as e:
await self.bot.say(code.format(e))
await self.bot.say(":warning: {0} was removed from the queue however his role could not be removed because I do not have Administrator permissions.\nPlease remove the role manually and give me **Administrator**.".format(user))
return
role = discord.utils.get(ctx.message.server.roles, name='Approved')
if role != None:
try:
await self.bot.add_roles(discord_user, role)
except:
pass
await self.bot.say(":white_check_mark: Removed `{0}` from queue!".format(user))
queue_removed_msg = 'You have been approved/verified for `{0}` and can now message!'.format(ctx.message.server.name)
await self.bot.send_message(discord_user, queue_removed_msg)
@verify.command(name='list', pass_context=True, invoke_without_command=True, no_pm=True)
async def verify_list(self, ctx):
perms = ctx.message.server.me.permissions_in(ctx.message.channel)
if perms.manage_roles is False or perms.manage_channels is False:
if perms.administrator is False:
await self.bot.say(":warning: `I need Administrator permission to make/add a role to mute on join`")
return
sql = 'SELECT * FROM `verification` WHERE server={0}'
sql = sql.format(ctx.message.server.id)
result = self.cursor.execute(sql).fetchall()
if len(result) == 0:
await self.bot.say(":no_entry: This server does not have approval/verification turned on (`verification <#discord_channel>` to do so)!!!")
return
sql = 'SELECT * FROM `verification_queue` WHERE server={0}'
sql = sql.format(ctx.message.server.id)
result = self.cursor.execute(sql).fetchall()
if len(result) == 0:
await self.bot.say(":no_entry: `There are no users in the verification/approval queue`")
return
users = []
for s in result:
user = discord.Server.get_member(ctx.message.server, user_id=str(s['user']))
if user is None:
continue
users.append('{0}#{1} ({2})'.format(user.name, user.discriminator, str(s['id'])))
await self.bot.say("**{0} Users in Queue**\n`{1}`".format(len(users), ', '.join(users)))
# steam_regex = r"^(http|https|)(\:\/\/|)steamcommunity\.com\/id\/(.*)$"
@verify.command(name='check', pass_context=True, aliases=['steam', 'link'])
async def verify_check(self, ctx, stem:str):
try:
if ctx.message.channel.is_private is False:
await self.bot.say(':no_entry: `Private Message only.`')
return
sql = 'SELECT * FROM `verification_queue` WHERE user={0}'
sql = sql.format(ctx.message.author.id)
result = self.cursor.execute(sql).fetchall()
if len(result) == 0:
await self.bot.say(':no_entry: You are not in the verification queue for any server.')
return
server_id = result[0]['server']
sql = 'SELECT * FROM `verification` WHERE server={0}'
sql = sql.format(server_id)
result = self.cursor.execute(sql).fetchall()
if len(result) == 0:
await self.bot.say(":no_entry: Server you are in queue for disabled verification.")
return
sql = 'SELECT * FROM `verification_steam` WHERE server={0} AND user={1}'
sql = sql.format(server_id, ctx.message.author.id)
result = self.cursor.execute(sql).fetchall()
if len(result) != 0:
await self.bot.say(":no_entry: You've already verified your steam account!")
return
sql = 'SELECT id,server FROM `verification_queue` WHERE server={0} AND user={1}'
sql = sql.format(server_id, ctx.message.author.id)
result = self.cursor.execute(sql).fetchall()
if len(result) == 0:
await self.bot.say(":warning: `{0}` is not in the verification queue.".format(ctx.message.author))
return
verification_id = str(result[0]['id'])
steamId = None
steamProfile = None
if steamId is None:
steamId = SteamId.fromSteamId("{0}".format(stem))
if steamId is None:
steamId = SteamId.fromSteamId3(stem)
if steamId is None:
steamId = SteamId.fromSteamId64(stem)
if steamId is None:
steamId = SteamId.fromProfileUrl(stem)
if steamId is None:
steamProfile = SteamProfile.fromCustomProfileUrl(stem)
if steamProfile is None:
await self.bot.say("`:no_entry: `Bad Steam ID/64/URL`")
return
steamId = steamProfile.steamId
else:
steamProfile = SteamProfile.fromSteamId(steamId)
if verification_id in steamProfile.displayName:
sql = 'INSERT INTO `verification_steam` (`user`, `server`, `steam`, `id`) VALUES (%s, %s, %s, %s)'
self.cursor.execute(sql, (ctx.message.author.id, server_id, steamId.profileUrl, verification_id))
self.cursor.commit()
await self.bot.say(':white_check_mark: `{0}` steam profile submitted and passed steam name check, awaiting moderator approval.'.format(ctx.message.author))
else:
                await self.bot.say(":warning: **{0}** is not in the steam account's name.".format(verification_id))
except Exception as e:
await self.bot.say(code.format(e))
async def verification_task(self):
if self.bot.shard_id != 0:
return
while True:
sql = 'SELECT * FROM `verification_steam`'
result = self.cursor.execute(sql).fetchall()
if len(result) == 0:
await asyncio.sleep(60)
continue
for s in result:
server = self.bot.manager.get_server(str(s['server']))
if server:
user = server.get_member(str(s['user']))
if user is None:
continue
sql = 'SELECT channel FROM `verification` WHERE server={0}'
sql = sql.format(server.id)
channel = server.get_channel(str(self.cursor.execute(sql).fetchall()[0]['channel']))
msg = '**Steam Account Check**\n`{0} (Verification ID: {1})` has submitted their steam profile and passed the name check.\n`Steam Profile:` {2}'.format(user, s['id'], s['steam'])
await self.bot.send_message(channel, msg)
sql = 'DELETE FROM `verification_steam` WHERE server={0} AND user={1}'
sql = sql.format(server.id, user.id)
self.cursor.execute(sql)
self.cursor.commit()
await asyncio.sleep(60)
async def on_member_join(self, member):
try:
if member.bot:
return
server = member.server
sql = 'SELECT * FROM `verification` WHERE server={0}'
sql = sql.format(server.id)
result = self.cursor.execute(sql).fetchall()
if len(result) == 0:
return
channel = server.get_channel(str(result[0]['channel']))
if channel is None:
raise discord.errors.NotFound
perms = server.me.permissions_in(channel)
if perms.manage_roles is False or perms.manage_channels is False:
if perms.administrator is False:
await self.remove_verification(server)
return
sql = "INSERT INTO `verification_queue` (`user`, `server`, `id`) VALUES (%s, %s, %s)"
rand = random.randint(0, 99999)
self.cursor.execute(sql, (member.id, server.id, rand))
self.cursor.commit()
role = discord.utils.get(server.roles, name='Awaiting Approval')
await self.bot.add_roles(member, role)
for s in server.channels:
perms = member.permissions_in(s)
if perms.read_messages is False:
continue
overwrite = discord.PermissionOverwrite()
overwrite.send_messages = False
overwrite.read_messages = False
await self.bot.edit_channel_permissions(s, role, overwrite)
msg = ''
if result[0]['mentions']:
for x in result[0]['mentions'].split(' '):
if 'everyone' | |
aN, L, b0, b1, ..., bN).
L must be a power of two.
axis: the "L" axis above, aka the axis over which to do the
Hadamard transform. All other dimensions are left alone;
data on those dimension do not interact.
normalize: Whether to normalize the results such that applying
the transform twice returns to the original input
value.
method:
'one': Original reshape to [2]*ll version
'two': Deal with TF "UnimplementedError: SliceOp : Unhandled input dimensions" error...
'c': Use C++ FWH Op.
Returns:
ret: transformed tensor with same shape as x. Returned
tensor is always float even if input was int.
Tests:
>>> in_x = tf.placeholder('float32')
>>> in_x
<tf.Tensor 'Placeholder:0' shape=<unknown> dtype=float32>
>>> sess = tf.InteractiveSession()
Wikipedia case:
>>> x = np.array([1,0,1,0,0,1,1,0])
>>> sess.run(tf_fast_walsh_hadamard(in_x, 0, False), feed_dict={in_x: x})
array([ 4., 2., 0., -2., 0., 2., 0., 2.], dtype=float32)
>>> sess.run(tf_fast_walsh_hadamard(in_x, 0, False, method='two'), feed_dict={in_x: x})
array([ 4., 2., 0., -2., 0., 2., 0., 2.], dtype=float32)
>>> sess.run(tf_fast_walsh_hadamard(tf_fast_walsh_hadamard(in_x, 0), 0), feed_dict={in_x: x})
array([ 1., 0., 1., 0., 0., 1., 1., 0.], dtype=float32)
Verify equivalence with numpy approach:
>>> np.random.seed(123)
>>> x = np.random.uniform(0, 1, (3, 64, 5))
>>> h_np = np_fast_walsh_hadamard(x, 1)
>>> h_tf_ = tf_fast_walsh_hadamard(in_x, 1)
>>> h_tf2_ = tf_fast_walsh_hadamard(in_x, 1, method='two')
>>> h_tf = sess.run(h_tf_, feed_dict={in_x: x})
>>> h_tf2 = sess.run(h_tf2_, feed_dict={in_x: x})
>>> x.shape
(3, 64, 5)
>>> h_np.shape
(3, 64, 5)
>>> h_tf.shape
(3, 64, 5)
>>> h_tf2.shape
(3, 64, 5)
>>> abs(h_np - h_tf).max() < 1e-6
True
>>> abs(h_np - h_tf2).max() < 1e-6
True
Try a few other shapes / axes
>>> sess.run(tf_fast_walsh_hadamard(in_x, 0), feed_dict={in_x: x[0]}).shape == x[0].shape
True
>>> sess.run(tf_fast_walsh_hadamard(in_x, 1), feed_dict={in_x: x[:, :, 0]}).shape == x[:, :, 0].shape
True
>>> sess.run(tf_fast_walsh_hadamard(in_x, 0), feed_dict={in_x: x[0, :, 0]}).shape == x[0, :, 0].shape
True
'''
orig_shape = tf.shape(in_x)
h_dim = orig_shape[axis]
h_dim_exp = tf.cast(tf.round(tf.log(tf.to_float(h_dim)) / np.log(2)), 'int32')
assert_pow2 = tf.assert_equal(h_dim, tf.pow(2, h_dim_exp),
message='hadamard can only be computed over axis with size that is a power of two')
with tf.control_dependencies([assert_pow2]):
working_shape_pre = tf.expand_dims(tf.reduce_prod(orig_shape[:axis]), axis=0) # reduce_prod of empty array is 1
working_shape_post = tf.expand_dims(tf.reduce_prod(orig_shape[axis + 1:]), axis=0) # reduce_prod of empty array is 1
ii = tf.constant(0)
assert method in ('one', 'two', 'c')
if method == 'one':
# expand to working dims [pre, 2, 2, 2, ..., 2, 2, post]
working_shape_mid = tf.tile([2], [h_dim_exp])
working_shape = tf.concat((working_shape_pre, working_shape_mid, working_shape_post),
axis=0)
ret_0 = tf.reshape(in_x, working_shape)
cond = lambda i, x: tf.less(i, h_dim_exp)
body = lambda i, x: (tf.add(i, 1), _fast_walsh_hadamard_one_step(x, i + 1))
ii_final, ret = tf.while_loop(
cond,
body,
[ii, ret_0],
parallel_iterations=1 # check on this?
)
elif method == 'two':
# Never expand to high rank. Roll dimensions instead. This is
# needed because backprop through the slice operator only
# supports up to rank 7 tensors in TF 1.3
# [pre, 1, 2, h_dim/2, post] ->
# [pre, 2, 2, h_dim/4, post] -> ...
# [pre, h_dim/2, 2, 1, post]
d1 = tf.expand_dims(tf.constant(1), axis=0)
d2 = tf.expand_dims(tf.constant(2), axis=0) # always 2
d3 = tf.expand_dims(h_dim / 2, axis=0)
working_shape_0 = tf.concat((working_shape_pre, d1, d2, d3, working_shape_post), axis=0)
ret_0 = tf.reshape(in_x, working_shape_0)
cond = lambda i, d1, d3, x: tf.less(i, h_dim_exp)
body = lambda i, d1, d3, x: (tf.add(i, 1),
d1 * 2,
d3 / 2,
_fast_walsh_hadamard_one_step_method2(x, working_shape_pre, d1, d2, d3, working_shape_post))
ii_final, d1_final, d3_final, ret = tf.while_loop(
cond,
body,
[ii, d1, d3, ret_0],
parallel_iterations=1 # check on this?
)
else:
# 'c' version
# Only works for rank-1 (vector) input
assert False, 'c version disabled for now'
assert axis == 0, 'axis must be 0 for the c version of tf_fast_walsh_hadamard'
assert normalize, 'for c version normalize must be True'
assert_rank1 = tf.assert_rank(in_x, 1)
with tf.control_dependencies([assert_rank1, assert_pow2]):
ret = c_fast_walsh_hadamard(in_x)
if normalize and method != 'c':
ret = ret / tf.sqrt(tf.to_float(h_dim))
ret = tf.reshape(ret, orig_shape)
return ret
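# Illustrative reference only: a minimal fast Walsh-Hadamard butterfly in plain
# NumPy. The doctests above compare against np_fast_walsh_hadamard, which is
# defined elsewhere in this module; this sketch is not necessarily that
# implementation, it just shows the O(L log L) recursion the TF versions
# reproduce. It assumes the module-level `import numpy as np` already used
# throughout this file.
def _np_fwht_reference_sketch(x, axis, normalize=True):
    x = np.asarray(x, dtype=float)
    orig_shape = x.shape
    L = orig_shape[axis]
    assert (L & (L - 1)) == 0 and L > 0, 'axis length must be a power of two'
    pre = int(np.prod(orig_shape[:axis]))        # prod of an empty tuple is 1
    post = int(np.prod(orig_shape[axis + 1:]))
    work = x.reshape(pre, L, post)
    h = 1
    while h < L:
        # View the transform axis as (blocks, 2, h) and apply one butterfly stage
        work = work.reshape(pre, L // (2 * h), 2, h, post)
        top = work[:, :, 0] + work[:, :, 1]
        bottom = work[:, :, 0] - work[:, :, 1]
        work = np.stack((top, bottom), axis=2)
        h *= 2
    out = work.reshape(orig_shape)
    # With normalize=False this reproduces the Wikipedia doctest above:
    # [1,0,1,0,0,1,1,0] -> [4, 2, 0, -2, 0, 2, 0, 2]
    return out / np.sqrt(L) if normalize else out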
def tf_fastfood_transform(in_x, dd, DD, use_get=False, use_C=False):
'''Transform from d to D. Pads as necessary.
For now: assume dd and DD are known in python.'''
# Tensor d and D
#assert_D_big = tf.assert_greater_equal(DD, dd, message='d cannot be larger than D')
#with tf.control_dependencies([assert_D_big]):
# ll = tf.cast(tf.round(tf.log(tf.to_float(DD)) / np.log(2)), 'int32')
# LL = tf.pow(2, ll)
# Python d and D
assert isinstance(dd, int), 'd should be int'
assert isinstance(DD, int), 'D should be int'
assert DD >= dd, 'd cannot be larger than D'
assert dd > 0, 'd and D must be positive'
ll = int(np.ceil(np.log(DD) / np.log(2)))
LL = 2 ** ll
# Make vars
init_BB = tf.to_float(tf.random_uniform((LL,), 0, 2, dtype='int32')) * 2 - 1
init_Pi = tf.random_shuffle(tf.range(LL))
init_GG = tf.random_normal((LL,))
init_divisor = lambda GG: tf.sqrt(LL * tf.reduce_sum(tf.pow(GG.initialized_value(), 2)))
if use_get:
BB = tf.get_variable('B', initializer=init_BB, trainable=False)
Pi = tf.get_variable('Pi', initializer=init_Pi, trainable=False)
GG = tf.get_variable('G', initializer=init_GG, trainable=False)
divisor = tf.get_variable('divisor', initializer=init_divisor(GG), trainable=False)
else:
BB = tf.Variable(init_BB, name='B', trainable=False)
Pi = tf.Variable(init_Pi, name='Pi', trainable=False)
GG = tf.Variable(init_GG, name='G', trainable=False)
divisor = tf.Variable(init_divisor(GG), name='divisor', trainable=False)
fastfood_vars = [BB, Pi, GG, divisor]
# Implement transform
dd_pad = tf.pad(in_x, [[0, LL - dd]])
mul_1 = tf.multiply(BB, dd_pad)
if use_C:
mul_2 = tf_fast_walsh_hadamard(mul_1, 0, method='c', normalize=True)
else:
mul_2 = tf_fast_walsh_hadamard(mul_1, 0, method='two', normalize=False)
mul_3 = tf.gather(mul_2, Pi)
mul_4 = tf.multiply(mul_3, GG)
if use_C:
mul_5 = tf_fast_walsh_hadamard(mul_4, 0, method='c', normalize=True)
print '\nWARNING: check normalization on this next line more carefully\n'
ret = tf.divide(tf.slice(mul_5, [0], [DD]), divisor * np.sqrt(float(DD) / LL / ll))
else:
mul_5 = tf_fast_walsh_hadamard(mul_4, 0, method='two', normalize=False)
ret = tf.divide(tf.slice(mul_5, [0], [DD]), divisor * np.sqrt(float(DD) / LL))
return fastfood_vars, ret
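# Minimal usage sketch for tf_fastfood_transform (TF1 graph mode, mirroring
# test_timing below; dd/DD and the variable names are arbitrary examples):
#   dd, DD = 10, 100                        # project a 10-dim vector up to 100 dims
#   in_vec = tf.placeholder('float32')      # rank-1 input of length dd
#   ff_vars, ww = tf_fastfood_transform(in_vec, dd, DD)
#   sess = tf.InteractiveSession()
#   sess.run(tf.global_variables_initializer())
#   out = sess.run(ww, feed_dict={in_vec: np.random.randn(dd)})  # out.shape == (DD,)
# Internally this is the Fastfood chain B -> H -> Pi -> G -> H plus a final
# rescale, which roughly approximates a dense Gaussian random projection at
# O(D log D) cost instead of O(dD).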
def test_timing():
N = 29
in_x = tf.placeholder('float32')
sum_x = tf.reduce_sum(in_x)
hh = tf_fast_walsh_hadamard(in_x, 1, True)
sum_h = tf.reduce_sum(hh)
sess = tf.InteractiveSession()
for ll in range(1, N):
L = 2**ll
print '\n%d, H dim %d' % (ll, L)
x = np.random.uniform(0, 1, (1, L, 1))
if L < 33554432:
start = time.time()
np_fast_walsh_hadamard(x, 1)
end = time.time()
print ' np %14s elems: %16s' % ('%d' % L, '%f' % (end - start))
else:
print ' np <skipped>'
start = time.time()
sess.run(sum_h, feed_dict={in_x: x})
end = time.time()
print ' tf %14s elems: %16s' % ('%d' % L, '%f' % (end - start))
# Time each op the third time (ignore CUDA tuning time) then subtract data transfer time
sess.run(sum_x, feed_dict={in_x: x})
sess.run(sum_x, feed_dict={in_x: x})
start = time.time()
sess.run(sum_x, feed_dict={in_x: x})
elap_data = time.time() - start
sess.run(sum_h, feed_dict={in_x: x})
sess.run(sum_h, feed_dict={in_x: x})
start = time.time()
sess.run(sum_h, feed_dict={in_x: x})
elap_had = time.time() - start
print ' tf just H %14s elems: %16s' % ('%d' % (L), '%f' % (elap_had - elap_data))
DD = max(5, int(np.ceil(L * .8)))
dd = max(3, int(np.ceil(DD * .001)))
if x.shape[1] >= dd:
for use_C in [False, True]:
st = '(C) ' if use_C else '(TF)'
ffvars, xform = tf_fastfood_transform(in_x, dd, DD, use_C=use_C)
sum_xf = tf.reduce_sum(xform)
sess.run(tf.global_variables_initializer())
sess.run(sum_xf, feed_dict={in_x: x[0, :dd, 0]})
start = time.time()
sess.run(sum_xf, feed_dict={in_x: x[0, :dd, 0]})
end = time.time()
print ' tf %s fastf %14s elems: %16s' % (st, '%d' % L, '%f' % (end - start))
sess.run(sum_x, feed_dict={in_x: x[0, :dd, 0]})
sess.run(sum_x, feed_dict={in_x: x[0, :dd, 0]})
start = time.time()
sess.run(sum_x, feed_dict={in_x: x[0, :dd, 0]})
elap_data = time.time() - start
sess.run(sum_xf, feed_dict={in_x: x[0, :dd, 0]})
sess.run(sum_xf, feed_dict={in_x: x[0, :dd, 0]})
start = time.time()
sess.run(sum_xf, feed_dict={in_x: x[0, :dd, 0]})
elap_had = time.time() - start
print ' tf %s just fastf%14s elems: %16s' % (st, '%d' % (L), '%f' % (elap_had - elap_data))
else:
print ' tf fastfood %14s elems: <skipped, too small>' % ('%d' % L)
if L > 32768:
print ' <skipped large batch cases>'
continue
x2 = np.random.uniform(0, 1, (10, L, 100))
start = time.time()
np_fast_walsh_hadamard(x2, 1)
end = time.time()
print ' np %14s elems: %16s' % ('%d' % (L*1000), '%f' % (end - start))
start = time.time()
sess.run(sum_h, feed_dict={in_x: x2})
end = time.time()
print ' tf %14s elems: %16s' % ('%d' % (L*1000), '%f' % (end - start))
# Time each op the third time (ignore CUDA tuning time) then subtract data | |
[right]
return False
return False
@register_specialize
@register_canonicalize
@gof.local_optimizer([T.mul])
def local_mul_switch_sink(node):
"""
This optimization makes the following changes in the graph:
T.mul(A,T.switch(cond,0,iff),B) --> T.switch(cond,0,T.mul(A,B,iff))
T.mul(A,T.switch(cond,ift,0),B) --> T.switch(cond,T.mul(A,B,ift),0)
A and B being several (or none) symbolic variables.
This is useful because A and B may not be numerically stable and give
NaN or inf values for cases where the switch returns 0.
With this optimization T.grad(T.switch(...)) has the right behavior.
Examples
--------
x -> f(x)
x -> g(x)
y = T.switch(cond,f(x),g(x))
**without the optimization
T.grad(y,x) -> grad(f(x),x) * grad(y,f(x)) + grad(g(x),x) * grad(y,g(x))
**with the optimization
T.grad(y,x) -> switch(cond,grad(f(x),x), 0) + switch(cond,0,grad(g(x),x))
This will be particularly useful for the lazyif because we skip
an entire part of the graph.
"""
if node.op != T.mul:
return False
for idx, i in enumerate(node.inputs):
if i.owner and i.owner.op == T.switch:
switch = i.owner
try:
if (get_scalar_constant_value(
switch.inputs[1], only_process_constants=True) == 0.):
listmul = node.inputs[:idx] + node.inputs[idx + 1:]
fmul = T.mul(*(listmul + [switch.inputs[2]]))
# Copy over stacktrace for elementwise multiplication op
# from previous elementwise multiplication op.
# An error in the multiplication (e.g. errors due to
# inconsistent shapes), will point to the
# multiplication op.
copy_stack_trace(node.outputs, fmul)
fct = [T.switch(switch.inputs[0], 0,
fmul)]
fct[0].tag.values_eq_approx = values_eq_approx_remove_nan
# Copy over stacktrace for switch op from both previous
# elementwise multiplication op and previous switch op,
# because an error in this part can be caused by either
# of the two previous ops.
copy_stack_trace(node.outputs + switch.outputs, fct)
return fct
except NotScalarConstantError:
pass
try:
if (get_scalar_constant_value(
switch.inputs[2], only_process_constants=True) == 0.):
listmul = node.inputs[:idx] + node.inputs[idx + 1:]
fmul = T.mul(*(listmul + [switch.inputs[1]]))
# Copy over stacktrace for elementwise multiplication op
# from previous elementwise multiplication op.
# An error in the multiplication (e.g. errors due to
# inconsistent shapes), will point to the
# multiplication op.
copy_stack_trace(node.outputs, fmul)
fct = [T.switch(switch.inputs[0],
fmul, 0)]
fct[0].tag.values_eq_approx = values_eq_approx_remove_nan
# Copy over stacktrace for switch op from both previous
# elementwise multiplication op and previous switch op,
# because an error in this part can be caused by either
# of the two previous ops.
copy_stack_trace(node.outputs + switch.outputs, fct)
return fct
except NotScalarConstantError:
pass
return False
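# Concrete sketch of the rewrite above, using the classic x*log(x) case:
#   x = T.vector('x')
#   y = T.switch(T.le(x, 0), 0., x) * T.log(x)
# is turned into
#   y = T.switch(T.le(x, 0), 0., x * T.log(x))
# so the 0 * (-inf) = NaN the original graph would produce for x <= 0 is
# short-circuited by the 0 branch, and T.grad through the switch stays finite
# there as the docstring describes.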
@register_canonicalize
@gof.local_optimizer([T.true_div, T.int_div])
def local_div_switch_sink(node):
"""
This optimization makes the following changes in the graph:
T.div(T.switch(cond,0,iff),A) --> T.switch(cond,0,T.div(iff,A))
T.div(T.switch(cond,ift,0),A) --> T.switch(cond,T.div(ift,A),0)
A being a symbolic variable.
This is useful because A may not be numerically stable and give
NaN or inf values for cases where the switch returns 0.
See local_mul_switch_sink for more details.
"""
if (node.op != T.true_div and node.op != T.int_div):
return False
op = node.op
if node.inputs[0].owner and node.inputs[0].owner.op == T.switch:
switch = node.inputs[0].owner
try:
if get_scalar_constant_value(switch.inputs[1]) == 0.:
fdiv = op(switch.inputs[2], node.inputs[1])
# Copy over stacktrace for elementwise division op
# from previous elementwise multiplication op.
# An error in the division (e.g. errors due to
# inconsistent shapes or division by zero),
# will point to the new division op.
copy_stack_trace(node.outputs, fdiv)
fct = [T.switch(switch.inputs[0], 0,
fdiv)]
fct[0].tag.values_eq_approx = values_eq_approx_remove_nan
# Copy over stacktrace for switch op from both previous
# elementwise division op and previous switch op,
# because an error in this part can be caused by either
# of the two previous ops.
copy_stack_trace(node.outputs + switch.outputs, fct)
return fct
except NotScalarConstantError:
pass
try:
if get_scalar_constant_value(switch.inputs[2]) == 0.:
fdiv = op(switch.inputs[1], node.inputs[1])
# Copy over stacktrace for elementwise division op
# from previous elementwise multiplication op.
# An error in the division (e.g. errors due to
# inconsistent shapes or division by zero),
# will point to the new division op.
copy_stack_trace(node.outputs, fdiv)
fct = [T.switch(switch.inputs[0],
fdiv, 0)]
fct[0].tag.values_eq_approx = values_eq_approx_remove_nan
# Copy over stacktrace for switch op from both previous
# elementwise division op and previous switch op,
# because an error in this part can be caused by either
# of the two previous ops.
copy_stack_trace(node.outputs + switch.outputs, fct)
return fct
except NotScalarConstantError:
pass
return False
# Merge add/sub/mul/div/minimum/maximum/... of switches sharing the same
# condition, to enable further simplification of their branches
# Example: switch(c, a, b) + switch(c, x, y) -> switch(c, a+x, b+y)
@register_canonicalize
@gof.local_optimizer([T.Elemwise])
def local_merge_switch_same_cond(node):
scal = theano.scalar
# node must be binary elemwise or add or mul
if not isinstance(node.op, T.Elemwise) or not isinstance(
node.op.scalar_op, (scal.BinaryScalarOp, scal.Add, scal.Mul)):
return
# all inputs must be switch
if not all(s.owner and isinstance(s.owner.op, T.Elemwise) and
isinstance(s.owner.op.scalar_op, scal.Switch)
for s in node.inputs):
return
# all switch conditions must be the same
cond = node.inputs[0].owner.inputs[0]
if not all(s.owner.inputs[0] is cond for s in node.inputs[1:]):
return
# pull out switch
return [T.switch(cond,
node.op(*[s.owner.inputs[1] for s in node.inputs]),
node.op(*[s.owner.inputs[2] for s in node.inputs]))]
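# Example of the merge performed above (sketch):
#   c = T.scalar('c')
#   a, b, p, q = T.vectors('a', 'b', 'p', 'q')
#   z = T.switch(c, a, b) + T.switch(c, p, q)
# becomes
#   z = T.switch(c, a + p, b + q)
# which lets later passes simplify each branch independently.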
#############
# Tile Opts #
#############
@register_canonicalize
@register_stabilize
@gof.local_optimizer([T.Tile])
def local_useless_tile(node):
"""Tile(x, (1,)*N) -> x
Such a tile is useless: (1,)*N just means a vector whose elements are all 1.
"""
if isinstance(node.op, T.Tile):
try:
a = T.get_scalar_constant_value(node.inputs[1])
if a == 1:
try:
l = T.get_vector_length(node.inputs[1])
if l == node.inputs[0].ndim:
# No need to copy over any stacktrace as previous
# input variable already has a stacktrace
return [node.inputs[0]]
elif l < node.inputs[0].ndim:
# The Op doesn't support that case, so we can't
# implement the opt and test it.
return
return [node.inputs[0]]
else:
# The Op doesn't support that case, so we can't
# implement the opt and test it.
return
x_nd = node.inputs[0].ndim
broad = ['x'] * (l - x_nd) + xrange(x_nd)
ret = node.inputs[0].dimshuffle(broad)
# Copy over stacktrace from previous output node,
# and from node before tiling operation.
copy_stack_trace(node.outputs + node.inputs[0], ret)
return [ret]
except ValueError:
return
except NotScalarConstantError:
return
##############
# Split Opts #
##############
@register_canonicalize
@register_specialize
@gof.local_optimizer([T.Split])
def local_useless_split(node):
""" Split{n_splits=1}(x, y) -> x
Remove Split with only 1 split.
"""
if isinstance(node.op, T.Split):
if node.op.len_splits == 1:
x, axis, splits = node.inputs
out = assert_op(x, T.eq(splits.shape[0], 1))
out = assert_op(out, T.eq(x.shape[axis], splits[0]))
# Copy over stacktrace from previous output node.
copy_stack_trace(node.outputs, out)
return [out]
################
# Flatten Opts #
################
@register_canonicalize
@register_stabilize
@gof.local_optimizer([T.Flatten])
def local_flatten_lift(node):
"""
Flatten(UnaryElemwise(x)) -> UnaryElemwise(Flatten(x))
This optimization is needed by optimization
nnet/sigm.py:log1msigm_to_softplus to get applied when there is a flatten.
"""
if (isinstance(node.op, T.Flatten) and
node.inputs[0].owner and
isinstance(node.inputs[0].owner.op, T.Elemwise) and
len(node.inputs[0].owner.inputs) == 1):
f = node.op(node.inputs[0].owner.inputs[0])
e = node.inputs[0].owner.op(f)
return [e]
##################
# Reshape opts #
##################
def local_reshape_chain(op):
@gof.local_optimizer([op])
def f(node):
"""
Reshape(Reshape(shape1),shape2) -> Reshape(shape2)
"""
if not opt.check_chain(node, op, op):
return False
# TODO: this can permit a failing program to run by eliminating
# the lower reshape
rval = node.op(node.inputs[0].owner.inputs[0], node.inputs[1])
# It might happen that the desired output of this node has a
# broadcastable pattern that does not match that of 'rval'. This is
# when originally, we were able to figure out that one of the
# dimensions of the reshape is one, but some other transformation
# replaced the shape by one for which this cannot be guessed.
# We should try to figure out why we lost the information about this
# constant value... but in the meantime, better not apply this
# optimization.
if rval.broadcastable == node.outputs[0].broadcastable:
return [rval]
else:
return False
return f
register_canonicalize(local_reshape_chain(T.Reshape),
name='local_reshape_chain')
@register_canonicalize
@register_stabilize
@gof.local_optimizer([T.Reshape])
def local_useless_reshape(node):
"""
Remove Reshape when both the input and the output have a
single dimension.
"""
if isinstance(node.op, T.Reshape):
if (node.inputs[0].ndim == 1 and node.outputs[0].ndim == 1 and
node.inputs[0].broadcastable ==
node.outputs[0].broadcastable):
return [node.inputs[0]]
@register_canonicalize
@register_stabilize
@gof.local_optimizer([T.Reshape])
def local_reshape_lift(node):
"""
Reshape(UnaryElemwise(x)) -> UnaryElemwise(Reshape(x))
This optimization is needed by optimization
nnet/sigm.py:log1msigm_to_softplus to get applied when there is a reshape.
"""
if (isinstance(node.op, T.Reshape) and
node.inputs[0].owner and
isinstance(node.inputs[0].owner.op, T.Elemwise) and
len(node.inputs[0].owner.inputs) == 1):
r = node.op(node.inputs[0].owner.inputs[0], node.inputs[1])
e = node.inputs[0].owner.op(r)
# In rare case the original broadcast was (False, True), but
# the new one is (False, False). So don't crash in that case.
if e.type != node.outputs[0].type:
e = T.patternbroadcast(e, node.outputs[0].broadcastable)
return [e]
if 0:
# TODO: Test that this optimization works.
@register_canonicalize
@gof.local_optimizer([T.Reshape])
def local_scalar_reshape(node):
"""Eliminate reshape Ops whose inputs and outputs are scalars """
if isinstance(node.op, T.Reshape):
x, shp = node.inputs
if x.ndim == 0 and T.get_vector_length(shp) == 0:
return [x]
if 0:
# TODO: Finish writing and testing this optimization. The idea | |
<filename>tests/integration/test_models/standard_output_tests.py<gh_stars>1-10
#
# Standard tests on the standard set of model outputs
#
import pybamm
import numpy as np
class StandardOutputTests(object):
"""Calls all the tests on the standard output variables."""
def __init__(self, model, parameter_values, disc, solution):
# Assign attributes
self.model = model
self.parameter_values = parameter_values
self.disc = disc
self.solution = solution
if isinstance(self.model, pybamm.lithium_ion.BaseModel):
self.chemistry = "Lithium-ion"
elif isinstance(self.model, pybamm.lead_acid.BaseModel):
self.chemistry = "Lead acid"
# Only for constant current
current_sign = np.sign(parameter_values["Current function [A]"])
if current_sign == 1:
self.operating_condition = "discharge"
elif current_sign == -1:
self.operating_condition = "charge"
else:
self.operating_condition = "off"
def process_variables(self):
return
def run_test_class(self, ClassName):
"""Run all tests from a class 'ClassName'"""
tests = ClassName(
self.model,
self.parameter_values,
self.disc,
self.solution,
self.operating_condition,
)
tests.test_all()
def test_all(self, skip_first_timestep=False):
self.run_test_class(VoltageTests)
self.run_test_class(ElectrolyteConcentrationTests)
self.run_test_class(PotentialTests)
self.run_test_class(CurrentTests)
if self.chemistry == "Lithium-ion":
self.run_test_class(ParticleConcentrationTests)
self.run_test_class(DegradationTests)
if self.model.options["convection"] != "none":
self.run_test_class(VelocityTests)
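# Minimal driving sketch for these tests (hypothetical setup; the exact way the
# mesh/discretisation are built may differ between PyBaMM versions):
#   model = pybamm.lithium_ion.SPM()
#   geometry = model.default_geometry
#   param = model.default_parameter_values
#   param.process_model(model)
#   param.process_geometry(geometry)
#   mesh = pybamm.Mesh(geometry, model.default_submesh_types, model.default_var_pts)
#   disc = pybamm.Discretisation(mesh, model.default_spatial_methods)
#   disc.process_model(model)
#   solution = model.default_solver.solve(model, np.linspace(0, 3600, 100))
#   StandardOutputTests(model, param, disc, solution).test_all()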
class BaseOutputTest(object):
def __init__(self, model, param, disc, solution, operating_condition):
self.model = model
self.param = param
self.disc = disc
self.solution = solution
self.operating_condition = operating_condition
# Use dimensional time and space
self.t = solution.t * model.timescale_eval
geo = pybamm.geometric_parameters
L_x = param.evaluate(geo.L_x)
self.x_n = disc.mesh["negative electrode"].nodes * L_x
self.x_s = disc.mesh["separator"].nodes * L_x
self.x_p = disc.mesh["positive electrode"].nodes * L_x
whole_cell = ["negative electrode", "separator", "positive electrode"]
self.x = disc.mesh.combine_submeshes(*whole_cell).nodes * L_x
self.x_n_edge = disc.mesh["negative electrode"].edges * L_x
self.x_s_edge = disc.mesh["separator"].edges * L_x
self.x_p_edge = disc.mesh["positive electrode"].edges * L_x
self.x_edge = disc.mesh.combine_submeshes(*whole_cell).edges * L_x
if isinstance(self.model, pybamm.lithium_ion.BaseModel):
R_n_typ = param.evaluate(model.param.R_n_typ)
R_p_typ = param.evaluate(model.param.R_p_typ)
self.r_n = disc.mesh["negative particle"].nodes * R_n_typ
self.r_p = disc.mesh["positive particle"].nodes * R_p_typ
self.r_n_edge = disc.mesh["negative particle"].edges * R_n_typ
self.r_p_edge = disc.mesh["positive particle"].edges * R_p_typ
if self.model.options["particle size"] == "distribution":
self.R_n = disc.mesh["negative particle size"].nodes * R_n_typ
self.R_p = disc.mesh["positive particle size"].nodes * R_p_typ
# Useful parameters
self.l_n = param.evaluate(geo.l_n)
self.l_p = param.evaluate(geo.l_p)
current_param = self.model.param.current_with_time
self.i_cell = param.process_symbol(current_param).evaluate(solution.t)
class VoltageTests(BaseOutputTest):
def __init__(self, model, param, disc, solution, operating_condition):
super().__init__(model, param, disc, solution, operating_condition)
self.eta_r_n = solution["Negative electrode reaction overpotential [V]"]
self.eta_r_p = solution["Positive electrode reaction overpotential [V]"]
self.eta_r_n_av = solution[
"X-averaged negative electrode reaction overpotential [V]"
]
self.eta_r_p_av = solution[
"X-averaged positive electrode reaction overpotential [V]"
]
self.eta_r_av = solution["X-averaged reaction overpotential [V]"]
self.eta_sei_av = solution["X-averaged SEI film overpotential [V]"]
self.eta_e_av = solution["X-averaged electrolyte overpotential [V]"]
self.delta_phi_s_av = solution["X-averaged solid phase ohmic losses [V]"]
self.ocp_n_av = solution[
"X-averaged negative electrode open circuit potential [V]"
]
self.ocp_p_av = solution[
"X-averaged positive electrode open circuit potential [V]"
]
self.ocv_av = solution["X-averaged open circuit voltage [V]"]
self.voltage = solution["Terminal voltage [V]"]
def test_each_reaction_overpotential(self):
"""Testing that:
- discharge: eta_r_n > 0, eta_r_p < 0
- charge: eta_r_n < 0, eta_r_p > 0
- off: eta_r_n == 0, eta_r_p == 0
"""
tol = 0.01
t, x_n, x_p = self.t, self.x_n, self.x_p
if self.operating_condition == "discharge":
np.testing.assert_array_less(-self.eta_r_n(t, x_n), tol)
np.testing.assert_array_less(self.eta_r_p(t, x_p), tol)
elif self.operating_condition == "charge":
np.testing.assert_array_less(self.eta_r_n(t, x_n), tol)
np.testing.assert_array_less(-self.eta_r_p(t, x_p), tol)
elif self.operating_condition == "off":
np.testing.assert_array_equal(self.eta_r_n(t, x_n), 0)
np.testing.assert_array_equal(-self.eta_r_p(t, x_p), 0)
def test_overpotentials(self):
"""Testing that all are:
- discharge: . < 0
- charge: . > 0
- off: . == 0
"""
tol = 0.001
if self.operating_condition == "discharge":
np.testing.assert_array_less(self.eta_r_av(self.t), tol)
np.testing.assert_array_less(self.eta_e_av(self.t), tol)
np.testing.assert_array_less(self.delta_phi_s_av(self.t), tol)
elif self.operating_condition == "charge":
np.testing.assert_array_less(-self.eta_r_av(self.t), tol)
np.testing.assert_array_less(-self.eta_e_av(self.t), tol)
np.testing.assert_array_less(-self.delta_phi_s_av(self.t), tol)
elif self.operating_condition == "off":
np.testing.assert_array_equal(self.eta_r_av(self.t), 0)
np.testing.assert_array_equal(self.eta_e_av(self.t), 0)
# For some reason SPM gives delta_phi_s_av ~ 1e-17
np.testing.assert_array_almost_equal(
self.delta_phi_s_av(self.t), 0, decimal=16
)
def test_ocps(self):
"""Testing that:
- discharge: ocp_n increases, ocp_p decreases
- charge: ocp_n decreases, ocp_p increases
- off: ocp_n, ocp_p constant
"""
neg_end_vs_start = self.ocp_n_av(self.t[-1]) - self.ocp_n_av(self.t[1])
pos_end_vs_start = self.ocp_p_av(self.t[-1]) - self.ocp_p_av(self.t[1])
if self.operating_condition == "discharge":
np.testing.assert_array_less(-neg_end_vs_start, 0)
np.testing.assert_array_less(pos_end_vs_start, 0)
elif self.operating_condition == "charge":
np.testing.assert_array_less(neg_end_vs_start, 0)
np.testing.assert_array_less(-pos_end_vs_start, 0)
elif self.operating_condition == "off":
np.testing.assert_array_almost_equal(neg_end_vs_start, 0)
np.testing.assert_array_almost_equal(pos_end_vs_start, 0)
def test_ocv(self):
"""Testing that:
- discharge: ocv decreases
- charge: ocv increases
- off: ocv constant
"""
end_vs_start = self.ocv_av(self.t[-1]) - self.ocv_av(self.t[1])
if self.operating_condition == "discharge":
np.testing.assert_array_less(end_vs_start, 0)
elif self.operating_condition == "charge":
np.testing.assert_array_less(-end_vs_start, 0)
elif self.operating_condition == "off":
np.testing.assert_array_almost_equal(end_vs_start, 0)
def test_voltage(self):
"""Testing that:
- discharge: voltage decreases
- charge: voltage increases
- off: voltage constant
"""
end_vs_start = self.voltage(self.t[-1]) - self.voltage(self.t[1])
if self.operating_condition == "discharge":
np.testing.assert_array_less(end_vs_start, 0)
elif self.operating_condition == "charge":
np.testing.assert_array_less(-end_vs_start, 0)
elif self.operating_condition == "off":
np.testing.assert_array_almost_equal(end_vs_start, 0)
def test_consistent(self):
"""Test voltage components are consistent with one another by ensuring they sum
correctly"""
np.testing.assert_array_almost_equal(
self.ocv_av(self.t), self.ocp_p_av(self.t) - self.ocp_n_av(self.t)
)
np.testing.assert_array_almost_equal(
self.eta_r_av(self.t), self.eta_r_p_av(self.t) - self.eta_r_n_av(self.t)
)
np.testing.assert_array_almost_equal(
self.voltage(self.t),
self.ocv_av(self.t)
+ self.eta_r_av(self.t)
+ self.eta_e_av(self.t)
+ self.delta_phi_s_av(self.t)
+ self.eta_sei_av(self.t),
decimal=2,
)
def test_all(self):
self.test_each_reaction_overpotential()
self.test_overpotentials()
self.test_ocps()
self.test_ocv()
self.test_voltage()
self.test_consistent()
class ParticleConcentrationTests(BaseOutputTest):
def __init__(self, model, param, disc, solution, operating_condition):
super().__init__(model, param, disc, solution, operating_condition)
self.c_s_n = solution["Negative particle concentration"]
self.c_s_p = solution["Positive particle concentration"]
self.c_s_n_rav = solution["R-averaged negative particle concentration"]
self.c_s_p_rav = solution["R-averaged positive particle concentration"]
self.c_s_n_surf = solution["Negative particle surface concentration"]
self.c_s_p_surf = solution["Positive particle surface concentration"]
self.c_s_n_tot = solution["Total lithium in negative electrode [mol]"]
self.c_s_p_tot = solution["Total lithium in positive electrode [mol]"]
self.N_s_n = solution["Negative particle flux"]
self.N_s_p = solution["Positive particle flux"]
self.c_SEI_tot = solution["Loss of lithium to SEI [mol]"]
self.c_Li_tot = solution["Loss of lithium to lithium plating [mol]"]
if model.options["particle size"] == "distribution":
# These concentration variables are only present for distribution models.
# Take only the x-averaged of these for now, since variables cannot have
# 4 domains yet
self.c_s_n_dist = solution[
"X-averaged negative particle concentration distribution"
]
self.c_s_p_dist = solution[
"X-averaged positive particle concentration distribution"
]
self.c_s_n_surf_dist = solution[
"Negative particle surface concentration distribution"
]
self.c_s_p_surf_dist = solution[
"Positive particle surface concentration distribution"
]
def test_concentration_increase_decrease(self):
"""Test all concentrations in negative particles decrease and all
concentrations in positive particles increase over a discharge."""
t, x_n, x_p, r_n, r_p = self.t, self.x_n, self.x_p, self.r_n, self.r_p
tol = 1e-16
if self.model.options["particle"] in ["quadratic profile", "quartic profile"]:
# For the assumed polynomial concentration profiles the values
# can increase/decrease within the particle as the polynomial shifts,
# so we just check the average instead
neg_diff = self.c_s_n_rav(t[1:], x_n) - self.c_s_n_rav(t[:-1], x_n)
pos_diff = self.c_s_p_rav(t[1:], x_p) - self.c_s_p_rav(t[:-1], x_p)
neg_end_vs_start = self.c_s_n_rav(t[-1], x_n) - self.c_s_n_rav(t[0], x_n)
pos_end_vs_start = self.c_s_p_rav(t[-1], x_p) - self.c_s_p_rav(t[0], x_p)
elif self.model.options["particle size"] == "distribution":
R_n, R_p = self.R_n, self.R_p
# Test the concentration variables that depend on x-R (surface values only,
# as 3D vars not implemented)
neg_diff = self.c_s_n_surf_dist(t[1:], x=x_n, R=R_n) - self.c_s_n_surf_dist(
t[:-1], x=x_n, R=R_n
)
pos_diff = self.c_s_p_surf_dist(t[1:], x=x_p, R=R_p) - self.c_s_p_surf_dist(
t[:-1], x=x_p, R=R_p
)
neg_end_vs_start = self.c_s_n_surf_dist(
t[-1], x=x_n, R=R_n
) - self.c_s_n_surf_dist(t[0], x=x_n, R=R_n)
pos_end_vs_start = self.c_s_p_surf_dist(
t[-1], x=x_p, R=R_p
) - self.c_s_p_surf_dist(t[0], x=x_p, R=R_p)
tol = 1e-15
else:
neg_diff = self.c_s_n(t[1:], x_n, r_n) - self.c_s_n(t[:-1], x_n, r_n)
pos_diff = self.c_s_p(t[1:], x_p, r_p) - self.c_s_p(t[:-1], x_p, r_p)
neg_end_vs_start = self.c_s_n(t[-1], x_n, r_n) - self.c_s_n(t[0], x_n, r_n)
pos_end_vs_start = self.c_s_p(t[-1], x_p, r_p) - self.c_s_p(t[0], x_p, r_p)
if self.operating_condition == "discharge":
np.testing.assert_array_less(neg_diff, tol)
np.testing.assert_array_less(-tol, pos_diff)
np.testing.assert_array_less(neg_end_vs_start, 0)
np.testing.assert_array_less(0, pos_end_vs_start)
elif self.operating_condition == "charge":
np.testing.assert_array_less(-tol, neg_diff)
np.testing.assert_array_less(pos_diff, tol)
np.testing.assert_array_less(0, neg_end_vs_start)
np.testing.assert_array_less(pos_end_vs_start, 0)
elif self.operating_condition == "off":
np.testing.assert_array_almost_equal(neg_diff, 0)
np.testing.assert_array_almost_equal(pos_diff, 0)
np.testing.assert_array_almost_equal(neg_end_vs_start, 0)
np.testing.assert_array_almost_equal(pos_end_vs_start, 0)
def test_concentration_limits(self):
"""Test that concentrations do not go below 0 or exceed the maximum."""
t, x_n, x_p, r_n, r_p = self.t, self.x_n, self.x_p, self.r_n, self.r_p
np.testing.assert_array_less(-self.c_s_n(t, x_n, r_n), 0)
np.testing.assert_array_less(-self.c_s_p(t, x_p, r_p), 0)
np.testing.assert_array_less(self.c_s_n(t, x_n, r_n), 1)
np.testing.assert_array_less(self.c_s_p(t, x_p, r_p), 1)
if self.model.options["particle size"] == "distribution":
R_n, R_p = self.R_n, self.R_p
# Cannot have 3D processed variables, so test concs that depend on
# r-R and x-R
# r-R (x-averaged)
np.testing.assert_array_less(-self.c_s_n_dist(t, r=r_n, R=R_n), 0)
np.testing.assert_array_less(-self.c_s_p_dist(t, r=r_p, R=R_p), 0)
np.testing.assert_array_less(self.c_s_n_dist(t, r=r_n, R=R_n), 1)
np.testing.assert_array_less(self.c_s_p_dist(t, r=r_p, R=R_p), 1)
# x-R (surface concentrations)
np.testing.assert_array_less(-self.c_s_n_surf_dist(t, x=x_n, R=R_n), 0)
np.testing.assert_array_less(-self.c_s_p_surf_dist(t, x=x_p, R=R_p), 0)
np.testing.assert_array_less(self.c_s_n_surf_dist(t, x=x_n, R=R_n), 1)
np.testing.assert_array_less(self.c_s_p_surf_dist(t, x=x_p, R=R_p), 1)
def test_conservation(self):
"""Test amount of lithium stored across all particles and in SEI layers is
constant."""
c_s_tot = (
self.c_s_n_tot(self.solution.t)
+ self.c_s_p_tot(self.solution.t)
+ self.c_SEI_tot(self.solution.t)
+ self.c_Li_tot(self.solution.t)
)
diff = (c_s_tot[1:] - c_s_tot[:-1]) / c_s_tot[:-1]
if self.model.options["particle"] == "quartic profile":
np.testing.assert_array_almost_equal(diff, 0, decimal=10)
# elif self.model.options["particle size"] == "distribution":
# np.testing.assert_array_almost_equal(diff, 0, decimal=10)
elif self.model.options["surface form"] == "differential":
np.testing.assert_array_almost_equal(diff, 0, decimal=10)
| |
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2016 Zomboided
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# This module allows the user to bind a key to one of the operations
# that can be performed within the add-on
import xbmc
from xbmcgui import Dialog, WindowXMLDialog,DialogProgress
import xbmcgui
import xbmcaddon
import xbmcvfs
import os
import glob
from threading import Timer
from libs.utility import debugTrace, errorTrace, infoTrace, newPrint, getID, getName
from libs.vpnplatform import getKeyMapsPath, getKeyMapsFileName, getAddonPath
from libs.common import fixKeymaps, getCycleLock, freeCycleLock, clearVPNCycle
class KeyListener(WindowXMLDialog):
TIMEOUT = 5
def __new__(cls):
gui_api = tuple(map(int, xbmcaddon.Addon('xbmc.gui').getAddonInfo('version').split('.')))
file_name = "DialogNotification.xml" if gui_api >= (5, 11, 0) else "DialogKaiToast.xml"
return super(KeyListener, cls).__new__(cls, file_name, "")
def __init__(self):
self.key = None
def onInit(self):
self.getControl(400).setImage(getAddonPath(True, "/resources/map.png"))
self.getControl(401).addLabel(xbmcaddon.Addon(getID()).getAddonInfo("name"))
self.getControl(402).addLabel("Press a key to map or wait to clear.")
def onAction(self, action):
code = action.getButtonCode()
self.key = None if code == 0 else str(code)
self.close()
@staticmethod
def record_key():
dialog = KeyListener()
timeout = Timer(KeyListener.TIMEOUT, dialog.close)
timeout.start()
dialog.doModal()
timeout.cancel()
key = dialog.key
del dialog
if key == None: return ""
return key
action = sys.argv[1]
debugTrace("-- Entered mapkey.py with parameter " + action + " --")
if not getID() == "":
addon = xbmcaddon.Addon(getID())
addon_name = getName()
cycle_key = ""
table_key = ""
table_long = False
info_key = ""
map_name = getKeyMapsFileName()
xml_start = '<keymap><global><keyboard>\n'
xml_key = '<key id="#KEY">runscript(#PATH#COMMAND)</key>\n'
xml_long = '<key id="#KEY" mod="longpress">runscript(#PATH#COMMAND)</key>\n'
xml_end = '</keyboard></global></keymap>\n'
cycle_command = "cycle.py"
table_command = "table.py"
info_command = "infopopup.py"
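# For illustration, a keymap written out by this script with all three
# functions mapped might look like the following (key IDs and the add-on path
# are example values only):
#   <keymap><global><keyboard>
#   <key id="61624">runscript(/path/to/addon/infopopup.py)</key>
#   <key id="61513" mod="longpress">runscript(/path/to/addon/table.py)</key>
#   <key id="61513">runscript(/path/to/addon/cycle.py)</key>
#   </keyboard></global></keymap>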
# Fix the keymap if it's been renamed by the Keymap addon
fixKeymaps()
lines = []
# Read any existing keymap and the keys we're interested in
if xbmcvfs.exists(getKeyMapsPath(map_name)):
path = getKeyMapsPath(map_name)
try:
debugTrace("Writing the map file to " + path)
map_file = open(path, 'r')
lines = map_file.readlines()
map_file.close()
i = 0
for line in lines:
if cycle_command in line:
i1 = line.index("key id=\"") + 8
i2 = line.index("\"", i1)
cycle_key = line[i1:i2]
debugTrace("Found cycle key " + cycle_key)
lines[i] = ""
if table_command in line:
i1 = line.index("key id=\"") + 8
i2 = line.index("\"", i1)
table_key = line[i1:i2]
debugTrace("Found table key " + table_key)
if 'mod="longpress"' in line: table_long = True
lines[i] = ""
if info_command in line:
i1 = line.index("key id=\"") + 8
i2 = line.index("\"", i1)
info_key = line[i1:i2]
debugTrace("Found infopopup key " + info_key)
lines[i] = ""
i = i + 1
except Exception as e:
errorTrace("mapkey.py", map_name + " is malformed")
errorTrace("mapkey.py", str(e))
lines = []
# If there is no keymap, create a blank one with start and end tags
if len(lines) == 0:
lines.append(xml_start)
lines.append(xml_end)
if getCycleLock():
clearVPNCycle()
# Get the updated keys
if action == "cycle":
if cycle_key == "":
msg = "Do you want to map a key or remote button to the VPN cycle function?"
y = "No"
n = "Yes"
else:
msg = "Key ID " + cycle_key + " is mapped to the VPN cycle function. Remap or clear current mapping?"
y = "Clear"
n = "Remap"
if not xbmcgui.Dialog().yesno(addon_name, msg, nolabel=n, yeslabel=y):
cycle_key = KeyListener().record_key()
if cycle_key == "":
dialog = "VPN cycle is not mapped to a key."
icon = "/resources/unmapped.png"
else:
dialog = "VPN cycle is mapped to key ID " + cycle_key + "."
icon = "/resources/mapped.png"
xbmcgui.Dialog().notification(addon_name, dialog, getAddonPath(True, icon), 5000, False)
else:
if not cycle_key == "":
cycle_key = ""
if action == "table":
if table_key == "":
msg = "Do you want to map a key or remote button to the VPN connection table function?"
y = "No"
n = "Yes"
else:
msg = "Key ID " + table_key + " is mapped to the VPN connection table function. Remap or clear current mapping?"
y = "Clear"
n = "Remap"
if not xbmcgui.Dialog().yesno(addon_name, msg, nolabel=n, yeslabel=y):
if not cycle_key == "" and xbmcgui.Dialog().yesno(addon_name, "Do you want to map a long press of the current cycle key to bring up a list of connections?. [I]This is only recommended for keyboard usage, not remote controls.[/I]", nolabel="No", yeslabel="Yes"):
table_key = cycle_key
table_long = True
else:
table_key = KeyListener().record_key()
table_long = False
if table_key == "":
dialog = "VPN connection table is not mapped to a key."
icon = "/resources/unmapped.png"
else:
dialog = "VPN connection table is mapped to key ID " + cycle_key + "."
icon = "/resources/mapped.png"
if xbmcgui.Dialog().yesno(addon_name, "Do you want display the list of all connections (with protocol filter applied) or just those validated?. You can change this later in the Settings/Monitor menu.", nolabel="Validated", yeslabel="All"):
addon.setSetting("table_display_type", "All connections")
else:
addon.setSetting("table_display_type", "Validated connections")
xbmcgui.Dialog().notification(addon_name, dialog, getAddonPath(True, icon), 5000, False)
else:
if not table_key == "":
table_key = ""
if action == "info":
if info_key == "":
msg = "Map a key or remote button to the information display function?"
y = "No"
n = "Yes"
else:
msg = "Key ID " + info_key + " is mapped to the information display function. Remap or clear current mapping?"
y = "Clear"
n = "Remap"
if not xbmcgui.Dialog().yesno(addon_name, msg, nolabel=n, yeslabel=y):
info_key = KeyListener().record_key()
if info_key == "":
dialog = "Info display is not mapped to a key."
icon = "/resources/unmapped.png"
else:
dialog = "Info display is mapped to key ID " + info_key + "."
icon = "/resources/mapped.png"
xbmcgui.Dialog().notification(addon_name, dialog, getAddonPath(True, icon), 5000, False)
else:
if not info_key == "":
info_key = ""
# Add the keys to the start of the keymap file
if not cycle_key == "":
out = xml_key.replace("#KEY", cycle_key)
out = out.replace("#PATH", getAddonPath(True, ""))
out = out.replace("#COMMAND", cycle_command)
lines.insert(1, out)
if not table_key == "":
if cycle_key == table_key or table_long:
out = xml_long.replace("#KEY", table_key)
else:
out = xml_key.replace("#KEY", table_key)
out = out.replace("#PATH", getAddonPath(True, ""))
out = out.replace("#COMMAND", table_command)
lines.insert(1, out)
if not info_key == "":
out = xml_key.replace("#KEY", info_key)
out = out.replace("#PATH", getAddonPath(True, ""))
out = out.replace("#COMMAND", info_command)
lines.insert(1, out)
# Count the number of valid lines to write out
i = 0
for line in lines:
if not line == "": i += 1
try:
path = getKeyMapsPath(map_name)
if i == 2:
# Delete keymap file, it's empty apart from the start and end tags
if xbmcvfs.exists(path):
debugTrace("No key mappings so deleting the map file " + path)
xbmcvfs.delete(path)
xbmcgui.Dialog().ok(addon_name, "Keymap has been removed as no keys have been mapped. You must restart for these changes to take effect.")
else:
debugTrace("No key mappings so not creating a map file")
else:
# Write the updated keymap
path = getKeyMapsPath(map_name)
map_file = open(path, 'w')
for line in lines:
if not line == "": map_file.write(line)
map_file.close()
xbmcgui.Dialog().ok(addon_name, "Keymap has been updated. You must restart for these changes to take effect.")
except Exception as e:
errorTrace("mapkey.py", "Couldn't update keymap file " + path)
errorTrace("mapkey.py", str(e))
xbmcgui.Dialog().ok(addon_name, "Problem updating the keymap file, check error log.")
# Warn the user if maps could clash
path = getKeyMapsPath("*.xml")
try:
debugTrace("Getting contents of keymaps directory " + path)
files = (glob.glob(path))
if len(files) > 1:
xbmcgui.Dialog().ok(addon_name, "Other keymaps exist and are applied in alphabetical order. If your mappings don't work then it could be that they're being over written by another map.")
infoTrace("mapkey.py", "Multiple (" + str(len(files)) + ") keymaps, including " + map_name + " detected in " + getKeyMapsPath(""))
except Exception as e:
errorTrace("mapkey.py", "Couldn't | |
<gh_stars>0
import tkinter as tk
from tkinter import filedialog, ttk
from pathlib import Path
import functions as fn
from bs4 import BeautifulSoup
import subprocess, csv, json, threading, statistics, time, webbrowser, os, re
import concurrent.futures
test_case = 0
name='MRANS'
version='0.1'
# Class for tkinter Treeview and related functions
class result_window:
def __init__(self, parent,stat, headings, name, view_func):
# Draw a treeview of a fixed type
# self.viewer=viewer
self.stat = stat
self.parent = parent
self.view_func = view_func
self.fileList = []
self.file_path = []
self.tree = ttk.Treeview(self.parent, show='headings', columns=headings)
self.tree.grid(row=0, column=0, sticky='NSEW')
s = ttk.Style()
s.configure('Treeview',rowheight=30)
for n in range(len(name)):
self.tree.heading(headings[n], text=name[n])
self.tree.column(headings[0], width=30, stretch=tk.NO, anchor='e')
self.tree.column(headings[1], width=500)
self.tree.bind('<Button-1>',self.left_click)
self.tree.bind('<Delete>', self.delete_entry)
# self.tree.bind(('<Button-3>' ), self.double_left_click)
self.tree.bind(('<Double-Button-1>'), self.double_left_click)
# self.tree.bind(('w'), self.double_left_click)
self.last_focus = None
def display(self):
self.delete()
index = iid = 0
self.abs=[]
self.rel=[]
for row in self.fileList:
# print(row)
inPath = row[0][1]
# pvp = row[3]
# pop = row[4]
# p1 = inPath.relative_to(self.file_path)
# disp = ' >> '.join(p1.parts)
self.tree.insert("", index, iid, values=(iid + 1, inPath))
index = iid = index + 1
# generate queue for processing
def queue(self):
fl = self.fileList
# id = list(range(0, len(fl)))
index = self.tree.selection()
# if any items are selected, modify the file list to be processed
if len(index) != 0:
N = [int(i) for i in index]
fl = [fl[j] for j in N]
# id = N
return fl
# clears selection of all items in treeview
def clear(self):
for item in self.tree.selection(): self.tree.selection_remove(item)
# self.viewer.clearFrame()
def delete(self):
self.tree.delete(*self.tree.get_children())
# display status of a treeview item
def status(self, iid, stsMsg):
self.tree.set(iid, 'Status', stsMsg)
self.parent.update_idletasks()
def left_click(self, event):
iid = self.tree.identify_row(event.y)
self.clickID = iid
if not iid == '':
iid = int(iid)
self.current_selection = self.fileList[iid][0]
def double_left_click(self, event):
iid = self.clickID
if not iid == '':
iid = int(iid)
# self.selection = self.fileList[iid][0]
self.view_func(self.current_selection)
def delete_entry(self, event):
iid = self.clickID
if not iid == '':
iid = int(iid)
del self.fileList[iid]
self.delete()
self.display()
self.clickID = ''
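# Minimal standalone sketch of how result_window is driven elsewhere in this
# module (the nested-list shape of fileList mirrors what aggregated_list
# produces; paths and callbacks below are placeholder examples):
#   root = tk.Tk()
#   frame = tk.Frame(root); frame.grid(row=0, column=0, sticky='NSEW')
#   rw = result_window(frame, None, ['Number', 'Name', 'Status'],
#                      ['#', 'Name', 'Datasets'], view_func=print)
#   rw.fileList = [[['/data/subj01', 'subj01']], [['/data/subj02', 'subj02']]]
#   rw.display()
#   root.mainloop()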
class MainArea(tk.Frame):
def __init__(self, master,stat, **kwargs):
tk.Frame.__init__(self, master, **kwargs)
self.stat = stat
# self.config = config
self.overwrite = tk.IntVar()
self.columnconfigure(1, weight=1)
self.rowconfigure(0, weight=1)
self.master = master
# Frame for all controls
self.f0 = tk.Frame(self, borderwidth=1, relief='raised')
# self.f0.pack(fill = "both")
self.f0.grid(row=0, column=0, sticky='NSEW', columnspan=1, rowspan=1)
notebook =ttk.Notebook(self.f0)
notebook.pack(expand = 1, fill = "both")
# Frame for first level
# self.f1 = tk.LabelFrame(notebook, text='Controls', borderwidth=1, padx=10, pady=10, relief='raised')
self.fr_firstlv = tk.Frame(notebook)
self.fr_firstlv.grid(row=0, column=0, sticky='NSEW')
self.fr_firstlv.rowconfigure(1, weight=1)
self.firstlv_controls =tk.LabelFrame(self.fr_firstlv, text='Control')
self.firstlv_controls.grid(row=0, column=0, sticky='NSEW')
self.firstlv_tasks = tk.LabelFrame(self.fr_firstlv, text='Tasks')
self.firstlv_tasks.grid(row=1, column=0, sticky='NSEW')
self.firstlv_tasks.rowconfigure(0, weight=1)
self.fr_higherlevel = tk.Frame(notebook, borderwidth=1, padx=10, pady=10, relief='raised')
self.fr_higherlevel.grid(row=0, column=0, sticky='NSEW')
self.fr_higherlevel.rowconfigure(1, weight=1)
self.higherlv_controls = tk.LabelFrame(self.fr_higherlevel, text='Control')
self.higherlv_controls.grid(row=0, column=0, sticky='NSEW')
self.higherlv_tasks = tk.LabelFrame(self.fr_higherlevel, text='Tasks')
self.higherlv_tasks.grid(row=1, column=0, sticky='NSEW')
self.higherlv_tasks.rowconfigure(0, weight=1)
notebook.add(self.fr_firstlv, text ="First Level")
notebook.add(self.fr_higherlevel, text="Higher Level Analysis")
# Frame for File list Tree View
self.fr_results = tk.Frame(self, borderwidth=0, relief='raised', pady=10, padx = 2)
self.fr_results.grid(row=0, column=1, sticky='NSEW', rowspan=1)
self.fr_results.columnconfigure(0, weight=1)
self.fr_results.rowconfigure(0, weight=1)
# Individual elements
# Display results and status
self.result_tree = result_window(self.fr_results, stat, ['Number', 'Name', 'Status'], ['#', 'Name', 'Datasets'], self.extraction_view)
# Display tasks in first level
self.task_tree = result_window(self.firstlv_tasks, stat, ['Number', 'Name', 'Status'], ['#', 'Tasks', 'Status'], self.extraction_view)
# Display tasks in higher level
self.high_task_tree = result_window(self.higherlv_tasks, stat, ['Number', 'Name', 'Status'], ['#', 'Tasks', 'Status'], self.extraction_view)
# Display results and status
# self.result_tree = result_window(self.f2, viewer, stat)
self.file_path = ''
self.roi_path = ''
# Controls
el = fn.Elements(self.firstlv_controls)
el.button("Database", self.search_subjects,1, 0, 0, tk.W + tk.E, 1) # Selection of root directory
el.button("Brain extraction", self.brain_extraction, '', 2, 2, tk.W + tk.E, 1) # Brain extraction
el.button("Generate Profiles", self.generate_profile, '', 0, 2, tk.W + tk.E, 1) # Brain extraction
el.button("Process", self.process, '', 0, 4, tk.W + tk.E, 1) # Process dataset
el.button("Set Structural", self.set_structural, '', 2, 4, tk.W + tk.E, 1) # Select dataset corresponding to
el.button("Generate Report", self.generate_report, '', 6, 0, tk.W + tk.E, 1) # Select dataset corresponding to
self.report_name = tk.StringVar()
el.textField_var('Report Name', self.report_name, 20,5,1)
# structural scan for BET and registration
self.bet_thresh = el.textField("BET Frac. Int. Threshold", 5, 1, 0) # Task or Dataset to be searched for
self.bet_grad_thresh = el.textField("BET Grad. Threshold", 5, 1, 1) # Task or Dataset to be searched for
# self.filters = el.textField("Filters", 20, 1, 1) # keywords to filter individual datasets
self.bet_algo_list = ["Robust", "Bias Field Correction"]
self.bet_algo = tk.StringVar()
el.popupMenu(self.bet_algo, self.bet_algo_list, 1, 2, 20, 'W')
self.bet_algo.set(self.bet_algo_list[0])
# self.analysis_name.set('Hello')
# self.analysis_box = el.textField_var("Analysis Name", self.analysis_name, 20, 1, 3) # Task or Dataset to be searched for
# el.button("Search", self.search, '', 3, 0, tk.N + tk.S, 1) # button press to start search
# el.button("Clear", self.search, '', 3, 1, tk.N, 1) # button press to clear selection
# el.check('Overwrite', self.overwrite, 4, 1) # checkbox for overwrite option
## Higher Level Analysis
e2 = fn.Elements(self.higherlv_controls)
e2.button("Database", self.search_subjects, 2, 0, 0, tk.W + tk.E, 1) # Selection of output directory
e2.button("Run Higher Level Analysis", self.higher_level, '', 0, 1, tk.W + tk.E, 1) # Generate profile
self.prevselection = '0'
self.root = Path(__file__).parent.absolute()
tmp_path = Path(self.root)/'temp'
if not Path(tmp_path).is_dir():
os.mkdir(tmp_path)
##### Main tasks ########################################
# method for calling directory picker
def selectPath(self, var):
if test_case == 1:
self.file_path = self.root/'test_data'
self.higherlevel_directory = self.root/'test_data'/'grp_level'
else:
path = fn.appFuncs.selectPath()
if var == 1:
self.file_path = path
if var == 2:
self.roi_path = path
if var == 3:
self.higherlevel_directory = path
self.result_tree.file_path = self.file_path
self.stat.set('Selected Path: %s', self.file_path)
# self.result_tree.file_path = self.file_path
# self.search_subjects()
def search_subjects(self, case):
self.selectPath(case)
self.subject_names = []
self.subject_list = []
for item in Path(self.file_path).iterdir():
self.subject_names.append(item.name)
self.subject_list.append([item, item.name])
# print(self.subject_names)
self.result_tree.fileList = self.aggregated_list(self.subject_list)
self.result_tree.display() # display the results
if case == 1:
self.search_tasks()
if case == 2:
self.search_feat_tasks()
def search_tasks(self):
task_list = []
unique_list = []
self.task_list = []
for pa in self.result_tree.fileList:
scan = Path(pa[0][0]).rglob(f'*.nii*')
# find all scans and if any in post processed folders identified by ".feat", exclude them
tasks = [Path(t).name for t in scan if "feat" not in str(t)]
task_list += tasks
for word in task_list:
if word not in unique_list:
unique_list.append(word)
self.task_list = [['',item] for item in unique_list]
self.task_tree.fileList = self.aggregated_list(self.task_list)
self.task_tree.display() # display the results
def search_feat_tasks(self):
task_list = []
unique_list = []
self.task_list = []
for pa in self.result_tree.fileList:
scan = Path(pa[0][0]).rglob(f'*.feat')
# find all scans and if any in post processed folders identified by ".feat", exclude them
tasks = [Path(t).name for t in scan]
task_list += tasks
for word in task_list:
if word not in unique_list:
unique_list.append(word)
self.high_task_list = [['', item] for item in unique_list]
self.high_task_tree.fileList = self.aggregated_list(self.high_task_list)
self.high_task_tree.display() # display the results
# indicate which 4D file is to be used as structural scan
def set_structural(self):
self.structural_scan = [self.task_tree.current_selection[1], self.task_tree.clickID]
self.task_tree.status(self.structural_scan[1], 'Structural')
# if structural scan needs to be changed to a new one
if not (self.prevselection == self.structural_scan[1]):
self.task_tree.status(self.prevselection, '')
self.prevselection = self.task_tree.clickID
# Extract brain based on structural scan and entered parameters
def brain_extraction(self):
commands = []
self.stat.set('Performing brain extraction ...')
queue = self.result_tree.queue()
for row in queue:
subject = row[0][0]
# samp = Path(subject).rglob(self.structural_scan[0])
# for s in samp: self.sample = s
# temp = str(self.sample).split('.nii')
# sample_output = temp[0] + '_brain.nii.gz'
sample_output, sample = self.get_structural(subject)
algo = ['-R', '-B']
bet_in = self.bet_algo_list.index(self.bet_algo.get())
command = ['bet', str(sample), sample_output, algo[bet_in], '-m', '-f', self.bet_thresh.get(), '-g',
self.bet_grad_thresh.get(), '-o']
# print(command)
commands.append(command)
# print(commands)
self.threader_s(commands)
self.stat.set('Brain extraction completed')
self.sample_output = sample_output
# get the extracted structural scan for a subject
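# (e.g. a structural scan .../anat/T1w.nii.gz found under the subject folder
# yields .../anat/T1w_brain.nii.gz as the expected BET output path; the
# filenames here are illustrative only)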
def get_structural(self,subject):
samp = Path(subject).rglob(self.structural_scan[0])
for s in samp: sample = s
temp = str(sample).split('.nii')
subject_structural = temp[0] + '_brain.nii.gz'
return subject_structural, sample
def get_task_name(self):
task_name = (str(self.task_tree.current_selection[1]).split('.nii'))[0]
return task_name
# add visualizer for the output
def extraction_view(self, subject):
suffix = str(self.structural_scan[0]).split('.nii')
bet_output = suffix[0] + '_brain.nii.gz'
base_search = Path(subject[0]).rglob(self.structural_scan[0])
for b in base_search: base = b
mask_search = Path(subject[0]).rglob(bet_output)
for m in mask_search: mask = m
command = ['fsleyes', '-cs', '-std', str(base), str(mask), '-cm','blue']
# print(command)
command_except = ['fsleyes', '-std', str(base), str(mask), '-cm','blue']
self.update_idletasks()
try:
fn.appFuncs.thread(command, True)
except:
fn.appFuncs.thread(command_except, True)
# Allows user to create a custom design profile which can then be applied to all datasets
| |
self.Id
def set_Id(self, Id): self.Id = Id
def validate_tsNaturezaOperacao(self, value):
# Validate type tsNaturezaOperacao, a restriction on xsd:byte.
if value is not None and Validate_simpletypes_:
if not self.gds_validate_simple_patterns(
self.validate_tsNaturezaOperacao_patterns_, value):
warnings_.warn('Value "%s" does not match xsd pattern restrictions: %s' % (value.encode('utf-8'), self.validate_tsNaturezaOperacao_patterns_, ))
validate_tsNaturezaOperacao_patterns_ = [['^1$|^2$|^3$|^4$|^5$|^6$']]
def validate_tsRegimeEspecialTributacao(self, value):
# Validate type tsRegimeEspecialTributacao, a restriction on xsd:byte.
if value is not None and Validate_simpletypes_:
if not self.gds_validate_simple_patterns(
self.validate_tsRegimeEspecialTributacao_patterns_, value):
warnings_.warn('Value "%s" does not match xsd pattern restrictions: %s' % (value.encode('utf-8'), self.validate_tsRegimeEspecialTributacao_patterns_, ))
validate_tsRegimeEspecialTributacao_patterns_ = [['^0$|^1$|^2$|^3$|^4$|^5$|^6$']]
def validate_tsSimNao(self, value):
# Validate type tsSimNao, a restriction on xsd:byte.
if value is not None and Validate_simpletypes_:
if not self.gds_validate_simple_patterns(
self.validate_tsSimNao_patterns_, value):
warnings_.warn('Value "%s" does not match xsd pattern restrictions: %s' % (value.encode('utf-8'), self.validate_tsSimNao_patterns_, ))
validate_tsSimNao_patterns_ = [['^1$|^2$']]
def validate_tsStatusRps(self, value):
# Validate type tsStatusRps, a restriction on xsd:byte.
if value is not None and Validate_simpletypes_:
if not self.gds_validate_simple_patterns(
self.validate_tsStatusRps_patterns_, value):
warnings_.warn('Value "%s" does not match xsd pattern restrictions: %s' % (value.encode('utf-8'), self.validate_tsStatusRps_patterns_, ))
validate_tsStatusRps_patterns_ = [['^1$|^2$']]
def validate_tsIdTag(self, value):
# Validate type tsIdTag, a restriction on xsd:string.
if value is not None and Validate_simpletypes_:
if len(value) > 255:
warnings_.warn('Value "%(value)s" does not match xsd maxLength restriction on tsIdTag' % {"value" : value.encode("utf-8")} )
def hasContent_(self):
if (
self.IdentificacaoRps is not None or
self.DataEmissao is not None or
self.NaturezaOperacao is not None or
self.RegimeEspecialTributacao is not None or
self.OptanteSimplesNacional is not None or
self.IncentivadorCultural is not None or
self.Status is not None or
self.RpsSubstituido is not None or
self.Servico is not None or
self.Prestador is not None or
self.Tomador is not None or
self.IntermediarioServico is not None or
self.ConstrucaoCivil is not None
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='tcInfRps', namespacedef_='', pretty_print=True):
imported_ns_def_ = GenerateDSNamespaceDefs_.get('tcInfRps')
if imported_ns_def_ is not None:
namespacedef_ = imported_ns_def_
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='tcInfRps')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_='', name_='tcInfRps', pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='tcInfRps'):
if self.Id is not None and 'Id' not in already_processed:
already_processed.add('Id')
outfile.write(' Id=%s' % (quote_attrib(self.Id), ))
def exportChildren(self, outfile, level, namespace_='', name_='tcInfRps', fromsubclass_=False, pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.IdentificacaoRps is not None:
self.IdentificacaoRps.export(outfile, level, namespace_, name_='IdentificacaoRps', pretty_print=pretty_print)
if self.DataEmissao is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<DataEmissao>%s</DataEmissao>%s' % (self.gds_format_datetime(self.DataEmissao, input_name='DataEmissao'), eol_))
if self.NaturezaOperacao is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<NaturezaOperacao>%s</NaturezaOperacao>%s' % (self.gds_format_integer(self.NaturezaOperacao, input_name='NaturezaOperacao'), eol_))
if self.RegimeEspecialTributacao is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<RegimeEspecialTributacao>%s</RegimeEspecialTributacao>%s' % (self.gds_format_integer(self.RegimeEspecialTributacao, input_name='RegimeEspecialTributacao'), eol_))
if self.OptanteSimplesNacional is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<OptanteSimplesNacional>%s</OptanteSimplesNacional>%s' % (self.gds_format_integer(self.OptanteSimplesNacional, input_name='OptanteSimplesNacional'), eol_))
if self.IncentivadorCultural is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<IncentivadorCultural>%s</IncentivadorCultural>%s' % (self.gds_format_integer(self.IncentivadorCultural, input_name='IncentivadorCultural'), eol_))
if self.Status is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<Status>%s</Status>%s' % (self.gds_format_integer(self.Status, input_name='Status'), eol_))
if self.RpsSubstituido is not None:
self.RpsSubstituido.export(outfile, level, namespace_, name_='RpsSubstituido', pretty_print=pretty_print)
if self.Servico is not None:
self.Servico.export(outfile, level, namespace_, name_='Servico', pretty_print=pretty_print)
if self.Prestador is not None:
self.Prestador.export(outfile, level, namespace_, name_='Prestador', pretty_print=pretty_print)
if self.Tomador is not None:
self.Tomador.export(outfile, level, namespace_, name_='Tomador', pretty_print=pretty_print)
if self.IntermediarioServico is not None:
self.IntermediarioServico.export(outfile, level, namespace_, name_='IntermediarioServico', pretty_print=pretty_print)
if self.ConstrucaoCivil is not None:
self.ConstrucaoCivil.export(outfile, level, namespace_, name_='ConstrucaoCivil', pretty_print=pretty_print)
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('Id', node)
if value is not None and 'Id' not in already_processed:
already_processed.add('Id')
self.Id = value
self.validate_tsIdTag(self.Id) # validate type tsIdTag
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'IdentificacaoRps':
obj_ = tcIdentificacaoRps.factory()
obj_.build(child_)
self.IdentificacaoRps = obj_
obj_.original_tagname_ = 'IdentificacaoRps'
elif nodeName_ == 'DataEmissao':
sval_ = child_.text
dval_ = self.gds_parse_datetime(sval_)
self.DataEmissao = dval_
elif nodeName_ == 'NaturezaOperacao':
sval_ = child_.text
try:
ival_ = int(sval_)
except (TypeError, ValueError) as exp:
raise_parse_error(child_, 'requires integer: %s' % exp)
ival_ = self.gds_validate_integer(ival_, node, 'NaturezaOperacao')
self.NaturezaOperacao = ival_
# validate type tsNaturezaOperacao
self.validate_tsNaturezaOperacao(self.NaturezaOperacao)
elif nodeName_ == 'RegimeEspecialTributacao':
sval_ = child_.text
try:
ival_ = int(sval_)
except (TypeError, ValueError) as exp:
raise_parse_error(child_, 'requires integer: %s' % exp)
ival_ = self.gds_validate_integer(ival_, node, 'RegimeEspecialTributacao')
self.RegimeEspecialTributacao = ival_
# validate type tsRegimeEspecialTributacao
self.validate_tsRegimeEspecialTributacao(self.RegimeEspecialTributacao)
elif nodeName_ == 'OptanteSimplesNacional':
sval_ = child_.text
try:
ival_ = int(sval_)
except (TypeError, ValueError) as exp:
raise_parse_error(child_, 'requires integer: %s' % exp)
ival_ = self.gds_validate_integer(ival_, node, 'OptanteSimplesNacional')
self.OptanteSimplesNacional = ival_
# validate type tsSimNao
self.validate_tsSimNao(self.OptanteSimplesNacional)
elif nodeName_ == 'IncentivadorCultural':
sval_ = child_.text
try:
ival_ = int(sval_)
except (TypeError, ValueError) as exp:
raise_parse_error(child_, 'requires integer: %s' % exp)
ival_ = self.gds_validate_integer(ival_, node, 'IncentivadorCultural')
self.IncentivadorCultural = ival_
# validate type tsSimNao
self.validate_tsSimNao(self.IncentivadorCultural)
elif nodeName_ == 'Status':
sval_ = child_.text
try:
ival_ = int(sval_)
except (TypeError, ValueError) as exp:
raise_parse_error(child_, 'requires integer: %s' % exp)
ival_ = self.gds_validate_integer(ival_, node, 'Status')
self.Status = ival_
# validate type tsStatusRps
self.validate_tsStatusRps(self.Status)
elif nodeName_ == 'RpsSubstituido':
obj_ = tcIdentificacaoRps.factory()
obj_.build(child_)
self.RpsSubstituido = obj_
obj_.original_tagname_ = 'RpsSubstituido'
elif nodeName_ == 'Servico':
obj_ = tcDadosServico.factory()
obj_.build(child_)
self.Servico = obj_
obj_.original_tagname_ = 'Servico'
elif nodeName_ == 'Prestador':
obj_ = tcIdentificacaoPrestador.factory()
obj_.build(child_)
self.Prestador = obj_
obj_.original_tagname_ = 'Prestador'
elif nodeName_ == 'Tomador':
obj_ = tcDadosTomador.factory()
obj_.build(child_)
self.Tomador = obj_
obj_.original_tagname_ = 'Tomador'
elif nodeName_ == 'IntermediarioServico':
obj_ = tcIdentificacaoIntermediarioServico.factory()
obj_.build(child_)
self.IntermediarioServico = obj_
obj_.original_tagname_ = 'IntermediarioServico'
elif nodeName_ == 'ConstrucaoCivil':
obj_ = tcDadosConstrucaoCivil.factory()
obj_.build(child_)
self.ConstrucaoCivil = obj_
obj_.original_tagname_ = 'ConstrucaoCivil'
# end class tcInfRps
class tcRps(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, InfRps=None, Signature=None):
self.original_tagname_ = None
self.InfRps = InfRps
self.Signature = Signature
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, tcRps)
if subclass is not None:
return subclass(*args_, **kwargs_)
if tcRps.subclass:
return tcRps.subclass(*args_, **kwargs_)
else:
return tcRps(*args_, **kwargs_)
factory = staticmethod(factory)
def get_InfRps(self): return self.InfRps
def set_InfRps(self, InfRps): self.InfRps = InfRps
def get_Signature(self): return self.Signature
def set_Signature(self, Signature): self.Signature = Signature
def hasContent_(self):
if (
self.InfRps is not None or
self.Signature is not None
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='tcRps', namespacedef_='', pretty_print=True):
imported_ns_def_ = GenerateDSNamespaceDefs_.get('tcRps')
if imported_ns_def_ is not None:
namespacedef_ = imported_ns_def_
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='tcRps')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_='', name_='tcRps', pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='tcRps'):
pass
def exportChildren(self, outfile, level, namespace_='', name_='tcRps', fromsubclass_=False, pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.InfRps is not None:
self.InfRps.export(outfile, level, namespace_, name_='InfRps', pretty_print=pretty_print)
if self.Signature is not None:
self.Signature.export(outfile, level, namespace_='dsig:', name_='Signature', pretty_print=pretty_print)
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'InfRps':
obj_ = tcInfRps.factory()
obj_.build(child_)
self.InfRps = obj_
obj_.original_tagname_ = 'InfRps'
elif nodeName_ == 'Signature':
obj_ = SignatureType.factory()
obj_.build(child_)
self.Signature = obj_
obj_.original_tagname_ = 'Signature'
# end class tcRps
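# Minimal usage sketch for the generated classes above. Assumptions: the
# generateDS runtime helpers referenced by export() (showIndent, quote_attrib,
# GenerateDSNamespaceDefs_) are defined earlier in this module, and tcInfRps(),
# like tcRps(), accepts no arguments (the usual generateDS default).
def _example_export_rps():
    import io
    rps = tcRps.factory()
    rps.set_InfRps(tcInfRps.factory())
    buf = io.StringIO()
    # Serialize to XML: an empty tcInfRps renders as a self-closing element.
    rps.export(buf, 0, name_='Rps')
    return buf.getvalue()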
class tcIdentificacaoNfse(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, Numero=None, Cnpj=None, InscricaoMunicipal=None, CodigoMunicipio=None):
self.original_tagname_ = None
self.Numero = Numero
self.validate_tsNumeroNfse(self.Numero)
self.Cnpj = Cnpj
self.validate_tsCnpj(self.Cnpj)
self.InscricaoMunicipal = InscricaoMunicipal
self.validate_tsInscricaoMunicipal(self.InscricaoMunicipal)
self.CodigoMunicipio = CodigoMunicipio
self.validate_tsCodigoMunicipioIbge(self.CodigoMunicipio)
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, tcIdentificacaoNfse)
if subclass is not None:
return subclass(*args_, **kwargs_)
if tcIdentificacaoNfse.subclass:
return tcIdentificacaoNfse.subclass(*args_, **kwargs_)
else:
return tcIdentificacaoNfse(*args_, **kwargs_)
factory = staticmethod(factory)
def get_Numero(self): return self.Numero
def set_Numero(self, Numero): self.Numero = Numero
def get_Cnpj(self): return self.Cnpj
def set_Cnpj(self, Cnpj): self.Cnpj = Cnpj
def get_InscricaoMunicipal(self): return self.InscricaoMunicipal
def set_InscricaoMunicipal(self, InscricaoMunicipal): self.InscricaoMunicipal = InscricaoMunicipal
def get_CodigoMunicipio(self): return self.CodigoMunicipio
    def set_CodigoMunicipio(self, CodigoMunicipio): self.CodigoMunicipio = CodigoMunicipio
# Repository: YimengYang/wol
# File: code/utils/tests/test_tree.py
#!/usr/bin/env python3
from unittest import TestCase, main
from shutil import rmtree
from tempfile import mkdtemp
from os.path import join, dirname, realpath
from skbio import TreeNode
from skbio.tree import MissingNodeError
from utils.tree import (
support, unpack, has_duplicates, compare_topology, intersect_trees,
unpack_by_func, read_taxdump, build_taxdump_tree, order_nodes,
is_ordered, lca2, cladistic, check_monophyly, _compare_length,
compare_branch_lengths, assign_taxa, assign_supports, support_to_label,
walk_copy, root_above, unroot_at, _exact_compare, calc_split_metrics,
calc_length_metrics, format_newick, root_by_outgroup, restore_rooting,
restore_node_labels, restore_node_order, get_base, calc_bidi_minlevels,
calc_bidi_mindepths)
class TreeTests(TestCase):
def setUp(self):
""" Set up working directory and test files
"""
# test output can be written to this directory
self.working_dir = mkdtemp()
# test data directory
datadir = join(dirname(realpath(__file__)), 'data')
# test data files
self.nodes_fp = join(datadir, 'nodes.dmp')
self.names_fp = join(datadir, 'names.dmp')
def tearDown(self):
# there isn't any file to remove at the moment
# but in the future there will be
rmtree(self.working_dir)
def test_support(self):
"""Test getting support value of a node."""
# test nodes with support alone as label
tree = TreeNode.read(['((a,b)75,(c,d)90);'])
node1, node2 = tree.children
self.assertEqual(support(node1), 75.0)
self.assertEqual(support(node2), 90.0)
# test nodes with support and branch length
tree = TreeNode.read(['((a,b)0.85:1.23,(c,d)0.95:4.56);'])
node1, node2 = tree.children
self.assertEqual(support(node1), 0.85)
self.assertEqual(support(node2), 0.95)
# test nodes with support and extra label (not a common scenario but
# can happen)
tree = TreeNode.read(['((a,b)\'80:X\',(c,d)\'60:Y\');'])
node1, node2 = tree.children
self.assertEqual(support(node1), 80.0)
self.assertEqual(support(node2), 60.0)
# test nodes without label, with non-numeric label, and with branch
# length only
tree = TreeNode.read(['((a,b),(c,d)x,(e,f):1.0);'])
for node in tree.children:
self.assertIsNone(support(node))
def test_unpack(self):
"""Test unpacking an internal node."""
# test unpacking a node without branch length
tree = TreeNode.read(['((c,d)a,(e,f)b);'])
unpack(tree.find('b'))
exp = '((c,d)a,e,f);\n'
self.assertEqual(str(tree), exp)
# test unpacking a node with branch length
tree = TreeNode.read(['((c:2.0,d:3.0)a:1.0,(e:2.0,f:1.0)b:2.0);'])
unpack(tree.find('b'))
exp = '((c:2.0,d:3.0)a:1.0,e:4.0,f:3.0);'
self.assertEqual(str(tree).rstrip(), exp)
# test attempting to unpack root
tree = TreeNode.read(['((d,e)b,(f,g)c)a;'])
msg = 'Cannot unpack root.'
with self.assertRaisesRegex(ValueError, msg):
unpack(tree.find('a'))
def test_has_duplicates(self):
"""Test checking for duplicated taxa."""
# test tree without duplicates
tree = TreeNode.read(['((a,b),(c,d));'])
obs = has_duplicates(tree)
self.assertFalse(obs)
# test tree with duplicates
tree = TreeNode.read(['((a,a),(c,a));'])
obs = has_duplicates(tree)
self.assertTrue(obs)
tree = TreeNode.read(['((1,(2,x)),4,(5,(6,x,8)));'])
obs = has_duplicates(tree)
self.assertTrue(obs)
# test tree with empty taxon names (not a common scenario but can
# happen)
tree = TreeNode.read(['((1,(2,,)),4,(5,(6,,8)));'])
        msg = r'Empty taxon name\(s\) found.'
with self.assertRaisesRegex(ValueError, msg):
has_duplicates(tree)
def test_compare_topology(self):
"""Test comparing topologies of two trees."""
# test identical Newick strings
tree1 = TreeNode.read(['(a,b)c;'])
tree2 = TreeNode.read(['(a,b)c;'])
obs = compare_topology(tree1, tree2)
self.assertTrue(obs)
# test identical topologies with different branch lengths
tree1 = TreeNode.read(['(a:1,b:2)c:3;'])
tree2 = TreeNode.read(['(a:3,b:2)c:1;'])
obs = compare_topology(tree1, tree2)
self.assertTrue(obs)
# test identical topologies with flipped child nodes
tree1 = TreeNode.read(['(a,b)c;'])
tree2 = TreeNode.read(['(b,a)c;'])
obs = compare_topology(tree1, tree2)
self.assertTrue(obs)
tree1 = TreeNode.read(['((4,5)2,(6,7,8)3)1;'])
tree2 = TreeNode.read(['((8,7,6)3,(5,4)2)1;'])
obs = compare_topology(tree1, tree2)
self.assertTrue(obs)
tree1 = TreeNode.read(['(((9,10)4,(11,12,13)5)2,((14)6,(15,16,17,18)7,'
'(19,20)8)3)1;'])
tree2 = TreeNode.read(['(((15,16,17,18)7,(14)6,(20,19)8)3,((12,13,11)5'
',(10,9)4)2)1;'])
obs = compare_topology(tree1, tree2)
self.assertTrue(obs)
# test different topologies
tree1 = TreeNode.read(['(a,b)c;'])
tree2 = TreeNode.read(['(a,c)b;'])
obs = compare_topology(tree1, tree2)
self.assertFalse(obs)
tree1 = TreeNode.read(['((4,5)2,(6,7,8)3)1;'])
tree2 = TreeNode.read(['((4,5)3,(6,7,8)2)1;'])
obs = compare_topology(tree1, tree2)
self.assertFalse(obs)
tree1 = TreeNode.read(['((4,5)2,(6,7,8)3)1;'])
tree2 = TreeNode.read(['(((4,1)8)7,(6,3)2)5;'])
obs = compare_topology(tree1, tree2)
self.assertFalse(obs)
def test_intersect_trees(self):
"""Test intersecting two trees."""
# test trees with identical taxa
tree1 = TreeNode.read(['((a,b),(c,d));'])
tree2 = TreeNode.read(['(a,(b,c,d));'])
obs = intersect_trees(tree1, tree2)
exp = (tree1, tree2)
for i in range(2):
self.assertEqual(obs[i].compare_subsets(exp[i]), 0.0)
# test trees with partially different taxa
tree1 = TreeNode.read(['((a,b),(c,d));'])
tree2 = TreeNode.read(['((a,b),(c,e));'])
obs = intersect_trees(tree1, tree2)
tree1_lap = TreeNode.read(['((a,b),c);'])
tree2_lap = TreeNode.read(['((a,b),e);'])
exp = (tree1_lap, tree2_lap)
for i in range(2):
self.assertEqual(obs[i].compare_subsets(exp[i]), 0.0)
tree1 = TreeNode.read(['(((a,b),(c,d)),((e,f,g),h));'])
tree2 = TreeNode.read(['(a,((b,x),(d,y,(f,g,h))));'])
obs = intersect_trees(tree1, tree2)
tree1_lap = TreeNode.read(['(((a,b),d),((f,g),h));'])
tree2_lap = TreeNode.read(['(a,(b,(d,(f,g,h))));'])
exp = (tree1_lap, tree2_lap)
for i in range(2):
self.assertEqual(obs[i].compare_subsets(exp[i]), 0.0)
# test trees with completely different taxa
tree1 = TreeNode.read(['((a,b),(c,d));'])
tree2 = TreeNode.read(['((e,f),(g,h));'])
msg = 'Trees have no overlapping taxa.'
with self.assertRaisesRegex(ValueError, msg):
intersect_trees(tree1, tree2)
# test trees with duplicated taxa
tree1 = TreeNode.read(['((a,b),(c,d));'])
tree2 = TreeNode.read(['((a,a),(b,c));'])
msg = 'Either tree has duplicated taxa.'
with self.assertRaisesRegex(ValueError, msg):
intersect_trees(tree1, tree2)
def test_unpack_by_func(self):
"""Test unpacking nodes by function."""
# unpack internal nodes with branch length <= 1.0
def func(x):
return x.length <= 1.0
# will unpack node 'a', but not tip 'e'
# will add the branch length of 'a' to its child nodes 'c' and 'd'
tree = TreeNode.read(['((c:2,d:3)a:1,(e:1,f:2)b:2);'])
obs = str(unpack_by_func(tree, func)).rstrip()
exp = '((e:1.0,f:2.0)b:2.0,c:3.0,d:4.0);'
self.assertEqual(obs, exp)
# unpack internal nodes with branch length < 2.01
# will unpack both 'a' and 'b'
obs = str(unpack_by_func(tree, lambda x: x.length <= 2.0)).rstrip()
exp = '(c:3.0,d:4.0,e:3.0,f:4.0);'
self.assertEqual(obs, exp)
# unpack two nested nodes 'a' and 'c' simultaneously
tree = TreeNode.read(['(((e:3,f:2)c:1,d:3)a:1,b:4);'])
obs = str(unpack_by_func(tree, lambda x: x.length <= 2.0)).rstrip()
exp = '(b:4.0,d:4.0,e:5.0,f:4.0);'
self.assertEqual(obs, exp)
# test a complicated scenario (unpacking nodes 'g', 'h' and 'm')
def func(x):
return x.length < 2.0
tree = TreeNode.read(['(((a:1.04,b:2.32,c:1.44)d:3.20,'
'(e:3.91,f:2.47)g:1.21)h:1.75,'
'(i:4.14,(j:2.06,k:1.58)l:3.32)m:0.77);'])
obs = str(unpack_by_func(tree, func)).rstrip()
exp = ('((a:1.04,b:2.32,c:1.44)d:4.95,e:6.87,f:5.43,i:4.91,'
'(j:2.06,k:1.58)l:4.09);')
self.assertEqual(obs, exp)
# unpack nodes with support < 75
def func(x):
return support(x) < 75
tree = TreeNode.read(['(((a,b)85,(c,d)78)75,(e,(f,g)64)80);'])
obs = str(unpack_by_func(tree, func)).rstrip()
exp = '(((a,b)85,(c,d)78)75,(e,f,g)80);'
self.assertEqual(obs, exp)
# unpack nodes with support < 85
obs = str(unpack_by_func(tree, lambda x: support(x) < 85)).rstrip()
exp = '((a,b)85,c,d,e,f,g);'
self.assertEqual(obs, exp)
# unpack nodes with support < 0.95
tree = TreeNode.read(['(((a,b)0.97,(c,d)0.98)1.0,(e,(f,g)0.88)0.96);'])
obs = str(unpack_by_func(tree, lambda x: support(x) < 0.95)).rstrip()
exp = '(((a,b)0.97,(c,d)0.98)1.0,(e,f,g)0.96);'
self.assertEqual(obs, exp)
# test a case where there are branch lengths, none support values and
# node labels
def func(x):
sup = support(x)
return sup is not None and sup < 75
tree = TreeNode.read(['(((a:1.02,b:0.33)85:0.12,(c:0.86,d:2.23)'
'70:3.02)75:0.95,(e:1.43,(f:1.69,g:1.92)64:0.20)'
'node:0.35)root;'])
obs = str(unpack_by_func(tree, func)).rstrip()
exp = ('(((a:1.02,b:0.33)85:0.12,c:3.88,d:5.25)75:0.95,'
'(e:1.43,f:1.89,g:2.12)node:0.35)root;')
self.assertEqual(obs, exp)
def test_read_taxdump(self):
"""Test reading NCBI taxdump."""
obs = read_taxdump(self.nodes_fp)
exp = {
'1': {'parent': '1', 'rank': 'order',
'children': set(['2', '3'])},
'2': {'parent': '1', 'rank': 'family',
'children': set(['4', '5'])},
'3': {'parent': '1', 'rank': 'family',
'children': set(['6', '7', '8'])},
'4': {'parent': '2', 'rank': 'genus',
'children': set(['9', '10'])},
'5': {'parent': '2', 'rank': 'genus',
'children': set(['11', '12', '13'])},
'6': {'parent': '3', 'rank': 'genus',
'children': set(['14'])},
'7': {'parent': '3', 'rank': 'genus',
'children': set(['15', '16', '17', '18'])},
'8': {'parent': '3', 'rank': 'genus',
'children': set(['19', '20'])},
'9': {'parent': '4', 'rank': 'species', 'children': set()},
'10': {'parent': '4', 'rank': 'species', 'children': set()},
'11': {'parent': '5', 'rank': 'species', 'children': set()},
'12': {'parent': '5', 'rank': 'species', 'children': set()},
'13': {'parent': '5', 'rank': 'species', 'children': set()},
'14': {'parent': '6', 'rank': 'species', 'children': set()},
'15': {'parent': '7', 'rank': 'species', 'children': set()},
'16': {'parent': '7', 'rank': 'species', 'children': set()},
'17': {'parent': '7', 'rank': 'species', 'children': set()},
'18': {'parent': '7', 'rank': 'species', 'children': set()},
'19': {'parent': '8', 'rank': 'species', 'children': set()},
'20': {'parent': '8', 'rank': 'species', 'children': set()}
}
for tid in exp:
exp[tid]['name'] = ''
self.assertDictEqual(obs, exp)
obs = read_taxdump(self.nodes_fp, self.names_fp)
name_dict = {
'1': 'root', '2': 'Eukaryota', '3': 'Bacteria', '4': 'Plantae',
'5': 'Animalia', '6': 'Bacteroidetes', '7': 'Proteobacteria',
'8': 'Firmicutes', '9': 'Gymnosperms', '10': 'Angiosperms',
'11': 'Chordata', '12': 'Arthropoda', '13': 'Mollusca',
'14': 'Prevotella', '15': 'Escherichia', '16': 'Vibrio',
'17': 'Rhizobium', '18': 'Helicobacter', '19': 'Bacillus',
'20': 'Clostridia'
}
for tid in name_dict:
exp[tid]['name'] = name_dict[tid]
self.assertDictEqual(obs, exp)
def test_build_taxdump_tree(self):
"""Test building NCBI taxdump tree."""
taxdump = read_taxdump(self.nodes_fp)
obs = build_taxdump_tree(taxdump)
exp = TreeNode.read(['(((9,10)4,(11,12,13)5)2,((14)6,(15,16,17,18)7,'
'(19,20)8)3)1;'])
self.assertTrue(compare_topology(obs, exp))
def test_order_nodes(self):
"""Test order nodes"""
tree1 = TreeNode.read(['(((a,b),(c,d,i)j),((e,g),h));'])
# test increase ordering
tree1_increase = order_nodes(tree1, True)
self.assertTrue(is_ordered(tree1_increase))
# test decrease ordering
tree1_decrease = order_nodes(tree1, False)
self.assertTrue(is_ordered(tree1_decrease, False))
def test_is_ordered(self):
"""Test if a tree is ordered"""
# test tree in increasing order
tree1 = TreeNode.read(['((i,j)a,b)c;'])
self.assertTrue(is_ordered(tree1))
self.assertTrue(is_ordered(tree1, True))
self.assertFalse(is_ordered(tree1, False))
# test tree in both increasing and decreasing order
tree2 = TreeNode.read(['(a, b);'])
self.assertTrue(is_ordered(tree2))
self.assertTrue(is_ordered(tree2, False))
# test an unordered tree
tree3 = TreeNode.read(['(((a,b),(c,d,x,y,z)),((e,g),h));'])
self.assertFalse(is_ordered(tree3, True))
self.assertFalse(is_ordered(tree3, False))
# test tree in decreasing order
tree5 = TreeNode.read(['((h,(e,g)),((a,b),(c,d,i)j));'])
self.assertTrue(is_ordered(tree5, False))
def test_lca2(self):
newick = '((((a,b)n6,c)n4,(d,e)n5)n2,(f,(g,h)n7)n3,i)n1;'
tree = TreeNode.read([newick])
msg = "'TreeNode' object has no attribute 'taxa'"
with self.assertRaisesRegex(AttributeError, msg):
lca2(tree, set('ab'))
assign_taxa(tree)
self.assertEqual(lca2(tree, set('a')).name, 'a')
self.assertEqual(lca2(tree, set('ab')).name, 'n6')
self.assertEqual(lca2(tree, set('ac')).name, 'n4')
self.assertEqual(lca2(tree, set('ace')).name, 'n2')
self.assertEqual(lca2(tree, set('bgi')).name, 'n1')
def test_cladistic(self):
tree1 = TreeNode.read(['((i,j)a,b)c;'])
self.assertEqual('uni', cladistic(tree1, ['i']))
self.assertEqual('mono', cladistic(tree1, ['i', 'j']))
self.assertEqual('poly', cladistic(tree1, ['i', 'b']))
msg = 'Node x is not in self'
with self.assertRaisesRegex(MissingNodeError, msg):
cladistic(tree1, ['x', 'b'])
tree2 = TreeNode.read(['(((a,b),(c,d,x)),((e,g),h));'])
self.assertEqual('uni', cladistic(tree2, ['a']))
        self.assertEqual('mono', cladistic(tree2, ['a', 'b', 'c', 'd', 'x']))
# File: mmcif/io/BinaryCifReader.py
##
# File: BinaryCifReader.py
# Date: 15-May-2021 jdw
#
# Reader methods and decoders for binary CIF.
#
# Updates:
##
from collections import OrderedDict
import gzip
import io
import logging
import struct
from contextlib import closing
import msgpack
import requests
from mmcif.api.DataCategory import DataCategory
from mmcif.api.PdbxContainers import DataContainer
try:
from urllib.parse import urlsplit
except Exception:
from urlparse import urlsplit
logger = logging.getLogger(__name__)
class BinaryCifReader(object):
"""Reader methods for the binary CIF format."""
def __init__(self, storeStringsAsBytes=False, defaultStringEncoding="utf-8"):
"""Create an instance of the binary CIF reader class.
Args:
storeStringsAsBytes (bool, optional): strings are stored as lists of bytes. Defaults to False.
defaultStringEncoding (str, optional): default encoding for string data. Defaults to "utf-8".
"""
self.__storeStringsAsBytes = storeStringsAsBytes
self.__defaultStringEncoding = defaultStringEncoding
def deserialize(self, locator):
"""Deserialize the input binary CIF file stored in the file/URL locator path.
Args:
locator (str): input file path or URL
Returns:
list: list DataContainer objects
"""
cL = []
try:
if self.__isLocal(locator):
with gzip.open(locator, mode="rb") if locator[-3:] == ".gz" else open(locator, "rb") as fh:
cL = self.__deserialize(fh, storeStringsAsBytes=self.__storeStringsAsBytes)
else:
if locator.endswith(".gz"):
customHeader = {"Accept-Encoding": "gzip"}
with closing(requests.get(locator, headers=customHeader)) as fh:
ufh = gzip.GzipFile(fileobj=io.BytesIO(fh.content))
cL = self.__deserialize(ufh, storeStringsAsBytes=self.__storeStringsAsBytes)
else:
with closing(requests.get(locator)) as fh:
cL = self.__deserialize(io.BytesIO(fh.content), storeStringsAsBytes=self.__storeStringsAsBytes)
except Exception as e:
logger.exception("Failing with %s", str(e))
return cL
def __deserialize(self, fh, storeStringsAsBytes=False):
cL = []
try:
dec = BinaryCifDecoders(storeStringsAsBytes=storeStringsAsBytes)
bD = msgpack.unpack(fh)
#
logger.debug("bD.keys() %r", bD.keys())
logger.debug("bD['dataBlocks'] %s", bD[self.__toBytes("dataBlocks")])
#
for dataBlock in bD[self.__toBytes("dataBlocks")]:
header = self.__fromBytes(dataBlock[self.__toBytes("header")]) if self.__toBytes("header") in dataBlock else None
logger.debug("header %r", header)
logger.debug("dataBlock %r", dataBlock)
#
dc = DataContainer(header)
categoryList = dataBlock[self.__toBytes("categories")] if self.__toBytes("categories") in dataBlock else []
for category in categoryList:
catName = self.__fromBytes(category[self.__toBytes("name")])[1:]
colList = category[self.__toBytes("columns")]
logger.debug("catName %r columns %r", catName, colList)
colD = OrderedDict()
atNameList = []
for col in colList:
logger.debug("col.keys() %r", col.keys())
atName = self.__fromBytes(col[self.__toBytes("name")])
atData = col[self.__toBytes("data")]
logger.debug("atData encoding (%d) data (%d)", len(atData[self.__toBytes("encoding")]), len(atData[self.__toBytes("data")]))
atMask = col[self.__toBytes("mask")]
logger.debug("catName %r atName %r", catName, atName)
logger.debug(" >atData.data %r", atData[self.__toBytes("data")])
logger.debug(" >atData.encoding (%d) %r", len(atData[self.__toBytes("encoding")]), atData[self.__toBytes("encoding")])
logger.debug(" >mask %r", atMask)
tVal = dec.decode(col[self.__toBytes("data")][self.__toBytes("data")], col[self.__toBytes("data")][self.__toBytes("encoding")])
if col[self.__toBytes("mask")]:
mVal = dec.decode(col[self.__toBytes("mask")][self.__toBytes("data")], col[self.__toBytes("mask")][self.__toBytes("encoding")])
tVal = ["?" if m == 2 else "." if m == 1 else d for d, m in zip(tVal, mVal)]
colD[atName] = tVal
atNameList.append(atName)
#
cObj = DataCategory(catName, attributeNameList=atNameList)
genL = [colGen for colGen in colD.values()]
for row in zip(*genL):
logger.debug("row %r", row)
cObj.append(row)
#
dc.append(cObj)
cL.append(dc)
except Exception as e:
logger.exception("Failing with %s", str(e))
return cL
def __isLocal(self, locator):
"""Returns true if input string can be interpreted as a local file path.
Args:
locator (str): url or path string
Returns:
bool: True if locator is a local path
"""
try:
locSp = urlsplit(locator)
return locSp.scheme in ["", "file"]
except Exception as e:
logger.exception("For locator %r failing with %s", locator, str(e))
return None
def __toBytes(self, strVal):
"""Optional conversion of the input string to bytes according to the class setting (storeStringsAsBytes).
Args:
strVal (string): input string
Returns:
string or bytes: optionally converted string.
"""
try:
return strVal.encode(self.__defaultStringEncoding) if self.__storeStringsAsBytes else strVal
        except (UnicodeEncodeError, UnicodeDecodeError, AttributeError):
logger.exception("Bad type for %r", strVal)
return strVal
def __fromBytes(self, byteVal):
"""Optional conversion of the input value according to the class setting (storeStringsAsBytes).
Args:
byteVal (string): input byte object
Returns:
string: optionally converted input value
"""
try:
return byteVal.decode(self.__defaultStringEncoding) if self.__storeStringsAsBytes else byteVal
except (UnicodeDecodeError, AttributeError):
logger.exception("Bad type for %r", byteVal)
return byteVal
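# Minimal usage sketch for BinaryCifReader.deserialize(). The path
# "example.bcif" is an illustrative placeholder for a local BinaryCIF file;
# only the deserialize() API defined above is exercised.
def _example_read_bcif(locatorPath="example.bcif"):
    reader = BinaryCifReader(storeStringsAsBytes=False)
    containerList = reader.deserialize(locatorPath)
    # deserialize() returns a (possibly empty) list of DataContainer objects.
    logger.info("Read %d data container(s) from %s", len(containerList), locatorPath)
    return containerList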
class BinaryCifDecoders(object):
"""Column oriented Binary CIF decoders implementing
StringArray, ByteArray, IntegerPacking, Delta, RunLength,
FixedPoint, and IntervalQuantization from the BinaryCIF
specification described in:
<NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>.
BinaryCIF and CIFTools-Lightweight, efficient and extensible macromolecular data management.
PLoS Comput Biol. 2020 Oct 19;16(10):e1008247.
doi: 10.1371/journal.pcbi.1008247. PMID: 33075050; PMCID: PMC7595629.
and in the specification at https://github.com/molstar/BinaryCIF/blob/master/encoding.md
    and from the I/HM Python implementation at https://github.com/ihmwg/python-ihm
"""
bCifCodeTypeD = {1: "integer_8", 2: "integer_16", 3: "integer_32", 4: "unsigned_integer_8", 5: "unsigned_integer_16", 6: "unsigned_integer_32", 32: "float_32", 33: "float_64"}
"""Binary CIF protocol internal data type codes to integer and float types
"""
bCifTypeD = {
"integer_8": {"struct_format_code": "b", "min": -0x7F - 1, "max": 0x7F},
"integer_16": {"struct_format_code": "h", "min": -0x7FFF - 1, "max": 0x7FFF},
"integer_32": {"struct_format_code": "i", "min": -0x7FFFFFFF - 1, "max": 0x7FFFFFFF},
"unsigned_integer_8": {"struct_format_code": "B", "min": 0, "max": 0xFF},
"unsigned_integer_16": {"struct_format_code": "H", "min": 0, "max": 0xFFFF},
"unsigned_integer_32": {"struct_format_code": "I", "min": 0, "max": 0xFFFFFFFF},
"float_32": {"struct_format_code": "f", "min": 1.175494351e-38, "max": 3.402823466e38},
"float_64": {"struct_format_code": "d", "min": 2.2250738585072014e-308, "max": 1.7976931348623158e308},
}
"""Binary CIF data type feature dictionary
"""
def __init__(self, storeStringsAsBytes=False, defaultStringEncoding="utf-8", verbose=False):
"""Create an instance of the binary CIF encoder class.
Args:
storeStringsAsBytes (bool, optional): express keys and strings as byte types otherwise follow the default encoding. Defaults to False.
defaultStringEncoding (str, optional): default encoding for string types. Defaults to "utf-8".
verbose(bool, optional): provide tracking of type conversion issues. Defaults to False.
"""
self.__storeStringsAsBytes = storeStringsAsBytes
self.__defaultStringEncoding = defaultStringEncoding
self.__verbose = verbose
#
self.__encodersMethodD = {
"StringArray": self.stringArrayDecoder,
"ByteArray": self.byteArrayDecoder,
"IntegerPacking": self.integerPackingDecoder,
"Delta": self.deltaDecoder,
"RunLength": self.runLengthDecoder,
"FixedPoint": self.fixedPointDecoder,
"IntervalQuantization": self.intervalQuantizationDecoder,
}
def decode(self, colDataList, encodingDictList):
"""Return the decoded input data column using the input list of encodings
Args:
colDataList (list): column of data to be decoded
encodingDictList (list): list of dictionary holding binary CIF encoding details
elements described in the specification at
https://github.com/molstar/BinaryCIF/blob/master/encoding.md
Yields:
list: decoded list of column data
"""
for encoding in reversed(encodingDictList):
encType = self.__fromBytes(encoding[self.__toBytes("kind")])
colDataList = self.__encodersMethodD[encType](colDataList, encoding)
return colDataList
def stringArrayDecoder(self, colDataList, encodingDict):
"""Decode an array of strings stored as a concatenation of all unique
strings, a list of offsets to construct the unique substrings, and indices into
the offset array.
Args:
colDataList (list): column of data to be decoded
encodingDict (dict): dictionary of binary CIF encoding details
elements described in the specification at
https://github.com/molstar/BinaryCIF/blob/master/encoding.md
Yields:
list: decoded list of string data
"""
offsetList = list(self.decode(encodingDict[self.__toBytes("offsets")], encodingDict[self.__toBytes("offsetEncoding")]))
lookupIndexIt = self.decode(colDataList, encodingDict[self.__toBytes("dataEncoding")])
stringData = self.__fromBytes(encodingDict[self.__toBytes("stringData")])
uniqueStringList = []
for iBegin, iEnd in zip(offsetList, offsetList[1:]):
uniqueStringList.append(stringData[iBegin:iEnd])
logger.debug("iBegin %d iEnd %d %r ", iBegin, iEnd, stringData[iBegin:iEnd])
for ii in lookupIndexIt:
yield uniqueStringList[ii] if ii >= 0 else None
def byteArrayDecoder(self, colDataList, encodingDict):
"""Decode input byte list into a list of integers/floats
Args:
colDataList (list): column of data to be decoded
encodingDict (dict): dictionary of binary CIF encoding details
elements described in the specification at
https://github.com/molstar/BinaryCIF/blob/master/encoding.md
        Returns:
            tuple: decoded integer/float data
"""
structKey = self.bCifCodeTypeD[encodingDict[self.__toBytes("type")]]
structFormatCode = self.bCifTypeD[structKey]["struct_format_code"]
count = len(colDataList) // struct.calcsize(structFormatCode)
# struct.unpack() format string for little-endian = < format_string code * counts
return struct.unpack("<" + structFormatCode * count, colDataList)
def __unsignedDecode(self, colDataList, encodingDict):
upperLimit = self.bCifTypeD["unsigned_integer_8"]["max"] if encodingDict[self.__toBytes("byteCount")] == 1 else self.bCifTypeD["unsigned_integer_16"]["max"]
ii = 0
while ii < len(colDataList):
value = 0
tVal = colDataList[ii]
while tVal == upperLimit:
value += tVal
ii += 1
tVal = colDataList[ii]
yield value + tVal
ii += 1
def __signedDecode(self, colDataList, encodingDict):
upperLimit = self.bCifTypeD["integer_8"]["max"] if encodingDict[self.__toBytes("byteCount")] == 1 else self.bCifTypeD["integer_16"]["max"]
lowerLimit = self.bCifTypeD["integer_8"]["min"] if encodingDict[self.__toBytes("byteCount")] == 1 else self.bCifTypeD["integer_16"]["min"]
ii = 0
while ii < len(colDataList):
value = 0
tVal = colDataList[ii]
while tVal == upperLimit or tVal == lowerLimit:
value += tVal
ii += 1
tVal = colDataList[ii]
yield value + tVal
ii += 1
def integerPackingDecoder(self, colDataList, encodingDict):
"""Decode a (32-bit) integer list packed into 8- or 16-bit values.
Args:
colDataList (list): column of data to be decoded
encodingDict (dict): dictionary of binary CIF encoding details
elements described in the specification at
https://github.com/molstar/BinaryCIF/blob/master/encoding.md
Yields:
list: decoded list of integer data
"""
if encodingDict[self.__toBytes("isUnsigned")]:
return self.__unsignedDecode(colDataList, encodingDict)
else:
return self.__signedDecode(colDataList, encodingDict)
def deltaDecoder(self, colDataList, encodingDict):
"""Decode an integer list stored as a list of consecutive differences.
Args:
colDataList (list): column of data to be decoded
encodingDict (dict): dictionary of binary CIF encoding details
elements described in the specification at
https://github.com/molstar/BinaryCIF/blob/master/encoding.md
Yields:
list: decoded list of integer data
"""
val = encodingDict[self.__toBytes("origin")]
for diff in colDataList:
val += diff
yield val
def runLengthDecoder(self, colDataList, encodingDict):
"""Decode an integer list stored as pairs of (value, number of repeats).
Args:
colDataList (list): column of data to be decoded
encodingDict (dict): dictionary of binary CIF encoding details
elements described in the specification at
https://github.com/molstar/BinaryCIF/blob/master/encoding.md
Yields:
            list: decoded list of integer data
        """
        # Expand each (value, number of repeats) pair into repeated values.
        for ii in range(0, len(colDataList), 2):
            for _ in range(colDataList[ii + 1]):
                yield colDataList[ii]
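    # Worked examples (illustrative values; assumes the default
    # storeStringsAsBytes=False so plain string keys are used):
    #   >>> dec = BinaryCifDecoders()
    #   >>> list(dec.runLengthDecoder([5, 3, 9, 2], {}))
    #   [5, 5, 5, 9, 9]
    #   >>> list(dec.deltaDecoder([0, 3, 2, 1], {"origin": 1000}))
    #   [1000, 1003, 1005, 1006]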
# File: pythonocc/lib/OCC/GeomToStep.py
# This file was automatically generated by SWIG (http://www.swig.org).
# Version 2.0.10
#
# Do not make changes to this file unless you know what you are doing--modify
# the SWIG interface file instead.
from sys import version_info
if version_info >= (3,0,0):
new_instancemethod = lambda func, inst, cls: _GeomToStep.SWIG_PyInstanceMethod_New(func)
else:
from new import instancemethod as new_instancemethod
if version_info >= (2,6,0):
def swig_import_helper():
from os.path import dirname
import imp
fp = None
try:
fp, pathname, description = imp.find_module('_GeomToStep', [dirname(__file__)])
except ImportError:
import _GeomToStep
return _GeomToStep
if fp is not None:
try:
_mod = imp.load_module('_GeomToStep', fp, pathname, description)
finally:
fp.close()
return _mod
_GeomToStep = swig_import_helper()
del swig_import_helper
else:
import _GeomToStep
del version_info
try:
_swig_property = property
except NameError:
pass # Python < 2.2 doesn't have 'property'.
def _swig_setattr_nondynamic(self,class_type,name,value,static=1):
if (name == "thisown"): return self.this.own(value)
if (name == "this"):
if type(value).__name__ == 'SwigPyObject':
self.__dict__[name] = value
return
method = class_type.__swig_setmethods__.get(name,None)
if method: return method(self,value)
if (not static):
self.__dict__[name] = value
else:
raise AttributeError("You cannot add attributes to %s" % self)
def _swig_setattr(self,class_type,name,value):
return _swig_setattr_nondynamic(self,class_type,name,value,0)
def _swig_getattr(self,class_type,name):
if (name == "thisown"): return self.this.own()
method = class_type.__swig_getmethods__.get(name,None)
if method: return method(self)
raise AttributeError(name)
def _swig_repr(self):
try: strthis = "proxy of " + self.this.__repr__()
except: strthis = ""
return "<%s.%s; %s >" % (self.__class__.__module__, self.__class__.__name__, strthis,)
try:
_object = object
_newclass = 1
except AttributeError:
class _object : pass
_newclass = 0
def _swig_setattr_nondynamic_method(set):
def set_attr(self,name,value):
if (name == "thisown"): return self.this.own(value)
if hasattr(self,name) or (name == "this"):
set(self,name,value)
else:
raise AttributeError("You cannot add attributes to %s" % self)
return set_attr
class SwigPyIterator(object):
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
def __init__(self, *args, **kwargs): raise AttributeError("No constructor defined - class is abstract")
__repr__ = _swig_repr
__swig_destroy__ = _GeomToStep.delete_SwigPyIterator
def __iter__(self): return self
SwigPyIterator.value = new_instancemethod(_GeomToStep.SwigPyIterator_value,None,SwigPyIterator)
SwigPyIterator.incr = new_instancemethod(_GeomToStep.SwigPyIterator_incr,None,SwigPyIterator)
SwigPyIterator.decr = new_instancemethod(_GeomToStep.SwigPyIterator_decr,None,SwigPyIterator)
SwigPyIterator.distance = new_instancemethod(_GeomToStep.SwigPyIterator_distance,None,SwigPyIterator)
SwigPyIterator.equal = new_instancemethod(_GeomToStep.SwigPyIterator_equal,None,SwigPyIterator)
SwigPyIterator.copy = new_instancemethod(_GeomToStep.SwigPyIterator_copy,None,SwigPyIterator)
SwigPyIterator.next = new_instancemethod(_GeomToStep.SwigPyIterator_next,None,SwigPyIterator)
SwigPyIterator.__next__ = new_instancemethod(_GeomToStep.SwigPyIterator___next__,None,SwigPyIterator)
SwigPyIterator.previous = new_instancemethod(_GeomToStep.SwigPyIterator_previous,None,SwigPyIterator)
SwigPyIterator.advance = new_instancemethod(_GeomToStep.SwigPyIterator_advance,None,SwigPyIterator)
SwigPyIterator.__eq__ = new_instancemethod(_GeomToStep.SwigPyIterator___eq__,None,SwigPyIterator)
SwigPyIterator.__ne__ = new_instancemethod(_GeomToStep.SwigPyIterator___ne__,None,SwigPyIterator)
SwigPyIterator.__iadd__ = new_instancemethod(_GeomToStep.SwigPyIterator___iadd__,None,SwigPyIterator)
SwigPyIterator.__isub__ = new_instancemethod(_GeomToStep.SwigPyIterator___isub__,None,SwigPyIterator)
SwigPyIterator.__add__ = new_instancemethod(_GeomToStep.SwigPyIterator___add__,None,SwigPyIterator)
SwigPyIterator.__sub__ = new_instancemethod(_GeomToStep.SwigPyIterator___sub__,None,SwigPyIterator)
SwigPyIterator_swigregister = _GeomToStep.SwigPyIterator_swigregister
SwigPyIterator_swigregister(SwigPyIterator)
import OCC.Standard
import OCC.gp
import OCC.Geom
import OCC.MMgt
import OCC.TCollection
import OCC.GeomAbs
import OCC.TColgp
import OCC.TColStd
import OCC.Geom2d
import OCC.StepGeom
import OCC.StepRepr
import OCC.StepBasic
import OCC.Interface
import OCC.Message
def register_handle(handle, base_object):
"""
Inserts the handle into the base object to
prevent memory corruption in certain cases
"""
try:
if base_object.IsKind("Standard_Transient"):
base_object.thisHandle = handle
base_object.thisown = False
except:
pass
class GeomToStep_Root(object):
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
__repr__ = _swig_repr
def IsDone(self, *args):
"""
:rtype: bool
"""
return _GeomToStep.GeomToStep_Root_IsDone(self, *args)
def __init__(self):
_GeomToStep.GeomToStep_Root_swiginit(self,_GeomToStep.new_GeomToStep_Root())
__swig_destroy__ = _GeomToStep.delete_GeomToStep_Root
GeomToStep_Root.IsDone = new_instancemethod(_GeomToStep.GeomToStep_Root_IsDone,None,GeomToStep_Root)
GeomToStep_Root_swigregister = _GeomToStep.GeomToStep_Root_swigregister
GeomToStep_Root_swigregister(GeomToStep_Root)
class GeomToStep_MakeAxis1Placement(GeomToStep_Root):
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
__repr__ = _swig_repr
def __init__(self, *args):
"""
:param A:
:type A: gp_Ax1
:rtype: None
:param A:
:type A: gp_Ax2d
:rtype: None
:param A:
:type A: Handle_Geom_Axis1Placement &
:rtype: None
:param A:
:type A: Handle_Geom2d_AxisPlacement &
:rtype: None
"""
_GeomToStep.GeomToStep_MakeAxis1Placement_swiginit(self,_GeomToStep.new_GeomToStep_MakeAxis1Placement(*args))
def Value(self, *args):
"""
:rtype: Handle_StepGeom_Axis1Placement
"""
return _GeomToStep.GeomToStep_MakeAxis1Placement_Value(self, *args)
__swig_destroy__ = _GeomToStep.delete_GeomToStep_MakeAxis1Placement
GeomToStep_MakeAxis1Placement.Value = new_instancemethod(_GeomToStep.GeomToStep_MakeAxis1Placement_Value,None,GeomToStep_MakeAxis1Placement)
GeomToStep_MakeAxis1Placement_swigregister = _GeomToStep.GeomToStep_MakeAxis1Placement_swigregister
GeomToStep_MakeAxis1Placement_swigregister(GeomToStep_MakeAxis1Placement)
class GeomToStep_MakeAxis2Placement2d(GeomToStep_Root):
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
__repr__ = _swig_repr
def __init__(self, *args):
"""
:param A:
:type A: gp_Ax2
:rtype: None
:param A:
:type A: gp_Ax22d
:rtype: None
"""
_GeomToStep.GeomToStep_MakeAxis2Placement2d_swiginit(self,_GeomToStep.new_GeomToStep_MakeAxis2Placement2d(*args))
def Value(self, *args):
"""
:rtype: Handle_StepGeom_Axis2Placement2d
"""
return _GeomToStep.GeomToStep_MakeAxis2Placement2d_Value(self, *args)
__swig_destroy__ = _GeomToStep.delete_GeomToStep_MakeAxis2Placement2d
GeomToStep_MakeAxis2Placement2d.Value = new_instancemethod(_GeomToStep.GeomToStep_MakeAxis2Placement2d_Value,None,GeomToStep_MakeAxis2Placement2d)
GeomToStep_MakeAxis2Placement2d_swigregister = _GeomToStep.GeomToStep_MakeAxis2Placement2d_swigregister
GeomToStep_MakeAxis2Placement2d_swigregister(GeomToStep_MakeAxis2Placement2d)
class GeomToStep_MakeAxis2Placement3d(GeomToStep_Root):
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
__repr__ = _swig_repr
def __init__(self, *args):
"""
:rtype: None
:param A:
:type A: gp_Ax2
:rtype: None
:param A:
:type A: gp_Ax3
:rtype: None
:param T:
:type T: gp_Trsf
:rtype: None
:param A:
:type A: Handle_Geom_Axis2Placement &
:rtype: None
"""
_GeomToStep.GeomToStep_MakeAxis2Placement3d_swiginit(self,_GeomToStep.new_GeomToStep_MakeAxis2Placement3d(*args))
def Value(self, *args):
"""
:rtype: Handle_StepGeom_Axis2Placement3d
"""
return _GeomToStep.GeomToStep_MakeAxis2Placement3d_Value(self, *args)
__swig_destroy__ = _GeomToStep.delete_GeomToStep_MakeAxis2Placement3d
GeomToStep_MakeAxis2Placement3d.Value = new_instancemethod(_GeomToStep.GeomToStep_MakeAxis2Placement3d_Value,None,GeomToStep_MakeAxis2Placement3d)
GeomToStep_MakeAxis2Placement3d_swigregister = _GeomToStep.GeomToStep_MakeAxis2Placement3d_swigregister
GeomToStep_MakeAxis2Placement3d_swigregister(GeomToStep_MakeAxis2Placement3d)
class GeomToStep_MakeBSplineCurveWithKnots(GeomToStep_Root):
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
__repr__ = _swig_repr
def __init__(self, *args):
"""
:param Bsplin:
:type Bsplin: Handle_Geom_BSplineCurve &
:rtype: None
:param Bsplin:
:type Bsplin: Handle_Geom2d_BSplineCurve &
:rtype: None
"""
_GeomToStep.GeomToStep_MakeBSplineCurveWithKnots_swiginit(self,_GeomToStep.new_GeomToStep_MakeBSplineCurveWithKnots(*args))
def Value(self, *args):
"""
:rtype: Handle_StepGeom_BSplineCurveWithKnots
"""
return _GeomToStep.GeomToStep_MakeBSplineCurveWithKnots_Value(self, *args)
__swig_destroy__ = _GeomToStep.delete_GeomToStep_MakeBSplineCurveWithKnots
GeomToStep_MakeBSplineCurveWithKnots.Value = new_instancemethod(_GeomToStep.GeomToStep_MakeBSplineCurveWithKnots_Value,None,GeomToStep_MakeBSplineCurveWithKnots)
GeomToStep_MakeBSplineCurveWithKnots_swigregister = _GeomToStep.GeomToStep_MakeBSplineCurveWithKnots_swigregister
GeomToStep_MakeBSplineCurveWithKnots_swigregister(GeomToStep_MakeBSplineCurveWithKnots)
class GeomToStep_MakeBSplineCurveWithKnotsAndRationalBSplineCurve(GeomToStep_Root):
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
__repr__ = _swig_repr
def __init__(self, *args):
"""
:param Bsplin:
:type Bsplin: Handle_Geom_BSplineCurve &
:rtype: None
:param Bsplin:
:type Bsplin: Handle_Geom2d_BSplineCurve &
:rtype: None
"""
_GeomToStep.GeomToStep_MakeBSplineCurveWithKnotsAndRationalBSplineCurve_swiginit(self,_GeomToStep.new_GeomToStep_MakeBSplineCurveWithKnotsAndRationalBSplineCurve(*args))
def Value(self, *args):
"""
:rtype: Handle_StepGeom_BSplineCurveWithKnotsAndRationalBSplineCurve
"""
return _GeomToStep.GeomToStep_MakeBSplineCurveWithKnotsAndRationalBSplineCurve_Value(self, *args)
__swig_destroy__ = _GeomToStep.delete_GeomToStep_MakeBSplineCurveWithKnotsAndRationalBSplineCurve
GeomToStep_MakeBSplineCurveWithKnotsAndRationalBSplineCurve.Value = new_instancemethod(_GeomToStep.GeomToStep_MakeBSplineCurveWithKnotsAndRationalBSplineCurve_Value,None,GeomToStep_MakeBSplineCurveWithKnotsAndRationalBSplineCurve)
GeomToStep_MakeBSplineCurveWithKnotsAndRationalBSplineCurve_swigregister = _GeomToStep.GeomToStep_MakeBSplineCurveWithKnotsAndRationalBSplineCurve_swigregister
GeomToStep_MakeBSplineCurveWithKnotsAndRationalBSplineCurve_swigregister(GeomToStep_MakeBSplineCurveWithKnotsAndRationalBSplineCurve)
class GeomToStep_MakeBSplineSurfaceWithKnots(GeomToStep_Root):
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
__repr__ = _swig_repr
def __init__(self, *args):
"""
:param Bsplin:
:type Bsplin: Handle_Geom_BSplineSurface &
:rtype: None
"""
_GeomToStep.GeomToStep_MakeBSplineSurfaceWithKnots_swiginit(self,_GeomToStep.new_GeomToStep_MakeBSplineSurfaceWithKnots(*args))
def Value(self, *args):
"""
:rtype: Handle_StepGeom_BSplineSurfaceWithKnots
"""
return _GeomToStep.GeomToStep_MakeBSplineSurfaceWithKnots_Value(self, *args)
__swig_destroy__ = _GeomToStep.delete_GeomToStep_MakeBSplineSurfaceWithKnots
GeomToStep_MakeBSplineSurfaceWithKnots.Value = new_instancemethod(_GeomToStep.GeomToStep_MakeBSplineSurfaceWithKnots_Value,None,GeomToStep_MakeBSplineSurfaceWithKnots)
GeomToStep_MakeBSplineSurfaceWithKnots_swigregister = _GeomToStep.GeomToStep_MakeBSplineSurfaceWithKnots_swigregister
GeomToStep_MakeBSplineSurfaceWithKnots_swigregister(GeomToStep_MakeBSplineSurfaceWithKnots)
class GeomToStep_MakeBSplineSurfaceWithKnotsAndRationalBSplineSurface(GeomToStep_Root):
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
__repr__ = _swig_repr
def __init__(self, *args):
"""
:param Bsplin:
:type Bsplin: Handle_Geom_BSplineSurface &
:rtype: None
"""
_GeomToStep.GeomToStep_MakeBSplineSurfaceWithKnotsAndRationalBSplineSurface_swiginit(self,_GeomToStep.new_GeomToStep_MakeBSplineSurfaceWithKnotsAndRationalBSplineSurface(*args))
def Value(self, *args):
"""
:rtype: Handle_StepGeom_BSplineSurfaceWithKnotsAndRationalBSplineSurface
"""
return _GeomToStep.GeomToStep_MakeBSplineSurfaceWithKnotsAndRationalBSplineSurface_Value(self, *args)
__swig_destroy__ = _GeomToStep.delete_GeomToStep_MakeBSplineSurfaceWithKnotsAndRationalBSplineSurface
GeomToStep_MakeBSplineSurfaceWithKnotsAndRationalBSplineSurface.Value = new_instancemethod(_GeomToStep.GeomToStep_MakeBSplineSurfaceWithKnotsAndRationalBSplineSurface_Value,None,GeomToStep_MakeBSplineSurfaceWithKnotsAndRationalBSplineSurface)
GeomToStep_MakeBSplineSurfaceWithKnotsAndRationalBSplineSurface_swigregister = _GeomToStep.GeomToStep_MakeBSplineSurfaceWithKnotsAndRationalBSplineSurface_swigregister
GeomToStep_MakeBSplineSurfaceWithKnotsAndRationalBSplineSurface_swigregister(GeomToStep_MakeBSplineSurfaceWithKnotsAndRationalBSplineSurface)
class GeomToStep_MakeBoundedCurve(GeomToStep_Root):
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
__repr__ = _swig_repr
def __init__(self, *args):
"""
:param C:
:type C: Handle_Geom_BoundedCurve &
:rtype: None
:param C:
:type C: Handle_Geom2d_BoundedCurve &
:rtype: None
"""
_GeomToStep.GeomToStep_MakeBoundedCurve_swiginit(self,_GeomToStep.new_GeomToStep_MakeBoundedCurve(*args))
def Value(self, *args):
"""
:rtype: Handle_StepGeom_BoundedCurve
"""
return _GeomToStep.GeomToStep_MakeBoundedCurve_Value(self, *args)
__swig_destroy__ = _GeomToStep.delete_GeomToStep_MakeBoundedCurve
GeomToStep_MakeBoundedCurve.Value = new_instancemethod(_GeomToStep.GeomToStep_MakeBoundedCurve_Value,None,GeomToStep_MakeBoundedCurve)
GeomToStep_MakeBoundedCurve_swigregister = _GeomToStep.GeomToStep_MakeBoundedCurve_swigregister
GeomToStep_MakeBoundedCurve_swigregister(GeomToStep_MakeBoundedCurve)
class GeomToStep_MakeBoundedSurface(GeomToStep_Root):
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
__repr__ = _swig_repr
def __init__(self, *args):
"""
:param C:
:type C: Handle_Geom_BoundedSurface &
:rtype: None
"""
_GeomToStep.GeomToStep_MakeBoundedSurface_swiginit(self,_GeomToStep.new_GeomToStep_MakeBoundedSurface(*args))
def Value(self, *args):
"""
:rtype: Handle_StepGeom_BoundedSurface
"""
return _GeomToStep.GeomToStep_MakeBoundedSurface_Value(self, *args)
__swig_destroy__ = _GeomToStep.delete_GeomToStep_MakeBoundedSurface
GeomToStep_MakeBoundedSurface.Value = new_instancemethod(_GeomToStep.GeomToStep_MakeBoundedSurface_Value,None,GeomToStep_MakeBoundedSurface)
GeomToStep_MakeBoundedSurface_swigregister = _GeomToStep.GeomToStep_MakeBoundedSurface_swigregister
GeomToStep_MakeBoundedSurface_swigregister(GeomToStep_MakeBoundedSurface)
class GeomToStep_MakeCartesianPoint(GeomToStep_Root):
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
__repr__ = _swig_repr
def __init__(self, *args):
"""
:param P:
:type P: gp_Pnt
:rtype: None
:param P:
:type P: gp_Pnt2d
:rtype: None
:param P:
:type P: Handle_Geom_CartesianPoint &
:rtype: None
:param P:
:type P: Handle_Geom2d_CartesianPoint &
:rtype: None
"""
_GeomToStep.GeomToStep_MakeCartesianPoint_swiginit(self,_GeomToStep.new_GeomToStep_MakeCartesianPoint(*args))
def Value(self, *args):
"""
:rtype: Handle_StepGeom_CartesianPoint
"""
return _GeomToStep.GeomToStep_MakeCartesianPoint_Value(self, *args)
__swig_destroy__ = _GeomToStep.delete_GeomToStep_MakeCartesianPoint
GeomToStep_MakeCartesianPoint.Value = new_instancemethod(_GeomToStep.GeomToStep_MakeCartesianPoint_Value,None,GeomToStep_MakeCartesianPoint)
GeomToStep_MakeCartesianPoint_swigregister = _GeomToStep.GeomToStep_MakeCartesianPoint_swigregister
GeomToStep_MakeCartesianPoint_swigregister(GeomToStep_MakeCartesianPoint)
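# Minimal usage sketch (assumes a working pythonocc build where OCC.gp.gp_Pnt
# is constructible from three coordinates, as documented above).
def _example_make_cartesian_point():
    mk = GeomToStep_MakeCartesianPoint(OCC.gp.gp_Pnt(0.0, 0.0, 0.0))
    if mk.IsDone():
        return mk.Value()  # Handle_StepGeom_CartesianPoint
    return None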
class GeomToStep_MakeCircle(GeomToStep_Root):
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
__repr__ = _swig_repr
def __init__(self, *args):
"""
:param C:
:type C: gp_Circ
:rtype: None
:param C:
:type C: Handle_Geom_Circle &
:rtype: None
:param C:
:type C: Handle_Geom2d_Circle &
:rtype: None
"""
_GeomToStep.GeomToStep_MakeCircle_swiginit(self,_GeomToStep.new_GeomToStep_MakeCircle(*args))
def Value(self, *args):
"""
:rtype: Handle_StepGeom_Circle
"""
return _GeomToStep.GeomToStep_MakeCircle_Value(self, *args)
__swig_destroy__ = _GeomToStep.delete_GeomToStep_MakeCircle
GeomToStep_MakeCircle.Value = new_instancemethod(_GeomToStep.GeomToStep_MakeCircle_Value,None,GeomToStep_MakeCircle)
GeomToStep_MakeCircle_swigregister = _GeomToStep.GeomToStep_MakeCircle_swigregister
GeomToStep_MakeCircle_swigregister(GeomToStep_MakeCircle)
class GeomToStep_MakeConic(GeomToStep_Root):
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
__repr__ = _swig_repr
def __init__(self, *args):
"""
:param C:
:type C: Handle_Geom_Conic &
:rtype: None
:param C:
:type C: Handle_Geom2d_Conic &
:rtype: None
"""
_GeomToStep.GeomToStep_MakeConic_swiginit(self,_GeomToStep.new_GeomToStep_MakeConic(*args))
def Value(self, *args):
"""
:rtype: Handle_StepGeom_Conic
"""
return _GeomToStep.GeomToStep_MakeConic_Value(self, *args)
__swig_destroy__ = _GeomToStep.delete_GeomToStep_MakeConic
GeomToStep_MakeConic.Value = new_instancemethod(_GeomToStep.GeomToStep_MakeConic_Value,None,GeomToStep_MakeConic)
GeomToStep_MakeConic_swigregister = _GeomToStep.GeomToStep_MakeConic_swigregister
GeomToStep_MakeConic_swigregister(GeomToStep_MakeConic)
class GeomToStep_MakeConicalSurface(GeomToStep_Root):
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
__repr__ = _swig_repr
def __init__(self, *args):
"""
:param CSurf:
:type CSurf: Handle_Geom_ConicalSurface &
:rtype: None
"""
_GeomToStep.GeomToStep_MakeConicalSurface_swiginit(self,_GeomToStep.new_GeomToStep_MakeConicalSurface(*args))
def Value(self, *args):
"""
:rtype: Handle_StepGeom_ConicalSurface
"""
return _GeomToStep.GeomToStep_MakeConicalSurface_Value(self, *args)
__swig_destroy__ = _GeomToStep.delete_GeomToStep_MakeConicalSurface
GeomToStep_MakeConicalSurface.Value = new_instancemethod(_GeomToStep.GeomToStep_MakeConicalSurface_Value,None,GeomToStep_MakeConicalSurface)
GeomToStep_MakeConicalSurface_swigregister = _GeomToStep.GeomToStep_MakeConicalSurface_swigregister
GeomToStep_MakeConicalSurface_swigregister(GeomToStep_MakeConicalSurface)
class GeomToStep_MakeCurve(GeomToStep_Root):
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
__repr__ = _swig_repr
def __init__(self, *args):
"""
:param C:
:type C: Handle_Geom_Curve &
:rtype: None
:param C:
:type C: Handle_Geom2d_Curve &
:rtype: None
"""
_GeomToStep.GeomToStep_MakeCurve_swiginit(self,_GeomToStep.new_GeomToStep_MakeCurve(*args))
def Value(self, *args):
"""
:rtype: Handle_StepGeom_Curve
"""
return _GeomToStep.GeomToStep_MakeCurve_Value(self, *args)
__swig_destroy__ = _GeomToStep.delete_GeomToStep_MakeCurve
GeomToStep_MakeCurve.Value = new_instancemethod(_GeomToStep.GeomToStep_MakeCurve_Value,None,GeomToStep_MakeCurve)
GeomToStep_MakeCurve_swigregister = _GeomToStep.GeomToStep_MakeCurve_swigregister
GeomToStep_MakeCurve_swigregister(GeomToStep_MakeCurve)
class GeomToStep_MakeCylindricalSurface(GeomToStep_Root):
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
__repr__ = _swig_repr
def __init__(self, *args):
"""
:param CSurf:
:type CSurf: Handle_Geom_CylindricalSurface &
:rtype: None
"""
_GeomToStep.GeomToStep_MakeCylindricalSurface_swiginit(self,_GeomToStep.new_GeomToStep_MakeCylindricalSurface(*args))
def Value(self, *args):
"""
:rtype: Handle_StepGeom_CylindricalSurface
"""
return _GeomToStep.GeomToStep_MakeCylindricalSurface_Value(self, *args)
__swig_destroy__ = _GeomToStep.delete_GeomToStep_MakeCylindricalSurface
GeomToStep_MakeCylindricalSurface.Value = new_instancemethod(_GeomToStep.GeomToStep_MakeCylindricalSurface_Value,None,GeomToStep_MakeCylindricalSurface)
GeomToStep_MakeCylindricalSurface_swigregister = _GeomToStep.GeomToStep_MakeCylindricalSurface_swigregister
GeomToStep_MakeCylindricalSurface_swigregister(GeomToStep_MakeCylindricalSurface)
class GeomToStep_MakeDirection(GeomToStep_Root):
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
__repr__ = _swig_repr
def __init__(self, *args):
"""
:param D:
:type D: gp_Dir
:rtype: None
:param D:
:type D: gp_Dir2d
:rtype: None
:param D:
:type D: Handle_Geom_Direction &
:rtype: None
:param D:
:type D: | |
import re
from html.parser import HTMLParser
from urllib.parse import urlparse
from collections import namedtuple
import cssselect
from ._selectors import tag_matches, Specificity
from .rendering import full_justify
from .rendering import Box, BoxSide
from .rendering import Renderer, InlineRenderer, BlockRenderer
from .rendering import HeaderRenderer, MarkdownHeaderRenderer
from .rendering import ParagraphRenderer, BlockQuoteRenderer
from .rendering import CodeRenderer, PreRenderer
from .rendering import EmRenderer, StrongRenderer
from .rendering import UnderlineRenderer, StrikethroughRenderer
from .rendering import BreakRenderer
from .rendering import LinkRenderer, ExtractedLinkRenderer
from .rendering import ImageRenderer, ExtractedImageLinkRenderer
from .rendering import ListRenderer, ListItemRenderer, OrderedListItemRenderer
from .rendering import DefinitionListRenderer, DefinitionListTermHeaderRenderer, DefinitionListItemRenderer
from .rendering import AnsiEscapeCodeRenderer
# TODO: Maybe this class should do more actual parsing? Or just rename to Tag
class TagParser(object):
def __init__(self, tag, parent, attrs, **context):
self.tag = tag
self.parent = parent
self.children = []
self.closed = tag in ('br', 'img')
self.attrs = {}
self.classes = []
self.id = None
if attrs is not None:
self.attrs = dict(attrs)
self.classes = self.__extract_classes()
self.id = self.attrs.get('id', None)
self.renderer = None
self.renderer_settings = None
self._context = context
self._pending_links = []
def tag_children(self):
"""
Return only the children that represent tags (i.e. exclude any DataParsers)
"""
return [t for t in self.children if t.tag is not None]
def __extract_classes(self):
if 'class' in self.attrs:
self.attrs['class'] = self.attrs['class'].split()
return self.attrs['class']
return []
def add_pending_link(self, link):
self._pending_links.append(link)
def assign_renderer(self, renderer):
try:
self.renderer = renderer[0]
self.renderer_settings = renderer[1]
except TypeError:
self.renderer = renderer
def render(self, box):
render_context = dict(
parent_box=box,
)
render_context.update(self._context)
if self.renderer_settings is not None:
render_context['settings'] = self.renderer_settings
render_inst = self.renderer(self, **render_context)
try:
box = render_inst.box
except AttributeError:
# If the renderer doesn't provide a box then the parent's gets
# passed through.
pass
rendered_children = []
for c in self.children:
rendered_children.append(
c.render(box)
)
if len(self._pending_links):
rendered_children.append('\n')
for l in self._pending_links:
rendered_children.append(
l.link_render(box)
)
return render_inst.render(
"".join(rendered_children)
)
class LinkParser(TagParser):
def __init__(
self,
tag,
parent,
attrs,
**context
):
super().__init__(
tag,
parent,
attrs,
**context
)
self.link_renderer = None
if tag == 'a':
# If set, this will be used as the link description
self.title = self.attrs.get('title', None)
self.href = self.attrs.get('href', None)
self.gopher_link = self._parse_href()
elif tag == 'img':
# If set, this will be used as the link description
self.title = self.attrs.get('alt', None)
self.href = self.attrs.get('src', None)
self.gopher_link = self._parse_href()
# TODO: This needs a lot more work to be comprehensive
def _guess_type(self, path):
p = path.rpartition('.')
if p[0] == "":
# No file extension, so gopher menu?
return 1
elif p[2] in ("html", "htm"):
return 'h'
elif p[2] == 'gif':
return 'g'
elif p[2] in ('jpg', 'jpeg', 'png', 'bmp', 'tiff'):
return 'I'
elif p[2] in ('txt', 'csv', 'tsv', 'md'):
return 0
else:
# Default to binary for all other files
return 9
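# Illustrative sketch (not part of the original source): how _guess_type is
# expected to classify a few hypothetical paths, based on the branches above:
#   _guess_type('/phlog/2021')        -> 1    (no extension: assume gopher menu)
#   _guess_type('/docs/readme.txt')   -> 0    (plain text)
#   _guess_type('/page.html')         -> 'h'  (HTML)
#   _guess_type('/pics/logo.gif')     -> 'g'  (GIF)
#   _guess_type('/files/archive.zip') -> 9    (binary fallback)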
def _parse_href(self):
"""
Parse the href of the link and return a dictionary containing the
elements of a gophermap link
"""
parsed = urlparse(self.href)
if parsed.scheme in ("http", "https"):
return dict(
type='h',
selector="URL:{}".format(self.href),
host=self._context['gopher_host'],
port=self._context['gopher_port'],
)
elif parsed.scheme == 'gopher':
# Absolute gopher url
return dict(
type=self._guess_type(parsed.path),
selector=parsed.path,
host=parsed.hostname,
port=parsed.port,
)
elif parsed.scheme == '':
# Relative URL - interpret as a relative gopher link
return dict(
type=self._guess_type(parsed.path),
selector=parsed.path,
host=self._context['gopher_host'],
port=self._context['gopher_port'],
)
else:
# Unknown protocol: try it as a web link
return dict(
type='h',
selector="URL:{}".format(self.href),
host=self._context['gopher_host'],
port=self._context['gopher_port'],
)
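# Illustrative sketch (not part of the original source), assuming the parser
# context supplies gopher_host='example.org' and gopher_port=70: a relative
# href such as '/notes/log.txt' is expected to yield
#   {'type': 0, 'selector': '/notes/log.txt', 'host': 'example.org', 'port': 70}
# while http/https (and unknown-scheme) hrefs are wrapped as type 'h' items
# whose selector is prefixed with 'URL:'.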
def assign_renderer(self, renderer):
try:
self.renderer = renderer[0][0]
self.renderer_settings = renderer[0][1]
except TypeError:
self.renderer = renderer[0]
try:
self.link_renderer = renderer[1][0]
self.link_renderer_settings = renderer[1][1]
except TypeError:
self.link_renderer = renderer[1]
# For links, this renders the contents of the tag in its original
# location; if 'link_placement' (or 'image_placement' for images) is
# 'inline', the extracted link itself is rendered there instead.
def render(self, box):
placement = self._context['image_placement'] if self.tag == 'img' else self._context['link_placement']
if placement != 'inline':
return super().render(box)
return self.link_render(box)
def link_render(self, box):
"""
Render an extracted link.
"""
render_context = dict(
href=self.href,
title=self.title,
gopher_link=self.gopher_link,
parent_box=box
)
render_context.update(self._context)
placement = render_context['image_placement'] if self.tag == 'img' else render_context['link_placement']
if placement == "inline":
renderer = self.renderer
renderer_settings = self.renderer_settings
else:
renderer = self.link_renderer
renderer_settings = self.link_renderer_settings
if renderer_settings is not None:
render_context['settings'] = renderer_settings
render_inst = renderer(self, **render_context)
try:
box = render_inst.box
except AttributeError:
# If the renderer doesn't provide a box then the parent's gets
# passed through.
pass
rendered_children = []
for c in self.children:
rendered_children.append(
c.render(box)
)
return render_inst.render(
"".join(rendered_children)
)
class DataParser(TagParser):
def __init__(self, parent, data, **context):
super().__init__(None, parent, None, **context)
if not context['in_pre']:
# This attempts to remove extraneous formatting internal to the data
# but does not remove whitespace from the start or end of the data
# because it may be followed or preceded by a tag that depends on
# that whitespace for separation.
# This produces different results than how browsers handle whitespace.
# However, the paragraph renderer will also strip whitespace from the
# start and end of its content, minimising the impact of this.
data_split = data.split('\n')
if len(data_split) > 1:
data_stripped = []
data_stripped.append(data_split[0].rstrip())
if len(data_split) > 2:
data_stripped.extend([l.strip() for l in data_split[1:-1]])
data_stripped.append(data_split[-1].lstrip())
data_split = data_stripped
data = ' '.join(data_split)
data = re.sub('[ \t]+', ' ', data)
self.data = data
self.closed = True
def render(self, box):
return self.data
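# Illustrative sketch (not part of the original source): outside of <pre>, data
# is joined across newlines and internal runs of spaces/tabs collapse, e.g.
#   DataParser(parent, 'first  line\n   second\tline ', in_pre=False).data
# is expected to become 'first line second line ' (the whole-string ends are not
# stripped outright, but whitespace runs there still collapse to a single space).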
class DocumentParser(object):
"""
Implicit root for the document, even if the html being parsed is only a
fragment.
For html tags that are otherwise parentless, an instance of this class will
be the parent.
"""
def __init__(self):
self.children = []
def tag_children(self):
"""
Return only the children that represent tags (i.e. exclude any DataParsers)
"""
return [t for t in self.children if t.tag is not None]
def append(self, tag):
self.children.append(tag)
def reset(self):
self.children = []
RendererMapping = namedtuple('RendererMapping', 'selector, renderer')
class RendererMap(object):
"""
Provides a mapping of CSS selectors to Renderer specifications.
"""
def __init__(self, renderer_dict):
self._map = []
for key in renderer_dict:
selector = cssselect.parse(key)
self._map.append(RendererMapping(selector, renderer_dict[key]))
def get_for_tag(self, tag):
all_matches = []
for mapping in self._map:
match, specificity = tag_matches(tag, mapping.selector)
if match:
all_matches.append(
(
Specificity(specificity),
mapping
)
)
all_matches.sort(key=lambda m: m[0])
renderer = None
renderer_settings = {}
for s, mapping in all_matches:
try:
r = mapping.renderer[0]
s = mapping.renderer[1]
except TypeError:
r = mapping.renderer
s = None
if r is not None:
renderer = r
if s is not None:
renderer_settings.update(s)
if not renderer:
return None
return (renderer, renderer_settings)
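# Illustrative sketch (not part of the original source): entries may be a
# renderer class or a (renderer, settings) tuple; a more specific selector can
# contribute settings without replacing the renderer. Assuming tag_matches
# resolves these selectors for a hypothetical <p> inside a <blockquote>:
#   RendererMap({'p': ParagraphRenderer,
#                'blockquote > p': (None, dict(margin=[0, 0, 1, 0]))}
#               ).get_for_tag(p_tag)
# is expected to return (ParagraphRenderer, {'margin': [0, 0, 1, 0]}).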
class GopherHTMLParser(HTMLParser):
def __init__(
self,
width=67,
box=None,
renderers={},
extracted_link_renderers={},
output_format='text',
link_placement='footer',
image_placement='inline',
gopher_host="",
gopher_port=70,
optimise=True,
):
if output_format == 'gophermap' and link_placement == 'inline':
raise ValueError("Links cannot be inlined in gophermap output")
if output_format == 'gophermap' and gopher_host == '':
raise ValueError("gopher_host is required for gophermap output")
super().__init__(convert_charrefs=True)
self._parsed = []
self.parsed = ""
self._tag_stack = []
self.tree = DocumentParser()
#self._width = width
if box:
self._box = box
else:
# TODO: Maybe a default top margin as well?
self._box = Box(
width=width
)
self._output_format = output_format
self._link_placement = link_placement
self._image_placement = image_placement
self._gopher_host = gopher_host
self._gopher_port = gopher_port
self.renderers = {
# Default renderer. A * could also be used to match any element.
'': Renderer,
# Block elements
'h1': MarkdownHeaderRenderer,
'h2': MarkdownHeaderRenderer,
'h3': MarkdownHeaderRenderer,
'h4': MarkdownHeaderRenderer,
'h5': MarkdownHeaderRenderer,
'h6': MarkdownHeaderRenderer,
'p': ParagraphRenderer,
'br': BreakRenderer,
'blockquote': BlockQuoteRenderer,
'blockquote > p:first-child': (None, dict(
margin=[0,0,1,0]
)),
'blockquote > p:last-child': (None, dict(
margin=[1,0,0,0]
)),
# TODO: I think there is an :only-child selector for this
'blockquote > p:last-child:first-child': (None, dict(
margin=[0,0,0,0]
)),
'pre': PreRenderer,
'div': BlockRenderer,
'ul': ListRenderer,
'ol': ListRenderer,
'li': ListItemRenderer,
'ol > li': OrderedListItemRenderer,
'li:first-child': (None, dict(
margin=[0,0,0,0]
)),
# TODO: The need for this is unfortunate...
'li > ol > li:first-child, li > ul > li:first-child': (None, dict(
margin=[1,0,0,0]
)),
# Definition list
'dl': DefinitionListRenderer,
'dt': DefinitionListTermHeaderRenderer,
'dd': DefinitionListItemRenderer,
'dt:first-child': (None, dict(
margin=[0,0,0,0]
)),
# Inline elements
'code': CodeRenderer,
'a': LinkRenderer,
'img': ImageRenderer,
'em': EmRenderer,
'strong': StrongRenderer,
'i': EmRenderer,
'b': StrongRenderer,
'u': UnderlineRenderer,
'ins': UnderlineRenderer,
's': StrikethroughRenderer,
'del': StrikethroughRenderer,
'span': InlineRenderer,
}
self.renderers.update(renderers)
self.extracted_link_renderers = {
'a': ExtractedLinkRenderer,
'img': ExtractedImageLinkRenderer,
}
self.extracted_link_renderers.update(extracted_link_renderers)
self._default_renderer = self.renderers['']
del self.renderers['']
self._renderer_map = RendererMap(self.renderers)
self._extracted_link_renderer_map = RendererMap(self.extracted_link_renderers)
self._next_link_number = 1
self._footer_pending_links = []
self._in_pre = False
self._optimise = optimise
def _get_top(self):
t = None
if len(self._tag_stack) > 0:
t = self._tag_stack[-1]
return t
return squashed
def normalize_io(io):
'''
Normalize an 'io' specifier in a pragma into either 'input' or 'output'
Parameters
----------
io : str
The I/O specifier from the pragma
Returns
-------
{'input', 'output'}
Raises
------
ValueError
If an invalid specifier is given
'''
if io in IO_OUTPUT:
return 'output'
if io in IO_INPUT:
return 'input'
raise ValueError('Invalid I/O specifier')
def _parse_rate(rate, hz_or_sec):
'''
Parse rate into frequency and seconds
Parameters
----------
rate : str
The pragma-specified rate.
hz_or_sec : str
{'hz', 's'}
Returns
-------
dict
With keys {'seconds', 'frequency'}
'''
try:
rate = float(rate)
except Exception:
raise ValueError(f'Invalid rate: {rate}')
if hz_or_sec == 'hz':
freq, seconds = rate, 1.0 / rate
elif hz_or_sec == 's':
freq, seconds = 1.0 / rate, rate
else:
raise ValueError(f'Invalid hz_or_sec: {hz_or_sec}')
return dict(
frequency=int(freq) if int(freq) == freq else freq,
seconds=int(seconds) if int(seconds) == seconds else seconds,
)
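# Illustrative sketch (not part of the original source): both spellings of the
# same rate normalize to the same pair, with whole numbers coerced to int:
#   _parse_rate('2', 'hz')  -> {'frequency': 2, 'seconds': 0.5}
#   _parse_rate('0.5', 's') -> {'frequency': 2, 'seconds': 0.5}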
def parse_update_rate(update, default=UPDATE_RATE_DEFAULT):
'''
Parse an 'update' specifier in a pragma
Parameters
----------
update : str
The update rate specifier from the pragma.
Returns
-------
dict
With keys {'seconds', 'frequency', 'method'}
Where 'method' is one of: {'poll', 'notify'}
Raises
------
ValueError
If an invalid pragma is supplied
'''
update = update.lower().strip()
res = dict(default)
if update:
match = _UPDATE_RE.match(update)
if not match:
raise ValueError(f'Invalid update specifier: {update}')
# Method
d = match.groupdict()
method = d.get('method') or default['method']
if method not in {'poll', 'notify'}:
raise ValueError(f'Invalid update method: {method}')
res['method'] = method
# Rate + frequency/seconds
res.update(_parse_rate(d['rate'], d['hz_or_sec']))
if method == 'poll' and res['frequency'] not in VALID_POLL_RATES_HZ:
raise ValueError(
f"Invalid poll rate {res['frequency']}. "
f"Valid frequencies in Hz are: {VALID_POLL_RATES_HZ}"
)
return res
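# Illustrative sketch (not part of the original source); the accepted syntax is
# defined by _UPDATE_RE and the allowed poll rates by VALID_POLL_RATES_HZ, both
# defined elsewhere. Assuming '2hz poll' is accepted and 2 Hz is a valid poll rate:
#   parse_update_rate('2hz poll')
#   -> {'frequency': 2, 'seconds': 0.5, 'method': 'poll', ...}  (defaults carried over)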
def parse_archive_settings(archive, default=ARCHIVE_DEFAULT):
'''
Parse an 'archive' specifier in a pragma
Parameters
----------
archive : str
The archive specifier from the pragma.
Returns
-------
dict
With keys {'seconds', 'frequency', 'method'}
Where 'method' is one of: {'scan', 'monitor'}
Raises
------
ValueError
If an invalid pragma is supplied
'''
archive = archive.lower().strip()
if archive in ('no', ):
return None
res = dict(default)
if archive:
match = _ARCHIVE_RE.match(archive)
if not match:
raise ValueError(f'Invalid archive specifier: {archive}')
# Method
d = match.groupdict()
method = d.get('method') or default['method']
if method not in {'scan', 'monitor'}:
raise ValueError(f'Invalid archive method: {method}')
res['method'] = method
# Rate + frequency/seconds
res.update(_parse_rate(d['rate'], d['hz_or_sec']))
return res
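# Illustrative sketch (not part of the original source); the accepted syntax is
# defined by _ARCHIVE_RE elsewhere. Assuming '1s scan' parses as rate=1, unit=s,
# method=scan:
#   parse_archive_settings('no')      -> None  (archiving disabled)
#   parse_archive_settings('1s scan') -> {'frequency': 1, 'seconds': 1,
#                                         'method': 'scan', ...}  (defaults carried over)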
def parse_array_settings(pragma, dimensions):
'''
Parse an 'array' specifier in a pragma, yielding array elements.
Parameters
----------
pragma : str
The I/O specifier from the pragma.
dimensions : 2-tuple
Lower and upper-bound of the array corresponding to the pragma.
Yields
------
element : int
Integer element of selected array indices.
Raises
------
ValueError
If an invalid pragma is supplied
'''
pragma = pragma.strip()
try:
low, high = dimensions
except Exception:
raise ValueError(
f'Invalid dimensions {dimensions!r} for array specifier in pragma '
f'{pragma!r}'
)
if not pragma:
yield from range(low, high + 1)
return
def _parse_element(elem):
if '..' not in elem:
return [int(elem)]
# Split by .., such that this will support:
# ..to, from.., from..to, from..to..step
range_args = [int(idx) if idx else None
for idx in elem.split('..')]
# Ensure we have start, stop, step
range_args += [None] * (3 - len(range_args))
elem_low, elem_high, elem_step = range_args
elem_low = low if elem_low is None else elem_low
# Add one to make the exclusive upper bound inclusive:
elem_high = high + 1 if elem_high is None else elem_high + 1
elem_step = 1 if elem_step is None else elem_step
return range(elem_low, elem_high, elem_step)
try:
for elem in pragma.split(','):
for idx in _parse_element(elem):
if not low <= idx <= high:
raise ValueError(
f'Array pragma index out of bounds: '
f'index {idx} not in range [{low}, {high}]'
)
yield idx
except Exception as ex:
raise ValueError(
f'Invalid array pragma: {pragma} ({ex})'
)
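# Illustrative sketch (not part of the original source): selected indices for an
# array declared over dimensions (1, 10):
#   list(parse_array_settings('', (1, 10)))         -> [1, 2, ..., 10]  (every element)
#   list(parse_array_settings('2, 5..7', (1, 10)))  -> [2, 5, 6, 7]
#   list(parse_array_settings('..3', (1, 10)))      -> [1, 2, 3]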
# Helpers which normalize various pragma values.
_normalizers = {
'io': (normalize_io, 'io'),
'update': (parse_update_rate, '1s poll'),
'archive': (parse_archive_settings, '1s scan'),
}
def normalize_config(config):
'''
Parse and normalize pragma values into Python representations
The following keys will be interpreted: ``io``, ``archive``, ``update``
Parameters
----------
config : dict
The configuration
Returns
-------
dict
A shallow-copy of ``config`` with parsed and normalized values
'''
ret = dict(config)
for key, (parser_func, default) in _normalizers.items():
ret[key] = parser_func(ret.get(key, default))
return ret
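# Illustrative sketch (not part of the original source), assuming 'i' is listed
# in IO_INPUT:
#   normalize_config({'pv': 'MY:PV', 'io': 'i', 'update': '1s poll'})
# returns a shallow copy in which 'io' becomes 'input', 'update' becomes the
# dict produced by parse_update_rate, and 'archive' is filled in from its
# default specifier ('1s scan'); 'pv' is passed through untouched.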
class SingularChain:
'''
A chain of data types, all with pytmc pragmas, representing a single piece
of data that should be accessible via EPICS/ADS
Parameters
----------
item_to_config : dict
Keys are ``TwincatItem`` instances such as ``Symbol``; values are
dictionary configurations from parsed pytmc pragmas.
Attributes
----------
item_to_config : dict
chain : list
The chain of items (i.e., item_to_config keys)
tcname : str
The full TwinCAT name of the item
pvname : str
The user-specified PV name
last : TwincatItem
The last item, which determines the overall data type
data_type : DataType
The data type of the last item
config : dict
The final configuration based on the full chain of configurations
'''
def __init__(self, item_to_config):
self.item_to_config = item_to_config
self.chain = list(self.item_to_config)
self.last = self.chain[-1]
self.data_type = self.chain[-1].data_type
self.array_info = self.chain[-1].array_info
self.tcname = '.'.join(part.name for part in self.chain)
self.valid = True
for item, config in item_to_config.items():
# Detect Nones signifying an incomplete pragma
if config is None:
self.valid = False
self.config = squash_configs(*list(item_to_config.values()))
self.pvname = ':'.join(pv_segment for pv_segment in self.config['pv']
if pv_segment)
def __repr__(self):
return (f'<{self.__class__.__name__} pvname={self.pvname!r} '
f'tcname={self.tcname!r} config={self.config} '
f'data_type={self.data_type!r}>')
def find_pytmc_symbols(tmc, allow_no_pragma=False):
'Find all symbols in a tmc file that contain pragmas'
for symbol in tmc.find(parser.Symbol):
if has_pragma(symbol) or allow_no_pragma:
if symbol.name.count('.') == 1:
yield symbol
def get_pragma(item: Union[parser.SubItem, Type[parser.Symbol]], *,
name: str = 'pytmc') -> Generator[str, None, None]:
"""
Get all pragmas with a certain tag.
Parameters
----------
item : parser.SubItem, parser.Symbol, parser.Symbol subclass
Representation of beckhoff variable or data structure
name : str, optional
Accept tmc entries where the <Name> field equals the passed string
Yields
------
str
"""
name_list = [
name,
'plcAttribute_{}'.format(name)
]
if hasattr(item, 'Properties'):
properties = item.Properties[0]
for prop in getattr(properties, 'Property', []):
# Yield the property value if its name matches any of the names searched for
if any(indiv_name == prop.name for indiv_name in name_list):
yield prop.value
def has_pragma(item, *, name: str = 'pytmc'):
'Does `item` have a pragma titled `name`?'
return any(True for pragma in get_pragma(item, name=name)
if pragma is not None)
def always_true(*a, **kwargs):
return True
def chains_from_symbol(symbol, *, pragma: str = 'pytmc',
allow_no_pragma=False):
'Build all SingularChain instances from a Symbol'
if allow_no_pragma:
condition = always_true
else:
condition = has_pragma
for full_chain in symbol.walk(condition=condition):
configs = itertools.product(
*_expand_configurations_from_chain(full_chain,
allow_no_pragma=allow_no_pragma)
)
for item_and_config in configs:
yield SingularChain(dict(item_and_config))
def record_packages_from_symbol(symbol, *, pragma: str = 'pytmc',
yield_exceptions=False,
allow_no_pragma=False):
'Create all record packages from a given Symbol'
try:
ads_port = symbol.module.ads_port
for chain in chains_from_symbol(symbol, pragma=pragma,
allow_no_pragma=allow_no_pragma):
try:
yield RecordPackage.from_chain(ads_port, chain=chain)
except Exception as ex:
if yield_exceptions:
yield type(ex)(f"Symbol {symbol.name} "
f"chain: {chain.tcname}: {ex}")
else:
raise
except Exception as ex:
if yield_exceptions:
yield type(ex)(f"Symbol {symbol.name} failure: {ex}")
else:
raise
def _attach_pragma(item, name, value):
"""Attach a pragma to a TwincatItem using `_make_fake_item`."""
if not hasattr(item, 'Properties'):
properties = parser._make_fake_item('Properties', parent=item)
properties.Property = []
item.Properties = [properties]
properties = item.Properties[0]
prop = parser._make_fake_item('Property', parent=properties, text=value,
item_name=name)
properties.Property.append(prop)
return prop
class _FakeSymbol(parser.Symbol):
@property
def data_type(self):
return self._data_type
@property
def qualified_type_name(self):
return self._data_type.qualified_type
@property
def type_name(self):
return self._data_type.name
@property
def BitSize(self):
return self._data_type.BitSize
def make_fake_symbol_from_data_type(
data_type, symbol_pragma_text, *, name='$(SYMBOL)',
pragma_name: str = 'pytmc',
data_area_index=0, tmc=None,
create_data_area_if_needed=True):
"""
Create a :class:`_FakeSymbol` from the given data type.
Parameters
----------
data_type : pytmc.parser.DataType
The TMC data type.
symbol_pragma_text : str
The pragma text to attach.
name : str, optional
The symbol name.
pragma_name : str, optional
The pragma name to use (defaults to "pytmc").
data_area_index : int, optional
The data area to pretend the symbol exists in.
"""
if tmc is None:
# If defined in a .tmc file, this is the obvious choice.
tmc = data_type.tmc
if tmc is None:
# Fallback to the first .tmc we find. This really should be an
# error condition, but given that we're making fake symbols anyway
# it _probably_ | |
"""
Copyright 2019 Google LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# Custom utils import
from google.cloud.sql.connector.rate_limiter import AsyncRateLimiter
from google.cloud.sql.connector.refresh_utils import _get_ephemeral, _get_metadata
from google.cloud.sql.connector.utils import write_to_file
from google.cloud.sql.connector.version import __version__ as version
# Importing libraries
import asyncio
import aiohttp
import concurrent
import datetime
from enum import Enum
import google.auth
from google.auth.credentials import Credentials
import google.auth.transport.requests
import OpenSSL
import platform
import ssl
import socket
from tempfile import TemporaryDirectory
from typing import (
Any,
Awaitable,
Dict,
Optional,
TYPE_CHECKING,
)
from functools import partial
import logging
if TYPE_CHECKING:
import pymysql
import pg8000
import pytds
logger = logging.getLogger(name=__name__)
APPLICATION_NAME = "cloud-sql-python-connector"
SERVER_PROXY_PORT = 3307
# default_refresh_buffer is the amount of time before a refresh's result expires
# that a new refresh operation begins.
_default_refresh_buffer: int = 5 * 60 # 5 minutes
# _iam_auth_refresh_buffer is the amount of time before a refresh's result expires
# that a new refresh operation begins when IAM DB AuthN is enabled. Because token
# sources may be cached until ~60 seconds before expiration, this value must be smaller
# than default_refresh_buffer.
_iam_auth_refresh_buffer: int = 55 # seconds
class IPTypes(Enum):
PUBLIC: str = "PRIMARY"
PRIVATE: str = "PRIVATE"
class ConnectionSSLContext(ssl.SSLContext):
"""Subclass of ssl.SSLContext with added request_ssl attribute. This is
required for compatibility with pg8000 driver.
"""
def __init__(self, *args: Any, **kwargs: Any) -> None:
self.request_ssl = False
super(ConnectionSSLContext, self).__init__(*args, **kwargs)
class TLSVersionError(Exception):
"""
Raised when the required TLS protocol version is not supported.
"""
def __init__(self, *args: Any) -> None:
super(TLSVersionError, self).__init__(self, *args)
class CloudSQLConnectionError(Exception):
"""
Raised when the provided connection string is not formatted
correctly.
"""
def __init__(self, *args: Any) -> None:
super(CloudSQLConnectionError, self).__init__(self, *args)
class CloudSQLIPTypeError(Exception):
"""
Raised when IP address for the preferred IP type is not found.
"""
def __init__(self, *args: Any) -> None:
super(CloudSQLIPTypeError, self).__init__(self, *args)
class PlatformNotSupportedError(Exception):
"""
Raised when a feature is not supported on the current platform.
"""
def __init__(self, *args: Any) -> None:
super(PlatformNotSupportedError, self).__init__(self, *args)
class InstanceMetadata:
ip_addrs: Dict[str, Any]
context: ssl.SSLContext
expiration: datetime.datetime
def __init__(
self,
ephemeral_cert: str,
ip_addrs: Dict[str, Any],
private_key: bytes,
server_ca_cert: str,
expiration: datetime.datetime,
enable_iam_auth: bool,
) -> None:
self.ip_addrs = ip_addrs
if enable_iam_auth and not ssl.HAS_TLSv1_3: # type: ignore
raise TLSVersionError(
"Your current version of OpenSSL does not support TLSv1.3, "
"which is required to use IAM Authentication."
)
self.context = ConnectionSSLContext()
self.expiration = expiration
# tmpdir and its contents are automatically deleted after the CA cert
# and ephemeral cert are loaded into the SSLcontext. The values
# need to be written to files in order to be loaded by the SSLContext
with TemporaryDirectory() as tmpdir:
ca_filename, cert_filename, key_filename = write_to_file(
tmpdir, server_ca_cert, ephemeral_cert, private_key
)
self.context.load_cert_chain(cert_filename, keyfile=key_filename)
self.context.load_verify_locations(cafile=ca_filename)
def get_preferred_ip(self, ip_type: IPTypes) -> str:
"""Returns the first IP address for the instance, according to the preference
supplied by ip_type. If no IP addresses with the given preference are found,
an error is raised."""
if ip_type.value in self.ip_addrs:
return self.ip_addrs[ip_type.value]
raise CloudSQLIPTypeError(
"Cloud SQL instance does not have any IP addresses matching "
f"preference: {ip_type.value})"
)
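# Illustrative sketch (not part of the original source): given instance metadata
# whose ip_addrs is {'PRIMARY': '35.1.2.3', 'PRIVATE': '10.0.0.3'},
#   metadata.get_preferred_ip(IPTypes.PUBLIC)  -> '35.1.2.3'
#   metadata.get_preferred_ip(IPTypes.PRIVATE) -> '10.0.0.3'
# and CloudSQLIPTypeError is raised when the requested type is not present.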
class InstanceConnectionManager:
"""A class to manage the details of the connection, including refreshing the
credentials.
:param instance_connection_string:
The Google Cloud SQL Instance's connection
string.
:type instance_connection_string: str
:param user_agent_string:
The user agent string to append to SQLAdmin API requests
:type user_agent_string: str
:param enable_iam_auth:
Enables IAM based authentication for Postgres instances.
:type enable_iam_auth: bool
:param loop:
A new event loop for the refresh function to run in.
:type loop: asyncio.AbstractEventLoop
"""
# asyncio.AbstractEventLoop is used because the default loop,
# SelectorEventLoop, is usable on both Unix and Windows but has limited
# functionality on Windows. It is recommended to use ProactorEventLoop
# while developing on Windows.
# Link to Github issue:
# https://github.com/GoogleCloudPlatform/cloud-sql-python-connector/issues/22
_loop: asyncio.AbstractEventLoop
_enable_iam_auth: bool
__client_session: Optional[aiohttp.ClientSession] = None
@property
def _client_session(self) -> aiohttp.ClientSession:
if self.__client_session is None:
self.__client_session = aiohttp.ClientSession(
headers={
"x-goog-api-client": self._user_agent_string,
"User-Agent": self._user_agent_string,
"Content-Type": "application/json",
}
)
return self.__client_session
_credentials: Optional[Credentials] = None
_keys: Awaitable
_instance_connection_string: str
_user_agent_string: str
_instance: str
_project: str
_region: str
_refresh_in_progress: asyncio.locks.Event
_current: asyncio.Task # task wraps coroutine that returns InstanceMetadata
_next: asyncio.Task # task wraps coroutine that returns another task
def __init__(
self,
instance_connection_string: str,
driver_name: str,
keys: concurrent.futures.Future,
loop: asyncio.AbstractEventLoop,
enable_iam_auth: bool = False,
json_keyfile_dict: dict = {},
) -> None:
# Validate connection string
connection_string_split = instance_connection_string.split(":")
if len(connection_string_split) == 3:
self._instance_connection_string = instance_connection_string
self._project = connection_string_split[0]
self._region = connection_string_split[1]
self._instance = connection_string_split[2]
else:
raise CloudSQLConnectionError(
"Arg instance_connection_string must be in "
+ "format: project:region:instance."
)
self._enable_iam_auth = enable_iam_auth
self._user_agent_string = f"{APPLICATION_NAME}/{version}+{driver_name}"
self._loop = loop
self._json_keyfile_dict = json_keyfile_dict
self._keys = asyncio.wrap_future(keys, loop=self._loop)
self._auth_init()
self._refresh_rate_limiter = AsyncRateLimiter(
max_capacity=2, rate=1 / 30, loop=self._loop
)
async def _set_instance_data() -> None:
logger.debug("Updating instance data")
self._refresh_in_progress = asyncio.locks.Event(loop=self._loop)
self._current = self._loop.create_task(self._get_instance_data())
self._next = self._loop.create_task(self._schedule_refresh())
init_future = asyncio.run_coroutine_threadsafe(_set_instance_data(), self._loop)
init_future.result()
def __del__(self) -> None:
"""Deconstructor to make sure ClientSession is closed and tasks have
finished to have a graceful exit.
"""
logger.debug("Entering deconstructor")
async def _deconstruct() -> None:
if isinstance(self._current, asyncio.Task):
logger.debug("Waiting for _current to be cancelled")
self._current.cancel()
if isinstance(self._next, asyncio.Task):
logger.debug("Waiting for _next to be cancelled")
self._next.cancel()
if not self._client_session.closed:
logger.debug("Waiting for _client_session to close")
await self._client_session.close()
deconstruct_future = asyncio.run_coroutine_threadsafe(
_deconstruct(), loop=self._loop
)
# Will attempt to safely shut down tasks for 5s
deconstruct_future.result(timeout=5)
logger.debug("Finished deconstructing")
async def _get_instance_data(self) -> InstanceMetadata:
"""Asynchronous function that takes in the futures for the ephemeral certificate
and the instance metadata and generates an OpenSSL context object.
:rtype: InstanceMetadata
:returns: A dataclass containing a string representing the ephemeral certificate, a dict
containing the instance's IP addresses, a string representing a PEM-encoded private key
and a string representing a PEM-encoded certificate authority.
"""
priv_key, pub_key = await self._keys
logger.debug("Creating context")
metadata_task = self._loop.create_task(
_get_metadata(
self._client_session, self._credentials, self._project, self._instance
)
)
ephemeral_task = self._loop.create_task(
_get_ephemeral(
self._client_session,
self._credentials,
self._project,
self._instance,
pub_key,
self._enable_iam_auth,
)
)
metadata, ephemeral_cert = await asyncio.gather(metadata_task, ephemeral_task)
x509 = OpenSSL.crypto.load_certificate(
OpenSSL.crypto.FILETYPE_PEM, ephemeral_cert
)
expiration = datetime.datetime.strptime(
x509.get_notAfter().decode("ascii"), "%Y%m%d%H%M%SZ"
)
if self._enable_iam_auth:
if self._credentials is not None:
token_expiration: datetime.datetime = self._credentials.expiry
if expiration > token_expiration:
expiration = token_expiration
return InstanceMetadata(
ephemeral_cert,
metadata["ip_addresses"],
priv_key,
metadata["server_ca_cert"],
expiration,
self._enable_iam_auth,
)
def _auth_init(self) -> None:
"""Creates and assigns a Google Python API service object for
Google Cloud SQL Admin API.
"""
scopes=[
"https://www.googleapis.com/auth/sqlservice.admin",
"https://www.googleapis.com/auth/cloud-platform",
]
if self._json_keyfile_dict:
from google.oauth2.service_account import Credentials as ServiceAccountCredentials
credentials = ServiceAccountCredentials.from_service_account_info(
self._json_keyfile_dict, scopes=scopes
)
print("SQL CONNECTOR FROM KEYFILE")
else:
credentials, project = google.auth.default(scopes=scopes)
print("SQL CONNECTOR FROM DEFAULT")
self._credentials = credentials
async def _force_refresh(self) -> bool:
if self._refresh_in_progress.is_set():
# if a new refresh is already in progress, then block on the result
self._current = await self._next
return True
try:
self._next.cancel()
# schedule a refresh immediately with no delay
self._next = self._loop.create_task(self._schedule_refresh(0))
self._current = await self._next
return True
except Exception as e:
# if anything else goes wrong, log the error and return false
logger.exception("Error occurred during force refresh attempt", exc_info=e)
return False
def force_refresh(self, timeout: Optional[int] = None) -> bool:
"""
Forces a new refresh attempt and returns a boolean value that indicates
whether the attempt was successful.
:type timeout: Optional[int]
:param timeout: Amount of time to wait for the attempted force refresh
to complete before throwing a timeout error.
"""
return asyncio.run_coroutine_threadsafe(
self._force_refresh(), self._loop
).result(timeout=timeout)
async def seconds_until_refresh(self) -> int:
expiration = (await self._current).expiration
if self._enable_iam_auth:
refresh_buffer = _iam_auth_refresh_buffer
else:
refresh_buffer = _default_refresh_buffer
delay = (expiration - datetime.datetime.now()) - datetime.timedelta(
seconds=refresh_buffer
)
if delay.total_seconds() < 0:
# If the time until the certificate expires is less than the buffer,
# schedule the refresh closer to the expiration time
delay = (expiration - datetime.datetime.now()) - datetime.timedelta(
seconds=5
)
return int(delay.total_seconds())
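# Illustrative arithmetic (not part of the original source): if the certificate
# expires 30 minutes from now and IAM DB AuthN is disabled, the refresh is
# scheduled 30 min - 5 min (default buffer) = 25 minutes from now; with IAM
# auth enabled the buffer is 55 seconds, giving roughly 29 minutes instead.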
async def _perform_refresh(self) -> asyncio.Task:
"""Retrieves instance metadata and ephemeral certificate from the
Cloud SQL Instance.
:rtype: asyncio.Task
:returns: A future representing the | |
# layeredGraphLayouter/p5ortogonalRouter/routingGenerator.py
from enum import Enum
from typing import Dict, List
from layeredGraphLayouter.containers.geometry import Point
from layeredGraphLayouter.containers.lPort import LPort
from collections import deque
from math import isnan, inf
from layeredGraphLayouter.containers.lNode import LNode
from layeredGraphLayouter.containers.constants import PortType, PortSide
from layeredGraphLayouter.containers.lEdge import LEdge
from random import Random
from layeredGraphLayouter.containers.lGraph import LGraph
class RoutingDirection(Enum):
"""
Enumeration of available routing directions.
"""
""" west to east routing direction."""
WEST_TO_EAST = 0
""" north to south routing direction."""
NORTH_TO_SOUTH = 1
""" south to north routing direction."""
SOUTH_TO_NORTH = 2
class HyperNode():
"""
A hypernode used for routing a hyperedge.
"""
def __init__(self):
"""
:ivar ports: List[LPort] ports represented by this hypernode.
:ivar mark: mark value used for cycle breaking.
:ivar rank: the rank determines the horizontal distance to the preceding layer.
:ivar start: vertical starting position of this hypernode.
:ivar end: vertical ending position of this hypernode.
:ivar sourcePosis: Dequeu[float], positions of line segments going to the preceding layer.
:ivar targetPosis: Dequeu[float], positions of line segments going to the next layer.
:ivar outgoing: List[Dependency] list of outgoing dependencies.
:ivar outweight: sum of the weights of outgoing dependencies.
:ivar incoming: List[Dependency], list of incoming dependencies.
:ivar inweight: sum of the weights of incoming depencencies.
"""
self.ports = []
self.mark = 0
self.rank = 0
self.start = float('nan')
self.end = float('nan')
self.sourcePosis = deque()
self.targetPosis = deque()
self.outgoing = []
self.outweight = 0
self.incoming = []
self.inweight = 0
def addPortPositions(self, port: LPort, hyperNodeMap: Dict[LPort, "HyperNode"]):
"""
Adds the positions of the given port and all connected ports.
:param port a port
:param hyperNodeMap map of ports to existing hypernodes
"""
hyperNodeMap[port] = self
self.ports.append(port)
pos = routingStrategy.getPortPositionOnHyperNode(port)
# set new start position
if isnan(self.start):
self.start = pos
else:
self.start = min(self.start, pos)
# set new end position
if isnan(self.end):
self.end = pos
else:
self.end = max(self.end, pos)
# add the new port position to the respective list
if port.side == routingStrategy.getSourcePortSide():
self.insertSorted(self.sourcePosis, pos)
else:
self.insertSorted(self.targetPosis, pos)
# add connected ports
for otherPort in port.iterConnectedPorts():
if otherPort not in hyperNodeMap:
self.addPortPositions(otherPort, hyperNodeMap)
def __repr__(self):
buff = []
for port in self.ports:
name = port.getNode().name
if name is None:
name = "n" + str(port.getNode().getIndex())
buff.append(name)
buff.append('')
return "{%s}" % (",".join(buff))
def __lt__(self, other):
return self.mark < other.mark
def __eq__(self, other):
if isinstance(other, HyperNode):
return self.mark == other.mark
return False
def hashCode(self) -> int:
return self.mark
def getOutgoing(self) -> List["Dependency"]:
"""
Return the outgoing dependencies.

:return: the outgoing dependencies
"""
return self.outgoing
class Dependency():
"""
A dependency between two hypernodes.
:ivar source: the source hypernode of this dependency.
:ivar target: the target hypernode of this dependency.
:ivar weight: the weight of this dependency.
"""
def __init__(self, thesource: HyperNode, thetarget: HyperNode,
theweight: int):
"""
Creates a dependency from the given source to the given target.
:param thesource the dependency source
:param thetarget the dependency target
:param theweight weight of the dependency
"""
self.target = thetarget
self.source = thesource
self.weight = theweight
self.source.outgoing.append(self)
self.target.incoming.append(self)
def __repr__(self):
return "%r->%r" % (self.source, self.target)
def getSource(self)-> HyperNode:
"""
Return the source node.

:return: the source
"""
return self.source
def getTarget(self) -> HyperNode:
"""
Return the target node.

:return: the target
"""
return self.target
def getWeight(self) -> int:
"""
Returns the weight of the hypernode dependency.

:return: the weight
"""
return self.weight
class OrthogonalRoutingGenerator():
"""
Edge routing implementation that creates orthogonal bend points. Inspired by:

- <NAME>. Layout of directed hypergraphs with orthogonal hyperedges. In
  Proceedings of the 11th International Symposium on Graph Drawing (GD '03),
  volume 2912 of LNCS, pp. 381-386. Springer, 2004.
- <NAME>, <NAME>, <NAME>, <NAME>,
  Graph Drawing: Algorithms for the Visualization of Graphs,
  Prentice Hall, New Jersey, 1999 (Section 9.4, for cycle breaking in the
  hyperedge segment graph)

This is a generic implementation that can be applied to all four routing directions.
Usually, edges will be routed from west to east. However, with northern and southern
external ports, this changes: edges are routed from south to north and north to south,
respectively. To support these different requirements, the routing direction-related
code is factored out into IRoutingDirectionStrategy routing strategies.

When instantiating a new routing generator, the concrete directional strategy must be
specified. Once that is done, routeEdges(LGraph, List, int, List, double)
is called repeatedly to route edges between given lists of nodes.
"""
# Constants and Variables
""" differences below this tolerance value are treated as zero."""
TOLERANCE = 1e-3
""" factor for edge spacing used to determine the conflict threshold."""
CONFL_THRESH_FACTOR = 0.2
""" weight penalty for conflicts of horizontal line segments."""
CONFLICT_PENALTY = 16
"""
:ivar routingStrategy: routing direction strategy.
:ivar edgeSpacing: spacing between edges.
:ivar conflictThreshold: threshold at which conflicts of horizontal line segments are detected.
:ivar createdJunctionPoints: set of already created junction points, to avoid creating multiple points at the same position.
:ivar debugPrefix: prefix of debug output files."""
# Constructor
def __init__(self, direction: RoutingDirection, edgeSpacing: float,
debugPrefix: str):
"""
Constructs a new instance.

:param direction: the direction edges should point at.
:param edgeSpacing: the space between edges.
:param debugPrefix: prefix of debug output files, or ``None`` if no debug output
should be generated.
"""
if direction == RoutingDirection.WEST_TO_EAST:
self.routingStrategy = WestToEastRoutingStrategy()
elif direction == RoutingDirection.NORTH_TO_SOUTH:
self.routingStrategy = NorthToSouthRoutingStrategy()
elif direction == RoutingDirection.SOUTH_TO_NORTH:
self.routingStrategy = SouthToNorthRoutingStrategy()
else:
raise ValueError(direction)
self.edgeSpacing = edgeSpacing
self.conflictThreshold = self.CONFL_THRESH_FACTOR * edgeSpacing
self.debugPrefix = debugPrefix
self.createdJunctionPoints = set()
# Edge Routing
"""
* Route edges between the given layers.
*
* :param layeredGraph the layered graph.
* :param sourceLayerNodes the left layer. May be {@code null.
* :param sourceLayerIndex the source layer's index. Ignored if there is no source layer.
* :param targetLayerNodes the right layer. May be {@code null.
* :param startPos horizontal position of the first routing slot
* :return: the number of routing slots for this layer
"""
def routeEdges(self, layeredGraph: LGraph, sourceLayerNodes: List[LNode],
sourceLayerIndex: int, targetLayerNodes: List[LNode], startPos: float) -> int:
portToHyperNodeMap = {}
hyperNodes = []
routingStrategy = self.routingStrategy
conflictThreshold = self.conflictThreshold
# create hypernodes for eastern output ports of the left layer and for western
# output ports of the right layer
self.createHyperNodes(sourceLayerNodes, routingStrategy.getSourcePortSide(),
hyperNodes, portToHyperNodeMap)
self.createHyperNodes(targetLayerNodes, routingStrategy.getTargetPortSide(),
hyperNodes, portToHyperNodeMap)
createDependency = self.createDependency
# create dependencies for the hypernode ordering graph
for i, hyperNode1 in enumerate(hyperNodes):
for hyperNode2 in hyperNodes[i + 1:]:
createDependency(hyperNode1, hyperNode2, conflictThreshold)
# write the full dependency graph to an output file
# elkjs-exclude-start
if self.debugPrefix is not None:
DebugUtil.writeDebugGraph(layeredGraph,
0 if sourceLayerNodes is None else sourceLayerIndex + 1,
hyperNodes, self.debugPrefix, "full")
# elkjs-exclude-end
# break cycles
self.breakCycles(hyperNodes, layeredGraph.random)
# write the acyclic dependency graph to an output file
# elkjs-exclude-start
if self.debugPrefix is not None:
DebugUtil.writeDebugGraph(layeredGraph,
0 if sourceLayerNodes is None else sourceLayerIndex + 1,
hyperNodes, self.debugPrefix, "acyclic")
# elkjs-exclude-end
# assign ranks to the hypernodes
self.topologicalNumbering(hyperNodes)
TOLERANCE = self.TOLERANCE
# set bend points with appropriate coordinates
rankCount = -1
for node in hyperNodes:
# Hypernodes that are just straight lines don't take up a slot and
# don't need bend points
if abs(node.start - node.end) < TOLERANCE:
continue
rankCount = max(rankCount, node.rank)
routingStrategy.calculateBendPoints(node, startPos)
# release the created resources
self.createdJunctionPoints.clear()
return rankCount + 1
# Hyper Node Graph Creation
def createHyperNodes(self, nodes: List[LNode], portSide: PortSide,
hyperNodes: List[HyperNode], portToHyperNodeMap: Dict[LPort, HyperNode]):
"""
Creates hypernodes for the given layer.
:param nodes: the layer. May be ``None``, in which case nothing happens.
:param portSide: side of the output ports for whose outgoing edges hypernodes should
be created.
:param hyperNodes: list the created hypernodes should be added to.
:param portToHyperNodeMap: map from ports to hypernodes that should be filled.
"""
if nodes is not None:
for node in nodes:
for port in node.getPorts(PortType.OUTPUT, portSide):
hyperNode = portToHyperNodeMap.get(port)
if hyperNode is None:
hyperNode = HyperNode()
hyperNodes.append(hyperNode)
hyperNode.addPortPositions(port, portToHyperNodeMap)
@classmethod
| |
= dist
return dist
def showManyStats(self,spec_name=None):
'''
Calculates the "many statistics" by averaging histories across simulated periods. Displays
the results as text and saves them to files if spec_name is not None.
Parameters
----------
spec_name : string
A name or label for the current specification.
Returns
-------
None
'''
# Calculate MPC overall and by subpopulations
MPCall = np.mean(self.MPCall_hist[self.ignore_periods:])
MPCemployed = np.mean(self.MPCemployed_hist[self.ignore_periods:])
MPCunemployed = np.mean(self.MPCunemployed_hist[self.ignore_periods:])
MPCretired = np.mean(self.MPCretired_hist[self.ignore_periods:])
MPCbyIncome = np.mean(np.array(self.MPCbyIncome_hist)[self.ignore_periods:,:],axis=0)
MPCbyWealthRatio = np.mean(np.array(self.MPCbyWealthRatio_hist)[self.ignore_periods:,:],axis=0)
HandToMouthPct = np.mean(np.array(self.HandToMouthPct_hist)[self.ignore_periods:,:],axis=0)
LorenzSim = np.hstack((np.array(0.0),np.mean(np.array(self.LorenzLong_hist)[self.ignore_periods:,:],axis=0),np.array(1.0)))
LorenzAxis = np.arange(101,dtype=float)
plt.plot(LorenzAxis,self.LorenzData,'-k',linewidth=1.5)
plt.plot(LorenzAxis,LorenzSim,'--k',linewidth=1.5)
plt.xlabel('Income percentile',fontsize=12)
plt.ylabel('Cumulative wealth share',fontsize=12)
plt.title('Simulated vs Actual Lorenz Curves',fontsize=16)
plt.legend(('Actual','Simulated'),loc=2,fontsize=12)
plt.ylim([-0.02,1.0])
pylab.savefig(os.path.join(figures_dir, 'LorenzCurvesRHetero.pdf'))
plt.show(block=False)
# Make a string of results to display
results_string = 'Estimate is center=' + str(self.center_estimate) + ', spread=' + str(self.spread_estimate) + '\n'
results_string += 'Lorenz distance is ' + str(self.LorenzDistance) + '\n'
results_string += 'Average MPC for all consumers is ' + mystr(MPCall) + '\n'
results_string += 'Average MPC in the top percentile of W/Y is ' + mystr(MPCbyWealthRatio[0]) + '\n'
results_string += 'Average MPC in the top decile of W/Y is ' + mystr(MPCbyWealthRatio[1]) + '\n'
results_string += 'Average MPC in the top quintile of W/Y is ' + mystr(MPCbyWealthRatio[2]) + '\n'
results_string += 'Average MPC in the second quintile of W/Y is ' + mystr(MPCbyWealthRatio[3]) + '\n'
results_string += 'Average MPC in the middle quintile of W/Y is ' + mystr(MPCbyWealthRatio[4]) + '\n'
results_string += 'Average MPC in the fourth quintile of W/Y is ' + mystr(MPCbyWealthRatio[5]) + '\n'
results_string += 'Average MPC in the bottom quintile of W/Y is ' + mystr(MPCbyWealthRatio[6]) + '\n'
results_string += 'Average MPC in the top percentile of y is ' + mystr(MPCbyIncome[0]) + '\n'
results_string += 'Average MPC in the top decile of y is ' + mystr(MPCbyIncome[1]) + '\n'
results_string += 'Average MPC in the top quintile of y is ' + mystr(MPCbyIncome[2]) + '\n'
results_string += 'Average MPC in the second quintile of y is ' + mystr(MPCbyIncome[3]) + '\n'
results_string += 'Average MPC in the middle quintile of y is ' + mystr(MPCbyIncome[4]) + '\n'
results_string += 'Average MPC in the fourth quintile of y is ' + mystr(MPCbyIncome[5]) + '\n'
results_string += 'Average MPC in the bottom quintile of y is ' + mystr(MPCbyIncome[6]) + '\n'
results_string += 'Average MPC for the employed is ' + mystr(MPCemployed) + '\n'
results_string += 'Average MPC for the unemployed is ' + mystr(MPCunemployed) + '\n'
results_string += 'Average MPC for the retired is ' + mystr(MPCretired) + '\n'
results_string += 'Of the population with the 1/3 highest MPCs...' + '\n'
results_string += mystr(HandToMouthPct[0]*100) + '% are in the bottom wealth quintile,' + '\n'
results_string += mystr(HandToMouthPct[1]*100) + '% are in the second wealth quintile,' + '\n'
results_string += mystr(HandToMouthPct[2]*100) + '% are in the third wealth quintile,' + '\n'
results_string += mystr(HandToMouthPct[3]*100) + '% are in the fourth wealth quintile,' + '\n'
results_string += 'and ' + mystr(HandToMouthPct[4]*100) + '% are in the top wealth quintile.' + '\n'
print(results_string)
# Save results to disk
if spec_name is not None:
with open(self.my_file_path + '/Results/' + 'RHeteroResults.txt','w') as f:
f.write(results_string)
f.close()
def getKYratioDifference(Economy,param_name,param_count,center,spread,dist_type):
'''
Finds the difference between simulated and target capital to income ratio in an economy when
a given parameter has heterogeneity according to some distribution.
Parameters
----------
Economy : cstwMPCmarket
An object representing the entire economy, containing the various AgentTypes as an attribute.
param_name : string
The name of the parameter of interest that varies across the population.
param_count : int
The number of different values the parameter of interest will take on.
center : float
A measure of centrality for the distribution of the parameter of interest.
spread : float
A measure of spread or diffusion for the distribution of the parameter of interest.
dist_type : string
The type of distribution to be used. Can be "lognormal" or "uniform" (can expand).
Returns
-------
diff : float
Difference between simulated and target capital to income ratio for this economy.
'''
Economy(LorenzBool = False, ManyStatsBool = False) # Make sure we're not wasting time calculating stuff
Economy.distributeParams(param_name,param_count,center,spread,dist_type) # Distribute parameters
Economy.solve()
diff = Economy.calcKYratioDifference()
print('getKYratioDifference tried center = ' + str(center) + ' and got ' + str(diff))
return diff
def findLorenzDistanceAtTargetKY(Economy,param_name,param_count,center_range,spread,dist_type):
'''
Finds the sum of squared distances between simulated and target Lorenz points in an economy when
a given parameter has heterogeneity according to some distribution. The class of distribution
and a measure of spread are given as inputs, but the measure of centrality such that the capital
to income ratio matches the target ratio must be found.
Parameters
----------
Economy : cstwMPCmarket
An object representing the entire economy, containing the various AgentTypes as an attribute.
param_name : string
The name of the parameter of interest that varies across the population.
param_count : int
The number of different values the parameter of interest will take on.
center_range : [float,float]
Bounding values for a measure of centrality for the distribution of the parameter of interest.
spread : float
A measure of spread or diffusion for the distribution of the parameter of interest.
dist_type : string
The type of distribution to be used. Can be "lognormal" or "uniform" (can expand).
Returns
-------
dist : float
Sum of squared distances between simulated and target Lorenz points for this economy (sqrt).
'''
# Define the function to search for the correct value of center, then find its zero
intermediateObjective = lambda center : getKYratioDifference(Economy = Economy,
param_name = param_name,
param_count = param_count,
center = center,
spread = spread,
dist_type = dist_type)
optimal_center = brentq(intermediateObjective,center_range[0],center_range[1],xtol=10**(-6))
Economy.center_save = optimal_center
# Get the sum of squared Lorenz distances given the correct distribution of the parameter
Economy(LorenzBool = True) # Make sure we actually calculate simulated Lorenz points
Economy.distributeParams(param_name,param_count,optimal_center,spread,dist_type) # Distribute parameters
Economy.solveAgents()
Economy.makeHistory()
dist = Economy.calcLorenzDistance()
Economy(LorenzBool = False)
print ('findLorenzDistanceAtTargetKY tried spread = ' + str(spread) + ' and got ' + str(dist))
return dist
def calcStationaryAgeDstn(LivPrb,terminal_period):
'''
Calculates the steady state proportions of each age given survival probability sequence LivPrb.
Assumes that agents who die are replaced by a newborn agent with t_age=0.
Parameters
----------
LivPrb : [float]
Sequence of survival probabilities in ordinary chronological order. Has length T_cycle.
terminal_period : bool
Indicator for whether a terminal period follows the last period in the cycle (with LivPrb=0).
Returns
-------
AgeDstn : np.array
Stationary distribution of age. Stochastic vector with frequencies of each age.
'''
T = len(LivPrb)
if terminal_period:
MrkvArray = np.zeros((T+1,T+1))
top = T
else:
MrkvArray = np.zeros((T,T))
top = T-1
for t in range(top):
MrkvArray[t,0] = 1.0 - LivPrb[t]
MrkvArray[t,t+1] = LivPrb[t]
MrkvArray[t+1,0] = 1.0
w, v = np.linalg.eig(np.transpose(MrkvArray))
idx = (np.abs(w-1.0)).argmin()
x = v[:,idx].astype(float)
AgeDstn = (x/np.sum(x))
return AgeDstn
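# Illustrative sketch (not part of the original source): with a flat 90% survival
# probability over three periods and a terminal period, the stationary age
# distribution is proportional to (1, 0.9, 0.81, 0.729):
#   calcStationaryAgeDstn([0.9, 0.9, 0.9], True)
#   -> approximately [0.291, 0.262, 0.236, 0.212]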
def assignRdistribution(type_list,R_list):
'''
Assigns the interest rate values in R_list to the types in type_list. If
there is heterogeneity beyond the interest rate, then the same value is
assigned to consecutive types (that is why the function uses two while loops).
It assigns heterogeneity in the interest rate on saving (``Rsave``).
Parameters
----------
type_list : [agent]
The list of types that should be assigned the different values.
R_list : [float] or np.array
List of values to assign to the types.
Returns
-------
none
'''
R_count = len(R_list)
type_N = len(type_list) // R_count
j = 0
b = 0
while j < len(type_list):
t = 0
while t < type_N:
type_list[j](Rsave = R_list[b])
t += 1
j += 1
b += 1
####################################################################################################
def main():
print("The execution takes about a minute with a RAM of 32GB and a processor of 3.6 GHz on macOS Sierra")
# Set targets for K/Y and the Lorenz curve based on the | |
| Units: C
Args:
value (float): value for IDD Field `Maximum Setpoint Temperature`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `maximum_setpoint_temperature` or None if not set
"""
return self["Maximum Setpoint Temperature"]
@maximum_setpoint_temperature.setter
def maximum_setpoint_temperature(self, value=None):
"""Corresponds to IDD field `Maximum Setpoint Temperature`"""
self["Maximum Setpoint Temperature"] = value
@property
def minimum_setpoint_temperature(self):
"""field `Minimum Setpoint Temperature`
| Units: C
Args:
value (float): value for IDD Field `Minimum Setpoint Temperature`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `minimum_setpoint_temperature` or None if not set
"""
return self["Minimum Setpoint Temperature"]
@minimum_setpoint_temperature.setter
def minimum_setpoint_temperature(self, value=None):
"""Corresponds to IDD field `Minimum Setpoint Temperature`"""
self["Minimum Setpoint Temperature"] = value
@property
def setpoint_node_or_nodelist_name(self):
"""field `Setpoint Node or NodeList Name`
| Node(s) at which control variable will be set
Args:
value (str): value for IDD Field `Setpoint Node or NodeList Name`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `setpoint_node_or_nodelist_name` or None if not set
"""
return self["Setpoint Node or NodeList Name"]
@setpoint_node_or_nodelist_name.setter
def setpoint_node_or_nodelist_name(self, value=None):
"""Corresponds to IDD field `Setpoint Node or NodeList Name`"""
self["Setpoint Node or NodeList Name"] = value
class SetpointManagerCondenserEnteringReset(DataObject):
""" Corresponds to IDD object `SetpointManager:CondenserEnteringReset`
This setpoint manager uses one curve to determine the optimum condenser entering water temperature
for a given timestep and two other curves to place boundary conditions on the setpoint value.
"""
_schema = {'extensible-fields': OrderedDict(),
'fields': OrderedDict([(u'name',
{'name': u'Name',
'pyname': u'name',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': u'alpha'}),
(u'control variable',
{'name': u'Control Variable',
'pyname': u'control_variable',
'default': u'Temperature',
'required-field': True,
'autosizable': False,
'accepted-values': [u'Temperature'],
'autocalculatable': False,
'type': 'alpha'}),
(u'default condenser entering water temperature schedule name',
{'name': u'Default Condenser Entering Water Temperature Schedule Name',
'pyname': u'default_condenser_entering_water_temperature_schedule_name',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': u'object-list'}),
(u'minimum design wetbulb temperature curve name',
{'name': u'Minimum Design Wetbulb Temperature Curve Name',
'pyname': u'minimum_design_wetbulb_temperature_curve_name',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': u'object-list'}),
(u'minimum outside air wetbulb temperature curve name',
{'name': u'Minimum Outside Air Wetbulb Temperature Curve Name',
'pyname': u'minimum_outside_air_wetbulb_temperature_curve_name',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': u'object-list'}),
(u'optimized cond entering water temperature curve name',
{'name': u'Optimized Cond Entering Water Temperature Curve Name',
'pyname': u'optimized_cond_entering_water_temperature_curve_name',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': u'object-list'}),
(u'minimum lift',
{'name': u'Minimum Lift',
'pyname': u'minimum_lift',
'default': 11.1,
'required-field': False,
'autosizable': False,
'autocalculatable': False,
'type': u'real',
'unit': u'deltaC'}),
(u'maximum condenser entering water temperature',
{'name': u'Maximum Condenser Entering Water Temperature',
'pyname': u'maximum_condenser_entering_water_temperature',
'default': 32.0,
'required-field': False,
'autosizable': False,
'autocalculatable': False,
'type': u'real',
'unit': u'C'}),
(u'cooling tower design inlet air wet-bulb temperature',
{'name': u'Cooling Tower Design Inlet Air Wet-Bulb Temperature',
'pyname': u'cooling_tower_design_inlet_air_wetbulb_temperature',
'default': 25.56,
'required-field': False,
'autosizable': False,
'autocalculatable': False,
'type': u'real',
'unit': u'C'}),
(u'setpoint node or nodelist name',
{'name': u'Setpoint Node or NodeList Name',
'pyname': u'setpoint_node_or_nodelist_name',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': u'node'})]),
'format': None,
'group': u'Setpoint Managers',
'min-fields': 10,
'name': u'SetpointManager:CondenserEnteringReset',
'pyname': u'SetpointManagerCondenserEnteringReset',
'required-object': False,
'unique-object': False}
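# Editor's note: illustrative usage sketch, not part of the original file. It assumes the usual
# pyidf-style DataObject API implied by the property setters below; field names come from the
# _schema above and the numeric values are just the schema defaults. The object and node names
# are hypothetical.
#
#     mgr = SetpointManagerCondenserEnteringReset()
#     mgr.name = "Condenser Entering Reset SPM"                   # hypothetical name
#     mgr.minimum_lift = 11.1                                     # deltaC (schema default)
#     mgr.maximum_condenser_entering_water_temperature = 32.0     # C (schema default)
#     mgr.setpoint_node_or_nodelist_name = "Condenser Supply Outlet Node"   # hypothetical node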
@property
def name(self):
"""field `Name`
Args:
value (str): value for IDD Field `Name`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `name` or None if not set
"""
return self["Name"]
@name.setter
def name(self, value=None):
"""Corresponds to IDD field `Name`"""
self["Name"] = value
@property
def control_variable(self):
"""field `Control Variable`
| Default value: Temperature
Args:
value (str): value for IDD Field `Control Variable`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `control_variable` or None if not set
"""
return self["Control Variable"]
@control_variable.setter
def control_variable(self, value="Temperature"):
"""Corresponds to IDD field `Control Variable`"""
self["Control Variable"] = value
@property
def default_condenser_entering_water_temperature_schedule_name(self):
"""field `Default Condenser Entering Water Temperature Schedule Name`
| This scheduled setpoint value is only used in a given timestep if the
| "Optimized" Condenser Entering Temperature does not fall within the prescribed
| boundary conditions.
Args:
value (str): value for IDD Field `Default Condenser Entering Water Temperature Schedule Name`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `default_condenser_entering_water_temperature_schedule_name` or None if not set
"""
return self[
"Default Condenser Entering Water Temperature Schedule Name"]
@default_condenser_entering_water_temperature_schedule_name.setter
def default_condenser_entering_water_temperature_schedule_name(
self,
value=None):
"""Corresponds to IDD field `Default Condenser Entering Water
Temperature Schedule Name`"""
self[
"Default Condenser Entering Water Temperature Schedule Name"] = value
@property
def minimum_design_wetbulb_temperature_curve_name(self):
"""field `Minimum Design Wetbulb Temperature Curve Name`
Args:
value (str): value for IDD Field `Minimum Design Wetbulb Temperature Curve Name`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `minimum_design_wetbulb_temperature_curve_name` or None if not set
"""
return self["Minimum Design Wetbulb Temperature Curve Name"]
@minimum_design_wetbulb_temperature_curve_name.setter
def minimum_design_wetbulb_temperature_curve_name(self, value=None):
"""Corresponds to IDD field `Minimum Design Wetbulb Temperature Curve
Name`"""
self["Minimum Design Wetbulb Temperature Curve Name"] = value
@property
def minimum_outside_air_wetbulb_temperature_curve_name(self):
"""field `Minimum Outside Air Wetbulb Temperature Curve Name`
Args:
value (str): value for IDD Field `Minimum Outside Air Wetbulb Temperature Curve Name`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `minimum_outside_air_wetbulb_temperature_curve_name` or None if not set
"""
return self["Minimum Outside Air Wetbulb Temperature Curve Name"]
@minimum_outside_air_wetbulb_temperature_curve_name.setter
def minimum_outside_air_wetbulb_temperature_curve_name(self, value=None):
"""Corresponds to IDD field `Minimum Outside Air Wetbulb Temperature
Curve Name`"""
self["Minimum Outside Air Wetbulb Temperature Curve Name"] = value
@property
def optimized_cond_entering_water_temperature_curve_name(self):
"""field `Optimized Cond Entering Water Temperature Curve Name`
Args:
value (str): value for IDD Field `Optimized Cond Entering Water Temperature Curve Name`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `optimized_cond_entering_water_temperature_curve_name` or None if not set
"""
return self["Optimized Cond Entering Water Temperature Curve Name"]
@optimized_cond_entering_water_temperature_curve_name.setter
def optimized_cond_entering_water_temperature_curve_name(self, value=None):
"""Corresponds to IDD field `Optimized Cond Entering Water Temperature
Curve Name`"""
self["Optimized Cond Entering Water Temperature Curve Name"] = value
@property
def minimum_lift(self):
"""field `Minimum Lift`
| Units: deltaC
| Default value: 11.1
Args:
value (float): value for IDD Field `Minimum Lift`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `minimum_lift` or None if not set
"""
return self["Minimum Lift"]
@minimum_lift.setter
def minimum_lift(self, value=11.1):
"""Corresponds to IDD field `Minimum Lift`"""
self["Minimum Lift"] = value
@property
def maximum_condenser_entering_water_temperature(self):
"""field `Maximum Condenser Entering Water Temperature`
| Units: C
| Default value: 32.0
Args:
value (float): value for IDD Field `Maximum Condenser Entering Water Temperature`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `maximum_condenser_entering_water_temperature` or None if not set
"""
return self["Maximum Condenser Entering Water Temperature"]
@maximum_condenser_entering_water_temperature.setter
def maximum_condenser_entering_water_temperature(self, value=32.0):
"""Corresponds to IDD field `Maximum Condenser Entering Water
Temperature`"""
self["Maximum Condenser Entering Water Temperature"] = value
@property
def cooling_tower_design_inlet_air_wetbulb_temperature(self):
"""field `Cooling Tower Design Inlet Air Wet-Bulb Temperature`
| Units: C
| Default value: 25.56
Args:
value (float): value for IDD Field `Cooling Tower Design Inlet Air Wet-Bulb Temperature`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `cooling_tower_design_inlet_air_wetbulb_temperature` or None if not set
"""
return self["Cooling Tower Design Inlet Air Wet-Bulb Temperature"]
@cooling_tower_design_inlet_air_wetbulb_temperature.setter
def cooling_tower_design_inlet_air_wetbulb_temperature(self, value=25.56):
""" Corresponds to IDD field `Cooling Tower Design Inlet Air Wet-Bulb Temperature`
"""
self["Cooling Tower Design Inlet Air Wet-Bulb Temperature"] = value
@property
def setpoint_node_or_nodelist_name(self):
"""field `Setpoint Node or NodeList Name`
| Node(s) at which control variable will be set
Args:
value (str): value for IDD Field `Setpoint Node or NodeList Name`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `setpoint_node_or_nodelist_name` or None if not set
"""
return self["Setpoint Node or NodeList Name"]
@setpoint_node_or_nodelist_name.setter
def setpoint_node_or_nodelist_name(self, value=None):
"""Corresponds to IDD field `Setpoint Node or NodeList Name`"""
self["Setpoint Node or NodeList Name"] = value
class SetpointManagerCondenserEnteringResetIdeal(DataObject):
""" Corresponds to IDD object `SetpointManager:CondenserEnteringReset:Ideal`
This setpoint manager determines the ideal optimum condenser entering water temperature
setpoint for a given timestep.
"""
_schema = {'extensible-fields': OrderedDict(),
'fields': OrderedDict([(u'name',
{'name': u'Name',
'pyname': u'name',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': u'alpha'}),
(u'control variable',
{'name': u'Control Variable',
'pyname': u'control_variable',
'default': u'Temperature',
'required-field': True,
'autosizable': False,
'accepted-values': [u'Temperature'],
'autocalculatable': False,
'type': 'alpha'}),
(u'minimum lift',
{'name':
list, since it has been combined when loading data " % seq_len)
else:
Train_data_keys[seq_len]=(train_featuredata_all)
if seq_len in Train_targets_keys:
raise Exception("Duplicate seq length %i in Train list, since it has been combined when loading data " % seq_len)
else:
Train_targets_keys[seq_len]=train_targets
#processing test data
test_feature_seq = test_feature.reshape(test_feature.shape[0],sequence_length,45)
test_feature_aa = test_feature_seq[:,:,0:20]
test_feature_ss = test_feature_seq[:,:,20:23]
test_feature_sa = test_feature_seq[:,:,23:25]
test_feature_pssm = test_feature_seq[:,:,25:45]
min_pssm=-8
max_pssm=16
test_feature_pssm_normalize = np.empty_like(test_feature_pssm)
test_feature_pssm_normalize[:] = test_feature_pssm
test_feature_pssm_normalize=(test_feature_pssm_normalize-min_pssm)/(max_pssm-min_pssm)
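# Min-max scaling: with the assumed PSSM score range [min_pssm, max_pssm] = [-8, 16],
# (x - min) / (max - min) maps the raw PSSM values roughly into [0, 1], putting them on a
# comparable scale to the AA/SS/SA features before the concatenation below.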
test_featuredata_all = np.concatenate((test_feature_aa,test_feature_ss,test_feature_sa,test_feature_pssm_normalize), axis=2)
test_targets = np.zeros((test_labels.shape[0], 1195 ), dtype=int)
for i in range(0, test_labels.shape[0]):
test_targets[i][int(test_labels[i])] = 1
print("Length: ",seq_len," ---> ",test_featuredata_all.shape[0]," testing seqs")
if test_featuredata_all.shape[0] > 20: # to speed up the training
test_featuredata_all = test_featuredata_all[0:20,:]
test_targets = test_targets[0:20,:]
if seq_len in Test_data_keys:
raise Exception("Duplicate seq length %i in Test list, since it has been combined when loading data " % seq_len)
else:
Test_data_keys[seq_len]=test_featuredata_all
if seq_len in Test_targets_keys:
raise Exception("Duplicate seq length %i in Test list, since it has been combined when loading data " % seq_len)
else:
Test_targets_keys[seq_len]=test_targets
### Re-loading training dataset for global evaluation
Trainlist_data_keys = dict()
Trainlist_targets_keys = dict()
sequence_file=open(train_list,'r').readlines()
for i in range(len(sequence_file)):
if sequence_file[i].find('Length') >0 :
print("Skip line ",sequence_file[i])
continue
pdb_name = sequence_file[i].split('\t')[0]
#print "Processing ",pdb_name
featurefile = feature_dir + '/' + pdb_name + '.fea_aa_ss_sa'
pssmfile = pssm_dir + '/' + pdb_name + '.pssm_fea'
if not os.path.isfile(featurefile):
#print "feature file not exists: ",featurefile, " pass!"
continue
if not os.path.isfile(pssmfile):
#print "pssm feature file not exists: ",pssmfile, " pass!"
continue
featuredata = import_DLS2FSVM(featurefile)
pssmdata = import_DLS2FSVM(pssmfile)
pssm_fea = pssmdata[:,1:]
fea_len = (featuredata.shape[1]-1)//(20+3+2)
train_labels = featuredata[:,0]
train_feature = featuredata[:,1:]
train_feature_seq = train_feature.reshape(fea_len,25)
train_feature_aa = train_feature_seq[:,0:20]
train_feature_ss = train_feature_seq[:,20:23]
train_feature_sa = train_feature_seq[:,23:25]
train_feature_pssm = pssm_fea.reshape(fea_len,20)
min_pssm=-8
max_pssm=16
train_feature_pssm_normalize = np.empty_like(train_feature_pssm)
train_feature_pssm_normalize[:] = train_feature_pssm
train_feature_pssm_normalize=(train_feature_pssm_normalize-min_pssm)/(max_pssm-min_pssm)
featuredata_all_tmp = np.concatenate((train_feature_aa,train_feature_ss,train_feature_sa,train_feature_pssm_normalize), axis=1)
if fea_len <ktop_node: # suppose k-max = ktop_node
fea_len = ktop_node
train_featuredata_all = np.zeros((ktop_node,featuredata_all_tmp.shape[1]))
train_featuredata_all[:featuredata_all_tmp.shape[0],:featuredata_all_tmp.shape[1]] = featuredata_all_tmp
else:
train_featuredata_all = featuredata_all_tmp
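# Shorter sequences are zero-padded up to ktop_node rows (above) so that k-max pooling with
# k = ktop_node always has enough positions to select from; longer sequences pass through unchanged.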
#print "train_featuredata_all: ",train_featuredata_all.shape
train_targets = np.zeros((train_labels.shape[0], 1195 ), dtype=int)
for i in range(0, train_labels.shape[0]):
train_targets[i][int(train_labels[i])] = 1
train_featuredata_all=train_featuredata_all.reshape(1,train_featuredata_all.shape[0],train_featuredata_all.shape[1])
if pdb_name in Trainlist_data_keys:
print("Duplicate pdb name %s in Train list " % pdb_name)
else:
Trainlist_data_keys[pdb_name]=train_featuredata_all
if pdb_name in Trainlist_targets_keys:
print("Duplicate pdb name %s in Train list " % pdb_name)
else:
Trainlist_targets_keys[pdb_name]=train_targets
Vallist_data_keys = dict()
Vallist_targets_keys = dict()
sequence_file=open(val_list,'r').readlines()
for i in range(len(sequence_file)):
if sequence_file[i].find('Length') >0 :
print("Skip line ",sequence_file[i])
continue
pdb_name = sequence_file[i].split('\t')[0]
#print "Processing ",pdb_name
featurefile = feature_dir + '/' + pdb_name + '.fea_aa_ss_sa'
pssmfile = pssm_dir + '/' + pdb_name + '.pssm_fea'
if not os.path.isfile(featurefile):
#print "feature file not exists: ",featurefile, " pass!"
continue
if not os.path.isfile(pssmfile):
#print "pssm feature file not exists: ",pssmfile, " pass!"
continue
featuredata = import_DLS2FSVM(featurefile)
pssmdata = import_DLS2FSVM(pssmfile)
pssm_fea = pssmdata[:,1:]
fea_len = (featuredata.shape[1]-1)//(20+3+2)
train_labels = featuredata[:,0]
train_feature = featuredata[:,1:]
train_feature_seq = train_feature.reshape(fea_len,25)
train_feature_aa = train_feature_seq[:,0:20]
train_feature_ss = train_feature_seq[:,20:23]
train_feature_sa = train_feature_seq[:,23:25]
train_feature_pssm = pssm_fea.reshape(fea_len,20)
min_pssm=-8
max_pssm=16
train_feature_pssm_normalize = np.empty_like(train_feature_pssm)
train_feature_pssm_normalize[:] = train_feature_pssm
train_feature_pssm_normalize=(train_feature_pssm_normalize-min_pssm)/(max_pssm-min_pssm)
featuredata_all_tmp = np.concatenate((train_feature_aa,train_feature_ss,train_feature_sa,train_feature_pssm_normalize), axis=1)
if fea_len <ktop_node: # suppose k-max = ktop_node
fea_len = ktop_node
train_featuredata_all = np.zeros((ktop_node,featuredata_all_tmp.shape[1]))
train_featuredata_all[:featuredata_all_tmp.shape[0],:featuredata_all_tmp.shape[1]] = featuredata_all_tmp
else:
train_featuredata_all = featuredata_all_tmp
train_targets = np.zeros((train_labels.shape[0], 1195 ), dtype=int)
for i in range(0, train_labels.shape[0]):
train_targets[i][int(train_labels[i])] = 1
train_featuredata_all=train_featuredata_all.reshape(1,train_featuredata_all.shape[0],train_featuredata_all.shape[1])
if pdb_name in Vallist_data_keys:
print("Duplicate pdb name %s in Val list " % pdb_name)
else:
Vallist_data_keys[pdb_name]=train_featuredata_all
if pdb_name in Vallist_targets_keys:
print("Duplicate pdb name %s in Val list " % pdb_name)
else:
Vallist_targets_keys[pdb_name]=train_targets
Testlist_data_keys = dict()
Testlist_targets_keys = dict()
sequence_file=open(test_list,'r').readlines()
for i in range(len(sequence_file)):
if sequence_file[i].find('Length') >0 :
print("Skip line ",sequence_file[i])
continue
pdb_name = sequence_file[i].split('\t')[0]
#print "Processing ",pdb_name
featurefile = feature_dir + '/' + pdb_name + '.fea_aa_ss_sa'
pssmfile = pssm_dir + '/' + pdb_name + '.pssm_fea'
if not os.path.isfile(featurefile):
#print "feature file not exists: ",featurefile, " pass!"
continue
if not os.path.isfile(pssmfile):
#print "pssm feature file not exists: ",pssmfile, " pass!"
continue
featuredata = import_DLS2FSVM(featurefile)
pssmdata = import_DLS2FSVM(pssmfile) # d1ft8e_ has a length mismatch: 57 in one source but 44 in the pdb, why?
pssm_fea = pssmdata[:,1:]
fea_len = (featuredata.shape[1]-1)//(20+3+2)
#if fea_len < 40: # since kmax right now is ktop_node
# continue
train_labels = featuredata[:,0]
train_feature = featuredata[:,1:]
train_feature_seq = train_feature.reshape(fea_len,25)
train_feature_aa = train_feature_seq[:,0:20]
train_feature_ss = train_feature_seq[:,20:23]
train_feature_sa = train_feature_seq[:,23:25]
train_feature_pssm = pssm_fea.reshape(fea_len,20)
min_pssm=-8
max_pssm=16
train_feature_pssm_normalize = np.empty_like(train_feature_pssm)
train_feature_pssm_normalize[:] = train_feature_pssm
train_feature_pssm_normalize=(train_feature_pssm_normalize-min_pssm)/(max_pssm-min_pssm)
featuredata_all_tmp = np.concatenate((train_feature_aa,train_feature_ss,train_feature_sa,train_feature_pssm_normalize), axis=1)
if fea_len <ktop_node: # suppose k-max = ktop_node
fea_len = ktop_node
train_featuredata_all = np.zeros((ktop_node,featuredata_all_tmp.shape[1]))
train_featuredata_all[:featuredata_all_tmp.shape[0],:featuredata_all_tmp.shape[1]] = featuredata_all_tmp
else:
train_featuredata_all = featuredata_all_tmp
#print "test_featuredata_all: ",train_featuredata_all.shape
train_targets = np.zeros((train_labels.shape[0], 1195 ), dtype=int)
for i in range(0, train_labels.shape[0]):
train_targets[i][int(train_labels[i])] = 1
train_featuredata_all=train_featuredata_all.reshape(1,train_featuredata_all.shape[0],train_featuredata_all.shape[1])
if pdb_name in Testlist_data_keys:
print("Duplicate pdb name %s in Test list " % pdb_name)
else:
Testlist_data_keys[pdb_name]=train_featuredata_all
if pdb_name in Testlist_targets_keys:
print("Duplicate pdb name %s in Test list " % pdb_name)
else:
Testlist_targets_keys[pdb_name]=train_targets
### Define the model
model_out= "%s/model-train-%s.json" % (CV_dir,model_prefix)
model_weight_out = "%s/model-train-weight-%s.h5" % (CV_dir,model_prefix)
model_weight_out_best = "%s/model-train-weight-%s-best-val.h5" % (CV_dir,model_prefix)
if os.path.exists(model_out):
print("######## Loading existing model ",model_out);
# load json and create model
json_file_model = open(model_out, 'r')
loaded_model_json = json_file_model.read()
json_file_model.close()
print("######## Loaded model from disk")
DLS2F_CNN = model_from_json(loaded_model_json, custom_objects={'K_max_pooling1d': K_max_pooling1d})
else:
print("######## Setting initial model");
DLS2F_CNN = DLS2F_construct_withaa_complex_win_filter_layer_opt(win_array,ktop_node,1195,use_bias,hidden_type,nb_filters,nb_layers,opt,hidden_num) # class 284 for class a
if os.path.exists(model_weight_out):
print("######## Loading existing weights ",model_weight_out);
DLS2F_CNN.load_weights(model_weight_out)
DLS2F_CNN.compile(loss="categorical_crossentropy", metrics=['accuracy'], optimizer=opt)
else:
print("######## Setting initial weights");
DLS2F_CNN.compile(loss="categorical_crossentropy", metrics=['accuracy'], optimizer=opt)
train_acc_best = 0
val_acc_best = 0
print('Initial best val accuracy is %.5f' % (val_acc_best))
for epoch in range(0,epoch_outside):
print("\n############ Running epoch ", epoch)
for key in list(data_all_dict_padding.keys()):
if key <start:
continue
if key > end:
continue
print('### Loading sequence length :', key)
seq_len=key
train_featuredata_all=Train_data_keys[seq_len]
train_targets=Train_targets_keys[seq_len]
test_featuredata_all=Test_data_keys[seq_len]
test_targets=Test_targets_keys[seq_len]
print("Train shape: ",train_featuredata_all.shape, " in outside epoch ", epoch)
print("Test shape: ",test_featuredata_all.shape, " in outside epoch ", epoch)
if two_stream:
#train_featuredata_all = train_featuredata_all.reshape(train_featuredata_all.shape[0],train_featuredata_all.shape[1], train_featuredata_all.shape[2], 1)
#train_featuredata_all = np.asarray([train_featuredata_all[:,:,:20,:], train_featuredata_all[:,:,20:,:]])
#train_featuredata_all = train_featuredata_all.reshape(train_featuredata_all.shape[0],train_featuredata_all.shape[1], train_featuredata_all.shape[2])
DLS2F_CNN.fit([train_featuredata_all[:,:,:20], train_featuredata_all[:, :, 20:]], train_targets,
batch_size=50,nb_epoch=epoch_inside,
validation_data=([test_featuredata_all[:,:,:20], test_featuredata_all[:,:,20:]], test_targets),
verbose=1)
else:
DLS2F_CNN.fit([train_featuredata_all], train_targets, batch_size=50,nb_epoch=epoch_inside, validation_data=([test_featuredata_all], test_targets), verbose=1)
# serialize model to JSON
model_json = DLS2F_CNN.to_json()
print("Saved model to disk")
with open(model_out, "w") as json_file:
json_file.write(model_json)
del train_featuredata_all
del train_targets
del test_featuredata_all
del test_targets
# serialize weights to HDF5
print("Saved weight to disk")
DLS2F_CNN.save_weights(model_weight_out)
#if epoch < epoch_outside*1/3:
if epoch%10 != 0:
continue
corrected_top1=0
corrected_top5=0
corrected_top10=0
corrected_top15=0
corrected_top20=0
sequence_file=open(test_list,'r').readlines()
#pdb_name='d1np7a1'
all_cases=0
corrected=0
for i in range(len(sequence_file)):
if sequence_file[i].find('Length') >0 :
print("Skip line ",sequence_file[i])
continue
pdb_name = sequence_file[i].split('\t')[0]
test_featuredata_all=Testlist_data_keys[pdb_name]
test_targets=Testlist_targets_keys[pdb_name]
if two_stream:
score, accuracy = DLS2F_CNN.evaluate([test_featuredata_all[:,:,:20], test_featuredata_all[:,:,20:]], test_targets, batch_size=10, verbose=0)
else:
score, accuracy = DLS2F_CNN.evaluate([test_featuredata_all], test_targets, batch_size=10, verbose=0)
all_cases +=1
if accuracy == 1:
corrected +=1
if two_stream:
predict_val= DLS2F_CNN.predict([test_featuredata_all[:,:,:20], test_featuredata_all[:,:,20:]])
else:
predict_val= DLS2F_CNN.predict([test_featuredata_all])
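# argsort()[-k:][::-1] returns the indices of the k highest predicted scores, best first
# (the top-k predictions); true_index below is the position of the 1 in the one-hot test target.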
top1_prediction=predict_val[0].argsort()[-1:][::-1]
top5_prediction=predict_val[0].argsort()[-5:][::-1]
top10_prediction=predict_val[0].argsort()[-10:][::-1]
top15_prediction=predict_val[0].argsort()[-15:][::-1]
top20_prediction=predict_val[0].argsort()[-20:][::-1]
true_index = test_targets[0].argsort()[-1:][::-1][0]
if true_index in top1_prediction:
corrected_top1 +=1
if true_index in top5_prediction:
corrected_top5 +=1
if true_index in top10_prediction:
corrected_top10 +=1
if true_index in top15_prediction:
corrected_top15 +=1
if true_index in top20_prediction:
corrected_top20 +=1
del test_featuredata_all
del test_targets
test_acc = float(corrected)/all_cases
print('The test accuracy is %.5f' % (test_acc))
top1_acc = float(corrected_top1)/all_cases
top5_acc = float(corrected_top5)/all_cases
top10_acc = float(corrected_top10)/all_cases
top15_acc = float(corrected_top15)/all_cases
top20_acc = float(corrected_top20)/all_cases
print('The top1_acc accuracy is %.5f' % (top1_acc))
print('The top5_acc accuracy is %.5f' % (top5_acc))
print('The top10_acc accuracy is %.5f' % (top10_acc))
print('The top15_acc accuracy is %.5f' % (top15_acc))
print('The top20_acc accuracy is %.5f' % (top20_acc))
sequence_file=open(val_list,'r').readlines()
#pdb_name='d1np7a1'
all_cases=0
corrected=0
for i in range(len(sequence_file)):
if sequence_file[i].find('Length') >0 :
#print "Skip line ",sequence_file[i]
continue
pdb_name = sequence_file[i].split('\t')[0]
val_featuredata_all=Vallist_data_keys[pdb_name]
val_targets=Vallist_targets_keys[pdb_name]
if two_stream:
score, accuracy = DLS2F_CNN.evaluate([val_featuredata_all[:,:,:20], val_featuredata_all[:,:,20:]], val_targets, batch_size=10, verbose=0)
else:
score, accuracy = DLS2F_CNN.evaluate([val_featuredata_all], val_targets, batch_size=10, verbose=0)
del val_featuredata_all
del val_targets
all_cases +=1
if accuracy == 1:
corrected +=1
val_acc = float(corrected)/all_cases
if val_acc >= val_acc_best:
val_acc_best = val_acc
test_acc_best = test_acc
test_acc_best_top1=top1_acc
| |
# doc/simple-cli/simple_cli.py
from nessaid_cli.cmd import NessaidCmd
from nessaid_cli.tokens import (
CliToken,
StringToken,
RangedStringToken,
AlternativeStringsToken,
RangedIntToken,
RangedDecimalToken,
BooleanToken,
NullTokenValue
)
from nessaid_cli.utils import (
convert_to_cli_string,
convert_to_python_string
)
# Example 9 Custom token class: Read with example 9
# This is one of the comparatively complex token implementations. For simpler ones, please refer to
# nessaid_cli/tokens.py
#
# This token matches strings which are substrings of the previous token.
# The previous token is a StringToken which can match any string. When a match is attempted,
# this token checks what the previous token was and prompts the user with the substrings
# (separated by commas) in the parent string token.
class CustomSubstringToken(CliToken):
# The parent_index parameter is the index of the parent string in the match sequence
def __init__(self, name, parent_index, helpstring, cli):
self._parent_index = parent_index
super().__init__(name, helpstring=helpstring, cli=cli)
@property
def completable(self):
# So the CLI will prompt with the options and do auto-completion
return True
async def get_options(self, cli, s): # noqa
matched_values = cli.get_matched_values() # This will fetch the matched tokens it will be: 'token-test' <EXAMPLE_9_PARENT_STRING>
parent_strings = matched_values[self._parent_index].split(",")
python_strings = []
# Parent strings need to be converted to Python format, i.e. quotes stripped and escape characters replaced:
# '"as df"' will be converted to 'as df', 'a\\n' will be converted to 'a\n',
# and if genuine quotes are present, take precautions to preserve them.
# The CLI framework hopefully takes care of this for CLI-read strings; since we are splitting the string here,
# we should take care of it ourselves.
for s in parent_strings:
if s.startswith('\\"'):
s = '"' + s
if s.endswith('\\"'):
s += '"'
python_strings.append(convert_to_python_string(s))
# Now convert the substrings back to CLI format for presenting in CLI
substrings = [
convert_to_cli_string(s) for s in python_strings
]
return substrings
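# For example, if the parent StringToken matched "red,green,blue", this returns
# ["red", "green", "blue"] (after the quote/escape handling above).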
async def complete(self, input_str, cli):
options = await self.get_options(cli, input_str)
# The complete function should return a tuple (n, l), where n is the number of completions and l is the list of completions.
# If the input argument is empty, all options will be returned.
# It can also return (TOO_MANY_COMPLETIONS, []) if n is very large, so that the CLI won't be cluttered; the number of
# completions will come down as the user types, and the limited set of options can then be printed as suggestions.
return await CliToken.complete_from_multiple(options, input_str, cli)
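# Hypothetical illustration (assuming the usual prefix-matching behaviour of
# CliToken.complete_from_multiple): completing "ab" against options ["abc", "abd", "xyz"]
# would be expected to yield (2, ["abc", "abd"]).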
async def match(self, input_str, cli):
# match should return either of [MATCH_SUCCESS, MATCH_FAILURE, MATCH_PARTIAL]
options = await self.get_options(cli, input_str)
return await CliToken.match_from_multiple(options, input_str, cli)
async def get_value(self, match_string=None, cli=None):
try:
n, comp = await self.complete(match_string, cli=cli)
if n == 1:
return convert_to_python_string(comp[0])
elif n > 1:
if match_string in comp:
return convert_to_python_string(match_string)
except:
pass
return NullTokenValue
@property
def helpstring(self):
return "A substring of the parent string"
class SimpleCli(NessaidCmd):
r"""
# Global grammars and token definitions
# Token definitions for Example 6: Types
token STRING_TOKEN StringToken(); # Matches any string
token RANGED_STRING_TOKEN RangedStringToken(5, 10); # Matches any string of length (5-10), inclusive
token ALT_STRING_TOKEN AlternativeStringsToken("alt1", "alt2", "alt3"); # Matches any one of "alt1", "alt2", "alt3"
token INT_TOKEN RangedIntToken(0, 100, 20); # Matches an integer from (0-100), shows 20 suggestions
token DECIMAL_TOKEN RangedDecimalToken(-100, 100);
token BOOLEAN_TOKEN BooleanToken(); # Matches true or false, case insensitive
# Token definitions for Example 6: Types : End
# Child grammar definitions for Example 7
child_grammar_1:
"child-grammar-1"
<< print("Matched child grammar 1"); >>
;
child_grammar_2[]:
"child-grammar-2"
<< print("Matched child grammar 2"); >>
;
child_grammar_3[$value1, $value2]:
"child-grammar-3"
<< $value1 = "Changed by child-grammar-3"; >>
;
child_grammar_4[$value1, $value2]:
"child-grammar-4"
<< $value1 = $1; >>
{
"child-grammar-4"
<< $value2 = $1; >>
}
;
# Child grammar definitions for Example 7: End
token EXAMPLE_9_PARENT_STRING StringToken();
token EXAMPLE_9_SUBSTRING CustomSubstringToken(1); # The argument 1 is needed to locate the parent_string token
"""
def __init__(self, *args, **kwargs):
kwargs.update(dict(
show_grammar=True, # NessaidCmd is a subclass of nessaid_cli.cli.NessaidCli
# It auto-generates the grammar from class and function docstrings
# and feeds it to the base class cli object. This flag makes
# the CLI object print the generated grammar when starting.
disable_default_hooks=True, # There are a few default commands in the base class. We don't want them
use_base_grammar=False, # Don't use grammar definitions used by the base class, NessaidCmd
use_parent_grammar=False, # Don't use grammar definitions used by the base class
match_parent_grammar=False, # If we chain CLIs, there's provision to match and switch to parent CLI context
# if the current input fails in the CLI but can be matched in parent. No need here
))
super().__init__(*args, **kwargs)
def get_token_classes(self):
# This has to be filled if we are using special token classes
return [
StringToken,
RangedStringToken,
AlternativeStringsToken,
RangedIntToken,
RangedDecimalToken,
BooleanToken,
CustomSubstringToken,
]
# Example 1: Constant string tokens
# Note 1: CLI handlers can be defined as async or non-async functions
# Note 2: CLI handlers are detected by the presence of the designated prefix, by default 'do_'; this can be changed if needed
# Note 3: The docstring contains what is to be matched for this handler. Once matched, the handler will be called
# Note 4: It's advised to use raw strings for CLI definition docstrings
# Note 5: Under the function name we are defining a section of the grammar. The actual grammar
# will be auto-generated and fed to the CLI code (handled by nessaid_cli.py)
# Here we will implement a hook to match either 'quit' or 'exit' and the handler will stop the CLI
async def do_exit(self):
r"""
# Note: This is a comment in CLI grammar expression.
# Here we are using constant string tokens. The colon after the token value
# is followed by the helpstring, which will be shown in the prompt
"exit": "Exit from the CLI session"
|
"quit": "Exit from the CLI session"
"""
self.exit_loop()
# Example 2: Optional tokens and function calls
# Note 1: Optional token or block is enclosed in { }
# Note 2: The CLI hook has an extra parameter optional_chosen. We will see how that can be set from CLI
# Note 3: Another parameter dummy is added but not used in the CLI. By default it will be set to an empty string.
# Note 4: The expressions inside << >> are executed according to their position. The blocks 1 and 2 will be executed
# initially. Rest of the blocks will be executed as per their relative positions with tokens
# Note 5: See the CLI getting matched with the following input
# : optional-token mandatory this-is-optional
# : o m t
# : optional-token mandatory
# : o m
async def do_optional(self, optional_chosen, dummy):
r"""
<<
# Set the initial values of the parameters. param in python code is $param here in CLI code
$optional_chosen = False;
# And we are not doing anything with $dummy
>>
<<
# This is to demonstrate multiple execution blocks and function calls. Nothing to do with grammar matching
print("This is to demonstrate inline calls inside grammar. print is one among few functions calls supported.");
call print("The above print was just inline in CLI. This print call will reach the CLI object's function");
# The cli object has a print method defined in utils.py
print("Multiple args:", 1, "2", 3.0); # Processed inline in CLI
call print("Multiple args:", 1, "2", 3.0); # Calls CLI objects method
>>
"optional-token": "Demonstration of optional token"
"mandatory": "This token is mandatory"
{
"this-is-optional"
<< $optional_chosen = True; >>
}
"""
if optional_chosen:
print("Optional token was chosen")
else:
print("Optional token was not chosen")
print("Value of optional_chosen:", type(optional_chosen), optional_chosen)
print("Value of dummy:", type(dummy), dummy)
print("dummy:", dummy)
# Example 3: Optional block
async def do_optional_block(self, outer_opt, inner_opt):
r"""
"optional-block"
{
"outer-optional"
<< $outer_opt = $1; >> # $1 matches the value of first token in the (local) sequence
{
"inner_optional"
<< $inner_opt = $1; >>
}
}
"""
print("outer_opt:", outer_opt)
print("inner_opt:", inner_opt)
# Example 4: Sets
# Note 1: We can
= alternatives or [] # type: List[str]
self._regex_text = None # type: Optional[str]
self._regex = None # type: Optional[Pattern]
self._sql_like_fragments = None # type: Optional[List[str]]
# ---------------------------------------------------------------------
# Things we know about psychotropics
# ---------------------------------------------------------------------
if (ssri or non_ssri_modern_antidepressant or
tricyclic_antidepressant or
tetracyclic_and_related_antidepressant or
monoamine_oxidase_inhibitor):
conventional_antidepressant = True
if conventional_antidepressant:
antidepressant = True
if first_generation_antipsychotic or second_generation_antipsychotic:
antipsychotic = True
if benzodiazepine or z_drug:
gaba_a_functional_agonist = True
if ((antidepressant or antipsychotic or stimulant or anticholinergic or
gaba_a_functional_agonist or gaba_b_functional_agonist or
mood_stabilizer) and
(psychotropic is not False)):
psychotropic = True
if psychotropic is None:
psychotropic = False
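# Net effect of the rules above: setting a specific class flag rolls up into its umbrella
# categories, e.g. ssri=True implies conventional_antidepressant, antidepressant and (unless
# psychotropic was explicitly passed as False) psychotropic, so callers only need to set the
# most specific flag that applies.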
# ---------------------------------------------------------------------
# Things we know about other drugs
# ---------------------------------------------------------------------
if (sulfonylurea or biguanide or glifozin or glp1_agonist or
dpp4_inhibitor or meglitinide or thiazolidinedione):
antidiabetic = True
if beta_blocker or ace_inhibitor:
cardiovascular = True
# ---------------------------------------------------------------------
# Store category knowledge
# ---------------------------------------------------------------------
self.category_not_drug = category_not_drug
self.psychotropic = psychotropic
self.antidepressant = antidepressant
self.conventional_antidepressant = conventional_antidepressant
self.ssri = ssri
self.non_ssri_modern_antidepressant = non_ssri_modern_antidepressant
self.tricyclic = tricyclic_antidepressant
self.tetracyclic_and_related_antidepressant = tetracyclic_and_related_antidepressant # noqa
self.monoamine_oxidase_inhibitor = monoamine_oxidase_inhibitor
self.antipsychotic = antipsychotic
self.first_generation_antipsychotic = first_generation_antipsychotic
self.second_generation_antipsychotic = second_generation_antipsychotic
self.stimulant = stimulant
self.anticholinergic = anticholinergic
self.benzodiazepine = benzodiazepine
self.z_drug = z_drug
self.gaba_a_functional_agonist = gaba_a_functional_agonist
self.gaba_b_functional_agonist = gaba_b_functional_agonist
self.non_benzodiazepine_anxiolytic = non_benzodiazepine_anxiolytic
self.mood_stabilizer = mood_stabilizer
self.antidiabetic = antidiabetic
self.sulfonylurea = sulfonylurea
self.biguanide = biguanide
self.cardiovascular = cardiovascular
self.beta_blocker = beta_blocker
self.ace_inhibitor = ace_inhibitor
self.statin = statin
self.respiratory = respiratory
self.beta_agonist = beta_agonist
self.gastrointestinal = gastrointestinal
self.proton_pump_inhibitor = proton_pump_inhibitor
self.nonsteroidal_anti_inflammatory = nonsteroidal_anti_inflammatory
self.vitamin = vitamin
# ---------------------------------------------------------------------
# Store other flags
# ---------------------------------------------------------------------
self.slam_antidepressant_finder = slam_antidepressant_finder
@property
def regex_text(self) -> str:
"""
Return regex text (yet to be compiled) for this drug.
"""
if self._regex_text is None:
possibilities = [] # type: List[str]
for p in list(set(self.all_generics + self.alternatives)):
if self.add_preceding_word_boundary and not p.startswith(WB):
p = WB + p
if self.add_preceding_wildcards and not p.startswith(WILDCARD):
p = WILDCARD + p
if self.add_following_wildcards and not p.endswith(WILDCARD):
p = p + WILDCARD
possibilities.append(p)
self._regex_text = "|".join("(?:" + x + ")" for x in possibilities)
return self._regex_text
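# Illustrative sketch (assuming the add_* flags are enabled and that WB/WILDCARD are the usual
# r"\b" and ".*"): a Drug with generic "citalopram" and alternative "Cipramil" would get a
# regex_text like "(?:.*\bcitalopram.*)|(?:.*\bCipramil.*)"; the order of the alternatives is
# not guaranteed because a set is used above.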
@property
def regex(self) -> Pattern:
"""
Returns a compiled regex for this drug.
"""
if self._regex is None:
self._regex = re.compile(self.regex_text,
re.IGNORECASE | re.DOTALL)
return self._regex
@staticmethod
def regex_to_sql_like(regex_text: str,
single_wildcard: str = "_",
zero_or_more_wildcard: str = "%") -> List[str]:
"""
Converts regular expression text to a reasonably close fragment
for the SQL ``LIKE`` operator.
NOT PERFECT, but works for current built-in regular expressions.
Args:
regex_text: regular expression text to work with
single_wildcard: SQL single wildcard, typically an underscore
zero_or_more_wildcard: SQL "zero/one/many" wildcard, probably always
a percent symbol
Returns:
string for an SQL string literal
Raises:
:exc:`ValueError` for some regex text that it doesn't understand
properly
"""
def append_to_all(new_content: str) -> None:
nonlocal results
results = [r + new_content for r in results]
def split_and_append(new_options: List[str]) -> None:
nonlocal results
newresults = [] # type: List[str]
for option in new_options:
newresults.extend([r + option for r in results])
results = newresults
def deduplicate_wildcards(text: str) -> str:
while zero_or_more_wildcard + zero_or_more_wildcard in text:
text = text.replace(
zero_or_more_wildcard + zero_or_more_wildcard,
zero_or_more_wildcard)
return text
# Basic processing
working = regex_text # strings are immutable
results = [zero_or_more_wildcard] # start with a wildcard
while working:
if working.startswith(".*"):
# e.g. ".*ozapi"
append_to_all(zero_or_more_wildcard)
working = working[2:]
elif working.startswith("["):
# e.g. "[io]peridol"
close_bracket = working.index("]") # may raise
bracketed = working[1:close_bracket]
option_groups = bracketed.split("|")
options = [c for group in option_groups for c in group]
split_and_append(options)
working = working[close_bracket + 1:]
elif len(working) > 1 and working[1] == "?":
# e.g. "r?azole"
split_and_append(["", working[0]])
# ... regex "optional character"
# ... SQL: some results with a single wildcard, some without
working = working[2:]
elif working.startswith("."):
# single character wildcard
append_to_all(single_wildcard)
working = working[1:]
else:
append_to_all(working[0])
working = working[1:]
append_to_all(zero_or_more_wildcard) # end with a wildcard
# Remove any duplicate (consecutive) % wildcards:
results = [deduplicate_wildcards(r) for r in results]
# Done
return results
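# Worked examples, traced from the rules above:
#   regex_to_sql_like("[io]peridol") -> ["%iperidol%", "%operidol%"]
#   regex_to_sql_like("r?azole")     -> ["%azole%", "%razole%"]
#   regex_to_sql_like(".*ozapi")     -> ["%ozapi%"]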
@property
def sql_like_fragments(self) -> List[str]:
"""
Returns all the string literals to which a database column should be
compared using the SQL ``LIKE`` operator, to match this drug.
This isn't as accurate as the regex, but ``LIKE`` can do less.
``LIKE`` uses the wildcards ``_`` and ``%`` (as passed in to ``regex_to_sql_like`` above).
"""
if self._sql_like_fragments is None:
self._sql_like_fragments = []
for p in list(set(self.all_generics + self.alternatives)):
self._sql_like_fragments.extend(self.regex_to_sql_like(p))
return self._sql_like_fragments
def name_matches(self, name: str) -> bool:
"""
Detects whether the name that's passed matches our knowledge of any of
things that this drug might be called: generic name, brand name(s),
common misspellings.
The parameter should be pre-stripped of edge whitespace.
"""
return bool(self.regex.match(name))
def sql_column_like_drug(self, column_name: str) -> str:
"""
Returns SQL like
.. code-block:: sql
(column_name LIKE '%drugname1%' OR
column_name LIKE '%drugname2%')
for the drug names that this Drug object knows about.
Args:
column_name: column name, pre-escaped if necessary
Returns:
SQL fragment as above
"""
clauses = [
f"{column_name} LIKE {sql_string_literal(f)}"
for f in self.sql_like_fragments
]
return f"({' OR '.join(clauses)})"
# Source data.
DRUGS = [
# In comments below: (*) misspelling, capitalized for brand name, (~)
# hybrid generic/brand name, (+) old name.
# -------------------------------------------------------------------------
# SSRIs
# -------------------------------------------------------------------------
Drug(
"citalopram",
["Cipramil", "Celexa"],
ssri=True,
slam_antidepressant_finder=True
),
Drug(
"escitalopram",
["Cipralex", "Lexapro"],
ssri=True,
slam_antidepressant_finder=True
),
Drug(
"fluoxetine",
["Prozac", "Bellzac", "Oxactin", "Prozep", "Sarafem", "fluox.*"],
# CPFT 2013: "fluoxetine Dec"
ssri=True,
slam_antidepressant_finder=True
),
Drug(
"fluvoxamine",
["Luvox", "Faverin", "fluvoxamine.*"], # e.g. "fluvoxamine maleate"
ssri=True,
slam_antidepressant_finder=True
),
Drug(
"paroxetine",
["Seroxat", "Paxil"], # there are other brands elsewhere...
ssri=True,
slam_antidepressant_finder=True
),
Drug(
"sertraline",
["Lustral", "Zoloft", "Bellsert"],
# NOT Seretra (cf. SLAM code, see email to self 2016-12-02); Seretra =
# seratrodast = for asthma
ssri=True,
slam_antidepressant_finder=True
),
# -------------------------------------------------------------------------
# FIRST-GENERATION ANTIPSYCHOTICS
# -------------------------------------------------------------------------
Drug("benperidol", ["Anquil"], first_generation_antipsychotic=True),
Drug("chlorpromazine", ["Largactil"], first_generation_antipsychotic=True),
Drug(
"flupentixol",
["Depixol", "Fluanxol", "flupent.*", "Depixol.*"],
# e.g. flupenthixol, flupenthixol decanoate, flupentixol decanoate
first_generation_antipsychotic=True,
antidepressant=True,
slam_antidepressant_finder=True
),
Drug(
"fluphenazine",
["Modecate", "fluphen.*", "Modecate.*"],
first_generation_antipsychotic=True
),
Drug(
"haloperidol",
[
"Haldol", "Serenase",
"hal[io]p.*", "Dozi.*", "Hald.*", "Serena.*",
# NB Serenase, Serenace.
# CPFT 2013: haloperidol, haloperidol decanoate, Haldol, Haldol
# decanoate, Serenase.
],
first_generation_antipsychotic=True
),
Drug("levomepromazine", ["Nozinan"], first_generation_antipsychotic=True),
Drug("pericyazine", first_generation_antipsychotic=True),
Drug("perphenazine", ["Fentazin"], first_generation_antipsychotic=True),
Drug(
["amitriptyline", "perphenazine"],
["Triptafen"], # special
tricyclic_antidepressant=True,
slam_antidepressant_finder=True
),
Drug("pimozide", ["Orap"], first_generation_antipsychotic=True),
Drug(
"pipotiazine",
["pipot.*", "Piport.*"],
# ... actually (CPFT 2013): pipotiazine, Piportil
first_generation_antipsychotic=True
),
Drug(
"prochlorperazine",
["Stemetil"],
first_generation_antipsychotic=True
),
Drug("promazine", first_generation_antipsychotic=True),
Drug(
"sulpiride",
["Dolmatil", "Sulpor"],
first_generation_antipsychotic=True
),
Drug(
"trifluoperazine",
["Stelazine"],
first_generation_antipsychotic=True
),
Drug(
"zuclopenthixol",
["zuclop.*", "Clopix.*", "Acc?uphase"],
# ... actually (CPFT 2013): zuclopenthixol, zuclopenthixol acetate,
# zuclopenthixol decanoate, Clopixol, Clopixol Decanoate, Acuphase
first_generation_antipsychotic=True
),
# -------------------------------------------------------------------------
# SECOND-GENERATION ANTIPSYCHOTICS
# -------------------------------------------------------------------------
Drug(
"amisulpride",
["amisulp.*", "Solian"],
# ... actually (CPFT 2013): amisulpiride(*), amisulpride, Solian
second_generation_antipsychotic=True
),
Drug(
"aripiprazole",
["Abilify", "ari?pr?ipr?azol.*"],
second_generation_antipsychotic=True
),
Drug(
"asenapine",
["Saphris", "Sycrest"],
second_generation_antipsychotic=True
),
Drug(
"clozapine",
["cloz.*", "Denz.*", "Zapon.*"],
# ... actually (CPFT 2013): clozapine, Clozaril, clozepine(*)
second_generation_antipsychotic=True
),
Drug(
"iloperidone",
["Fanapt", "Fanapta", "Zomaril"],
second_generation_antipsychotic=True
),
Drug("lurasidone", ["Latuda"], second_generation_antipsychotic=True),
Drug(
"olanzapine",
["olanz.*", "Zalast.*", "Zyprex.*", "Zypad.*"],
# ... actually (CPFT 2013): olanzapine, olanzapine embonate,
# olanz(*), olanzepine(*), olanzapin(*), Zyprexa
second_generation_antipsychotic=True
),
Drug(
"paliperidone",
["Invega", "Xeplion"],
second_generation_antipsychotic=True
),
Drug(
"quetiapine",
["quet.*", "Seroquel"],
# ... actually (CPFT 2013): quetiapine, quetiepine(*), Seroquel
second_generation_antipsychotic=True
),
Drug(
"risperidone",
["risp.*", "Consta"],
# ... actually (CPFT 2013): risperidone, risperadone(*), Risperidone
# Consta (~), Risperdal, Risperdal Consta
second_generation_antipsychotic=True
),
Drug(
"sertindole",
["Serdolect", "Serlect"],
second_generation_antipsychotic=True
),
Drug("ziprasidone", second_generation_antipsychotic=True),
Drug(
"zotepine", # not in UK
["Nipolept", "Losizopilon", "Lodopin", "Setous"],
second_generation_antipsychotic=True
),
# -------------------------------------------------------------------------
# STIMULANTS
# -------------------------------------------------------------------------
Drug(
"amfetamine",
[".*am[ph|f]etamine.*", "Adderall"],
# ... actually (CPFT 2013): dextroamphetamine(+), dexamfetamine
stimulant=True
),
Drug(
"methylphenidate",
["Ritalin", "Concerta.*", "Equasym.*", "Medikinet.*"],
# ... actually (CPFT 2013): methylphenidate, Ritalin, Concerta
stimulant=True
),
Drug("modafinil", ["Provigil"], stimulant=True),
# -------------------------------------------------------------------------
# ANTICHOLINERGICS
# -------------------------------------------------------------------------
Drug("benztropine", ["benzatropine"], anticholinergic=True),
Drug("orphenadrine", ["Biorphen", "Disipal"], anticholinergic=True),
Drug("procyclidine", ["Arpicolin", "Kemadrin"], anticholinergic=True),
Drug("trihexyphenidyl", ["Broflex"], anticholinergic=True),
# -------------------------------------------------------------------------
# OTHER MODERN ANTIDEPRESSANTS
# -------------------------------------------------------------------------
Drug(
"agomelatine",
["Valdoxan"],
non_ssri_modern_antidepressant=True,
slam_antidepressant_finder=True
),
Drug(
"bupropion",
["Zyban"],
non_ssri_modern_antidepressant=True
# antidepressant license in US, smoking cessation in UK
),
Drug(
"duloxetine",
["Cymbalta", "Yentreve", "duloxat.*"],
non_ssri_modern_antidepressant=True,
slam_antidepressant_finder=True
),
Drug(
"mirtazapine",
["mirtaz.*", "mirtazepine", | |
""" AnalysisNode is the base class that all analysis nodes inherit from. """
import logging
import operator
from functools import reduce
from random import random
from time import time
from typing import Tuple, Sequence, List, Dict, Optional
from celery.canvas import Signature
from django.conf import settings
from django.core.cache import cache
from django.db import connection, models
from django.db.models import Value, IntegerField
from django.db.models.aggregates import Count
from django.db.models.deletion import CASCADE, SET_NULL
from django.db.models.query_utils import Q
from django.dispatch import receiver
from django.utils import timezone
from django_dag.models import node_factory, edge_factory
from django_extensions.db.models import TimeStampedModel
from lazy import lazy
from model_utils.managers import InheritanceManager
from analysis.exceptions import NonFatalNodeError, NodeParentErrorsException, NodeConfigurationException, \
NodeParentNotReadyException, NodeNotFoundException, NodeOutOfDateException
from analysis.models.enums import GroupOperation, NodeStatus, NodeColors, NodeErrorSource, AnalysisTemplateType
from analysis.models.models_analysis import Analysis
from analysis.models.nodes.node_counts import get_extra_filters_q, get_node_counts_and_labels_dict
from annotation.annotation_version_querysets import get_variant_queryset_for_annotation_version
from classification.models import Classification, post_delete
from library.database_utils import queryset_to_sql
from library.django_utils import thread_safe_unique_together_get_or_create
from library.log_utils import report_event
from library.utils import format_percent
from snpdb.models import BuiltInFilters, Sample, Variant, VCFFilter, Wiki, Cohort, VariantCollection, \
ProcessingStatus, GenomeBuild, AlleleSource
from snpdb.variant_collection import write_sql_to_variant_collection
from variantgrid.celery import app
def _default_position():
return 10 + random() * 50
class AnalysisNode(node_factory('AnalysisEdge', base_model=TimeStampedModel)):
model = Variant
objects = InheritanceManager()
analysis = models.ForeignKey(Analysis, on_delete=CASCADE)
name = models.TextField(blank=True)
x = models.IntegerField(default=_default_position)
y = models.IntegerField(default=_default_position)
version = models.IntegerField(default=0) # Queryset version
appearance_version = models.IntegerField(default=0)
auto_node_name = models.BooleanField(default=True)
output_node = models.BooleanField(default=False)
hide_node_and_descendants_upon_template_configuration_error = models.BooleanField(default=False)
ready = models.BooleanField(default=True)
valid = models.BooleanField(default=False)
visible = models.BooleanField(default=True)
count = models.IntegerField(null=True, default=None)
errors = models.TextField(null=True)
shadow_color = models.TextField(null=True)
load_seconds = models.FloatField(null=True)
parents_should_cache = models.BooleanField(default=False) # Node suggests parents use a cache
# This is set to node/version you cloned - cleared upon modification
cloned_from = models.ForeignKey('NodeVersion', null=True, on_delete=SET_NULL)
status = models.CharField(max_length=1, choices=NodeStatus.choices, default=NodeStatus.DIRTY)
PARENT_CAP_NOT_SET = -1
min_inputs = 1
max_inputs = 1
uses_parent_queryset = True
disabled = False
UPDATE_TASK = "analysis.tasks.node_update_tasks.update_node_task"
NODE_CACHE_TASK = "analysis.tasks.node_update_tasks.node_cache_task"
WAIT_FOR_CACHE_TASK = "analysis.tasks.node_update_tasks.wait_for_cache_task"
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.appearance_dirty = False
self.ancestor_input_samples_changed = False
self.parents_changed = False
self.queryset_dirty = False
self.update_children = True
def get_subclass(self):
""" Returns the node loaded as a subclass """
return AnalysisNode.objects.get_subclass(pk=self.pk)
def check_still_valid(self):
""" Checks that the node is still there and has the version we expect - or throw exception """
version_qs = AnalysisNode.objects.filter(pk=self.pk).values_list("version", flat=True)
if version_qs:
db_version = version_qs[0]
if db_version > self.version:
raise NodeOutOfDateException()
else:
raise NodeNotFoundException(self.pk)
def _get_cohorts_and_sample_visibility_for_node(self) -> Tuple[Sequence[Cohort], Dict]:
""" Visibility = can see on grid """
return [], {}
@staticmethod
def _get_visible_samples_from_cohort(cohorts, visibility):
samples = set()
for c in cohorts:
for s in c.get_samples():
if visibility.get(s):
samples.add(s)
return sorted(samples)
def _get_model_queryset(self):
self.analysis.check_valid()
return get_variant_queryset_for_annotation_version(self.analysis.annotation_version)
def get_cohorts_and_sample_visibility(self, sort=True) -> Tuple[Sequence[Cohort], Dict]:
""" Returns all node + ancestor cohorts (and visibilities of their samples)
The underlying data for all samples/cohorts/sub-cohorts/trios/pedigrees is Cohorts, so we need to know which
to retrieve from the DB (and what sample info to extract from packed columns) to filter + show on the grid """
cohorts, visibility = self._get_cohorts_and_sample_visibility_for_node()
cohorts = set(cohorts)
if self.has_input():
parents, _ = self.get_parent_subclasses_and_errors()
for parent in parents:
c, v = parent.get_cohorts_and_sample_visibility(sort=False)
cohorts.update(c)
visibility.update(v)
# May have sub-cohorts, so get unique base cohorts
cohorts = {c.get_base_cohort() for c in cohorts}
if sort:
cohorts = sorted(cohorts)
return cohorts, visibility
def get_sample_ids(self) -> List[Sample]:
return [s.pk for s in self.get_samples()]
def get_samples_from_node_only_not_ancestors(self):
cohorts, visibility = self._get_cohorts_and_sample_visibility_for_node()
return self._get_visible_samples_from_cohort(cohorts, visibility)
def _get_proband_sample_for_node(self) -> Optional[Sample]:
""" Sample of the object of a study, if known """
return None
def get_proband_sample(self) -> Optional[Sample]:
""" Sample of the object of a study if known """
proband_samples = set()
if proband_sample := self._get_proband_sample_for_node():
proband_samples.add(proband_sample)
if self.has_input():
parents, _ = self.get_parent_subclasses_and_errors()
for parent in parents:
if parent_proband_sample := parent.get_proband_sample():
proband_samples.add(parent_proband_sample)
proband_sample = None
if len(proband_samples) == 1: # If ambiguous, then just give up
proband_sample = proband_samples.pop()
return proband_sample
def get_samples(self) -> List[Sample]:
""" Return all ancestor samples for a node"""
cohorts, visibility = self.get_cohorts_and_sample_visibility(sort=False)
return self._get_visible_samples_from_cohort(cohorts, visibility)
def get_bams_dict(self):
bams_dict = {}
for sample in self.get_samples():
if sample.bam_file_path:
bams_dict[sample.pk] = sample.bam_file_path
return bams_dict
def get_connection_data(self, parent):
""" Return dict of source_id/target_id for sending as JSON """
return {"source_id": parent.get_css_id(),
"target_id": self.get_css_id()}
def get_rendering_args(self):
return {}
def get_css_id(self):
if self.pk:
css_id = f"analysis-node-{self.pk}"
else:
css_id = None
return css_id
def get_update_task(self):
return Signature(self.UPDATE_TASK, args=(self.pk, self.version), immutable=True)
def get_cache_task_args_objs_set(self, force_cache=False):
""" returns Celery tasks which are called in node_utils.get_analysis_update_task before children are loaded
Uses tasks not signatures so they are hashable in a set to be able to remove dupes """
task_args_objs_set = set()
if self.is_valid() and (force_cache or self.use_cache):
if parent := self.get_unmodified_single_parent_node():
return parent.get_cache_task_args_objs_set(force_cache=force_cache)
node_cache, created = NodeCache.get_or_create_for_node(self)
if created:
task_args_objs_set.add((self.NODE_CACHE_TASK, (self.pk, self.version), node_cache))
else:
# Cache has been launched already, we just need to make sure it's ready, so launch a task
# waiting on it, to be used as a dependency
task_args_objs_set.add((self.WAIT_FOR_CACHE_TASK, (node_cache.pk, ), node_cache))
return task_args_objs_set
def get_parent_subclasses_and_errors(self):
qs = AnalysisNode.objects.filter(children=self.id, children__isnull=False)
parents = list(qs.select_subclasses())
num_parents = len(parents)
errors = []
if self.min_inputs != AnalysisNode.PARENT_CAP_NOT_SET and num_parents < self.min_inputs:
errors.append((NodeErrorSource.CONFIGURATION, f"{num_parents} parents < minimum of {self.min_inputs}"))
elif self.max_inputs != AnalysisNode.PARENT_CAP_NOT_SET and num_parents > self.max_inputs:
errors.append((NodeErrorSource.CONFIGURATION, f"{num_parents} parents > maximum of {self.max_inputs}"))
for parent in parents:
if NodeStatus.is_error(parent.status):
errors.append((NodeErrorSource.PARENT, "Parent has errors"))
break
return parents, errors
def get_parent_subclasses(self):
""" Gets parents, throws an Exception if any errors """
parents, errors = self.get_parent_subclasses_and_errors()
if errors:
AnalysisNode.throw_errors_exception(errors)
return parents
def get_non_empty_parents(self, require_parents_ready=True):
""" Returns non-empty (count > 0) parents.
If require_parents_ready=True, die if parents not ready
Otherwise, return them as we don't know if they're empty or not """
non_empty_parents = []
for p in self.get_parent_subclasses():
if p.is_ready():
if p.count == 0:
continue
elif require_parents_ready:
raise NodeParentNotReadyException(f"Parent {p} is not ready!")
non_empty_parents.append(p)
return non_empty_parents
def get_single_parent(self):
if self.min_inputs != 1:
msg = "get_single_parent() should only be called for single parent nodes"
raise ValueError(msg)
parents, errors = self.get_parent_subclasses_and_errors()
if errors:
errors = AnalysisNode.flatten_errors(errors)
msg = "Parent had errors: " + ', '.join(errors)
raise NonFatalNodeError(msg)
num_parents = len(parents)
if num_parents != 1:
msg = f"get_single_parent() called for node with {num_parents} parents"
raise ValueError(msg)
return parents[0]
def get_single_parent_q(self):
parent = self.get_single_parent()
if parent.is_ready():
if parent.count == 0:
q = self.q_none()
else:
q = parent.get_q()
else:
# This should never happen...
raise ValueError("get_single_parent_q called when single parent not ready!!!")
return q
def _get_annotation_kwargs_for_node(self) -> Dict:
""" Override this method per-node.
Any key/values in here MUST be consistent - as annotation_kwargs from multiple
nodes may be combined in the MergeNode
"""
annotation_kwargs = {}
if self.node_cache:
annotation_kwargs.update(self.node_cache.variant_collection.get_annotation_kwargs())
return annotation_kwargs
def get_annotation_kwargs(self) -> Dict:
""" Passed to Variant QuerySet annotate()
Can be used w/FilteredRelation to force a join to a partition, in which case you need to use
the alias given in annotate. @see https://github.com/SACGF/variantgrid/wiki/Data-Partitioning """
a_kwargs = {}
# Only apply parent annotation kwargs if you actually use their queryset
if self.has_input() and self.uses_parent_queryset:
for parent in self.get_non_empty_parents():
a_kwargs.update(parent.get_annotation_kwargs())
a_kwargs.update(self._get_annotation_kwargs_for_node())
return a_kwargs
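# Illustrative sketch only (added comment, not from the original source): a subclass's
# _get_annotation_kwargs_for_node() could force the partition join described in the
# docstring above roughly like this; the relation name and condition are hypothetical.
#
#   from django.db.models import FilteredRelation, Q
#
#   def _get_annotation_kwargs_for_node(self) -> Dict:
#       return {"tag_alias": FilteredRelation("varianttag",
#                                             condition=Q(varianttag__analysis_id=self.analysis_id))}
#
# Callers then use the alias, e.g. qs.annotate(**a_kwargs).filter(tag_alias__isnull=False).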
@property
def queryset_requires_distinct(self):
if self._queryset_requires_distinct():
return True
if self.has_input() and self.uses_parent_queryset:
for parent in self.get_non_empty_parents():
if parent.queryset_requires_distinct:
return True
return False
def _queryset_requires_distinct(self):
""" Override if you need this - don't do by default as it's slow """
return False
@staticmethod
def q_all():
return Q(pk__isnull=False)
@staticmethod
def q_none():
return ~AnalysisNode.q_all()
def _get_cache_key(self) -> str:
nv = NodeVersion.get(self)
return str(nv.pk)
def get_q(self, disable_cache=False):
""" A Django Q object representing the Variant filters for this node.
This is the method to override in subclasses - not get_queryset() as:
Chains of filters on a reverse foreign key relationship cause
multiple joins, so use Q objects which are combined at the end
qs = qs.filter(table_1__val=1)
qs = qs.filter(table_2__val=2)
This is not necessarily equal to:
qs.filter(table_1__val=1, table_2__val=2)
@see https://docs.djangoproject.com/en/2/topics/db/queries/#spanning-multi-valued-relationships
"""
# We need this for node counts, and doing a grid query (each page) - and it can take a few secs to generate
# for some nodes (Comp HET / pheno) so cache it
cache_key = self._get_cache_key() + f"q_cache={disable_cache}"
q: Optional[Q] = None
if settings.ANALYSIS_NODE_CACHE_Q: # Disable for unit tests
q = cache.get(cache_key)
if q is None:
if disable_cache is False:
if cache_q := self._get_node_cache_q():
return cache_q
if self.has_input():
q = self.get_parent_q()
if self.modifies_parents():
if node_q := self._get_node_q():
q &= node_q
else:
q = self.q_all()
if node_q :=
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Different model implementation plus a general port for all the models."""
import functools
from typing import Any, Callable, Dict, Mapping, Optional, Sequence, Tuple
import gin
import immutabledict
import jax
import jax.numpy as jnp
from flax import linen as nn
from jax import random
# pylint: disable=unused-import
from conerf import model_utils, modules, types, warping
def filter_sigma(points, sigma, render_opts):
"""Filters the density based on various rendering arguments.
- `dust_threshold` suppresses any sigma values below a threshold.
- `bounding_box` suppresses any sigma values outside of a 3D bounding box.
Args:
points: the input points for each sample.
sigma: the array of sigma values.
render_opts: a dictionary containing any of the options listed above.
Returns:
A filtered sigma density field.
"""
if render_opts is None:
return sigma
# Clamp densities below the set threshold.
if "dust_threshold" in render_opts:
dust_thres = render_opts.get("dust_threshold", 0.0)
sigma = (sigma >= dust_thres).astype(jnp.float32) * sigma
if "bounding_box" in render_opts:
xmin, xmax, ymin, ymax, zmin, zmax = render_opts["bounding_box"]
render_mask = (
(points[..., 0] >= xmin)
& (points[..., 0] <= xmax)
& (points[..., 1] >= ymin)
& (points[..., 1] <= ymax)
& (points[..., 2] >= zmin)
& (points[..., 2] <= zmax)
)
sigma = render_mask.astype(jnp.float32) * sigma
return sigma
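# Hedged usage sketch (added, not from the original file); render_opts is a plain dict
# and the numbers below are made up:
#
#   render_opts = {
#       "dust_threshold": 0.05,                             # zero out densities below 0.05
#       "bounding_box": (-1.0, 1.0, -1.0, 1.0, 0.0, 2.0),   # xmin, xmax, ymin, ymax, zmin, zmax
#   }
#   sigma = filter_sigma(points, sigma, render_opts)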
@gin.configurable(denylist=["name"])
class NerfModel(nn.Module):
"""Nerf NN Model with both coarse and fine MLPs.
Attributes:
embeddings_dict: a dictionary containing the embeddings of each metadata
key.
use_viewdirs: bool, use viewdirs as a condition.
noise_std: float, std dev of noise added to regularize sigma output.
nerf_trunk_depth: int, the depth of the first part of MLP.
nerf_trunk_width: int, the width of the first part of MLP.
nerf_rgb_branch_depth: int, the depth of the second part of MLP.
nerf_rgb_branch_width: int, the width of the second part of MLP.
nerf_skips: which layers to add skip layers in the NeRF model.
spatial_point_min_deg: min degree of positional encoding for positions.
spatial_point_max_deg: max degree of positional encoding for positions.
hyper_point_min_deg: min degree of positional encoding for hyper points.
hyper_point_max_deg: max degree of positional encoding for hyper points.
viewdir_min_deg: min degree of positional encoding for viewdirs.
viewdir_max_deg: max degree of positional encoding for viewdirs.
alpha_channels: int, the number of alpha_channelss.
rgb_channels: int, the number of rgb_channelss.
activation: the activation function used in the MLP.
sigma_activation: the activation function applied to the sigma density.
near: float, near clip.
far: float, far clip.
num_coarse_samples: int, the number of samples for coarse nerf.
num_fine_samples: int, the number of samples for fine nerf.
use_stratified_sampling: use stratified sampling.
use_white_background: composite rendering on to a white background.
use_linear_disparity: sample linearly in disparity rather than depth.
use_nerf_embed: whether to use the template metadata.
use_alpha_condition: whether to feed the appearance metadata to the alpha
branch.
use_rgb_condition: whether to feed the appearance metadata to the rgb
branch.
use_warp: whether to use the warp field or not.
warp_metadata_config: the config for the warp metadata encoder.
warp_min_deg: min degree of positional encoding for warps.
warp_max_deg: max degree of positional encoding for warps.
"""
embeddings_dict: Mapping[str, Sequence[int]] = gin.REQUIRED
near: float = gin.REQUIRED
far: float = gin.REQUIRED
num_attributes: int = gin.REQUIRED
# NeRF architecture.
use_viewdirs: bool = True
noise_std: Optional[float] = None
nerf_trunk_depth: int = 8
nerf_trunk_width: int = 256
nerf_rgb_branch_depth: int = 1
nerf_rgb_branch_width: int = 128
nerf_skips: Tuple[int] = (4,)
# Mask NeRF architecture
use_attributes_viewdirs: bool = False
mask_noise_std: Optional[float] = None
use_projected_hyper_as_warp: bool = False
use_masking: bool = False
nerf_attributes_trunk_depth: int = 8
nerf_attributes_trunk_width: int = 256
nerf_attributes_branch_depth: int = 1
nerf_attributes_branch_width: int = 128
nerf_attributes_skips: Tuple[int] = (4,)
# NeRF rendering.
num_coarse_samples: int = 196
num_fine_samples: int = 196
use_stratified_sampling: bool = True
use_white_background: bool = False
use_linear_disparity: bool = False
use_sample_at_infinity: bool = True
spatial_point_min_deg: int = 0
spatial_point_max_deg: int = 10
hyper_point_min_deg: int = 0
hyper_point_max_deg: int = 4
viewdir_min_deg: int = 0
viewdir_max_deg: int = 4
use_posenc_identity: bool = True
alpha_channels: int = 1
rgb_channels: int = 3
activation: types.Activation = nn.relu
norm_type: Optional[str] = None
sigma_activation: types.Activation = nn.softplus
# NeRF metadata configs.
use_nerf_embed: bool = False
nerf_embed_cls: Callable[..., nn.Module] = functools.partial(
modules.GLOEmbed, num_dims=8
)
nerf_embed_key: str = "appearance"
use_alpha_condition: bool = False
use_rgb_condition: bool = False
hyper_slice_method: str = "none"
hyper_embed_cls: Callable[..., nn.Module] = functools.partial(
modules.GLOEmbed, num_dims=8
)
hyper_embed_key: str = "appearance"
hyper_use_warp_embed: bool = True
hyper_sheet_mlp_cls: Callable[..., nn.Module] = modules.HyperSheetMLP
hyper_sheet_use_input_points: bool = True
attribute_sheet_mlp_cls: Callable[
..., nn.Module
] = modules.AttributeSheetMLP
# Warp configs.
use_warp: bool = False
warp_field_cls: Callable[..., nn.Module] = warping.SE3Field
warp_embed_cls: Callable[..., nn.Module] = functools.partial(
modules.GLOEmbed, num_dims=8
)
warp_embed_key: str = "warp"
# Attribution configs
decorrelate_hyper_dims: bool = False
use_attribute_conditioning: bool = False
use_attributes_mask_condition: bool = False
mask_with_original_points: bool = False
hyper_point_use_posenc_identity: bool = False
@property
def num_nerf_embeds(self):
return max(self.embeddings_dict[self.nerf_embed_key]) + 1
@property
def num_warp_embeds(self):
return max(self.embeddings_dict[self.warp_embed_key]) + 1
@property
def num_hyper_embeds(self):
return max(self.embeddings_dict[self.hyper_embed_key]) + 1
@property
def nerf_embeds(self):
return jnp.array(self.embeddings_dict[self.nerf_embed_key], jnp.uint32)
@property
def warp_embeds(self):
return jnp.array(self.embeddings_dict[self.warp_embed_key], jnp.uint32)
@property
def hyper_embeds(self):
return jnp.array(
self.embeddings_dict[self.hyper_embed_key], jnp.uint32
)
@property
def has_hyper(self):
"""Whether the model uses a separate hyper embedding."""
return self.hyper_slice_method != "none"
@property
def has_hyper_embed(self):
"""Whether the model uses a separate hyper embedding."""
# If the warp field outputs the hyper coordinates then there is no
# separate hyper embedding.
return self.has_hyper
@property
def has_embeds(self):
return self.has_hyper_embed or self.use_warp or self.use_nerf_embed
@staticmethod
def _encode_embed(embed, embed_fn):
"""Encodes embeddings.
If the channel size is 1, it is just a single metadata ID.
If the channel size is 3:
the first channel is the left metadata ID,
the second channel is the right metadata ID,
the last channel is the progression from left to right (between 0
and 1).
Args:
embed: a (*, 1) or (*, 3) array containing metadata.
embed_fn: the embedding function.
Returns:
A (*, C) array containing encoded embeddings.
"""
if embed.shape[-1] == 3:
left, right, progression = jnp.split(embed, 3, axis=-1)
left = embed_fn(left.astype(jnp.uint32))
right = embed_fn(right.astype(jnp.uint32))
return (1.0 - progression) * left + progression * right
else:
return embed_fn(embed)
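# Worked example of the 3-channel case above (added comment): an embed row (2, 5, 0.25)
# asks for a blend of metadata IDs 2 and 5, a quarter of the way from left to right,
# giving 0.75 * embed_fn(2) + 0.25 * embed_fn(5); a 1-channel row such as (2,) is
# passed straight through embed_fn.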
def encode_hyper_embed(self, metadata):
if self.hyper_slice_method == "axis_aligned_plane":
# return self._encode_embed(metadata[self.hyper_embed_key],
# self.hyper_embed)
if self.hyper_use_warp_embed:
return self._encode_embed(
metadata[self.warp_embed_key], self.warp_embed
)
else:
return self._encode_embed(
metadata[self.hyper_embed_key], self.hyper_embed
)
elif self.hyper_slice_method == "bendy_sheet":
# The bendy sheet shares the metadata of the warp.
if self.hyper_use_warp_embed:
return self._encode_embed(
metadata[self.warp_embed_key], self.warp_embed
)
else:
return self._encode_embed(
metadata[self.hyper_embed_key], self.hyper_embed
)
else:
raise RuntimeError(
f"Unknown hyper slice method {self.hyper_slice_method}."
)
def encode_nerf_embed(self, metadata):
return self._encode_embed(
metadata[self.nerf_embed_key], self.nerf_embed
)
def encode_warp_embed(self, metadata):
return self._encode_embed(
metadata[self.warp_embed_key], self.warp_embed
)
def predict_attributes(self, encoded_hyper):
original_shape = encoded_hyper.shape
return self.attribute_sheet_mlp(
encoded_hyper.reshape((-1, original_shape[-1]))
).reshape((original_shape[0], original_shape[1], self.num_attributes))
def setup(self):
if self.use_nerf_embed and not (
self.use_rgb_condition or self.use_alpha_condition
):
raise ValueError(
"Template metadata is enabled but none of the condition"
"branches are."
)
if self.use_nerf_embed:
self.nerf_embed = self.nerf_embed_cls(
num_embeddings=self.num_nerf_embeds
)
if self.use_warp:
self.warp_embed = self.warp_embed_cls(
num_embeddings=self.num_warp_embeds
)
self.attribute_sheet_mlp = self.attribute_sheet_mlp_cls(
output_channels=self.num_attributes
)
if self.hyper_slice_method == "axis_aligned_plane":
self.hyper_embed = self.hyper_embed_cls(
num_embeddings=self.num_hyper_embeds
)
elif self.hyper_slice_method == "bendy_sheet":
if not self.hyper_use_warp_embed:
self.hyper_embed = self.hyper_embed_cls(
num_embeddings=self.num_hyper_embeds
)
self.hyper_sheet_mlp = self.hyper_sheet_mlp_cls()
if self.use_warp:
self.warp_field = self.warp_field_cls()
norm_layer = modules.get_norm_layer(self.norm_type)
nerf_mlps = {
"coarse": modules.NerfMLP(
trunk_depth=self.nerf_trunk_depth,
trunk_width=self.nerf_trunk_width,
rgb_branch_depth=self.nerf_rgb_branch_depth,
rgb_branch_width=self.nerf_rgb_branch_width,
activation=self.activation,
norm=norm_layer,
skips=self.nerf_skips,
alpha_channels=self.alpha_channels,
rgb_channels=self.rgb_channels,
),
}
if self.use_attribute_conditioning and self.use_masking:
nerf_mlps["coarse_mask"] = modules.MaskNerfMLP(
trunk_depth=self.nerf_attributes_trunk_depth,
trunk_width=self.nerf_attributes_trunk_width,
attribute_branch_depth=self.nerf_attributes_branch_depth,
attribute_branch_width=self.nerf_attributes_branch_width,
activation=self.activation,
norm=norm_layer,
skips=self.nerf_attributes_skips,
attribute_channels=self.num_attributes,
)
if self.num_fine_samples > 0:
nerf_mlps["fine"] = modules.NerfMLP(
trunk_depth=self.nerf_trunk_depth,
trunk_width=self.nerf_trunk_width,
rgb_branch_depth=self.nerf_rgb_branch_depth,
rgb_branch_width=self.nerf_rgb_branch_width,
activation=self.activation,
norm=norm_layer,
skips=self.nerf_skips,
alpha_channels=self.alpha_channels,
rgb_channels=self.rgb_channels,
)
if self.use_attribute_conditioning and self.use_masking:
nerf_mlps["fine_mask"] = modules.MaskNerfMLP(
trunk_depth=self.nerf_attributes_branch_depth,
trunk_width=self.nerf_attributes_branch_width,
attribute_branch_depth=self.nerf_attributes_branch_depth,
attribute_branch_width=self.nerf_attributes_branch_width,
activation=self.activation,
norm=norm_layer,
skips=self.nerf_attributes_skips,
attribute_channels=self.num_attributes,
)
self.nerf_mlps = nerf_mlps
def get_condition_inputs(
self, viewdirs, metadata, metadata_encoded=False, extra_params=None
):
"""Create the condition inputs for the NeRF template."""
extra_params = {} if extra_params is None
't_4'), ('v_9', 't_3'), ('v_8', 't_4'), ('c_1', 't_3')},
'>': {('c_1', 'v_5'), ('t_3', 'v_8'), ('t_3', 'v_9'), ('c_1', 't_4'), ('t_2', 'v_5'), ('t_2', 'v_6'),
('t_3', 'v_10'), ('t_2', 'v_7'), ('c_1', 'v_7'), ('t_4', 'v_8'), ('c_1', 'v_6'), ('t_4', 'v_10'),
('c_1', 't_3'), ('t_2', 'v_9'), ('c_1', 'v_8'), ('c_1', 't_2'), ('c_1', 'v_9'), ('t_4', 'v_5'),
('t_4', 'v_6'), ('t_4', 'v_7'), ('t_3', 'v_5'), ('t_3', 'v_7'), ('t_2', 'v_8'), ('t_3', 'v_6'),
('t_2', 'v_10'), ('t_4', 'v_9'), ('c_1', 'v_10')}},
{'DOMAIN': {'v_5', 't_3', 'v_6', 'v_7', 't_4', 't_2', 'v_9', 'v_8', 'c_1', 'v_10'}, 'City': {'c_1'},
'Town': {'t_3', 't_4', 't_2'}, 'Village': {'v_5', 'v_6', 'v_7', 'v_9', 'v_8', 'v_10'},
'Road': {('t_2', 'c_1'), ('t_4', 'c_1'), ('t_3', 'c_1'), ('v_5', 't_2'), ('t_3', 'v_9'), ('c_1', 't_2'),
('c_1', 't_4'), ('v_10', 't_3'), ('t_2', 'v_5'), ('t_4', 'v_6'), ('v_6', 't_4'), ('t_3', 'v_10'),
('v_10', 't_2'), ('t_2', 'v_10'), ('v_7', 't_4'), ('v_9', 't_3'), ('t_4', 'v_7'), ('c_1', 't_3')},
'>': {('c_1', 'v_5'), ('t_3', 'v_8'), ('t_3', 'v_9'), ('c_1', 't_4'), ('t_2', 'v_5'), ('t_2', 'v_6'),
('t_3', 'v_10'), ('t_2', 'v_7'), ('c_1', 'v_7'), ('t_4', 'v_8'), ('c_1', 'v_6'), ('t_4', 'v_10'),
('c_1', 't_3'), ('t_2', 'v_9'), ('c_1', 'v_8'), ('c_1', 't_2'), ('c_1', 'v_9'), ('t_4', 'v_5'),
('t_4', 'v_6'), ('t_4', 'v_7'), ('t_3', 'v_5'), ('t_3', 'v_7'), ('t_2', 'v_8'), ('t_3', 'v_6'),
('t_2', 'v_10'), ('t_4', 'v_9'), ('c_1', 'v_10')}},
{'DOMAIN': {'v_5', 't_3', 'v_6', 'v_7', 't_4', 't_2', 'v_9', 'v_8', 'c_1', 'v_10'}, 'City': {'c_1'},
'Town': {'t_3', 't_4', 't_2'}, 'Village': {'v_5', 'v_6', 'v_7', 'v_9', 'v_8', 'v_10'},
'Road': {('t_2', 'c_1'), ('t_4', 'c_1'), ('t_3', 'c_1'), ('v_5', 't_2'), ('t_3', 'v_9'), ('t_4', 'v_8'),
('c_1', 't_2'), ('c_1', 't_4'), ('v_10', 't_3'), ('t_2', 'v_5'), ('t_4', 'v_6'), ('v_6', 't_4'),
('t_3', 'v_10'), ('v_10', 't_2'), ('t_2', 'v_10'), ('v_9', 't_3'), ('v_8', 't_4'), ('c_1', 't_3')},
'>': {('c_1', 'v_5'), ('t_3', 'v_8'), ('t_3', 'v_9'), ('c_1', 't_4'), ('t_2', 'v_5'), ('t_2', 'v_6'),
('t_3', 'v_10'), ('t_2', 'v_7'), ('c_1', 'v_7'), ('t_4', 'v_8'), ('c_1', 'v_6'), ('t_4', 'v_10'),
('c_1', 't_3'), ('t_2', 'v_9'), ('c_1', 'v_8'), ('c_1', 't_2'), ('c_1', 'v_9'), ('t_4', 'v_5'),
('t_4', 'v_6'), ('t_4', 'v_7'), ('t_3', 'v_5'), ('t_3', 'v_7'), ('t_2', 'v_8'), ('t_3', 'v_6'),
('t_2', 'v_10'), ('t_4', 'v_9'), ('c_1', 'v_10')}},
{'DOMAIN': {'v_5', 't_3', 'v_6', 'v_7', 't_4', 't_2', 'v_9', 'v_8', 'c_1', 'v_10'}, 'City': {'c_1'},
'Town': {'t_3', 't_4', 't_2'}, 'Village': {'v_5', 'v_6', 'v_7', 'v_9', 'v_8', 'v_10'},
'Road': {('t_2', 'c_1'), ('t_4', 'c_1'), ('t_3', 'c_1'), ('v_5', 't_2'), ('t_3', 'v_9'), ('t_4', 'v_8'),
('c_1', 't_2'), ('c_1', 't_4'), ('v_10', 't_3'), ('t_2', 'v_5'), ('t_3', 'v_10'), ('v_10', 't_2'),
('t_2', 'v_10'), ('v_7', 't_4'), ('v_9', 't_3'), ('t_4', 'v_7'), ('c_1', 't_3'), ('v_8', 't_4')},
'>': {('c_1', 'v_5'), ('t_3', 'v_8'), ('t_3', 'v_9'), ('c_1', 't_4'), ('t_2', 'v_5'), ('t_2', 'v_6'),
('t_3', 'v_10'), ('t_2', 'v_7'), ('c_1', 'v_7'), ('t_4', 'v_8'), ('c_1', 'v_6'), ('t_4', 'v_10'),
('c_1', 't_3'), ('t_2', 'v_9'), ('c_1', 'v_8'), ('c_1', 't_2'), ('c_1', 'v_9'), ('t_4', 'v_5'),
('t_4', 'v_6'), ('t_4', 'v_7'), ('t_3', 'v_5'), ('t_3', 'v_7'), ('t_2', 'v_8'), ('t_3', 'v_6'),
('t_2', 'v_10'), ('t_4', 'v_9'), ('c_1', 'v_10')}},
{'DOMAIN': {'v_5', 't_3', 'v_6', 'v_7', 't_4', 't_2', 'v_9', 'v_8', 'c_1', 'v_10'}, 'City': {'c_1'},
'Town': {'t_3', 't_4', 't_2'}, 'Village': {'v_5', 'v_6', 'v_7', 'v_9', 'v_8', 'v_10'},
'Road': {('t_2', 'c_1'), ('t_4', 'c_1'), ('t_3', 'c_1'), ('t_3', 'v_7'), ('v_5', 't_2'), ('t_4', 'v_8'),
('c_1', 't_2'), ('c_1', 't_4'), ('t_2', 'v_5'), ('t_3', 'v_6'), ('v_10', 't_2'), ('t_2', 'v_10'),
('t_4', 'v_10'), ('v_10', 't_4'), ('v_7', 't_3'), ('c_1', 't_3'), ('v_8', 't_4'), ('v_6', 't_3')},
'>': {('c_1', 'v_5'), ('t_3', 'v_8'), ('t_3', 'v_9'), ('c_1', 't_4'), ('t_2', 'v_5'), ('t_2', 'v_6'),
('t_3', 'v_10'), ('t_2', 'v_7'), ('c_1', 'v_7'), ('t_4', 'v_8'), ('c_1', 'v_6'), ('t_4', 'v_10'),
('c_1', 't_3'), ('t_2', 'v_9'), ('c_1', 'v_8'), ('c_1', 't_2'), ('c_1', 'v_9'), ('t_4', 'v_5'),
('t_4', 'v_6'), ('t_4', 'v_7'), ('t_3', 'v_5'), ('t_3', 'v_7'), ('t_2', 'v_8'), ('t_3', 'v_6'),
('t_2', 'v_10'), ('t_4', 'v_9'), ('c_1', 'v_10')}},
{'DOMAIN': {'v_5', 't_3', 'v_6', 'v_7', 't_4', 't_2', 'v_9', 'v_8', 'c_1', 'v_10'}, 'City': {'c_1'},
'Town': {'t_3', 't_4', 't_2'}, 'Village': {'v_5', 'v_6', 'v_7', 'v_9', 'v_8', 'v_10'},
'Road': {('t_2', 'c_1'), ('t_4', 'c_1'), ('t_3', 'c_1'), ('t_3', 'v_7'), ('v_9', 't_4'), ('v_5', 't_2'),
('c_1', 't_2'), ('c_1', 't_4'), ('t_2', 'v_5'), ('t_3', 'v_6'), ('v_10', 't_2'), ('t_2', 'v_10'),
('t_4', 'v_10'), ('v_10', 't_4'), ('v_7', 't_3'), ('c_1', 't_3'), ('t_4', 'v_9'), ('v_6', 't_3')},
'>': {('c_1', 'v_5'), ('t_3', 'v_8'), ('t_3', 'v_9'), ('c_1', 't_4'), ('t_2', 'v_5'), ('t_2', 'v_6'),
('t_3', 'v_10'), ('t_2', 'v_7'), ('c_1', 'v_7'), ('t_4', 'v_8'), ('c_1', 'v_6'), ('t_4', 'v_10'),
('c_1', 't_3'), ('t_2', 'v_9'), ('c_1', 'v_8'), ('c_1', 't_2'), ('c_1', 'v_9'), ('t_4', 'v_5'),
('t_4', 'v_6'), ('t_4', 'v_7'), ('t_3', 'v_5'), ('t_3', 'v_7'), ('t_2', 'v_8'), ('t_3', 'v_6'),
('t_2', 'v_10'), ('t_4', 'v_9'), ('c_1', 'v_10')}},
{'DOMAIN': {'v_5', 't_3', 'v_6', 'v_7', 't_4', 't_2', 'v_9', 'v_8', 'c_1', 'v_10'}, 'City': {'c_1'},
'Town': {'t_3', 't_4', 't_2'}, 'Village': {'v_5', 'v_6', 'v_7', 'v_9', 'v_8', 'v_10'},
'Road': {('t_2', 'c_1'), ('t_4', 'c_1'), ('t_3', 'c_1'), ('t_3', 'v_7'), ('v_9', 't_4'), ('v_5', 't_2'),
('t_4', 'v_8'), ('c_1', 't_2'), ('c_1', 't_4'), ('t_2', 'v_5'), ('t_3', 'v_6'), ('v_10', 't_2'),
('t_2', 'v_10'), ('v_7', 't_3'), ('c_1', 't_3'), ('t_4', 'v_9'), ('v_6', 't_3'), ('v_8', 't_4')},
'>': {('c_1', 'v_5'), ('t_3', 'v_8'), ('t_3', 'v_9'), ('c_1', 't_4'), ('t_2', 'v_5'), ('t_2', 'v_6'),
('t_3', 'v_10'), ('t_2', 'v_7'), ('c_1', 'v_7'), ('t_4', 'v_8'), ('c_1', 'v_6'), ('t_4', 'v_10'),
('c_1', 't_3'), ('t_2', 'v_9'), ('c_1', 'v_8'), ('c_1', 't_2'), ('c_1', 'v_9'), ('t_4', 'v_5'),
('t_4', 'v_6'), ('t_4', 'v_7'), ('t_3', 'v_5'), ('t_3', 'v_7'), ('t_2', 'v_8'), ('t_3', 'v_6'),
('t_2', 'v_10'), ('t_4', 'v_9'), ('c_1', 'v_10')}},
{'DOMAIN': {'v_5', 't_3', 'v_6', 'v_7', 't_4', 't_2', 'v_9', 'v_8', 'c_1', 'v_10'}, 'City': {'c_1'},
'Town': {'t_3', 't_4', 't_2'}, 'Village': {'v_5', 'v_6', 'v_7', 'v_9', 'v_8', 'v_10'},
'Road': {('t_2', 'c_1'), ('t_4', 'c_1'), ('t_3', 'c_1'), ('t_3', 'v_8'), ('v_5', 't_2'), ('c_1', 't_2'),
('c_1', 't_4'), ('t_2', 'v_5'), ('t_3', 'v_6'), ('v_10', 't_2'), ('t_2', 'v_10'), ('t_4', 'v_10'),
('v_10', 't_4'), ('v_7', 't_4'), ('v_8', 't_3'), ('t_4', 'v_7'), ('c_1', 't_3'), ('v_6', 't_3')},
'>': {('c_1', 'v_5'), ('t_3', 'v_8'), ('t_3', 'v_9'), ('c_1', 't_4'), ('t_2', 'v_5'), ('t_2', 'v_6'),
('t_3', 'v_10'), ('t_2', 'v_7'), ('c_1', 'v_7'), ('t_4', 'v_8'), ('c_1', 'v_6'), ('t_4', 'v_10'),
('c_1', 't_3'), ('t_2', 'v_9'), ('c_1', 'v_8'), ('c_1', 't_2'), ('c_1', 'v_9'), ('t_4', 'v_5'),
('t_4', 'v_6'), ('t_4', 'v_7'), ('t_3', 'v_5'), ('t_3', 'v_7'), ('t_2', 'v_8'), ('t_3', 'v_6'),
('t_2', 'v_10'), ('t_4', 'v_9'), ('c_1', 'v_10')}},
{'DOMAIN': {'v_5', 't_3', 'v_6', 'v_7', 't_4', 't_2', 'v_9', 'v_8', 'c_1', 'v_10'}, 'City': {'c_1'},
'Town': {'t_3', 't_4', 't_2'}, 'Village': {'v_5', 'v_6', 'v_7', 'v_9', 'v_8', 'v_10'},
'Road': {('t_2', 'c_1'), ('t_4', 'c_1'), ('t_3', 'c_1'), ('v_9', 't_4'), ('t_3', 'v_8'), ('v_5', 't_2'),
('c_1', 't_2'), ('c_1', 't_4'), ('t_2', 'v_5'), ('t_3', 'v_6'), ('v_10', 't_2'), ('t_2', 'v_10'),
('t_4', 'v_10'), ('v_10', 't_4'), ('v_8', 't_3'), ('c_1', 't_3'), ('t_4', 'v_9'), ('v_6', 't_3')},
'>': {('c_1', 'v_5'), ('t_3', 'v_8'), ('t_3', 'v_9'), ('c_1', 't_4'), ('t_2', 'v_5'), ('t_2', 'v_6'),
('t_3', 'v_10'), ('t_2', 'v_7'), ('c_1', 'v_7'), ('t_4', 'v_8'), ('c_1', 'v_6'), ('t_4', 'v_10'),
('c_1', 't_3'), ('t_2', 'v_9'), ('c_1', 'v_8'), ('c_1', 't_2'), ('c_1', 'v_9'), ('t_4', 'v_5'),
('t_4', 'v_6'), ('t_4', 'v_7'), ('t_3', 'v_5'), ('t_3', 'v_7'), ('t_2', 'v_8'), ('t_3', 'v_6'),
('t_2', 'v_10'), ('t_4', 'v_9'), ('c_1', 'v_10')}},
{'DOMAIN': {'v_5', 't_3', 'v_6', 'v_7', 't_4', 't_2', 'v_9', 'v_8', 'c_1', 'v_10'}, 'City': {'c_1'},
'Town': {'t_3', 't_4', 't_2'}, 'Village': {'v_5', 'v_6', 'v_7', 'v_9', 'v_8', 'v_10'},
'Road': {('t_2', 'c_1'), ('t_4', 'c_1'), ('t_3', 'c_1'), ('v_9', 't_4'), ('t_3', 'v_8'), ('v_5', 't_2'),
('c_1', 't_2'), ('c_1', 't_4'), ('t_2', 'v_5'), ('t_3', 'v_6'), ('v_10', 't_2'), ('t_2', 'v_10'),
('v_7', 't_4'), ('v_8', 't_3'), ('t_4', 'v_7'), ('c_1', 't_3'), ('t_4', 'v_9'), ('v_6', 't_3')},
'>': {('c_1', 'v_5'), ('t_3', 'v_8'), ('t_3', 'v_9'), ('c_1', 't_4'), ('t_2', 'v_5'), ('t_2', 'v_6'),
('t_3', 'v_10'), ('t_2', 'v_7'), ('c_1', 'v_7'), ('t_4', 'v_8'), ('c_1', 'v_6'), ('t_4', 'v_10'),
('c_1', 't_3'), ('t_2', 'v_9'), ('c_1', 'v_8'), ('c_1', 't_2'), ('c_1', 'v_9'), ('t_4', 'v_5'),
('t_4', 'v_6'), ('t_4', 'v_7'), ('t_3', 'v_5'), ('t_3', 'v_7'), ('t_2', 'v_8'), ('t_3', 'v_6'),
('t_2', 'v_10'), ('t_4', 'v_9'), ('c_1', 'v_10')}},
{'DOMAIN': {'v_5', 't_3', 'v_6', 'v_7', 't_4', 't_2', 'v_9', 'v_8', 'c_1', 'v_10'}, 'City': {'c_1'},
'Town': {'t_3', 't_4', 't_2'}, 'Village': {'v_5', 'v_6', 'v_7', 'v_9', 'v_8', 'v_10'},
'Road': {('t_2', 'c_1'), ('t_4', 'c_1'), ('t_3', 'c_1'), ('v_5', 't_2'), ('t_3', 'v_9'), ('c_1', 't_2'),
('c_1', 't_4'), ('t_2', 'v_5'), ('t_3', 'v_6'), ('v_10', 't_2'), ('t_2', 'v_10'), ('t_4', 'v_10'),
('v_10', 't_4'), ('v_7', 't_4'), ('v_9', 't_3'), ('t_4', 'v_7'), ('c_1', 't_3'), ('v_6', 't_3')},
'>': {('c_1', 'v_5'), ('t_3', 'v_8'), ('t_3', 'v_9'), ('c_1', 't_4'), ('t_2', 'v_5'), ('t_2', 'v_6'),
('t_3', 'v_10'), ('t_2', 'v_7'), ('c_1', 'v_7'), ('t_4', 'v_8'), ('c_1', 'v_6'), ('t_4', 'v_10'),
('c_1', 't_3'), ('t_2', 'v_9'), ('c_1', 'v_8'), ('c_1', 't_2'), ('c_1', 'v_9'), ('t_4', 'v_5'),
('t_4', 'v_6'), ('t_4', 'v_7'), ('t_3', 'v_5'), ('t_3', 'v_7'), ('t_2', 'v_8'), ('t_3', 'v_6'),
('t_2', 'v_10'), ('t_4', 'v_9'), ('c_1', 'v_10')}},
{'DOMAIN': {'v_5', 't_3', 'v_6', 'v_7', 't_4', 't_2', 'v_9', 'v_8', 'c_1', 'v_10'}, 'City': {'c_1'},
'Town': {'t_3', 't_4', 't_2'}, 'Village': {'v_5', 'v_6', 'v_7', 'v_9', 'v_8', 'v_10'},
'Road': {('t_2', 'c_1'), ('t_4', 'c_1'), ('t_3', 'c_1'), ('v_5', 't_2'), ('t_3', 'v_9'), ('t_4', 'v_8'),
('c_1', 't_2'), ('c_1', 't_4'), ('t_2', 'v_5'), ('t_3', 'v_6'), ('v_10', 't_2'), ('t_2', 'v_10'),
('t_4', 'v_10'), ('v_10', 't_4'), ('v_9', 't_3'), ('v_8', 't_4'), ('c_1',
length) - 1
return i
def _getuint(self):
"""Return data as an unsigned int."""
return self._readuint(self.len, 0)
def _setint(self, int_, length=None):
"""Reset the bitstring to have given signed int interpretation."""
# If no length given, and we've previously been given a length, use it.
if length is None and hasattr(self, 'len') and self.len != 0:
length = self.len
if length is None or length == 0:
raise CreationError("A non-zero length must be specified with an int initialiser.")
if int_ >= (1 << (length - 1)) or int_ < -(1 << (length - 1)):
raise CreationError("{0} is too large a signed integer for a bitstring of length {1}. "
"The allowed range is [{2}, {3}].", int_, length, -(1 << (length - 1)),
(1 << (length - 1)) - 1)
if int_ >= 0:
self._setuint(int_, length)
return
# TODO: We should decide whether to just use the _setuint, or to do the bit flipping,
# based upon which will be quicker. If the -ive number is less than half the maximum
# possible then it's probably quicker to do the bit flipping...
# Do the 2's complement thing. Add one, set to minus number, then flip bits.
int_ += 1
self._setuint(-int_, length)
self._invert_all()
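# Worked example of the two's complement trick above (added comment): storing int_ = -5
# in 8 bits does -5 + 1 = -4, then _setuint(4, 8) gives 00000100, and inverting every
# bit yields 11111011, which is -5 in 8-bit two's complement.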
def _readint(self, length, start):
"""Read bits and interpret as a signed int"""
ui = self._readuint(length, start)
if not ui >> (length - 1):
# Top bit not set, number is positive
return ui
# Top bit is set, so number is negative
tmp = (~(ui - 1)) & ((1 << length) - 1)
return -tmp
def _getint(self):
"""Return data as a two's complement signed int."""
return self._readint(self.len, 0)
def _setuintbe(self, uintbe, length=None):
"""Set the bitstring to a big-endian unsigned int interpretation."""
if length is not None and length % 8 != 0:
raise CreationError("Big-endian integers must be whole-byte. "
"Length = {0} bits.", length)
self._setuint(uintbe, length)
def _readuintbe(self, length, start):
"""Read bits and interpret as a big-endian unsigned int."""
if length % 8:
raise InterpretError("Big-endian integers must be whole-byte. "
"Length = {0} bits.", length)
return self._readuint(length, start)
def _getuintbe(self):
"""Return data as a big-endian two's complement unsigned int."""
return self._readuintbe(self.len, 0)
def _setintbe(self, intbe, length=None):
"""Set bitstring to a big-endian signed int interpretation."""
if length is not None and length % 8 != 0:
raise CreationError("Big-endian integers must be whole-byte. "
"Length = {0} bits.", length)
self._setint(intbe, length)
def _readintbe(self, length, start):
"""Read bits and interpret as a big-endian signed int."""
if length % 8:
raise InterpretError("Big-endian integers must be whole-byte. "
"Length = {0} bits.", length)
return self._readint(length, start)
def _getintbe(self):
"""Return data as a big-endian two's complement signed int."""
return self._readintbe(self.len, 0)
def _setuintle(self, uintle, length=None):
if length is not None and length % 8 != 0:
raise CreationError("Little-endian integers must be whole-byte. "
"Length = {0} bits.", length)
self._setuint(uintle, length)
self._reversebytes(0, self.len)
def _readuintle(self, length, start):
"""Read bits and interpret as a little-endian unsigned int."""
if length % 8:
raise InterpretError("Little-endian integers must be whole-byte. "
"Length = {0} bits.", length)
assert start + length <= self.len
absolute_pos = start + self._offset
startbyte, offset = divmod(absolute_pos, 8)
val = 0
if not offset:
endbyte = (absolute_pos + length - 1) // 8
chunksize = 4 # for 'L' format
while endbyte - chunksize + 1 >= startbyte:
val <<= 8 * chunksize
val += struct.unpack('<L', bytes(self._datastore.getbyteslice(endbyte + 1 - chunksize, endbyte + 1)))[0]
endbyte -= chunksize
for b in xrange(endbyte, startbyte - 1, -1):
val <<= 8
val += self._datastore.getbyte(b)
else:
data = self._slice(start, start + length)
assert data.len % 8 == 0
data._reversebytes(0, self.len)
for b in bytearray(data.bytes):
val <<= 8
val += b
return val
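# Worked example (added comment): reading the two data bytes 0x01 0x02 as a 16-bit
# little-endian value walks the bytes from the end, giving (0x02 << 8) + 0x01 = 0x0201
# = 513, i.e. the least significant byte sits first in memory.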
def _getuintle(self):
return self._readuintle(self.len, 0)
def _setintle(self, intle, length=None):
if length is not None and length % 8 != 0:
raise CreationError("Little-endian integers must be whole-byte. "
"Length = {0} bits.", length)
self._setint(intle, length)
self._reversebytes(0, self.len)
def _readintle(self, length, start):
"""Read bits and interpret as a little-endian signed int."""
ui = self._readuintle(length, start)
if not ui >> (length - 1):
# Top bit not set, number is positive
return ui
# Top bit is set, so number is negative
tmp = (~(ui - 1)) & ((1 << length) - 1)
return -tmp
def _getintle(self):
return self._readintle(self.len, 0)
def _setfloat(self, f, length=None):
# If no length given, and we've previously been given a length, use it.
if length is None and hasattr(self, 'len') and self.len != 0:
length = self.len
if length is None or length == 0:
raise CreationError("A non-zero length must be specified with a "
"float initialiser.")
if length == 32:
b = struct.pack('>f', f)
elif length == 64:
b = struct.pack('>d', f)
else:
raise CreationError("floats can only be 32 or 64 bits long, "
"not {0} bits", length)
self._setbytes_unsafe(bytearray(b), length, 0)
def _readfloat(self, length, start):
"""Read bits and interpret as a float."""
if not (start + self._offset) % 8:
startbyte = (start + self._offset) // 8
if length == 32:
f, = struct.unpack('>f', bytes(self._datastore.getbyteslice(startbyte, startbyte + 4)))
elif length == 64:
f, = struct.unpack('>d', bytes(self._datastore.getbyteslice(startbyte, startbyte + 8)))
else:
if length == 32:
f, = struct.unpack('>f', self._readbytes(32, start))
elif length == 64:
f, = struct.unpack('>d', self._readbytes(64, start))
try:
return f
except NameError:
raise InterpretError("floats can only be 32 or 64 bits long, not {0} bits", length)
def _getfloat(self):
"""Interpret the whole bitstring as a float."""
return self._readfloat(self.len, 0)
def _setfloatle(self, f, length=None):
# If no length given, and we've previously been given a length, use it.
if length is None and hasattr(self, 'len') and self.len != 0:
length = self.len
if length is None or length == 0:
raise CreationError("A non-zero length must be specified with a "
"float initialiser.")
if length == 32:
b = struct.pack('<f', f)
elif length == 64:
b = struct.pack('<d', f)
else:
raise CreationError("floats can only be 32 or 64 bits long, "
"not {0} bits", length)
self._setbytes_unsafe(bytearray(b), length, 0)
def _readfloatle(self, length, start):
"""Read bits and interpret as a little-endian float."""
startbyte, offset = divmod(start + self._offset, 8)
if not offset:
if length == 32:
f, = struct.unpack('<f', bytes(self._datastore.getbyteslice(startbyte, startbyte + 4)))
elif length == 64:
f, = struct.unpack('<d', bytes(self._datastore.getbyteslice(startbyte, startbyte + 8)))
else:
if length == 32:
f, = struct.unpack('<f', self._readbytes(32, start))
elif length == 64:
f, = struct.unpack('<d', self._readbytes(64, start))
try:
return f
except NameError:
raise InterpretError("floats can only be 32 or 64 bits long, "
"not {0} bits", length)
def _getfloatle(self):
"""Interpret the whole bitstring as a little-endian float."""
return self._readfloatle(self.len, 0)
def _setue(self, i):
"""Initialise bitstring with unsigned exponential-Golomb code for integer i.
Raises CreationError if i < 0.
"""
if i < 0:
raise CreationError("Cannot use negative initialiser for unsigned "
"exponential-Golomb.")
if not i:
self._setbin_unsafe('1')
return
tmp = i + 1
leadingzeros = -1
while tmp > 0:
tmp >>= 1
leadingzeros += 1
remainingpart = i + 1 - (1 << leadingzeros)
binstring = '0' * leadingzeros + '1' + Bits(uint=remainingpart,
length=leadingzeros).bin
self._setbin_unsafe(binstring)
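# Worked example of the encoding above (added comment): for i = 4, tmp = 5 gives
# leadingzeros = 2 and remainingpart = 5 - 4 = 1, so the codeword is
# '00' + '1' + '01' = '00101'. _readue() below reverses this: it counts the leading
# zeros, adds (1 << 2) - 1 = 3, then adds the 2 bits after the '1' (= 1) to recover 4.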
def _readue(self, pos):
"""Return interpretation of next bits as unsigned exponential-Golomb code.
Raises ReadError if the end of the bitstring is encountered while
reading the code.
"""
oldpos = pos
try:
while not self[pos]:
pos += 1
except IndexError:
raise ReadError("Read off end of bitstring trying to read code.")
leadingzeros = pos - oldpos
codenum = (1 << leadingzeros) - 1
if leadingzeros > 0:
if pos + leadingzeros + 1 > self.len:
raise ReadError("Read off end of bitstring trying to read code.")
codenum += self._readuint(leadingzeros, pos + 1)
pos += leadingzeros + 1
else:
assert codenum == 0
pos += 1
return codenum, pos
def _getue(self):
"""Return data as unsigned exponential-Golomb code.
Raises InterpretError if bitstring is not a single exponential-Golomb code.
"""
try:
value, newpos = self._readue(0)
if value is None or newpos != self.len:
raise ReadError
except
# Source: CoeJoder/spawningtool, spawningtool/coop_constants.py
"""
spawningtool.coop_constants
~~~~~~~~~~~~~~~~~~~~~~~~~~~
Co-op uses Blizzard time like HotS (16 FPS), so it's easier to base the data off of that.
Co-op also uses the LotV launch chronoboost that stays continuously on one building
"""
from spawningtool.hots_constants import *
BO_EXCLUDED = BO_EXCLUDED.copy()
BO_EXCLUDED.update([
'Scarab',
# Raynor
'HyperionAdvancedPointDefenseDrone',
'HyperionVoidCoop', # summoned
'DuskWing', # summoned
'SpiderMine',
# Kerrigan
'KerriganReviveCocoon',
'KerriganVoidCoopEconDrop1',
'KerriganVoidCoopEconDrop2',
'KerriganVoidCoopEconDrop3',
'KerriganVoidCoopEconDrop4',
'KerriganVoidCoopEconDrop5',
'KerriganVoidCoopEconDropLT1',
'NydusNetworkAlly',
'NydusCanalAlly',
'GreaterNydusWormAlly',
'BroodLordCocoon',
# Artanis
'SOAPylonPowerAllyUnit',
'SOAPylonPowerUnit',
# Swann
'VoidCoopARES', # Calldown
# Zagara
'ZagaraReviveCocoon',
'HunterKillerBurrowed', # from Spawn Hunter Killers ability
'HotSSplitterlingBig', # Splitter probably from auto-spawn
'HotSSplitterlingMedium', # Splitter Baneling Spawn
# Vorazun
'VorazunShadowGuard', # calldown
'OracleStasisTrap', # Oracle - Stasis Ward
# Karax
'CarrierRepairDrone',
'SOAThermalLanceTargeter',
'SOAPurifierBeamUnit',
# Abathur
'BiomassPickup',
'ToxicNest',
'LocustFlying',
'BrutaliskPlacement', # Deep Tunnel
'AbathurSymbioteBrutalisk', # paired with building Brutalisk
'AbathurSymbioteLeviathan', # paired with building Leviathan
'ImpalerAbathurPlacement', # Deep Tunnel for Swarm Hosts
'ParasiticBombDummy',
# Alarak
'AlarakReviveBeacon',
'VoidRayTaldarim', # Destroyers that spawn with the Mothership
'AlarakSupplicantWarpTrainDummy', # actual supplicants show up as well
'AlarakSupplicantWarpTrainCreator',
# Nova
'NovaBoombot',
'NovaCoopDecoy',
'NovaReviveBeacon',
'SpiderMineBurrowed',
'HealingDrone',
'Marine_BlackOps',
'Ghost_BlackOps',
'GhostFemale_BlackOps',
'Liberator_BlackOps',
'Raven_BlackOps',
'Goliath_BlackOps',
'SiegeTank_BlackOps',
'HellbatBlackOps',
'Marauder_BlackOps',
'Banshee_BlackOps',
'NovaGriffinBombingRunStrafer',
# Stukov
# More accurate to track when cocoons started
'SISCV',
'SIOverlord',
'SICocoonInfestedCivilian',
'SIInfestedTrooper',
'SIInfestedCivilian',
'InfestedCivilianPlaceholder',
'StukovInfestBroodling',
'SIInfestedMarine',
'StukovInfestedDiamondBack',
'StukovInfestedSiegeTankUprooted',
'StukovInfestedSiegeTankDeepTunnelPlacementUnit',
'SILiberator',
'StukovInfestedBanshee',
'SIQueen',
'StukovApocalisk', # Calldown
'SIVolatileInfested', # not sure what this is but couldn't see it in-game
'StukovAleksander', # Calldown
'ALEKSANDERCRASH_PLACEHOLDER', # Calldown
'SNARE_PLACEHOLDER',
# Fenix
# Fenix Hero Units
'FenixCoop',
'FenixDragoon',
'FenixArbiter',
# Purifier Conclave
'FenixAdeptShade',
'FenixTalisAdeptPhaseShift',
'FenixChampionTaldarinImmortal',
'FenixChampionWarbringerColossus',
'FenixClolarionInterceptor',
'FenixClolarionBomber',
# Dehaka
'EssencePickup',
'DehakaCoopReviveCocoonFootPrint',
'DehakaLocust',
'LocustMPPrecursor',
'DehakaNydusDestroyerTimedNoFood',
'DehakaPlacement',
'NydusDestroyerDeepTunnelPlacement',
'GreaterNydusDestroyerDeepTunnelPlacement',
'DehakaCreeperFlying',
# Calldowns
'DehakaGlevig',
'DehakaGlevigDeepTunnelPlacement',
'CoopDehakaGlevigEggHydralisk',
'CoopDehakaGlevigEggRoach',
'CoopDehakaGlevigEggZergling',
'DehakaMurvar',
'DehakaDakrun',
# using cocoons instead
'DehakaDrone',
'DehakaZerglingLevel2',
'DehakaRoachLevel2',
'DehakaHydraliskLevel2',
'DehakaSwarmHost',
'DehakaUltraliskLevel2',
'DehakaUltraliskLevel3',
'DehakaPrimalSwarmHost',
# Han and Horner
'HHScrapPickup',
'HHMagneticMine_SpawnerUnit',
'HHMagneticMinePrep',
'HHD8SingleCluster',
'HHD8ClusterBomb',
'HHD8CenterCluster',
'HHD8CenterClusterUpgrade',
'HHWraith',
'HHVikingFighter',
'HHRaven',
'HHBattlecruiser',
'HHBomber', # calldown
'HHMercenarySpaceStation',
'HHGriffon',
'HornerAirFleetTargeter',
'HornerAirFleetStrafer',
# Tychus
'TychusResearchCenterUnlocked',
'TychusMedicTransportUnit',
'TychusWarhoundAutoTurret',
'TychusOdinPrecursor',
'TychusOdin',
'TychusMarauderHealingWard',
# Zeratul
'ZeratulCoopReviveBeacon',
'AutomatedAssimilatorZeratul', # ignore because they are built automatically
'ProphecyArtifactHintUnit',
'ZeratulTransportVoidSeeker',
'ZeratulArtifactPickup2',
'ZeratulSummonVoidRay',
'ZeratulDarkArchon',
'ZeratulDisruptorPhased',
'ZeratulArtifactTier3',
'ZeratulArtifactPickup3',
'ZeratulXelNagaConstruct',
'ZeratulSuppressionCrystal',
'ZeratulSummonZealot',
'ZeratulXelNagaConstructCyan',
'ZeratulKhaydarinMonolith',
'ZeratulStalkerGhost',
'ZeratulKhaydarinMonolithProjection',
'ZeratulPhotonCannonProjection',
'ZeratulXelNagaChargedCrystalCyan',
])
BO_CHANGED_EXCLUDED = BO_CHANGED_EXCLUDED.copy()
BO_CHANGED_EXCLUDED.update([
# speculative
'HHWidowMine',
'HHVikingAssault',
'HHVikingFighter',
'HHViking',
'HotsRaptor',
'HotSSwarmling',
'HHReaper',
])
BO_UPGRADES_EXCLUDED = BO_UPGRADES_EXCLUDED.copy()
BO_UPGRADES_EXCLUDED.update([
'SprayTerran',
'SprayProtoss',
'SprayZerg',
# Co-op
'GameTimeGreaterthan5Seconds',
'NydusNetworkCoopAllyLeft',
# Vorazun
'MasteryVorazunTimeStopHasteModifyPlayer',
# Dehaka
'DehakaCoopStage2',
'DehakaColossusLegs',
'DehakaCoopStage3',
'DehakaAirAttackUpgrade',
# Fenix
'FenixNetworkedSuperiorityZealot',
'FenixNetworkedSuperiorityAdept',
'FenixNetworkedSuperiorityImmortal',
'FenixNetworkedSuperiorityColossus',
'FenixNetworkedSuperiorityScout',
'FenixNetworkedSuperiorityCarrier',
# Tychus
'TychusACOwned',
'TychusReaperOwned',
'TychusWarhoundOwned',
'TychusFirebatOwned',
'TychusHERCOwned',
'TychusMarauderOwned',
'TychusGhostOwned',
'TychusSpectreOwned',
'TychusMedicOwned',
'TychusSquadAttackSpeedWithMastery',
'TychusSquadHealthMastery',
'TychusHeroMaxed', # not sure what this is, but with subsequent heroes
# Zeratul
'ZeratulArtifactTier1',
'ZeratulTopBarZealotSquad',
'ZeratulTopBarVoidRaySquad',
'ZeratulWeaponsLevel1',
'ZeratulArmorsLevel1',
'ZeratulShieldsLevel1',
'ZeratulArtifactTier1_CyberneticsCore',
'ZeratulArtifactTier1_DarkShine',
'ZeratulArtifactTier1_RoboticsBay',
'ZeratulArtifactTier2',
'ZeratulTopBarZealotSquad',
'ZeratulTopBarVoidRaySquad',
'ZeratulWeaponsLevel2',
'ZeratulArmorsLevel2',
'ZeratulShieldsLevel2',
'ZeratulArtifactTier2_DarkShine',
'ZeratulArtifactUpgradeTier0A',
'ZeratulArtifactUpgradeTier0B',
'ZeratulArtifactUpgradeTier0C',
'ZeratulArtifactUpgradeTier1A',
'ZeratulArtifactUpgradeTier1B',
'ZeratulArtifactUpgradeTier1C',
'ZeratulArtifactUpgradeTier2A',
'ZeratulArtifactUpgradeTier2B',
'ZeratulArtifactUpgradeTier2C',
'ZeratulArtifactUpgradeTier3A',
'ZeratulArtifactUpgradeTier3B',
'ZeratulArtifactUpgradeTier3C',
'ZeratulArtifactTier1ZeratulTalentUpgrade',
'ZeratulArtifactTier2ZeratulTalentUpgrade',
'ZeratulArtifactTier2_CyberneticsCore',
'ZeratulArtifactTier2_RoboticsBay',
'ZeratulArtifactTier3',
'ZeratulWeaponsLevel3',
'ZeratulArmorsLevel3',
'ZeratulShieldsLevel3',
'ZeratulArtifactTier3ZeratulTalentUpgrade',
'VoidRayPrismaticRange',
'ZeratulArtifactTier3_CyberneticsCore',
'ZeratulArtifactTier3_DarkShine',
'ZeratulArtifactTier3_RoboticsBay',
'ProphecyArtifactsDiscovered',
])
NEW_BUILD_DATA = {
'Lair': {
'build_time': 60,
'built_from': ['Hatchery'],
'display_name': 'Lair',
'race': 'Zerg',
'type': 'Building',
'is_morph': True,
},
'Hive': {
'build_time': 60,
'built_from': ['Lair'],
'display_name': 'Hive',
'race': 'Zerg',
'type': 'Building',
'is_morph': True,
},
# shared across at least Kerrigan and Zagara
'overlordspeed': {
'build_time': 60,
'built_from': ['Hatchery', 'Lair', 'Hive'],
'display_name': 'Pneumatized Carapace',
'race': 'Zerg',
'type': 'Upgrade',
'is_morph': False,
},
'overlordtransport': {
'build_time': 60,
'built_from': ['Hatchery', 'Lair', 'Hive'],
'display_name': 'Ventral Sacs',
'race': 'Zerg',
'type': 'Upgrade',
'is_morph': False,
},
'zerglingmovementspeed': {
'build_time': 60,
'built_from': ['SpawningPool'],
'display_name': 'Metabolic Boost',
'race': 'Zerg',
'type': 'Upgrade',
'is_morph': False,
},
'HotSZerglingHealth': {
'build_time': 60,
'built_from': ['SpawningPool'],
'display_name': 'Hardened Carapace',
'race': 'Zerg',
'type': 'Upgrade',
'is_morph': False,
},
'zerglingattackspeed': {
'build_time': 60,
'built_from': ['SpawningPool'],
'display_name': 'Adrenal Overload',
'race': 'Zerg',
'type': 'Upgrade',
'is_morph': False,
},
'ZerglingArmorShred': {
'build_time': 90,
'built_from': ['SpawningPool'],
'display_name': 'Shredding Claws',
'race': 'Zerg',
'type': 'Upgrade',
'is_morph': False,
},
'QueenCoop': {
'build_time': 50,
'built_from': ['Hatchery', 'Lair', 'Hive'],
'display_name': 'Queen',
'race': 'Zerg',
'type': 'Unit',
'is_morph': False,
},
'VoidCoopHeroicFortitude': {
'build_time': 60,
'built_from': ['EvolutionChamber'],
'display_name': 'Heroic Fortitude',
'race': 'Zerg',
'type': 'Upgrade',
'is_morph': False,
},
'ChitinousPlating': {
'build_time': 60,
'built_from': ['UltraliskCavern'],
'display_name': 'Chitinous Plating',
'race': 'Zerg',
'type': 'Upgrade',
'is_morph': False,
},
'HotSUltraliskBurrowCharge': {
'build_time': 60,
'built_from': ['UltraliskCavern'],
'display_name': 'Burrow Charge',
'race': 'Zerg',
'type': 'Upgrade',
'is_morph': False,
},
'HotSTissueAssimilation': {
'build_time': 60,
'built_from': ['UltraliskCavern'],
'display_name': 'Tissue Assimilation',
'race': 'Zerg',
'type': 'Upgrade',
'is_morph': False,
},
# Kerrigan and Abathur
'HotSRapidRegeneration': {
'build_time': 60,
'built_from': ['Spire', 'GreaterSpire'],
'display_name': 'Rapid Regeneration',
'race': 'Zerg',
'type': 'Upgrade',
'is_morph': False,
},
'MutaliskSunderingGlave': {
'build_time': 120,
'built_from': ['Spire', 'GreaterSpire',],
'display_name': 'Sundering Glave',
'race': 'Zerg',
'type': 'Upgrade',
'is_morph': False,
},
'HotSViciousGlaive': {
'build_time': 90,
'built_from': ['Spire', 'GreaterSpire'],
'display_name': 'Vicious Glave',
'race': 'Zerg',
'type': 'Upgrade',
'is_morph': False,
},
# Artanis and Karax
'AutomatedAssimilator': {
'build_time': 0,
'built_from': [],
'display_name': 'Assimilator',
'race': 'Protoss',
'type': 'Building',
'is_morph': False,
},
'WarpGateResearch': {
'build_time': 60,
'built_from': ['CyberneticsCore'],
'display_name': 'Warp Gate',
'race': 'Protoss',
'type': 'Upgrade',
'is_morph': False,
},
'ImmortalAiur': {
'build_time': 55,
'built_from': ['RoboticsFacility'],
'display_name': 'Immortal',
'race': 'Protoss',
'type': 'Unit',
'is_morph': False,
},
'BlinkTech': {
'build_time': 60,
'built_from': ['TwilightCouncil'],
'display_name': 'Blink',
'race': 'Protoss',
'type': 'Upgrade',
'is_morph': False,
},
# Shared by at least Fenix and Karax
'Charge': {
'build_time': 60,
'built_from': ['TwilightCouncil'],
'display_name': 'Charge',
'race': 'Protoss',
'type': 'Upgrade',
'is_morph': False,
},
'ObserverGraviticBooster': {
'build_time': 60,
'built_from': ['RoboticsBay'],
'display_name': 'Gravitic Boosters',
'race': 'Protoss',
'type': 'Upgrade',
'is_morph': False,
},
'ExtendedThermalLance': {
'build_time': 90,
'built_from': ['RoboticsBay'],
'display_name': 'Extended Thermal Lance',
'race': 'Protoss',
'type': 'Upgrade',
'is_morph': False,
},
# at least Nova
'HighCapacityBarrels': {
'build_time': 60,
'built_from': ['TechLab'],
'display_name': 'Infernal Pre-Igniter',
'race': 'Terran',
'type': 'Upgrade',
'is_morph': False,
},
# Nova and Swann
'BarracksTechReactor': {
'build_time': 0,
'built_from': [],
'display_name': 'Barracks Tech Reactor',
'race': 'Terran',
'type': 'Building',
'is_morph': False,
},
'FactoryTechReactor': {
'build_time': 0,
'built_from': [],
'display_name': 'Factory Tech Reactor',
'race': 'Terran',
'type': 'Building',
'is_morph': False,
},
'StarportTechReactor': {
'build_time': 0,
'built_from': [],
'display_name': 'Starport Tech Reactor',
'race': 'Terran',
'type': 'Building',
'is_morph': False,
},
'AutomatedRefinery': {
'build_time': 0,
'built_from': [],
'display_name': 'Refinery',
'race': 'Terran',
'type': 'Building',
'is_morph': False,
},
'AresClassWeaponsSystem': {
'build_time': 60,
'built_from': ['TechLab'],
'display_name': 'Ares-Class Targeting System',
'race': 'Terran',
'type': 'Upgrade',
'is_morph': False,
},
'HiSecAutoTracking': {
'build_time': 60,
'built_from': ['Engineering'],
'display_name': 'Hi-Sec Auto Tracking',
'race': 'Terran',
'type': 'Upgrade',
'is_morph': False,
},
'TerranBuildingArmor': {
'build_time': 60,
'built_from': ['Engineering'],
'display_name': 'Structure Armor',
'race': 'Terran',
'type': 'Upgrade',
'is_morph': False,
},
}
for value in NEW_BUILD_DATA.values():
value['build_time'] *= FRAMES_PER_SECOND
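# Example of the conversion above: with the 16 frames-per-second Blizzard time mentioned
# in the module docstring, a 60-second build time is stored as 60 * 16 = 960 frames.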
BUILD_DATA = BUILD_DATA.copy()
BUILD_DATA.update(NEW_BUILD_DATA)
COMMANDER_BUILD_DATA = {
'Raynor': {
# Units
# Rapid Recruitment halves all build times
'Marine': {
'build_time': 13,
'built_from': ['Barracks'],
'display_name': 'Marine',
'race': 'Terran',
'type': 'Unit',
'is_morph': False,
},
'Marauder': {
'build_time': 15,
'built_from': ['Barracks'],
'display_name': 'Marauder',
'race': 'Terran',
'type': 'Unit',
'is_morph': False,
},
'Firebat': {
'build_time': 15,
'built_from': ['Barracks'],
'display_name': 'Firebat',
'race': 'Terran',
'type': 'Unit',
'is_morph': False,
},
'Medic': {
'build_time': 13,
'built_from': ['Barracks'],
'display_name': 'Medic',
'race': 'Terran',
'type': 'Unit',
'is_morph': False,
},
'Vulture': {
'build_time': 13,
'built_from': ['Factory'],
'display_name': 'Vulture',
'race': 'Terran',
'type': 'Unit',
'is_morph': False,
},
'SiegeTank': {
'build_time': 23,
'built_from': ['Factory'],
'display_name': 'Siege Tank',
'race': 'Terran',
'type': 'Unit',
'is_morph': False,
},
'VikingFighter': {
'build_time': 21,
'built_from': ['Starport'],
'display_name': 'Viking',
'race': 'Terran',
'type': 'Unit',
'is_morph': False,
},
'Banshee': {
'build_time': 30,
'built_from': ['Starport'],
'display_name': 'Banshee',
'race': 'Terran',
'type': 'Unit',
'is_morph': False,
},
'Battlecruiser': {
'build_time': 45,
'built_from': ['Starport'],
'display_name': 'Battlecruiser',
'race': 'Terran',
'type': 'Unit',
'is_morph': False,
},
# Buildings
'SupplyDepotDrop': {
'build_time': 0,
'built_from': [],
'display_name': 'Supply Depot',
'race': 'Terran',
'type': 'Building',
'is_morph': False,
},
# Upgrades
'StabilizerMedPacks': {
'build_time': 60,
'built_from': ['TechLab'],
'display_name': 'Stabilizer Medpacks',
'race': 'Terran',
'type': 'Upgrade',
'is_morph': False,
},
'FirebatJuggernautPlating': {
'build_time': 60,
'built_from': ['TechLab'],
'display_name': 'Juggernaut Plating',
'race': 'Terran',
'type': 'Upgrade',
'is_morph': False,
},
'BearclawNozzles': {
'build_time': 60,
'built_from': ['TechLab'],
'display_name': 'Incinerator Gauntlets',
'race': 'Terran',
'type': 'Upgrade',
'is_morph': False,
},
'RaynorTalentedTerranInfantryArmorLevel1': {
'build_time': 160,
'built_from': ['EngineeringBay'],
'display_name': 'Terran Infantry Armor Level 1',
'race': 'Terran',
'type': 'Upgrade',
'is_morph': False,
},
'RaynorTalentedTerranInfantryArmorLevel2': {
'build_time': 190,
'built_from': ['EngineeringBay'],
'display_name': 'Terran Infantry Armor Level 2',
'race': 'Terran',
'type': 'Upgrade',
'is_morph': False,
},
'RaynorTalentedTerranInfantryArmorLevel3': {
'build_time': 220,
'built_from': ['EngineeringBay'],
'display_name': 'Terran Infantry Armor Level 3',
'race': 'Terran',
'type': 'Upgrade',
'is_morph': False,
},
'NanoConstructor': {
'build_time': 60,
'built_from': ['TechLab'],
'display_name': 'Replenishable Magazine',
'race': 'Terran',
'type': 'Upgrade',
'is_morph': False,
},
'CerberusMines': {
'build_time': 60,
'built_from': ['TechLab'],
'display_name': 'Cerberus Mines',
'race': 'Terran',
'type': 'Upgrade',
in unset_edges if e in val}
else:
values = {e: val for e in unset_edges}
nx.set_edge_attributes(graph, name=key, values=values)
def nx_get_default_edge_attributes(graph, key, default=None):
import utool as ut
edge_list = list(graph.edges())
partial_attr_dict = nx.get_edge_attributes(graph, key)
attr_dict = ut.dict_subset(partial_attr_dict, edge_list, default=default)
return attr_dict
def nx_get_default_node_attributes(graph, key, default=None):
import utool as ut
node_list = list(graph.nodes())
partial_attr_dict = nx.get_node_attributes(graph, key)
attr_dict = ut.dict_subset(partial_attr_dict, node_list, default=default)
return attr_dict
def nx_gen_node_values(G, key, nodes, default=util_const.NoParam):
"""
Generates attributes values of specific nodes
"""
node_dict = nx_node_dict(G)
if default is util_const.NoParam:
return (node_dict[n][key] for n in nodes)
else:
return (node_dict[n].get(key, default) for n in nodes)
def nx_gen_node_attrs(G, key, nodes=None, default=util_const.NoParam,
on_missing='error', on_keyerr='default'):
"""
Improved generator version of nx.get_node_attributes
Args:
on_missing (str): Strategy for handling nodes missing from G.
Can be {'error', 'default', 'filter'}. defaults to 'error'.
on_keyerr (str): Strategy for handling keys missing from node dicts.
Can be {'error', 'default', 'filter'}. defaults to 'default'
if default is specified, otherwise defaults to 'error'.
Notes:
strategies are:
error - raises an error if key or node does not exist
default - returns node, but uses value specified by default
filter - skips the node
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_graph import * # NOQA
>>> import utool as ut
>>> G = nx.Graph([(1, 2), (2, 3)])
>>> nx.set_node_attributes(G, name='part', values={1: 'bar', 3: 'baz'})
>>> nodes = [1, 2, 3, 4]
>>> #
>>> assert len(list(ut.nx_gen_node_attrs(G, 'part', default=None, on_missing='error', on_keyerr='default'))) == 3
>>> assert len(list(ut.nx_gen_node_attrs(G, 'part', default=None, on_missing='error', on_keyerr='filter'))) == 2
>>> ut.assert_raises(KeyError, list, ut.nx_gen_node_attrs(G, 'part', on_missing='error', on_keyerr='error'))
>>> #
>>> assert len(list(ut.nx_gen_node_attrs(G, 'part', nodes, default=None, on_missing='filter', on_keyerr='default'))) == 3
>>> assert len(list(ut.nx_gen_node_attrs(G, 'part', nodes, default=None, on_missing='filter', on_keyerr='filter'))) == 2
>>> ut.assert_raises(KeyError, list, ut.nx_gen_node_attrs(G, 'part', nodes, on_missing='filter', on_keyerr='error'))
>>> #
>>> assert len(list(ut.nx_gen_node_attrs(G, 'part', nodes, default=None, on_missing='default', on_keyerr='default'))) == 4
>>> assert len(list(ut.nx_gen_node_attrs(G, 'part', nodes, default=None, on_missing='default', on_keyerr='filter'))) == 2
>>> ut.assert_raises(KeyError, list, ut.nx_gen_node_attrs(G, 'part', nodes, on_missing='default', on_keyerr='error'))
Example:
>>> # DISABLE_DOCTEST
>>> # ALL CASES
>>> from utool.util_graph import * # NOQA
>>> import utool as ut
>>> G = nx.Graph([(1, 2), (2, 3)])
>>> nx.set_node_attributes(G, name='full', values={1: 'A', 2: 'B', 3: 'C'})
>>> nx.set_node_attributes(G, name='part', values={1: 'bar', 3: 'baz'})
>>> nodes = [1, 2, 3, 4]
>>> attrs = dict(ut.nx_gen_node_attrs(G, 'full'))
>>> input_grid = {
>>> 'nodes': [None, (1, 2, 3, 4)],
>>> 'key': ['part', 'full'],
>>> 'default': [util_const.NoParam, None],
>>> }
>>> inputs = ut.all_dict_combinations(input_grid)
>>> kw_grid = {
>>> 'on_missing': ['error', 'default', 'filter'],
>>> 'on_keyerr': ['error', 'default', 'filter'],
>>> }
>>> kws = ut.all_dict_combinations(kw_grid)
>>> for in_ in inputs:
>>> for kw in kws:
>>> kw2 = ut.dict_union(kw, in_)
>>> #print(kw2)
>>> on_missing = kw['on_missing']
>>> on_keyerr = kw['on_keyerr']
>>> if on_keyerr == 'default' and in_['default'] is util_const.NoParam:
>>> on_keyerr = 'error'
>>> will_miss = False
>>> will_keyerr = False
>>> if on_missing == 'error':
>>> if in_['key'] == 'part' and in_['nodes'] is not None:
>>> will_miss = True
>>> if in_['key'] == 'full' and in_['nodes'] is not None:
>>> will_miss = True
>>> if on_keyerr == 'error':
>>> if in_['key'] == 'part':
>>> will_keyerr = True
>>> if on_missing == 'default':
>>> if in_['key'] == 'full' and in_['nodes'] is not None:
>>> will_keyerr = True
>>> want_error = will_miss or will_keyerr
>>> gen = ut.nx_gen_node_attrs(G, **kw2)
>>> try:
>>> attrs = list(gen)
>>> except KeyError:
>>> if not want_error:
>>> raise AssertionError('should not have errored')
>>> else:
>>> if want_error:
>>> raise AssertionError('should have errored')
"""
if on_missing is None:
on_missing = 'error'
if default is util_const.NoParam and on_keyerr == 'default':
on_keyerr = 'error'
if nodes is None:
nodes = G.nodes()
# Generate `node_data` nodes and data dictionary
node_dict = nx_node_dict(G)
if on_missing == 'error':
node_data = ((n, node_dict[n]) for n in nodes)
elif on_missing == 'filter':
node_data = ((n, node_dict[n]) for n in nodes if n in G)
elif on_missing == 'default':
node_data = ((n, node_dict.get(n, {})) for n in nodes)
else:
raise KeyError('on_missing={} must be error, filter or default'.format(
on_missing))
# Get `node_attrs` desired value out of dictionary
if on_keyerr == 'error':
node_attrs = ((n, d[key]) for n, d in node_data)
elif on_keyerr == 'filter':
node_attrs = ((n, d[key]) for n, d in node_data if key in d)
elif on_keyerr == 'default':
node_attrs = ((n, d.get(key, default)) for n, d in node_data)
else:
raise KeyError('on_keyerr={} must be error filter or default'.format(on_keyerr))
return node_attrs
def nx_gen_edge_values(G, key, edges=None, default=util_const.NoParam,
on_missing='error', on_keyerr='default'):
"""
Generates attributes values of specific edges
Args:
on_missing (str): Strategy for handling nodes missing from G.
Can be {'error', 'default'}. defaults to 'error'.
on_keyerr (str): Strategy for handling keys missing from node dicts.
Can be {'error', 'default'}. defaults to 'default'
if default is specified, otherwise defaults to 'error'.
"""
if edges is None:
edges = G.edges()
if on_missing is None:
on_missing = 'error'
if on_keyerr is None:
on_keyerr = 'default'
if default is util_const.NoParam and on_keyerr == 'default':
on_keyerr = 'error'
# Generate `data_iter` edges and data dictionary
if on_missing == 'error':
data_iter = (G.adj[u][v] for u, v in edges)
elif on_missing == 'default':
data_iter = (G.adj[u][v] if G.has_edge(u, v) else {}
for u, v in edges)
else:
raise KeyError('on_missing={} must be error, filter or default'.format(
on_missing))
# Get `value_iter` desired value out of dictionary
if on_keyerr == 'error':
value_iter = (d[key] for d in data_iter)
elif on_keyerr == 'default':
value_iter = (d.get(key, default) for d in data_iter)
else:
raise KeyError('on_keyerr={} must be error or default'.format(on_keyerr))
return value_iter
# if default is util_const.NoParam:
# return (G.adj[u][v][key] for u, v in edges)
# else:
# return (G.adj[u][v].get(key, default) for u, v in edges)
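# Added doctest-style sketch, mirroring the examples on the sibling functions in this module:
#   >>> G = nx.Graph([(1, 2), (2, 3)])
#   >>> nx.set_edge_attributes(G, name='weight', values={(1, 2): 5})
#   >>> list(nx_gen_edge_values(G, 'weight', [(1, 2), (2, 3)], default=0))
#   [5, 0]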
def nx_gen_edge_attrs(G, key, edges=None, default=util_const.NoParam,
on_missing='error', on_keyerr='default'):
"""
Improved generator version of nx.get_edge_attributes
Args:
on_missing (str): Strategy for handling nodes missing from G.
Can be {'error', 'default', 'filter'}. defaults to 'error'.
is on_missing is not error, then we allow any edge even if the
endpoints are not in the graph.
on_keyerr (str): Strategy for handling keys missing from node dicts.
Can be {'error', 'default', 'filter'}. defaults to 'default'
if default is specified, otherwise defaults to 'error'.
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_graph import * # NOQA
>>> import utool as ut
>>> G = nx.Graph([(1, 2), (2, 3), (3, 4)])
>>> nx.set_edge_attributes(G, name='part', values={(1, 2): 'bar', (2, 3): 'baz'})
>>> edges = [(1, 2), (2, 3), (3, 4), (4, 5)]
>>> func = ut.partial(ut.nx_gen_edge_attrs, G, 'part', default=None)
>>> #
>>> assert len(list(func(on_missing='error', on_keyerr='default'))) == 3
>>> assert len(list(func(on_missing='error', on_keyerr='filter'))) == 2
>>> ut.assert_raises(KeyError, list, func(on_missing='error', on_keyerr='error'))
>>> #
>>> assert len(list(func(edges, on_missing='filter', on_keyerr='default'))) == 3
>>> assert len(list(func(edges, on_missing='filter', on_keyerr='filter'))) == 2
>>> ut.assert_raises(KeyError, list, func(edges, on_missing='filter', on_keyerr='error'))
>>> #
>>> assert len(list(func(edges, on_missing='default', on_keyerr='default'))) == 4
>>> assert len(list(func(edges, on_missing='default', on_keyerr='filter'))) == 2
>>> ut.assert_raises(KeyError, list, func(edges, on_missing='default', on_keyerr='error'))
"""
if on_missing is None:
on_missing = 'error'
if default is util_const.NoParam and on_keyerr == 'default':
on_keyerr = 'error'
if edges is None:
if G.is_multigraph():
raise NotImplementedError('multigraph edge attributes are not yet supported')
# uvk_iter = G.edges(keys=True)
else:
edges = G.edges()
# Generate `edge_data` edges and data dictionary
if on_missing == 'error':
edge_data = (((u, v), G.adj[u][v]) for u, v in edges)
elif on_missing == 'filter':
edge_data = (((u, v), G.adj[u][v]) for u, v in edges if G.has_edge(u, v))
elif on_missing == 'default':
edge_data = (((u, v), G.adj[u][v])
if G.has_edge(u, v) else ((u, v), {})
for u, v in edges)
else:
raise KeyError('on_missing={}'.format(on_missing))
# Get `edge_attrs` desired value out of dictionary
if on_keyerr == 'error':
edge_attrs = ((e, d[key]) for e, d in edge_data)
elif on_keyerr == 'filter':
edge_attrs = ((e, d[key]) for e, d in edge_data if key in d)
elif on_keyerr == 'default':
edge_attrs = ((e, d.get(key, default)) for e, d in edge_data)
else:
raise KeyError('on_keyerr={}'.format(on_keyerr))
return edge_attrs
# if edges is None:
# if G.is_multigraph():
# edges_ = G.edges(keys=True, data=True)
# else:
# edges_ = G.edges(data=True)
# if default is util_const.NoParam:
# return ((x[:-1], x[-1][key]) for x in edges_
import errno
from optimus.helpers.core import val_to_list
import warnings
import os
from io import UnsupportedOperation
from pprint import pformat
from optimus.engines.base.basedataframe import BaseDataFrame
from optimus.infer import is_str, is_function, is_list_empty
from optimus.helpers.logger import logger
class TestCreator:
def __init__(self, op=None, df=None, name=None, path="", create_path="..", configs={}, **kwargs):
"""
Create python code with unit test functions for Optimus.
:param op: optimus instance
:param df: Dataframe used to generate the tests
:param name: Name of the Test Class
:param path: Folder where tests will be written individually. run() needs to be called to merge all the tests.
:param create_path: Base folder in which the merged test file is created.
:param configs: Dictionary of configuration dictionaries with:
engine: Engine to use.
n_partitions: Number of partitions of created dataframes (if supported)
:param kwargs: Extra options, e.g. imports: libraries to be added to the generated file.
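Example (illustrative sketch; the engine name and tested method below are
assumptions, not taken from the Optimus test suite):
t = TestCreator(op, df, name="cols", configs={"Pandas": {"engine": "pandas"}})
t.create(method="cols.upper", cols="name")
t.run()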
"""
if path and len(path):
create_path += f"/{path}"
self.op = op
self.df = df
self.name = name
self.path = path
self.create_path = create_path
self.options = kwargs
self.configs = configs
self.created = []
def run(self, clear=True):
"""
Return the tests in text format
:return:
"""
filename = self.create_path + "/" + "test_created__" + self.name + ".py"
test_file = open(filename, 'w', encoding='utf-8')
print("Creating file " + filename)
# Imports
_imports = [
"import datetime",
"import numpy as np",
"from optimus.tests.base import TestBase",
"from optimus.helpers.json import json_encoding",
"from optimus.helpers.functions import deep_sort, df_dicts_equal, results_equal",
"\n\ndef Timestamp(t):\n return datetime.datetime.strptime(t, \"%Y-%m-%d %H:%M:%S\")\n\n",
"NaT = np.datetime64('NaT')",
"nan = float(\"nan\")",
"inf = float(\"inf\")",
]
if self.options.get("imports", None) is not None:
for i in self.options["imports"]:
_imports.append(i)
for i in _imports:
test_file.write(i + "\n")
classes = {}
for name, config in self.configs.items():
key = "".join(w.title() for w in self.name.split("_")) + name
classes.update({key: config})
# Class name
base_class = "Test"+list(classes.keys())[0]
cls = "\n\nclass " + base_class + "(TestBase):\n"
test_file.write(cls)
# First Config
test_file.write(" config = " +
pformat(list(classes.values())[0], compact=True) + "\n")
# Global Dataframe
if self.df is not None:
test_file.write(" dict = " + self.df.export(data_types="internal") + "\n")
test_file.write(" maxDiff = None\n")
for root, dirs, files in os.walk(self.create_path):
files.sort()
for file in files:
if file.endswith(".test"):
full_path = os.path.join(root, file)
with open(full_path, 'r', encoding='utf-8') as opened_file:
try:
text = opened_file.read()
test_file.write(text)
opened_file.close()
except UnsupportedOperation:
print("file seems to be empty")
for name, config in list(classes.items())[1:]:
class_config = config.copy()
try_package = class_config.pop("try_package", None)
test_file.write("\n\n")
extra_tab = ""
if try_package:
extra_tab = " "
test_file.write("try:\n")
try_package = val_to_list(try_package)
for package in try_package:
test_file.write(f" import {package} # pyright: reportMissingImports=false\n")
test_file.write("except:\n")
test_file.write(" pass\n")
test_file.write("else:\n")
test_file.write(f"{extra_tab}class Test{name}({base_class}):\n")
test_file.write(f"{extra_tab} config = {pformat(class_config, compact=True)}\n")
test_file.close()
if clear:
self.clear()
print("Done")
def clear(self):
for method, variant in self.created:
self.delete(method, variant)
def create(self, method=None, variant=None, df=None, compare_by="df", select_cols=False, additional_method=[], args=(), **kwargs):
"""
This is a helper function that outputs python tests for dataframes.
:param method: Method to be tested
:param variant: The test name is created from the method param. This will be added as a suffix in case you want to customize the test name.
:param df: Object to be tested
:param compare_by: 'df', 'json' or 'dict'
:param select_cols: If True, restrict the dataframe to the columns passed via cols/args
:param additional_method: Extra methods to chain after the tested method
:param args: Arguments to be used in the method
:param kwargs: Keyword arguments to be used in the method
:return:
"""
buffer = []
def add_buffer(value, tab=1):
buffer.append((" " * tab) + value)
# Create name
name = []
if method is not None:
name.append(method.replace(".", "_"))
additional_method = val_to_list(additional_method)
for a_method in additional_method:
name.append(a_method)
if variant is not None:
name.append(variant)
test_name = "_".join(name)
func_test_name = "test_" + test_name + "()"
print(f"Creating {func_test_name} test function...")
logger.print(func_test_name)
func_test_name = "test_" + test_name + "(self)"
filename = test_name + ".test"
add_buffer("\n", 0)
add_buffer("def " + func_test_name + ":\n")
if select_cols is True:
select_cols = kwargs["cols"] if "cols" in kwargs else args[0] if len(
args) else False
if df is None and self.df is None and (len(args)+len(kwargs)):
df = self.op.create.dataframe(*args, **kwargs)
df_func = df
if df is None:
# Use the main df
df = self.df
if select_cols:
df = df.cols.select(select_cols)
add_buffer(
f"df = self.df.copy().cols.select({pformat(select_cols, compact=True)})\n", 2)
else:
add_buffer("df = self.df.copy()\n", 2)
df_func = df
elif isinstance(df, (BaseDataFrame,)):
if select_cols:
df = df.cols.select(select_cols)
add_buffer("df = self.create_dataframe(data=" + df.export(data_types="internal") + ", force_data_types=True)\n", 2)
df_func = df
else:
if select_cols:
df = [df[col] for col in df if col in select_cols] if select_cols != "*" else df
add_buffer("df = self.create_dataframe(data=" + pformat(df, compact=True, sort_dicts=False) +
", force_data_types=True)\n", 2)
df_func = df
# Process simple arguments
_args = []
for v in args:
if is_function(v):
_args.append(v.__qualname__)
elif isinstance(v, (BaseDataFrame,)):
_df = "self.create_dataframe(data=" + v.export(data_types="internal") + ", force_data_types=True)"
_args.append(_df)
elif isinstance(v, list) and isinstance(v[0], (BaseDataFrame,)):
_dfs = ["self.create_dataframe(data=" + dv.export(data_types="internal") + ", force_data_types=True)" for dv in v]
_dfs = "[" + ", ".join(_dfs) + "]"
_args.append(_dfs)
elif isinstance(v, (str, bool, dict, list)):
_args.append(pformat(v, compact=True, sort_dicts=False, width=800))
else:
_args.append(str(v))
_args = ', '.join(_args)
_kwargs = []
# Process keywords arguments
for k, v in kwargs.items():
if is_function(v):
_kwargs.append(k + "=" + v.__qualname__)
elif isinstance(v, (BaseDataFrame,)):
_df = "self.create_dataframe(data=" + v.export(data_types="internal") + ", force_data_types=True)"
_kwargs.append(k + "=" + _df)
elif isinstance(v, list) and isinstance(v[0], (BaseDataFrame,)):
_dfs = ["self.create_dataframe(data=" + dv.export(data_types="internal") + ", force_data_types=True)" for dv in v]
_dfs = "[" + ", ".join(_dfs) + "]"
_kwargs.append(k + "=" + _dfs)
elif isinstance(v, (str, bool, dict, list)):
_kwargs.append(k + "=" + pformat(v, compact=True, sort_dicts=False, width=800))
else:
_kwargs.append(k + "=" + str(v))
# Separator if we have positional and keyword arguments
separator = ""
if (not is_list_empty(args)) & (not is_list_empty(kwargs)):
separator = ", "
if method is None:
add_buffer("result = df\n", 2)
else:
ams = ""
for m in additional_method:
ams += "." + m + "()"
add_buffer("result = df." + method + "(" + _args + separator + ', '.join(
_kwargs) + ")" + ams + "\n", 2)
# print("expected_df", expected_df)
failed = False
# Apply function
if method is None:
expected_df = df_func
else:
# Here we construct the method to be applied to the source object
_df_func = df_func
for f in method.split("."):
df_func = getattr(df_func, f)
try:
expected_df = df_func(*args, **kwargs)
except Exception as e:
warnings.warn(
f"The operation on test creation {func_test_name} failed, passing the same dataset instead")
print(e)
failed = True
expected_df = _df_func
# Additional Methods
for m in additional_method:
expected_df = getattr(expected_df, m)()
# Checks if output is ok
expected_is_df = isinstance(expected_df, (BaseDataFrame,))
if compare_by == "df" and not expected_is_df:
compare_by = "dict"
if compare_by != "df" and expected_is_df:
add_buffer("result = result.to_dict()\n", 2)
if failed:
add_buffer("# The following value does not represent a correct output of the operation\n", 2)
add_buffer("expected = self.dict\n", 2)
elif compare_by == "df":
if expected_is_df:
expected_df = expected_df.export(data_types="internal")
add_buffer(f"expected = self.create_dataframe(data={expected_df}, force_data_types=True)\n", 2)
else:
if expected_is_df:
expected_df = expected_df.export(data_types=False)
add_buffer(f"expected = {expected_df}\n", 2)
elif is_str(expected_df):
add_buffer(f"expected = {pformat(expected_df, compact=True, sort_dicts=False)}\n", 2)
else:
add_buffer(f"expected = {expected_df}\n", 2)
# Output
if compare_by == "df":
add_buffer("self.assertTrue(result.equals(expected, decimal=True, assertion=True))\n", 2)
elif compare_by == "dict":
if expected_is_df:
add_buffer("self.assertTrue(df_dicts_equal(result, expected, assertion=True))\n", 2)
else:
add_buffer("self.assertTrue(results_equal(result, expected, decimal=5, assertion=True))\n", 2)
elif compare_by == "json":
add_buffer("self.assertEqual(json_encoding(result), json_encoding(expected))\n", 2)
else:
add_buffer("self.assertEqual(result, expected)\n", 2)
filename = self.create_path + "/" + filename
if not os.path.exists(os.path.dirname(filename)):
try:
os.makedirs(os.path.dirname(filename))
except OSError as exc: # Guard against race condition
if exc.errno != errno.EEXIST:
raise
# Write file
test_file = open(filename, 'w', encoding='utf-8')
for b in buffer:
test_file.write(b)
test_file.close()
self.created.append((method, variant))
# return "".join(buffer)
def delete(self, method=None, variant=None):
"""
This is a helper function that deletes the python test files used to construct the final Test file.
:param method: Method whose test file should be deleted
:param variant: Variant suffix of the test, so you can delete a test file the same way you created it
:return:
"""
if variant is None:
variant = ""
elif method is not None:
variant = "_" + variant
# Create the test function name. If method is None we just test the create.df function and do not transform the data frame
# line tests are using the properly scaled noise.
if fit_stat=="RCHI2":
noise *= np.nanmean([mcpars_line["NOISE_SCALE"]["med"], mcpars_no_line["NOISE_SCALE"]["med"]])
# Determine wavelength bounds of F-test. For [OIII]5007, we use the full profile (core + outflow)
# and determine the 0.1 and 99.9 percentiles of the flux of the full profile to set the bounds
# of the test.
if isinstance(remove_lines,str):
full_profile = np.median(mccomps_line[remove_lines],axis=0)
elif isinstance(remove_lines,list):
full_profile = np.median(np.sum([mccomps_line[l] for l in remove_lines],axis=1),axis=0)
# min_wave, max_wave, eval_ind, nchannel = get_wavelength_range(lam_gal,noise,velscale,full_profile,line_list[test_line["line"]])
min_wave, max_wave, eval_ind, nchannel = get_wavelength_range(lam_gal[fit_mask],noise[fit_mask],velscale,full_profile[fit_mask])#,line_list[test_line["line"]])
# storage arrays for residuals in [OIII] test region
resid_line = np.empty((max_like_niter+1,nchannel))
resid_no_line = np.empty((max_like_niter+1,nchannel))
resid_total = np.empty((max_like_niter+1,len(lam_gal[fit_mask])))
for i in range(max_like_niter+1):
resid_line[i,:] = mccomps_line['RESID'][i,:][fit_mask][eval_ind]
resid_no_line[i,:] = mccomps_no_line['RESID'][i,:][fit_mask][eval_ind]
resid_total[i,:] = mccomps_line['RESID'][i,:][fit_mask]
# Perform Bayesian A/B test
pval, pval_upp, pval_low, conf, conf_upp, conf_low, dist, disp, signif, overlap = bayesian_AB_test(mccomps_line['RESID'][0,:][fit_mask], mccomps_no_line['RESID'][0,:][fit_mask], full_profile[fit_mask], lam_gal[fit_mask], noise[fit_mask], galaxy[fit_mask], min_wave, max_wave, eval_ind, nchannel, run_dir)
# Calculate sum-of-square of residuals and its uncertainty
ssr_ratio, ssr_ratio_err, ssr_no_line, ssr_no_line_err, ssr_line, ssr_line_err = ssr_test(resid_line,resid_no_line,run_dir)
# Perform f-test model comparison(for normally distributed model residuals)
f_stat, f_stat_err, f_pval, f_pval_err, f_conf, f_conf_err = f_test(resid_line,resid_no_line,1.0,4.0,run_dir)
# Calculate total residual noise
resid_noise_no_line = np.median([np.std(resid_no_line[i,:]) for i in range(np.shape(resid_no_line)[0])])
resid_noise_no_line_err = np.std([np.std(resid_no_line[i,:]) for i in range(np.shape(resid_no_line)[0])])
resid_noise_line = np.median([np.std(resid_line[i,:]) for i in range(np.shape(resid_line)[0])])
resid_noise_line_err = np.std([np.std(resid_line[i,:]) for i in range(np.shape(resid_line)[0])])
total_resid_noise = np.median([np.std(resid_total[i,:]) for i in range(np.shape(resid_total)[0])])
total_resid_noise_err = np.std([np.std(resid_total[i,:]) for i in range(np.shape(resid_total)[0])])
# Chi2 Metrics
# Chi-squared is evaluated in the region of the line for the two models
# The ratio of chi squared for the outflow to the no-outflow model indicates
# how much the model improved over the other.
chi2_line, chi2_line_err, chi2_no_line, chi2_no_line_err, chi2_ratio, chi2_ratio_err = chi2_metric(range(len(lam_gal)),mccomps_line, mccomps_no_line)
if verbose:
print('\n{0:<30}{1:<30}{2:<30}{3:<30}'.format('Parameter', 'Best-fit Value', '+/- 1-sigma','Flag'))
print('-----------------------------------------------------------------------------------------------------')
# Sort into arrays
pname = []
med = []
std = []
flag = []
for key in mcpars_line:
pname.append(key)
med.append(mcpars_line[key]['med'])
std.append(mcpars_line[key]['std'])
flag.append(mcpars_line[key]['flag'])
i_sort = np.argsort(pname)
pname = np.array(pname)[i_sort]
med = np.array(med)[i_sort]
std = np.array(std)[i_sort]
flag = np.array(flag)[i_sort]
if verbose:
for i in range(0,len(pname),1):
print('{0:<30}{1:<30.2f}{2:<30.2f}{3:<30}'.format(pname[i], med[i], std[i], flag[i]))
print('-----------------------------------------------------------------------------------------------------')
print('\n Test Statistics:')
print('-----------------------------------------------------------------------------------------------------')
print('{0:<30}{1:<30}{2:<30}{3:<30}'.format('','Statistic','Value','Uncertainty') )
print('-----------------------------------------------------------------------------------------------------')
print('{0:<30}'.format('A/B Likelihood Test::'))
print('{0:<30}{1:<30}{2:<30.6f}{3:<30}'.format('','Confidence:',conf,"(-%0.6f,+%0.6f)" % (conf_low,conf_upp )) )
print('{0:<30}{1:<30}{2:<30.6f}{3:<30}'.format('','p-value:',pval,"(-%0.6f,+%0.6f)" % (pval_low,pval_upp)))
print('{0:<30}{1:<30}{2:<30.6f}'.format('','Statistical Distance:',dist))
print('{0:<30}{1:<30}{2:<30.6f}'.format('','Dispersion:',disp))
print('{0:<30}{1:<30}{2:<30.6f}'.format('','Significance (sigma):',signif))
print('{0:<30}{1:<30}{2:<30.6f}'.format('','Overlap (1-sigma):',overlap))
print('{0:<30}'.format('ANOVA (F-test):'))
print('{0:<30}{1:<30}{2:<30.4f}{3:<30.4f}'.format('','Confidence:',f_conf, f_conf_err ) )
print('{0:<30}{1:<30}{2:<30.4f}{3:<30.4f}'.format('','F-statistic:',f_stat,f_stat_err))
print('{0:<30}{1:<30}{2:<30.4e}{3:<30.4e}'.format('','p-value:',f_pval,f_pval_err))
print('{0:<30}'.format('Chi-Squared Metrics:'))
print('{0:<30}{1:<30}{2:<30.6f}{3:<30.6f}'.format('','Chi-squared Ratio:',chi2_ratio, chi2_ratio_err ) )
print('{0:<30}{1:<30}{2:<30.6f}{3:<30.6f}'.format('','Chi-squared no-outflow:',chi2_no_line,chi2_no_line_err))
print('{0:<30}{1:<30}{2:<30.6f}{3:<30.6f}'.format('','Chi-squared outflow:',chi2_line,chi2_line_err))
print('{0:<30}'.format('Sum-of-Squares of Residuals (SSR):'))
print('{0:<30}{1:<30}{2:<30.6f}{3:<30.6f}'.format('','SSR ratio:',ssr_ratio,ssr_ratio_err))
print('{0:<30}{1:<30}{2:<30.6f}{3:<30.6f}'.format('','SSR no-outflow:',ssr_no_line,ssr_no_line_err))
print('{0:<30}{1:<30}{2:<30.6f}{3:<30.6f}'.format('','SSR outflow:',ssr_line,ssr_line_err))
print('{0:<30}'.format('Residual Noise:'))
print('{0:<30}{1:<30}{2:<30.6f}{3:<30.6f}'.format('','Median spec noise:',np.median(noise),np.std(noise)))
print('{0:<30}{1:<30}{2:<30.6f}{3:<30.6f}'.format('','Total resid noise:',total_resid_noise,total_resid_noise_err))
print('{0:<30}{1:<30}{2:<30.6f}{3:<30.6f}'.format('','No-line resid:',resid_noise_no_line,resid_noise_no_line_err))
print('{0:<30}{1:<30}{2:<30.6f}{3:<30.6f}'.format('','Line resid:',resid_noise_line,resid_noise_line_err))
print('-----------------------------------------------------------------------------------------------------')
# Write to log
write_log(mcpars_no_line,'no_line_test',run_dir)
write_log(mcpars_line,'line_test',run_dir)
write_log((pval, pval_upp, pval_low, conf, conf_upp, conf_low, dist, disp, signif, overlap,
f_conf,f_conf_err,f_stat,f_stat_err,f_pval,f_pval_err,
chi2_ratio,chi2_ratio_err,chi2_no_line,chi2_no_line_err,chi2_line,chi2_line_err,
# amp_metric,fwhm_metric,voff_metric,voff_metric_err,
ssr_ratio,ssr_ratio_err,ssr_no_line,ssr_no_line_err,ssr_line,ssr_line_err,
np.median(noise), np.std(noise),
total_resid_noise,total_resid_noise_err,resid_noise_no_line,resid_noise_no_line_err,resid_noise_line,resid_noise_line_err),
'line_test_stats',run_dir)
# Write test statistics to FITS table
stats_dict = {
"PVAL": {"best": pval, "sigma_low": pval_low, "sigma_upp": pval_upp },
"CONF": {"best": conf, "sigma_low": conf_low, "sigma_upp": conf_upp},
"DIST": {"best": dist, "sigma_low": 0.0, "sigma_upp": 0.0},
"DISP": {"best": disp, "sigma_low": 0.0, "sigma_upp": 0.0},
"SIGNIF": {"best": signif, "sigma_low": 0.0, "sigma_upp": 0.0},
"OVERLAP": {"best": overlap, "sigma_low": 0.0, "sigma_upp": 0.0},
"F_CONF": {"best": f_conf, "sigma_low": f_conf_err, "sigma_upp": f_conf_err},
"F_STAT": {"best": f_stat, "sigma_low": f_stat_err, "sigma_upp": f_stat_err},
"F_PVAL": {"best": f_pval, "sigma_low": f_pval_err, "sigma_upp": f_pval_err},
"CHI2_LINE": {"best": chi2_line, "sigma_low": chi2_line_err, "sigma_upp": chi2_line_err},
"CHI2_NO_LINE": {"best": chi2_no_line, "sigma_low": chi2_no_line_err, "sigma_upp": chi2_no_line_err},
"CHI2_RATIO": {"best": chi2_ratio, "sigma_low": chi2_ratio_err, "sigma_upp": chi2_ratio_err},
"SSR_RATIO": {"best": ssr_ratio, "sigma_low": ssr_ratio_err, "sigma_upp": ssr_ratio_err},
"SSR_NO_LINE": {"best": ssr_no_line, "sigma_low": ssr_no_line_err, "sigma_upp": ssr_no_line_err},
"SSR_LINE": {"best": ssr_line, "sigma_low": ssr_line_err, "sigma_upp": ssr_line_err},
"MEDIAN_NOISE": {"best": np.median(noise), "sigma_low": np.std(noise), "sigma_upp": np.std(noise)},
"RESID_NOISE": {"best": total_resid_noise, "sigma_low": total_resid_noise_err, "sigma_upp": total_resid_noise_err},
"RESID_NOISE_NO_LINE": {"best": resid_noise_no_line, "sigma_low": resid_noise_no_line_err, "sigma_upp": resid_noise_no_line_err},
"RESID_NOISE_LINE": {"best": resid_noise_line, "sigma_low": resid_noise_line_err, "sigma_upp": resid_noise_line_err},
}
write_test_stats(stats_dict,run_dir)
# Reinstate the original line list
line_list = original_line_list
# Make plot
# Get best fit model components for each model
param_names_line = [key for key in param_dict ]
params_line = [mcpars_line[key]['med'] for key in param_dict ]
fit_type = 'line_test'
output_model = False
comp_dict_line = fit_model(params_line,
param_names_line,
line_list,
combined_line_list,
lam_gal,
galaxy,
noise,
comp_options,
losvd_options,
host_options,
power_options,
poly_options,
opt_feii_options,
uv_iron_options,
balmer_options,
outflow_test_options,
host_template,
opt_feii_templates,
uv_iron_template,
balmer_template,
stel_templates,
fwhm_gal,
fit_mask,
velscale,
run_dir,
fit_type,
fit_stat,
output_model)
param_names_no_line = [key for key in param_dict_no_line ]
params_no_line = [mcpars_no_line[key]['med'] for key in param_dict_no_line ]
fit_type = 'line_test'
output_model = False
comp_dict_no_line = fit_model(params_no_line,
param_names_no_line,
line_list_no_line,
combined_line_list_no_line,
lam_gal,
galaxy,
noise,
comp_options,
losvd_options,
host_options,
power_options,
poly_options,
opt_feii_options,
uv_iron_options,
balmer_options,
outflow_test_options,
host_template,
opt_feii_templates,
uv_iron_template,
balmer_template,
stel_templates,
fwhm_gal,
fit_mask,
velscale,
run_dir,
fit_type,
fit_stat,
output_model)
# Make comparison plots of outflow and no-outflow models
line_test_plot(lam_gal,comp_dict_line,comp_dict_no_line,line_list,line_list_no_line,
params_line,params_no_line,param_names_line,param_names_no_line,min_wave,max_wave,run_dir)
# Write results to FITS
write_line_test_results(mcpars_line,comp_dict_line,mcpars_no_line,comp_dict_no_line,fit_mask,run_dir,binnum,spaxelx,spaxely)
return
##################################################################################
def get_wavelength_range(lam_gal, noise, velscale, full_profile):#, line_dict):
# Get indices where we perform f-test
eval_ind = range(len(lam_gal))
# number of channels in the test region
nchannel = len(eval_ind)
# if the number of channels <= 6 (the number of degrees of freedom for the double-Gaussian model), the calculated f-statistic
# will be zero. To resolve this, we pad the range on each side until there are more than 6 channels (e.g. nchannel = 6 becomes 8).
if nchannel <= 6:
add_chan = 7 - nchannel# number of channels to add to each side; minimum is 7 channels since deg. of freedom = 6
lower_pad = np.arange(eval_ind[0]-add_chan,eval_ind[0],1)#np.arange(eval_ind[0]-add_chan,eval_ind[0],1)
upper_pad = np.arange(eval_ind[-1]+1,eval_ind[-1]+1+add_chan,1)
eval_ind = np.concatenate([lower_pad, eval_ind, upper_pad],axis=0)
nchannel = len(eval_ind)
min_wave, max_wave = lam_gal[eval_ind[0]], lam_gal[eval_ind[-1]]
return min_wave, max_wave, eval_ind, nchannel
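# Illustrative sketch (not part of BADASS): with the current implementation the
# evaluation window spans the full wavelength array, so the returned bounds are
# simply its first and last elements. All array values below are made up.
def _example_wavelength_range():
    import numpy as np
    lam = np.linspace(4900.0, 5100.0, 201)
    noise = np.full_like(lam, 0.1)
    profile = np.exp(-0.5 * ((lam - 5007.0) / 5.0) ** 2)
    min_wave, max_wave, eval_ind, nchannel = get_wavelength_range(lam, noise, 69.0, profile)
    return min_wave, max_wave, nchannel  # (4900.0, 5100.0, 201)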
##################################################################################
def write_test_stats(stats_dict,run_dir):
"""
Writes statistics for outflow and line testing to a FITS table.
"""
#
#
# Write Outflow model FITS tables
# Extract elements from dictionaries
par_names = []
par_best = []
sig_low = []
sig_upp = []
for key in stats_dict:
par_names.append(key)
par_best.append(stats_dict[key]['best'])
sig_low.append(stats_dict[key]['sigma_low'])
sig_upp.append(stats_dict[key]['sigma_upp'])
if 0:
for i in range(0,len(par_names),1):
print(par_names[i],par_best[i],sig_low[i],sig_upp[i])
# Write best-fit parameters to FITS table
col1 = fits.Column(name='parameter', format='30A', array=par_names)
col2 = fits.Column(name='best_fit' , format='E' , array=par_best)
col3 = fits.Column(name='sigma_low' , format='E' , array=sig_low)
col4 = fits.Column(name='sigma_upp' , format='E' , array=sig_upp)
cols = fits.ColDefs([col1,col2,col3,col4])
hdu = fits.BinTableHDU.from_columns(cols)
hdu.writeto(run_dir.joinpath('log', 'test_stats.fits'),overwrite=True)
#
return
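# Illustrative sketch (not part of BADASS): reading back the table written by
# write_test_stats(); assumes astropy.io.fits is available as `fits`, as above.
def _example_read_test_stats(run_dir):
    with fits.open(run_dir.joinpath('log', 'test_stats.fits')) as hdul:
        tab = hdul[1].data  # the BinTableHDU written above
    return dict(zip(tab['parameter'], tab['best_fit']))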
##################################################################################
def line_test_plot(lam_gal,comp_dict_outflow,comp_dict_no_outflow,line_list_outflows,line_list_no_outflows,
params_outflows,params_no_outflows,param_names_outflows,param_names_no_outflows,min_wave,max_wave,run_dir):
"""
The plotting function for test_line(). It plots both the outflow
and no_outflow results.
"""
def poly_label(kind):
if kind=="ppoly":
order = len([p for p in param_names_outflows if p.startswith("PPOLY_") ])-1
if kind=="apoly":
order = len([p for p in param_names_outflows if p.startswith("APOLY_")])-1
if kind=="mpoly":
order = len([p for p in param_names_outflows if p.startswith("MPOLY_")])-1
#
ordinal = lambda n: "%d%s" % (n,"tsnrhtdd"[(n//10%10!=1)*(n%10<4)*n%10::4])
return ordinal(order)
def calc_new_center(center,voff):
"""
Calculate the new line center shifted
by some velocity offset (in km/s).
"""
c = 299792.458 # speed of light (km/s)
new_center = (voff*center)/c + center
return new_center
# Create plot window and axes
fig = plt.figure(figsize=(14,11))
gs = gridspec.GridSpec(9,1)
ax1 = fig.add_subplot(gs[0:3,0]) # No outflow
ax2 = fig.add_subplot(gs[3:4,0]) # No outflow residuals
ax3 = fig.add_subplot(gs[5:8,0]) # Outflow
ax4 = fig.add_subplot(gs[8:9,0]) # Outflow residuals
gs.update(wspace=0.0, hspace=0.0) # set the spacing between axes.
# No outflow model (ax1,ax2)
# Put params in dictionary
p = dict(zip(param_names_outflows,params_outflows))
for key in comp_dict_outflow:
if (key=='DATA'):
ax1.plot(comp_dict_outflow['WAVE'],comp_dict_outflow['DATA'],linewidth=0.5,color='white',label='Data',zorder=0)
elif (key=='MODEL'):
ax1.plot(lam_gal,comp_dict_outflow[key], color='xkcd:bright red', linewidth=1.0, label='Model', zorder=15)
elif (key=='HOST_GALAXY'):
ax1.plot(comp_dict_outflow['WAVE'], comp_dict_outflow['HOST_GALAXY'], color='xkcd:bright green', linewidth=0.5, linestyle='-', label='Host/Stellar')
elif (key=='POWER'):
ax1.plot(comp_dict_outflow['WAVE'], comp_dict_outflow['POWER'], color='xkcd:red' , linewidth=0.5, linestyle='--', label='AGN Cont.')
elif (key=='PPOLY'):
ax1.plot(comp_dict_outflow['WAVE'], comp_dict_outflow['PPOLY'], color='xkcd:magenta' , linewidth=0.5, linestyle='-', label='%s-order Poly.' % (poly_label("ppoly")))
elif (key=='APOLY'):
ax1.plot(comp_dict_outflow['WAVE'], comp_dict_outflow['APOLY'], color='xkcd:bright purple' , linewidth=0.5, linestyle='-', label='%s-order Add. Poly.' % (poly_label("apoly")))
elif (key=='MPOLY'):
ax1.plot(comp_dict_outflow['WAVE'], comp_dict_outflow['MPOLY'], color='xkcd:lavender' , linewidth=0.5, linestyle='-', label='%s-order Mult. Poly.' % (poly_label("mpoly")))
elif (key in ['NA_OPT_FEII_TEMPLATE','BR_OPT_FEII_TEMPLATE']):
ax1.plot(comp_dict_outflow['WAVE'], comp_dict_outflow['NA_OPT_FEII_TEMPLATE'], color='xkcd:yellow', linewidth=0.5, linestyle='-' , label='Narrow FeII')
ax1.plot(comp_dict_outflow['WAVE'], comp_dict_outflow['BR_OPT_FEII_TEMPLATE'], color='xkcd:orange', linewidth=0.5, linestyle='-' , label='Broad FeII')
elif (key in ['F_OPT_FEII_TEMPLATE','S_OPT_FEII_TEMPLATE','G_OPT_FEII_TEMPLATE','Z_OPT_FEII_TEMPLATE']):
if key=='F_OPT_FEII_TEMPLATE':
ax1.plot(comp_dict_outflow['WAVE'], comp_dict_outflow['F_OPT_FEII_TEMPLATE'], color='xkcd:yellow', linewidth=0.5, linestyle='-' , label='F-transition FeII')
elif key=='S_OPT_FEII_TEMPLATE':
ax1.plot(comp_dict_outflow['WAVE'], comp_dict_outflow['S_OPT_FEII_TEMPLATE'], color='xkcd:mustard', linewidth=0.5, linestyle='-' , label='S-transition FeII')
elif key=='G_OPT_FEII_TEMPLATE':
ax1.plot(comp_dict_outflow['WAVE'], comp_dict_outflow['G_OPT_FEII_TEMPLATE'], color='xkcd:orange', linewidth=0.5, linestyle='-' , label='G-transition FeII')
elif key=='Z_OPT_FEII_TEMPLATE':
ax1.plot(comp_dict_outflow['WAVE'], comp_dict_outflow['Z_OPT_FEII_TEMPLATE'], color='xkcd:rust', linewidth=0.5, linestyle='-' , label='Z-transition FeII')
elif (key=='UV_IRON_TEMPLATE'):
ax1.plot(comp_dict_outflow['WAVE'], comp_dict_outflow['UV_IRON_TEMPLATE'], color='xkcd:bright purple', linewidth=0.5, linestyle='-' , label='UV Iron' )
elif (key=='BALMER_CONT'):
ax1.plot(comp_dict_outflow['WAVE'], comp_dict_outflow['BALMER_CONT'], color='xkcd:bright green', linewidth=0.5, linestyle='--' , label='Balmer Continuum' )
# Plot emission lines by cross-referencing comp_dict with line_list
if (key in line_list_outflows):
if (line_list_outflows[key]["line_type"]=="na"):
ax1.plot(comp_dict_outflow['WAVE'], comp_dict_outflow[key], color='xkcd:cerulean', linewidth=0.5, linestyle='-', label='Narrow/Core Comp.')
if (line_list_outflows[key]["line_type"]=="br"):
ax1.plot(comp_dict_outflow['WAVE'], comp_dict_outflow[key], color='xkcd:bright teal', linewidth=0.5, linestyle='-', label='Broad Comp.')
if (line_list_outflows[key]["line_type"]=="out"):
right_value)
taxon_name_id = self.adaptor.taxon_name.insert(taxon_id = taxon_id,
name = taxon[2][:255],
name_class = 'scientific name')
#Note the name field is limited to 255, some SwissProt files
#have a multi-species name which can be longer. So truncate this.
left_value += 1
right_value -= 1
parent_taxon_id = taxon_id
if common_name:
taxon_name_id = self.adaptor.taxon_name.insert(taxon_id = taxon_id,
name = common_name,
name_class = 'common name')
return taxon_id
def _fix_name_class(self, entrez_name):
"""Map Entrez name terms to those used in taxdump (PRIVATE).
We need to make this conversion to match the taxon_name.name_class
values used by the BioSQL load_ncbi_taxonomy.pl script.
e.g.
"ScientificName" -> "scientific name",
"EquivalentName" -> "equivalent name",
"Synonym" -> "synonym",
"""
#Add any special cases here:
#
#known = {}
#try:
# return known[entrez_name]
#except KeyError:
# pass
#Try automatically by adding spaces before each capital
def add_space(letter):
if letter.isupper():
return " "+letter.lower()
else:
return letter
answer = "".join([add_space(letter) for letter in entrez_name]).strip()
assert answer == answer.lower()
return answer
def _get_taxon_id_from_ncbi_taxon_id(self, ncbi_taxon_id,
scientific_name = None,
common_name = None):
"""Get the taxon id for this record from the NCBI taxon ID (PRIVATE).
ncbi_taxon_id - string containing an NCBI taxon id
scientific_name - string, used if a stub entry is recorded
common_name - string, used if a stub entry is recorded
This searches the taxon table using ONLY the NCBI taxon ID
to find the matching taxon table entry's ID (database key).
If the species isn't in the taxon table, and the fetch_NCBI_taxonomy
flag is true, Biopython will attempt to go online using Bio.Entrez
to fetch the official NCBI lineage, recursing up the tree until an
existing entry is found in the database or the full lineage has been
fetched.
Otherwise the NCBI taxon ID, scientific name and common name are
recorded as a minimal stub entry in the taxon and taxon_name tables.
Any partial information about the lineage from the SeqRecord is NOT
recorded. This should mean that (re)running the BioSQL script
load_ncbi_taxonomy.pl can fill in the taxonomy lineage.
Returns the taxon id (database key for the taxon table, not
an NCBI taxon ID).
"""
assert ncbi_taxon_id
taxon_id = self.adaptor(self.adaptor.taxon.ncbi_taxon_id == ncbi_taxon_id).select()
if taxon_id:
#Good, we have mapped the NCBI taxid to a taxon table entry
return taxon_id[0].taxon_id
# At this point, as far as we can tell, this species isn't
# in the taxon table already. So we'll have to add it.
parent_taxon_id = None
rank = "species"
genetic_code = None
mito_genetic_code = None
species_names = []
if scientific_name:
species_names.append(("scientific name", scientific_name))
if common_name:
species_names.append(("common name", common_name))
if self.fetch_NCBI_taxonomy:
#Go online to get the parent taxon ID!
handle = Entrez.efetch(db="taxonomy",id=ncbi_taxon_id,retmode="XML")
taxonomic_record = Entrez.read(handle)
if len(taxonomic_record) == 1:
assert taxonomic_record[0]["TaxId"] == str(ncbi_taxon_id), \
"%s versus %s" % (taxonomic_record[0]["TaxId"],
ncbi_taxon_id)
parent_taxon_id = self._get_taxon_id_from_ncbi_lineage( \
taxonomic_record[0]["LineageEx"])
rank = taxonomic_record[0]["Rank"]
genetic_code = taxonomic_record[0]["GeneticCode"]["GCId"]
mito_genetic_code = taxonomic_record[0]["MitoGeneticCode"]["MGCId"]
species_names = [("scientific name",
taxonomic_record[0]["ScientificName"])]
try:
for name_class, names in taxonomic_record[0]["OtherNames"].iteritems():
name_class = self._fix_name_class(name_class)
if not isinstance(names, list):
#The Entrez parser seems to return single entry
#lists as just a string which is annoying.
names = [names]
for name in names:
#Want to ignore complex things like ClassCDE entries
if isinstance(name, basestring):
species_names.append((name_class, name))
except KeyError:
#OtherNames isn't always present,
#e.g. NCBI taxon 41205, Bromheadia finlaysoniana
pass
else:
pass
# If we are not allowed to go online, we will record the bare minimum;
# as long as the NCBI taxon id is present, then (re)running
# load_ncbi_taxonomy.pl should fill in the taxonomy lineage
# (and update the species names).
#
# I am NOT going to try and record the lineage, even if it
# is in the record annotation as a list of names, as we won't
# know the NCBI taxon IDs for these parent nodes.
taxon_id = self.adaptor.taxon.insert(parent_taxon_id = parent_taxon_id,
ncbi_taxon_id = ncbi_taxon_id,
node_rank = rank,
genetic_code = genetic_code,
mito_genetic_code = mito_genetic_code,
left_value = None,
right_value = None)
#Record the scientific name, common name, etc
for name_class, name in species_names:
taxon_name_id = self.adaptor.taxon_name.insert(taxon_id = taxon_id,
name = name[:255],
name_class = name_class)
return taxon_id
def _get_taxon_id_from_ncbi_lineage(self, taxonomic_lineage):# TODO
"""This is recursive! (PRIVATE).
taxonomic_lineage - list of taxonomy dictionaries from Bio.Entrez
First dictionary in the list is the taxonomy root; the last is the most specific entry (the species).
Each dictionary includes:
- TaxID (string, NCBI taxon id)
- Rank (string, e.g. "species", "genus", ..., "phylum", ...)
- ScientificName (string)
(and that is all at the time of writing)
This method will record all the lineage given, returning the taxon id
(database key, not NCBI taxon id) of the final entry (the species).
"""
ncbi_taxon_id = taxonomic_lineage[-1]["TaxId"]
#Is this in the database already? Check the taxon table...
taxon_id = self.adaptor(self.adaptor.taxon.ncbi_taxon_id == ncbi_taxon_id).select()
if taxon_id:
# we could verify that the Scientific Name etc in the database
# is the same and update it or print a warning if not...
return taxon_id[0].taxon_id
#We have to record this.
if len(taxonomic_lineage) > 1:
#Use recursion to find out the taxon id (database key) of the parent.
parent_taxon_id = self._get_taxon_id_from_ncbi_lineage(taxonomic_lineage[:-1])
assert isinstance(parent_taxon_id, int) or isinstance(parent_taxon_id, long), repr(parent_taxon_id)
else:
parent_taxon_id = None
# INSERT new taxon
rank = taxonomic_lineage[-1].get("Rank", None)
taxon_id = self.adaptor.taxon.insert(parent_taxon_id = parent_taxon_id,
ncbi_taxon_id = ncbi_taxon_id,
node_rank = rank,)
assert isinstance(taxon_id, int) or isinstance(taxon_id, long), repr(taxon_id)
# ... and its name in taxon_name
scientific_name = taxonomic_lineage[-1].get("ScientificName", None)
if scientific_name:
taxon_name_id = self.adaptor.taxon_name.insert(taxon_id = taxon_id,
name = scientific_name[:255],
name_class = 'scientific name')
return taxon_id
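# Illustrative sketch (not part of this module): the kind of lineage list that
# Bio.Entrez returns and how the recursive loader consumes it. The TaxIds,
# ranks and names below are examples only, and `loader` stands for an instance
# of this class.
#
# lineage = [
#     {"TaxId": "131567", "Rank": "no rank", "ScientificName": "cellular organisms"},
#     {"TaxId": "2759", "Rank": "superkingdom", "ScientificName": "Eukaryota"},
# ]
# loader._get_taxon_id_from_ncbi_lineage(lineage)
# # inserts a taxon (and taxon_name) row per entry, parent first, and returns
# # the database key of the last (most specific) entry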
def _get_accession_from_seqrecord(self, record):
'''Return the (accession, version) pair for a SeqRecord (PRIVATE).
The version defaults to 0 when the record id has no ".<integer>" suffix,
and the first entry of any "accessions" annotation overrides the
id-derived accession.
'''
if record.id.count(".") == 1: # try to get a version from the id
#This assumes the string is something like "XXXXXXXX.123"
accession, version = record.id.split('.')
try:
version = int(version)
except ValueError:
accession = record.id
version = 0
else: # otherwise just use a version of 0
accession = record.id
version = 0
if "accessions" in record.annotations \
and isinstance(record.annotations["accessions"],list) \
and record.annotations["accessions"]:
#Take the first accession (i.e. the first one if there is more than one)
accession = record.annotations["accessions"][0]
return accession, version
def _load_bioentry_table(self, record):
"""Fill the bioentry table with sequence information (PRIVATE).
record - SeqRecord object to add to the database.
"""
# get the pertinent info and insert it
accession,version = self._get_accession_from_seqrecord(record)
#Find the taxon id (this is not just the NCBI Taxon ID)
#NOTE - If the species isn't defined in the taxon table,
#a new minimal entry is created.
taxon_id = self._get_taxon_id(record)#TO DO
if "gi" in record.annotations:
identifier = record.annotations["gi"]
else:
if len(record.id) <= 40:
identifier = record.id
else:
identifier = None
#Allow description and division to default to NULL as in BioPerl.
description = getattr(record, 'description', None)
division = record.annotations.get("data_file_division", None)
bioentry_id = self.adaptor.bioentry.insert(biodatabase_id = self.dbid,
taxon_id = taxon_id,
name = record.name,
accession = accession,
identifier = identifier,
division = division,
description = description,
version = version)
return bioentry_id
def _load_bioentry_date(self, record, bioentry_id):
"""Add the effective date of the entry into the database.
record - a SeqRecord object with an annotated date
bioentry_id - corresponding database identifier
"""
# dates are GenBank style, like:
# 14-SEP-2000
date = record.annotations.get("date",
strftime("%d-%b-%Y", gmtime()).upper())
if isinstance(date, list) : date = date[0]
annotation_tags_id = self._get_ontology_id("Annotation Tags")
date_id = self._get_term_id("date_changed", annotation_tags_id)
date_oid = self.adaptor.bioentry_qualifier_value.insert(bioentry_id = bioentry_id,
term_id = date_id,
value = date,
rank = 1)
def _load_biosequence(self, record, bioentry_id):
"""Record a SeqRecord's sequence and alphabet in the database (PRIVATE).
record - a SeqRecord object with a seq property
bioentry_id - corresponding database identifier
"""
if record.seq is None:
#The biosequence table entry is optional, so if we haven't
#got a sequence, we don't need to write to the table.
return
# determine the string representation of the alphabet
if isinstance(record.seq.alphabet, Alphabet.DNAAlphabet):
alphabet = "dna"
elif isinstance(record.seq.alphabet, Alphabet.RNAAlphabet):
alphabet = "rna"
elif isinstance(record.seq.alphabet, Alphabet.ProteinAlphabet):
alphabet = "protein"
else:
alphabet = "unknown"
if isinstance(record.seq, UnknownSeq):
seq_str =
'''
if not self.stack.parseInner(self.rxPacket):
return
data = self.rxPacket.data
body = self.rxPacket.body.data
if not isinstance(body, basestring):
emsg = "Invalid format of cookie packet body"
console.terse(emsg + '\n')
self.stack.incStat('invalid_cookie')
self.remove()
return
#raise raeting.TransactionError(emsg)
if len(body) != raeting.COOKIE_PACKER.size:
emsg = "Invalid length of cookie packet body"
console.terse(emsg + '\n')
self.stack.incStat('invalid_cookie')
self.remove()
return
#raise raeting.TransactionError(emsg)
cipher, nonce = raeting.COOKIE_PACKER.unpack(body)
remote = self.stack.estates[self.reid]
msg = remote.privee.decrypt(cipher, nonce, remote.pubber.key)
if len(msg) != raeting.COOKIESTUFF_PACKER.size:
emsg = "Invalid length of cookie stuff"
console.terse(emsg + '\n')
self.stack.incStat('invalid_cookie')
self.remove()
return
#raise raeting.TransactionError(emsg)
shortraw, seid, deid, oreo = raeting.COOKIESTUFF_PACKER.unpack(msg)
if seid != remote.eid or deid != self.stack.estate.eid:
emsg = "Invalid seid or deid fields in cookie stuff"
console.terse(emsg + '\n')
self.stack.incStat('invalid_cookie')
self.remove()
return
#raeting.TransactionError(emsg)
self.oreo = binascii.hexlify(oreo)
remote.publee = nacling.Publican(key=shortraw)
self.initiate()
def initiate(self):
'''
Send initiate request to cookie response to hello request
'''
if self.reid not in self.stack.estates:
emsg = "Invalid remote destination estate id '{0}'".format(self.reid)
#raise raeting.TransactionError(emsg)
console.terse(emsg + '\n')
self.stack.incStat('invalid_remote_eid')
self.remove()
return
remote = self.stack.estates[self.reid]
vcipher, vnonce = self.stack.estate.priver.encrypt(remote.privee.pubraw,
remote.pubber.key)
fqdn = remote.fqdn.ljust(128, ' ')
stuff = raeting.INITIATESTUFF_PACKER.pack(self.stack.estate.priver.pubraw,
vcipher,
vnonce,
fqdn)
cipher, nonce = remote.privee.encrypt(stuff, remote.publee.key)
oreo = binascii.unhexlify(self.oreo)
body = raeting.INITIATE_PACKER.pack(remote.privee.pubraw,
oreo,
cipher,
nonce)
packet = packeting.TxPacket(stack=self.stack,
kind=raeting.pcktKinds.initiate,
embody=body,
data=self.txData)
try:
packet.pack()
except raeting.PacketError as ex:
console.terse(ex + '\n')
self.stack.incStat("packing_error")
self.remove()
return
self.transmit(packet)
console.concise("Allower Do Initiate at {0}\n".format(self.stack.store.stamp))
def allow(self):
'''
Process ackInitiate packet
Perform allowment in response to ack to initiate packet
'''
if not self.stack.parseInner(self.rxPacket):
return
self.stack.estates[self.reid].allowed = True
self.ackFinal()
#self.remove()
def rejected(self):
'''
Process nack packet
terminate in response to nack
'''
if not self.stack.parseInner(self.rxPacket):
return
self.remove()
console.concise("Allower rejected at {0}\n".format(self.stack.store.stamp))
self.stack.incStat(self.statKey())
def ackFinal(self):
'''
Send ack to ack Initiate to terminate transaction
'''
if self.reid not in self.stack.estates:
emsg = "Invalid remote destination estate id '{0}'".format(self.reid)
#raise raeting.TransactionError(emsg)
console.terse(emsg + '\n')
self.stack.incStat('invalid_remote_eid')
self.remove()
return
body = ""
packet = packeting.TxPacket(stack=self.stack,
kind=raeting.pcktKinds.ack,
embody=body,
data=self.txData)
try:
packet.pack()
except raeting.PacketError as ex:
console.terse(ex + '\n')
self.stack.incStat("packing_error")
self.remove()
return
self.transmit(packet)
self.remove()
console.concise("Allower Ack Final at {0}\n".format(self.stack.store.stamp))
self.stack.incStat("allow_initiate_complete")
class Allowent(Correspondent):
'''
RAET protocol Allowent Correspondent class Dual of Allower
CurveCP handshake
'''
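# Packet flow summary (derived from the transaction methods of Allower above
# and Allowent below):
#
#   Allower (initiator)                Allowent (correspondent)
#   hello    ----------------------->  hello()    -> sends cookie
#   cookie() <-----------------------  cookie
#   initiate ----------------------->  initiate() -> sends ack, sets allowed
#   allow()  <-----------------------  ack
#   ackFinal ----------------------->  final()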
Timeout = 4.0
RedoTimeoutMin = 0.25 # initial timeout
RedoTimeoutMax = 1.0 # max timeout
def __init__(self, redoTimeoutMin=None, redoTimeoutMax=None, **kwa):
'''
Setup instance
'''
kwa['kind'] = raeting.trnsKinds.allow
if 'reid' not in kwa:
emsg = "Missing required keyword argumens: '{0}'".format('reid')
raise TypeError(emsg)
super(Allowent, self).__init__(**kwa)
self.redoTimeoutMax = redoTimeoutMax or self.RedoTimeoutMax
self.redoTimeoutMin = redoTimeoutMin or self.RedoTimeoutMin
self.redoTimer = aiding.StoreTimer(self.stack.store,
duration=self.redoTimeoutMin)
remote = self.stack.estates[self.reid]
if not remote.joined:
emsg = "Must be joined first"
console.terse(emsg + '\n')
self.stack.incStat('unjoined_allow_attempt')
return
#raise raeting.TransactionError(emsg)
#Current .sid was set by stack from rxPacket.data sid so it is the new rsid
if not remote.validRsid(self.sid):
emsg = "Stale sid '{0}' in packet".format(self.sid)
console.terse(emsg + '\n')
self.stack.incStat('stale_sid_allow_attempt')
return
#raise raeting.TransactionError(emsg)
remote.rsid = self.sid #update last received rsid for estate
remote.rtid = self.tid #update last received rtid for estate
self.oreo = None #keep locally generated oreo around for redos
remote.refresh() # refresh short term keys and .allowed
self.prep() # prepare .txData
self.add(self.index)
def receive(self, packet):
"""
Process received packet belonging to this transaction
"""
super(Allowent, self).receive(packet) # self.rxPacket = packet
if packet.data['tk'] == raeting.trnsKinds.allow:
if packet.data['pk'] == raeting.pcktKinds.hello:
self.hello()
elif packet.data['pk'] == raeting.pcktKinds.initiate:
self.initiate()
elif packet.data['pk'] == raeting.pcktKinds.ack:
self.final()
elif packet.data['pk'] == raeting.pcktKinds.nack: # rejected
self.rejected()
def process(self):
'''
Perform time based processing of transaction
'''
if self.timeout > 0.0 and self.timer.expired:
self.nack()
console.concise("Allowent timed out at {0}\n".format(self.stack.store.stamp))
return
# redo timer expired: retransmit the last sent packet (cookie or ack)
if self.redoTimer.expired:
duration = min(
max(self.redoTimeoutMin,
self.redoTimer.duration) * 2.0,
self.redoTimeoutMax)
self.redoTimer.restart(duration=duration)
if self.txPacket:
if self.txPacket.data['pk'] == raeting.pcktKinds.cookie:
self.transmit(self.txPacket) #redo
console.concise("Allowent Redo Cookie at {0}\n".format(self.stack.store.stamp))
if self.txPacket.data['pk'] == raeting.pcktKinds.ack:
self.transmit(self.txPacket) #redo
console.concise("Allowent Redo Ack at {0}\n".format(self.stack.store.stamp))
def prep(self):
'''
Prepare .txData
'''
remote = self.stack.estates[self.reid]
self.txData.update( sh=self.stack.estate.host,
sp=self.stack.estate.port,
dh=remote.host,
dp=remote.port,
se=self.stack.estate.eid,
de=self.reid,
tk=self.kind,
cf=self.rmt,
bf=self.bcst,
si=self.sid,
ti=self.tid, )
def hello(self):
'''
Process hello packet
'''
if not self.stack.parseInner(self.rxPacket):
return
data = self.rxPacket.data
body = self.rxPacket.body.data
if not isinstance(body, basestring):
emsg = "Invalid format of hello packet body"
console.terse(emsg + '\n')
self.stack.incStat('invalid_hello')
self.remove()
return
#raise raeting.TransactionError(emsg)
if len(body) != raeting.HELLO_PACKER.size:
emsg = "Invalid length of hello packet body"
console.terse(emsg + '\n')
self.stack.incStat('invalid_hello')
self.remove()
return
#raise raeting.TransactionError(emsg)
plain, shortraw, cipher, nonce = raeting.HELLO_PACKER.unpack(body)
remote = self.stack.estates[self.reid]
remote.publee = nacling.Publican(key=shortraw)
msg = self.stack.estate.priver.decrypt(cipher, nonce, remote.publee.key)
if msg != plain :
emsg = "Invalid plain not match decrypted cipher"
console.terse(emsg + '\n')
self.stack.incStat('invalid_hello')
self.remove()
return
#raise raeting.TransactionError(emsg)
self.cookie()
def cookie(self):
'''
Send Cookie Packet
'''
if self.reid not in self.stack.estates:
emsg = "Invalid remote destination estate id '{0}'".format(self.reid)
#raise raeting.TransactionError(emsg)
console.terse(emsg + '\n')
self.stack.incStat('invalid_remote_eid')
self.remove()
return
remote = self.stack.estates[self.reid]
oreo = self.stack.estate.priver.nonce()
self.oreo = binascii.hexlify(oreo)
stuff = raeting.COOKIESTUFF_PACKER.pack(remote.privee.pubraw,
self.stack.estate.eid,
remote.eid,
oreo)
cipher, nonce = self.stack.estate.priver.encrypt(stuff, remote.publee.key)
body = raeting.COOKIE_PACKER.pack(cipher, nonce)
packet = packeting.TxPacket(stack=self.stack,
kind=raeting.pcktKinds.cookie,
embody=body,
data=self.txData)
try:
packet.pack()
except raeting.PacketError as ex:
console.terse(ex + '\n')
self.stack.incStat("packing_error")
self.remove()
return
self.transmit(packet)
console.concise("Allowent Do Cookie at {0}\n".format(self.stack.store.stamp))
def initiate(self):
'''
Process initiate packet
'''
if not self.stack.parseInner(self.rxPacket):
return
data = self.rxPacket.data
body = self.rxPacket.body.data
if not isinstance(body, basestring):
emsg = "Invalid format of initiate packet body"
console.terse(emsg + '\n')
self.stack.incStat('invalid_initiate')
self.remove()
return
#raise raeting.TransactionError(emsg)
if len(body) != raeting.INITIATE_PACKER.size:
emsg = "Invalid length of initiate packet body"
console.terse(emsg + '\n')
self.stack.incStat('invalid_initiate')
self.remove()
return
#raise raeting.TransactionError(emsg)
shortraw, oreo, cipher, nonce = raeting.INITIATE_PACKER.unpack(body)
remote = self.stack.estates[self.reid]
if shortraw != remote.publee.keyraw:
emsg = "Mismatch of short term public key in initiate packet"
console.terse(emsg + '\n')
self.stack.incStat('invalid_initiate')
self.remove()
return
#raise raeting.TransactionError(emsg)
if (binascii.hexlify(oreo) != self.oreo):
emsg = "Stale or invalid cookie in initiate packet"
console.terse(emsg + '\n')
self.stack.incStat('invalid_initiate')
self.remove()
return
#raise raeting.TransactionError(emsg)
msg = remote.privee.decrypt(cipher, nonce, remote.publee.key)
if len(msg) != raeting.INITIATESTUFF_PACKER.size:
emsg = "Invalid length of initiate stuff"
console.terse(emsg + '\n')
self.stack.incStat('invalid_initiate')
self.remove()
return
#raise raeting.TransactionError(emsg)
pubraw, vcipher, vnonce, fqdn = raeting.INITIATESTUFF_PACKER.unpack(msg)
if pubraw != remote.pubber.keyraw:
emsg = "Mismatch of long term public key in initiate stuff"
console.terse(emsg + '\n')
self.stack.incStat('invalid_initiate')
self.remove()
return
#raise raeting.TransactionError(emsg)
fqdn = fqdn.rstrip(' ')
if fqdn != self.stack.estate.fqdn:
emsg = "Mismatch of fqdn in initiate stuff"
console.terse(emsg + '\n')
self.stack.incStat('invalid_initiate')
self.remove()
return
#raise raeting.TransactionError(emsg)
vouch = self.stack.estate.priver.decrypt(vcipher, vnonce, remote.pubber.key)
if vouch != remote.publee.keyraw or vouch != shortraw:
emsg = "Short term key vouch failed"
console.terse(emsg + '\n')
self.stack.incStat('invalid_initiate')
self.remove()
return
#raise raeting.TransactionError(emsg)
self.ackInitiate()
def ackInitiate(self):
'''
Send ack to initiate request
'''
if self.reid not in self.stack.estates:
msg = "Invalid remote destination estate id '{0}'".format(self.reid)
#raise raeting.TransactionError(msg)
console.terse(emsg + '\n')
self.stack.incStat('invalid_remote_eid')
self.remove()
return
body = ""
packet = packeting.TxPacket(stack=self.stack,
kind=raeting.pcktKinds.ack,
embody=body,
data=self.txData)
try:
packet.pack()
except raeting.PacketError as ex:
console.terse(ex + '\n')
self.stack.incStat("packing_error")
self.remove()
return
self.transmit(packet)
console.concise("Allowent Do Ack at {0}\n".format(self.stack.store.stamp))
self.allow()
def allow(self):
'''
Perform allowment
'''
self.stack.estates[self.reid].allowed = True
def final(self):
'''
Process ackFinal packet
'''
if not self.stack.parseInner(self.rxPacket):
return
self.remove()
console.concise("Allowent Do Final at {0}\n".format(self.stack.store.stamp))
self.stack.incStat("allow_correspond_complete")
def rejected(self):
'''
Process nack packet
terminate in response to nack
'''
if not self.stack.parseInner(self.rxPacket):
return
self.remove()
console.concise("Allowent rejected at {0}\n".format(self.stack.store.stamp))
self.stack.incStat(self.statKey())
def nack(self):
'''
Send nack to terminate allower transaction
'''
if self.reid not in self.stack.estates:
emsg = "Invalid remote destination estate id '{0}'".format(self.reid)
#raise raeting.TransactionError(emsg)
console.terse(emsg + '\n')
self.stack.incStat('invalid_remote_eid')
self.remove()
return
body = odict()
packet = packeting.TxPacket(stack=self.stack,
kind=raeting.pcktKinds.nack,
embody=body,
data=self.txData)
try:
packet.pack()
except raeting.PacketError as ex:
console.terse(ex + '\n')
self.stack.incStat("packing_error")
self.remove()
return
self.transmit(packet)
self.remove()
console.concise("Allowent Reject at {0}\n".format(self.stack.store.stamp))
self.stack.incStat(self.statKey())
class Messenger(Initiator):
'''
RAET protocol Messenger Initiator class Dual of Messengent
Generic messages
'''
Timeout = 10.0
RedoTimeoutMin = 1.0 # initial timeout
RedoTimeoutMax = 3.0 # max timeout
def __init__(self, redoTimeoutMin=None, redoTimeoutMax=None, **kwa):
'''
Setup instance
'''
kwa['kind'] = raeting.trnsKinds.message
super(Messenger, self).__init__(**kwa)
self.redoTimeoutMax = redoTimeoutMax or self.RedoTimeoutMax
self.redoTimeoutMin = redoTimeoutMin or self.RedoTimeoutMin
self.redoTimer = aiding.StoreTimer(self.stack.store,
duration=self.redoTimeoutMin)
if self.reid is None:
self.reid = self.stack.estates.values()[0].eid # zeroth is channel master
remote = self.stack.estates[self.reid]
if not remote.allowed:
emsg = "Must be allowed first"
console.terse(emsg + '\n')
self.stack.incStat('unallowed_message_attempt')
return
#raise raeting.TransactionError(emsg)
self.sid = remote.sid
self.tid = remote.nextTid()
self.prep() # prepare .txData
self.tray = packeting.TxTray(stack=self.stack)
self.add(self.index)
def receive(self, packet):
"""
Process received packet belonging to this transaction
# troup/infrastructure.py (repo: troup-system/troup)
# Copyright 2016 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = 'pavle'
from types import FunctionType
from types import MethodType
from troup.observer import Observable
import logging
from queue import Queue
class ChannelError(Exception):
pass
class ChannelClosedError(ChannelError):
pass
class Channel:
CREATED = 'CREATED'
CONNECTING = 'CONNECTING'
OPEN = 'OPEN'
CLOSING = 'CLOSING'
CLOSED = 'CLOSED'
ERROR = 'ERROR'
def __init__(self, name, to_url):
self.name = name
self.status = 'CREATED'
self.listeners = []
self.event_listeners = {}
self.to_url = to_url
self.log = logging.getLogger(self.__class__.__name__)
def open(self):
if self.status is not Channel.CREATED:
raise ChannelError('Unable to open channel')
try:
self.status = Channel.CONNECTING
self.connect()
self.status = Channel.OPEN
except ChannelError:
self.status = Channel.ERROR
raise
except Exception as e:
self.status = Channel.ERROR
raise ChannelError() from e
def close(self):
if self.status is not Channel.OPEN:
raise ChannelError('Unable to close channel')
try:
self.status = Channel.CLOSING
self.disconnect()
self.status = Channel.CLOSED
except ChannelError:
self.status = Channel.ERROR
raise
except Exception as e:
self.status = Channel.ERROR
raise ChannelError() from e
def connect(self):
pass
def disconnect(self):
pass
def register_listener(self, callback):
listener = self.__wrap_listener__(callback)
self.listeners.append(listener)
def __wrap_listener__(self, callback):
return ListenerWrapper(callback)
def send(self, data):
self.log.debug('[CH<Channel>: %s]: empty send' % self.name)
def data_received(self, data):
for listener in self.listeners:
try:
listener.on_data(data)
except Exception as e:
self.log.exception('Listener error: %s', e)
def on(self, event_name, callback):
callbacks = self.event_listeners.get(event_name)
if not callbacks:
callbacks = self.event_listeners[event_name] = []
if not callback in callbacks:
callbacks.append(callback)
def trigger(self, event, *data):
callbacks = self.event_listeners.get(event)
if callbacks:
for callback in callbacks:
try:
callback(*data)
except Exception as e:
self.log.debug('An error occurred while triggering event %s: %s', event, e)
def __repr__(self):
return '<Channel %s> to %s' % (self.name, self.to_url)
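# Illustrative sketch (not part of troup): a minimal Channel subclass showing
# which hooks a concrete transport is expected to override.
class _LoopbackChannel(Channel):
    def connect(self):
        pass  # a real implementation would open its transport here

    def disconnect(self):
        pass  # ...and tear it down here

    def send(self, data):
        # echo the payload straight back to any registered listeners
        self.data_received(data)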
class ListenerWrapper:
def __init__(self, delegate):
self.delegate = self.__get_callable__(delegate)
def __get_callable__(self, delegate):
if isinstance(delegate, FunctionType) or \
isinstance(delegate, MethodType):
return delegate
else:
if hasattr(delegate, 'on_data'):
return getattr(delegate, 'on_data')
raise ChannelError('Invalid listener. It is not a callable object and does not contain on_data method.')
def on_data(self, data):
self.delegate(data)
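# Illustrative sketch (not part of troup): the two listener forms that
# ListenerWrapper accepts -- a plain function (or lambda/bound method), or any
# object exposing an on_data(data) method.
class _PrintingListener:
    def on_data(self, data):
        print('got:', data)

# channel.register_listener(_PrintingListener())       # object form
# channel.register_listener(lambda data: print(data))  # callable form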
from ws4py.async_websocket import WebSocket
from threading import Event
class IncommingChannel(Channel):
def __init__(self, name, to_url, adapter=None):
super(IncommingChannel, self).__init__(name, to_url)
self.adapter = adapter
self.close_event = Event()
def disconnect(self):
self.adapter.close(code=1000, reason="client-closing")
# TODO: Wait to actually close
self.close_event.wait()
def notify_close(self):
self.close_event.set()
def send(self, data):
if self.status == Channel.OPEN:
self.adapter.send(payload=data)
else:
raise ChannelError('Not open')
class IncomingChannelWSAdapter(WebSocket):
def __init__(self, protocol):
WebSocket.__init__(self, protocol)
self.server = None
self.log = logging.getLogger(self.__class__.__name__)
self.channel = None
def opened(self):
self.server = self.proto.server
try:
self.channel = IncommingChannel(
name="channel[%s-%s]" % (self.local_address, self.peer_address),
to_url=str(self.peer_address),
adapter=self)
self.channel.open()
self.server.on_channel_open(self.channel)
except Exception as e:
logging.exception(e)
raise e
def closed(self, code, reason=None):
self.log.debug('closing ws. code=%s, reason=%s'%(str(code),str(reason)))
self.channel.notify_close()
self.server.on_channel_closed(self.channel)
def received_message(self, message):
#print(' -> %s' % str(message))
#print('Message is text %s - data[%s]' % (message.is_text,message.data))
try:
self.channel.data_received(str(message))
except Exception as e:
logging.exception(e)
@property
def local_address(self):
"""
Local endpoint address as a tuple
"""
if not self._local_address:
self._local_address = self.proto.reader._transport.get_extra_info('sockname')
if len(self._local_address) == 4:
self._local_address = self._local_address[:2]
return self._local_address
@property
def peer_address(self):
"""
Peer endpoint address as a tuple
"""
if not self._peer_address:
self._peer_address = self.proto.reader._transport.get_extra_info('peername')
if len(self._peer_address) == 4:
self._peer_address = self._peer_address[:2]
return self._peer_address
from ws4py.server.tulipserver import WebSocketProtocol
class ServerAwareWebSocketProtocol (WebSocketProtocol):
def __init__(self, handler_class, server):
super(ServerAwareWebSocketProtocol, self).__init__(handler_class)
self.server = server
import asyncio
class AsyncIOWebSocketServer:
def __init__(self, host='', port=1700, web_socket_class=IncomingChannelWSAdapter):
self.host = host
self.port = port
self.web_socket_class = web_socket_class
self.aio_loop = asyncio.get_event_loop()
self.running = False
self.channels = {}
self.listeners = []
self.aio_sf = None
self.server_address = None
self.log = logging.getLogger('AsyncIOWebSocketServer')
def start(self):
proto = lambda: ServerAwareWebSocketProtocol(self.web_socket_class, self)
asyncio.set_event_loop(self.aio_loop)
sf = self.aio_loop.create_server(proto, self.host, self.port)
s = self.aio_loop.run_until_complete(sf)
self.server_address = s.sockets[0].getsockname()
self.log.info('Server started on %s' % str(s.sockets[0].getsockname()))
self.aio_sf = sf
self.aio_loop.run_forever()
self.aio_loop.close()
self.log.debug('Async Event loop closed.')
def stop(self):
def stop_server_and_loop():
self.aio_sf.close()
self.aio_loop.stop()
self.log.debug('Server closed. Event loop notified for stop.')
self.aio_loop.call_soon_threadsafe(stop_server_and_loop)
def on_channel_open(self, channel):
self.channels[channel.name] = channel
self.log.debug('Channel %s => %s added' % (channel.name, channel))
self.notify_event('channel.open', channel)
def on_channel_closed(self, channel):
del self.channels[channel.name]
self.notify_event('channel.closed', channel)
def on_event(self, callback):
self.listeners.append(callback)
def notify_event(self, event, channel):
for listener in self.listeners:
listener(event, channel)
def get_server_endpoint(self):
return 'ws://%s:%s' % (self.host or 'localhost', self.port)
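# Usage sketch: start() blocks on the asyncio event loop, so the server is typically run
# on a dedicated thread and shut down via stop(), which closes the listening socket and
# stops the loop thread-safely. The host/port values are placeholders.
def _run_server_example():
    import threading

    server = AsyncIOWebSocketServer(host='', port=1700)
    server.on_event(lambda event, channel: print(event, channel))
    t = threading.Thread(target=server.start, daemon=True)
    t.start()
    # ... application runs; incoming channels are tracked in server.channels ...
    server.stop()
    t.join(timeout=5)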
# -- outgoing connection
from ws4py.client.threadedclient import WebSocketClient
class OutgoingChannelWSAdapter(WebSocketClient):
def __init__(self, url, handlers):
super(OutgoingChannelWSAdapter, self).__init__(url=url)
self.handlers = handlers
def __noop__(self, *args, **kwargs):
pass
def __handler__(self, name):
return self.handlers.get(name) or self.__noop__
def opened(self):
self.__handler__('opened')()
def closed(self, code, reason=None):
self.__handler__('closed')(code, reason)
def received_message(self, m):
if m and m.data:
if m.is_text:
self.__handler__('on_data')(str(m))
else:
self.__handler__('on_data')(m.data)
class OutgoingChannelOverWS(Channel):
def __init__(self, name, to_url, early_messages='queue', queue_max_size=1000):
super(OutgoingChannelOverWS, self).__init__(name, to_url)
self.web_socket = OutgoingChannelWSAdapter(url=to_url,
handlers={
'opened': self._on_open_handler_,
'closed': self._on_closed_handler_,
'on_data': self.data_received
})
self._early_messages = early_messages
self._queue_max_size = queue_max_size
self.queue = None
self.__setup_early_strategy()
def __setup_early_strategy(self):
if self._early_messages == 'queue':
self.queue = Queue(maxsize=self._queue_max_size)
def __handle_early_messages(self):
if self._early_messages == 'queue':
while not self.queue.empty():
msg = self.queue.get_nowait()
self.send(msg)
def _on_open_handler_(self):
self.trigger('open', self)
self.__handle_early_messages()
self.on_opened()
def on_opened(self):
pass
def _on_closed_handler_(self, code, reason=None):
self.trigger('closed', self, code, reason)
self.status = Channel.CLOSING
self.on_closed(code, reason)
self.status = Channel.CLOSED
def on_closed(self, code, reason=None):
pass
def connect(self):
try:
self.web_socket.connect()
except (ConnectionRefusedError, ConnectionAbortedError, ConnectionResetError) as e:
raise ChannelClosedError() from e
def disconnect(self):
self.web_socket.close()
def send(self, data):
if self.status == Channel.OPEN:
try:
self.web_socket.send(payload=data)
except (ConnectionRefusedError, ConnectionAbortedError, ConnectionResetError) as e:
raise ChannelClosedError() from e
elif self.status in [Channel.CREATED, Channel.CONNECTING]:
self.__send_early(data)
else:
raise ChannelClosedError('Cannot send: invalid channel status')
def __send_early(self, data):
if self._early_messages == 'queue':
self.queue.put(data)
elif self._early_messages == 'reject':
raise Exception('Early message rejected')
else:
logging.warning('Early message [%s] not sent due to unknown early messages strategy: %s' %
                (str(data), self._early_messages))
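# Usage sketch of the early-message handling above: with the default 'queue' strategy,
# data sent before the WebSocket handshake completes is buffered and flushed once the
# channel triggers its 'open' event. The URL is a placeholder for a reachable peer.
def _early_send_example():
    ch = OutgoingChannelOverWS(name='node-a', to_url='ws://localhost:1700/')
    ch.on('open', lambda channel: print('open; queued messages were flushed'))
    ch.send('queued-before-connect')  # status is still CREATED, so the message is queued
    ch.open()                         # connects; the queued message is sent on open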
class ChannelManager(Observable):
def __init__(self, aio_server):
#self.config = config
super(ChannelManager, self).__init__()
self.aio_server = aio_server
self.channels = {}
self.by_url = {}
self.log = logging.getLogger('channel-manager')
self.aio_server.on_event(self._aio_server_event_)
def _aio_server_event_(self, event, channel):
if event == 'channel.open':
self._on_open_channel_(channel)
elif event == 'channel.closed':
pass
else:
pass
def channel(self, name=None, to_url=None):
if self.channels.get(name):
return self.channels[name]
if to_url and self.by_url.get(to_url):
return self.by_url[to_url]
if not to_url:
raise Exception('No channel URL specified')
if not name and to_url:
name = to_url
channel = self.open_channel_to(name, to_url)
self.channels[name] = channel
self.by_url[to_url] = channel
return channel
def open_channel_to(self, name, url):
och = OutgoingChannelOverWS(name=name, to_url=url)
self._on_open_channel_(och)
try:
och.open()
except ChannelClosedError:
self.trigger('channel.closed', och)
raise
return och
def close_channel(self, name=None, endpoint=None):
pass
def _on_open_channel_(self, channel):
channel.on('channel.closed', self._handle_closed_channel_)
def get_data_listener(chn):
def data_listener(data):
self.trigger('channel.data', data, chn)
return data_listener
channel.register_listener(get_data_listener(channel))
self.trigger('channel.open', channel)
def _handle_closed_channel_(self, channel, code, reason=None):
del self.channels[channel.name]
del self.by_url[channel.to_url]
self.trigger('channel.closed', channel)
def listen(self, name=None, to_url=None, listener=None):
channel = self.channel(name, to_url)
channel.register_listener(listener)
def send(self, name=None, to_url=None, data=None):
channel = self.channel(name, to_url)
try:
channel.send(data)
except ChannelClosedError as e:
channel.close()
self._handle_closed_channel_(channel, 1006, str(e))
def on_data(self, callback, from_channel=None):
def actual_callback_no_filter(data, chn):
callback(data)
def actual_callback_with_filter(data, channel):
if channel.name == from_channel:
callback(data)
if from_channel:
self.on('channel.data', actual_callback_with_filter)
else:
self.on('channel.data', actual_callback_no_filter)
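# Usage sketch: ChannelManager lazily opens and caches outgoing channels by name/URL and
# re-emits their traffic as 'channel.*' events. The endpoint is a placeholder and is
# assumed to be reachable.
def _channel_manager_example():
    server = AsyncIOWebSocketServer(port=1701)
    manager = ChannelManager(server)
    manager.on('channel.open', lambda channel: print('opened', channel))
    manager.on_data(lambda data: print('received', data))
    manager.send(to_url='ws://localhost:1700/', data='ping')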
# -- simplest message bus in the world
class MessageHandler:
def __init__(self, handler, message_filter):
self.handler = handler
self.message_filter = message_filter
def __call__(self, message):
if self.message_filter:
if not self.message_filter(message):
return
self.handler(message)
def __eq__(self, other):
if type(self) is not type(other):
return False
if self.handler and other.handler:
if not self.handler.__eq__(other.handler):
return False
if self.message_filter is not None:
if not other.message_filter:
return False
return self.message_filter.__eq__(other.message_filter)
else:
return not other.message_filter
def __hash__(self):
return self.handler.__hash__()
class MessageBus:
def __init__(self):
self.subscribers = {}
self.log = logging.getLogger(self.__class__.__name__)
def on(self, topic, handler, message_filter=None):
if not handler:
raise Exception('Handler not specified')
if not topic:
raise Exception('Topic not specified')
subscribers = self.__get_subscribers__(topic)
if handler in subscribers:
raise Exception('Handler already registered')
self.log.debug('Listening on topic %s. Handler %s (filter=%s)', topic, handler, message_filter)
subscribers.append(handler)
def __get_subscribers__(self, topic):
subscribers = self.subscribers.get(topic)
if not subscribers:
subscribers = []
self.subscribers[topic] = subscribers
return subscribers
def publish(self, topic, *events):
subscribers = self.subscribers.get(topic)
if subscribers:
for handler in subscribers:
try:
handler(*events)
except Exception as e:
self.log.exception(e)
def remove(self, topic, handler):
subscribers = self.subscribers.get(topic)
if subscribers:
subscribers.remove(handler)
message_bus = MessageBus()
class Subscribe:
def __init__(self, topic, filter=None, bus=None):
self.topic = topic
self.filter = filter
self.bus = bus
if not self.bus:
self.bus = message_bus
def __call__(self, method):
self.bus.on(self.topic, method)
return method
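# Usage sketch: handlers can be attached to the module-level bus directly with on() and
# publish(), or declaratively with the Subscribe decorator defined above (which defaults
# to the same global message_bus).
def _message_bus_example():
    def on_node_event(event):
        print('node event:', event)

    message_bus.on('node.events', on_node_event)
    message_bus.publish('node.events', {'type': 'started'})

    @Subscribe('system.metrics')
    def handle_metrics(metrics):
        print('metrics:', metrics)

    message_bus.publish('system.metrics', {'load': 0.42})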
class Bus:
def __init__(self):
pass
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.BrightText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(59, 255, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Shadow, brush)
brush = QtGui.QBrush(QtGui.QColor(157, 255, 127, 127))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.AlternateBase, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 220))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ToolTipBase, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ToolTipText, brush)
brush = QtGui.QBrush(QtGui.QColor(29, 127, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(59, 255, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(157, 255, 127, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Light, brush)
brush = QtGui.QBrush(QtGui.QColor(108, 255, 63, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Midlight, brush)
brush = QtGui.QBrush(QtGui.QColor(29, 127, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Dark, brush)
brush = QtGui.QBrush(QtGui.QColor(39, 170, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Mid, brush)
brush = QtGui.QBrush(QtGui.QColor(29, 127, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.BrightText, brush)
brush = QtGui.QBrush(QtGui.QColor(29, 127, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(59, 255, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(59, 255, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Shadow, brush)
brush = QtGui.QBrush(QtGui.QColor(59, 255, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.AlternateBase, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 220))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ToolTipBase, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ToolTipText, brush)
self.legBack_L0_knee_ctl.setPalette(palette)
self.legBack_L0_knee_ctl.setAutoFillBackground(True)
self.legBack_L0_knee_ctl.setObjectName("legBack_L0_knee_ctl")
self.legBack_R0_roll_ctl = SelectBtn_greenCircle(biped_body)
self.legBack_R0_roll_ctl.setGeometry(QtCore.QRect(20, 336, 21, 20))
palette = QtGui.QPalette()
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(59, 255, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(157, 255, 127, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Light, brush)
brush = QtGui.QBrush(QtGui.QColor(108, 255, 63, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Midlight, brush)
brush = QtGui.QBrush(QtGui.QColor(29, 127, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Dark, brush)
brush = QtGui.QBrush(QtGui.QColor(39, 170, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Mid, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.BrightText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(59, 255, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Shadow, brush)
brush = QtGui.QBrush(QtGui.QColor(157, 255, 127, 127))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.AlternateBase, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 220))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ToolTipBase, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ToolTipText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(59, 255, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(157, 255, 127, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Light, brush)
brush = QtGui.QBrush(QtGui.QColor(108, 255, 63, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Midlight, brush)
brush = QtGui.QBrush(QtGui.QColor(29, 127, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Dark, brush)
brush = QtGui.QBrush(QtGui.QColor(39, 170, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Mid, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.BrightText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(59, 255, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Shadow, brush)
brush = QtGui.QBrush(QtGui.QColor(157, 255, 127, 127))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.AlternateBase, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 220))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ToolTipBase, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ToolTipText, brush)
brush = QtGui.QBrush(QtGui.QColor(29, 127, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(59, 255, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(157, 255, 127, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Light, brush)
brush = QtGui.QBrush(QtGui.QColor(108, 255, 63, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Midlight, brush)
brush = QtGui.QBrush(QtGui.QColor(29, 127, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Dark, brush)
brush = QtGui.QBrush(QtGui.QColor(39, 170, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Mid, brush)
brush = QtGui.QBrush(QtGui.QColor(29, 127, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.BrightText, brush)
brush = QtGui.QBrush(QtGui.QColor(29, 127, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(59, 255, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(59, 255, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Shadow, brush)
brush = QtGui.QBrush(QtGui.QColor(59, 255, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.AlternateBase, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 220))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ToolTipBase, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ToolTipText, brush)
self.legBack_R0_roll_ctl.setPalette(palette)
self.legBack_R0_roll_ctl.setAutoFillBackground(True)
self.legBack_R0_roll_ctl.setObjectName("legBack_R0_roll_ctl")
self.legFront_L0_fk0_ctl = SelectBtn_RFkBox(biped_body)
self.legFront_L0_fk0_ctl.setGeometry(QtCore.QRect(227, 218, 20, 15))
palette = QtGui.QPalette()
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(59, 255, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(157, 255, 127, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Light, brush)
brush = QtGui.QBrush(QtGui.QColor(108, 255, 63, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Midlight, brush)
brush = QtGui.QBrush(QtGui.QColor(29, 127, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Dark, brush)
brush = QtGui.QBrush(QtGui.QColor(39, 170, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Mid, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.BrightText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(59, 255, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Shadow, brush)
brush = QtGui.QBrush(QtGui.QColor(157, 255, 127, 127))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.AlternateBase, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 220))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ToolTipBase, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ToolTipText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(59, 255, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(157, 255, 127, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Light, brush)
brush = QtGui.QBrush(QtGui.QColor(108, 255, 63, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Midlight, brush)
brush = QtGui.QBrush(QtGui.QColor(29, 127, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Dark, brush)
brush = QtGui.QBrush(QtGui.QColor(39, 170, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Mid, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.BrightText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(59, 255, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Shadow, brush)
brush = QtGui.QBrush(QtGui.QColor(157, 255, 127, 127))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.AlternateBase, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 220))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ToolTipBase, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ToolTipText, brush)
brush = QtGui.QBrush(QtGui.QColor(29, 127, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(59, 255, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(157, 255, 127, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Light, brush)
brush = QtGui.QBrush(QtGui.QColor(108, 255, 63, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Midlight, brush)
brush = QtGui.QBrush(QtGui.QColor(29, 127, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Dark, brush)
brush = QtGui.QBrush(QtGui.QColor(39, 170, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Mid, brush)
brush = QtGui.QBrush(QtGui.QColor(29, 127, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.BrightText, brush)
brush = QtGui.QBrush(QtGui.QColor(29, 127, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(59, 255, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(59, 255, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Shadow, brush)
brush = QtGui.QBrush(QtGui.QColor(59, 255, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.AlternateBase, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 220))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ToolTipBase, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ToolTipText, brush)
self.legFront_L0_fk0_ctl.setPalette(palette)
self.legFront_L0_fk0_ctl.setAutoFillBackground(True)
self.legFront_L0_fk0_ctl.setObjectName("legFront_L0_fk0_ctl")
self.legFront_R0_knee_ctl = SelectBtn_greenCircle(biped_body)
self.legFront_R0_knee_ctl.setGeometry(QtCore.QRect(91, 222, 10, 10))
palette = QtGui.QPalette()
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(59, 255, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(157, 255, 127, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Light, brush)
brush = QtGui.QBrush(QtGui.QColor(108, 255, 63, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Midlight, brush)
brush = QtGui.QBrush(QtGui.QColor(29, 127, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Dark, brush)
brush = QtGui.QBrush(QtGui.QColor(39, 170, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Mid, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.BrightText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(59, 255, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Shadow, brush)
brush = QtGui.QBrush(QtGui.QColor(157, 255, 127, 127))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.AlternateBase, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 220))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ToolTipBase, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ToolTipText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(59, 255, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(157, 255, 127, 0))
def RSA_Encrypt(self, keyHandle, message, inScheme, label):
    """ This command performs RSA encryption using the indicated padding
    scheme according to IETF RFC 8017 (PKCS#1).
    Args:
        keyHandle (TPM_HANDLE): Reference to public portion of RSA key to
            use for encryption
            Auth Index: None
        message (int): Message to be encrypted
        inScheme (TPMU_ASYM_SCHEME): The padding scheme to use if scheme
            associated with keyHandle is TPM_ALG_NULL
        label (int): Optional label to be associated with the message
            Size of the buffer is zero if no label is present
            NOTE 2 See description of label above.
Returns:
outData - Encrypted output
"""
req = TPM2_RSA_Encrypt_REQUEST(keyHandle, message, inScheme, label)
respBuf = self.dispatchCommand(TPM_CC.RSA_Encrypt, req)
res = self.processResponse(respBuf, RSA_EncryptResponse)
return res.outData if res else None
# RSA_Encrypt()
def RSA_Decrypt(self, keyHandle, cipherText, inScheme, label):
""" This command performs RSA decryption using the indicated padding
scheme according to IETF RFC 8017 (PKCS#1).
Args:
keyHandle (TPM_HANDLE): RSA key to use for decryption
Auth Index: 1
Auth Role: USER
cipherText (int): Cipher text to be decrypted
NOTE An encrypted RSA data block is the size of the public modulus.
inScheme (TPMU_ASYM_SCHEME): The padding scheme to use if scheme
associated with keyHandle is TPM_ALG_NULL
(One of [TPMS_KEY_SCHEME_ECDH, TPMS_KEY_SCHEME_ECMQV,
TPMS_SIG_SCHEME_RSASSA, TPMS_SIG_SCHEME_RSAPSS,
TPMS_SIG_SCHEME_ECDSA, TPMS_SIG_SCHEME_ECDAA,
TPMS_SIG_SCHEME_SM2, TPMS_SIG_SCHEME_ECSCHNORR,
TPMS_ENC_SCHEME_RSAES, TPMS_ENC_SCHEME_OAEP, TPMS_SCHEME_HASH,
TPMS_NULL_ASYM_SCHEME])
label (int): Label whose association with the message is to be verified
Returns:
message - Decrypted output
"""
req = TPM2_RSA_Decrypt_REQUEST(keyHandle, cipherText, inScheme, label)
respBuf = self.dispatchCommand(TPM_CC.RSA_Decrypt, req)
res = self.processResponse(respBuf, RSA_DecryptResponse)
return res.message if res else None
# RSA_Decrypt()
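# Usage sketch (assumptions: `tpm` is a connected instance of this class, `rsaKeyHandle`
# refers to a loaded RSA decryption key, and passing a null scheme with no label defers
# to the scheme already associated with the key):
#
#     cipher = tpm.RSA_Encrypt(rsaKeyHandle, b'secret message', TPMS_NULL_ASYM_SCHEME(), None)
#     plain = tpm.RSA_Decrypt(rsaKeyHandle, cipher, TPMS_NULL_ASYM_SCHEME(), None)
#     assert plain == b'secret message'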
def ECDH_KeyGen(self, keyHandle):
""" This command uses the TPM to generate an ephemeral key pair (de, Qe
where Qe = [de]G). It uses the private ephemeral key and a loaded public
key (QS) to compute the shared secret value (P = [hde]QS).
Args:
keyHandle (TPM_HANDLE): Handle of a loaded ECC key public area.
Auth Index: None
Returns:
zPoint - Results of P = h[de]Qs
pubPoint - Generated ephemeral public point (Qe)
"""
req = TPM2_ECDH_KeyGen_REQUEST(keyHandle)
respBuf = self.dispatchCommand(TPM_CC.ECDH_KeyGen, req)
return self.processResponse(respBuf, ECDH_KeyGenResponse)
# ECDH_KeyGen()
def ECDH_ZGen(self, keyHandle, inPoint):
""" This command uses the TPM to recover the Z value from a public point
(QB) and a private key (ds). It will perform the multiplication of the
provided inPoint (QB) with the private key (ds) and return the
coordinates of the resultant point (Z = (xZ, yZ) = [hds]QB; where h is
the cofactor of the curve).
Args:
keyHandle (TPM_HANDLE): Handle of a loaded ECC key
Auth Index: 1
Auth Role: USER
inPoint (TPMS_ECC_POINT): A public key
Returns:
outPoint - X and Y coordinates of the product of the multiplication
Z = (xZ, yZ) = [hdS]QB
"""
req = TPM2_ECDH_ZGen_REQUEST(keyHandle, inPoint)
respBuf = self.dispatchCommand(TPM_CC.ECDH_ZGen, req)
res = self.processResponse(respBuf, ECDH_ZGenResponse)
return res.outPoint if res else None
# ECDH_ZGen()
def ECC_Parameters(self, curveID):
""" This command returns the parameters of an ECC curve identified by
its TCG-assigned curveID.
Args:
curveID (TPM_ECC_CURVE): Parameter set selector
Returns:
parameters - ECC parameters for the selected curve
"""
req = TPM2_ECC_Parameters_REQUEST(curveID)
respBuf = self.dispatchCommand(TPM_CC.ECC_Parameters, req)
res = self.processResponse(respBuf, ECC_ParametersResponse)
return res.parameters if res else None
# ECC_Parameters()
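# Usage sketch (assumptions: `tpm` is a connected instance of this class and the curve
# selector member name follows the TPM_ECC_CURVE enumeration):
#
#     parms = tpm.ECC_Parameters(TPM_ECC_CURVE.NIST_P256)
#     print(parms.keySize)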
def ZGen_2Phase(self, keyA, inQsB, inQeB, inScheme, counter):
""" This command supports two-phase key exchange protocols. The command
is used in combination with TPM2_EC_Ephemeral(). TPM2_EC_Ephemeral()
generates an ephemeral key and returns the public point of that
ephemeral key along with a numeric value that allows the TPM to
regenerate the associated private key.
Args:
keyA (TPM_HANDLE): Handle of an unrestricted decryption key ECC
The private key referenced by this handle is used as dS,A
Auth Index: 1
Auth Role: USER
inQsB (TPMS_ECC_POINT): Other party's static public key (Qs,B =
(Xs,B, Ys,B))
inQeB (TPMS_ECC_POINT): Other party's ephemeral public key (Qe,B =
(Xe,B, Ye,B))
inScheme (TPM_ALG_ID): The key exchange scheme
counter (int): Value returned by TPM2_EC_Ephemeral()
Returns:
outZ1 - X and Y coordinates of the computed value (scheme dependent)
outZ2 - X and Y coordinates of the second computed value (scheme dependent)
"""
req = TPM2_ZGen_2Phase_REQUEST(keyA, inQsB, inQeB, inScheme, counter)
respBuf = self.dispatchCommand(TPM_CC.ZGen_2Phase, req)
return self.processResponse(respBuf, ZGen_2PhaseResponse)
# ZGen_2Phase()
def ECC_Encrypt(self, keyHandle, plainText, inScheme):
""" This command performs ECC encryption as described in Part 1, Annex D.
Args:
keyHandle (TPM_HANDLE): Reference to public portion of ECC key to
use for encryption
Auth Index: None
plainText (int): Plaintext to be encrypted
inScheme (TPMU_KDF_SCHEME): The KDF to use if scheme associated with
keyHandle is TPM_ALG_NULL
(One of [TPMS_KDF_SCHEME_MGF1, TPMS_KDF_SCHEME_KDF1_SP800_56A,
TPMS_KDF_SCHEME_KDF2, TPMS_KDF_SCHEME_KDF1_SP800_108,
TPMS_SCHEME_HASH, TPMS_NULL_KDF_SCHEME])
Returns:
C1 - The public ephemeral key used for ECDH
C2 - The data block produced by the XOR process
C3 - The integrity value
"""
req = TPM2_ECC_Encrypt_REQUEST(keyHandle, plainText, inScheme)
respBuf = self.dispatchCommand(TPM_CC.ECC_Encrypt, req)
return self.processResponse(respBuf, ECC_EncryptResponse)
# ECC_Encrypt()
def ECC_Decrypt(self, keyHandle, C1, C2, C3, inScheme):
""" This command performs ECC decryption.
Args:
keyHandle (TPM_HANDLE): ECC key to use for decryption
Auth Index: 1
Auth Role: USER
C1 (TPMS_ECC_POINT): The public ephemeral key used for ECDH
C2 (int): The data block produced by the XOR process
C3 (int): The integrity value
inScheme (TPMU_KDF_SCHEME): The KDF to use if scheme associated with
keyHandle is TPM_ALG_NULL
(One of [TPMS_KDF_SCHEME_MGF1, TPMS_KDF_SCHEME_KDF1_SP800_56A,
TPMS_KDF_SCHEME_KDF2, TPMS_KDF_SCHEME_KDF1_SP800_108,
TPMS_SCHEME_HASH, TPMS_NULL_KDF_SCHEME])
Returns:
plainText - Decrypted output
"""
req = TPM2_ECC_Decrypt_REQUEST(keyHandle, C1, C2, C3, inScheme)
respBuf = self.dispatchCommand(TPM_CC.ECC_Decrypt, req)
res = self.processResponse(respBuf, ECC_DecryptResponse)
return res.plainText if res else None
# ECC_Decrypt()
def EncryptDecrypt(self, keyHandle, decrypt, mode, ivIn, inData):
""" NOTE 1 This command is deprecated, and TPM2_EncryptDecrypt2() is
preferred. This should be reflected in platform-specific specifications.
Args:
keyHandle (TPM_HANDLE): The symmetric key used for the operation
Auth Index: 1
Auth Role: USER
decrypt (int): If YES, then the operation is decryption; if NO, the
operation is encryption
mode (TPM_ALG_ID): Symmetric encryption/decryption mode
this field shall match the default mode of the key or be TPM_ALG_NULL.
ivIn (int): An initial value as required by the algorithm
inData (int): The data to be encrypted/decrypted
Returns:
outData - Encrypted or decrypted output
ivOut - Chaining value to use for IV in next round
"""
req = TPM2_EncryptDecrypt_REQUEST(keyHandle, decrypt, mode, ivIn, inData)
respBuf = self.dispatchCommand(TPM_CC.EncryptDecrypt, req)
return self.processResponse(respBuf, EncryptDecryptResponse)
# EncryptDecrypt()
def EncryptDecrypt2(self, keyHandle, inData, decrypt, mode, ivIn):
""" This command is identical to TPM2_EncryptDecrypt(), except that the
inData parameter is the first parameter. This permits inData to be
parameter encrypted.
Args:
keyHandle (TPM_HANDLE): The symmetric key used for the operation
Auth Index: 1
Auth Role: USER
inData (int): The data to be encrypted/decrypted
decrypt (int): If YES, then the operation is decryption; if NO, the
operation is encryption
mode (TPM_ALG_ID): Symmetric mode
this field shall match the default mode of the key or be TPM_ALG_NULL.
ivIn (int): An initial value as required by the algorithm
Returns:
outData - Encrypted or decrypted output
ivOut - Chaining value to use for IV in next round
"""
req = TPM2_EncryptDecrypt2_REQUEST(keyHandle, inData, decrypt, mode, ivIn)
respBuf = self.dispatchCommand(TPM_CC.EncryptDecrypt2, req)
return self.processResponse(respBuf, EncryptDecrypt2Response)
# EncryptDecrypt2()
def Hash(self, data, hashAlg, hierarchy):
""" This command performs a hash operation on a data buffer and returns
the results.
Args:
data (int): Data to be hashed
hashAlg (TPM_ALG_ID): Algorithm for the hash being computed shall
not be TPM_ALG_NULL
hierarchy (TPM_HANDLE): Hierarchy to use for the ticket (TPM_RH_NULL
allowed)
Returns:
outHash - Results
validation - Ticket indicating that the sequence of octets used to
compute outDigest did not start with TPM_GENERATED_VALUE
will be a NULL ticket if the digest may not be signed
with a restricted key
"""
req = TPM2_Hash_REQUEST(data, hashAlg, hierarchy)
respBuf = self.dispatchCommand(TPM_CC.Hash, req)
return self.processResponse(respBuf, HashResponse)
# Hash()
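# Usage sketch (assumptions: `tpm` is a connected instance of this class and the
# TPM_ALG_ID / TPM_RH constants are available as referenced in the docstring above):
#
#     resp = tpm.Hash(b'some data', TPM_ALG_ID.SHA256, TPM_HANDLE(TPM_RH.NULL))
#     digest, ticket = resp.outHash, resp.validation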
def HMAC(self, handle, buffer, hashAlg):
""" This command performs an HMAC on the supplied data using the
indicated hash algorithm.
Args:
handle (TPM_HANDLE): Handle for the symmetric signing key providing
the HMAC key
Auth Index: 1
Auth Role: USER
buffer (int): HMAC data
hashAlg (TPM_ALG_ID): Algorithm to use for HMAC
Returns:
outHMAC - The returned HMAC in a sized buffer
"""
req = TPM2_HMAC_REQUEST(handle, buffer, hashAlg)
respBuf = self.dispatchCommand(TPM_CC.HMAC, req)
res = self.processResponse(respBuf, HMACResponse)
return res.outHMAC if res else None
# HMAC()
def MAC(self, handle, buffer, inScheme):
""" This command performs an HMAC | |
import os
import numpy as np
import pickle
import time
from collections import deque
from mpi4py import MPI
import tensorflow as tf
from stable_baselines import logger
from stable_baselines.common import tf_util, SetVerbosity, TensorboardWriter
from stable_baselines import DDPG
from stable_baselines.common.buffers import ReplayBuffer
from stable_baselines.common.math_util import unscale_action, scale_action
from stable_baselines.common.vec_env import VecEnv
class DDPGfED(DDPG):
"""
Custom version of Deep Deterministic Policy Gradient (DDPG) to use with expert demonstrations.
Similar to DDPG from Demonstrations (DDPGfD).
"""
def __init__(self, policy, env, gamma=0.99, memory_policy=None, eval_env=None, nb_train_steps=50,
nb_rollout_steps=100, nb_eval_steps=100, param_noise=None, action_noise=None,
normalize_observations=False, tau=0.001, batch_size=128, param_noise_adaption_interval=50,
normalize_returns=False, enable_popart=False, observation_range=(-5., 5.), critic_l2_reg=0.,
return_range=(-np.inf, np.inf), actor_lr=1e-4, critic_lr=1e-3, clip_norm=None, reward_scale=1.,
render=False, render_eval=False, memory_limit=None, buffer_size=50000, random_exploration=0.0,
verbose=0, tensorboard_log=None, _init_setup_model=True, policy_kwargs=None,
full_tensorboard_log=False, seed=None, n_cpu_tf_sess=1,
expert_use=False, expert_data=None, expert_batch_size=64, expert_limit_success=0.5):
super(DDPGfED, self).__init__(policy=policy, env=env, gamma=gamma, memory_policy=memory_policy,
eval_env=eval_env, nb_train_steps=nb_train_steps,
nb_rollout_steps=nb_rollout_steps, nb_eval_steps=nb_eval_steps,
param_noise=param_noise, action_noise=action_noise,
normalize_observations=normalize_observations, tau=tau, batch_size=batch_size,
param_noise_adaption_interval=param_noise_adaption_interval,
normalize_returns=normalize_returns, enable_popart=enable_popart,
observation_range=observation_range, critic_l2_reg=critic_l2_reg,
return_range=return_range, actor_lr=actor_lr, critic_lr=critic_lr,
clip_norm=clip_norm, reward_scale=reward_scale, render=render,
render_eval=render_eval, memory_limit=memory_limit, buffer_size=buffer_size,
random_exploration=random_exploration, verbose=verbose,
tensorboard_log=tensorboard_log, _init_setup_model=_init_setup_model,
policy_kwargs=policy_kwargs, full_tensorboard_log=full_tensorboard_log,
seed=seed, n_cpu_tf_sess=n_cpu_tf_sess)
self.expert_use = expert_use
if self.expert_use:
self.expert_data = expert_data
self.expert_batch_size = expert_batch_size
self.expert_batch_size_current = expert_batch_size
self.expert_limit_success = expert_limit_success
self.demo_data = None
self.demo_size = None
self._init_demo_buffer()
else:
self.expert_data = None
self.expert_batch_size = 0
self.expert_batch_size_current = 0
self.expert_limit_success = 0
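# Construction sketch (assumptions: 'MlpPolicy' resolves to the standard stable-baselines
# DDPG policy, the gym environment id is arbitrary, and expert_demo.npz was recorded with
# obs/actions/rewards/episode_starts arrays as expected by _init_demo_buffer below):
#
#     model = DDPGfED('MlpPolicy', 'FetchReach-v1',
#                     expert_use=True, expert_data='expert_demo.npz',
#                     expert_batch_size=64, expert_limit_success=0.5, verbose=1)
#     model.learn(total_timesteps=100000)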
def get_random_action(self):
return np.random.uniform(-1.5, 1.5, self.env.action_space.shape[0])
def _init_demo_buffer(self):
rank = MPI.COMM_WORLD.Get_rank()
if rank == 0 and self.verbose >= 1:
print("Start init demo buffer")
data_path = "{}{}.npz".format(self.expert_data[:-4], rank)
if not os.path.exists(data_path):
import shutil
shutil.copy(self.expert_data, data_path)
demo_data = np.load(data_path)
self.demo_size = len(demo_data.f.actions)
self.demo_buffer = ReplayBuffer(self.demo_size)
self.demo_data = {
"obs": demo_data["obs"].copy(),
"actions": demo_data["actions"].copy(),
"rewards": demo_data["rewards"].copy(),
"episode_starts": demo_data["episode_starts"].copy()
}
for n in range(1, self.demo_size):
obs = self.demo_data["obs"][n - 1]
self.demo_buffer.add(obs,
self.demo_data["actions"][n],
self.demo_data["rewards"][n] * self.reward_scale,
self.demo_data["obs"][n],
self.demo_data["episode_starts"][n].astype(np.float32))
if self.normalize_observations:
self.obs_rms.update(np.array([obs]))
del demo_data
os.remove(data_path)
MPI.COMM_WORLD.Barrier()
if rank == 0 and self.verbose >= 1:
print("Done init demo buffer")
def _train_step(self, step, writer, log=False):
"""
run a step of training from batch
:param step: (int) the current step iteration
:param writer: (TensorFlow Summary.writer) the writer for tensorboard
:param log: (bool) whether or not to log to metadata
:return: (float, float) critic loss, actor loss
"""
if self.expert_use and self.expert_batch_size_current > 0:
# Get a batch
batch_size = self.batch_size - self.expert_batch_size_current
obs, actions, rewards, next_obs, terminals = self.replay_buffer.sample(batch_size=batch_size,
env=self._vec_normalize_env)
_obs, _actions, _rewards, _next_obs, _terminals = self.demo_buffer.sample(
batch_size=self.expert_batch_size_current,
env=self._vec_normalize_env)
obs = np.append(obs, _obs, axis=0)
actions = np.append(actions, _actions, axis=0)
rewards = np.append(rewards, _rewards, axis=0)
next_obs = np.append(next_obs, _next_obs, axis=0)
terminals = np.append(terminals, _terminals, axis=0)
else:
# Get a batch
obs, actions, rewards, next_obs, terminals = self.replay_buffer.sample(batch_size=self.batch_size,
env=self._vec_normalize_env)
# Reshape to match previous behavior and placeholder shape
rewards = rewards.reshape(-1, 1)
terminals = terminals.reshape(-1, 1)
if self.normalize_returns and self.enable_popart:
old_mean, old_std, target_q = self.sess.run([self.ret_rms.mean, self.ret_rms.std, self.target_q],
feed_dict={
self.obs_target: next_obs,
self.rewards: rewards,
self.terminals_ph: terminals
})
self.ret_rms.update(target_q.flatten())
self.sess.run(self.renormalize_q_outputs_op, feed_dict={
self.old_std: np.array([old_std]),
self.old_mean: np.array([old_mean]),
})
else:
target_q = self.sess.run(self.target_q, feed_dict={
self.obs_target: next_obs,
self.rewards: rewards,
self.terminals_ph: terminals
})
# Get all gradients and perform a synced update.
ops = [self.actor_grads, self.actor_loss, self.critic_grads, self.critic_loss]
td_map = {
self.obs_train: obs,
self.actions: actions,
self.action_train_ph: actions,
self.rewards: rewards,
self.critic_target: target_q,
self.param_noise_stddev: 0 if self.param_noise is None else self.param_noise.current_stddev
}
if writer is not None:
# run loss backprop with summary if the step_id was not already logged (can happen with the right
# parameters as the step value is only an estimate)
if self.full_tensorboard_log and log and step not in self.tb_seen_steps:
run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
run_metadata = tf.RunMetadata()
summary, actor_grads, actor_loss, critic_grads, critic_loss = \
self.sess.run([self.summary] + ops, td_map, options=run_options, run_metadata=run_metadata)
writer.add_run_metadata(run_metadata, 'step%d' % step)
self.tb_seen_steps.append(step)
else:
summary, actor_grads, actor_loss, critic_grads, critic_loss = self.sess.run([self.summary] + ops,
td_map)
writer.add_summary(summary, step)
else:
actor_grads, actor_loss, critic_grads, critic_loss = self.sess.run(ops, td_map)
self.actor_optimizer.update(actor_grads, learning_rate=self.actor_lr)
self.critic_optimizer.update(critic_grads, learning_rate=self.critic_lr)
return critic_loss, actor_loss
def learn(self, total_timesteps, callback=None, log_interval=100, tb_log_name="DDPG",
reset_num_timesteps=True, replay_wrapper=None):
new_tb_log = self._init_num_timesteps(reset_num_timesteps)
callback = self._init_callback(callback)
if replay_wrapper is not None:
self.replay_buffer = replay_wrapper(self.replay_buffer)
with SetVerbosity(self.verbose), TensorboardWriter(self.graph, self.tensorboard_log, tb_log_name, new_tb_log) \
as writer:
self._setup_learn()
# a list for tensorboard logging, to prevent logging with the same step number if it already occurred
self.tb_seen_steps = []
rank = MPI.COMM_WORLD.Get_rank()
if self.verbose >= 2:
logger.log('Using agent with the following configuration:')
logger.log(str(self.__dict__.items()))
eval_episode_rewards_history = deque(maxlen=100)
episode_rewards_history = deque(maxlen=100)
episode_successes = []
with self.sess.as_default(), self.graph.as_default():
# Prepare everything.
self._reset()
obs = self.env.reset()
# Retrieve unnormalized observation for saving into the buffer
if self._vec_normalize_env is not None:
obs_ = self._vec_normalize_env.get_original_obs().squeeze()
eval_obs = None
if self.eval_env is not None:
eval_obs = self.eval_env.reset()
episode_reward = 0.
episode_step = 0
episodes = 0
step = 0
total_steps = 0
start_time = time.time()
epoch_episode_rewards = []
epoch_episode_steps = []
epoch_actor_losses = []
epoch_critic_losses = []
epoch_adaptive_distances = []
eval_episode_rewards = []
eval_qs = []
epoch_actions = []
epoch_qs = []
epoch_episodes = 0
epoch = 0
callback.on_training_start(locals(), globals())
while True:
for _ in range(log_interval):
callback.on_rollout_start()
# Perform rollouts.
for _ in range(self.nb_rollout_steps):
if total_steps >= total_timesteps:
callback.on_training_end()
return self
# Predict next action.
action, q_value = self._policy(obs, apply_noise=True, compute_q=True)
assert action.shape == self.env.action_space.shape
# Execute next action.
if rank == 0 and self.render:
self.env.render()
# Randomly sample actions from a uniform distribution
# with a probability self.random_exploration (used in HER + DDPG)
if np.random.rand() < self.random_exploration:
# actions sampled from action space are from range specific to the environment
# but algorithm operates on tanh-squashed actions therefore simple scaling is used
unscaled_action = self.action_space.sample()
action = scale_action(self.action_space, unscaled_action)
else:
# inferred actions need to be transformed to environment action_space before stepping
unscaled_action = unscale_action(self.action_space, action)
new_obs, reward, done, info = self.env.step(unscaled_action)
self.num_timesteps += 1
if callback.on_step() is False:
callback.on_training_end()
return self
step += 1
total_steps += 1
if rank == 0 and self.render:
self.env.render()
# Book-keeping.
epoch_actions.append(action)
epoch_qs.append(q_value)
# Store only the unnormalized version
if self._vec_normalize_env is not None:
new_obs_ = self._vec_normalize_env.get_original_obs().squeeze()
reward_ = self._vec_normalize_env.get_original_reward().squeeze()
else:
# Avoid changing the original ones
obs_, new_obs_, reward_ = obs, new_obs, reward
self._store_transition(obs_, action, reward_, new_obs_, done)
obs = new_obs
# Save the unnormalized observation
if self._vec_normalize_env is not None:
obs_ = new_obs_
episode_reward += reward_
episode_step += 1
if writer is not None:
ep_rew = np.array([reward_]).reshape((1, -1))
ep_done = np.array([done]).reshape((1, -1))
tf_util.total_episode_reward_logger(self.episode_reward, ep_rew, ep_done,
writer, self.num_timesteps)
if done:
# Episode done.
epoch_episode_rewards.append(episode_reward)
episode_rewards_history.append(episode_reward)
epoch_episode_steps.append(episode_step)
episode_reward = 0.
episode_step = 0
epoch_episodes += 1
episodes += 1
maybe_is_success = info.get('is_success')
if maybe_is_success is not None:
episode_successes.append(float(maybe_is_success))
self._reset()
if not isinstance(self.env, VecEnv):
obs = self.env.reset()
callback.on_rollout_end()
# Train.
epoch_actor_losses = []
epoch_critic_losses = []
epoch_adaptive_distances = []
if total_steps % 1000 == 0 and rank in (0, 1):
    print("steps", total_steps, " rank", rank)
for t_train in range(self.nb_train_steps):
# Not enough samples in the replay buffer
if not self.replay_buffer.can_sample(self.batch_size) or total_steps < 1190:
MPI.COMM_WORLD.Barrier()
break
MPI.COMM_WORLD.Barrier()
# Adapt param noise, if necessary.
if len(self.replay_buffer) >= self.batch_size and \
t_train % self.param_noise_adaption_interval == 0:
distance = self._adapt_param_noise()
epoch_adaptive_distances.append(distance)
# weird equation to deal with the fact that nb_train_steps will be different
# from nb_rollout_steps
step = (int(t_train * (self.nb_rollout_steps / self.nb_train_steps)) +
self.num_timesteps - self.nb_rollout_steps)
critic_loss, actor_loss = self._train_step(step, writer, log=t_train == 0)
epoch_critic_losses.append(critic_loss)
epoch_actor_losses.append(actor_loss)
self._update_target_net()
# Evaluate.
eval_episode_rewards = []
eval_qs = []
MPI.COMM_WORLD.Barrier()
if self.eval_env is not None:
eval_episode_reward = 0.
for _ in range(self.nb_eval_steps):
if total_steps >= total_timesteps:
return self
eval_action, eval_q = self._policy(eval_obs, apply_noise=False, compute_q=True)
unscaled_action = unscale_action(self.action_space, eval_action)
eval_obs, eval_r, eval_done, _ = self.eval_env.step(unscaled_action)
if self.render_eval:
self.eval_env.render()
eval_episode_reward += eval_r
eval_qs.append(eval_q)
if eval_done:
if not isinstance(self.env, VecEnv):
eval_obs = self.eval_env.reset()
eval_episode_rewards.append(eval_episode_reward)
eval_episode_rewards_history.append(eval_episode_reward)
eval_episode_reward = 0.
mpi_size = MPI.COMM_WORLD.Get_size()
# Log stats.
# XXX shouldn't call np.mean on variable length lists
duration = time.time() - start_time
stats = self._get_stats()
combined_stats = stats.copy()
combined_stats['rollout/return'] = np.mean(epoch_episode_rewards)
combined_stats['rollout/return_history'] = np.mean(episode_rewards_history)
combined_stats['rollout/episode_steps'] = np.mean(epoch_episode_steps)
combined_stats['rollout/actions_mean'] = np.mean(epoch_actions)
combined_stats['rollout/Q_mean'] = np.mean(epoch_qs)
combined_stats['train/loss_actor'] = np.mean(epoch_actor_losses)
combined_stats['train/loss_critic'] = np.mean(epoch_critic_losses)
if len(epoch_adaptive_distances) != 0:
combined_stats['train/param_noise_distance'] = np.mean(epoch_adaptive_distances)
combined_stats['total/duration'] = duration
combined_stats['total/steps_per_second'] = float(step) / float(duration)
combined_stats['total/episodes'] = episodes
combined_stats['rollout/episodes'] = epoch_episodes
combined_stats['rollout/actions_std'] = np.std(epoch_actions)
# Evaluation statistics.
if self.eval_env is not None:
combined_stats['eval/return'] = np.mean(eval_episode_rewards)
combined_stats['eval/return_history'] = np.mean(eval_episode_rewards_history)
combined_stats['eval/Q'] = np.mean(eval_qs)
combined_stats['eval/episodes'] = len(eval_episode_rewards)
def as_scalar(scalar):
"""
Check and return the input if it is a scalar, otherwise raise a ValueError.
"""
"""
Tests for the generic MLEModel
Author: <NAME>
License: Simplified-BSD
"""
from __future__ import division, absolute_import, print_function
import numpy as np
import pandas as pd
import os
import re
import warnings
from statsmodels.tsa.statespace import (sarimax, varmax, kalman_filter,
kalman_smoother)
from statsmodels.tsa.statespace.mlemodel import MLEModel, MLEResultsWrapper
from statsmodels.tsa.statespace.tools import compatibility_mode
from statsmodels.datasets import nile
from numpy.testing import assert_almost_equal, assert_equal, assert_allclose, assert_raises
from nose.exc import SkipTest
from statsmodels.tsa.statespace.tests.results import results_sarimax, results_var_misc
current_path = os.path.dirname(os.path.abspath(__file__))
try:
import matplotlib.pyplot as plt
have_matplotlib = True
except ImportError:
have_matplotlib = False
# Basic kwargs
kwargs = {
'k_states': 1, 'design': [[1]], 'transition': [[1]],
'selection': [[1]], 'state_cov': [[1]],
'initialization': 'approximate_diffuse'
}
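# For reference (a sketch mirroring the calls in the tests below): these kwargs
# describe a minimal local-level state space model, so e.g.
#   MLEModel([1., 2.], **kwargs).filter([])
# runs the Kalman filter with an empty parameter vector.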
def get_dummy_mod(fit=True, pandas=False):
    # This tests time-varying parameter regression in the case where the parameters
    # are not actually time-varying and the regression fit is perfect
endog = np.arange(100)*1.0
exog = 2*endog
if pandas:
index = pd.date_range('1960-01-01', periods=100, freq='MS')
endog = pd.Series(endog, index=index)
exog = pd.Series(exog, index=index)
mod = sarimax.SARIMAX(endog, exog=exog, order=(0,0,0), time_varying_regression=True, mle_regression=False)
if fit:
with warnings.catch_warnings():
warnings.simplefilter("ignore")
res = mod.fit(disp=-1)
else:
res = None
return mod, res
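# e.g. (as used in the tests below): `mod, res = get_dummy_mod()` fits the model,
# while `mod, _ = get_dummy_mod(fit=False)` skips fitting and returns res=None.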
def test_wrapping():
# Test the wrapping of various Representation / KalmanFilter /
# KalmanSmoother methods / attributes
mod, _ = get_dummy_mod(fit=False)
# Test that we can get the design matrix
assert_equal(mod['design', 0, 0], 2.0 * np.arange(100))
# Test that we can set individual elements of the design matrix
mod['design', 0, 0, :] = 2
assert_equal(mod.ssm['design', 0, 0, :], 2)
assert_equal(mod.ssm['design'].shape, (1, 1, 100))
# Test that we can set the entire design matrix
mod['design'] = [[3.]]
assert_equal(mod.ssm['design', 0, 0], 3.)
# (Now it's no longer time-varying, so only 2-dim)
assert_equal(mod.ssm['design'].shape, (1, 1))
# Test that we can change the following properties: loglikelihood_burn,
# initial_variance, tolerance
assert_equal(mod.loglikelihood_burn, 1)
mod.loglikelihood_burn = 0
assert_equal(mod.ssm.loglikelihood_burn, 0)
assert_equal(mod.tolerance, mod.ssm.tolerance)
mod.tolerance = 0.123
assert_equal(mod.ssm.tolerance, 0.123)
assert_equal(mod.initial_variance, 1e10)
mod.initial_variance = 1e12
assert_equal(mod.ssm.initial_variance, 1e12)
# Test that we can use the following wrappers: initialization,
# initialize_known, initialize_stationary, initialize_approximate_diffuse
# Initialization starts off as none
assert_equal(mod.initialization, None)
# Since the SARIMAX model may be fully stationary or may have diffuse
# elements, it uses a custom initialization by default, but it can be
# overridden by users
mod.initialize_state()
# (The default initialization in this case is known because there is a non-
# stationary state corresponding to the time-varying regression parameter)
assert_equal(mod.initialization, 'known')
mod.initialize_approximate_diffuse(1e5)
assert_equal(mod.initialization, 'approximate_diffuse')
assert_equal(mod.ssm._initial_variance, 1e5)
mod.initialize_known([5.], [[40]])
assert_equal(mod.initialization, 'known')
assert_equal(mod.ssm._initial_state, [5.])
assert_equal(mod.ssm._initial_state_cov, [[40]])
mod.initialize_stationary()
assert_equal(mod.initialization, 'stationary')
# Test that we can use the following wrapper methods: set_filter_method,
# set_stability_method, set_conserve_memory, set_smoother_output
# The defaults are as follows:
assert_equal(mod.ssm.filter_method, kalman_filter.FILTER_CONVENTIONAL)
assert_equal(mod.ssm.stability_method, kalman_filter.STABILITY_FORCE_SYMMETRY)
assert_equal(mod.ssm.conserve_memory, kalman_filter.MEMORY_STORE_ALL)
assert_equal(mod.ssm.smoother_output, kalman_smoother.SMOOTHER_ALL)
# Now, create the Cython filter object and assert that they have
# transferred correctly
mod.ssm._initialize_filter()
kf = mod.ssm._kalman_filter
assert_equal(kf.filter_method, kalman_filter.FILTER_CONVENTIONAL)
assert_equal(kf.stability_method, kalman_filter.STABILITY_FORCE_SYMMETRY)
assert_equal(kf.conserve_memory, kalman_filter.MEMORY_STORE_ALL)
# (the smoother object is so far not in Cython, so there is no
# transferring)
# Change the attributes in the model class
if compatibility_mode:
assert_raises(NotImplementedError, mod.set_filter_method, 100)
else:
mod.set_filter_method(100)
mod.set_stability_method(101)
mod.set_conserve_memory(102)
mod.set_smoother_output(103)
# Assert that the changes have occurred in the ssm class
if not compatibility_mode:
assert_equal(mod.ssm.filter_method, 100)
assert_equal(mod.ssm.stability_method, 101)
assert_equal(mod.ssm.conserve_memory, 102)
assert_equal(mod.ssm.smoother_output, 103)
# Assert that the changes have *not yet* occurred in the filter object
assert_equal(kf.filter_method, kalman_filter.FILTER_CONVENTIONAL)
assert_equal(kf.stability_method, kalman_filter.STABILITY_FORCE_SYMMETRY)
assert_equal(kf.conserve_memory, kalman_filter.MEMORY_STORE_ALL)
# Re-initialize the filter object (this would happen automatically anytime
# loglike, filter, etc. were called)
# In this case, an error will be raised since filter_method=100 is not
# valid
# Note: this error is only raised in the compatibility case, since the
# newer filter logic checks for a valid filter mode at a different point
if compatibility_mode:
assert_raises(NotImplementedError, mod.ssm._initialize_filter)
# Now, test the setting of the other two methods by resetting the
# filter method to a valid value
mod.set_filter_method(1)
mod.ssm._initialize_filter()
# Retrieve the new kalman filter object (a new object had to be created
# due to the changing filter method)
kf = mod.ssm._kalman_filter
assert_equal(kf.filter_method, 1)
assert_equal(kf.stability_method, 101)
assert_equal(kf.conserve_memory, 102)
def test_fit_misc():
true = results_sarimax.wpi1_stationary
endog = np.diff(true['data'])[1:]
mod = sarimax.SARIMAX(endog, order=(1,0,1), trend='c')
# Test optim_hessian={'opg','oim','approx'}
with warnings.catch_warnings():
warnings.simplefilter("ignore")
res1 = mod.fit(method='ncg', disp=0, optim_hessian='opg', optim_complex_step=False)
res2 = mod.fit(method='ncg', disp=0, optim_hessian='oim', optim_complex_step=False)
# Check that the Hessians broadly result in the same optimum
assert_allclose(res1.llf, res2.llf, rtol=1e-2)
# Test return_params=True
mod, _ = get_dummy_mod(fit=False)
with warnings.catch_warnings():
warnings.simplefilter("ignore")
res_params = mod.fit(disp=-1, return_params=True)
# 5 digits necessary to accommodate 32-bit numpy / scipy with OpenBLAS 0.2.18
assert_almost_equal(res_params, [0, 0], 5)
def test_score_misc():
mod, res = get_dummy_mod()
# Test that the score function works
mod.score(res.params)
def test_from_formula():
assert_raises(NotImplementedError, lambda: MLEModel.from_formula(1,2,3))
def test_score_analytic_ar1():
# Test the score against the analytic score for an AR(1) model with 2
# observations
# Let endog = [1, 0.5], params=[0, 1]
mod = sarimax.SARIMAX([1, 0.5], order=(1,0,0))
def partial_phi(phi, sigma2):
return -0.5 * (phi**2 + 2*phi*sigma2 - 1) / (sigma2 * (1 - phi**2))
def partial_sigma2(phi, sigma2):
return -0.5 * (2*sigma2 + phi - 1.25) / (sigma2**2)
params = np.r_[0., 2]
# Compute the analytic score
analytic_score = np.r_[
partial_phi(params[0], params[1]),
partial_sigma2(params[0], params[1])]
# Check each of the approximations, transformed parameters
approx_cs = mod.score(params, transformed=True, approx_complex_step=True)
assert_allclose(approx_cs, analytic_score)
approx_fd = mod.score(params, transformed=True, approx_complex_step=False)
assert_allclose(approx_fd, analytic_score, atol=1e-5)
approx_fd_centered = (
mod.score(params, transformed=True, approx_complex_step=False,
approx_centered=True))
    assert_allclose(approx_fd_centered, analytic_score, atol=1e-5)
harvey_cs = mod.score(params, transformed=True, method='harvey',
approx_complex_step=True)
assert_allclose(harvey_cs, analytic_score)
harvey_fd = mod.score(params, transformed=True, method='harvey',
approx_complex_step=False)
assert_allclose(harvey_fd, analytic_score, atol=1e-5)
harvey_fd_centered = mod.score(params, transformed=True, method='harvey',
approx_complex_step=False,
approx_centered=True)
assert_allclose(harvey_fd_centered, analytic_score, atol=1e-5)
# Check the approximations for untransformed parameters. The analytic
# check now comes from chain rule with the analytic derivative of the
# transformation
# if L* is the likelihood evaluated at untransformed parameters and
# L is the likelihood evaluated at transformed parameters, then we have:
# L*(u) = L(t(u))
# and then
# L'*(u) = L'(t(u)) * t'(u)
def partial_transform_phi(phi):
return -1. / (1 + phi**2)**(3./2)
def partial_transform_sigma2(sigma2):
return 2. * sigma2
uparams = mod.untransform_params(params)
analytic_score = np.dot(
np.diag(np.r_[partial_transform_phi(uparams[0]),
partial_transform_sigma2(uparams[1])]),
np.r_[partial_phi(params[0], params[1]),
partial_sigma2(params[0], params[1])])
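    # i.e. the untransformed score is the transformed score premultiplied by the
    # (diagonal) Jacobian of the transformation, per the chain rule sketched above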
approx_cs = mod.score(uparams, transformed=False, approx_complex_step=True)
assert_allclose(approx_cs, analytic_score)
approx_fd = mod.score(uparams, transformed=False,
approx_complex_step=False)
assert_allclose(approx_fd, analytic_score, atol=1e-5)
approx_fd_centered = (
mod.score(uparams, transformed=False, approx_complex_step=False,
approx_centered=True))
    assert_allclose(approx_fd_centered, analytic_score, atol=1e-5)
harvey_cs = mod.score(uparams, transformed=False, method='harvey',
approx_complex_step=True)
assert_allclose(harvey_cs, analytic_score)
harvey_fd = mod.score(uparams, transformed=False, method='harvey',
approx_complex_step=False)
assert_allclose(harvey_fd, analytic_score, atol=1e-5)
harvey_fd_centered = mod.score(uparams, transformed=False, method='harvey',
approx_complex_step=False,
approx_centered=True)
assert_allclose(harvey_fd_centered, analytic_score, atol=1e-5)
# Check the Hessian: these approximations are not very good, particularly
# when phi is close to 0
params = np.r_[0.5, 1.]
def hessian(phi, sigma2):
hessian = np.zeros((2,2))
hessian[0,0] = (-phi**2 - 1) / (phi**2 - 1)**2
hessian[1,0] = hessian[0,1] = -1 / (2 * sigma2**2)
hessian[1,1] = (sigma2 + phi - 1.25) / sigma2**3
return hessian
analytic_hessian = hessian(params[0], params[1])
with warnings.catch_warnings():
warnings.simplefilter("ignore")
assert_allclose(mod._hessian_complex_step(params) * 2,
analytic_hessian, atol=1e-1)
assert_allclose(mod._hessian_finite_difference(params) * 2,
analytic_hessian, atol=1e-1)
def test_cov_params():
mod, res = get_dummy_mod()
# Smoke test for each of the covariance types
with warnings.catch_warnings():
warnings.simplefilter("ignore")
res = mod.fit(res.params, disp=-1, cov_type='none')
assert_equal(res.cov_kwds['description'], 'Covariance matrix not calculated.')
res = mod.fit(res.params, disp=-1, cov_type='approx')
assert_equal(res.cov_type, 'approx')
assert_equal(res.cov_kwds['description'], 'Covariance matrix calculated using numerical (complex-step) differentiation.')
res = mod.fit(res.params, disp=-1, cov_type='oim')
assert_equal(res.cov_type, 'oim')
assert_equal(res.cov_kwds['description'], 'Covariance matrix calculated using the observed information matrix (complex-step) described in Harvey (1989).')
res = mod.fit(res.params, disp=-1, cov_type='opg')
assert_equal(res.cov_type, 'opg')
assert_equal(res.cov_kwds['description'], 'Covariance matrix calculated using the outer product of gradients (complex-step).')
res = mod.fit(res.params, disp=-1, cov_type='robust')
assert_equal(res.cov_type, 'robust')
assert_equal(res.cov_kwds['description'], 'Quasi-maximum likelihood covariance matrix used for robustness to some misspecifications; calculated using the observed information matrix (complex-step) described in Harvey (1989).')
res = mod.fit(res.params, disp=-1, cov_type='robust_oim')
assert_equal(res.cov_type, 'robust_oim')
assert_equal(res.cov_kwds['description'], 'Quasi-maximum likelihood covariance matrix used for robustness to some misspecifications; calculated using the observed information matrix (complex-step) described in Harvey (1989).')
res = mod.fit(res.params, disp=-1, cov_type='robust_approx')
assert_equal(res.cov_type, 'robust_approx')
assert_equal(res.cov_kwds['description'], 'Quasi-maximum likelihood covariance matrix used for robustness to some misspecifications; calculated using numerical (complex-step) differentiation.')
assert_raises(NotImplementedError, mod.fit, res.params, disp=-1, cov_type='invalid_cov_type')
def test_transform():
# The transforms in MLEModel are noops
mod = MLEModel([1,2], **kwargs)
# Test direct transform, untransform
assert_allclose(mod.transform_params([2, 3]), [2, 3])
assert_allclose(mod.untransform_params([2, 3]), [2, 3])
# Smoke test for transformation in `filter`, `update`, `loglike`,
# `loglikeobs`
mod.filter([], transformed=False)
mod.update([], transformed=False)
mod.loglike([], transformed=False)
mod.loglikeobs([], transformed=False)
# Note that mod is an SARIMAX instance, and the two parameters are
# variances
    mod, _ = get_dummy_mod(fit=False)
a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_escalation_chain_by_id_with_http_info(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int id: (required)
:return: object
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_escalation_chain_by_id" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in params or
params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `delete_escalation_chain_by_id`") # noqa: E501
        if 'id' in params and not re.search(r'\d+', params['id'] if type(params['id']) is str else str(params['id'])):  # noqa: E501
            raise ValueError(r"Invalid value for parameter `id` when calling `delete_escalation_chain_by_id`, must conform to the pattern `/\d+/`")  # noqa: E501
collection_formats = {}
path_params = {}
if 'id' in params:
path_params['id'] = params['id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['LMv1'] # noqa: E501
return self.api_client.call_api(
'/setting/alert/chains/{id}', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='object', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def delete_netscan_by_id(self, id, **kwargs): # noqa: E501
"""delete a netscan # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_netscan_by_id(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int id: (required)
:return: object
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.delete_netscan_by_id_with_http_info(id, **kwargs) # noqa: E501
else:
(data) = self.delete_netscan_by_id_with_http_info(id, **kwargs) # noqa: E501
return data
def delete_netscan_by_id_with_http_info(self, id, **kwargs): # noqa: E501
"""delete a netscan # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_netscan_by_id_with_http_info(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int id: (required)
:return: object
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_netscan_by_id" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in params or
params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `delete_netscan_by_id`") # noqa: E501
        if 'id' in params and not re.search(r'\d+', params['id'] if type(params['id']) is str else str(params['id'])):  # noqa: E501
            raise ValueError(r"Invalid value for parameter `id` when calling `delete_netscan_by_id`, must conform to the pattern `/\d+/`")  # noqa: E501
collection_formats = {}
path_params = {}
if 'id' in params:
path_params['id'] = params['id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['LMv1'] # noqa: E501
return self.api_client.call_api(
'/setting/netscans/{id}', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='object', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def delete_ops_note_by_id(self, id, **kwargs): # noqa: E501
"""delete opsnote # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_ops_note_by_id(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str id: (required)
:return: object
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.delete_ops_note_by_id_with_http_info(id, **kwargs) # noqa: E501
else:
(data) = self.delete_ops_note_by_id_with_http_info(id, **kwargs) # noqa: E501
return data
def delete_ops_note_by_id_with_http_info(self, id, **kwargs): # noqa: E501
"""delete opsnote # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_ops_note_by_id_with_http_info(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str id: (required)
:return: object
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_ops_note_by_id" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in params or
params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `delete_ops_note_by_id`") # noqa: E501
        if 'id' in params and not re.search(r'[^\/]+', params['id'] if type(params['id']) is str else str(params['id'])):  # noqa: E501
            raise ValueError(r"Invalid value for parameter `id` when calling `delete_ops_note_by_id`, must conform to the pattern `/[^\/]+/`")  # noqa: E501
collection_formats = {}
path_params = {}
if 'id' in params:
path_params['id'] = params['id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['LMv1'] # noqa: E501
return self.api_client.call_api(
'/setting/opsnotes/{id}', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='object', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def delete_recipient_group_by_id(self, id, **kwargs): # noqa: E501
"""delete recipient group # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_recipient_group_by_id(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int id: (required)
:return: object
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.delete_recipient_group_by_id_with_http_info(id, **kwargs) # noqa: E501
else:
(data) = self.delete_recipient_group_by_id_with_http_info(id, **kwargs) # noqa: E501
return data
def delete_recipient_group_by_id_with_http_info(self, id, **kwargs): # noqa: E501
"""delete recipient group # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_recipient_group_by_id_with_http_info(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int id: (required)
:return: object
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_recipient_group_by_id" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in params or
params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `delete_recipient_group_by_id`") # noqa: E501
        if 'id' in params and not re.search(r'\d+', params['id'] if type(params['id']) is str else str(params['id'])):  # noqa: E501
            raise ValueError(r"Invalid value for parameter `id` when calling `delete_recipient_group_by_id`, must conform to the pattern `/\d+/`")  # noqa: E501
collection_formats = {}
path_params = {}
if 'id' in params:
path_params['id'] = params['id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['LMv1'] # noqa: E501
return self.api_client.call_api(
'/setting/recipientgroups/{id}', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='object', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def delete_report_by_id(self, id, **kwargs): # noqa: E501
"""delete report # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_report_by_id(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int id: (required)
:return: object
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.delete_report_by_id_with_http_info(id, **kwargs) # noqa: E501
else:
(data) = self.delete_report_by_id_with_http_info(id, **kwargs) # noqa: E501
| |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# CairoPlot.py
#
# Copyright (c) 2008 <NAME>
#
# Author: <NAME> <<EMAIL>>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public License
# as published by the Free Software Foundation; either version 2 of
# the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
# USA
#Contributor: <NAME>
#TODO: review BarPlot Code
#TODO: x_label colision problem on Horizontal Bar Plot
#TODO: y_label's eat too much space on HBP
__version__ = 1.1
import cairo, math, random
from Series import Serie, Group, Data
from gtk.gdk import CairoContext
HORZ = 0
VERT = 1
NORM = 2
COLORS = {"red" : (1.0,0.0,0.0,1.0), "lime" : (0.0,1.0,0.0,1.0), "blue" : (0.0,0.0,1.0,1.0),
"maroon" : (0.5,0.0,0.0,1.0), "green" : (0.0,0.5,0.0,1.0), "navy" : (0.0,0.0,0.5,1.0),
"yellow" : (1.0,1.0,0.0,1.0), "magenta" : (1.0,0.0,1.0,1.0), "cyan" : (0.0,1.0,1.0,1.0),
"orange" : (1.0,0.5,0.0,1.0), "white" : (1.0,1.0,1.0,1.0), "black" : (0.0,0.0,0.0,1.0),
"gray" : (0.5,0.5,0.5,1.0), "light_gray" : (0.9,0.9,0.9,1.0),
"transparent" : (0.0,0.0,0.0,0.0)}
THEMES = {"black_red" : [(0.0,0.0,0.0,1.0), (1.0,0.0,0.0,1.0)],
"red_green_blue" : [(1.0,0.0,0.0,1.0), (0.0,1.0,0.0,1.0), (0.0,0.0,1.0,1.0)],
"red_orange_yellow" : [(1.0,0.2,0.0,1.0), (1.0,0.7,0.0,1.0), (1.0,1.0,0.0,1.0)],
"yellow_orange_red" : [(1.0,1.0,0.0,1.0), (1.0,0.7,0.0,1.0), (1.0,0.2,0.0,1.0)],
"rainbow" : [(1.0,0.0,0.0,1.0), (1.0,0.5,0.0,1.0), (1.0,1.0,0.0,1.0), (0.0,1.0,0.0,1.0), (0.0,0.0,1.0,1.0), (0.3, 0.0, 0.5,1.0), (0.5, 0.0, 1.0, 1.0)]}
def colors_from_theme( theme, series_length, mode = 'solid' ):
colors = []
if theme not in THEMES.keys() :
raise Exception, "Theme not defined"
color_steps = THEMES[theme]
n_colors = len(color_steps)
if series_length <= n_colors:
colors = [color + tuple([mode]) for color in color_steps[0:n_colors]]
else:
iterations = [(series_length - n_colors)/(n_colors - 1) for i in color_steps[:-1]]
over_iterations = (series_length - n_colors) % (n_colors - 1)
for i in range(n_colors - 1):
if over_iterations <= 0:
break
iterations[i] += 1
over_iterations -= 1
for index,color in enumerate(color_steps[:-1]):
colors.append(color + tuple([mode]))
if iterations[index] == 0:
continue
next_color = color_steps[index+1]
color_step = ((next_color[0] - color[0])/(iterations[index] + 1),
(next_color[1] - color[1])/(iterations[index] + 1),
(next_color[2] - color[2])/(iterations[index] + 1),
(next_color[3] - color[3])/(iterations[index] + 1))
for i in range( iterations[index] ):
colors.append((color[0] + color_step[0]*(i+1),
color[1] + color_step[1]*(i+1),
color[2] + color_step[2]*(i+1),
color[3] + color_step[3]*(i+1),
mode))
colors.append(color_steps[-1] + tuple([mode]))
return colors
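#Example (illustrative): colors_from_theme("rainbow", 9) returns 9 RGBA+mode tuples
#interpolated between the 7 "rainbow" color steps; an unknown theme name raises an Exception.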
def other_direction(direction):
"explicit is better than implicit"
if direction == HORZ:
return VERT
else:
return HORZ
#Class definition
class Plot(object):
def __init__(self,
surface=None,
data=None,
width=640,
height=480,
background=None,
border = 0,
x_labels = None,
y_labels = None,
series_colors = None):
random.seed(2)
self.create_surface(surface, width, height)
self.dimensions = {}
self.dimensions[HORZ] = width
self.dimensions[VERT] = height
if type(self.surface) is not CairoContext:
self.context = cairo.Context(self.surface)
else:
self.context = self.surface
self.labels={}
self.labels[HORZ] = x_labels
self.labels[VERT] = y_labels
self.load_series(data, x_labels, y_labels, series_colors)
self.font_size = 10
self.set_background (background)
self.border = border
self.borders = {}
self.line_color = (0.5, 0.5, 0.5)
self.line_width = 0.5
self.label_color = (0.0, 0.0, 0.0)
self.grid_color = (0.8, 0.8, 0.8)
def create_surface(self, surface, width=None, height=None):
self.filename = None
if isinstance(surface, cairo.Surface) or type(surface) is CairoContext:
self.surface = surface
return
if not type(surface) in (str, unicode):
raise TypeError("Surface should be either a Cairo surface or a filename, not %s" % surface)
sufix = surface.rsplit(".")[-1].lower()
self.filename = surface
if sufix == "png":
self.surface = cairo.ImageSurface(cairo.FORMAT_ARGB32, width, height)
elif sufix == "ps":
self.surface = cairo.PSSurface(surface, width, height)
elif sufix == "pdf":
self.surface = cairo.PSSurface(surface, width, height)
else:
if sufix != "svg":
self.filename += ".svg"
self.surface = cairo.SVGSurface(self.filename, width, height)
def commit(self):
try:
self.context.show_page()
if self.filename and self.filename.endswith(".png"):
self.surface.write_to_png(self.filename)
elif type(self.surface) is not CairoContext:
self.surface.finish()
except cairo.Error:
pass
def load_series (self, data, x_labels=None, y_labels=None, series_colors=None):
#FIXME: implement Series class for holding series data,
# labels and presentation properties
#data can be a list, a list of lists or a dictionary with
#each item as a labeled data series.
#we should (for the time being) create a list of lists
        #and set labels for the series from the values provided.
self.series_labels = []
self.serie = None
# The pretty way...
#if not isinstance(data, Serie):
# # Not an instance of Series
# self.serie = Serie(data)
#else:
# self.serie = data
#
#self.series_labels = self.serie.get_names()
#TODO: In the next version remove this...
        # The ugly way, just to keep backward compatibility...
if callable(data) or type(data) is list and callable(data[0]): # Lambda or List of lambdas
self.serie = data
self.series_labels = None
elif isinstance(data, Serie): # Instance of Serie
self.serie = data
self.series_labels = data.get_names()
else: # Anything else
self.serie = Serie(data)
self.series_labels = self.serie.get_names()
#TODO: Remove old code
##dictionary
#if hasattr(data, "keys"):
# self.series_labels = data.keys()
# for key in self.series_labels:
# self.data.append(data[key])
##lists of lists:
#elif max([hasattr(item,'__delitem__') for item in data]) :
# self.data = data
# self.series_labels = range(len(data))
##list
#else:
# self.data = [data]
# self.series_labels = None
#TODO: allow user passed series_widths
self.series_widths = [1.0 for group in self.serie]
self.process_colors( series_colors )
def process_colors( self, series_colors, length = None, mode = 'solid' ):
        #series_colors might be None, a theme name, a (theme, mode) pair, or a list of
        #color names / color tuples
if length is None :
length = len( self.serie.to_list() )
#no colors passed
if not series_colors:
#Randomize colors
self.series_colors = [ [random.random() for i in range(3)] + [1.0, mode] for series in range( length ) ]
else:
#Just theme pattern
if not hasattr( series_colors, "__iter__" ):
theme = series_colors
self.series_colors = colors_from_theme( theme.lower(), length )
#Theme pattern and mode
elif not hasattr(series_colors, '__delitem__') and not hasattr( series_colors[0], "__iter__" ):
theme = series_colors[0]
mode = series_colors[1]
self.series_colors = colors_from_theme( theme.lower(), length, mode )
#List
else:
self.series_colors = series_colors
for index, color in enumerate( self.series_colors ):
#element is a color name
if not hasattr(color, "__iter__"):
self.series_colors[index] = COLORS[color.lower()] + tuple([mode])
#element is rgb tuple instead of rgba
elif len( color ) == 3 :
self.series_colors[index] += (1.0,mode)
#element has 4 elements, might be rgba tuple or rgb tuple with mode
elif len( color ) == 4 :
#last element is mode
if not hasattr(color[3], "__iter__"):
self.series_colors[index] += tuple([color[3]])
self.series_colors[index][3] = 1.0
#last element is alpha
else:
self.series_colors[index] += tuple([mode])
def get_width(self):
return self.surface.get_width()
def get_height(self):
return self.surface.get_height()
def set_background(self, background):
if background is None:
self.background = (0.0,0.0,0.0,0.0)
elif type(background) in (cairo.LinearGradient, tuple):
self.background = background
elif not hasattr(background,"__iter__"):
colors = background.split(" ")
if len(colors) == 1 and colors[0] in COLORS:
self.background = COLORS[background]
elif len(colors) > 1:
self.background = cairo.LinearGradient(self.dimensions[HORZ] / 2, 0, self.dimensions[HORZ] / 2, self.dimensions[VERT])
for index,color in enumerate(colors):
self.background.add_color_stop_rgba(float(index)/(len(colors)-1),*COLORS[color])
else:
raise TypeError ("Background should be either cairo.LinearGradient or a 3-tuple, not %s" % type(background))
def render_background(self):
if isinstance(self.background, cairo.LinearGradient):
self.context.set_source(self.background)
else:
self.context.set_source_rgba(*self.background)
self.context.rectangle(0,0, self.dimensions[HORZ], self.dimensions[VERT])
self.context.fill()
def render_bounding_box(self):
self.context.set_source_rgba(*self.line_color)
self.context.set_line_width(self.line_width)
self.context.rectangle(self.border, self.border,
self.dimensions[HORZ] - 2 * self.border,
self.dimensions[VERT] - 2 * self.border)
self.context.stroke()
def render(self):
pass
class ScatterPlot( Plot ):
def __init__(self,
surface=None,
data=None,
errorx=None,
errory=None,
width=640,
height=480,
background=None,
border=0,
axis = False,
dash = False,
discrete = False,
dots = 0,
grid = False,
series_legend = False,
x_labels = None,
y_labels = None,
x_bounds = None,
y_bounds = None,
z_bounds = None,
x_title = None,
y_title = None,
series_colors = None,
circle_colors = None ):
self.bounds = {}
self.bounds[HORZ] = x_bounds
self.bounds[VERT] = y_bounds
self.bounds[NORM] = z_bounds
self.titles = {}
self.titles[HORZ] = x_title
self.titles[VERT] = y_title
self.max_value = {}
self.axis = axis
self.discrete = discrete
self.dots = dots
self.grid = grid
self.series_legend = series_legend
self.variable_radius = False
self.x_label_angle = math.pi / 2.5
self.circle_colors = circle_colors
Plot.__init__(self, surface, data, width, height, background, border, x_labels, y_labels, series_colors)
self.dash = None
if dash:
if hasattr(dash, "keys"):
self.dash = [dash[key] for key in self.series_labels]
elif max([hasattr(item,'__delitem__') for item | |
self.bonus_link and (folder.organization or folder.sponsored_by):
self.bonus_link = False
user.bonus_links = user.bonus_links + 1
self.save(update_fields=['organization', 'bonus_link'])
user.save(update_fields=['bonus_links'])
def guid_as_path(self):
# For a GUID like ABCD-1234, return a path like AB/CD/12.
stripped_guid = re.sub('[^0-9A-Za-z]+', '', self.guid)
guid_parts = [stripped_guid[i:i + 2] for i in range(0, len(stripped_guid), 2)]
return '/'.join(guid_parts[:-1])
def warc_storage_file(self):
return os.path.join(settings.WARC_STORAGE_DIR, self.guid_as_path(), '%s.warc.gz' % self.guid)
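    # e.g. (illustrative): for guid "ABCD-1234", guid_as_path() returns "AB/CD/12" and
    # warc_storage_file() returns os.path.join(settings.WARC_STORAGE_DIR, "AB/CD/12", "ABCD-1234.warc.gz")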
# def get_thumbnail(self, image_data=None):
# if self.thumbnail_status == 'failed' or self.thumbnail_status == 'generating':
# return None
#
# thumbnail_path = os.path.join(settings.THUMBNAIL_STORAGE_PATH, self.guid_as_path(), 'thumbnail.png')
#
# if self.thumbnail_status == 'generated' and default_storage.exists(thumbnail_path):
# return default_storage.open(thumbnail_path)
#
# try:
#
# warc_url = None
# image = None
#
# if image_data:
# image = Image(blob=image_data)
# else:
#
# if self.screenshot_capture and self.screenshot_capture.status == 'success':
# warc_url = self.screenshot_capture.url
# else:
# pdf_capture = self.captures.filter(content_type__istartswith='application/pdf').first()
# if pdf_capture:
# warc_url = pdf_capture.url
#
# if warc_url:
# self.thumbnail_status = 'generating'
# self.save(update_fields=['thumbnail_status'])
#
# headers, data = self.replay_url(warc_url)
# temp_file = tempfile.NamedTemporaryFile(suffix='.' + warc_url.rsplit('.', 1)[-1])
# for chunk in data:
# temp_file.write(chunk)
# temp_file.flush()
# image = Image(filename=temp_file.name + "[0]") # [0] limits ImageMagick to first page of PDF
#
# if image:
# with imagemagick_temp_dir():
# with image as opened_img:
# opened_img.transform(resize='600')
# # opened_img.resize(600,600)
# with Image(width=600, height=600) as dst_image:
# dst_image.composite(opened_img, 0, 0)
# dst_image.compression_quality = 60
# default_storage.store_data_to_file(dst_image.make_blob('png'), thumbnail_path, overwrite=True)
#
# self.thumbnail_status = 'generated'
# self.save(update_fields=['thumbnail_status'])
#
# return default_storage.open(thumbnail_path)
#
# except Exception as e:
# print "Thumbnail generation failed for %s: %s" % (self.guid, e)
#
# self.thumbnail_status = 'failed'
# self.save(update_fields=['thumbnail_status'])
def delete_related_captures(self):
Capture.objects.filter(link_id=self.pk).delete()
def has_capture_job(self):
try:
self.capture_job
except CaptureJob.DoesNotExist:
return False
return True
def mark_capturejob_superseded(self):
try:
job = self.capture_job
job.superseded = True
job.save()
except CaptureJob.DoesNotExist:
pass
@cached_property
def screenshot_capture(self):
return self.captures.filter(role='screenshot').first()
@cached_property
def primary_capture(self):
return self.captures.filter(role='primary').first()
@cached_property
def favicon_capture(self):
return self.captures.filter(role='favicon').first()
def write_uploaded_file(self, uploaded_file, cache_break=False):
"""
Given a file uploaded by a user, create a Capture record and warc.
"""
from api.utils import get_mime_type, mime_type_lookup # local import to avoid circular import
# normalize file name to upload.jpg, upload.png, upload.gif, or upload.pdf
mime_type = get_mime_type(uploaded_file.name)
file_name = 'upload.%s' % mime_type_lookup[mime_type]['new_extension']
warc_url = "file:///%s/%s" % (self.guid, file_name)
# append a random number to warc_url if we're replacing a file, to avoid browser cache
if cache_break:
r = random.SystemRandom()
warc_url += "?version=%s" % (str(r.random()).replace('.', ''))
capture = Capture(link=self,
role='primary',
status='success',
record_type='resource',
                          user_upload=True,
content_type=mime_type,
url=warc_url)
warc_size = [] # pass a mutable container to the context manager, so that it can populate it with the size of the finished warc
with preserve_perma_warc(self.guid, self.creation_timestamp, self.warc_storage_file(), warc_size) as warc:
uploaded_file.file.seek(0)
write_resource_record_from_asset(uploaded_file.file.read(), warc_url, mime_type, warc)
self.warc_size = warc_size[0]
self.save(update_fields=['warc_size'])
capture.save()
def safe_delete_warc(self):
old_name = self.warc_storage_file()
if default_storage.exists(old_name):
new_name = old_name.replace('.warc.gz', '_replaced_%d.warc.gz' % timezone.now().timestamp())
with default_storage.open(old_name) as old_file:
default_storage.store_file(old_file, new_name)
default_storage.delete(old_name)
def accessible_to(self, user):
return user.can_edit(self)
def can_play_back(self):
"""
Reports whether a Perma Link has been successfully captured (or uploaded)
and is ready for playback.
See also /perma/perma_web/static/js/helpers/link.helpers.js
"""
if self.cached_can_play_back is not None:
return self.cached_can_play_back
if self.user_deleted:
return False
successful_metadata = self.has_successful_capture()
# Early Perma Links and direct uploads do not have CaptureJobs;
# if no CaptureJob, judge based on Capture statuses alone;
# otherwise, inspect CaptureJob status
job = None
try:
job = self.capture_job
except CaptureJob.DoesNotExist:
pass
if job and not job.superseded and job.status != 'completed':
successful_metadata = False
if settings.CHECK_WARC_BEFORE_PLAYBACK:
# I assert that the presence of a warc in default_storage means a Link
# can be played back. If there is a disconnect between our metadata and
# the contents of default_storage... something is wrong and needs fixing.
has_warc = default_storage.exists(self.warc_storage_file())
if successful_metadata != has_warc:
logger.error(f"Conflicting metadata about {self.guid}: has_warc={has_warc}, successful_metadata={successful_metadata}")
# Trust our records (the metadata) more than has_warc
return successful_metadata
###
### Methods for playback via Webrecorder
###
@cached_property
def wr_collection_slug(self):
return self.guid.lower()
def wr_iframe_prefix(self, wr_username):
return "{}/{}/{}/".format(settings.PLAYBACK_HOST, wr_username, self.wr_collection_slug)
def init_replay_for_user(self, request):
"""
Set up a Webrecorder collection for playback.
Private Perma Links are uploaded to a private, temporary
collection (unique per visitor and per GUID) protected by
a session cookie (views.common.set_iframe_session_cookie).
Public Perma Links are uploaded to a public, longer-lived
collection belonging to a persistent, Perma-managed WR user
(shared by all visitors, to permit caching and reduce churn).
If the collection already exists, this method is a no-op.
"""
json = {
'title': self.wr_collection_slug,
'external': True
}
if self.is_private:
session_key = 'wr_private_session_cookie'
else:
session_key = 'wr_public_session_cookie'
json['username'] = settings.WR_PERMA_USER
json['password'] = settings.WR_PERMA_PASSWORD
json['public'] = True
# If a visitor has a usable WR session already, reuse it.
# If they don't, WR will start a fresh session and will return
# a new cookie.
logger.info(f"{self.guid}: Getting cookie")
wr_session_cookie = get_wr_session_cookie(request, session_key)
logger.info(f"{self.guid}: Getting session")
response, data = query_wr_api(
method='post',
path='/auth/ensure_login',
cookie=wr_session_cookie,
json=json,
valid_if=lambda code, data: code == 200 and all(key in data for key in {'username', 'coll_empty'})
)
new_session_cookie = response.cookies.get('__wr_sesh')
if new_session_cookie:
wr_session_cookie = new_session_cookie
request.session[session_key + '_timestamp'] = datetime.utcnow().timestamp()
request.session[session_key] = wr_session_cookie
# Store the temp username in the session so that we can
# force the deletion of this WR user in the future
# (e.g. on logout, etc.).
if self.is_private:
request.session['wr_temp_username'] = data['username']
if data['coll_empty']:
logger.info(f"{self.guid}: Uploading to WR for {data['username']}")
try:
self.upload_to_wr(data['username'], wr_session_cookie)
except WebrecorderException:
clear_wr_session(request)
raise
return data['username']
def upload_to_wr(self, wr_username, wr_session_cookie):
warc_path = self.warc_storage_file()
upload_data = None
start_time = time.time()
logger.info(f"{self.guid}: opening warc")
with default_storage.open(warc_path, 'rb') as warc_file:
logger.info(f"{self.guid}: making PUT API call")
_, upload_data = query_wr_api(
method='put',
path='/upload?force-coll={coll}&filename={coll}.warc.gz'.format(coll=self.wr_collection_slug),
data=warc_file,
cookie=wr_session_cookie,
valid_if=lambda code, data: code == 200 and data.get('upload_id')
)
# wait for WR to finish uploading the WARC
while True:
logger.info(f"{self.guid}: Waiting for WR to be ready.")
if time.time() - start_time > settings.WR_REPLAY_UPLOAD_TIMEOUT:
raise WebrecorderException("Upload timed out; check Webrecorder logs.")
_, upload_data = query_wr_api(
method='get',
path='/upload/{upload_id}?user={user}'.format(user=wr_username, upload_id=upload_data.get('upload_id')),
cookie=wr_session_cookie,
valid_if=lambda code, data: code == 200)
if upload_data.get('done'):
break
time.sleep(0.5)
def delete_from_wr(self, request):
"""
In general, it should not be necessary to manually delete
anything from Webrecorder. This utility method is useful
only in the rare case where Webrecorder has an out-of-date
copy of the Perma Link's warc and a user is awaiting a
playback of the up-to-date warc. This should only happen
when a user is "replacing" a capture.
"""
if self.is_private:
user = request.session.get('wr_temp_username')
cookie = request.session.get('wr_private_session_cookie')
response, data = query_wr_api(
method='delete',
path='/collection/{}?user={}'.format(self.wr_collection_slug, user),
cookie=cookie,
valid_if=lambda code, data: code == 200 or code == 404 and data.get('error') in ['no_such_collection', 'no_such_user']
)
else:
response, data = query_wr_api(
method='post',
path='/auth/login',
cookie=None,
json={
'username': settings.WR_PERMA_USER,
'password': settings.WR_PERMA_PASSWORD
},
valid_if=lambda code, data: code == 200
)
cookie = response.cookies.get('__wr_sesh')
response, data = query_wr_api(
method='delete',
path='/collection/{}?user={}'.format(self.wr_collection_slug, settings.WR_PERMA_USER),
cookie=cookie,
valid_if=lambda code, data: code == 200 or code == 404 and data.get('error') == 'no_such_collection'
)
class Capture(models.Model):
link = models.ForeignKey(Link, null=False, related_name='captures', on_delete=models.CASCADE)
role = models.CharField(max_length=10, choices=(('primary','primary'),('screenshot','screenshot'),('favicon','favicon')))
status = models.CharField(max_length=10, choices=(('pending','pending'),('failed','failed'),('success','success')))
url = models.CharField(max_length=2100, blank=True, null=True)
record_type = models.CharField(max_length=10, choices=(
('response','WARC Response record -- recorded from web'),
('resource','WARC Resource record -- file without web headers')))
content_type = models.CharField(max_length=255, null=False, default='', help_text="HTTP Content-type header.")
user_upload = models.BooleanField(default=False, help_text="True if the user uploaded this capture.")
CAN_PLAY_BACK_FILTER = (Q(role="primary") & Q(status="success")) | (Q(role="screenshot") & Q(status="success"))
def __str__(self):
return "%s %s" % (self.role, self.status)
def mime_type(self):
"""
Return normalized mime type from content_type.
Stuff after semicolon is stripped, type is lowercased, and x- prefix is removed.
"""
return self.content_type.split(";", 1)[0].lower().replace('/x-', '/')
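    # e.g. (illustrative): "Application/X-PDF; charset=binary" normalizes to
    # "application/pdf", and "text/html; charset=utf-8" to "text/html".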
def use_sandbox(self):
"""
Whether the iframe we use to display this capture should be sandboxed.
Answer is yes unless we're playing back a PDF, which currently can't
be sandboxed in Chrome.
"""
return not self.mime_type().startswith("application/pdf")
INLINE_TYPES = {'image/jpeg', 'image/gif', 'image/png', 'image/tiff', 'text/html', 'text/plain', 'application/pdf',
'application/xhtml', 'application/xhtml+xml'}
def show_interstitial(self):
"""
Whether we should show an interstitial view/download button instead of showing the content directly.
True unless we recognize the mime type as something that should be shown inline (PDF/HTML/image).
"""
return self.mime_type() not in self.INLINE_TYPES
class CaptureJob(models.Model):
"""
This class tracks capture jobs for purposes of:
(1) sorting the capture queue | |
#Venice, June 15, 2014
#Begin of license
#Copyright (c) 2014 <NAME>
#
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is
#furnished to do so, subject to the following conditions:
#
#1)The above copyright notice and this permission notice shall be included in
#all copies or substantial portions of the Software.
#2)When using this software, in particular for publications, please cite the related paper:
#"<NAME> and <NAME>. MAD: robust image texture analysis for applications in high resolution geomorphometry. Computer & Geosciences (2015), 10.1016/j.cageo.2015.04.003."
#(substitute the DOI with the correct volume and pages numbers when available).
#
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
#THE SOFTWARE.
#End of License
#In this code we define the core functions of MAD based indexes presented
#in the paper "<NAME>., <NAME>.,MAD: robust image texture analysis for applications
#in high resolution geomorphometry. Submitted to Computer & Geosciences, 2014".
#These functions are written to be easy to understand and to modify or re-implement
#in other software. Where relevant, we suggest possible modifications, generalizations
#and special uses of the functions. We also included some "classical" geostatistical
#functions such as the variogram and madogram for comparison purposes (but we use our
#modified sampling approach, i.e. the search window increases with the lag size).
#We avoided being too Pythonic in programming style so as to keep the different steps
#of the algorithms clear. Moreover, we opted to code the program with "static" kernel
#files in order to facilitate their use in other software, i.e. just copy the weights.
#However, these kernels can also be created on the fly for specific needs, and more
#directions can be calculated.
#Important note: in ArcGIS it is not possible to calculate
#the median with focal statistics on a float raster (!!!).
#So, to do that, we need to temporarily convert the float raster to an integer
#using a multiplying factor (1000); see the function "medFloat()".
#These functions make use of the Spatial Analyst extension.
#In any case, these functions can be easily implemented in any software
#that supports custom kernels.
from arcpy.sa import *
###Directional differences
#The basic step for MAD, as well as for other bivariate spatial continuity indexes (variogram, madogram, etc.),
#is to quickly and accurately calculate differences between point pairs. This is accomplished via ad-hoc
#defined kernels (in the kernels folder) that allow, via the focal statistics function, bilinear interpolation
#and differencing of the interpolated points in a single step. We mainly defined kernels for different lag
#distances considering four directions (N-S, SW-NE, W-E, NW-SE). Clearly, other kernels can be defined
#in order to perform the calculation in more directions (if necessary).
#The calculation of directional differences, at least
#for kernels that are not large, is not computationally demanding.
##Core directional differences functions
#This is the core function for performing multiscale directional analysis of surface texture
#Use these directional differences for calculating anisotropy parameters and basic surface texture indexes
def calcDelta(inRaster,kernel):
"""
    Calculate directional differences on a raster "inRaster" using a kernel file
    "kernel". calcDelta returns a raster of directional differences with the same
    resolution as the input raster. "kernel" should be the full path to the kernel file.
"""
myNbrWeight=NbrWeight(kernel)
delta=FocalStatistics(inRaster,myNbrWeight,"SUM","NODATA")
return delta
#End calcDelta.
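#Example (illustrative; the kernel folder and file name below are assumptions):
#   deltaNS = calcDelta(residualDTM, "C:/MADkernels/N1.txt")
#returns a raster of N-S differences at the lag encoded in that kernel file.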
#A special function for playing with increments of order-K
def calcDeltak2(inRaster,kernel):
"""
    This function calculates differences of differences, i.e. increments of order 2, and can be
    easily expanded to higher order k. Applying this function directly to a DTM, without detrending,
    makes it possible to highlight fine-scale morphologies.
"""
deltak2=calcDelta(calcDelta(inRaster,kernel),kernel)
return deltak2
#End calcDeltak2.
#This functions is the most used for deriving directional differences for surface texture
#analysis
#For a given list of kernels for directional differences and a given input raster (likely a residual DTM)
# return a list of rasters with the corresponding directional differences
def calcAllDelta(inRaster,kernels):
"""
    This function calculates, for an input raster "inRaster", all the directional differences
    for the kernels listed in "kernels" (a list of full paths to kernel files).
"""
return [calcDelta(inRaster,X) for X in kernels]
#End calcAllDelta.
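#Example (illustrative; kernel file names are assumptions):
#   kernels = [kernelDir + name + ".txt" for name in ("N1", "NE1", "E1", "SE1")]
#   deltas = calcAllDelta(residualDTM, kernels)
#gives one directional-difference raster per kernel, in the same order as "kernels".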
##End Core directional differences functions
##Special (Absolute) directional differences functions for lags of 1 pixel and 1.4142 pixels.
#These functions are useful for relative roughness calculation and for special applications.
#They are not intended for the calculation of anisotropy parameters.
##Roughness computed with these kernels can also be useful if you need
#to filter out some fine-scale artifacts from the DTM while still maintaining
#high detail.
#Lag 1 pixel
#This function calculates the absolute value of directional differences for a lag of 1 cell.
#Given that for a specific direction we have two opposite directional differences (e.g. in the N-S direction
# we have the difference between the central pixel and the pixel one step to the N, and the difference between
# the central pixel and the pixel one step to the S), we need to average their absolute values to obtain a
#symmetric value to associate with the central pixel and to use for the MAD calculation. So the results of these
# kernels can be used analogously to the output of the basic kernels, but expect some smoothing of roughness.
#This kind of kernel can also be modified to compute non-symmetrical surface texture indexes, i.e. where Index(h) is
#different from Index(-h).
def calcAbsDelta1c(inRaster,kernelDir):
"""
    Function for calculating the absolute value of directional differences for a lag of 1 pixel.
    It takes 2 inputs: inRaster (the input raster) and kernelDir (the directory where kernels are stored).
    The output, even though the absolute value has already been taken, can be used with the MAD functions
    like the output of the basic kernels. We don't suggest using these kernels for the calculation of anisotropy indexes.
These are conceived mainly for the calculation of relative roughness or other specific needs.
"""
asym1c=["N1cAsym","NE1cAsym","E1cAsym","SE1cAsym","S1cAsym","SW1cAsym","W1cAsym","NW1cAsym"]
dirAsym=fullPath(kernelDir,asym1c,".txt")
deltas1c=calcAllDelta(inRaster,dirAsym)
#number of differences
nDeltas=len(deltas1c)
    symAbsDelta1c=[(Abs(deltas1c[i])+Abs(deltas1c[i+nDeltas//2]))/2 for i in range(nDeltas//2)]
return symAbsDelta1c
#End calcAbsDelta1c.
##End lag 1 pixel
#Lag 1.4142 Pixel
#This set of functions is used for calculating directional
#differences using the shortest lag along the diagonals. It is a variant
#for calculating directional differences and can be useful in some circumstances.
#The first step is to use the kernels:
#myKernels=((myWeightDir+"NE1Diag.txt"),(myWeightDir+"SE1Diag.txt"))
#These are 2x2 kernels that calculate differences along the diagonals; the output value is stored on the
#NW corner, but geometrically these differences are representative of the center of the 2x2 kernel.
#These differences are then manipulated to derive cell-centered absolute directional differences.
#Using a 2x2 kernel we can use simple trigonometry to derive directional differences in any direction
#starting from the 2 calculated differences along the diagonals. In this case we consider simply the N-S and E-W directions
#to be compatible with the output of the basic kernels (the value 0.7071 comes from the sine or cosine of 45 degrees).
#It is easy to generalize this function to calculate directional differences in any direction.
#This function can be useful for special needs and possibly for the calculation of relative roughness.
#Also in this case some smoothing of roughness is expected.
def deltaDiag(inRaster,kernels):
"""
Utility function.
Calculates directional differences in four directions (N-S, NE-SW, E-W, SE-NW) using
2x2 kernels calculating differences along diagonals. The value is stored on the NW pixel.
The kernels to be used are "NE1Diag.txt" and "SE1Diag.txt"
"""
myNbrWeight = NbrWeight(kernels[0])
deltaR2NE=FocalStatistics(inRaster,myNbrWeight,"SUM","NODATA")
myNbrWeight = NbrWeight(kernels[1])
deltaR2SE=FocalStatistics(inRaster,myNbrWeight,"SUM","NODATA")
deltaR2N=(-deltaR2SE+deltaR2NE)*0.7071
deltaR2E=(deltaR2SE+deltaR2NE)*0.7071
return deltaR2N,deltaR2NE,deltaR2E,deltaR2SE
#end deltaDiag
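#Generalization sketch (not part of the original workflow): once deltaR2N and deltaR2E are
#available, a difference along any azimuth theta (measured from E towards N) can be
#approximated as deltaTheta = deltaR2E*math.cos(theta) + deltaR2N*math.sin(theta).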
def centerDiag(inDelta,shifted):
"""
Utility function:
    We use the mean of the absolute values of the four deltas to re-center on the cell,
    using the kernel "shifted"; see kernel "shifted3x3.txt".
"""
myNbrWeight = NbrWeight(shifted)
delta=FocalStatistics(Abs(inDelta),myNbrWeight,"SUM","NODATA")/4
return delta
#End centerDiag.
def absDiagCentered(inRaster,kernelDir):
"""
Function for calculating the absolute value of directional differences for a lag of 1.4142 pixels.
It takes 2 inputs: inRaster (the input raster) and kernelDir (the directory where kernels are stored).
The output, even though the absolute value has already been taken, can be used with the MAD functions
in the same way as the output of the basic kernels. We don't suggest the use of these kernels for the calculation of anisotropy indexes.
These are conceived for the calculation of surface roughness, relative roughness and in those situations
where we need to calculate
# -*- coding: utf-8 -*-
#
# K2HR3 OpenStack Notification Listener
#
# Copyright 2018 Yahoo! Japan Corporation.
#
# K2HR3 is K2hdkc based Resource and Roles and policy Rules, gathers
# common management information for the cloud.
# K2HR3 can dynamically manage information as "who", "what", "operate".
# These are stored as roles, resources, policies in K2hdkc, and the
# client system can dynamically read and modify these information.
#
# For the full copyright and license information, please view
# the licenses file that was distributed with this source code.
#
# AUTHOR: <NAME>
# CREATE: Tue Sep 11 2018
# REVISION:
#
"""Sends http requests to the k2hr3 api. Classes in this module are not public."""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from enum import Enum
import json
import logging
import re
import socket
import ssl
import sys
import time
import urllib
import urllib.parse
import urllib.request
from urllib.error import ContentTooShortError, HTTPError, URLError
import uuid
from typing import List, Set, Dict, Tuple, Optional, Union # noqa: pylint: disable=unused-import
from k2hr3_osnl.cfg import K2hr3Conf
from k2hr3_osnl.exceptions import _K2hr3UserAgentError
from k2hr3_osnl.httpresponse import _K2hr3HttpResponse
LOG = logging.getLogger(__name__)
class _AgentError(Enum):
NONE = 1
TEMP = 2
FATAL = 3
class _K2hr3UserAgent:
"""Send a http/https request to the K2hr3 WebAPI."""
def __init__(self, conf: K2hr3Conf) -> None:
"""Initializes attributes.
:param conf: K2hr3Conf object.
:type K2hr3Conf: K2hr3Conf
:raises K2hr3UserAgentError: api_url validation error.
"""
# api_url is validated by this class itself.
if isinstance(conf, K2hr3Conf) is False:
raise _K2hr3UserAgentError(
'conf must be a K2hr3Conf instance, not {}'.format(type(conf)))
try:
_K2hr3UserAgent.validate_url(conf.k2hr3.api_url)
except _K2hr3UserAgentError as error:
raise _K2hr3UserAgentError(
'a valid url is expected, not {}'.format(
conf.k2hr3.api_url)) from error
self._conf = conf
self._url = conf.k2hr3.api_url
# other params validated in oslo_config.
self._retries = conf.k2hr3.max_retries
self._allow_self_signed_cert = conf.k2hr3.allow_self_signed_cert
# init the others.
self._ips = [] # type: List[str]
self._instance_id = ''
self._method = 'DELETE'
self._params = {'extra': 'openstack-auto-v1'}
self._headers = {
'User-Agent':
'Python-k2hr3_ua/{}.{}'.format(sys.version_info[0],
sys.version_info[1])
}
self._response = _K2hr3HttpResponse()
LOG.debug('useragent initialized.')
@property
def headers(self) -> Dict[str, str]:
"""Returns the headers.
:returns: Request headers
:rtype: Dict
"""
return self._headers
@property
def params(self) -> Dict[str, str]:
"""Returns the url params.
:returns: Url params
:rtype: Dict
"""
return self._params
@property
def code(self) -> int:
"""Returns the HTTP status code.
:returns: HTTP status code
:rtype: int
"""
return self._response.code
@property
def error(self) -> str:
"""Returns the error string.
:returns: error string
:rtype: str
"""
return self._response.error
@property
def method(self) -> str:
"""Returns the http request method string.
:returns: http request method string
:rtype: str
"""
return self._method
@method.setter
def method(self, value: str) -> None:
"""Sets the http request method string.
:param value: http request method string
:type value: str
"""
if isinstance(value, str) is True:
LOG.debug('http request method is %s', value)
self._method = value
else:
raise _K2hr3UserAgentError(
'method should be string, not {}'.format(value))
@property
def url(self) -> str: # public.
"""Returns the url string.
:returns: url string
:rtype: str
"""
return self._url
@url.setter
def url(self, value: str) -> None: # public.
"""Sets the url string.
:param value: url string
:type value: str
"""
try:
if _K2hr3UserAgent.validate_url(value):
self._url = value
except _K2hr3UserAgentError:
raise
@staticmethod
def validate_url(value):
"""Returns True if given string is a url.
:param value: a url like string
:type value: str
:returns: True if given string is a url.
:rtype: bool
"""
# scheme
try:
scheme, url_string = value.split('://', maxsplit=1)
except ValueError as error:
raise _K2hr3UserAgentError(
'scheme should contain ://, not {}'.format(value)) from error
if scheme not in ('http', 'https'):
raise _K2hr3UserAgentError(
'scheme should be http or https, not {}'.format(scheme))
else:
LOG.debug('scheme is %s', scheme)
matches = re.match(
r'(?P<domain>[\w|\.]+)?(?P<port>:\d{2,5})?(?P<path>[\w|/]*)?',
url_string)
if matches is None:
raise _K2hr3UserAgentError(
'the argument seems not to be a url string, {}'.format(value))
# domain must be resolved.
domain = matches.group('domain')
if domain is None:
raise _K2hr3UserAgentError(
'url contains no domain, {}'.format(value))
try:
# https://github.com/python/cpython/blob/master/Modules/socketmodule.c#L5729
ipaddress = socket.gethostbyname(domain)
except OSError as error: # resolve failed
raise _K2hr3UserAgentError('unresolved domain, {} {}'.format(
domain, error))
else:
LOG.debug('%s resolved %s', domain, ipaddress)
# path(optional)
if matches.group('path') is None:
raise _K2hr3UserAgentError(
'url contains no path, {}'.format(value))
path = matches.group('path')
# port(optional)
port = matches.group('port')
LOG.debug('url=%s domain=%s port=%s path=%s', value, domain, port,
path)
return True
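# Hedged examples (illustration only; 'localhost' is assumed to resolve on the machine running
# the listener, and the '/v1/role' path merely mirrors the layout of the K2HR3 WebAPI):
#   _K2hr3UserAgent.validate_url('http://localhost/v1/role')  # -> True
#   _K2hr3UserAgent.validate_url('ftp://localhost/v1/role')   # raises _K2hr3UserAgentError (bad scheme)
#   _K2hr3UserAgent.validate_url('localhost/v1/role')         # raises _K2hr3UserAgentError (no '://')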
@property
def ips(self) -> List[str]: # public.
"""Gets the ipaddress list.
:returns: list of ip address strings
:rtype: List[str]
"""
return self._ips
@ips.setter
def ips(self, value: str) -> None: # public.
"""Sets ip or ips to the ipaddress list.
:param value: ipaddress(str or list)
:type value: object
"""
ips = [] # type: List[str]
if isinstance(value, list):
ips += value
elif isinstance(value, str):
ips = [value]
else:
raise _K2hr3UserAgentError(
'ips must be list or str, not {}'.format(value))
for ipaddress in ips:
if isinstance(ipaddress, str) is False:
raise _K2hr3UserAgentError(
'ip must be str, not {}'.format(ipaddress))
try:
# https://github.com/python/cpython/blob/master/Modules/socketmodule.c#L6172
socket.inet_pton(socket.AF_INET, ipaddress)
self._ips += [ipaddress]
except OSError:
LOG.debug('not ip version4 string %s', ipaddress)
try:
socket.inet_pton(socket.AF_INET6, ipaddress)
self._ips += [ipaddress]
except OSError as error:
LOG.error('neither ip version4 nor version6 string %s %s',
ipaddress, error)
raise _K2hr3UserAgentError(
'ip must be valid string, not {} {}'.format(
ipaddress, error))
self._ips = ips # overwrite
LOG.debug('ips=%s', ips)
# Note:
# parameter name is 'host' when calling r3api.
self._params['host'] = json.dumps(self._ips)
@property
def instance_id(self) -> str: # public.
"""Gets the instance id.
:returns: instance id
:rtype: str
"""
return self._instance_id
@instance_id.setter
def instance_id(self, value: str) -> None: # public.
"""Sets instance id.
:param value: instance id
:type value: str
"""
if isinstance(value, str) is False:
raise _K2hr3UserAgentError(
'Please pass UUID as a string, not {}'.format(value))
try:
if value:
uuid.UUID(value)
self._instance_id = value
except ValueError as error:
raise _K2hr3UserAgentError('Invalid UUID, {} {}'.format(
value, error))
# Note:
# parameter name is 'cuk' when calling r3api.
self._params['cuk'] = self._instance_id
@property
def allow_self_signed_cert(self) -> bool: # public.
"""Gets the flag of self signed certificate or not.
:returns: True if allow self signed certificate to use.
:rtype: bool
"""
return self._allow_self_signed_cert
@allow_self_signed_cert.setter
def allow_self_signed_cert(self, value: bool) -> None: # public.
"""Sets the flag of self signed certificate or not.
:param value: True if allow self signed certificate to use.
:type value: bool
"""
if isinstance(value, bool):
self._allow_self_signed_cert = value
else:
raise _K2hr3UserAgentError(
'Boolean value expected, not {}'.format(value))
def _send_internal(self, url: str, params: Dict[str, str],
headers: Dict[str, str],
method: str) -> bool: # non-public.
"""Sends a http request.
:returns: True if success, otherwise False
:rtype: bool
"""
assert [
isinstance(url, str),
isinstance(params, dict),
isinstance(headers, dict),
isinstance(method, str),
]
LOG.debug('_send called by url %s params %s headers %s method %s', url,
params, headers, method)
qstring = urllib.parse.urlencode(params, quote_via=urllib.parse.quote) # type: ignore
req = urllib.request.Request(
'?'.join([url, qstring]), headers=headers, method=method)
if req.type not in ('http', 'https'):
self._response.error = 'http or https, not {}'.format(req.type)
LOG.error(self._response)
return False
agent_error = _AgentError.NONE
try:
ctx = None
if req.type == 'https':
# https://docs.python.jp/3/library/ssl.html#ssl.create_default_context
ctx = ssl.create_default_context()
if self._allow_self_signed_cert:
# https://github.com/python/cpython/blob/master/Lib/ssl.py#L567
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_NONE
with urllib.request.urlopen(
req, timeout=self._conf.k2hr3.timeout_seconds,
context=ctx) as res:
self._response.code = res.getcode()
LOG.debug('code=[%s]\nurl=[%s]\nbody=[%s]\ninfo=[%s]\n',
res.getcode(), res.geturl(), res.read(), res.info())
except HTTPError as error:
LOG.error(
'Could not complete the request. code %s reason %s headers %s',
error.code, error.reason, error.headers)
agent_error = _AgentError.FATAL
except (ContentTooShortError, URLError) as error:
# https://github.com/python/cpython/blob/master/Lib/urllib/error.py#L73
LOG.error('Could not read the server. reason %s', error.reason)
agent_error = _AgentError.FATAL
except (socket.timeout, OSError) as error: # temporary error
LOG.error('error(OSError, socket) %s', error)
agent_error = _AgentError.TEMP
finally:
if agent_error == _AgentError.TEMP:
self._retries -= 1 # decrement the retries value.
if self._retries >= 0:
LOG.warning('sleeping for %s. remaining retries=%s',
self._conf.k2hr3.retry_interval_seconds,
self._retries)
time.sleep(self._conf.k2hr3.retry_interval_seconds)
return self._send_internal(url, params, headers, method) # propagate the result of the retried request
else:
self._response.error = 'reached the max retry count.'
LOG.error(self._response.error)
agent_error = _AgentError.FATAL
if agent_error == _AgentError.NONE:
LOG.debug('no problem.')
return True
LOG.debug('problem %s', self._response)
return False
def send(self) -> bool: # public.
"""Sends a http request.
:returns: True if success, otherwise False
:rtype: bool
"""
assert [
isinstance(self._url, str),
isinstance(self._params, dict),
self._params.get('host', None) is not None,
isinstance(self._params, dict),
self._params.get('cuk', None) is not None,
isinstance(self._params, dict),
self._params.get('extra', None) is not None,
]
return self._send_internal(self._url, self._params, self._headers,
self._method)
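# Hedged usage sketch (illustration only; `conf` is assumed to be a parsed K2hr3Conf whose
# api_url points at a reachable K2HR3 WebAPI, and the ip/uuid values are made up):
#   agent = _K2hr3UserAgent(conf)
#   agent.ips = ['192.168.0.1']                                  # sent as the 'host' parameter
#   agent.instance_id = '12345678-1234-5678-1234-567812345678'   # sent as the 'cuk' parameter
#   ok = agent.send()   # issues DELETE <api_url>?host=...&cuk=...&extra=... and returns True on success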
def __repr__(self):
attrs = []
for attr in ['_url', '_params', '_headers', '_method']:
val = getattr(self, attr)
if val:
attrs.append((attr, repr(val)))
values = ', '.join(['%s=%s' % i for i in attrs])
return '<_K2hr3UserAgent ' + values + '>'
def __str__(self):
attrs = {}
for attr
import numpy as np
from numpy.ma.core import get_data
import scipy
from seascapes_figures.utils import dir_manager, results_manager
import matplotlib.pyplot as plt
import os
class Fitness:
def __init__(self):
return
def gen_fitness_curves(self,pop=None,conc=None):
if pop is None:
pop = self
if conc is None:
conc = np.logspace(-3,5,num=1000)
n_genotype = pop.n_genotype
fc = {}
for g in range(n_genotype):
f = np.zeros(len(conc))
i = 0
for c in conc:
f[i] = self.gen_fitness(g,c) - pop.death_rate
i+=1
fc[g] = f
return fc
# compute fitness given a drug concentration
def gen_fitness(self,genotype,conc,drugless_rate=None,ic50=None,pop=None):
if pop is None:
pop = self
if pop.fitness_data == 'estimate':
fitness = self.sl_to_fitness(genotype,conc)
fitness = fitness*(60**2)
else:
if drugless_rate is None:
drugless_rate = pop.drugless_rates
if ic50 is None:
ic50 = pop.ic50
# logistic equation from Ogbunugafor 2016
conc = conc/10**6 # concentration in uM, convert to M
c = -.6824968 # empirical curve fit
log_eqn = lambda d,i: d/(1+np.exp((i-np.log10(conc))/c))
if conc <= 0:
fitness = drugless_rate[genotype]
else:
fitness = log_eqn(drugless_rate[genotype],ic50[genotype])
return fitness
def logistic_equation(self,conc,drugless_rate,ic50):
"""
Logistic equation from ogbunugafor et al, PLOS CB, 2016
Parameters
----------
drugless_rate : float
Drugless growth rate of genotype.
ic50 : float
ic50 of genotype.
conc : float
Drug concentration (in Molarity (M)).
c : float, optional
Logistic curve steepness parameter. The default is -0.6824968.
Returns
-------
f : float
Replication rate.
"""
c=-0.6824968
conc = conc/10**6
f = drugless_rate/(1+np.exp((ic50-np.log10(conc))/c))
return f
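# Hedged numeric illustration (values invented, not from the paper): with drugless_rate=1.2,
# ic50=-3 (log10 M) and conc=1 uM (converted to 1e-6 M inside the method, so log10(conc)=-6),
# Fitness().logistic_equation(1.0, 1.2, -3) gives 1.2/(1+exp((-3-(-6))/-0.6824968)) ~ 1.19,
# i.e. essentially the drugless rate because the dose is far below the ic50.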
def gen_static_landscape(self,conc,pop=None):
if pop is None:
pop = self
# get final landscape and seascape
landscape = np.zeros(pop.n_genotype)
for kk in range(pop.n_genotype):
landscape[kk] = self.gen_fitness(kk,
pop.static_topo_dose,
pop.drugless_rates,
pop.ic50,
pop=pop)
if min(landscape) == 0:
zero_indx_land = np.argwhere(landscape==0)
landscape_t = np.delete(landscape,zero_indx_land)
min_landscape = min(landscape_t)
else:
min_landscape = min(landscape)
zero_indx_land = []
seascape = np.zeros(pop.n_genotype)
for gen in range(pop.n_genotype):
seascape[gen] = self.gen_fitness(gen,conc,pop.drugless_rates,pop.ic50,pop=pop)
if min(seascape) == 0:
zero_indx_sea = np.argwhere(seascape==0)
seascape_t = np.delete(seascape,zero_indx_sea)
min_seascape = min(seascape_t)
else:
min_seascape = min(seascape)
landscape = landscape - min_landscape
landscape = landscape/max(landscape)
rng = max(seascape) - min_seascape
landscape = landscape*rng + min_seascape
landscape[zero_indx_land] = 0
return landscape
def gen_digital_seascape(self,conc,gen,min_fitness=0,pop=None):
if pop is None:
pop = self
if pop.mic_estimate is not None:
mic = self.est_mic(gen,Kmic=pop.mic_estimate,pop=pop)
else:
mic = self.est_mic(gen,growth_rate=pop.death_rate,pop=pop)
if conc >= mic:
fitness = min_fitness
else:
fitness = pop.drugless_rates[gen]
return fitness
def gen_fit_land(self,conc,mode=None,pop=None):
if pop is None:
pop = self
fit_land = np.zeros(pop.n_genotype)
if pop.fitness_data == 'manual' or mode=='manual':
fit_land = pop.landscape_data/pop.doubling_time
else:
if pop.static_topology:
fit_land = self.gen_static_landscape(conc,pop=pop)
if pop.digital_seascape:
for kk in range(pop.n_genotype):
fit_land[kk] = self.gen_digital_seascape(pop, conc, kk)
else:
for kk in range(pop.n_genotype):
fit_land[kk] = self.gen_fitness(kk,
conc)/pop.doubling_time
return fit_land
# Generate fitness landscape for use in the abm method
# Private to avoid confusion with gen_fit_land
def gen_fl_for_abm(self,conc,counts,pop=None):
if pop is None:
pop = self
fit_land = self.gen_fit_land(conc)
# # takes the landscape at the max dose and scales the replication rate
# # according to drug concentration
# if pop.static_landscape:
# # max_fitness = max(fit_land)
# # fit_land = pop.gen_fit_land(pop.max_dose)
# # fit_land = fit_land*max_fitness/max(fit_land)
# fit_land = gen_fit_land(pop,conc)
# if pop.static_topology:
# fit_land = gen_fit_land(pop,conc)
# Scale division rates based on carrying capacity
if pop.carrying_cap:
division_scale = 1-np.sum(counts)/pop.max_cells
if counts.sum()>pop.max_cells:
division_scale = 0
else:
division_scale = 1
fit_land = fit_land*division_scale
return fit_land
def gen_random_seascape(self,n_allele,
drugless_limits=[1,1.5],
ic50_limits=[-6.5,-1.5]):
n_genotype = 2**n_allele
drugless_rates = np.random.uniform(min(drugless_limits),
max(drugless_limits),
n_genotype)
ic50 = np.random.uniform(min(ic50_limits),
max(ic50_limits),
n_genotype)
return drugless_rates,ic50
def randomize_seascape(self,pop=None,
drugless_limits=[1,1.5],
ic50_limits=[-6.5,-1.5]):
if pop is None:
pop = self
n_genotype = pop.n_genotype
pop.drugless_rates = np.random.uniform(min(drugless_limits),
max(drugless_limits),
n_genotype)
pop.ic50 = np.random.uniform(min(ic50_limits),
max(ic50_limits),
n_genotype)
def fit_logistic_curve(self,xdata,ydata):
from scipy.optimize import curve_fit
popt,var = curve_fit(self.logistic_equation,xdata,ydata)
return popt
def gen_null_seascape(self,conc,pop=None):
if pop is None:
pop = self
landscape = self.gen_fit_land(conc,pop=pop)
start_rates = self.gen_fit_land(10**-3,pop=pop)
final_rates = self.gen_fit_land(10**5,pop=pop)
# mid_rates = gen_fit_land(pop,10**1)
start_points = self.scale_and_ignore_zeros(landscape,start_rates)
end_points = self.scale_and_ignore_zeros(landscape,final_rates)
# mid_points = scale_and_ignore_zeros(landscape,mid_rates)
mid_points = landscape
xdata = [10**-3,conc,10**5]
ic50_new = []
drugless_rates_new = []
for genotype in range(len(landscape)):
ydata = [start_points[genotype],
mid_points[genotype],
end_points[genotype]]
params = self.fit_logistic_curve(xdata,ydata)
ic50_new.append(params[1])
drugless_rates_new.append(params[0])
# find the null landscape drugless rates
drugless_rates_new = self.scale_and_ignore_zeros(drugless_rates_new,
pop.drugless_rates)
return drugless_rates_new,ic50_new
def scale_and_ignore_zeros(self,data,target):
"""
Scale data to range of target while ignoring the zero values in data and
target.
Parameters
----------
data : numpy array
Data to be scaled to the range of target.
target : numpy array
Target data range.
Returns
-------
scaled_data : numpy array
Scaled data to range of target. Zero values in data are set to zero
in scaled_data and zero values in target are not used to calculate
range.
"""
# make sure inputs are numpy arrays
if not isinstance(data,np.ndarray):
data=np.array(data)
if not isinstance(target,np.ndarray):
target=np.array(target)
if min(data) == 0:
zero_indx_data = np.argwhere(data==0)
data_t = np.delete(data,zero_indx_data)
min_data = min(data_t)
else:
min_data = min(data)
zero_indx_data = []
if min(target) == 0:
zero_indx_target = np.argwhere(target==0)
target_t = np.delete(target,zero_indx_target)
min_target = min(target_t)
else:
min_target = min(target)
data = data - min_data
data = data/max(data)
rng = max(target) - min_target
scaled_data = data*rng + min_target
scaled_data[zero_indx_data] = 0
return scaled_data
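# Hedged example (made-up numbers): scaling [0, 1, 2, 3] onto the range of [0, 10, 20, 40]
# ignores both zeros, maps the remaining data span [1, 3] onto [10, 40] and preserves the zero entry:
#   Fitness().scale_and_ignore_zeros([0, 1, 2, 3], [0, 10, 20, 40])  # -> array([ 0., 10., 25., 40.])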
def est_mic(self,gen,Kmic=None,growth_rate=None,pop=None):
"""
est_mic: estimates the mic based on a given Kmic (ratio of growth rate to
max growth rate at MIC) or based on a given growth rate.
Parameters
----------
pop : population class object
gen : int
Genotype under consideration.
Kmic : float, optional
Ratio of growth rate to max growth rate at MIC. The default is None.
growth_rate : float, optional
Growth rate at MIC. The default is None.
Raises
------
Exception
Function requires Kmic OR growth_rate to calculate MIC.
Returns
-------
mic : float
MIC at a given growth rate or Kmic.
"""
if pop is None:
pop = self
if Kmic is None:
if growth_rate is None:
raise Exception('Need a growth rate or Kmic threshold to estimate mic.')
else:
Kmic = growth_rate/pop.drugless_rates[gen]
c=-0.6824968
mic = 10**(pop.ic50[gen]+6 - c*np.log((1/Kmic)-1))
return mic
################################################################
# Code for estimating fitness seascapes from OD data
def get_background_keys(self,df):
"""Gets the dataframe keys for the background assuming a 1-well moat
Args:
df (pandas dataframe): dataframe containing raw OD data
Returns:
list: list of background keys
"""
# row A, row H, col 1, and col 12
k = df.keys()
k = k[2:]
bg_keys = [y for y in k if int(y[1:]) == 1] # col 1
bg_keys = bg_keys + [y for y in k if (int(y[1:]) == 12 and y not in bg_keys)]
bg_keys = bg_keys + [y for y in k if (y[0] == 'A' and y not in bg_keys)]
bg_keys = bg_keys + [y for y in k if (y[0] == 'H' and y not in bg_keys)]
return bg_keys
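# Hedged illustration (assumes a standard 96-well plate labelled A1-H12): the 1-well moat makes
# every well in row A, row H, column 1 or column 12 a background well (36 wells such as 'A1',
# 'A12', 'B1', 'B12', ..., 'H12'), leaving the 60 interior wells B2-G11 as data wells.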
def get_data_keys(self,df):
"""Gets the dataframe keys for the data assuming a 1-well moat
Args:
df (pandas dataframe): dataframe containing raw OD data
Returns:
list: list of keys
"""
bg_keys = self.get_background_keys(df)
data_keys = [k for k in df.keys() if k not in bg_keys]
data_keys = data_keys[2:]
return data_keys
def estimate_background(self,df):
"""Estimates the OD background assuming a 1-well moat
Args:
df (pandas dataframe): dataframe containing raw OD data
Returns:
float: background OD
"""
bg_keys = self.get_background_keys(df)
s = 0
for key in bg_keys:
s += np.average(df[key])
bg = s/len(bg_keys)
return bg
def subtract_background(self,df):
"""Subtracts OD background from raw OD data
Args:
df (pandas dataframe): dataframe containing raw OD data
Returns:
pandas dataframe: background-subtracted data
"""
bg = self.estimate_background(df)
datakeys = df.keys()[2:]
if (df.values < bg).any():
bg = np.min(df.values)
for key in datakeys:
df[key] = df[key] - bg
return df
def get_growth_rate_data(self,data_path):
"""Loads and background subtracts growth rate data
Args:
data_path (str): path to raw csv data
Returns:
pandas dataframe: background-subtracted raw data
"""
df = dir_manager.load_growth_rate_data(data_path)
df = self.subtract_background(df)
return df
def get_growth_rates_from_df(self,df,carrying_cap=None):
"""Estimates the growth rates from timeseries growth data in a dataframe
Arguments:
df: pandas DataFrame
data frame containing growth rate data
Returns:
growth_rates: dict
dictionary
if oute.oute == 0:
depth1.append(moves)
if re.match(r'\+r|r', Wboard.w2a)and Wboard.w4a==''\
and board.s3a=='':
moves = '2a4a'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'\+r|r', Wboard.w2a)and Wboard.w5a==''\
and board.s3a+board.s4a=='':
moves = '2a5a'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'\+r|r', Wboard.w2a)and Wboard.w6a==''\
and board.s3a+board.s4a+board.s5a=='':
moves = '2a6a'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'\+r|r', Wboard.w2a)and Wboard.w7a==''\
and board.s3a+board.s4a+board.s5a+board.s6a=='':
moves = '2a7a'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'\+r|r', Wboard.w2a)and Wboard.w8a==''\
and board.s3a+board.s4a+board.s5a+board.s6a+board.s7a=='':
moves = '2a8a'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'\+r|r', Wboard.w2a)and Wboard.w9a==''\
and board.s3a+board.s4a+board.s5a+board.s6a+board.s7a+board.s8a=='':
moves = '2a9a'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('b',Wboard.w2a)and Wboard.w8g==''\
and board.s3b+board.s4c+board.s5d+board.s6e+board.s7f=='':
moves = '2a8g+'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('b',Wboard.w2a)and Wboard.w9h==''\
and board.s3b+board.s4c+board.s5d+board.s6e+board.s7f+board.s8g=='':
moves = '2a9h+'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'\+b|b', Wboard.w2a)and Wboard.w4c==''\
and board.s3b=='':
moves = '2a4c'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'\+b|b', Wboard.w2a)and Wboard.w5d==''\
and board.s3b+board.s4c=='':
moves = '2a5d'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'\+b|b', Wboard.w2a)and Wboard.w6e==''\
and board.s3b+board.s4c+board.s5d=='':
moves = '2a6e'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'\+b|b', Wboard.w2a)and Wboard.w7f==''\
and board.s3b+board.s4c+board.s5d+board.s6e=='':
moves = '2a7f'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('\+b', Wboard.w2a)and Wboard.w8g==''\
and board.s3b+board.s4c+board.s5d+board.s6e+board.s7f=='':
moves = '2a8g'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('\+b', Wboard.w2a)and Wboard.w9h==''\
and board.s3b+board.s4c+board.s5d+board.s6e+board.s7f+board.s8g=='':
moves = '2a9h'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if Wboard.w3a !='':
if re.match(r'[plsgrk+]', Wboard.w3a)and Wboard.w3b=='':
moves = '3a3b'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'[sgbk+]', Wboard.w3a)and Wboard.w2b=='':
moves = '3a2b'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'[sgbk+]', Wboard.w3a)and Wboard.w4b=='':
moves = '3a4b'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'[grk+]', Wboard.w3a)and Wboard.w2a=='':
moves = '3a2a'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'[grk+]', Wboard.w3a)and Wboard.w4a=='':
moves = '3a4a'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('n', Wboard.w3a)and Wboard.w2c=='':
moves = '3a2c'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('n', Wboard.w3a)and Wboard.w4c=='':
moves = '3a4c'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('\+r', Wboard.w3a)and Wboard.w3i==''\
and board.s3h+board.s3g+board.s3f+board.s3e+board.s3d+board.s3c+board.s3b=='':
moves = '3a3i'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'r|l', Wboard.w3a)and Wboard.w3i==''\
and board.s3h+board.s3g+board.s3f+board.s3e+board.s3d+board.s3c+board.s3b=='':
moves = '3a3i+'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('\+r', Wboard.w3a)and Wboard.w3h==''\
and board.s3g+board.s3f+board.s3e+board.s3d+board.s3c+board.s3b=='':
moves = '3a3h'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'r|l', Wboard.w3a)and Wboard.w3h==''\
and board.s3g+board.s3f+board.s3e+board.s3d+board.s3c+board.s3b=='':
moves = '3a3h+'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'\+r|l', Wboard.w3a)and Wboard.w3g==''\
and board.s3f+board.s3e+board.s3d+board.s3c+board.s3b=='':
moves = '3a3g'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'r|l', Wboard.w3a)and Wboard.w3g==''\
and board.s3f+board.s3e+board.s3d+board.s3c+board.s3b=='':
moves = '3a3g+'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'\+r|r|l', Wboard.w3a)and Wboard.w3f==''\
and board.s3e+board.s3d+board.s3c+board.s3b=='':
moves = '3a3f'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'\+r|r|l', Wboard.w3a)and Wboard.w3e==''\
and board.s3d+board.s3c+board.s3b=='':
moves = '3a3e'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'\+r|r|l', Wboard.w3a)and Wboard.w3d==''\
and board.s3c+board.s3b=='':
moves = '3a3d'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'\+r|r|l', Wboard.w3a)and Wboard.w3c==''\
and board.s3b=='':
moves = '3a3c'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'\+r|r', Wboard.w3a)and Wboard.w1a==''\
and board.s2a=='':
moves = '3a1a'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'\+r|r', Wboard.w3a)and Wboard.w5a==''\
and board.s4a=='':
moves = '3a5a'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'\+r|r', Wboard.w3a)and Wboard.w6a==''\
and board.s4a+board.s5a=='':
moves = '3a6a'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'\+r|r', Wboard.w3a)and Wboard.w7a==''\
and board.s4a+board.s5a+board.s6a=='':
moves = '3a7a'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'\+r|r', Wboard.w3a)and Wboard.w8a==''\
and board.s4a+board.s5a+board.s6a+board.s7a=='':
moves = '3a8a'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'\+r|r', Wboard.w3a)and Wboard.w9a==''\
and board.s4a+board.s5a+board.s6a+board.s7a+board.s8a=='':
moves = '3a9a'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('b',Wboard.w3a)and Wboard.w9g==''\
and board.s4b+board.s5c+board.s6d+board.s7e+board.s8f=='':
moves = '3a9g+'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'\+b|b', Wboard.w3a)and Wboard.w1c==''\
and board.s2b=='':
moves = '3a1c'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'\+b|b', Wboard.w3a)and Wboard.w5c==''\
and board.s4b=='':
moves = '3a5c'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'\+b|b', Wboard.w3a)and Wboard.w6d==''\
and board.s4b+board.s5c=='':
moves = '3a6d'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'\+b|b', Wboard.w3a)and Wboard.w7e==''\
and board.s4b+board.s5c+board.s6d=='':
moves = '3a7e'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'\+b|b', Wboard.w3a)and Wboard.w8f==''\
and board.s4b+board.s5c+board.s6d+board.s7e=='':
moves = '3a8f'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('\+b', Wboard.w3a)and Wboard.w9g==''\
and board.s4b+board.s5c+board.s6d+board.s7e+board.s8f=='':
moves = '3a9g'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if Wboard.w4a !='':
if re.match(r'[plsgrk+]', Wboard.w4a)and Wboard.w4b=='':
moves = '4a4b'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'[sgbk+]', Wboard.w4a)and Wboard.w3b=='':
moves = '4a3b'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'[sgbk+]', Wboard.w4a)and Wboard.w5b=='':
moves = '4a5b'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'[grk+]', Wboard.w4a)and Wboard.w3a=='':
moves = '4a3a'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'[grk+]', Wboard.w4a)and Wboard.w5a=='':
moves = '4a5a'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('n', Wboard.w4a)and Wboard.w3c=='':
moves = '4a3c'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('n', Wboard.w4a)and Wboard.w5c=='':
moves = '4a5c'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('\+r', Wboard.w4a)and Wboard.w4i==''\
and board.s4h+board.s4g+board.s4f+board.s4e+board.s4d+board.s4c+board.s4b=='':
moves = '4a4i'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'r|l', Wboard.w4a)and Wboard.w4i==''\
and board.s4h+board.s4g+board.s4f+board.s4e+board.s4d+board.s4c+board.s4b=='':
moves = '4a4i+'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('\+r', Wboard.w4a)and Wboard.w4h==''\
and board.s4g+board.s4f+board.s4e+board.s4d+board.s4c+board.s4b=='':
moves = '4a4h'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'r|l', Wboard.w4a)and Wboard.w4h==''\
and board.s4g+board.s4f+board.s4e+board.s4d+board.s4c+board.s4b=='':
moves = '4a4h+'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'\+r|l', Wboard.w4a)and Wboard.w4g==''\
and board.s4f+board.s4e+board.s4d+board.s4c+board.s4b=='':
moves = '4a4g'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'r|l', Wboard.w4a)and Wboard.w4g==''\
and board.s4f+board.s4e+board.s4d+board.s4c+board.s4b=='':
moves = '4a4g+'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'\+r|r|l', Wboard.w4a)and Wboard.w4f==''\
and board.s4e+board.s4d+board.s4c+board.s4b=='':
moves = '4a4f'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'\+r|r|l', Wboard.w4a)and Wboard.w4e==''\
and board.s4d+board.s4c+board.s4b=='':
moves = '4a4e'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'\+r|r|l', Wboard.w4a)and Wboard.w4d==''\
and board.s4c+board.s4b=='':
moves = '4a4d'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'\+r|r|l', Wboard.w4a)and Wboard.w4c==''\
and board.s4b=='':
moves = '4a4c'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'\+r|r', Wboard.w4a)and Wboard.w1a==''\
and board.s2a+board.s3a=='':
moves = '4a1a'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'\+r|r', Wboard.w4a)and Wboard.w2a==''\
and board.s3a=='':
moves = '4a2a'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'\+r|r', Wboard.w4a)and Wboard.w6a==''\
and board.s5a=='':
moves = '4a6a'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'\+r|r', Wboard.w4a)and Wboard.w7a==''\
and board.s5a+board.s6a=='':
moves = '4a7a'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'\+r|r', Wboard.w4a)and Wboard.w8a==''\
and board.s5a+board.s6a+board.s7a=='':
moves = '4a8a'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'\+r|r', Wboard.w4a)and Wboard.w9a==''\
and board.s5a+board.s6a+board.s7a+board.s8a=='':
moves = '4a9a'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'\+b|b', Wboard.w4a)and Wboard.w6c==''\
and board.s5b=='':
moves = '4a6c'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'\+b|b', Wboard.w4a)and Wboard.w7d==''\
and board.s5b+board.s6c=='':
moves = '4a7d'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'\+b|b', Wboard.w4a)and Wboard.w8e==''\
and board.s5b+board.s6c+board.s7d=='':
moves = '4a8e'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'\+b|b', Wboard.w4a)and Wboard.w9f==''\
and board.s5b+board.s6c+board.s7d+board.s8e=='':
moves = '4a9f'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'\+b|b', Wboard.w4a)and Wboard.w1d==''\
and board.s2c+board.s3b=='':
moves = '4a1d'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'\+b|b', Wboard.w4a)and Wboard.w2c==''\
and board.s3b=='':
moves = '4a2c'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if Wboard.w5a !='':
if re.match(r'[plsgrk+]', Wboard.w5a)and Wboard.w5b=='':
moves = '5a5b'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'[sgbk+]', Wboard.w5a)and Wboard.w4b=='':
moves = '5a4b'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'[sgbk+]', Wboard.w5a)and Wboard.w6b=='':
moves = '5a6b'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'[grk+]', Wboard.w5a)and Wboard.w4a=='':
moves = '5a4a'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'[grk+]', Wboard.w5a)and Wboard.w6a=='':
moves = '5a6a'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('n', Wboard.w5a)and Wboard.w4c=='':
moves = '5a4c'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('n', Wboard.w5a)and Wboard.w6c=='':
moves = '5a6c'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('\+r', Wboard.w5a)and Wboard.w5i==''\
and board.s5h+board.s5g+board.s5f+board.s5e+board.s5d+board.s5c+board.s5b=='':
moves = '5a5i'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'r|l', Wboard.w5a)and Wboard.w5i==''\
and board.s5h+board.s5g+board.s5f+board.s5e+board.s5d+board.s5c+board.s5b=='':
moves = '5a5i+'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('\+r', Wboard.w5a)and Wboard.w5h==''\
and board.s5g+board.s5f+board.s5e+board.s5d+board.s5c+board.s5b=='':
moves = '5a5h'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'r|l', Wboard.w5a)and Wboard.w5h==''\
and board.s5g+board.s5f+board.s5e+board.s5d+board.s5c+board.s5b=='':
moves = '5a5h+'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'\+r|l', Wboard.w5a)and Wboard.w5g==''\
and board.s5f+board.s5e+board.s5d+board.s5c+board.s5b=='':
moves = '5a5g'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'r|l', Wboard.w5a)and Wboard.w5g==''\
and board.s5f+board.s5e+board.s5d+board.s5c+board.s5b=='':
moves = '5a5g+'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'\+r|r|l', Wboard.w5a)and Wboard.w5f==''\
and board.s5e+board.s5d+board.s5c+board.s5b=='':
moves = '5a5f'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'\+r|r|l', Wboard.w5a)and Wboard.w5e==''\
and board.s5d+board.s5c+board.s5b=='':
moves
# encoding: utf-8
"""This file contains the nelpy io functions.
This entire module will probably be deprecated soon, so don't rely
on any of this to keep working!
Example
=======
datadirs = ['/home/etienne/Dropbox/neoReader/Data',
'C:/etienne/Dropbox/neoReader/Data',
'C:/Users/etien/Dropbox/neoReader/Data',
'/Users/etienne/Dropbox/neoReader/Data',
'D:/Dropbox/neoReader/Data']
fileroot = next( (dir for dir in datadirs if os.path.isdir(dir)), None)
if fileroot is None:
raise FileNotFoundError('datadir not found')
exp_data = dict()
myhmm = dict()
data = dict()
sessiontime = dict()
sessions = ['session1', 'session2']
animal = 'gor01'; month,day = (6,7); sessiontime['session1'] = '11-26-53'; sessiontime['session2'] = '16-40-19' # 91 units, but session one has missing position data
# animal = 'gor01'; month,day = (6,12); sessiontime['session1'] = '15-55-31'; sessiontime['session2'] = '16-53-46' # 55 units
# animal = 'gor01'; month,day = (6,13); sessiontime['session1'] = '14-42-6'; sessiontime['session2'] = '15-22-3'
for session in sessions:
exp_data[session] = dict()
exp_kws = dict(fileroot = fileroot,
animal = animal,
session = sessiontime[session],
month = month,
day = day,
includeUnsortedSpikes=False, # should be True for MUA analysis!
verbose = False)
exp_data[session]['spikes'] = nel.load_hc3_data(datatype='spikes', fs=32552, **exp_kws)
# exp_data[session]['eeg'] = load_data(datatype='eeg', channels=[0,1,2], fs=1252, starttime=0, **exp_kws)
# exp_data[session]['posdf'] = load_data(datatype='pos',**exp_kws)
# exp_data[session]['speed'] = klab.get_smooth_speed(exp_data[session]['posdf'],fs=60,th=8,cutoff=0.5,showfig=False,verbose=False)
# make st1 and st2 explicitly available:
st1 = exp_data['session1']['spikes']
st2 = exp_data['session2']['spikes']
"""
__all__ = ['load_hc3_data']
import os.path
import copy
import pandas as pd
import numpy as np
import re
import sys
from ..core import *
# from mymap import Map
def get_num_electrodes(sessiondir, verbose=False):
numelec = 0
files = [f for f in os.listdir(sessiondir) if (os.path.isfile(os.path.join(sessiondir, f)))]
for ff in files:
try:
found = re.search(r'\.clu\.[0-9]+$', ff).group(0)
if verbose:
print(found)
numelec+=1
except:
found=''
if numelec > 0:
return numelec
else:
raise ValueError('number of electrodes (shanks) could not be established...')
#datatype = ['spikes', 'eeg', 'pos', '?']
def load_hc3_data(fileroot, animal='gor01', year=2006, month=6, day=7, sessiontime='11-26-53', track=None, datatype='spikes', channels='all', fs=None,starttime=0, ctx=None, verbose=False, includeUnsortedSpikes=False, includeWaveforms=False):
fileroot = os.path.normpath(fileroot)
if track is None:
anim_prefix = "{}-{}-{}".format(animal,month,day)
session_prefix = "{}-{}-{}_{}".format(year,month,day,sessiontime)
sessiondir = "{}/{}/{}".format(fileroot, anim_prefix, session_prefix)
else:
anim_prefix = "{}".format(animal)
session_prefix = "{}-{}-{}_{}".format(year,month,str(day).zfill(2),sessiontime)
sessiondir = "{}/{}/{}/{}".format(fileroot, anim_prefix, track, session_prefix) # track can be 'one', 'two', or 'sleep'
if (datatype=='spikes'):
if fs is None:
fs = 32552
# NOTE: st_array[0] always corresponds to unsortable spikes (not mechanical noise). However, when includeUnsortedSpikes==True, then it gets populated
# with spike times; else, it just remains an empty list []
#filename = "{}/{}/{}/{}.clu.1".format(fileroot, anim_prefix, session_prefix, session_prefix)
filename = "{}/{}".format(sessiondir, session_prefix)
#print(filename)
if verbose:
print("Loading data for session in directory '{}'...".format(sessiondir))
num_elec = get_num_electrodes(sessiondir, verbose=verbose)
if verbose:
print('Number of electrode (.clu) files found:', num_elec)
if includeUnsortedSpikes:
st_array = [[]]
wf_array = [[]]
else:
st_array = []
wf_array = []
wfdt = np.dtype('<h', (54,8)) # waveform datatype (.spk files)
# note: using pandas.read_table is orders of magnitude faster here than using numpy.loadtxt
for ele in np.arange(num_elec):
#%time dt1a = np.loadtxt( base_filename1 + '.clu.' + str(ele + 1), skiprows=1,dtype=int)
eudf = pd.read_table( filename + '.clu.' + str(ele + 1), header=None, names='u' ) # read unit numbers within electrode
tsdf = pd.read_table( filename + '.res.' + str(ele + 1), header=None, names='t' ) # read sample numbers for spikes
if includeWaveforms:
waveforms = np.fromfile(filename + '.spk.' + str(ele + 1), dtype=wfdt)
waveforms = np.reshape(waveforms, (int(len(waveforms)/(54*8)), 54, 8))
waveforms = waveforms[:,26,:]
max_units = eudf.u.values[0]
eu = eudf.u.values[1:]
ts = tsdf.t.values
if includeWaveforms:
noise_idx = np.argwhere(eu==0).squeeze()
hash_idx = np.argwhere(eu==1).squeeze()
all_idx = set(np.arange(len(eu)))
discard_idx = set(noise_idx)
# discard units labeled as '0' or '1', as these correspond to mechanical noise and unsortable units
ts = ts[eu!=0] # always discard mechanical noise
eu = eu[eu!=0] # always discard mechanical noise
if not includeUnsortedSpikes:
ts = ts[eu!=1] # potentially discard unsortable spikes
eu = eu[eu!=1] # potentially discard unsortable spikes
if includeWaveforms:
discard_idx = discard_idx.union(set(hash_idx))
if includeWaveforms:
keep_idx = all_idx - discard_idx
waveforms = waveforms[sorted(list(keep_idx))]
for uu in np.arange(max_units-2):
st_array.append(ts[eu==uu+2])
if includeWaveforms:
wf_array.append(waveforms[eu==uu+2])
if includeUnsortedSpikes:
st_array[0] = np.append(st_array[0], ts[eu==1]) # unit 0 now corresponds to unsortable spikes
if includeWaveforms:
if len(wf_array[0]) > 0:
wf_array[0] = np.vstack((wf_array[0],waveforms[eu==1]))
else:
wf_array[0] = waveforms[eu==1]
if verbose:
print('Spike times (in sample numbers) for a total of {} units were read successfully...'.format(len(st_array)))
if includeUnsortedSpikes:
unit_ids = np.arange(len(st_array))
else:
unit_ids = np.arange(1, len(st_array)+1)
# make sure that spike times are sorted! (this is not true for unit 0 of the hc-3 dataset, for example):
for unit, spikes in enumerate(st_array):
order = np.argsort(spikes)
st_array[unit] = spikes[order]/fs
if includeWaveforms:
wf_array[unit] = wf_array[unit][order]
if includeWaveforms:
# spikes = MarkedSpikeTrainArray(st_array, marks=wf_array, label=session_prefix, fs=fs, unit_ids=unit_ids)
spikes = SpikeTrainArray(st_array, label=session_prefix, fs=fs, unit_ids=unit_ids)
spikes._marks = wf_array
else:
spikes = SpikeTrainArray(st_array, label=session_prefix, fs=fs, unit_ids=unit_ids)
# spikes = Map()
# spikes['data'] = st_array
# spikes['num_electrodes'] = num_elec
# spikes['num_units'] = len(st_array)
# spikes['samprate'] = fs
# spikes['session'] = session_prefix
return spikes
## continue from here... we want to keep cells that are inactive in some, but not all environments...
# hence when extracting info, we must take all sessions in a recording day into account, and not just a specific recording session
if (datatype=='clusterless'):
if fs is None:
fs = 32552
filename = "{}/{}".format(sessiondir, session_prefix)
if verbose:
print("Loading data for session in directory '{}'...".format(sessiondir))
num_elec = get_num_electrodes(sessiondir, verbose=verbose)
if verbose:
print('Number of electrode (.clu) files found:', num_elec)
st_array = []
mark_array = []
wfdt = np.dtype('<h', (54,8)) # waveform datatype (.spk files)
# note: using pandas.read_table is orders of magnitude faster here than using numpy.loadtxt
for ele in np.arange(num_elec):
#%time dt1a = np.loadtxt( base_filename1 + '.clu.' + str(ele + 1), skiprows=1,dtype=int)
eudf = pd.read_table( filename + '.clu.' + str(ele + 1), header=None, names='u' ) # read unit numbers within electrode
tsdf = pd.read_table( filename + '.res.' + str(ele + 1), header=None, names='t' ) # read sample numbers for spikes
if verbose:
print(len(tsdf), 'spikes detected')
try:
waveforms = np.fromfile(filename + '.spk.' + str(ele + 1), dtype=wfdt)
except FileNotFoundError:
print('could not find {}, skipping clusterless for this session'.format(filename + '.spk.' + str(ele + 1)))
return None, None
if verbose:
print(len(waveforms)/(54*8), 'waveforms detected')
if len(tsdf) - len(waveforms)/(54*8) != 0:
print('could not find a one-to-one match between spike times and waveforms... skipping clusterless data for {}'.format(filename))
return None, None
waveforms = np.reshape(waveforms, (int(len(waveforms)/(54*8)), 54, 8))
marks = waveforms[:,26,:] # this assumed that spikes have been aligned to have peak at index 26
max_units = eudf.u.values[0]
eu = eudf.u.values[1:]
ts = tsdf.t.values
noise_idx = np.argwhere(eu==0).squeeze()
hash_idx = np.argwhere(eu==1).squeeze()
all_idx = set(np.arange(len(eu)))
discard_idx = set(noise_idx)
# discard units labeled as '0' or '1', as these correspond to mechanical noise and unsortable units
ts = ts[eu!=0] # always discard mechanical noise
eu = eu[eu!=0] # always discard mechanical noise
if not includeUnsortedSpikes:
ts = ts[eu!=1] # potentially discard unsortable spikes
eu = eu[eu!=1] # potentially discard unsortable spikes
discard_idx = discard_idx.union(set(hash_idx))
keep_idx = all_idx - discard_idx
marks = marks[sorted(list(keep_idx))]
st_array.append(ts)
mark_array.append(marks)
if verbose:
print('Spike times and marks for a total of {} electrodes were read successfully...'.format(num_elec))
# make sure that spike times are sorted! (this is not true for unit 0 of the hc-3 dataset, for example):
for ele, spikes in enumerate(st_array):
order = np.argsort(spikes)
st_array[ele] = spikes[order]/fs
mark_array[ele] = mark_array[ele][order]
return np.array(st_array), np.array(mark_array)
elif (datatype=='eeg'):
if fs is None:
fs = 1252
filename = "{}/{}.eeg".format(sessiondir, session_prefix)
if verbose:
print("Loading EEG data from file '{}'".format(filename))
num_elec = get_num_electrodes(sessiondir)
num_channels = num_elec*8
if channels=='all':
channels = list(range(0,num_channels))
if verbose:
print('Number of electrode (.clu) files found: {}, with a total of {} channels'.format(num_elec, num_channels))
dtype = np.dtype([(('ch' + str(ii)), 'i2') for ii in range(num_channels) ])
# read eeg data:
try:
eegdata = np.fromfile(filename, dtype=dtype, count=-1)
except:
print( "Unexpected error:", sys.exc_info()[0] )
raise
num_records = len(eegdata)
if verbose:
print("Successfully read {} samples for each of the {} channel(s).".format(num_records, len(channels)))
data_arr = eegdata.astype(dtype).view('i2')
data_arr = data_arr.reshape(num_records,num_channels)
eeg = AnalogSignalArray(np.transpose(data_arr[:,channels]), fs=fs)
eeg._metahc3channels = channels
eeg._metahc3session = session_prefix
# eeg['data'] = data_arr[:,channels]
# eeg['channels'] = channels
# eeg['samprate'] = fs
# eeg['starttime'] = starttime
# eeg['session'] = session_prefix
return eeg
elif (datatype=='pos'):
if fs is None:
fs = 60
filename = "{}/{}.whl".format(sessiondir, session_prefix)
if verbose:
print("reading {} Hz position data from '{}'".format(fs, filename))
dfwhl = pd.read_table(filename,sep='\t', skiprows=0, names=['x1', 'y1', 'x2', 'y2'] )
dfwhl['x'] = (dfwhl['x1'] + dfwhl['x2']) / 2
dfwhl['y'] = (dfwhl['y1'] + dfwhl['y2']) / 2
dfwhl['fps'] = fs
| |
# encoding: utf-8
# CachedFeed, WillNotGenerateExpensiveFeed
import datetime
import logging
from collections import namedtuple
from sqlalchemy import Column, DateTime, ForeignKey, Index, Integer, Unicode
from sqlalchemy.sql.expression import and_
from ..util.datetime_helpers import utc_now
from ..util.flask_util import OPDSFeedResponse
from . import Base, flush, get_one, get_one_or_create
class CachedFeed(Base):
__tablename__ = "cachedfeeds"
id = Column(Integer, primary_key=True)
# Every feed is associated with a lane. If null, this is a feed
# for a WorkList. If work_id is also null, it's a feed for the
# top-level.
lane_id = Column(Integer, ForeignKey("lanes.id"), nullable=True, index=True)
# Every feed has a timestamp reflecting when it was created.
timestamp = Column(DateTime(timezone=True), nullable=True, index=True)
# A feed is of a certain type--such as 'page' or 'groups'.
type = Column(Unicode, nullable=False)
# A feed associated with a WorkList can have a unique key.
# This should be null if the feed is associated with a Lane.
unique_key = Column(Unicode, nullable=True)
# A 'page' feed is associated with a set of values for the facet
# groups.
facets = Column(Unicode, nullable=True)
# A 'page' feed is associated with a set of values for pagination.
pagination = Column(Unicode, nullable=False)
# The content of the feed.
content = Column(Unicode, nullable=True)
# Every feed is associated with a Library.
library_id = Column(Integer, ForeignKey("libraries.id"), index=True)
# A feed may be associated with a Work.
work_id = Column(Integer, ForeignKey("works.id"), nullable=True, index=True)
# Distinct types of feeds that might be cached.
GROUPS_TYPE = "groups"
PAGE_TYPE = "page"
NAVIGATION_TYPE = "navigation"
CRAWLABLE_TYPE = "crawlable"
RELATED_TYPE = "related"
RECOMMENDATIONS_TYPE = "recommendations"
SERIES_TYPE = "series"
CONTRIBUTOR_TYPE = "contributor"
# Special constants for cache durations.
CACHE_FOREVER = object()
IGNORE_CACHE = object()
log = logging.getLogger("CachedFeed")
@classmethod
def fetch(
cls,
_db,
worklist,
facets,
pagination,
refresher_method,
max_age=None,
raw=False,
**response_kwargs
):
"""Retrieve a cached feed from the database if possible.
Generate it from scratch and store it in the database if
necessary.
Return it in the most useful form to the caller.
:param _db: A database connection.
:param worklist: The WorkList associated with this feed.
:param facets: A Facets object that distinguishes this feed from
others (for instance, by its sort order).
:param pagination: A Pagination object that explains which
page of a larger feed is being cached.
:param refresher_method: A function to call if it turns out
the contents of the feed need to be regenerated. This
function must take no arguments and return an object that
implements __unicode__. (A Unicode string or an OPDSFeed is fine.)
:param max_age: If a cached feed is older than this, it will
be considered stale and regenerated. This may be either a
number of seconds or a timedelta. If no value is
specified, a default value will be calculated based on
WorkList and Facets configuration. Setting this value to
zero will force a refresh.
:param raw: If this is False (the default), a Response ready to be
converted into a Flask Response object will be returned. If this
is True, the CachedFeed object itself will be returned. In most
non-test situations the default is better.
:return: A Response or CachedFeed containing up-to-date content.
"""
# Gather the information necessary to uniquely identify this
# page of this feed.
keys = cls._prepare_keys(_db, worklist, facets, pagination)
# Calculate the maximum cache age, converting from timedelta
# to seconds if necessary.
max_age = cls.max_cache_age(worklist, keys.feed_type, facets, max_age)
# These arguments will probably be passed into get_one, and
# will be passed into get_one_or_create in the event of a cache
# miss.
# TODO: this constraint_clause might not be necessary anymore.
# ISTR it was an attempt to avoid race conditions, and we do a
# better job of that now.
constraint_clause = and_(cls.content != None, cls.timestamp != None)
kwargs = dict(
on_multiple="interchangeable",
constraint=constraint_clause,
type=keys.feed_type,
library=keys.library,
work=keys.work,
lane_id=keys.lane_id,
unique_key=keys.unique_key,
facets=keys.facets_key,
pagination=keys.pagination_key,
)
feed_data = None
if max_age is cls.IGNORE_CACHE or (isinstance(max_age, int) and max_age <= 0):
# Don't even bother checking for a CachedFeed: we're
# just going to replace it.
feed_obj = None
else:
feed_obj = get_one(_db, cls, **kwargs)
should_refresh = cls._should_refresh(feed_obj, max_age)
if should_refresh:
# This is a cache miss. Either feed_obj is None or
# it's no good. We need to generate a new feed.
feed_data = str(refresher_method())
generation_time = utc_now()
if max_age is not cls.IGNORE_CACHE:
# Having gone through all the trouble of generating
# the feed, we want to cache it in the database.
# Since it can take a while to generate a feed, and we know
# that the feed in the database is stale, it's possible that
# another thread _also_ noticed that feed was stale, and
# generated a similar feed while we were working.
#
# To avoid a database error, fetch the feed _again_ from the
# database rather than assuming we have the up-to-date
# object.
feed_obj, is_new = get_one_or_create(_db, cls, **kwargs)
if feed_obj.timestamp is None or feed_obj.timestamp < generation_time:
# Either there was no contention for this object, or there
# was contention but our feed is more up-to-date than
# the other thread(s). Our feed takes priority.
feed_obj.content = feed_data
feed_obj.timestamp = generation_time
elif feed_obj:
feed_data = feed_obj.content
if raw and feed_obj:
return feed_obj
# We have the information necessary to create a useful
# response-type object.
#
# Set some defaults in case the caller didn't pass them in.
if isinstance(max_age, int):
response_kwargs.setdefault("max_age", max_age)
if max_age == cls.IGNORE_CACHE:
# If we were asked to ignore our internal cache, we should
# also tell the client not to store this document in _its_
# internal cache.
response_kwargs["max_age"] = 0
if keys.library and keys.library.has_root_lanes:
# If this feed is associated with a Library that guides
# patrons to different lanes based on their patron type,
# all CachedFeeds need to be treated as private (but
# cacheable) on the client side. Otherwise, a change of
# client credentials might cause a cached representation
# to be reused when it should have been discarded.
#
# TODO: it might be possible to make this decision in a
# more fine-grained way, which would allow intermediaries
# to cache these feeds.
response_kwargs["private"] = True
return OPDSFeedResponse(response=feed_data, **response_kwargs)
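# Hedged usage sketch (illustration only; `worklist`, `facets`, `pagination` and `make_feed`
# are hypothetical objects supplied by the caller, not defined in this module):
#
#   response = CachedFeed.fetch(
#       _db, worklist, facets, pagination,
#       refresher_method=make_feed,   # called only on a cache miss
#       max_age=600,                  # treat cached copies older than ten minutes as stale
#   )
#   # `response` is an OPDSFeedResponse ready to be returned from a Flask view;
#   # pass raw=True to get the CachedFeed row itself instead (mostly useful in tests).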
@classmethod
def feed_type(cls, worklist, facets):
"""Determine the 'type' of the feed.
This may be defined either by `worklist` or by `facets`, with
`facets` taking priority.
:return: A string that can go into cachedfeeds.type.
"""
type = CachedFeed.PAGE_TYPE
if worklist:
type = worklist.CACHED_FEED_TYPE or type
if facets:
type = facets.CACHED_FEED_TYPE or type
return type
@classmethod
def max_cache_age(cls, worklist, type, facets, override=None):
"""Determine the number of seconds that a cached feed
of a given type can remain fresh.
Order of precedence: `override`, `facets`, `worklist`.
:param worklist: A WorkList which may have an opinion on this
topic.
:param type: The type of feed being generated.
:param facets: A faceting object that may have an opinion on this
topic.
:param override: A specific value passed in by the caller. This
may either be a number of seconds or a timedelta.
:return: A number of seconds, or CACHE_FOREVER or IGNORE_CACHE
"""
value = override
if value is None and facets is not None:
value = facets.max_cache_age
if value is None and worklist is not None:
value = worklist.max_cache_age(type)
if value in (cls.CACHE_FOREVER, cls.IGNORE_CACHE):
# Special caching rules apply.
return value
if value is None:
# Assume the feed should not be cached at all.
value = 0
if isinstance(value, datetime.timedelta):
value = value.total_seconds()
return value
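# Hedged example of the precedence (values invented): override=120 wins even if
# facets.max_cache_age is 300 and worklist.max_cache_age(type) is 3600; with no override the
# facets value (300) is used, then the worklist value; if all three are None the feed is
# treated as uncacheable (0 seconds). CACHE_FOREVER / IGNORE_CACHE short-circuit the conversion.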
@classmethod
def _should_refresh(cls, feed_obj, max_age):
"""Should we try to get a new representation of this CachedFeed?
:param feed_obj: A CachedFeed. This may be None, which is why
this is a class method.
:param max_age: Either a number of seconds, or one of the constants
CACHE_FOREVER or IGNORE_CACHE.
"""
should_refresh = False
if feed_obj is None:
# If we didn't find a CachedFeed (maybe because we didn't
# bother looking), we must always refresh.
should_refresh = | |
from itertools import product
from pathlib import Path
from warnings import warn
import numpy as np
import pandas as pd
import sep
from astropy.io import fits
from astropy.modeling.functional_models import Gaussian2D
from astropy.nddata import CCDData, Cutout2D, VarianceUncertainty
from astropy.stats import sigma_clipped_stats
from astropy.time import Time
from astropy.visualization import ImageNormalize, SqrtStretch, ZScaleInterval
from photutils.aperture import CircularAnnulus, CircularAperture
from ysfitsutilpy import (CCDData_astype, add_to_header, bdf_process,
bezel_ccd, errormap, fitsxy2py, imcombine, load_ccd,
medfilt_bpm, propagate_ccdmask, select_fits,
set_ccd_gain_rdnoise, trim_ccd)
from ysphotutilpy import (LinPolOE4, apphot_annulus, ellip_ap_an,
fit_Gaussian2D, sep_back, sep_extract, sky_fit)
from .preproc import (cr_reject_nic, find_fourier_peaks, fit_fourier,
vertical_correct)
from .util import (DARK_PATHS, FLAT_PATHS, FOURIERSECTS, GAIN, MASK_PATHS,
OBJSECTS, OBJSLICES, RDNOISE, USEFUL_KEYS, VERTICALSECTS,
infer_filter, parse_fpath, split_oe, summary_nic)
try:
import fitsio
HAS_FITSIO = True
except ImportError:
warn("python version of fitsio is strongly recommended (https://github.com/esheldon/fitsio/tree/master/)")
HAS_FITSIO = False
__all__ = [
"NICPolDir", "NICPolPhot", "read_pols",
"NICPolImage"]
_PHOT_COLNAMES = ['id', 'xcenter', 'ycenter', 'aparea',
'aperture_sum', 'aperture_sum_err',
'msky', 'nrej', 'nsky', 'ssky',
'source_sum', 'source_sum_err',
'mag', 'merr', 'x_fwhm', 'y_fwhm', 'theta']
MEDCOMB_SC3_F4 = dict(combine='med', reject='sc', sigma=3, maxiters=50, use_cfitsio=True, dtype='float32')
def DONE2HDR(header, verbose):
add_to_header(header, 'h', verbose=verbose, fmt=None, s="{:-^72s}".format(' DONE'))
def FINDFITS(tab, filt, oe, exptime=None, objname=None, loadccd=False, verbose=False):
type_key = ["FILTER", "OERAY"]
type_val = [filt.upper(), oe.lower()]
for k, v in zip(["EXPTIME", "OBJECT"], [exptime, objname]):
if v is not None:
type_key.append(k)
type_val.append(v)
return select_fits(summary_table=tab, type_key=type_key, type_val=type_val,
loadccd=loadccd, verbose=verbose)
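# --- Hedged sketch (illustration only, not part of the original module) ----
# FINDFITS builds parallel key/value lists and silently skips optional
# filters whose value is None.  The generic helper below shows the same
# optional-filter pattern as a plain dictionary, independent of the FITS
# summary-table machinery (the key names mirror those used above).
def _build_filters(filt, oe, exptime=None, objname=None):
    filters = {"FILTER": filt.upper(), "OERAY": oe.lower()}
    for key, value in (("EXPTIME", exptime), ("OBJECT", objname)):
        if value is not None:  # only add filters that were actually requested
            filters[key] = value
    return filters
# _build_filters("J", "O", exptime=10.0) -> {'FILTER': 'J', 'OERAY': 'o', 'EXPTIME': 10.0}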
def SAVENIC(ccd, original_path, object_name, savedir, combined=False, verbose=False):
def _set_fname(original_path, object_name, combined=False):
''' If combined, COUNTER, POL-AGL1, INSROT are meaningless, so remove these.
'''
es = parse_fpath(original_path)
es['OBJECT'] = object_name
if combined:
fstem = '_'.join([es['filt'], es['yyyymmdd'], es['OBJECT'], es['EXPTIME'], es['oe']])
else:
fstem = '_'.join(es.values())
return fstem + '.fits'
ccd.header["OBJECT"] = object_name
newpath = savedir/_set_fname(original_path, object_name, combined=combined)
ccd.write(newpath, overwrite=True)
if verbose:
print(f"Writing FITS to {newpath}")
return ccd, newpath
class NICPolDirMixin:
@staticmethod
def mkpuredark(tab, caldir, filt, oe, exptime, dark_min=10., verbose_combine=False):
_t = Time.now()
dark_fpaths = FINDFITS(tab, filt, oe, exptime=exptime, verbose=False)
comb = imcombine(dark_fpaths, **MEDCOMB_SC3_F4, verbose=verbose_combine)
comb.data[comb.data < dark_min] = 0
add_to_header(comb.header,
'h',
f"Images combined and dark_min {dark_min} applied.",
t_ref=_t,
verbose=verbose_combine)
_, comb_dark_path = SAVENIC(comb, dark_fpaths[0], "DARK", caldir, combined=True)
return comb_dark_path
@staticmethod
def mkskydark_single(fpath, tmpcal, skyname, mflat, dark_min=10., skydark_medfilt_bpm_kw={},
verbose_bdf=False, verbose_combine=False):
_t = Time.now()
sky = load_ccd(fpath)
add_to_header(sky.header, 'h', verbose=verbose_bdf, fmt=None,
s="{:=^72s}".format(' Estimating DARK from this sky frame '))
# Sky / Flat
# flat division to prevent artificial CR rejection:
sky_f = sky.copy()
sky_f.data = sky.data/mflat
sky_f, _ = SAVENIC(sky_f, fpath, f"{skyname}_FLAT", tmpcal)
# (Sky/Flat)_cr
# sky_f_cr = cr_reject_nic(sky_f, crrej_kw=crrej_kw, verbose=verbose_crrej)
sky_f_cr = medfilt_bpm(sky_f, **skydark_medfilt_bpm_kw)
sky_f_cr, _ = SAVENIC(sky_f_cr, fpath, f"{skyname}_FLAT_CRREJ", tmpcal)
# (Sky/Flat)_cr * Flat
sky_cr = sky_f_cr.copy()
sky_cr.data *= mflat
sky_cr, _ = SAVENIC(sky_cr, fpath, f"{skyname}_FLAT_CRREJ_DEFLATTED", tmpcal)
# Dark ~ Sky - (Sky/Flat)_cr * Flat
sky_dark = sky_f_cr.copy() # retain CRREJ header info
sky_dark.data = sky.data - sky_f_cr.data*mflat
sky_dark.data[sky_dark.data < dark_min] = 0
add_to_header(
sky_dark.header, 'h', t_ref=_t, verbose=verbose_combine,
s=("Dark from this frame estimated by sky - (sky/flat)_cr*flat "
+ f"and replaced pixel value < {dark_min} = 0.")
)
add_to_header(
sky_dark.header, 'h', verbose=verbose_bdf, fmt=None,
s="{:=^72s}".format(' Similar SKYDARK frames will be combined ')
)
        _, sky_dark_path = SAVENIC(sky_dark, fpath, f"{skyname}_SKYDARK", tmpcal)  # use the tmpcal argument (no `self` in a staticmethod)
return sky_dark_path
@staticmethod
def mkskydark_comb(fpaths, caldir, skyname, verbose_combine=False, verbose_bdf=False):
comb_sky_dark = imcombine(fpaths, **MEDCOMB_SC3_F4, verbose=verbose_combine)
DONE2HDR(comb_sky_dark.header, verbose_bdf)
_, comb_sky_dark_path = SAVENIC(comb_sky_dark, fpaths[0],
f"{skyname}_SKYDARK", caldir, combined=True)
return comb_sky_dark_path
@staticmethod
def mkfringe_single(fpath, tmpcal, skyname, mdark, mflat, mdarkpath, mflatpath,
verbose_bdf=False, verbose_crrej=False):
sky = load_ccd(fpath)
# give mdark/mflat so that the code does not read the FITS files repeatedly:
add_to_header(sky.header, 'h', verbose=verbose_bdf, fmt=None,
s="{:=^72s}".format(' Estimating FRINGE from this sky frame '))
sky_fringe = bdf_process(sky,
mdark=mdark,
mflat=CCDData(mflat, unit='adu'),
mdarkpath=mdarkpath,
mflatpath=mflatpath,
verbose_bdf=verbose_bdf,
verbose_crrej=verbose_crrej)
add_to_header(sky_fringe.header, 'h', verbose=verbose_bdf, fmt=None,
s="{:=^72s}".format(' Similar SKYFRINGE frames will be combined '))
_, sky_tocomb_path = SAVENIC(sky_fringe, fpath, f"{skyname}_FRINGE", tmpcal)
return sky_tocomb_path
@staticmethod
def mkfringe_comb(fpaths, logpath, skyname, caldir, scale_section, scale='avg',
scale_to_0th=False, fringe_min_value=0.0, verbose_combine=False, verbose_bdf=False):
# FRINGE must not be smoothed as remaining DARK signal may reside here.
comb_sky_fringe = imcombine(fpaths,
**MEDCOMB_SC3_F4,
scale=scale,
scale_to_0th=scale_to_0th,
scale_section=scale_section,
verbose=verbose_combine,
logfile=logpath)
# Normalize using the section
_t = Time.now()
norm_value = np.mean(comb_sky_fringe.data[fitsxy2py(scale_section)])
comb_sky_fringe.data /= norm_value
comb_sky_fringe.data[comb_sky_fringe.data < fringe_min_value] = 0
add_to_header(comb_sky_fringe.header, 'h', t_ref=_t, verbose=verbose_combine,
s="Normalized by mean of NORMSECT (NORMVALU), replaced value < FRINMINV to 0")
comb_sky_fringe.header["NORMSECT"] = scale_section
comb_sky_fringe.header["NORMVALU"] = norm_value
comb_sky_fringe.header["FRINMINV"] = fringe_min_value
DONE2HDR(comb_sky_fringe.header, verbose_bdf)
_, comb_sky_fringe_path = SAVENIC(comb_sky_fringe, fpaths[0],
f"{skyname}_SKYFRINGE", caldir, combined=True)
return comb_sky_fringe_path
@staticmethod
def _set_mflat(summary_flat, filt, oe, flatdir, flat_min_value=0.):
''' Note that it returns ndarray, not CCDData.
'''
if summary_flat is None:
return 1, None
if flatdir is not None:
mflatpath = FINDFITS(summary_flat, filt, oe, verbose=False)
if len(mflatpath) > 1:
raise ValueError(f"More than 1 flat for (FILTER, OERAY) = ({filt}, {oe}) found.")
elif len(mflatpath) == 0:
raise ValueError(f"No FITS file for (FILTER, OERAY) = ({filt}, {oe}) found.")
mflatpath = mflatpath[0]
mflat = load_ccd(mflatpath).data
mflat[mflat < flat_min_value] = 1.
else:
mflatpath = None
mflat = 1
return mflat, mflatpath
@staticmethod
def _set_dark(prefer_skydark, paths_skydark, paths_puredark, objname, filt, oe, exptime, verbose):
if prefer_skydark:
try:
mdarkpath = paths_skydark[(f"{objname}_sky", filt, oe, exptime)]
mdark = load_ccd(mdarkpath)
except (KeyError, IndexError, FileNotFoundError):
if verbose:
print(f"prefer_skydark but skydark for ({objname}_sky, {filt}, {oe}, "
+ f"{exptime}) not found. Trying to use pure dark.")
try:
mdarkpath = paths_puredark[(filt, oe, exptime)]
mdark = load_ccd(mdarkpath)
except (KeyError, IndexError, FileNotFoundError):
mdarkpath = None
mdark = None
if verbose:
print("\nNo dark file found. Turning off dark subtraction.")
else:
try:
mdarkpath = paths_puredark[(filt, oe, exptime)]
mdark = load_ccd(mdarkpath)
except (KeyError, IndexError, FileNotFoundError):
if verbose:
print(f"Pure dark for ({filt}, {oe}, {exptime}) not found. "
+ f"Trying to use SKYDARK of ({objname}_sky, {filt}, {oe}, {exptime})",
end='... ')
try:
mdarkpath = paths_skydark[(f"{objname}_sky", filt, oe, exptime)]
mdark = load_ccd(mdarkpath)
if verbose:
print("Loaded successfully.")
except (KeyError, IndexError, FileNotFoundError):
mdarkpath = None
mdark = None
if verbose:
print("No dark file found. Turning off dark subtraction.")
return mdark, mdarkpath
@staticmethod
def _set_fringe(paths_skyfringe, objname, filt, oe, exptime, verbose):
try:
mfringepath = paths_skyfringe[(f"{objname}_sky", filt, oe, exptime)]
mfringe = load_ccd(mfringepath)
except (KeyError, IndexError, FileNotFoundError):
mfringepath = None
mfringe = None
if verbose:
                print("No fringe file found. Turning off fringe subtraction.")
return mfringe, mfringepath
@staticmethod
def _find_obj(arr, var,
thresh_tests=[30, 20, 10, 6, 5, 4, 3], bezel_x=(30, 30), bezel_y=(180, 120),
box_size=(64, 64), filter_size=(12, 12), deblend_cont=1,
minarea=314,
**extract_kw):
"""
Note
----
This includes ``sep``'s ``extract`` and ``background``.
Equivalent processes in photutils may include ``detect_sources``
and ``source_properties``, and ``Background2D``, respectively.
Parameters
----------
        thresh_tests : float or list of float, optional.
            The S/N thresholds to try, in descending order, until at least
            one object is detected. They are not absolute pixel values
            because internally ``self.err_o`` and ``self.err_e`` will be
            used.
bezel_x, bezel_y : int, float, list of such, optional.
The x and y bezels, in ``[lower, upper]`` convention.
box_size : int or array-like (int) optional.
            The background smoothing box size. Default is ``(64, 64)``
for NIC. **Note**: If array-like, order must be ``[height,
width]``, i.e., y and x size.
filter_size : int or array-like (int) optional.
The 2D median filter size. Default is ``(12, 12)`` for NIC.
**Note**: If array-like, order must be ``[height, width]``,
i.e., y and x size.
minarea : int, optional
            Minimum number of pixels required for an object. Default is
            314 (roughly the area of a circle of radius 10 pixels) for NIC.
deblend_cont : float, optional
Minimum contrast ratio used for object deblending. To
entirely disable deblending, set to 1.0.
# gauss_fbox : int, float, array-like of such, optional.
# The fitting box size to fit a Gaussian2D function to the
# objects found by ``sep``. This is done to automatically set
# aperture sizes of the object.
Returns
-------
bkg, obj, segm
"""
bkg_kw = dict(maskthresh=0.0, filter_threshold=0.0, box_size=box_size, filter_size=filter_size)
bkg = sep_back(arr, **bkg_kw)
sepv = sep.__version__
s_bkg = f"Background estimated from sep (v {sepv}) with {bkg_kw}."
thresh_tests = np.sort(np.atleast_1d(thresh_tests))[::-1]
for thresh in thresh_tests:
ext_kw = dict(thresh=thresh, minarea=minarea, deblend_cont=deblend_cont,
bezel_x=bezel_x, bezel_y=bezel_y, **extract_kw)
obj, seg = sep_extract(arr, bkg=bkg, var=var, **ext_kw)
nobj = len(obj)
if nobj < 1:
continue
else:
s_obj = f"Objects found from sep (v {sepv}) with {ext_kw}."
break
found = nobj >= 1
if not found:
s_obj = f"NO object found from sep (v {sepv}) with {ext_kw}."
return bkg, obj, seg, s_bkg, s_obj, found
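# --- Hedged sketch (illustration only, not part of the original module) ----
# _find_obj above retries sep_extract with progressively lower S/N thresholds
# until at least one source is detected.  The helper below isolates that
# retry pattern with a hypothetical `detect` callable, so the control flow
# can be exercised without sep or real frames.
def _detect_with_falling_threshold(detect, thresh_tests=(30, 20, 10, 6, 5, 4, 3)):
    """Return (objects, used_threshold, found) for the first threshold that
    yields at least one detection; thresholds are tried in descending order."""
    thresholds = np.sort(np.atleast_1d(thresh_tests))[::-1]
    objects, used = [], None
    for thresh in thresholds:
        objects = detect(thresh)
        used = thresh
        if len(objects) >= 1:
            break
    return objects, used, len(objects) >= 1
# _detect_with_falling_threshold(lambda t: [1, 2] if t <= 5 else []) -> ([1, 2], 5, True)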
class NICPolDir(NICPolDirMixin):
def __init__(self, location, rawdir="raw", caldir="calib", tmpcal="tmp_calib", tmpred="tmp_reduc",
flatdir=None, verbose=False):
self.location = Path(location)
if rawdir is None:
self.rawdir = self.location
else:
self.rawdir = self.location/rawdir
if not self.rawdir.exists():
raise FileNotFoundError("Raw data directory not found.")
self.caldir = self.location/caldir
self.tmpcal = self.location/tmpcal
self.tmpred = self.location/tmpred
is not None:
pulumi.set(__self__, "node_size", node_size)
if node_size_family is not None:
pulumi.set(__self__, "node_size_family", node_size_family)
if pool_name is not None:
pulumi.set(__self__, "pool_name", pool_name)
if resource_group is not None:
pulumi.set(__self__, "resource_group", resource_group)
if spark_version is not None:
pulumi.set(__self__, "spark_version", spark_version)
if subscription_id is not None:
pulumi.set(__self__, "subscription_id", subscription_id)
if workspace_name is not None:
pulumi.set(__self__, "workspace_name", workspace_name)
@property
@pulumi.getter(name="autoPauseProperties")
def auto_pause_properties(self) -> Optional[pulumi.Input['AutoPausePropertiesArgs']]:
"""
Auto pause properties.
"""
return pulumi.get(self, "auto_pause_properties")
@auto_pause_properties.setter
def auto_pause_properties(self, value: Optional[pulumi.Input['AutoPausePropertiesArgs']]):
pulumi.set(self, "auto_pause_properties", value)
@property
@pulumi.getter(name="autoScaleProperties")
def auto_scale_properties(self) -> Optional[pulumi.Input['AutoScalePropertiesArgs']]:
"""
Auto scale properties.
"""
return pulumi.get(self, "auto_scale_properties")
@auto_scale_properties.setter
def auto_scale_properties(self, value: Optional[pulumi.Input['AutoScalePropertiesArgs']]):
pulumi.set(self, "auto_scale_properties", value)
@property
@pulumi.getter(name="nodeCount")
def node_count(self) -> Optional[pulumi.Input[int]]:
"""
The number of compute nodes currently assigned to the compute.
"""
return pulumi.get(self, "node_count")
@node_count.setter
def node_count(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "node_count", value)
@property
@pulumi.getter(name="nodeSize")
def node_size(self) -> Optional[pulumi.Input[str]]:
"""
Node size.
"""
return pulumi.get(self, "node_size")
@node_size.setter
def node_size(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "node_size", value)
@property
@pulumi.getter(name="nodeSizeFamily")
def node_size_family(self) -> Optional[pulumi.Input[str]]:
"""
Node size family.
"""
return pulumi.get(self, "node_size_family")
@node_size_family.setter
def node_size_family(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "node_size_family", value)
@property
@pulumi.getter(name="poolName")
def pool_name(self) -> Optional[pulumi.Input[str]]:
"""
Pool name.
"""
return pulumi.get(self, "pool_name")
@pool_name.setter
def pool_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "pool_name", value)
@property
@pulumi.getter(name="resourceGroup")
def resource_group(self) -> Optional[pulumi.Input[str]]:
"""
Name of the resource group in which workspace is located.
"""
return pulumi.get(self, "resource_group")
@resource_group.setter
def resource_group(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "resource_group", value)
@property
@pulumi.getter(name="sparkVersion")
def spark_version(self) -> Optional[pulumi.Input[str]]:
"""
Spark version.
"""
return pulumi.get(self, "spark_version")
@spark_version.setter
def spark_version(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "spark_version", value)
@property
@pulumi.getter(name="subscriptionId")
def subscription_id(self) -> Optional[pulumi.Input[str]]:
"""
Azure subscription identifier.
"""
return pulumi.get(self, "subscription_id")
@subscription_id.setter
def subscription_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "subscription_id", value)
@property
@pulumi.getter(name="workspaceName")
def workspace_name(self) -> Optional[pulumi.Input[str]]:
"""
Name of Azure Machine Learning workspace.
"""
return pulumi.get(self, "workspace_name")
@workspace_name.setter
def workspace_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "workspace_name", value)
@pulumi.input_type
class SynapseSparkArgs:
def __init__(__self__, *,
compute_type: pulumi.Input[str],
compute_location: Optional[pulumi.Input[str]] = None,
description: Optional[pulumi.Input[str]] = None,
disable_local_auth: Optional[pulumi.Input[bool]] = None,
properties: Optional[pulumi.Input['SynapseSparkPoolPropertiesPropertiesArgs']] = None,
resource_id: Optional[pulumi.Input[str]] = None):
"""
A SynapseSpark compute.
:param pulumi.Input[str] compute_type: The type of compute
Expected value is 'SynapseSpark'.
:param pulumi.Input[str] compute_location: Location for the underlying compute
:param pulumi.Input[str] description: The description of the Machine Learning compute.
:param pulumi.Input[bool] disable_local_auth: Opt-out of local authentication and ensure customers can use only MSI and AAD exclusively for authentication.
        :param pulumi.Input['SynapseSparkPoolPropertiesPropertiesArgs'] properties: Synapse Spark pool properties
:param pulumi.Input[str] resource_id: ARM resource id of the underlying compute
"""
pulumi.set(__self__, "compute_type", 'SynapseSpark')
if compute_location is not None:
pulumi.set(__self__, "compute_location", compute_location)
if description is not None:
pulumi.set(__self__, "description", description)
if disable_local_auth is not None:
pulumi.set(__self__, "disable_local_auth", disable_local_auth)
if properties is not None:
pulumi.set(__self__, "properties", properties)
if resource_id is not None:
pulumi.set(__self__, "resource_id", resource_id)
@property
@pulumi.getter(name="computeType")
def compute_type(self) -> pulumi.Input[str]:
"""
The type of compute
Expected value is 'SynapseSpark'.
"""
return pulumi.get(self, "compute_type")
@compute_type.setter
def compute_type(self, value: pulumi.Input[str]):
pulumi.set(self, "compute_type", value)
@property
@pulumi.getter(name="computeLocation")
def compute_location(self) -> Optional[pulumi.Input[str]]:
"""
Location for the underlying compute
"""
return pulumi.get(self, "compute_location")
@compute_location.setter
def compute_location(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "compute_location", value)
@property
@pulumi.getter
def description(self) -> Optional[pulumi.Input[str]]:
"""
The description of the Machine Learning compute.
"""
return pulumi.get(self, "description")
@description.setter
def description(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "description", value)
@property
@pulumi.getter(name="disableLocalAuth")
def disable_local_auth(self) -> Optional[pulumi.Input[bool]]:
"""
Opt-out of local authentication and ensure customers can use only MSI and AAD exclusively for authentication.
"""
return pulumi.get(self, "disable_local_auth")
@disable_local_auth.setter
def disable_local_auth(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "disable_local_auth", value)
@property
@pulumi.getter
def properties(self) -> Optional[pulumi.Input['SynapseSparkPoolPropertiesPropertiesArgs']]:
"""
        Synapse Spark pool properties
"""
return pulumi.get(self, "properties")
@properties.setter
def properties(self, value: Optional[pulumi.Input['SynapseSparkPoolPropertiesPropertiesArgs']]):
pulumi.set(self, "properties", value)
@property
@pulumi.getter(name="resourceId")
def resource_id(self) -> Optional[pulumi.Input[str]]:
"""
ARM resource id of the underlying compute
"""
return pulumi.get(self, "resource_id")
@resource_id.setter
def resource_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "resource_id", value)
@pulumi.input_type
class TensorFlowArgs:
def __init__(__self__, *,
distribution_type: pulumi.Input[str],
parameter_server_count: Optional[pulumi.Input[int]] = None,
worker_count: Optional[pulumi.Input[int]] = None):
"""
TensorFlow distribution configuration.
:param pulumi.Input[str] distribution_type: Enum to determine the job distribution type.
Expected value is 'TensorFlow'.
:param pulumi.Input[int] parameter_server_count: Number of parameter server tasks.
:param pulumi.Input[int] worker_count: Number of workers. Overwrites the node count in compute binding.
"""
pulumi.set(__self__, "distribution_type", 'TensorFlow')
if parameter_server_count is not None:
pulumi.set(__self__, "parameter_server_count", parameter_server_count)
if worker_count is not None:
pulumi.set(__self__, "worker_count", worker_count)
@property
@pulumi.getter(name="distributionType")
def distribution_type(self) -> pulumi.Input[str]:
"""
Enum to determine the job distribution type.
Expected value is 'TensorFlow'.
"""
return pulumi.get(self, "distribution_type")
@distribution_type.setter
def distribution_type(self, value: pulumi.Input[str]):
pulumi.set(self, "distribution_type", value)
@property
@pulumi.getter(name="parameterServerCount")
def parameter_server_count(self) -> Optional[pulumi.Input[int]]:
"""
Number of parameter server tasks.
"""
return pulumi.get(self, "parameter_server_count")
@parameter_server_count.setter
def parameter_server_count(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "parameter_server_count", value)
@property
@pulumi.getter(name="workerCount")
def worker_count(self) -> Optional[pulumi.Input[int]]:
"""
Number of workers. Overwrites the node count in compute binding.
"""
return pulumi.get(self, "worker_count")
@worker_count.setter
def worker_count(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "worker_count", value)
@pulumi.input_type
class TrialComponentArgs:
def __init__(__self__, *,
command: pulumi.Input[str],
code_id: Optional[pulumi.Input[str]] = None,
distribution: Optional[pulumi.Input[Union['MpiArgs', 'PyTorchArgs', 'TensorFlowArgs']]] = None,
environment_id: Optional[pulumi.Input[str]] = None,
environment_variables: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
input_data_bindings: Optional[pulumi.Input[Mapping[str, pulumi.Input['InputDataBindingArgs']]]] = None,
output_data_bindings: Optional[pulumi.Input[Mapping[str, pulumi.Input['OutputDataBindingArgs']]]] = None,
timeout: Optional[pulumi.Input[str]] = None):
"""
Trial component definition.
        :param pulumi.Input[str] command: The command to execute on startup of the job. e.g. "python train.py"
:param pulumi.Input[str] code_id: ARM resource ID of the code asset.
:param pulumi.Input[Union['MpiArgs', 'PyTorchArgs', 'TensorFlowArgs']] distribution: Distribution configuration of the job. If set, this should be one of Mpi, Tensorflow, PyTorch, or null.
:param pulumi.Input[str] environment_id: The ARM resource ID of the Environment specification for the job.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] environment_variables: Environment variables included in the job.
:param pulumi.Input[Mapping[str, pulumi.Input['InputDataBindingArgs']]] input_data_bindings: Mapping of input data bindings used in the job.
:param pulumi.Input[Mapping[str, pulumi.Input['OutputDataBindingArgs']]] output_data_bindings: Mapping of output data bindings used in the job.
:param pulumi.Input[str] timeout: The max run duration in ISO 8601 format, after which the trial component will be cancelled.
Only supports duration with precision as low as Seconds.
"""
pulumi.set(__self__, "command", command)
if code_id is not None:
pulumi.set(__self__, "code_id", code_id)
if distribution is not None:
pulumi.set(__self__, "distribution", distribution)
if environment_id is not None:
pulumi.set(__self__, "environment_id", environment_id)
if environment_variables is not None:
pulumi.set(__self__, "environment_variables", environment_variables)
if input_data_bindings is not None:
pulumi.set(__self__, "input_data_bindings", input_data_bindings)
if output_data_bindings is not None:
pulumi.set(__self__, "output_data_bindings", output_data_bindings)
if timeout is not None:
pulumi.set(__self__, "timeout", timeout)
@property
@pulumi.getter
def command(self) -> pulumi.Input[str]:
"""
        The command to execute on startup of the job. e.g. "python train.py"
"""
return pulumi.get(self, "command")
@command.setter
def command(self, value: pulumi.Input[str]):
pulumi.set(self, "command", value)
@property
@pulumi.getter(name="codeId")
def code_id(self) -> Optional[pulumi.Input[str]]:
"""
ARM resource ID of the code asset.
"""
return pulumi.get(self, "code_id")
@code_id.setter
def code_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "code_id", value)
@property
@pulumi.getter
def distribution(self) -> Optional[pulumi.Input[Union['MpiArgs', 'PyTorchArgs', 'TensorFlowArgs']]]:
"""
Distribution configuration of the job. If set, this should be one of Mpi, Tensorflow, PyTorch, or null.
"""
return pulumi.get(self, "distribution")
@distribution.setter
def distribution(self, value: Optional[pulumi.Input[Union['MpiArgs', 'PyTorchArgs', 'TensorFlowArgs']]]):
pulumi.set(self, "distribution", value)
@property
@pulumi.getter(name="environmentId")
def environment_id(self) -> Optional[pulumi.Input[str]]:
"""
The ARM resource ID of the Environment specification for the job.
"""
return pulumi.get(self, "environment_id")
@environment_id.setter
def environment_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "environment_id", value)
@property
@pulumi.getter(name="environmentVariables")
def environment_variables(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
Environment variables included in the job.
"""
return pulumi.get(self, "environment_variables")
@environment_variables.setter
def environment_variables(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "environment_variables", value)
@property
@pulumi.getter(name="inputDataBindings")
def input_data_bindings(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input['InputDataBindingArgs']]]]:
"""
Mapping of input data bindings used in the job.
"""
return pulumi.get(self, "input_data_bindings")
@input_data_bindings.setter
def input_data_bindings(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input['InputDataBindingArgs']]]]):
pulumi.set(self, "input_data_bindings", value)
@property
@pulumi.getter(name="outputDataBindings")
def output_data_bindings(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input['OutputDataBindingArgs']]]]:
"""
Mapping of output data bindings used in the job.
"""
return pulumi.get(self, "output_data_bindings")
@output_data_bindings.setter
def output_data_bindings(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input['OutputDataBindingArgs']]]]):
pulumi.set(self, "output_data_bindings", value)
@property
@pulumi.getter
def timeout(self) -> Optional[pulumi.Input[str]]:
"""
The max run duration in ISO 8601 format, after which the trial component will be cancelled.
Only supports duration with precision as low as Seconds.
"""
return pulumi.get(self, "timeout")
@timeout.setter
def timeout(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "timeout", value)
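# --- Hedged usage sketch (not part of the generated SDK) --------------------
# Illustrates constructing a TrialComponentArgs for a simple command job.
# The ARM id, duration and environment variable below are hypothetical; plain
# Python values are accepted wherever a pulumi.Input[...] is expected.
def _example_trial_component() -> 'TrialComponentArgs':
    return TrialComponentArgs(
        command="python train.py",
        environment_id="<hypothetical-environment-arm-id>",
        environment_variables={"SEED": "42"},
        distribution=TensorFlowArgs(distribution_type="TensorFlow", worker_count=2),
        timeout="PT2H",  # ISO 8601 duration: two hours
    )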
@pulumi.input_type
class TruncationSelectionPolicyArgs:
def __init__(__self__, *,
policy_type: pulumi.Input[str],
delay_evaluation: Optional[pulumi.Input[int]] = None,
evaluation_interval: Optional[pulumi.Input[int]] = None,
truncation_percentage: Optional[pulumi.Input[int]] = None):
"""
Defines an early termination policy that cancels a given percentage of runs at each evaluation interval.
:param pulumi.Input[str] policy_type:
Expected value is 'TruncationSelection'.
:param pulumi.Input[int] delay_evaluation: Number of intervals by which to delay the | |
#
# data preprocessing
#
from src.const import wells, carotage_types, las_dir, log_dir, slices_dir, wellheads, \
raw_cube, sourceX, sourceY, INLINE_3D, CROSSLINE_3D, nsamples, dt, ilines, xlines, \
well_width, slice_coord_path, norm_dict_path, slice_range
from src.utils import projection
from src.gen1 import dump_normalization_values
from src.data_types import Point
import numpy as np
import segyio
import pandas as pd
import pickle
import re
from pathlib import Path
from typing import Tuple, List, Optional
def create_slice_coord_dict(path: Path or str) -> None:
"""Create a dictionary with slice coordinates
    The original seismic cube has notches, resulting in an irregular horizontal projection;
    assign a rectangular inline/xline grid and fill in the holes."""
d = dict()
for c in xlines:
idx = CROSSLINE_3D == c
lines = INLINE_3D[idx]
x = sourceX[idx]
y = sourceY[idx]
ax = np.empty(len(ilines)) # len(ilines) = 651
ax[:] = np.nan
ay = np.empty(len(ilines))
ay[:] = np.nan
for l, xx, yy in zip(lines, x, y):
ax[l - min(ilines)] = xx # min(ilines) = 100
ay[l - min(ilines)] = yy
if len(lines) < len(ilines):
stepx = (-x.max() + x.min()) / (lines.max() - lines.min())
stepy = (y.max() - y.min()) / (lines.max() - lines.min())
for i in range(len(ax)): # using the fact that holes start in higher addresses
if np.isnan(ax[i]):
ax[i] = ax[i - 1] + stepx
ay[i] = ay[i - 1] + stepy
d.update({(c, i + min(ilines)): (xx, yy) for i, xx, yy in zip(range(len(ax)), ax, ay)})
# create coord dictionary
slice_coord_dict = {'iline': {}, 'xline': {}}
for iline in ilines:
slice_coord_dict['iline'][iline] = np.array([d[(xline, iline)] for xline in xlines])
for xline in xlines:
slice_coord_dict['xline'][xline] = np.array([d[(xline, iline)] for iline in ilines])
with open(path, 'wb') as f:
pickle.dump(slice_coord_dict, f)
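# --- Hedged sketch (illustration only, not part of the original module) ----
# create_slice_coord_dict fills missing trace coordinates by continuing the
# per-line step from the last known position.  The toy helper below shows the
# same idea on a 1-D array with a constant step, assuming (as noted above)
# that the holes sit at the high-index end of the line.
def _fill_tail_gaps_with_step(values, step):
    """Replace trailing NaNs by extrapolating with a constant step."""
    out = np.array(values, dtype=float)
    for i in range(len(out)):
        if np.isnan(out[i]):
            out[i] = out[i - 1] + step
    return out
# _fill_tail_gaps_with_step([0.0, 1.0, np.nan, np.nan], 1.0) -> array([0., 1., 2., 3.])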
def get_slice_coord_dict():
"""Load dictionary with slice coordinates"""
if not slice_coord_path.exists():
create_slice_coord_dict(slice_coord_path)
with open(slice_coord_path, 'rb') as f:
slice_coord_dict = pickle.load(f)
return slice_coord_dict
slice_coord_dict = get_slice_coord_dict()
def create_zero_cube(seg_path: Path or str, zero_file_path: Path or str,
samples: List, ilines: List, xlines: List) -> None:
"""code for seismic cube creation"""
spec = segyio.spec()
spec.sorting = 2
spec.format = 1
spec.samples = samples
spec.ilines = ilines
spec.xlines = xlines
trace = np.zeros(len(spec.samples), dtype=np.float32)
with segyio.open(seg_path) as segyfile:
header = segyfile.header
with segyio.create(zero_file_path, spec) as f:
tr = 0
for il in spec.ilines:
for xl in spec.xlines:
f.header[tr] = {
segyio.su.offset : 1,
segyio.su.iline : il,
segyio.su.xline : xl,
segyio.TraceField.TRACE_SAMPLE_COUNT : len(spec.samples),
segyio.TraceField.SourceX: header[tr][segyio.TraceField.SourceX],
segyio.TraceField.SourceY: header[tr][segyio.TraceField.SourceY]
}
f.trace[tr] = trace
tr += 1
f.bin.update(
tsort=segyio.TraceSortingFormat.INLINE_SORTING
)
def filter_carotage(well_log: pd.DataFrame, carotage_types: List,
max_depth: float, max_sigma: float=2) -> pd.DataFrame:
"""Filter carotage outliers"""
window = 599
diff = well_log[carotage_types] - well_log[carotage_types].rolling(window, center=True).median().\
fillna(method='ffill').fillna(method='bfill')
sigma = diff.std()
well_log_filtered = well_log.copy()
mask = diff.abs() > (max_sigma * sigma)
mask[well_log['tvd'] >= max_depth] = False
well_log_filtered[mask] = np.nan
return well_log_filtered
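# --- Hedged sketch (illustration only, not part of the original module) ----
# filter_carotage flags samples whose deviation from a centered rolling
# median exceeds max_sigma standard deviations of that deviation.  The
# miniature version below reproduces the rule for a single pandas Series.
def _flag_spikes(series: pd.Series, window: int = 5, max_sigma: float = 2.0) -> pd.Series:
    """Boolean mask of samples lying further than max_sigma*std from the rolling median."""
    baseline = series.rolling(window, center=True).median()
    baseline = baseline.fillna(method='ffill').fillna(method='bfill')
    diff = series - baseline
    return diff.abs() > max_sigma * diff.std()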
def generate_logs(well_list: List, log_dir: Path, min_depth: float=0, max_depth: float=nsamples * dt) -> None:
"""Preprocess las logs"""
for well_name in well_list:
print(well_name)
las_df = pd.read_csv(las_dir / f'{well_name}.log.csv',
delimiter='\t', na_values='1.000000e+30')
idx = np.logical_and(las_df['Inline'].values[0] == INLINE_3D, las_df['Crossline'].values[0] == CROSSLINE_3D)
las_df['x'] = sourceX[idx][0]
las_df['y'] = sourceY[idx][0]
        unit_pattern = re.compile(r'(.+)\(.+\)')  # strip unit suffixes, e.g. "GR(API)" -> "GR"
        fun = lambda s: unit_pattern.match(s)[1] if unit_pattern.match(s) else s
las_df = las_df.rename(columns=fun)
las_df = las_df.rename(columns={'Time': 't'})
las_df = las_df.loc[(min_depth <= las_df['t']) & (las_df['t'] <= max_depth)]
las_df.to_csv(log_dir / (well_name + '.csv'), index=False)
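# --- Hedged sketch (illustration only, not part of the original module) ----
# generate_logs strips the unit suffix from column names such as "GR(API)".
# The standalone version below applies the same regex-based rename to any
# DataFrame, leaving columns without a parenthesised unit untouched.
def _strip_units_from_columns(df: pd.DataFrame) -> pd.DataFrame:
    pattern = re.compile(r'(.+)\(.+\)')
    return df.rename(columns=lambda s: pattern.match(s)[1] if pattern.match(s) else s)
# _strip_units_from_columns(pd.DataFrame(columns=['GR(API)', 'Time'])).columns
# -> Index(['GR', 'Time'], dtype='object')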
def gen_tgt_mask(slice_coords: np.ndarray, vertical_grid: np.ndarray, las_df: pd.DataFrame,
carotage_types: List, well_width: int) -> Tuple[np.ndarray, ...]:
"""
Generates target and mask for a well projection on a vertical seismic slice
slice_coords: seismic traces coords of a slice, sorted, meters
v_grid: vertical grid of a seismic slice, milliseconds
las_df: las data frame
    carotage_types: list of carotage (log) types to project
    well_width: width of the target channel, in traces
returns: target and mask, HxWxC numpy arrays
"""
horizontal_grid = np.sqrt(np.square(slice_coords - slice_coords[0]).sum(axis=1))
assert all(np.diff(horizontal_grid) > 0)
assert all(np.diff(vertical_grid) > 0)
pt1 = Point(*slice_coords[0])
pt2 = Point(*slice_coords[-1])
# horizontal projection on the slice in the original coordinates
well_projection_xy = np.array([projection(pt1, pt2, Point(*p)) for p in las_df[['x', 'y']].values])
# horizontal projection on the slice in the slice coordinates
well_projection_1d = np.sqrt(np.square(well_projection_xy - slice_coords[0]).sum(axis=1))
target = np.zeros((len(vertical_grid), len(horizontal_grid), len(carotage_types)), dtype=np.float32)
mask = np.zeros_like(target, dtype=bool)
idx = np.digitize(well_projection_1d, horizontal_grid)
idx[idx == 0] += 1
idx[idx == len(horizontal_grid)] -= 1
a1 = np.abs(well_projection_1d - horizontal_grid[idx])
a2 = np.abs(well_projection_1d - horizontal_grid[idx - 1])
idx[a2 < a1] -= 1
las_df['h_index'] = idx
idx = np.digitize(las_df['t'], vertical_grid)
idx[idx == 0] += 1
idx[idx == len(vertical_grid)] -= 1
a1 = np.abs(las_df['t'] - vertical_grid[idx])
a2 = np.abs(las_df['t'] - vertical_grid[idx - 1])
idx[a2 < a1] -= 1
las_df['v_index'] = idx
gp = las_df.groupby(['h_index', 'v_index']).mean().reset_index().sort_values('t')
iy_ = tuple(np.repeat(gp.v_index.values[..., None], well_width, axis=1))
ix_ = []
for i in gp.h_index:
x1 = i - (well_width // 2)
x1 = max(0, x1)
x1 = min(len(horizontal_grid) - well_width, x1)
ix_.append(range(x1, x1 + well_width))
mask[iy_, ix_] = ~np.isnan(gp[carotage_types].values[:, None, :])
target[iy_, ix_] = gp[carotage_types].values[:, None, :]
target[~mask] = 0
return target, mask
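# --- Hedged sketch (illustration only, not part of the original module) ----
# gen_tgt_mask snaps well samples to the nearest grid node with np.digitize
# followed by a comparison of the two neighbouring distances.  The helper
# below isolates that snapping step for a monotonically increasing grid.
def _nearest_grid_index(values, grid):
    values = np.asarray(values, dtype=float)
    grid = np.asarray(grid, dtype=float)
    idx = np.digitize(values, grid)
    idx[idx == 0] += 1            # clip samples that fall before the grid
    idx[idx == len(grid)] -= 1    # clip samples that fall after the grid
    closer_left = np.abs(values - grid[idx - 1]) < np.abs(values - grid[idx])
    idx[closer_left] -= 1
    return idx
# _nearest_grid_index([0.4, 2.6], [0., 1., 2., 3.]) -> array([0, 3])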
def project_wells_onto_slice(slice_num: int, slice_type: str, well_list: List, carotage_types: List,
well_width: int, verbose: bool=False) -> Tuple[np.ndarray, ...]:
"""Finds projections of wells onto a given seismic slice"""
slice_coords = slice_coord_dict[slice_type][slice_num]
vertical_grid = np.arange(nsamples) * dt
target, mask = None, None
for well_name in well_list:
if verbose:
print(' ', well_name)
las_df = pd.read_csv(log_dir / (well_name + '.csv'))
t, m = gen_tgt_mask(slice_coords, vertical_grid, las_df, carotage_types, well_width)
if target is None:
target = t.copy()
mask = m.copy()
else:
target[m] = t[m]
mask = np.logical_or(mask, m)
return target, mask
def find_proxi_wells(slice_coords: np.ndarray, wells: List, max_distance: float) -> List:
"""Find nearest wells for a given coordinate on a slice"""
pt1 = Point(*slice_coords[0])
pt2 = Point(*slice_coords[-1])
proxi_wells = []
for well_name in wells:
ptw = Point(*wellheads.loc[well_name, ['X-Coord', 'Y-Coord']].values)
ptp = projection(pt1, pt2, ptw)
dist = np.sqrt(np.square(np.array(ptw) - np.array(ptp)).sum())
if dist <= max_distance:
proxi_wells.append(well_name)
return proxi_wells
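# --- Hedged sketch (illustration only, not part of the original module) ----
# find_proxi_wells relies on src.utils.projection to project a wellhead onto
# the line through the first and last trace of a slice.  A generic numpy
# version of such an orthogonal projection (an assumption about what
# `projection` computes, not its actual implementation) looks like this:
def _project_point_onto_line(p1, p2, q):
    p1, p2, q = (np.asarray(a, dtype=float) for a in (p1, p2, q))
    d = p2 - p1
    t = np.dot(q - p1, d) / np.dot(d, d)
    return p1 + t * d
# distance to the line: np.linalg.norm(q - _project_point_onto_line(p1, p2, q))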
def find_nearest_slice(wells: List) -> List[Tuple[str, int, str]]:
"""Return nearest slice for a well list"""
iline_coords = []
for iline in ilines:
coords = slice_coord_dict['iline'][iline]
iline_coords.append((Point(*coords[0]), Point(*coords[-1])))
xline_coords = []
for xline in xlines:
coords = slice_coord_dict['xline'][xline]
xline_coords.append((Point(*coords[0]), Point(*coords[-1])))
slice_well_list = []
for well_name in wells:
ptw = Point(*wellheads.loc[well_name, ['X-Coord', 'Y-Coord']].values)
proj = [projection(pt1, pt2, ptw) for pt1, pt2 in iline_coords]
dist = np.sqrt(np.square(np.array(ptw) - np.array(proj)).sum(axis=1))
iline = ilines[np.argmin(dist)]
proj = [projection(pt1, pt2, ptw) for pt1, pt2 in xline_coords]
dist = np.sqrt(np.square(np.array(ptw) - np.array(proj)).sum(axis=1))
xline = xlines[np.argmin(dist)]
slice_well_list.append(('iline', iline, well_name))
slice_well_list.append(('xline', xline, well_name))
return slice_well_list
def create_slice_well_list(wells: List, slice_range: int=slice_range) -> List[Tuple[str, int, str]]:
"""Return range of nearest slices for a well list"""
slice_well_list = []
for well_name in wells:
las_df = pd.read_csv(log_dir / (well_name + '.csv'))
        inline, crossline = las_df[['Inline', 'Crossline']].values[0]
        for slice_type, line in zip(['iline', 'xline'], [inline, crossline]):
for s in range(line - slice_range, line + slice_range + 1):
slice_well_list.append((slice_type, s, well_name))
return slice_well_list
def slice_crossline(crossline: int) -> np.ndarray:
"""cut seismic slice along crossline"""
idx = CROSSLINE_3D == crossline
    assert idx.any(), 'crossline out of range'  # check for any matching trace; len(idx) is always the cube length
assert all(np.diff(INLINE_3D[idx]) > 0)
a = np.zeros((nsamples, max(ilines) - min(ilines) + 1), dtype=raw_cube.dtype)
a[:, INLINE_3D[idx].min() - min(ilines): INLINE_3D[idx].min() - min(ilines) + raw_cube[idx].shape[0]] = \
raw_cube[idx].T
return a
def slice_inline(inline: int) -> np.ndarray:
"""cut seismic slice along inline"""
idx = INLINE_3D == inline
    assert idx.any(), 'inline out of range'  # check for any matching trace; len(idx) is always the cube length
assert all(np.diff(CROSSLINE_3D[idx]) > 0)
a = np.zeros((nsamples, max(xlines) - min(xlines) + 1), dtype=raw_cube.dtype)
a[:, CROSSLINE_3D[idx].min() - min(xlines): CROSSLINE_3D[idx].min() - min(xlines) + raw_cube[idx].shape[0]] = \
raw_cube[idx].T
return a
def get_slice_data(slice_num: int, slice_type: str, wells_list: list, max_distance: float,
                   carotage_types: list, well_width: int) -> Optional[dict]:
    """Prepare the first type of data unit: multiple wells projected onto a single slice, as a dictionary with:
1) seismic slice,
2) carotage projections and masks,
3) list of projected wells"""
if slice_type == 'iline':
seismic_slice = slice_inline(slice_num)
elif slice_type == 'xline':
seismic_slice = slice_crossline(slice_num)
else:
raise ValueError('wrong slice type')
slice_coords = slice_coord_dict[slice_type][slice_num]
proxi_wells = find_proxi_wells(slice_coords, wells_list, max_distance)
if len(proxi_wells) == 0:
print(f'{slice_type} {slice_num} has no proxi wells at {max_distance}m')
return None
target, mask = project_wells_onto_slice(slice_num, slice_type, proxi_wells, carotage_types, well_width)
projections = {carotage_type: {'target': target[..., i], 'mask': mask[..., i]}
for i, carotage_type in enumerate(carotage_types)}
slice_data = {'seismic': seismic_slice,
'projections': projections,
'proxi_wells': proxi_wells}
return slice_data
def get_slice_data_single_well(slice_num: int, slice_type: str, well_name: str,
                               carotage_types: List, well_width: int) -> dict:
    """Prepare the second type of data unit: a single well projected onto a single slice, as a dictionary with:
1) seismic slice,
2) carotage projections and masks,
    3) list of projected wells (made of the single well)"""
# sgmcmcjax/diffusions.py
# """A diffusion is modeled as an ``(init_fun, update_fun, get_params)`` triple of
# functions, where the component functions have these signatures:
# init_fn
# ::
# init_fn(params)
# Args:
# params: pytree representing the initial parameters.
# Returns:
# A pytree representing the initial diffusion state, which includes the
# initial parameters and may also include auxiliary values like initial
# momentum. The optimizer state pytree structure generally differs from that
# of `params`.
# ::
# update_fn
# ::
# update_fn(key, step, grads, diffusion_state)
# Args:
# key: random key
# step: integer representing the step index.
# grads: a pytree with the same structure as `get_params(opt_state)`
# representing the gradients to be used in updating the diffusion state.
# diffusion_state: a pytree representing the diffusion state to be updated.
# Returns:
# A pytree with the same structure as the `diffusion_state` argument representing
# the updated optimizer state.
# ::
# get_params
# ::
# get_params(diffusion_state)
# Args:
# diffusion_state: pytree representing an optimizer state.
# Returns:
# A pytree representing the parameters extracted from `diffusion_state`, such that
# the invariant `params == get_params(init_fun(params))` holds true.
# """
from typing import Callable, Tuple, Union
import jax.numpy as jnp
from jax import lax, random
from .diffusion_util import diffusion, diffusion_palindrome, diffusion_sghmc
@diffusion
def sgld(dt) -> Tuple[Callable, Callable, Callable]:
"""SGLD diffusion
https://www.ics.uci.edu/~welling/publications/papers/stoclangevin_v6.pdf
This is an Euler-Maruyam solver for an overdamped Langevin diffusion
Args:
dt (float): step size
Returns:
Tuple[Callable, Callable, Callable]: An (init_fun, update_fun, get_params) triple.
"""
dt = make_schedule(dt)
def init_fn(x):
return x
def update(i, k, g, x):
return (
x + dt(i) * g + jnp.sqrt(2 * dt(i)) * random.normal(k, shape=jnp.shape(x))
)
def get_params(x):
return x
return init_fn, update, get_params
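# --- Hedged sketch (illustration only, not part of the original module) ----
# One Euler-Maruyama step of the overdamped Langevin update used by sgld,
# written with plain numpy for a standard Gaussian target (grad log p(x) = -x)
# so the update rule can be inspected outside the jax/decorator machinery.
def _numpy_sgld_step(x, step_size, rng=None):
    import numpy as onp
    rng = onp.random.default_rng() if rng is None else rng
    grad_log_post = -x  # standard Gaussian target: an illustrative assumption
    noise = rng.normal(size=onp.shape(x))
    return x + step_size * grad_log_post + onp.sqrt(2 * step_size) * noise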
@diffusion
def psgld(
dt, alpha: float = 0.99, eps: float = 1e-5
) -> Tuple[Callable, Callable, Callable]:
"""Preconditioned SGLD diffusion
See algorithm 1 in paper: https://arxiv.org/pdf/1512.07666.pdf
Args:
dt ([type]): step size
alpha (float, optional): decay weights for gradients. Defaults to 0.99.
eps ([type], optional): controls extreme in curvature. Defaults to 1e-5.
Returns:
Tuple[Callable, Callable, Callable]: An (init_fun, update_fun, get_params) triple.
"""
dt = make_schedule(dt)
def init_fn(x):
v = jnp.zeros_like(x)
return x, v
def update(i, k, g, state):
x, v = state
v = alpha * v + (1 - alpha) * jnp.square(g)
G = 1.0 / (jnp.sqrt(v) + eps)
return (
x
+ dt(i) * 0.5 * G * g
+ jnp.sqrt(dt(i) * G) * random.normal(k, shape=jnp.shape(x)),
v,
)
def get_params(state):
x, _ = state
return x
return init_fn, update, get_params
@diffusion
def sgldAdam(
dt, beta1: float = 0.9, beta2: float = 0.999, eps: float = 1e-8
) -> Tuple[Callable, Callable, Callable]:
"""'ADAM'-like SGMCMC diffusion. See appendix in paper: https://arxiv.org/abs/2105.13059v1
Args:
dt (float): step size
beta1 (float, optional): weights for the first moment of the gradients. Defaults to 0.9.
beta2 (float, optional): weights for the second moment of the gradients. Defaults to 0.999.
eps (float, optional): small value to avoid instabilities. Defaults to 1e-8.
Returns:
Tuple[Callable, Callable, Callable]: An (init_fun, update_fun, get_params) triple.
"""
dt = make_schedule(dt)
def init_fn(x):
m = jnp.zeros_like(x)
v = jnp.zeros_like(x)
return x, m, v
def update(i, k, g, state):
x, m, v = state
m = beta1 * m + (1 - beta1) * g
v = beta2 * v + (1 - beta2) * jnp.square(g)
m_hat = m / (1 - beta1 ** (i + 1))
v_hat = v / (1 - beta2 ** (i + 1))
adapt_dt = dt(i) / (jnp.sqrt(v_hat) + eps)
return (
x
+ adapt_dt * 0.5 * m_hat
+ jnp.sqrt(adapt_dt) * random.normal(key=k, shape=jnp.shape(x)),
m,
v,
)
def get_params(state):
x, _, _ = state
return x
return init_fn, update, get_params
@diffusion_sghmc
def sghmc(
dt, alpha: float = 0.01, beta: float = 0
) -> Tuple[Callable, Callable, Callable, Callable]:
"""diffusion for stochastic gradient HMC.
See paper: https://arxiv.org/abs/1402.4102. Uses the parametrisation in section G (appendix)
Args:
dt (float): step size
alpha (float, optional): friction coefficient. Defaults to 0.01.
beta (float, optional): estimation of the stochastic gradient noise. Defaults to 0.
Returns:
Tuple[Callable, Callable, Callable, Callable]: An (init_fun, update_fun, get_params, resample_momentum) triple.
"""
dt = make_schedule(dt)
def init_fn(x):
v = jnp.zeros_like(x)
return x, v
def update(i, k, g, state):
x, v = state
x = x + v
v = (
v
+ dt(i) * g
- alpha * v
+ jnp.sqrt(2 * (alpha - beta) * dt(i))
* random.normal(k, shape=jnp.shape(x))
)
return x, v
def get_params(state):
x, _ = state
return x
def resample_momentum(i, k, x):
v = jnp.sqrt(dt(i)) * random.normal(k, shape=jnp.shape(x))
return x, v
return init_fn, update, get_params, resample_momentum
@diffusion_palindrome
def baoab(
dt, gamma: float, tau: float = 1.0
) -> Tuple[Callable, Tuple[Callable, Callable], Callable]:
"""BAOAB splitting scheme for the underdampled Langevin diffusion. https://aip.scitation.org/doi/abs/10.1063/1.4802990
Args:
dt (float): step size
gamma (float): friction coefficient
tau (float, optional): temperature. Defaults to 1.
Returns:
Tuple[Callable, Tuple[Callable, Callable], Callable]: An (init_fun, (update1, update2), get_params) triple.
"""
dt = make_schedule(dt)
def init_fn(x):
v = jnp.zeros_like(x)
return x, v
def update1(i, k, g, state):
x, v = state
v = v + dt(i) * 0.5 * g
x = x + v * dt(i) * 0.5
c1 = jnp.exp(-gamma * dt(i))
c2 = jnp.sqrt(1 - c1**2)
v = c1 * v + tau * c2 * random.normal(k, shape=jnp.shape(v))
x = x + v * dt(i) * 0.5
return x, v
def update2(i, k, g, state):
x, v = state
v = v + dt(i) * 0.5 * g
return x, v
def get_params(state):
x, _ = state
return x
return init_fn, (update1, update2), get_params
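# --- Hedged sketch (illustration only, not part of the original module) ----
# update1/update2 above implement the B-A-O-A-B splitting: half kick, half
# drift, Ornstein-Uhlenbeck step, half drift, then a second half kick with
# the gradient at the new position.  The plain-numpy walk-through below does
# one full step for a quadratic potential (grad log p(x) = -x), mirroring the
# parametrisation used above.
def _numpy_baoab_step(x, v, dt, gamma, tau=1.0, rng=None):
    import numpy as onp
    rng = onp.random.default_rng() if rng is None else rng
    v = v + 0.5 * dt * (-x)          # B: half kick with the current gradient
    x = x + 0.5 * dt * v             # A: half drift
    c1 = onp.exp(-gamma * dt)        # O: Ornstein-Uhlenbeck velocity update
    c2 = onp.sqrt(1 - c1 ** 2)
    v = c1 * v + tau * c2 * rng.normal(size=onp.shape(v))
    x = x + 0.5 * dt * v             # A: second half drift
    v = v + 0.5 * dt * (-x)          # B: half kick with the new gradient
    return x, v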
@diffusion
def sgnht(dt, a: float = 0.01) -> Tuple[Callable, Callable, Callable]:
"""Euler solver for the SG-NHT diffusion
See algorithm 2 in http://people.ee.duke.edu/~lcarin/sgnht-4.pdf
Args:
dt (float): step size
a (float, optional): diffusion factor. Defaults to 0.01.
Returns:
Tuple[Callable, Callable, Callable]: An (init_fun, update_fun, get_params) triple.
"""
dt = make_schedule(dt)
def init_fn(x):
v = jnp.zeros_like(x)
alpha = a
return x, v, alpha
def initial_momentum(kv):
"sample momentum at the first iteration"
k, v = kv
key, subkey = random.split(k)
v = jnp.sqrt(dt(0)) * random.normal(subkey, shape=v.shape)
return key, v
def update(i, k, g, state):
x, v, alpha = state
k, v = lax.cond(i == 0, initial_momentum, lambda kv: (k, v), (k, v))
v = (
v
- alpha * v
+ dt(i) * g
+ jnp.sqrt(2 * a * dt(i)) * random.normal(k, shape=jnp.shape(x))
)
x = x + v
alpha = alpha + (jnp.linalg.norm(v) ** 2) / v.size - dt(i)
return x, v, alpha
def get_params(state):
x, _, _ = state
return x
return init_fn, update, get_params
@diffusion_palindrome
def badodab(
dt, a: float = 0.01
) -> Tuple[Callable, Tuple[Callable, Callable], Callable]:
"""Splitting scheme for the 3-equation Langevin diffusion. See https://arxiv.org/abs/1505.06889
This is a more stable discretisation than SG-NHT
Args:
dt (float): step size
a (float, optional): initial value of alpha. Defaults to 0.01.
Returns:
Tuple[Callable, Tuple[Callable, Callable], Callable]: An (init_fun, update_fun, get_params) triple.
"""
dt = make_schedule(dt)
def init_fn(x):
v = jnp.zeros_like(x)
alpha = a
return x, v, alpha
def update(i, k, g, state):
x, v, alpha = state
dt2 = dt(i) / 2
mu = 1.0
sigma = 1.0
v = v + dt2 * g
x = x + dt2 * v
alpha = alpha + (dt2 / mu) * (jnp.linalg.norm(v) - v.size)
c1 = jnp.exp(-alpha * dt(i))
c2 = jnp.where(
alpha == 0, jnp.sqrt(dt(i)), jnp.sqrt(jnp.abs((1 - c1**2) / (2 * alpha)))
)
v = c1 * v + c2 * sigma * random.normal(k, shape=jnp.shape(v))
alpha = alpha + (dt2 / mu) * (jnp.linalg.norm(v) - v.size)
x = x + dt2 * v
return x, v, alpha
def update2(i, k, g, state):
x, v, alpha = state
v = v + dt(i) * 0.5 * g
return x, v, alpha
def get_params(state):
x, _, _ = state
return x
return init_fn, (update, update2), get_params
### step size schedules
def constant(step_size: float) -> Callable:
def schedule(i):
return step_size
return schedule
def welling_teh_schedule(a: float, b: float, gamma: float = 0.55) -> Callable:
"Polynomial schedule from https://www.ics.uci.edu/~welling/publications/papers/stoclangevin_v6.pdf"
def schedule(i):
return a * (b + i) ** (-gamma)
return schedule
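# --- Hedged sketch (illustration only, not part of the original module) ----
# The polynomial schedule decays as a*(b + i)**(-gamma); a quick check that
# the returned callable is monotonically decreasing in the iteration index:
def _demo_welling_teh_decay():
    sched = welling_teh_schedule(a=1e-5, b=10.0, gamma=0.55)
    steps = [float(sched(i)) for i in (0, 100, 1000)]
    assert steps[0] > steps[1] > steps[2]
    return steps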
def cyclical_schedule(alpha_0: float, M: int, K: int) -> Callable:
"https://arxiv.org/abs/1902.03932"
def schedule(i):
        mod_term = (i - 1) % jnp.ceil(K / M)
        # The source was truncated here; the return below is an assumption that
        # follows the cyclical schedule of the cited paper:
        # alpha_0/2 * (cos(pi * mod_term / ceil(K/M)) + 1).
        return 0.5 * alpha_0 * (jnp.cos(jnp.pi * mod_term / jnp.ceil(K / M)) + 1)
    return schedule
"""
if variable:
index = self.chemicals.get_index(variable)
else:
index = self._X_index
stoichiometry_by_wt = self._get_stoichiometry_by_wt()
if self.phases:
stoichiometry_by_wt = stoichiometry_by_wt.sum(0)
def f(x):
stoichiometry_by_wt[index] = x
return stoichiometry_by_wt.sum()
x = flx.aitken_secant(f, 1)
if self._basis == 'mol':
x /= self.MWs[index]
if self.phases:
row = np.where(self._stoichiometry[:, index])
self._stoichiometry[row, index] = x
else:
self._stoichiometry[index] = x
self._rescale()
def correct_atomic_balance(self, constants=None):
"""
    Correct stoichiometry coefficients to satisfy the atomic balance.
Parameters
----------
constants : str, optional
IDs of chemicals for which stoichiometric coefficients are held constant.
Examples
--------
Balance glucose fermentation to ethanol:
>>> import thermosteam as tmo
>>> from biorefineries import lipidcane as lc
>>> tmo.settings.set_thermo(lc.chemicals)
>>> fermentation = tmo.Reaction('Glucose + O2 -> Ethanol + CO2',
... reactant='Glucose', X=0.9)
>>> fermentation.correct_atomic_balance()
>>> fermentation.show()
Reaction (by mol):
stoichiometry reactant X[%]
Glucose -> 2 Ethanol + 2 CO2 Glucose 90.00
Balance methane combustion:
>>> combustion = tmo.Reaction('CH4 + O2 -> Water + CO2',
... reactant='CH4', X=1)
>>> combustion.correct_atomic_balance()
>>> combustion.show()
Reaction (by mol):
stoichiometry reactant X[%]
2 O2 + CH4 -> 2 Water + CO2 CH4 100.00
Balance electrolysis of water (with chemical phases specified):
>>> electrolysis = tmo.Reaction('H2O,l -> H2,g + O2,g',
... chemicals=tmo.Chemicals(['H2O', 'H2', 'O2']),
... reactant='H2O', X=1)
>>> electrolysis.correct_atomic_balance()
>>> electrolysis.show()
Reaction (by mol):
stoichiometry reactant X[%]
H2O,l -> H2,g + 0.5 O2,g H2O,l 100.00
Note that if the reaction is underspecified, there are infinite
ways to balance the reaction and a runtime error is raised:
>>> rxn_underspecified = tmo.Reaction('CH4 + Glucose + O2 -> Water + CO2',
... reactant='CH4', X=1)
>>> rxn_underspecified.correct_atomic_balance()
Traceback (most recent call last):
RuntimeError: reaction stoichiometry is underspecified; pass the
`constants` argument to the `<Reaction>.correct_atomic_balance` method
to specify which stoichiometric coefficients to hold constant
Chemical coefficients can be held constant to prevent this error:
>>> rxn_underspecified = tmo.Reaction('CH4 + Glucose + O2 -> Water + CO2',
... reactant='CH4', X=1)
>>> rxn_underspecified.correct_atomic_balance(['Glucose', 'CH4'])
>>> rxn_underspecified.show()
Reaction (by mol):
stoichiometry reactant X[%]
Glucose + 8 O2 + CH4 -> 8 Water + 7 CO2 CH4 100.00
"""
stoichiometry_by_mol = self._get_stoichiometry_by_mol()
phases = self.phases
if phases:
stoichiometry_by_mol = stoichiometry_by_mol.sum(0)
chemicals = self.chemicals
if constants:
if isinstance(constants, str): constants = [constants]
constants = set(constants)
constant_index = chemicals.indices(constants)
else:
constant_index = [self._X_index[1] if phases else self._X_index]
chemical_index, = np.where(stoichiometry_by_mol)
chemical_index = np.setdiff1d(chemical_index, constant_index)
formula_array = chemicals.formula_array
b = - (formula_array[:, constant_index]
* stoichiometry_by_mol[constant_index]).sum(1, keepdims=True)
atomic_bool_index = np.any(formula_array * stoichiometry_by_mol, axis=1)
atomic_index, = np.where(atomic_bool_index)
b = b[atomic_index, :]
A = formula_array[atomic_index, :][:, chemical_index]
M_atoms, N_chemicals = A.shape
if M_atoms != N_chemicals:
x, _, rank, *_ = np.linalg.lstsq(A, b, rcond=None)
if N_chemicals > rank:
raise RuntimeError(
"reaction stoichiometry is underspecified (i.e. there are "
"infinite ways to balance the reaction); pass the "
"`constants` argument to the `<Reaction>.correct_atomic_balance` "
"method to specify which stoichiometric coefficients to hold constant"
)
residual_mass = ((A @ x - b) * self.MWs).sum()
if residual_mass > 1e-6:
warn(f'atomic balance was solved with a residual mass error of {residual_mass} g / mol of reactant')
else:
x = np.linalg.solve(A, b)
stoichiometry_by_mol[chemical_index] = x.flatten()
by_wt = self._basis == 'wt'
stoichiometry = stoichiometry_by_mol * self.MWs if by_wt else stoichiometry_by_mol
if phases:
self._stoichiometry[:] = (self._stoichiometry != 0.) * stoichiometry
elif by_wt:
self._stoichiometry[:] = stoichiometry
self._rescale()
def _rescale(self):
"""Scale stoichiometry to a per reactant basis."""
new_scale = -self._stoichiometry[self._X_index]
if new_scale == 0.:
raise RuntimeError(f"reactant '{self.reactant}' does not participate in stoichiometric reaction")
self._stoichiometry /= new_scale
def to_df(self, index=None):
columns = [f'Stoichiometry (by {self.basis})', 'Reactant', 'Conversion [%]']
stoichiometry = get_stoichiometric_string(self.stoichiometry, self.phases, self.chemicals)
reactant = self.reactant
conversion = 100. * self.X
df = pd.DataFrame(data=[[stoichiometry, reactant, conversion]], columns=columns, index=[index] if index else None)
df.index.name = 'Reaction'
return df
def __repr__(self):
reaction = get_stoichiometric_string(self.stoichiometry, self.phases, self.chemicals)
return f"{type(self).__name__}('{reaction}', reactant='{self.reactant}', X={self.X:.3g}, basis={repr(self.basis)})"
def _info(self):
info = f"{type(self).__name__} (by {self.basis}):"
rxn = get_stoichiometric_string(self.stoichiometry, self.phases, self.chemicals)
if self.phases:
phase, ID = self.reactant
cmp = ID + ',' + phase
else:
cmp = self.reactant
lrxn = len(rxn)
lcmp = len(cmp)
maxrxnlen = max([13, lrxn]) + 2
maxcmplen = max([8, lcmp]) + 2
X = self.X
info += "\nstoichiometry" + " "*(maxrxnlen - 13) + "reactant" + " "*(maxcmplen - 8) + ' X[%]'
rxn_spaces = " "*(maxrxnlen - lrxn)
cmp_spaces = " "*(maxcmplen - lcmp)
info += f"\n{rxn}{rxn_spaces}{cmp}{cmp_spaces}{X*100: >6.2f}"
return info
def show(self):
print(self._info())
_ipython_display_ = show
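# --- Hedged sketch (illustration only, not part of the original module) ----
# correct_atomic_balance solves A @ x = b, where A holds the atom counts of
# the chemicals whose coefficients are free and b collects the atoms supplied
# by the chemicals held constant.  The toy example below balances
# CH4 + O2 -> H2O + CO2 with CH4 fixed at -1, using a hand-written formula
# matrix (rows C, H, O; columns O2, H2O, CO2).
def _toy_atomic_balance():
    import numpy as np
    A = np.array([[0., 0., 1.],    # C atoms in O2, H2O, CO2
                  [0., 2., 0.],    # H atoms
                  [2., 1., 2.]])   # O atoms
    b = np.array([[1.],            # atoms supplied by one CH4: 1 C
                  [4.],            # 4 H
                  [0.]])           # 0 O
    x, *_ = np.linalg.lstsq(A, b, rcond=None)
    return x.ravel()  # ~[-2., 2., 1.]: 2 O2 consumed, 2 H2O and 1 CO2 produced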
class ReactionItem(Reaction):
"""
Create a ReactionItem object from the a ReactionSet and reaction index.
Parameters
----------
rxnset : ReactionSet
index : int
Index of reaction.
"""
__slots__ = ('_index', '_parent')
phases = MaterialIndexer.phases
def __init__(self, rxnset, index):
self._stoichiometry = rxnset._stoichiometry[index]
self._phases = rxnset._phases
self._basis = rxnset._basis
self._X = rxnset._X
self._chemicals = rxnset._chemicals
self._X_index = rxnset._X_index[index]
self._index = index
self._parent = rxnset
def reset_chemicals(self, chemicals):
if self._chemicals is chemicals: return
parent = self._parent
parent.reset_chemicals(chemicals)
index = self._index
self._stoichiometry = parent._stoichiometry[index]
self._X_index = parent._X_index[index]
self._chemicals = chemicals
@property
def basis(self):
"""{'mol', 'wt'} Basis of reaction"""
return self._basis
@basis.setter
def basis(self, basis):
raise TypeError('cannot change basis of reaction item')
def copy(self, basis=None):
"""Return copy of Reaction object."""
copy = Reaction.__new__(Reaction)
copy._basis = self._basis
copy._phases = self._phases
copy._stoichiometry = self._stoichiometry.copy()
copy._X_index = self._X_index
copy._chemicals = self._chemicals
copy._X = self.X
if basis: set_reaction_basis(copy, basis)
return copy
@property
    def X(self):
        """[float] Reaction conversion as a fraction."""
return self._X[self._index]
@X.setter
def X(self, X):
self._X[self._index] = X
class ReactionSet:
"""
Create a ReactionSet that contains all reactions and conversions as an array.
Parameters
----------
reactions : Iterable[Reaction]
"""
__slots__ = (*Reaction.__slots__, '_parent_index')
copy = Reaction.copy
phases = MaterialIndexer.phases
_get_stoichiometry_by_mol = Reaction._get_stoichiometry_by_mol
_get_stoichiometry_by_wt = Reaction._get_stoichiometry_by_wt
force_reaction = Reaction.force_reaction
adiabatic_reaction = Reaction.adiabatic_reaction
__call__ = Reaction.__call__
def __init__(self, reactions):
if not reactions: raise ValueError('no reactions passed')
phases_set = set([i.phases for i in reactions])
if len(phases_set) > 1:
raise ValueError('all reactions must implement the same phases')
self._phases, = phases_set
chemicals = {i.chemicals for i in reactions}
try: self._chemicals, = chemicals
except: raise ValueError('all reactions must have the same chemicals')
basis = {i.basis for i in reactions}
try: self._basis, = basis
except: raise ValueError('all reactions must have the same basis')
self._stoichiometry = np.array([i._stoichiometry for i in reactions])
self._X = np.array([i.X for i in reactions])
X_index = [i._X_index for i in reactions]
self._X_index = tuple(X_index) if self._phases else np.array(X_index)
def __getitem__(self, index):
stoichiometry = self._stoichiometry[index]
if (self.phases and stoichiometry.ndim == 2) or stoichiometry.ndim == 1:
return ReactionItem(self, index)
else:
rxnset = self.__new__(self.__class__)
rxnset._basis = self._basis
rxnset._phases = self._phases
rxnset._stoichiometry = stoichiometry
rxnset._X = self._X[index]
rxnset._X_index = self._X_index[index]
rxnset._chemicals = self._chemicals
rxnset._parent_index = (self, index)
return rxnset
def reset_chemicals(self, chemicals):
if chemicals is self._chemicals: return
if hasattr(self, '_parent_index'):
parent, index = self._parent_index
parent.reset_chemicals(chemicals)
self._stoichiometry = parent._stoichiometry[index]
self._X_index = parent._X_index[index]
self._chemicals = parent._chemicals
return
phases = self.phases
stoichiometry = self._stoichiometry
reactants = self.reactants
if phases:
A, B, C = stoichiometry.shape
new_stoichiometry = np.zeros([A, B, chemicals.size])
IDs = self._chemicals.IDs
for i in range(A):
for j in range(B):
for k in range(C):
value = stoichiometry[i, j, k]
if value: new_stoichiometry[i, j, chemicals.index(IDs[k])] = value
X_index = tuple([(phases.index(i), chemicals.index(j)) for i, j in reactants])
else:
A, B = stoichiometry.shape
new_stoichiometry = np.zeros([A, chemicals.size])
IDs = self._chemicals.IDs
for i in range(A):
for j in range(B):
value = stoichiometry[i, j]
if value: new_stoichiometry[i, chemicals.index(IDs[j])] = value
X_index = tuple([chemicals.index(i) for i in reactants])
self._chemicals = chemicals
self._stoichiometry = new_stoichiometry
self._X_index = X_index
@property
def reaction_chemicals(self):
"""Return all chemicals involved in the reaction."""
return [i for i,j in zip(self._chemicals, self._stoichiometry.any(axis=0)) if j]
@property
def basis(self):
"""{'mol', 'wt'} Basis of reaction"""
return self._basis
@basis.setter
def basis(self, basis):
raise TypeError('cannot change basis of reaction set')
@property
    def X(self):
        """[1d array] Reaction conversions."""
return self._X
@X.setter
    def X(self, X):
        """[1d array] Reaction conversions."""
if X is not self._X: self._X[:] = X
@property
def chemicals(self):
| |
0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000,
0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000,
0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000,
0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000,
0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000,
0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000,
0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000,
0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000,
0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000,
0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000,
0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000,
0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000,
0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000,
0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0195312,
0.0390625, 0.0585938, 0.0781250, 0.0976562, 0.1171875, 0.1367188,
0.1562500, 0.1757812, 0.1953125, 0.2148438, 0.2343750, 0.2539062,
0.2734375, 0.2929688, 0.3125000, 0.3281250, 0.3437500, 0.3593750,
0.3750000, 0.3945312, 0.4140625, 0.4335938, 0.4531250, 0.4726562,
0.4921875, 0.5117188, 0.5312500, 0.5507812, 0.5703125, 0.5898438,
0.6093750, 0.6210938, 0.6328125, 0.6445312, 0.6562500, 0.6679688,
0.6796875, 0.6914062, 0.7031250, 0.7148438, 0.7265625, 0.7382812,
0.7500000, 0.7617188, 0.7734375, 0.7851562, 0.7968750, 0.8085938,
0.8203125, 0.8320312, 0.8437500, 0.8554688, 0.8671875, 0.8789062,
0.8906250, 0.9023438, 0.9140625, 0.9257812, 0.9375000, 0.9492188,
0.9609375, 0.9726562, 0.9843750, 0.9843750, 0.9843750, 0.9843750,
0.9843750, 0.9843750, 0.9843750, 0.9843750, 0.9843750, 0.9843750,
0.9843750, 0.9843750, 0.9843750, 0.9843750, 0.9843750, 0.9843750,
0.9843750, 0.9843750, 0.9843750, 0.9843750, 0.9843750, 0.9843750,
0.9843750, 0.9843750, 0.9843750, 0.9843750, 0.9843750, 0.9843750,
0.9843750, 0.9843750, 0.9843750, 0.9843750, 0.9843750, 0.9843750,
0.9843750, 0.9843750, 0.9843750, 0.9843750, 0.9843750, 0.9843750,
0.9843750, 0.9843750, 0.9843750, 0.9843750, 0.9843750, 0.9843750,
0.9843750, 0.9843750, 0.9843750, 0.9843750, 0.9843750, 0.9843750,
0.9843750, 0.9843750, 0.9843750, 0.9843750, 0.9843750, 0.9843750,
0.9843750, 0.9843750, 0.9843750, 0.9843750, 0.9843750, 0.9843750,
0.9843750, 0.9882812, 0.9921875, 0.9960938]),
array([ 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0546875, 0.1093750,
0.1679688, 0.2226562, 0.2812500, 0.3164062, 0.3515625, 0.3867188,
0.4218750, 0.4570312, 0.4921875, 0.5273438, 0.5625000, 0.5976562,
0.6328125, 0.6679688, 0.7031250, 0.6992188, 0.6953125, 0.6914062,
0.6875000, 0.6835938, 0.6796875, 0.6757812, 0.6718750, 0.6679688,
0.6640625, 0.6601562, 0.6562500, 0.6523438, 0.6484375, 0.6445312,
0.6406250, 0.6367188, 0.6328125, 0.6289062, 0.6250000, 0.6210938,
0.6171875, 0.6132812, 0.6093750, 0.6015625, 0.5937500, 0.5859375,
0.5781250, 0.5742188, 0.5703125, 0.5664062, 0.5625000, 0.5585938,
0.5546875, 0.5507812, 0.5468750, 0.5429688, 0.5390625, 0.5351562,
0.5312500, 0.5273438, 0.5234375, 0.5195312, 0.5156250, 0.5117188,
0.5078125, 0.5039062, 0.5000000, 0.4921875, 0.4843750, 0.4765625,
0.4687500, 0.4648438, 0.4609375, 0.4570312, 0.4531250, 0.4492188,
0.4453125, 0.4414062, 0.4375000, 0.4335938, 0.4296875, 0.4257812,
0.4218750, 0.4179688, 0.4140625, 0.4101562, 0.4062500, 0.4023438,
0.3984375, 0.3945312, 0.3906250, 0.3867188, 0.3828125, 0.3789062,
0.3750000, 0.3671875, 0.3593750, 0.3515625, 0.3437500, 0.3398438,
0.3359375, 0.3320312, 0.3281250, 0.3242188, 0.3203125, 0.3164062,
0.3125000, 0.3085938, 0.3046875, 0.3007812, 0.2968750, 0.2929688,
0.2890625, 0.2851562, 0.2812500, 0.2773438, 0.2734375, 0.2695312,
0.2656250, 0.2578125, 0.2500000, 0.2421875, 0.2343750, 0.2304688,
0.2265625, 0.2226562, 0.2187500, 0.2109375, 0.2031250, 0.1953125,
0.1875000, 0.1835938, 0.1796875, 0.1757812, 0.1718750, 0.1640625,
0.1562500, 0.1484375, 0.1406250, 0.1328125, 0.1250000, 0.1171875,
0.1093750, 0.1015625, 0.0937500, 0.0859375, 0.0781250, 0.0742188,
0.0703125, 0.0664062, 0.0625000, 0.0546875, 0.0468750, 0.0390625,
0.0312500, 0.0234375, 0.0156250, 0.0078125, 0.0000000, 0.0000000,
0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000,
0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000,
0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000,
0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000,
0.0000000, 0.0000000, 0.0000000, 0.0156250, 0.0312500, 0.0468750,
0.0625000, 0.0781250, 0.0937500, 0.1093750, 0.1250000, 0.1406250,
0.1562500, 0.1718750, 0.1875000, 0.2031250, 0.2187500, 0.2343750,
0.2500000, 0.2656250, 0.2812500, 0.2968750, 0.3125000, 0.3281250,
0.3437500, 0.3593750, 0.3750000, 0.3906250, 0.4062500, 0.4218750,
0.4375000, 0.4531250, 0.4687500, 0.4843750, 0.5000000, 0.5156250,
0.5312500, 0.5468750, 0.5625000, 0.5742188, 0.5859375, 0.5976562,
0.6093750, 0.6250000, 0.6406250, 0.6562500, 0.6718750, 0.6875000,
0.7031250, 0.7187500, 0.7343750, 0.7500000, 0.7656250, 0.7812500,
0.7968750, 0.8125000, 0.8281250, 0.8437500, 0.8593750, 0.8750000,
0.8906250, 0.9062500, 0.9218750, 0.9375000, 0.9531250, 0.9687500,
0.9843750, 0.9882812, 0.9921875, 0.9960938]),
array([ 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000,
0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000,
0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000,
0.0000000, 0.0000000, 0.0000000, 0.0039062, 0.0117188, 0.0156250,
0.0234375, 0.0312500, 0.0390625, 0.0468750, 0.0546875, 0.0585938,
0.0664062, 0.0742188, 0.0820312, 0.0898438, 0.0976562, 0.1054688,
0.1132812, 0.1210938, 0.1289062, 0.1367188, 0.1445312, 0.1484375,
0.1562500, 0.1640625, 0.1718750, 0.1796875, 0.1875000, 0.1953125,
0.2070312, 0.2109375, 0.2187500, 0.2265625, 0.2343750, 0.2421875,
0.2500000, 0.2578125, 0.2656250, 0.2695312, 0.2773438, 0.2851562,
0.2929688, 0.3007812, 0.3085938, 0.3164062, 0.3242188, 0.3281250,
0.3359375, 0.3437500, 0.3515625, 0.3593750, 0.3671875, 0.3750000,
0.3867188, 0.3906250, 0.3984375, 0.4062500, 0.4140625, 0.4218750,
0.4296875, 0.4375000, 0.4453125, 0.4531250, 0.4609375, 0.4687500,
0.4765625, 0.4804688, 0.4882812, 0.4960938, 0.5039062, 0.5117188,
0.5195312, 0.5273438, 0.5351562, 0.5390625, 0.5468750, 0.5546875,
0.5625000, 0.5703125, 0.5781250, 0.5859375, 0.5976562, 0.6015625,
0.6093750, 0.6171875, 0.6250000, 0.6328125, 0.6406250, 0.6484375,
0.6562500, 0.6640625, 0.6718750, 0.6796875, 0.6875000, 0.6914062,
0.6992188, 0.7070312, 0.7148438, 0.7226562, 0.7304688, 0.7382812,
0.7460938, 0.7539062, 0.7617188, 0.7695312, 0.7773438, 0.7773438,
0.7734375, 0.7695312, 0.7656250, 0.7656250, 0.7656250, 0.7656250,
0.7617188, 0.7617188, 0.7578125, 0.7539062, 0.7500000, 0.7500000,
0.7460938, 0.7460938, 0.7421875, 0.7421875, 0.7421875, 0.7421875,
0.7421875, 0.7421875, 0.7421875, 0.7421875, 0.7382812, 0.7382812,
0.7343750, 0.7304688, 0.7265625, 0.7265625, 0.7226562, 0.7226562,
0.7187500, 0.7187500, 0.7226562, 0.7265625, 0.7304688, 0.7304688,
0.7304688, 0.7304688, 0.7304688, 0.7304688, 0.7304688, 0.7304688,
0.7343750, 0.7343750, 0.7343750, 0.7343750, 0.7382812, 0.7382812,
0.7382812, 0.7382812, 0.7382812, 0.7382812, 0.7382812, 0.7382812,
0.7421875, 0.7421875, 0.7421875, 0.7421875, 0.7460938, 0.7460938,
0.7460938, 0.7460938, 0.7460938, 0.7500000, 0.7539062, 0.7578125,
0.7617188, 0.7656250, 0.7695312, 0.7734375, 0.7773438, 0.7812500,
0.7851562, 0.7890625, 0.7929688, 0.7929688, 0.7968750, 0.8007812,
0.8046875, 0.8085938, 0.8125000, 0.8164062, 0.8203125, 0.8242188,
0.8281250, 0.8320312, 0.8359375, 0.8359375, 0.8398438, 0.8437500,
0.8476562, 0.8515625, 0.8554688, 0.8593750, 0.8632812, 0.8671875,
0.8710938, 0.8750000, 0.8789062, 0.8828125, 0.8867188, 0.8906250,
0.8945312, 0.8984375, 0.9023438, 0.9062500, 0.9101562, 0.9140625,
0.9179688, 0.9218750, 0.9257812, 0.9257812, 0.9296875, 0.9335938,
0.9375000, 0.9414062, 0.9453125, 0.9492188, 0.9531250, 0.9570312,
0.9609375, 0.9648438, 0.9687500, 0.9726562, 0.9765625, 0.9804688,
0.9843750, 0.9882812, 0.9921875, 0.9960938]),
array([ 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0, 1.0]),
)
### IDL colormap 11 :: BLUE-RED ###
color_map_luts['idl11'] = \
(
array([ 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000,
0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000,
0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000,
0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000,
0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000,
0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000,
0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000,
0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000,
0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000,
0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000,
0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000,
0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000,
0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000,
0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000,
0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000,
0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000,
0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000,
0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000,
0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000,
0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000,
0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000,
0.0000000, 0.0000000, 0.0000000, 0.0156250, 0.0312500, 0.0468750,
0.0625000, 0.0781250, 0.0937500, 0.1093750, 0.1250000, 0.1406250,
0.1562500, 0.1718750, 0.1875000, 0.2031250, 0.2187500, 0.2343750,
0.2500000, 0.2656250, 0.2812500, 0.2968750, 0.3125000, 0.3320312,
0.3476562, 0.3632812, 0.3789062, 0.3945312, 0.4101562, 0.4257812,
0.4414062, 0.4570312, 0.4726562, 0.4882812, 0.5039062, 0.5195312,
0.5351562, 0.5507812, 0.5664062, 0.5820312, 0.5976562, 0.6132812,
0.6289062, 0.6445312, 0.6640625, 0.6796875, 0.6953125, 0.7109375,
0.7265625, 0.7421875, 0.7578125, 0.7734375, 0.7890625, 0.8046875,
0.8203125, 0.8359375, 0.8515625, 0.8671875, 0.8828125, 0.8984375,
0.9140625, 0.9296875, 0.9453125, 0.9609375, 0.9765625, 0.9960938,
0.9960938, 0.9960938, 0.9960938, 0.9960938, 0.9960938, 0.9960938,
0.9960938, 0.9960938, 0.9960938, 0.9960938, 0.9960938, 0.9960938,
0.9960938, 0.9960938, 0.9960938, 0.9960938, 0.9960938, 0.9960938,
0.9960938, 0.9960938, 0.9960938, 0.9960938, 0.9960938, 0.9960938,
0.9960938, 0.9960938, 0.9960938, 0.9960938, 0.9960938, 0.9960938,
0.9960938, 0.9960938, 0.9960938, 0.9960938, 0.9960938, 0.9960938,
0.9960938, 0.9960938, 0.9960938, 0.9960938, 0.9960938, 0.9960938,
0.9960938, 0.9960938, 0.9960938, 0.9960938, 0.9960938, 0.9960938,
0.9960938, 0.9960938, 0.9960938, 0.9960938, 0.9960938, 0.9960938,
0.9960938, 0.9960938, 0.9960938, 0.9960938, 0.9960938, 0.9960938,
0.9960938, 0.9960938, 0.9960938, 0.9960938]),
array([ 0.0000000, 0.0039062, 0.0078125, 0.0117188, 0.0156250, 0.0312500,
0.0468750, 0.0625000, 0.0820312, 0.0976562, 0.1132812, 0.1289062,
0.1484375, 0.1640625, 0.1796875, 0.1953125, 0.2148438, 0.2304688,
0.2460938, 0.2617188, 0.2812500, 0.2968750, 0.3125000, 0.3281250,
0.3476562, 0.3632812, 0.3789062, 0.3945312, 0.4140625, 0.4296875,
0.4453125, 0.4609375,
import json
import logging
import types
import uuid
from StringIO import StringIO
import validator
from validator import constants, unicodehelper
from validator.constants import SIGNING_SEVERITIES
from validator.outputhandlers.shellcolors import OutputHandler
log = logging.getLogger('amo.validator')
def maybe_tuple(value):
"""Return `value` as a tuple. If it is already a tuple, return it
unchanged. Otherwise return a 1-element tuple containing `value`."""
if isinstance(value, tuple):
return value
return (value,)
def merge_description(base, description):
"""Merge a description with a base message.
`description` may be one of:
* A string, which is set as the message's "description" property.
* A dict, which is merged into the base message.
If `description` is a dict, and contains an `err_id` property which is
a string, that string will be added as the third element of the base
message's `err_id` property.
"""
msg = base.copy()
if isinstance(description, dict):
msg.update(description)
elif description not in (True, False, None):
msg['description'] = description
# If `err_id` is a string, add it as the third element of the
# previous error id.
if isinstance(msg.get('err_id'), basestring):
assert isinstance(base.get('err_id'), tuple), (
'No valid base error ID to append string to.')
msg['err_id'] = base['err_id'][:2] + (msg['err_id'],)
return msg
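# Hedged sketch (not part of the original module) of how merge_description
# composes messages; the err_id values and text here are made up:
#
#     base = {'err_id': ('javascript', 'dangerous_global'),
#             'warning': 'Dangerous global'}
#     merge_description(base, {'err_id': 'eval',
#                              'description': 'Do not use eval.'})
#     # -> {'err_id': ('javascript', 'dangerous_global', 'eval'),
#     #     'warning': 'Dangerous global',
#     #     'description': 'Do not use eval.'}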
def format_message(message, **kw):
"""Format the text properties of an ErrorBundle message dict with the given
keyword args, and the string `.format` method.
The following keys will be formatted, if present:
* 'message'
* 'error'
* 'warning'
* 'notice'
* 'description'
* 'signing_help'
Any of these properties may be either tuples or strings. If they are
tuples, each element will be formatted, and must be a string.
Example:
`format_message({'warning': 'Access to {thing} is deprecated',
'description': ('Please do not ever use {thing}.',
'It is simply uncool.')},
thing='eval')`
Becomes:
`{'warning': 'Access to eval is deprecated',
'description': ('Please do not ever use eval.',
'It is simply uncool.')}`
"""
for key in ('message', 'error', 'warning', 'notice', 'description',
'signing_help'):
if key in message:
if isinstance(message[key], tuple):
message[key] = tuple(string.format(**kw)
for string in message[key])
else:
message[key] = message[key].format(**kw)
class ErrorBundle(object):
"""This class does all sorts of cool things. It gets passed around
from test to test and collects up all the errors like the candy man
'separating the sorrow and collecting up all the cream.' It's
borderline magical.
Keyword Arguments
**determined**
Whether the validator should continue after a tier fails
**listed**
True if the add-on is destined for AMO, false if not
**instant**
Who knows what this does
**overrides**
dict of install.rdf values to override. Possible keys:
targetapp_minVersion, targetapp_maxVersion
**for_appversions**
A dict of app GUIDs referencing lists of versions. Determines which
version-dependant tests should be run.
"""
def __init__(self, determined=True, listed=True, instant=False,
overrides=None, for_appversions=None, debug=0):
self.handler = None
self.debug_level = debug
if debug == 0 and constants.IN_TESTS:
self.debug_level = 1
self.errors = []
self.warnings = []
self.notices = []
self.message_tree = {}
self.compat_summary = {'errors': 0,
'warnings': 0,
'notices': 0}
self.signing_summary = {s: 0 for s in SIGNING_SEVERITIES}
self.ending_tier = 1
self.tier = 1
self.subpackages = []
self.package_stack = []
self.detected_type = 0
self.unfinished = False
# TODO: Break off into resource helper
self.resources = {}
self.pushable_resources = {}
self.final_context = None
self.metadata = {'requires_chrome': False, 'listed': listed,
'validator_version': validator.__version__}
if listed:
self.resources['listed'] = True
self.instant = instant
self.determined = determined
self.version_requirements = None
self.overrides = overrides or None
self.supported_versions = self.for_appversions = for_appversions
def _message(type_, message_type):
def wrap(self, *args, **kwargs):
message = {
'description': (),
'file': '',
'editors_only': False,
}
if 'location' in kwargs:
loc = kwargs['location']
message.update({'file': loc.file,
'line': loc.line,
'column': loc.column})
# Has to go.
if 'err_id' in kwargs:
kwargs['id'] = kwargs['err_id']
if 'filename' in kwargs:
kwargs['file'] = kwargs['filename']
if message_type in kwargs:
kwargs['message'] = kwargs[message_type]
positional_args = ('id',
'message',
'description',
'file',
'line',
'column')
keys = positional_args + (
'tier',
'for_appversions',
'compatibility_type',
'editors_only',
'context_data',
)
# This is absurd.
# Copy positional args into the kwargs dict, if they're missing.
for key, arg in zip(positional_args, args):
assert key not in kwargs
kwargs[key] = arg
for key in keys:
message.setdefault(key, None)
if key in kwargs:
message[key] = kwargs[key]
if 'signing_severity' in kwargs:
severity = kwargs['signing_severity']
assert severity in SIGNING_SEVERITIES
self.signing_summary[severity] += 1
message['signing_severity'] = severity
if 'signing_help' in kwargs:
message['signing_help'] = kwargs['signing_help']
self._save_message(getattr(self, type_), type_, message,
context=kwargs.get('context'))
return message
wrap.__name__ = message_type
return wrap
# And then all the real functions. Ahh, how clean!
error = _message('errors', 'error')
warning = _message('warnings', 'warning')
notice = _message('notices', 'notice')
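# Illustrative call of a generated method (ids, text and filename are
# hypothetical); err_id maps to 'id', filename to 'file', and the 'warning'
# text becomes the message body:
#
#     bundle.warning(
#         err_id=('testcases', 'packagelayout', 'deprecated_file'),
#         warning='Deprecated file detected',
#         description='This file type is no longer accepted.',
#         filename='chrome/content/foo.js')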
def report(self, base_message, *messages):
"""Create a message from the given base message, updating it with
properties from each message in a non-keyword argument, and report
it to the correct reporting function.
The correct reporting function is determined by the presence of either
a "error", "warning", or "notice" key in the test properties, as
expected by the so-named error bundle method.
Example:
`report({'err_id': ('javascript', 'dangerous_global', 'generic'),
'warning': 'Access to dangerous global',
'description': 'Evil. *hiss*'},
{'err_id': 'eval',
'warning': 'Do not use eval. Ugh.'})`
Reports a new warning:
`warning(err_id=('javascript', 'dangerous_global', 'eval'),
warning='Do not use eval. Ugh.',
description='Evil. *hiss*')`
"""
# Merge additional message properties into the base message.
message = reduce(merge_description, messages, base_message)
# Get the message type based on which message key is included in the
# properties.
TYPES = 'error', 'warning', 'notice'
message_type = next(type_ for type_ in TYPES if type_ in message)
return getattr(self, message_type)(**message)
def system_error(self, msg_id=None, message=None, description=None,
validation_timeout=False, exc_info=None, **kw):
"""Add an error message for an unexpected exception in validator
code, and move it to the front of the error message list. If
`exc_info` is supplied, the error will be logged.
If the error is a validation timeout, it is re-raised unless
`msg_id` is "validation_timeout"."""
if constants.IN_TESTS:
# Exceptions that happen during tests should generally end the
# test prematurely rather than just generating a message.
raise exc_info[0], exc_info[1], exc_info[2]
if (isinstance(exc_info[1], validator.ValidationTimeout) and
msg_id != 'validation_timeout'):
# These should always propagate to the top-level exception
# handler, and be reported only once.
raise exc_info[0], exc_info[1], exc_info[2]
log.error('Unexpected error during validation: %s: %s'
% (exc_info[0].__name__, exc_info[1]),
exc_info=exc_info)
full_id = ('validator', 'unexpected_exception')
if msg_id:
full_id += (msg_id,)
self.error(full_id,
message or 'An unexpected error has occurred.',
description or
('Validation was unable to complete successfully due '
'to an unexpected error.',
'The error has been logged, but please consider '
'filing an issue report here: '
'http://mzl.la/1DG0sFd'),
tier=1, **kw)
# Move the error message to the beginning of the list.
self.errors.insert(0, self.errors.pop())
def drop_message(self, message):
"""Drop the given message object from the appropriate message list.
Returns True if the message was found, otherwise False."""
for type_ in 'errors', 'warnings', 'notices':
list_ = getattr(self, type_)
if message in list_:
list_.remove(message)
if 'signing_severity' in message:
self.signing_summary[message['signing_severity']] -= 1
return True
return False
def set_tier(self, tier):
'Updates the tier and ending tier'
self.tier = tier
if tier > self.ending_tier:
self.ending_tier = tier
@property
def message_count(self):
return len(self.errors) + len(self.warnings) + len(self.notices)
def _save_message(self, stack, type_, message, context=None):
"""Store a message in the appropriate message stack."""
uid = uuid.uuid4().hex
message['uid'] = uid
# Get the context for the message (if there's a context available)
if context is not None:
if isinstance(context, tuple):
message['context'] = context
else:
message['context'] = (
context.get_context(line=message['line'],
column=message['column']))
else:
message['context'] = None
if self.package_stack:
if not isinstance(message['file'], list):
message['file'] = [message['file']]
message['file'] = self.package_stack + message['file']
# Test that if for_appversions is set that we're only applying to
# supported add-ons. THIS IS THE LAST FILTER BEFORE THE MESSAGE IS
# ADDED TO THE STACK!
if message['for_appversions']:
if not self.supports_version(message['for_appversions']):
if self.instant:
print '(Instant error discarded)'
self._print_message(type_ + ': ', message, verbose=True)
return
elif self.version_requirements:
# If there was no for_appversions but there were version
# requirements detailed in the decorator, use the ones from the
# decorator.
message['for_appversions'] = self.version_requirements
# Save the message to the stack.
stack.append(message)
# Mark the tier that the error occurred at.
if message['tier'] is None:
message['tier'] = self.tier
# Build out the compatibility summary if possible.
if message['compatibility_type']:
self.compat_summary['%ss' % message['compatibility_type']] += 1
# Build out the message tree entry.
if message['id']:
"""
.. testsetup:: min,sum,value-count,top-hits,stats,percentiles,percentile-ranks
from __future__ import print_function
from mock import Mock
from elasticmagic import agg, Cluster, SearchQuery, DynamicDocument
from elasticmagic.compiler import DefaultCompiler
class SaleDocument(DynamicDocument):
__doc_type__ = 'sale'
class GradeDocument(DynamicDocument):
__doc_type__ = 'sale'
class PageLoadDoc(DynamicDocument):
__doc_type__ = 'load_page'
def sq(aggs_raw_result):
cluster = Cluster(Mock(
search=Mock(
return_value={
'hits': {'max_score': 1, 'total': 1, 'hits': []},
'aggregations': aggs_raw_result})),
compiler=DefaultCompiler)
return cluster.search_query()
"""
from itertools import chain
from .document import DynamicDocument
from .expression import ParamsExpression, Params
from .compat import force_unicode
from .types import instantiate, Type
from .util import _with_clone, cached_property, maybe_float, merge_params
class AggExpression(ParamsExpression):
__visit_name__ = 'agg'
result_cls = None
def clone(self):
return self.__class__(**self.params)
def build_agg_result(
self, raw_data, doc_cls_map=None, mapper_registry=None,
):
raise NotImplementedError()
class AggResult(object):
def __init__(self, agg_expr):
self.expr = agg_expr
class MetricsAgg(AggExpression):
def build_agg_result(
self, raw_data, doc_cls_map=None, mapper_registry=None,
):
return self.result_cls(self, raw_data)
class BucketAgg(AggExpression):
__visit_name__ = 'bucket_agg'
def __init__(self, aggs=None, **kwargs):
super(BucketAgg, self).__init__(**kwargs)
self._aggregations = Params(
aggs or {}, **kwargs.pop('aggregations', {})
)
def clone(self):
return self.__class__(aggs=self._aggregations, **self.params)
@_with_clone
def aggregations(self, *args, **kwargs):
if len(args) == 1 and args[0] is None:
self._aggregations = Params()
else:
self._aggregations = merge_params(self._aggregations, args, kwargs)
aggs = aggregations
def build_agg_result(
self, raw_data, doc_cls_map=None, mapper_registry=None,
):
return self.result_cls(
self, raw_data,
doc_cls_map=doc_cls_map,
mapper_registry=mapper_registry,
)
class SingleValueMetricsAggResult(AggResult):
def __init__(self, agg_expr, raw_data):
super(SingleValueMetricsAggResult, self).__init__(agg_expr)
# TODO: Do we really need to coerce to float?
self.value = maybe_float(raw_data['value'])
self.value_as_string = raw_data.get(
'value_as_string', force_unicode(raw_data['value'])
)
class SingleValueMetricsAgg(MetricsAgg):
result_cls = SingleValueMetricsAggResult
def __init__(self, field=None, script=None, **kwargs):
super(SingleValueMetricsAgg, self).__init__(
field=field, script=script, **kwargs
)
class MultiValueMetricsAggResult(AggResult):
def __init__(self, agg_expr, raw_data):
super(MultiValueMetricsAggResult, self).__init__(agg_expr)
if 'values' in raw_data:
self.values = raw_data['values']
else:
self.values = raw_data
class MultiValueMetricsAgg(MetricsAgg):
result_cls = MultiValueMetricsAggResult
def __init__(self, field=None, script=None, **kwargs):
super(MultiValueMetricsAgg, self).__init__(
field=field, script=script, **kwargs
)
class Min(SingleValueMetricsAgg):
"""A single-value metric aggregation that returns the minimum value among
all extracted numeric values. See
`min agg <https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-metrics-min-aggregation.html>`_.
.. testsetup:: min
search_query = sq({'min_price': {'value': 10.0}})
.. testcode:: min
search_query = search_query.aggs({
'min_price': agg.Min(SaleDocument.price)
})
assert search_query.to_dict() == {
'aggregations': {
'min_price': {'min': {'field': 'price'}}}}
min_price_agg = search_query.get_result().get_aggregation('min_price')
print(min_price_agg.value)
print(min_price_agg.value_as_string)
.. testoutput:: min
10.0
10.0
""" # noqa:E501
__agg_name__ = 'min'
class Max(SingleValueMetricsAgg):
"""A single-value metric aggregation that returns the maximum value among
all extracted numeric values. See
`max agg <https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-metrics-max-aggregation.html>`_.
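Usage mirrors :class:`Min` above (a sketch; the field name is assumed):
.. code-block:: python
search_query = search_query.aggs({
'max_price': agg.Max(SaleDocument.price)
})
# -> {'aggregations': {'max_price': {'max': {'field': 'price'}}}}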
""" # noqa:E501
__agg_name__ = 'max'
class Sum(SingleValueMetricsAgg):
"""A single-value metric aggregation that sums up all extracted numeric
values. See
`sum agg <https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-metrics-sum-aggregation.html>`_.
.. testsetup:: sum
search_query = sq({'prices': {'value': 450.0}})
.. testcode:: sum
search_query = search_query.aggs({
'prices': agg.Sum(SaleDocument.price)
})
assert search_query.to_dict() == {
'aggregations': {
'prices': {'sum': {'field': 'price'}}}}
prices_agg = search_query.get_result().get_aggregation('prices')
print(prices_agg.value)
print(prices_agg.value_as_string)
.. testoutput:: sum
450.0
450.0
""" # noqa:E501
__agg_name__ = 'sum'
class Avg(SingleValueMetricsAgg):
"""A single-value metric aggregation that computes average of all extracted
numeric values. See
`avg agg <https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-metrics-avg-aggregation.html>`_.
""" # noqa:E501
__agg_name__ = 'avg'
class ValueCount(SingleValueMetricsAgg):
"""A single-value metric aggregation that counts the number of all extracted
values. See
`value_count agg <https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-metrics-valuecount-aggregation.html>`_.
.. testsetup:: value-count
search_query = sq({'types_count': {'value': 7}})
.. testcode:: value-count
from elasticmagic import Script
search_query = search_query.aggs({
'types_count': agg.ValueCount(script=Script(
inline='doc[params.field].value',
params={'field': SaleDocument.type}
))
})
assert search_query.to_dict() == {
'aggregations': {
'types_count': {
'value_count': {
'script': {
'inline': 'doc[params.field].value',
'params': {'field': 'type'}}}}}}
print(search_query.get_result().get_aggregation('types_count').value)
.. testoutput:: value-count
7.0
""" # noqa:E501
__agg_name__ = 'value_count'
class TopHitsResult(AggResult):
def __init__(
self, agg_expr, raw_data, doc_cls_map,
mapper_registry, instance_mapper,
):
super(TopHitsResult, self).__init__(agg_expr)
hits_data = raw_data['hits']
self.total = hits_data['total']
self.max_score = hits_data['max_score']
self.hits = []
for hit in hits_data['hits']:
doc_cls = doc_cls_map.get(hit['_type'], DynamicDocument)
self.hits.append(doc_cls(_hit=hit, _result=self))
if isinstance(instance_mapper, dict):
self._instance_mappers = instance_mapper
else:
self._instance_mappers = {
doc_cls: instance_mapper for doc_cls in doc_cls_map.values()
}
if mapper_registry is None:
self._mapper_registry = {}
else:
self._mapper_registry = mapper_registry
if self._instance_mappers:
for instance_mapper in self._instance_mappers.values():
self._mapper_registry \
.setdefault(instance_mapper, []) \
.append(self)
def _populate_instances(self, doc_cls):
instance_mapper = self._instance_mappers.get(doc_cls)
hits = list(chain(
*(
map(
lambda r: filter(
lambda hit: isinstance(hit, doc_cls), r.hits
),
self._mapper_registry.get(instance_mapper, [self])
)
)
))
ids = [hit._id for hit in hits]
instances = instance_mapper(ids) if instance_mapper else {}
for hit in hits:
hit.__dict__['instance'] = instances.get(hit._id)
class TopHits(MetricsAgg):
"""A `top_hits` metric aggregation that groups result set by certain fields
via a bucket aggregator. See
`top_hits agg <https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-metrics-avg-aggregation.html>`_.
.. testsetup:: top-hits
b1 = {
'key': 'hat',
'doc_count': 3,
'top_sales_hits': {'hits': {'total': 3, 'max_score': None, 'hits': [{
'_index': 'sales', '_type': 'sale', '_id': 'AVnNBmauCQpcRyxw6ChK',
'_source': {'date': '2015/03/01 00:00:00', 'price': 200}
}]}}
}
b2 = {
'key': 't-shirt',
'doc_count': 3,
'top_sales_hits': {'hits': {'total': 3, 'max_score': None, 'hits': [{
'_index': 'sales', '_type': 'sale', '_id': 'AVnNBmauCQpcRyxw6ChL',
'_source': {'date': '2015/03/01 00:00:00', 'price': 175}
}]}}
}
b3 = {
'key': 'bag',
'doc_count': 1,
'top_sales_hits': {'hits': {'total': 1, 'max_score': None, 'hits': [{
'_index': 'sales', '_type': 'sale', '_id': 'AVnNBmatCQpcRyxw6ChH',
'_source': {'date': '2015/01/01 00:00:00', 'price': 150}
}]}}
}
search_query = sq({'top_tags': {'buckets': [b1, b2, b3]}})
.. testcode:: top-hits
search_query = search_query.aggs({
'top_tags': agg.Terms(
SaleDocument.type, size=3,
aggs={'top_sales_hits': agg.TopHits(
size=1,
sort=SaleDocument.date.desc(),
_source={
'includes': [SaleDocument.date, SaleDocument.price]
}
)}
)
})
assert search_query.to_dict() == {
'aggregations': {
'top_tags': {
'terms': {'field': 'type', 'size': 3},
'aggregations': {
'top_sales_hits': {
'top_hits': {
'size': 1,
'sort': {'date': 'desc'},
'_source': {'includes': ['date', 'price']}}}}}}}
top_tags = search_query.get_result().get_aggregation('top_tags')
for tag_bucket in top_tags.buckets:
top_hit = tag_bucket.get_aggregation('top_sales_hits').hits[0]
print(
'{0.key} ({0.doc_count}) - {1.price}'.format(
tag_bucket, top_hit
)
)
.. testoutput:: top-hits
hat (3) - 200
t-shirt (3) - 175
bag (1) - 150
""" # noqa:E501
__agg_name__ = 'top_hits'
result_cls = TopHitsResult
def __init__(
self, size=None, from_=None, sort=None, _source=None,
instance_mapper=None, **kwargs
):
super(TopHits, self).__init__(
size=size, from_=from_, sort=sort, _source=_source, **kwargs
)
self._instance_mapper = instance_mapper
def build_agg_result(
self, raw_data, doc_cls_map=None, mapper_registry=None
):
doc_cls_map = doc_cls_map or {}
return self.result_cls(
self, raw_data, doc_cls_map, mapper_registry, self._instance_mapper
)
class StatsResult(MultiValueMetricsAggResult):
def __init__(self, agg_expr, values):
super(StatsResult, self).__init__(agg_expr, values)
self.count = self.values['count']
self.min = self.values['min']
self.max = self.values['max']
self.avg = self.values['avg']
self.sum = self.values['sum']
class Stats(MultiValueMetricsAgg):
"""A multi-value metrics aggregation that computes stats over all extracted
numeric values. See
`stats agg <https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-metrics-stats-aggregation.html>`_.
.. testsetup:: stats
search_query = sq({'grades_stats': {
'count': 6, 'min': 60, 'max': 98, 'avg': 78.5, 'sum': 471}})
.. testcode:: stats
search_query = search_query.aggs({
'grades_stats': agg.Stats(GradeDocument.grade)
})
assert search_query.to_dict() == {
'aggregations': {
'grades_stats': {'stats': {'field': 'grade'}}}}
grades_stats = search_query.get_result().get_aggregation('grades_stats')
print('count:', grades_stats.count)
print('min:', grades_stats.min)
print('max:', grades_stats.max)
print('avg:', grades_stats.avg)
print('sum:', grades_stats.sum)
.. testoutput:: stats
count: 6
min: 60
max: 98
avg: 78.5
sum: 471
""" # noqa:E501
__agg_name__ = 'stats'
result_cls = StatsResult
def __init__(self, field=None, script=None, **kwargs):
super(Stats, self).__init__(field=field, script=script, **kwargs)
class ExtendedStatsResult(StatsResult):
def __init__(self, agg_expr, values):
super(ExtendedStatsResult, self).__init__(agg_expr, values)
self.sum_of_squares = self.values['sum_of_squares']
self.variance = self.values['variance']
self.std_deviation = self.values['std_deviation']
class ExtendedStats(Stats):
"""A multi-value metrics aggregation that computes stats over all extracted
numeric values. See
`extended_stats agg <https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-metrics-extendedstats-aggregation.html>`_.
This aggregation is an extended version of the :class:`Stats` aggregation.
There are some additional metrics:
`sum_of_squares`, `variance`, `std_deviation`.
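A sketch (not an executable doctest) of reading the extra metrics; the
attribute names follow :class:`ExtendedStatsResult` above:
.. code-block:: python
grades_stats = search_query.get_result().get_aggregation('grades_stats')
print(grades_stats.std_deviation, grades_stats.variance, grades_stats.sum_of_squares)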
""" # noqa:E501
__agg_name__ = 'extended_stats'
result_cls = ExtendedStatsResult
def __init__(self, field=None, script=None, **kwargs):
super(ExtendedStats, self).__init__(
field=field, script=script, **kwargs
)
class BasePercentilesAggResult(MultiValueMetricsAggResult):
def __init__(self, *args, **kwargs):
super(BasePercentilesAggResult, self).__init__(*args, **kwargs)
# TODO: Add support for keyed response
values = []
for k, v in self.values.items():
# TODO: Do we need try-catch there?
try:
values.append((float(k), maybe_float(v)))
except ValueError:
pass
self.values = sorted(values, key=lambda e: e[0])
class PercentilesAggResult(BasePercentilesAggResult):
def get_value(self, percent):
for p, v in self.values:
if round(abs(p - percent), 7) == 0:
return v
class Percentiles(MultiValueMetricsAgg):
"""A multi-value metrics aggregation that calculates percentiles over all
extracted numeric values. See
`percentiles agg <https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-metrics-percentile-aggregation.html>`_.
.. note::
Percentiles are usually calculated approximately. Elasticsearch
calculates them using
`TDigest <https://github.com/tdunning/t-digest/blob/master/docs/t-digest-paper/histo.pdf>`_
algorithm.
.. testsetup:: percentiles
search_query = sq({'load_time_outlier': {'values': {
'1.0': 15,
'5.0': 20,
'25.0': 23,
'50.0': 25,
'75.0': 29,
'95.0': 60,
'99.0': 150,
}}})
.. testcode:: percentiles
search_query = search_query.aggs(
load_time_outlier=agg.Percentiles(field=PageLoadDoc.load_time)
)
assert search_query.to_dict() == {
'aggregations': {
'load_time_outlier': {
'percentiles': {'field': 'load_time'}}}}
load_time_agg = search_query.get_result() \
.get_aggregation('load_time_outlier')
for p, v in load_time_agg.values[:-1]:
print('{:<4} - {}'.format(p, v))
print('99 percentile is: {}'.format(load_time_agg.get_value(99)))
.. testoutput:: percentiles
1.0 - 15.0
5.0 - 20.0
25.0 - 23.0
50.0 - 25.0
75.0 - 29.0
95.0 - 60.0
99 percentile is: 150.0
""" # noqa:E501
__agg_name__ = 'percentiles'
result_cls = PercentilesAggResult
def __init__(
self, field=None, script=None, percents=None, compression=None,
**kwargs
):
super(Percentiles, self).__init__(
field=field, script=script, percents=percents,
compression=compression, **kwargs
)
class PercentileRanksAggResult(BasePercentilesAggResult):
def get_percent(self, value):
for v, p in self.values:
if round(abs(v - value), 7) == 0:
return p
class PercentileRanks(MultiValueMetricsAgg):
"""A multi-value metrics | |
not None: # noqa: E501
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
if 'depth' in local_var_params and local_var_params['depth'] is not None: # noqa: E501
query_params.append(('depth', local_var_params['depth'])) # noqa: E501
header_params = {}
if 'x_contract_number' in local_var_params:
header_params['X-Contract-Number'] = local_var_params['x_contract_number'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
if 'label' in local_var_params:
body_params = local_var_params['label']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['Basic Authentication', 'Token Authentication'] # noqa: E501
response_type = 'LabelResource'
if 'response_type' in kwargs:
response_type = kwargs['response_type']
return self.api_client.call_api(
'/datacenters/{datacenterId}/volumes/{volumeId}/labels', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=response_type, # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats,
_request_auth=local_var_params.get('_request_auth'))
def datacenters_volumes_labels_put(self, datacenter_id, volume_id, key, label, **kwargs): # noqa: E501
"""Modify a Label of Volume # noqa: E501
This will modify the value of the label on a volume. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.datacenters_volumes_labels_put(datacenter_id, volume_id, key, label, async_req=True)
>>> result = thread.get()
:param datacenter_id: The unique ID of the Datacenter (required)
:type datacenter_id: str
:param volume_id: The unique ID of the Volume (required)
:type volume_id: str
:param key: The key of the Label (required)
:type key: str
:param label: Modified Label (required)
:type label: LabelResource
:param pretty: Controls whether response is pretty-printed (with indentation and new lines)
:type pretty: bool
:param depth: Controls the details depth of response objects. Eg. GET /datacenters/[ID] - depth=0: only direct properties are included. Children (servers etc.) are not included - depth=1: direct properties and children references are included - depth=2: direct properties and children properties are included - depth=3: direct properties and children properties and children's children are included - depth=... and so on
:type depth: int
:param x_contract_number: Users having more than 1 contract need to provide contract number, against which all API requests should be executed
:type x_contract_number: int
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:type _preload_content: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: LabelResource
"""
kwargs['_return_http_data_only'] = True
return self.datacenters_volumes_labels_put_with_http_info(datacenter_id, volume_id, key, label, **kwargs) # noqa: E501
def datacenters_volumes_labels_put_with_http_info(self, datacenter_id, volume_id, key, label, **kwargs): # noqa: E501
"""Modify a Label of Volume # noqa: E501
This will modify the value of the label on a volume. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.datacenters_volumes_labels_put_with_http_info(datacenter_id, volume_id, key, label, async_req=True)
>>> result = thread.get()
:param datacenter_id: The unique ID of the Datacenter (required)
:type datacenter_id: str
:param volume_id: The unique ID of the Volume (required)
:type volume_id: str
:param key: The key of the Label (required)
:type key: str
:param label: Modified Label (required)
:type label: LabelResource
:param pretty: Controls whether response is pretty-printed (with indentation and new lines)
:type pretty: bool
:param depth: Controls the details depth of response objects. Eg. GET /datacenters/[ID] - depth=0: only direct properties are included. Children (servers etc.) are not included - depth=1: direct properties and children references are included - depth=2: direct properties and children properties are included - depth=3: direct properties and children properties and children's children are included - depth=... and so on
:type depth: int
:param x_contract_number: Users having more than 1 contract need to provide contract number, against which all API requests should be executed
:type x_contract_number: int
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _return_http_data_only: response data without head status code
and headers
:type _return_http_data_only: bool, optional
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:type _preload_content: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:param _request_auth: set to override the auth_settings for an a single
request; this effectively ignores the authentication
in the spec for a single request.
:type _request_auth: dict, optional
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: tuple(LabelResource, status_code(int), headers(HTTPHeaderDict))
"""
local_var_params = locals()
all_params = [
'datacenter_id',
'volume_id',
'key',
'label',
'pretty',
'depth',
'x_contract_number'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout',
'_request_auth',
'response_type'
]
)
for local_var_params_key, local_var_params_val in six.iteritems(local_var_params['kwargs']):
if local_var_params_key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method datacenters_volumes_labels_put" % local_var_params_key
)
local_var_params[local_var_params_key] = local_var_params_val
del local_var_params['kwargs']
# verify the required parameter 'datacenter_id' is set
if self.api_client.client_side_validation and ('datacenter_id' not in local_var_params or # noqa: E501
local_var_params['datacenter_id'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `datacenter_id` when calling `datacenters_volumes_labels_put`") # noqa: E501
# verify the required parameter 'volume_id' is set
if self.api_client.client_side_validation and ('volume_id' not in local_var_params or # noqa: E501
local_var_params['volume_id'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `volume_id` when calling `datacenters_volumes_labels_put`") # noqa: E501
# verify the required parameter 'key' is set
if self.api_client.client_side_validation and ('key' not in local_var_params or # noqa: E501
local_var_params['key'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `key` when calling `datacenters_volumes_labels_put`") # noqa: E501
# verify the required parameter 'label' is set
if self.api_client.client_side_validation and ('label' not in local_var_params or # noqa: E501
local_var_params['label'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `label` when calling `datacenters_volumes_labels_put`") # noqa: E501
if self.api_client.client_side_validation and 'depth' in local_var_params and local_var_params['depth'] > 10: # noqa: E501
raise ApiValueError("Invalid value for parameter `depth` when calling `datacenters_volumes_labels_put`, must be a value less than or equal to `10`") # noqa: E501
if self.api_client.client_side_validation and 'depth' in local_var_params and local_var_params['depth'] < 0: # noqa: E501
raise ApiValueError("Invalid value for parameter `depth` when calling `datacenters_volumes_labels_put`, must be a value greater than or equal to `0`") # noqa: E501
collection_formats = {}
path_params = {}
if 'datacenter_id' in local_var_params:
path_params['datacenterId'] = local_var_params['datacenter_id'] # noqa: E501
if 'volume_id' in local_var_params:
path_params['volumeId'] = local_var_params['volume_id'] # noqa: E501
if 'key' in local_var_params:
path_params['key'] = local_var_params['key'] # noqa: E501
query_params = []
if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
if 'depth' in local_var_params and local_var_params['depth'] is not None: # noqa: E501
query_params.append(('depth', local_var_params['depth'])) # noqa: E501
header_params = {}
if 'x_contract_number' in local_var_params:
header_params['X-Contract-Number'] = local_var_params['x_contract_number'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
if 'label' in local_var_params:
body_params = local_var_params['label']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['Basic Authentication', 'Token Authentication'] # noqa: E501
response_type = 'LabelResource'
if 'response_type' in kwargs:
response_type = kwargs['response_type']
return self.api_client.call_api(
'/datacenters/{datacenterId}/volumes/{volumeId}/labels/{key}', 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=response_type, # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats,
_request_auth=local_var_params.get('_request_auth'))
def ipblocks_labels_delete(self, ipblock_id, key, **kwargs): # noqa: E501
"""Delete a Label from IP Block # noqa: E501
This will remove a label from the Ip Block. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.ipblocks_labels_delete(ipblock_id, key, async_req=True)
>>> result = thread.get()
:param ipblock_id: The unique ID of the Ip Block (required)
:type ipblock_id: str
:param key: The key of the Label (required)
tags.tagExists("retry"):
retry = int(tags.tagGet("retry"))
else:
retry = -1
if tags.tagExists("regex"):
regex = tags.tagGet("regex")
else:
regex = None
if len(descr) > 30 and ttype not in ("dict", "multiline"):
self._log_info(descr)
descr = ""
# print "type:'%s'"%ttype
if ttype == "str":
result = self._j.tools.console.askString(question=descr, defaultparam=default, regex=regex, retry=retry)
elif ttype == "password":
result = self._j.tools.console.askPassword(question=descr, confirm=False)
elif ttype == "list":
result = self._j.tools.console.askString(question=descr, defaultparam=default, regex=regex, retry=retry)
elif ttype == "multiline":
result = self._j.tools.console.askMultiline(question=descr)
elif ttype == "float":
result = self._j.tools.console.askString(question=descr, defaultparam=default, regex=None)
# check getFloat
try:
result = float(result)
except BaseException:
raise self._j.exceptions.Input("Please provide float.", "system.self.ask.neededfloat")
result = str(result)
elif ttype == "int":
if tags.tagExists("minValue"):
minValue = int(tags.tagGet("minValue"))
else:
minValue = None
if tags.tagExists("maxValue"):
maxValue = int(tags.tagGet("maxValue"))
else:
maxValue = None
if not default:
default = None
result = self._j.tools.console.askInteger(
question=descr, defaultValue=default, minValue=minValue, maxValue=maxValue, retry=retry
)
elif ttype == "bool":
if descr != "":
self._log_info(descr)
result = self._j.tools.console.askYesNo()
if result:
result = True
else:
result = False
elif ttype == "dropdown":
if tags.tagExists("dropdownvals"):
dropdownvals = tags.tagGet("dropdownvals")
else:
raise self._j.exceptions.Input(
"When type is dropdown in ask, then dropdownvals needs to be specified as well."
)
choicearray = [item.strip() for item in dropdownvals.split(",")]
result = self._j.tools.console.askChoice(choicearray, descr=descr, sort=True)
elif ttype == "dict":
rawresult = self._j.tools.console.askMultiline(question=descr)
result = "\n"
for line in rawresult.splitlines():
result += " %s,\n" % line.strip().strip(",")
else:
raise self._j.exceptions.Input(
"Input type:%s is invalid (only: bool,int,str,string,dropdown,list,dict,float)" % ttype
)
out += "%s%s\n" % (prefix, result)
# if endlf==False:
out = out[:-1]
return ttype, out
def getMacroCandidates(self, txt):
"""
look for \{\{\}\} return as list
"""
result = []
items = txt.split("{{")
for item in items:
if item.find("}}") != -1:
item = item.split("}}")[0]
candidate = "{{%s}}" % item
if candidate not in result:
result.append(candidate)
return result
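# Illustrative sketch (not in the original source):
#
#     self.getMacroCandidates("total: {{price}} {{currency}}")
#     # -> ['{{price}}', '{{currency}}']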
def _str2var(self, string):
"""
detect whether the string represents None, a bool, an int, a float or a
plain string; return a (type-code, value) tuple
"""
if not isinstance(string, str):
string = str(string)
if string.lower() == "empty":
return "n", None
if string.lower() == "none":
return "n", None
if string == "":
return "s", ""
string2 = string.strip()
if string2.lower() == "true":
return "b", True
if string2.lower() == "false":
return "b", False
# check int
if re_nondigit.search(string2) is None and string2 != "":
# print "int:'%s'"%string2
return "i", int(string2)
# check float
match = re_float.search(string2)
if match is not None and match.start() == 0 and match.end() == len(string2):
return "f", float(string2)
return "s", self.machinetext2str(string)
def parseArgs(self, args):
"""
@param args e.g.
msg,f = 'f',g = 1, x=[1,2,3]
result is dict with key the name, val is the default val
if empty like for msg then None
"""
args = args.rstrip("):")
amMethodArgs = {}
for arg in args.split(","):
if "=" in arg:
argname, default = arg.split("=", 1)
argname = argname.strip()
default = default.strip()
if default[0] == '"':
default = default.strip('"')
elif default[0] == "'":
default = default.strip("'")
elif default == "[]":
default = []
elif default == "{}":
default = {}
elif default[0] in ("[", "{"):
default = eval(default)
elif "." in default:
default = float(default)
else:
default = int(default)
else:
argname = arg.strip()
default = None
amMethodArgs[argname] = default
return amMethodArgs
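# Illustrative sketch (not in the original source):
#
#     self.parseArgs("msg, f='f', g=1")
#     # -> {'msg': None, 'f': 'f', 'g': 1}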
def parseDefLine(self, line, parseArgs=True):
"""
will return name & args
args is dict, with support for int, str, list, dict, float
example line:
def echo('f',g = 1, x=[1,2,3])
async def echo('f',g = 1, x=[1,2,3])
"""
# async = False
definition = ""
if line.find("async") == 0:
# async = True
line = line[len("async ") :]
definition, args = line.split("(", 1)
amName = definition[4:].strip()
args = args.strip()
if parseArgs:
args = self.parseArgs(args)
return amName, args
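# Illustrative sketch (not in the original source):
#
#     self.parseDefLine("def echo(f='f', g=1)")
#     # -> ('echo', {'f': 'f', 'g': 1})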
def str2var(self, string):
"""
convert list, dict of strings
or convert 1 string to python objects
"""
if self._j.data.types.list.check(string):
ttypes = []
for item in string:
ttype, val = self._str2var(item)
if ttype not in ttypes:
ttypes.append(ttype)
if "s" in ttypes:
result = [str(self.machinetext2val(item)) for item in string]
elif "f" in ttypes and "b" not in ttypes:
result = [self.getFloat(item) for item in string]
elif "i" in ttypes and "b" not in ttypes:
result = [self.getInt(item) for item in string]
elif "b" == ttypes:
result = [self.getBool(item) for item in string]
else:
result = [str(self.machinetext2val(item)) for item in string]
elif self._j.data.types.dict.check(string):
ttypes = []
result = {}
for key, item in list(string.items()):
ttype, val = self._str2var(item)
if ttype not in ttypes:
ttypes.append(ttype)
if "s" in ttypes:
for key, item in list(string.items()):
result[key] = str(self.machinetext2val(item))
elif "f" in ttypes and "b" not in ttypes:
for key, item in list(string.items()):
result[key] = self.getFloat(item)
elif "i" in ttypes and "b" not in ttypes:
for key, item in list(string.items()):
result[key] = self.getInt(item)
elif "b" == ttypes:
for key, item in list(string.items()):
result[key] = self.getBool(item)
else:
for key, item in list(string.items()):
result[key] = str(self.machinetext2val(item))
elif isinstance(string, str) or isinstance(string, float) or isinstance(string, int):
ttype, result = self._str2var(self._j.core.text.toStr(string))
else:
raise self._j.exceptions.Input(
"Could not convert '%s' to basetype, input was %s. Expected string, dict or list."
% (string, type(string)),
"self.str2var",
)
return result
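# Illustrative sketch (not in the original source); assumes getInt/getFloat
# perform plain int()/float() conversions:
#
#     self.str2var("10")           # -> 10
#     self.str2var(["1", "2.5"])   # -> [1.0, 2.5]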
def eval(self, code):
"""
look for {{}} in code and evaluate as python result is converted back to str
"""
candidates = self.getMacroCandidates(code)
for item in candidates:
if "{{" and "}}" in item:
item = item.strip("{{").strip("}}")
try:
result = eval(item)
except Exception as e:
raise self._j.exceptions.RuntimeError(
"Could not execute code in self._j.core.text.,%s\n%s. Error was:%s" % (item, code, e)
)
result = self.pythonObjToStr(result, multiline=False).strip()
code = code.replace(item, result)
return code
def pythonObjToStr1line(self, obj):
return self.pythonObjToStr(obj, False, canBeDict=False)
def pythonObjToStr(self, obj, multiline=True, canBeDict=True, partial=False):
"""
try to convert a python object to its string representation; works for
None, bool, integer, float, string, dict and list
"""
if obj is None:
return ""
elif isinstance(obj, bytes):
obj = obj.decode("utf8")
return obj
elif self._j.data.types.bool.check(obj):
if obj:
obj = "True"
else:
obj = "False"
return obj
elif self._j.data.types.string.check(obj):
isdict = canBeDict and obj.find(":") != -1
if obj.strip() == "":
return ""
if obj.find("\n") != -1 and multiline:
obj = "\n%s" % self.prefix(" ", obj.strip())
elif not isdict or obj.find(" ") != -1 or obj.find("/") != -1 or obj.find(",") != -1:
if not partial:
obj = "'%s'" % obj.strip("'")
else:
obj = "%s" % obj.strip("'")
return obj
elif self._j.data.types.int.check(obj) or self._j.data.types.float.check(obj):
return str(obj)
elif self._j.data.types.list.check(obj):
obj.sort()
tmp = []
for item in obj:
if item is None:
continue
if isinstance(item, str):
if item.strip() == "" or item.strip() == "''":
continue
tmp.append(item)
obj = tmp
# if not canBeDict:
# raise self._j.exceptions.RuntimeError("subitem cannot be list or dict for:%s"%obj)
if multiline:
resout = "\n"
for item in obj:
resout += " %s,\n" % self.pythonObjToStr1line(item)
resout = resout.rstrip().strip(",") + ",\n"
else:
resout = "["
for item in obj:
resout += "%s," % self.pythonObjToStr1line(item)
resout = resout.rstrip().strip(",") + "]"
return resout
elif self._j.data.types.dict.check(obj):
if not canBeDict:
raise self._j.exceptions.RuntimeError("subitem cannot be list or dict for:%s" % obj)
if multiline:
resout = "\n"
keys = sorted(obj.keys())
for key in keys:
val = obj[key]
val = self.pythonObjToStr1line(val)
# resout+="%s:%s, "%(key,val)
resout += " %s:%s,\n" % (key, self.pythonObjToStr1line(val))
resout = resout.rstrip().rstrip(",") + ",\n"
else:
resout = ""
keys = sorted(obj.keys())
for key in keys:
val = obj[key]
val = self.pythonObjToStr1line(val)
resout += "%s:%s," % (key, val)
resout = resout.rstrip().rstrip(",") + ","
return resout
else:
raise self._j.exceptions.RuntimeError("Could not convert %s to string" % obj)
def replaceQuotes(self, value, replacewith):
for item in re.findall(matchquote, value):
value = value.replace(item, replacewith)
return value
def machinetext2val(self, value):
"""
do reverse of:
SPACE -> \\S
" -> \\Q
, -> \\K
: -> \\D
\\n -> return
"""
# value=value.strip("'")
value2 = value.replace("\\K", ",")
value2 = value2.replace("\\Q", '"')
value2 = value2.replace("\\S", " ")
value2 = value2.replace("\\D", ":")
value2 = value2.replace("\\N", "\n")
value2 = value2.replace("\\n", "\n")
# change = False
# if value != value2:
# change = True
if value2.strip() == "":
return value2
if value2.strip().strip("'").startswith("[") and value2.strip().strip("'").endswith("]"):
value2 = value2.strip().strip("'").strip("[]")
res = []
for item in value2.split(","):
if item.strip() == "":
continue
if self.isInt(item):
item = self.getInt(item)
elif self.isFloat(item):
item = self.getFloat(item)
res.append(item)
return res
# Check if it's not
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division
from __future__ import print_function, unicode_literals
import os
import re
import subprocess
import time
def convert_project(project, repos, svn_repos, authors_file=None,
latest_release_version=None):
'''
Parameters
----------
project: str
component name (brainvisa-share, axon etc)
repos: str
git base repos directory. The project repos will be a subdirectory of
it so it's safe to use the same repos directory for several projects
svn_repos: str
svn repos URL, including project/component dir
(ex: https://bioproj.extra.cea.fr/neurosvn/brainvisa/soma/soma-base)
authors_file: str
correspondence map file between svn and git[hub] logins.
format: see git-svn manpage (--authors-file)
'''
cur_dir = os.getcwd()
os.chdir(repos)
auth_args = ''
if authors_file:
auth_args = ' --authors-file %s' % authors_file
cmd = 'git svn clone --stdlayout --follow-parent%s %s %s' \
% (auth_args, svn_repos, project)
try:
print(cmd)
subprocess.check_call(cmd.split())
except subprocess.CalledProcessError:
# git-svn died with signal 11
print('conversion failed at some point... trying again...')
fetch_project(project, '.', authors_file)
make_branches(os.path.join(repos, project))
make_tags(os.path.join(repos, project),
latest_release_version=latest_release_version)
os.chdir(cur_dir)
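# Illustrative call, reusing the example URL from the docstring (the local paths
# and version number below are hypothetical):
#   convert_project('soma-base', '/tmp/git-repos',
#                   'https://bioproj.extra.cea.fr/neurosvn/brainvisa/soma/soma-base',
#                   authors_file='authors.txt',
#                   latest_release_version='5.0.0')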
def update_project(project, repos, authors_file=None,
latest_release_version=None):
'''
Incorporate new changes from the SVN repo into the Git repo.
Parameters
----------
project: str
component name (brainvisa-share, axon etc)
repos: str
git base repos directory. The project repos will be a subdirectory of
it so it's safe to use the same repos directory for several projects
authors_file: str
correspondence map file between svn and git[hub] logins.
format: see git-svn manpage (--authors-file)
'''
fetch_project(project, repos, authors_file)
update_branches(os.path.join(repos, project))
make_tags(os.path.join(repos, project),
latest_release_version=latest_release_version)
def fetch_project(project, repos, authors_file=None):
'''
Parameters
----------
project: str
component name (brainvisa-share, axon etc)
repos: str
git base repos directory. The project repos will be a subdirectory of
it so it's safe to use the same repos directory for several projects
authors_file: str
correspondence map file between svn and git[hub] logins.
format: see git-svn manpage (--authors-file)
'''
cur_dir = os.getcwd()
os.chdir(repos)
auth_args = ''
if authors_file:
auth_args = ' --authors-file %s' % authors_file
os.chdir(project)
ok = False # try several times in case git-svn crashes...
while not ok:
cmd = 'git svn fetch' + auth_args
try:
print(cmd)
subprocess.check_call(cmd.split())
ok = True
except subprocess.CalledProcessError:
print('conversion failed at some point... trying again in 5 seconds...')
time.sleep(5)
os.chdir(cur_dir)
def make_branches(repos):
'''
Make master / integration branches matching resp. bug_fix and trunk
branches in svn
Parameters
----------
repos: str
git repos directory, including the project dir.
'''
cur_dir = os.getcwd()
os.chdir(repos)
cmd = 'git branch -a'
print(cmd)
branches = subprocess.check_output(cmd.split(),
universal_newlines=True).split('\n')
for branch in branches:
branch = branch.strip()
if branch.startswith('remotes/origin/'):
svn_branch_name = branch[len('remotes/origin/'):]
if '/' in svn_branch_name:
continue # probably a tag, handled in make_tags()
print('branch:', svn_branch_name)
if svn_branch_name == 'bug_fix':
git_branch_name = 'master'
elif svn_branch_name == 'trunk':
git_branch_name = 'integration'
else:
git_branch_name = svn_branch_name
cmd = ['git', 'checkout', '-B', git_branch_name,
'refs/remotes/origin/' + svn_branch_name]
print(' '.join(cmd))
subprocess.check_call(cmd)
os.chdir(cur_dir)
def update_branches(repos):
'''
Update master / integration branches matching resp. bug_fix and trunk
branches in svn
Parameters
----------
repos: str
git repos directory, including the project dir.
'''
cur_dir = os.getcwd()
os.chdir(repos)
raise NotImplementedError('update_branches is not supported anymore')
cmd = 'git checkout integration'
print(cmd)
# is allowed to fail for projects that do not have trunk
returncode = subprocess.call(cmd.split())
if returncode == 0:
cmd = 'git merge --ff-only refs/remotes/origin/trunk'
print(cmd)
subprocess.check_call(cmd.split())
cmd = 'git checkout master'
print(cmd)
subprocess.check_call(cmd.split())
cmd = 'git merge --ff-only refs/remotes/origin/bug_fix'
print(cmd)
subprocess.check_call(cmd.split())
os.chdir(cur_dir)
def make_tags(repos, latest_release_version=None):
'''
Make tags
Parameters
----------
repos: str
git repos directory, including the project dir.
latest_release_version: str
version number that will replace the latest_release SVN tag
'''
cur_dir = os.getcwd()
os.chdir(repos)
cmd = 'git branch -a'
print(cmd)
branches = subprocess.check_output(cmd.split(),
universal_newlines=True).split('\n')
for branch in branches:
branch = branch.strip()
if branch.startswith('remotes/origin/tags/'):
svn_tag_name = branch[len('remotes/origin/tags/'):]
print('tag:', svn_tag_name)
# The SVN tag can have a history that deviates from the main line
# of history, which typically consists of empty commits that are
# created when the branch is moved from latest_release to a named
# version. We want the tag to point to a commit that is on the main
# line of history as far as possible, so that e.g. "git describe"
# can give useful output. Therefore, we search for the closest
# commit on the mainline with "git merge-base", then we validate
# with "git diff" that the contents of this commit are the same as
# the tag.
ancestor_commit = subprocess.check_output(
['git', 'merge-base', branch, 'master'],
universal_newlines=True,
).strip()
returncode = subprocess.call(['git', 'diff', '--quiet',
ancestor_commit, branch])
if returncode == 0:
tag_cmd_env = {}
if (re.match(r'^\d+\.\d+\.\d+$', svn_tag_name)
or (svn_tag_name == 'latest_release'
and latest_release_version is not None)):
if svn_tag_name == 'latest_release':
tag_version = latest_release_version
else:
tag_version = svn_tag_name
git_tag_name = 'v' + tag_version
# Skip the tag if it already exists in git
returncode = subprocess.call(
['git', 'rev-parse', '--quiet', '--verify',
git_tag_name + '^{tag}'],
stdout=open(os.devnull, 'w'))
if returncode == 0:
continue
tag_cmd = ['git', 'tag', '-a', '-m',
"Version %s (from SVN tag %s)" % (tag_version, svn_tag_name),
git_tag_name, ancestor_commit]
# We want the tag object to carry the date and committer
# who created the tag in SVN in the first place (typically,
# the person who moved the branch to tags/latest_release).
tag_commit = subprocess.check_output(
['git', 'rev-list', '--reverse',
ancestor_commit + '..' + branch],
universal_newlines=True,
).split('\n', 1)[0]
tag_date, tagger_name, tagger_email = subprocess.check_output(
['git', 'show', '--format=%cI%n%cn%n%ce', '--no-patch',
tag_commit],
universal_newlines=True,
).strip().split('\n')
tag_cmd_env = {'GIT_COMMITTER_NAME': tagger_name,
'GIT_COMMITTER_EMAIL': tagger_email,
'GIT_COMMITTER_DATE': tag_date}
print(tag_cmd)
tag_cmd_env.update(os.environ)
subprocess.check_call(tag_cmd, env=tag_cmd_env)
elif svn_tag_name in ('latest_release', 'release_candidate'):
pass # Drop these branches
else:
print("WARNING: not converting the SVN tag '%s' to Git "
"because it does not match the X.Y.Z format."
% svn_tag_name)
else:
print('WARNING: cannot find a mainline commit that matches '
'the SVN tag %s, no git tag will be created.'
% svn_tag_name)
os.chdir(cur_dir)
def convert_perforce_directory(project, repos, svn_repos, authors_file=None):
'''
Parameters
----------
project: str
component name (brainvisa-share, axon etc)
repos: str
git base repos directory. The project repos will be a subdirectory of
it so it's safe to use the same repos directory for several projects
svn_repos: str
svn repos URL, including project/component dir
(ex: https://bioproj.extra.cea.fr/neurosvn/perforce/brainvisa)
authors_file: str
correspondence map file between svn and git[hub] logins.
format: see git-svn manpage (--authors-file)
'''
cur_dir = os.getcwd()
os.chdir(repos)
auth_args = ''
if authors_file:
auth_args = '--authors-file %s ' % authors_file
cmd = 'git svn clone --trunk=main --branches=. %s%s' \
% (auth_args, svn_repos)
try:
try:
print(cmd)
subprocess.check_call(cmd.split())
except subprocess.CalledProcessError:
# some errors are due to non-understood history items
print('conversion failed at some point...')
finally:
os.chdir(cur_dir)
def graft_history(project, old_project, repos, old_repos, branch='master',
old_branch='trunk'):
'''
branch older commits (perforce) to the beginning of master
Parameters
----------
project: str
later project name
old_project: str
former project name
repos: str
later project git repos directory (including project name)
old_repos: str
former project git repos directory (including project name)
branch: str
later project branch to graft
old_branch: str
former project branch
'''
cur_dir = os.getcwd()
os.chdir(old_repos)
cmd = 'git checkout %s' % old_branch
print(cmd)
subprocess.check_call(cmd.split())
os.chdir(repos)
cmd = 'git remote add old %s' % old_repos
print(cmd)
subprocess.check_call(cmd.split())
cmd = 'git fetch old'
print(cmd)
subprocess.check_call(cmd.split())
cmd = 'git replace --graft `git rev-list %s | tail -n 1` old/%s' \
% (branch, old_branch)
print(cmd)
subprocess.check_call(cmd, shell=True)
os.chdir(cur_dir)
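# Illustrative use, grafting an old Perforce-converted history onto the start of
# the current master branch (the repository paths below are hypothetical):
#   graft_history('axon', 'axon-perforce',
#                 '/tmp/git-repos/axon', '/tmp/git-repos-perforce/axon',
#                 branch='master', old_branch='trunk')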
# --
def main():
import argparse
bioproj = 'https://bioproj.extra.cea.fr/neurosvn'
parser = argparse.ArgumentParser('Convert some svn repositories to git')
parser.add_argument('-u', '--update', action='store_true',
help='update projects instead of cloning them')
parser.add_argument('-p', '--project', action='append', default=[],
help='project (component) to be converted. A project or component name may specify which sub-directory of the svn repos it is in, using a ":", ex: "soma-base:soma/soma-base". If not specified, the project dir is expected to be found directly under the project name directory in the base svn repository. '
'Multiple projects can be processed using multiple '
'-p arguments')
parser.add_argument('-r', '--repos',
help='git local repository directory '
'[default: current directory]')
parser.add_argument('-s', '--svn',
help='svn repository base URL [default: %s]' % bioproj)
parser.add_argument('-A', '--authors-file',
help='authors file passed to git-svn: Syntax is '
'compatible with the file used by git cvsimport:\n'
'loginname = <NAME> <<EMAIL>>')
parser.add_argument('--latest-release-version', default=None,
help='version number (without the v prefix) of the '
'Git tag which will be created from the '
'latest_release SVN tag')
# coding=utf-8
# ==============================================================================
# beelbe
# Copyright © 2016 <NAME>. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# import the logging library
import logging
from django.contrib.auth.models import User
from django.core.exceptions import (ValidationError, NON_FIELD_ERRORS)
from django.core.validators import validate_comma_separated_integer_list
from django.db import models, transaction
from django.db.models import Max, F, Sum
from django.db.models.signals import post_save
from django.dispatch import receiver
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _
from nltk.corpus import words
from numpy import random
from beelbe.settings import LANGUAGES
from experiments.constants import Constants, SurveyStrings
# Get an instance of a logger
logger = logging.getLogger(__name__)
# Create your models here.
class Experiment(models.Model):
experiment_name = models.CharField(max_length=100)
experiment_metadata = models.CharField(max_length=400)
created = models.DateTimeField('Creation Date', editable=False)
last_modified = models.DateTimeField('Last modified')
experiment_status = models.IntegerField(
choices=Constants.EXPERIMENT_STATUS,
default=Constants.UNFINISHED,
)
def save(self, *args, **kwargs):
"""On save, update timestamps"""
if not self.id:
self.created = timezone.now()
self.last_modified = timezone.now()
return super(Experiment, self).save(*args, **kwargs)
def __str__(self):
return self.experiment_name
class Game(models.Model):
game_uid = models.CharField("Unique Identifier", unique=True, max_length=10)
game_name = models.CharField("Name", max_length=100)
game_metadata = models.CharField("Metadata", max_length=400)
topology = models.CharField(
max_length=10,
choices=Constants.TOPOLOGY_CHOICES,
default=Constants.NONE,
)
num_players = models.IntegerField('Number of players')
is_using_emus = models.BooleanField("Uses EMUs", default=True)
real_currency = models.CharField("Real monetary currency", max_length=10,
default="EUR")
conversion_rate = models.FloatField("Conversion rate", default=1.0)
def __str__(self):
return self.game_name
class EndProbability(models.Model):
"""Is associated to each game and determines"""
round = models.IntegerField("Round at which the experiment might finish", default=0)
probability = models.FloatField("Probability that the experiment might finish at the given round", default=0)
class Meta:
unique_together = ('round', 'probability',)
def __str__(self):
return "round: {round} | p: {probability}".format(round=self.round, probability=self.probability)
class CollectiveRiskGame(Game):
threshold = models.IntegerField('Threshold')
risk_prob = models.FloatField('Risk probability', default=0.9)
group_size = models.IntegerField("Group Size")
endowment = models.IntegerField("Endowment", default=Constants.ENDOWMENT)
valid_actions = models.CharField("Valid actions", max_length=10, default="0,2,4",
validators=[validate_comma_separated_integer_list])
rounds = models.IntegerField("Game rounds")
is_round_variable = models.BooleanField('Deadline variable?', default=False)
round_distribution = models.CharField(
"Stochastic model to calculate final round",
max_length=10,
choices=Constants.DISTRIBUTION_CHOICES,
default=Constants.UNIFORM
)
round_variance = models.FloatField("Threshold variance", default=0)
distribution_steps = models.IntegerField("Number of rounds for which there will be a "
"certain probability that the game will finish", default=0)
end_probabilities = models.ManyToManyField(EndProbability, related_name="end_probabilities", blank=True)
termination_probability = models.FloatField("Probability that the game will finish at a certain round", default=0.0)
min_round = models.IntegerField("Minimum number of rounds that the game will take", default=0)
dice_faces = models.IntegerField("Number of faces for the dice representation of the random generator", default=6)
def get_initial_public_account(self):
return self.endowment * self.group_size
def get_valid_actions_as_list(self):
return self.valid_actions.split(",")
def set_rounds_model(self):
pass
def get_rounds(self):
if not self.is_round_variable:
return self.rounds
def is_probability_correct(self):
aggr = self.end_probabilities.aggregate(sum_prob=Sum('probability'))
if aggr['sum_prob'] > 1:
return False
return True
get_rounds.short_description = "Returns final round according to the specified model"
class Treatment(models.Model):
experiment = models.ForeignKey(Experiment, on_delete=models.CASCADE, related_name="treatments")
game = models.ForeignKey(Game, on_delete=models.CASCADE, related_name="treatments")
treatment_name = models.CharField(max_length=100)
treatment_metadata = models.CharField(max_length=400, blank=True)
def __str__(self):
return "Experiment {} | {}".format(self.experiment.experiment_name, self.treatment_name)
class Meta:
ordering = ('experiment', 'game',)
class Session(models.Model):
experiment = models.ForeignKey(Experiment, on_delete=models.CASCADE, related_name="sessions")
treatment = models.ForeignKey(Treatment, on_delete=models.CASCADE, related_name="sessions")
session_number = models.IntegerField("Session number")
session_metadata = models.CharField(max_length=200, blank=True)
scheduled_date = models.DateTimeField('Scheduled for')
time_start = models.DateTimeField('Start at', null=True, blank=True)
time_finish = models.DateTimeField('Finished at', null=True, blank=True)
finished = models.BooleanField('finished', default=False)
structure_assigned = models.BooleanField(
verbose_name='structure assigned',
default=False
)
group_size = models.IntegerField('group size', default=0)
def set_time_start(self):
pass
def set_time_finish_and_duration(self):
pass
def set_session_number(self):
pass
def get_session_duration(self):
return self.time_finish - self.time_start
get_session_duration.admin_order_field = 'experiment'
get_session_duration.short_description = "Duration of the session"
def __str__(self):
return "Session {}".format(self.session_number)
class Meta:
ordering = ('experiment', 'treatment', 'session_number',)
class Player(models.Model):
user = models.OneToOneField(User, on_delete=models.PROTECT, related_name="player", primary_key=True)
experiment = models.ForeignKey(Experiment, on_delete=models.PROTECT, related_name="players", null=True)
session = models.ForeignKey(Session, on_delete=models.PROTECT, related_name="players", null=True)
group = models.ForeignKey("Group", related_name="members", null=True, blank=True, on_delete=models.PROTECT)
def get_last_round(self):
return self.game_data.aggregate(Max('round'))
def get_last_action(self):
return self.game_data.filter(round=self.get_last_round()).action
def get_last_round_actions_others(self):
return GameData.objects.filter(round=self.profile.last_round - 1, session=self.session,
group=self.group).exclude(player=self).values_list('action', flat=True).order_by(
'player')
def get_last_round_action(self):
try:
action = GameData.objects.get(round=self.profile.last_round - 1, session=self.session,
group=self.group, player=self).action
except GameData.DoesNotExist:
logger.exception("[ERROR] There is no game data for player {}".format(self))
return None
return action
def __str__(self):
return "{} | {} | {}".format(self.experiment, self.session, self.pk)
@receiver(post_save, sender=User)
def create_player(sender, instance, created, **kwargs):
if created:
if not instance.is_superuser:
Player.objects.create(user=instance)
instance.player.save()
class Profile(models.Model):
"""
Holds extra information of each user needed for the experiment,
and has a link to the game data
"""
player = models.OneToOneField(
Player,
on_delete=models.CASCADE,
primary_key=True,
related_name='profile',
)
group_number = models.IntegerField('Group number', null=True, blank=True)
private_account = models.IntegerField('Private account', default=Constants.ENDOWMENT)
time_start_experiment = models.DateTimeField('Time session starts', null=True, blank=True)
time_ends_experiment = models.DateTimeField('Time session ends', null=True, blank=True)
last_round = models.IntegerField('Round number', default=0)
finished = models.BooleanField(default=False)
participated = models.BooleanField(default=False)
experiment_state = models.CharField(
"State of the experiment (e.g. Instructions)",
max_length=10,
choices=Constants.EXPERIMENT_STATES,
default=Constants.STATE_INACTIVE
)
transition_state = models.CharField(
"If player is in a transition",
max_length=10,
choices=Constants.TRANSITION_STATES,
default=Constants.STATE_NO_TRANSITION
)
created = models.DateTimeField('Creation Date', editable=False)
language = models.CharField(
"Language in which the player did the experiment.",
max_length=10,
choices=LANGUAGES,
blank=True
)
threshold_state = models.IntegerField("End state of the game", choices=Constants.THRESHOLD_STATES, null=True,
blank=True)
def get_experiment_duration(self):
""":returns timedelta"""
return self.time_ends_experiment - self.time_start_experiment
def get_value_in_euros(self):
""":returns float - convert EMUs to euros"""
if self.threshold_state != Constants.LOSS:
value = self.private_account * self.player.session.treatment.game.conversion_rate + 2.5
else:
value = 2.5
return value
def __str__(self):
return 'Player {} Profile'.format(self.player.pk)
class Meta:
ordering = ('player', 'group_number',)
@receiver(post_save, sender=Player)
def create_player_profile(sender, instance, created, **kwargs):
if created:
Profile.objects.create(player=instance,
created=timezone.now(),
experiment_state=Constants.STATE_LOGIN)
@receiver(post_save, sender=Player)
def save_player_profile(sender, instance, **kwargs):
instance.profile.save()
class GameData(models.Model):
player = models.ForeignKey(Player, on_delete=models.CASCADE, related_name="game_data")
session = models.ForeignKey(Session, on_delete=models.PROTECT, related_name="game_data")
opponent = models.ForeignKey(Player, on_delete=models.PROTECT, related_name="opponent")
group = models.ForeignKey("Group", null=True, related_name="game_data", on_delete=models.PROTECT)
round = models.IntegerField('Round number')
action = models.IntegerField('Action', choices=Constants.CRD_VALID_ACTIONS, null=True, blank=True)
private_account = models.IntegerField('Private account', default=0, null=True, blank=True)
public_account = models.IntegerField('Public account', default=0, null=True, blank=True)
time_round_start = models.DateTimeField('Time round starts', null=True, blank=True)
time_round_ends = models.DateTimeField('Time round ends', null=True, blank=True)
time_elapsed = models.TimeField('Time taken to make the action', null=True, blank=True)
prediction_question = models.CharField(Constants.PREDICT_PUBLIC_ACCOUNT, blank=True, max_length=50)
time_question_start = models.DateTimeField('Time question starts', null=True)
time_question_end = models.DateTimeField('Time question ends', null=True)
time_question_elapsed = models.TimeField('Time taken to make the prediction', null=True, blank=True)
def get_decision_interval(self):
""":returns timedelta"""
return self.time_round_ends - self.time_round_start
def get_question_interval(self):
""":returns timedelta"""
return self.time_question_start - self.time_question_end
get_decision_interval.admin_order_field = 'player'
get_decision_interval.short_description = "Time to make a decision"
@staticmethod
def get_last_round(session, group):
return GameData.objects.filter(session=session, group=group).aggregate(Max('round'))['round__max']
@staticmethod
def get_last_round_actions(session, group):
return GameData.objects.filter(round=GameData.get_last_round(session, group), session=session,
group=group).values_list('action', flat=True).order_by('player')
def __str__(self):
return "{} | {} | {} | round {}".format(self.session, self.player, self.player.group, self.round)
class Meta:
ordering = ('session', 'player__group', 'player')
def generate_random_password():
psw = ""
word_list = words.words()
rr = [word_list[random.randint(0, len(word_list))] for _ in range(2)]
for wrd in rr:
psw += str(wrd)
return psw
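# Illustrative behavior: concatenates two words drawn at random from the NLTK
# "words" corpus (which must be downloaded beforehand), so a result might look
# like "lakewarden"; the exact output is random.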
class RunNow(models.Model):
experiment_id = models.IntegerField('Experiment ID', default=0)
treatment_id = models.IntegerField('Treatment ID', default=0)
session_id = models.IntegerField('Session ID', default=0)
experiment_on = models.BooleanField(default=False)
def __str__(self):
return 'Run now experiment ' + str(self.experiment_id) + ' treatment ' + str(self.treatment_id) + \
' session ' + str(self.session_id)
class RequestMonitor(models.Model):
name = models.CharField(max_length=20)
var = models.IntegerField('Queue 1', default=0)
condition = models.BooleanField('Condition', default=False)
group = models.ForeignKey("Group", related_name="monitors", on_delete=models.PROTECT)
queue = models.ManyToManyField(Player, related_name='monitors', blank=True)
@staticmethod
@transaction.atomic
def wait(player, monitor_name):
monitor = RequestMonitor.objects.get(group=player.group,
name=monitor_name)
if player not in monitor.queue.all():
monitor.queue.add(player)
RequestMonitor.objects.filter(id=monitor.id).update(var=F('var') + 1)
@staticmethod
@transaction.atomic
def check_condition(group, monitor_name, condition, update_value):
monitor = RequestMonitor.objects.get(group=group, name=monitor_name)
# If the condition is not update_value, then we update the condition if it's true
if monitor.condition is not update_value:
if condition(monitor.var):
RequestMonitor.objects.filter(id=monitor.id).update(condition=update_value)
monitor.refresh_from_db()
return monitor.condition
@staticmethod
@transaction.atomic
def signal(player, monitor_name):
monitor = RequestMonitor.objects.get(group=player.group,
name=monitor_name)
if player in monitor.queue.all():
monitor.queue.remove(player)
RequestMonitor.objects.filter(id=monitor.id).update(var=F('var') - 1)
def validate_unique(self, *args, **kwargs):
super(RequestMonitor, self).validate_unique(*args, **kwargs)
if not self.id:
if self.__class__.objects.filter(name=self.name, group=self.group).exists():
raise ValidationError(
{
NON_FIELD_ERRORS: [
'Monitor with group {} and name {} already exists.'.format(self.group, self.name),
],
}
)
class Meta:
ordering = ('group',)
def __str__(self):
return "{} {}".format(self.group, self.name)
class Group(models.Model):
group_number = models.IntegerField('Group number', default=0)
session = models.ForeignKey(Session, related_name="groups", on_delete=models.PROTECT)
dice_results = models.CharField("Dice trials until success (game ends)", max_length=1000, default="0",
validators=[validate_comma_separated_integer_list])
finishing_round = models.IntegerField("Indicate at which round the game finishes", default=0)
finishing_round_selected = models.BooleanField("Indicates if the finishing round has been selected", default=False)
game_finished = models.BooleanField("Indicate if the game has finished", default=False)
public_account = models.IntegerField("Public account of the group", default=0)
current_round = models.IntegerField("indicates the current round of the game", default=0)
random_value = models.FloatField("Random value generated")
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Created on Thu Sep 21 16:48:12 2017
Simple GUI for automated patterning with an inverted microscope
controlled through the Micro-Manager (MM) application. An Arduino microcontroller is used
as a triggering device for UV illumination together with a physical shutter.
The x and y positions are imported from a text file (see the "position-folder" for an example) and can be
extracted directly from AutoCAD files using the provided lisp script.
Warning: This script might not run directly, since the MM config file depends on the individual hardware settings.
@author: <NAME>
"""
from PyQt5.QtCore import *
from PyQt5.QtGui import *
from PyQt5.QtWidgets import *
import numpy as np
import os
import sys
import time
from threading import Thread
import struct
MM_PATH = os.path.join('C:', os.path.sep, 'Program Files', 'Micro-Manager-1.4')
sys.path.append(MM_PATH)
os.environ['PATH'] = MM_PATH + ';' + os.environ['PATH']
try:
import MMCorePy
import serial
except ImportError:
# Allow the GUI to start without the Micro-Manager/serial drivers installed
pass
from skimage import img_as_ubyte
from skimage.exposure import rescale_intensity
from skimage.io import imsave
from skimage.feature import peak_local_max
# Config file has to be created within the Micro Manager configuration wizard
MM_CONFIG_FILE = "C:/Program Files/Micro-Manager-1.4/axiovert200m-woReflector.cfg"
ARDUINO_COM_PORT = "COM8"
# Dimensions of the camera (Here, sCMOS with 16-bit resolution)
CAMERA_HEIGHT = 2160
CAMERA_WIDTH = 2560
# Position (in pixel) of the UV illumination spot on the camera
Y_ORIGIN_POSITION = 1149
X_ORIGIN_POSITION = 1106
# Conversions of motor to camera to pixel dimensions
MOTORSTEPS_UM_CONVERSION = 0.8
UM_PIXEL_CONVERSION = 0.1705
class Arduino():
def __init__(self, port=None):
if port is None:
self.serial_port = serial.Serial(ARDUINO_COM_PORT, 57600, timeout=0.1)
else:
self.serial_port = serial.Serial(port, 57600, timeout=0.1)
self.write_cmd(42, 5, 0)
def write_cmd(self, *args):
cmd = struct.pack('>B', args[0])
for arg in args[1:]:
cmd += struct.pack('>B', arg)
self.serial_port.write(cmd)
return self.serial_port.readline()
def open_shutter(self):
# Mode digital write = 42, pin number (pin 8 = 0), on
self.write_cmd(42, 5, 1)
def close_shutter(self):
self.write_cmd(42, 5, 0)
def close(self):
self.serial_port.close()
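# Illustrative usage (requires the shutter hardware attached to ARDUINO_COM_PORT):
#   arduino = Arduino()
#   arduino.open_shutter()    # digital write: pin on -> UV shutter opens
#   arduino.close_shutter()
#   arduino.close()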
class ImageView(QLabel):
def __init__(self, d=None, status_bar=None, parent=None):
super(ImageView, self).__init__(parent)
if d is None:
self.data = np.random.randint(0, 2 ** 4 - 1, size=(CAMERA_HEIGHT, CAMERA_WIDTH)).astype(np.uint16)
# self.data[100:200, 100:500] = 2**16-1
idy, idx = np.indices((CAMERA_HEIGHT, CAMERA_WIDTH))
id = np.sqrt((idx - Y_ORIGIN_POSITION) ** 2 + (idy - X_ORIGIN_POSITION / 2) ** 2) < 15
self.data[id] = 2 ** 16 - 1
self.H, self.W = self.data.shape
self.numpy_image = self.data.copy()
self.data = img_as_ubyte(self.data)
v_min, v_max = np.percentile(self.data, (5, 100))
self.data = rescale_intensity(self.data, in_range=(self.data.min(), self.data.max()))
self.scaling_factor = 0.3
self.window_w = self.W * self.scaling_factor
self.window_h = self.H * self.scaling_factor
self.qimage = QPixmap(QImage(self.data, self.W, self.H, QImage.Format_Grayscale8))
self.setPixmap(self.getScalePixelMap())
self.setSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed)
self.painter = QPainter(self.qimage)
self.pen = QPen(Qt.red)
self.pen.setWidth(10)
self.painter.setPen(self.pen)
self.mouse_position = None
self.position1 = None
self.position2 = None
self.position_list = []
self.setMouseTracking(1)
self.mouseMoveEvent = self.hoverFunction
self.status_bar = None
self.main_application = None
def set_image(self, img):
self.numpy_image = img.copy()
self.H = img.shape[0]
self.W = img.shape[1]
self.window_w = self.W * self.scaling_factor
self.window_h = self.H * self.scaling_factor
if self.painter.isActive():
self.painter.end()
if self.side_panel_hard.auto_scaling.isChecked():
v_min, v_max = np.percentile(self.numpy_image, (5, 95))
else:
v_min, v_max = self.side_panel_hard.getLimits()
self.data = rescale_intensity(self.numpy_image, in_range=(v_min, v_max), out_range=np.uint8)
self.data = img_as_ubyte(self.data)
self.qimage = QPixmap(QImage(self.data, self.W, self.H, QImage.Format_Grayscale8))
self.setPixmap(self.getScalePixelMap())
self.update()
self.painter.begin(self.qimage)
self.painter.setPen(self.pen)
def scaleImage(self, factor):
self.scaling_factor *= factor
self.setPixmap(self.getScalePixelMap())
self.resize(QSize(self.window_w, self.window_h))
self.update()
def getWindowSize(self):
return self.size()
def getImageSize(self):
return self.qimage.size()
def getScaling(self):
return [x / y for x, y in zip(self.getWindowSize(), self.getImageSize())]
def getScalePixelMap(self):
self.window_w = self.W * self.scaling_factor
self.window_h = self.H * self.scaling_factor
return self.qimage.scaled(self.window_w, self.window_h, Qt.KeepAspectRatio)
def hoverFunction(self, event):
pos = event.pos()
self.mouse_position = (pos.x() / self.scaling_factor, pos.y() / self.scaling_factor)
x, y = self.mouse_position
calib = self.side_panel_hard.getCalibration()
um_x, um_y = x * calib, y * calib
if not self.main_application.exposure_active:
self.status_bar.showMessage(
'x: {0} ({2:0.2f} um), y: {1} ({3:0.2f} um), value: {4}'.format(int(x), int(y), um_x, um_y,
self.numpy_image[int(y), int(x)]),
2000)
class SidePanel(QDockWidget):
def __init__(self, name=None, parent=None):
super(SidePanel, self).__init__(parent)
if name is None:
self.setWindowTitle("Daisy-Control")
self.setMinimumSize(QSize(270, 530))
self.grid_widget = QWidget(self)
self.grid_widget.setGeometry(QRect(10, 50, 256, 550))
self.grid = QGridLayout(self.grid_widget)
self.create_btn = QPushButton(" Overlay ")
self.expose_btn = QPushButton(" Expose ")
self.stop_btn = QPushButton(" Stop ")
self.load_position_btn = QPushButton(" Load ")
self.label1 = QLabel("- Push 1 -", self.grid_widget)
self.label2 = QLabel("- Push 2 -", self.grid_widget)
self.time_exp_field = QSpinBox(self.grid_widget)
self.time_exp_field.setRange(0, 1000000)
self.time_exp_field.setValue(15000)
time_exp_label = QLabel("Exposure [ms]: ")
time_exp_label.setBuddy(self.time_exp_field)
self.scaling_field = QDoubleSpinBox(self.grid_widget)
self.scaling_field.setRange(0, 2)
self.scaling_field.setDecimals(4)
self.scaling_field.setValue(0.9890)
scaling_field_label = QLabel("Scaling [x]: ")
scaling_field_label.setBuddy(self.scaling_field)
# Before x=1114, y=1146
self.label_oriX = QLabel("Origin-X [pixel]", self.grid_widget)
self.label_oriY = QLabel("Origin-Y [pixel]", self.grid_widget)
self.origin_x = QSpinBox(self.grid_widget)
self.origin_x.setRange(0, CAMERA_WIDTH)
self.origin_x.setValue(X_ORIGIN_POSITION)
self.origin_y = QSpinBox(self.grid_widget)
self.origin_y.setRange(0, CAMERA_HEIGHT)
self.origin_y.setValue(Y_ORIGIN_POSITION)
self.grid.addWidget(self.stop_btn, 0, 1, 1, 1)
self.grid.addWidget(self.expose_btn, 0, 0, 1, 1)
self.grid.addWidget(time_exp_label, 1, 0)
self.grid.addWidget(self.time_exp_field, 1, 1)
pattern_label = QLabel("-- Pattern --", self.grid_widget)
self.grid.addWidget(pattern_label, 3, 0, 1, 2)
self.grid.addWidget(self.load_position_btn, 4, 0)
self.grid.addWidget(self.create_btn, 4, 1)
self.grid.addWidget(scaling_field_label, 5, 0)
self.grid.addWidget(self.scaling_field, 5, 1)
ill_label = QLabel("-- Illumination origin --", self.grid_widget)
self.grid.addWidget(ill_label, 11, 0, 1, 2)
self.grid.addWidget(self.label_oriX, 12, 0)
self.grid.addWidget(self.origin_x, 12, 1)
self.grid.addWidget(self.label_oriY, 13, 0)
self.grid.addWidget(self.origin_y, 13, 1)
self.find_center_btn = QPushButton(" Estimate ")
self.grid.addWidget(self.find_center_btn, 14, 0)
self.reset_center_btn = QPushButton(" Reset ")
self.grid.addWidget(self.reset_center_btn, 14, 1)
calib_label = QLabel("-- Pattern calibration --", self.grid_widget)
self.grid.addWidget(calib_label, 15, 0, 1, 2)
self.grid.addWidget(self.label1, 16, 0)
self.grid.addWidget(self.label2, 16, 1)
self.grid.setSizeConstraint(QLayout.SetDefaultConstraint)
self.setMouseTracking(1)
self.mouseMoveEvent = self.hoverFunction
def hoverFunction(self, event):
self.time_exp_field.clearFocus()
self.origin_y.clearFocus()
self.origin_x.clearFocus()
class SidePanelHard(QDockWidget):
def __init__(self, parent=None):
super(SidePanelHard, self).__init__(parent)
self.setupUi(self)
self.setMouseTracking(1)
self.mouseMoveEvent = self.hoverFunction
def setupUi(self, Form):
Form.setObjectName("Scope-Control")
Form.resize(278, 278)
Form.setMinimumSize(QSize(270, 400))
self.gridLayoutWidget = QWidget(Form)
self.gridLayoutWidget.setGeometry(QRect(10, 50, 256, 400))
self.gridLayoutWidget.setObjectName("gridLayoutWidget")
self.gridLayout = QGridLayout(self.gridLayoutWidget)
self.gridLayout.setSizeConstraint(QLayout.SetDefaultConstraint)
self.gridLayout.setContentsMargins(0, 0, 0, 0)
self.gridLayout.setObjectName("gridLayout")
self.label = QLabel(self.gridLayoutWidget)
self.label.setText("-- XY control --")
self.gridLayout.addWidget(self.label, 3, 1, 1, 1)
self.step_size_box = QSpinBox(self.gridLayoutWidget)
self.step_size_box.setRange(0, 100000)
self.step_size_box.setValue(1000)
self.gridLayout.addWidget(self.step_size_box, 5, 1, 1, 1)
self.expose_box = QSpinBox(self.gridLayoutWidget)
self.expose_box.setRange(0, 10000)
self.expose_box.setValue(50)
self.gridLayout.addWidget(self.expose_box, 1, 1)
self.auto_scaling = QCheckBox(self.gridLayoutWidget)
self.auto_scaling.setChecked(True)
self.auto_scaling.setText("auto-scaling")
self.gridLayout.addWidget(self.auto_scaling, 1, 2)
self.expose_label = QLabel(self.gridLayoutWidget)
self.expose_label.setText("Exposure [ms]: ")
self.gridLayout.addWidget(self.expose_label, 1, 0, 1, 1)
self.max_limit = QSpinBox(self.gridLayoutWidget)
self.min_limit = QSpinBox(self.gridLayoutWidget)
self.max_limit.setValue(0)
self.min_limit.setValue(0)
self.max_limit.setRange(0, 2 ** 16 - 1)
self.min_limit.setRange(0, 2 ** 16 - 1)
limits_label = QLabel("Limits (min/max): ")
limits_label.setBuddy(self.min_limit)
self.gridLayout.addWidget(self.max_limit, 2, 2)
self.gridLayout.addWidget(self.min_limit, 2, 1)
self.gridLayout.addWidget(limits_label, 2, 0)
self.down_btn = QPushButton(self.gridLayoutWidget)
self.down_btn.setText("DOWN")
self.gridLayout.addWidget(self.down_btn, 6, 1, 1, 1)
self.right_btn = QPushButton(self.gridLayoutWidget)
self.right_btn.setText("RIGHT")
self.gridLayout.addWidget(self.right_btn, 5, 2, 1, 1)
self.up_btn = QPushButton(self.gridLayoutWidget)
self.up_btn.setText("UP")
self.gridLayout.addWidget(self.up_btn, 4, 1, 1, 1)
self.left_btn = QPushButton(self.gridLayoutWidget)
self.left_btn.setText("LEFT")
self.gridLayout.addWidget(self.left_btn, 5, 0, 1, 1)
self.label = QLabel(self.gridLayoutWidget)
self.label.setText("-- Calibration --")
self.gridLayout.addWidget(self.label, 7, 1, 1, 1)
self.um_label = QLabel(self.gridLayoutWidget)
self.um_label.setText("[um/pixel]: ")
self.gridLayout.addWidget(self.um_label, 8, 0, 1, 1)
self.steps_label = QLabel(self.gridLayoutWidget)
self.steps_label.setText("[steps/um]: ")
self.gridLayout.addWidget(self.steps_label, 9, 0, 1, 1)
# Olympus 60x/1.2NA:
# um_pixel = 0.1127; steps_um = 0.800
self.um_pixel = QDoubleSpinBox(self.gridLayoutWidget)
self.um_pixel.setRange(0, 10)
self.um_pixel.setDecimals(4)
self.um_pixel.setValue(UM_PIXEL_CONVERSION)
self.gridLayout.addWidget(self.um_pixel, 8, 1, 1, 1)
self.steps_um = QDoubleSpinBox(self.gridLayoutWidget)
self.steps_um.setRange(0, 10)
self.steps_um.setDecimals(4)
self.steps_um.setValue(MOTORSTEPS_UM_CONVERSION)
self.gridLayout.addWidget(self.steps_um, 9, 1, 1, 1)
self.label = QLabel(self.gridLayoutWidget)
self.label.setText("-- Z control --")
self.gridLayout.addWidget(self.label, 10, 1, 1, 1)
self.z_up_btn = QPushButton(self.gridLayoutWidget)
self.z_up_btn.setText("UP")
self.gridLayout.addWidget(self.z_up_btn, 11, 0, 1, 1)
self.z_down_btn = QPushButton(self.gridLayoutWidget)
self.z_down_btn.setText("DOWN")
self.gridLayout.addWidget(self.z_down_btn, 11, 2, 1, 1)
self.z_box = QSpinBox(self.gridLayoutWidget)
self.z_box.setRange(0, 10000)
self.z_box.setValue(0)
self.gridLayout.addWidget(self.z_box, 11, 1, 1, 1)
self.per_label = QLabel("-- Peripherals --", self.gridLayoutWidget)
self.gridLayout.addWidget(self.per_label, 12, 1, 1, 1)
self.shutter_box = QCheckBox(" Shutter", self.gridLayoutWidget)
self.shutter_box.setChecked(False)
self.gridLayout.addWidget(self.shutter_box, 13, 1, 1, 2)
self.acquire_btn = QPushButton(self.gridLayoutWidget)
self.acquire_btn.setText("Acquire!")
self.gridLayout.addWidget(self.acquire_btn, 0, 0, 1, 3)
self.gridLayoutWidget.raise_()
self.acquire_btn.raise_()
self.retranslateUi(Form)
QMetaObject.connectSlotsByName(Form)
def retranslateUi(self, Form):
_translate = QCoreApplication.translate
Form.setWindowTitle(_translate("Form", "FoScope-Control"))
def getCalibration(self):
return self.um_pixel.value()
def getLimits(self):
limits = (self.min_limit.value(), self.max_limit.value())
if limits[0] == limits[1]:
return min(limits), max(limits) + 1
return min(limits), max(limits)
def hoverFunction(self, event):
self.um_pixel.clearFocus()
self.step_size_box.clearFocus()
self.expose_box.clearFocus()
self.step_size_box.clearFocus()
self.steps_um.clearFocus()
self.z_box.clearFocus()
class MainWindow(QMainWindow):
def __init__(self, parent=None):
super(MainWindow, self).__init__(parent)
self.application_running = True
self.acquire_images = False
self.camera_thread = Thread(target=self.acquire_fcn)
self.camera_thread.daemon = True
self.camera_thread.start()
self.exposure_active = False
self.stop_exposure = False
self.exposure_list = []
# self.form_widget = QWidget()
self.status_bar = QStatusBar()
self.setStatusBar(self.status_bar)
self.side_panel = SidePanel()
self.side_panel.create_btn.clicked.connect(self.create_fcn)
self.side_panel.expose_btn.clicked.connect(self.expose_fcn)
self.side_panel.load_position_btn.clicked.connect(self.load_position_fcn)
self.side_panel.stop_btn.clicked.connect(self.stop_exposure_fcn)
self.side_panel.find_center_btn.clicked.connect(self.find_center)
self.side_panel.reset_center_btn.clicked.connect(self.reset_center)
self.side_panel_hard = SidePanelHard()
self.image_view_widget = QWidget(parent=self)
self.image_view = ImageView(parent=self.image_view_widget)
self.image_view.main_application = self
self.image_view.status_bar = self.status_bar
self.image_view.side_panel_hard = self.side_panel_hard
self.setCentralWidget(self.image_view_widget)
self.addDockWidget(Qt.RightDockWidgetArea, self.side_panel)
self.addDockWidget(Qt.LeftDockWidgetArea, self.side_panel_hard)
self.setMouseTracking(1)
self.setWindowTitle("DAISY")
self.resize(1000, 700)
self.last_file_path = QDir.currentPath()
self.hardware_detected = False
try:
self.mmc = MMCorePy.CMMCore()
self.mmc.loadSystemConfiguration(MM_CONFIG_FILE)
print(self.mmc.getLoadedDevices())
self.mmc.setExposure(50.)
self.hardware_detected = True
except Exception as e:
print(e)
self.hardware_detected = False
self.status_bar.showMessage("No hardware detected...", 5000)
self.side_panel_hard.down_btn.clicked.connect(self.move_down)
self.side_panel_hard.up_btn.clicked.connect(self.move_up)
self.side_panel_hard.right_btn.clicked.connect(self.move_right)
self.side_panel_hard.left_btn.clicked.connect(self.move_left)
self.side_panel_hard.acquire_btn.clicked.connect(self.acquire)
self.side_panel_hard.z_up_btn.clicked.connect(self.move_z_up)
self.side_panel_hard.z_down_btn.clicked.connect(self.move_z_down)
self.side_panel_hard.shutter_box.clicked.connect(self.shutter)
# Hardware stuff
self.abs_x = 0
self.abs_y = 0
if self.hardware_detected:
self.mmc.unloadDevice(ARDUINO_COM_PORT)
# self.mmc.unloadDevice("Focus")
# self.mmc.unloadDevice("ZeissScope")
# self.mmc.unloadDevice("ZeissReflectorTurret")
# self.mmc.unloadDevice("ZeissBasePortSlider")
self.mmc.unloadDevice("ZeissObjectives")
print(self.mmc.getLoadedDevices())
self.arduino = Arduino()
self.arduino.close_shutter()
self.side_panel_hard.shutter_box.setChecked(True)
self.initial_origin_x = self.side_panel.origin_x.value()
self.initial_origin_y = self.side_panel.origin_y.value()
self.createActions()
self.createMenus()
def createActions(self):
self.openAct = QAction("&Open...", self, shortcut="Ctrl+O", triggered=self.open)
self.exitAct = QAction("E&xit", self, shortcut="Ctrl+Q", triggered=self.close)
self.saveAct = QAction("Save Image...", self, shortcut="Ctrl+S", triggered=self.saveImage)
self.zoomInAct = QAction("Zoom &In (25%)", self, shortcut="Ctrl++", enabled=True, triggered=self.zoomIn)
self.zoomOutAct = QAction("Zoom &Out (25%)", self, shortcut="Ctrl+-", enabled=True, triggered=self.zoomOut)
def createMenus(self):
# The original source is truncated here; a minimal, assumed completion follows.
self.fileMenu = self.menuBar().addMenu("&File")
import sys
import os
import pickle
import json
import subprocess
from pathlib import Path
import numpy as np
import pandas as pd
import papermill as pm # Render the ipython notebook
from kipoi_utils.external.flatten_json import flatten, unflatten
from weakref import WeakValueDictionary
import uuid
from threading import Lock
import logging
logger = logging.getLogger(__name__)
logger.addHandler(logging.NullHandler())
def jupyter_nbconvert(input_ipynb):
# NOTE: cwd is used since the input_ipynb could contain some strange output
# characters like '[' which would mess up the paths
subprocess.call(["jupyter",
"nbconvert",
os.path.basename(input_ipynb),
"--to", "html"],
cwd=os.path.dirname(input_ipynb))
def render_ipynb(template_ipynb, rendered_ipynb, params=dict()):
"""Render the ipython notebook
Args:
template_ipynb: template ipython notebook where one cell defines the following metadata:
{"tags": ["parameters"]}
rendered_ipynb: output ipython notebook path
params: parameters used to execute the ipython notebook
"""
import jupyter_client
os.makedirs(os.path.dirname(rendered_ipynb), exist_ok=True)
kernel_name = os.environ.get("CONDA_DEFAULT_ENV", 'python3')
if kernel_name not in jupyter_client.kernelspec.find_kernel_specs():
logger.info(f"Installing the ipython kernel for the current conda environment: {kernel_name}")
from ipykernel.kernelspec import install
install(user=True, kernel_name=kernel_name)
pm.execute_notebook(
template_ipynb, # input template
rendered_ipynb,
kernel_name=kernel_name, # default kernel
parameters=params
)
jupyter_nbconvert(rendered_ipynb)
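# Illustrative usage (the notebook paths and parameter names are hypothetical):
#   render_ipynb("templates/report.ipynb",
#                "output/report.ipynb",
#                params={"model_dir": "output/model"})
# This executes the template with papermill in the current conda env's kernel and
# also writes an HTML export next to the rendered notebook.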
def tqdm_restart():
"""Restart tqdm to not print to every line
"""
from tqdm import tqdm as tqdm_cls
inst = tqdm_cls._instances
for i in range(len(inst)):
inst.pop().close()
def touch_file(file, verbose=True):
import subprocess
if verbose:
add = "v"
else:
add = ""
subprocess.run(["vmtouch", f'-{add}tf', file])
def remove_exists(output_path, overwrite=False):
if os.path.exists(output_path):
if overwrite:
os.remove(output_path)
else:
raise ValueError(f"File exists {str(output_path)}. Use overwrite=True to overwrite it")
def write_pkl(obj, fname, create_dirs=True, protocol=2):
import cloudpickle
if create_dirs:
if os.path.dirname(fname):
os.makedirs(os.path.dirname(fname), exist_ok=True)
cloudpickle.dump(obj, open(fname, 'wb'), protocol=protocol)
def read_pkl(fname):
import cloudpickle
return cloudpickle.load(open(fname, 'rb'))
def read_json(fname):
with open(fname) as f:
return json.load(f)
class NumpyAwareJSONEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, Path):
return str(obj)
elif isinstance(obj, np.ndarray):
return obj.tolist()
elif isinstance(obj, np.generic):
return obj.item()
return json.JSONEncoder.default(self, obj)
def write_json(obj, fname, **kwargs):
with open(fname, "w") as f:
return json.dump(obj, f, cls=NumpyAwareJSONEncoder, **kwargs)
dump = write_pkl
load = read_pkl
def _listify(arg):
if hasattr(type(arg), '__len__'):
return arg
return [arg, ]
def reverse_complement(seq):
alt_map = {'ins': '0'}
complement = {'A': 'T', 'C': 'G', 'G': 'C', 'T': 'A'}
for k, v in alt_map.items():
seq = seq.replace(k, v)
bases = list(seq)
bases = reversed([complement.get(base, base) for base in bases])
bases = ''.join(bases)
for k, v in alt_map.items():
bases = bases.replace(v, k)
return bases
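# Example (illustrative):
#   >>> reverse_complement("AACG")
#   'CGTT'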
def related_dump_yaml(obj, path, verbose=False):
import related
generated_yaml = related.to_yaml(obj,
suppress_empty_values=False,
suppress_map_key_values=True) # .strip()
if verbose:
print(generated_yaml)
with open(path, "w") as f:
f.write(generated_yaml)
def shuffle_list(l):
return pd.Series(l).sample(frac=1).tolist()
def flatten_list(l):
"""Flattens a nested list
"""
return [x for nl in l for x in nl]
class Logger(object):
"""tee functionality in python. If this object exists,
then all of stdout gets logged to the file
Adapted from:
https://stackoverflow.com/questions/616645/how-do-i-duplicate-sys-stdout-to-a-log-file-in-python/3423392#3423392
"""
def __init__(self, name, mode='a'):
self.file = open(name, mode)
self.stdout = sys.stdout
sys.stdout = self
def __del__(self):
sys.stdout = self.stdout
self.file.close()
def write(self, data):
self.file.write(data)
self.stdout.write(data)
# flush right away
self.file.flush()
self.stdout.flush()
def flush(self):
self.file.flush()
self.stdout.flush()
def add_file_logging(output_dir, logger, name='stdout'):
os.makedirs(os.path.join(output_dir, 'log'), exist_ok=True)
log = Logger(os.path.join(output_dir, 'log', name + '.log'), 'a+') # log to the file
fh = logging.FileHandler(os.path.join(output_dir, 'log', name + '.log'), 'a+')
fh.setFormatter(logging.Formatter('[%(asctime)s] - [%(levelname)s] - %(message)s'))
fh.setLevel(logging.INFO)
logger.addHandler(fh)
return log
def halve(n):
"""Halve an integer"""
return n // 2 + n % 2, n // 2
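# Example: halve(7) == (4, 3) -- the first half gets the extra element.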
def expand_str_list(l, prefix="", suffix=""):
"""add strings to the beginning or to the end of the string
"""
return [prefix + x + suffix for x in l]
def kv_string2dict(s):
"""Convert a key-value string: k=v,k2=v2,... into a dictionary
"""
import yaml
return yaml.load(s.replace(",", "\n").replace("=", ": "))
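# Example (illustrative):
#   >>> kv_string2dict("a=1,b=test")
#   {'a': 1, 'b': 'test'}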
def dict_suffix_key(d, suffix):
return {k + suffix: v for k, v in d.items()}
def dict_prefix_key(d, prefix):
return {prefix + k: v for k, v in d.items()}
def kwargs_str2kwargs(hparams):
"""Converts a string to a dictionary of kwargs
In[22]: kwargs_str2kwargs("a=1;b=[1,2]")
Out[22]: {'a': 1, 'b': [1, 2]}
In[30]: kwargs_str2kwargs("a=null")
Out[30]: {'a': None}
"""
import yaml
return yaml.load(hparams.replace(";", "\n").replace("=", ": "))
def apply_parallel(df_grouped, func, n_jobs=-1, verbose=True):
from joblib import Parallel, delayed
import pandas as pd
from tqdm import tqdm
retLst = Parallel(n_jobs=n_jobs)(delayed(func)(group)
for name, group in tqdm(df_grouped, disable=not verbose))
return pd.concat(retLst)
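# Illustrative usage (the DataFrame and per-group function are hypothetical):
#   stats = apply_parallel(df.groupby("chrom"), compute_stats, n_jobs=8)
# Each group is handed to a separate joblib worker and the per-group results are
# concatenated back into a single DataFrame.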
def get_timestamp():
"""Get current time-stamp as a string: 2018-12-10_14:20:04
"""
import datetime
import time
return datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d_%H:%M:%S')
class ConditionalRun:
"""Simple class keeping track whether the command has already
been ran or not
"""
def __init__(self, main_cmd, cmd, output_dir, force=False):
self.main_cmd = main_cmd
self.cmd = cmd
self.output_dir = output_dir
self.force = force
def set_cmd(self, cmd):
self.cmd = cmd
return self
def get_done_file(self):
return os.path.join(self.output_dir, f".{self.main_cmd}/{self.cmd}.done")
def done(self):
ret = os.path.exists(self.get_done_file())
if self.force:
# always run the command
ret = False
if ret:
logger.info(f"Skipping {self.cmd}")
else:
logger.info(f"Running {self.cmd}")
return ret
def write(self):
fname = self.get_done_file()
os.makedirs(os.path.dirname(fname),
exist_ok=True)
with open(fname, "w") as f:
f.write(get_timestamp())
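# Illustrative usage (the step names and function are hypothetical):
#   step = ConditionalRun("train", "evaluate", output_dir)
#   if not step.done():
#       run_evaluation()   # hypothetical work for this step
#       step.write()       # drops a .done marker so the step is skipped next time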
def fnmatch_any(s, patterns):
from fnmatch import fnmatch # unix-style pattern matching
return any([fnmatch(s, p) for p in patterns])
def to_list(l):
if isinstance(l, list):
return l
else:
return [l]
def pd_first_cols(df: pd.DataFrame, cols):
"""Set `cols` to be the first columns in pd.DataFrame df
"""
for c in cols:
assert c in df
other_cols = [c for c in df.columns if c not in cols]
return df[cols + other_cols]
def pd_col_prepend(df: pd.DataFrame, column, prefix='', suffix=''):
"""Add a prefix or suffix to all the columns names in pd.DataFrame
"""
if isinstance(column, list):
for c in column:
df[c] = prefix + df[c] + suffix
else:
df[column] = prefix + df[column] + suffix
return df
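# Example (illustrative): pd_col_prepend(df, "fasta_file", prefix="/data/")
# turns relative file names stored in the "fasta_file" column into absolute paths.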
def create_tf_session(visiblegpus, per_process_gpu_memory_fraction=0.45):
import os
import tensorflow as tf
import keras.backend as K
os.environ['CUDA_VISIBLE_DEVICES'] = str(visiblegpus)
session_config = tf.ConfigProto()
# session_config.gpu_options.deferred_deletion_bytes = DEFER_DELETE_SIZE
if per_process_gpu_memory_fraction==1:
session_config.gpu_options.allow_growth = True
else:
session_config.gpu_options.per_process_gpu_memory_fraction = per_process_gpu_memory_fraction
session_config.gpu_options.polling_inactive_delay_msecs = 50
session = tf.Session(config=session_config)
K.set_session(session)
#K.backend.set_session(session)
return session
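# Illustrative usage:
#   sess = create_tf_session(visiblegpus=0, per_process_gpu_memory_fraction=0.45)
# pins TensorFlow to GPU 0 and caps its memory use; passing 1 enables
# allow_growth instead of a fixed fraction.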
class SerializableLock(object):
_locks = WeakValueDictionary()
""" A Serializable per-process Lock
This wraps a normal ``threading.Lock`` object and satisfies the same
interface. However, this lock can also be serialized and sent to different
processes. It will not block concurrent operations between processes (for
this you should look at ``multiprocessing.Lock`` or ``locket.lock_file``)
but will consistently deserialize into the same lock.
So if we make a lock in one process::
lock = SerializableLock()
And then send it over to another process multiple times::
bytes = pickle.dumps(lock)
a = pickle.loads(bytes)
b = pickle.loads(bytes)
Then the deserialized objects will operate as though they were the same
lock, and collide as appropriate.
This is useful for consistently protecting resources on a per-process
level.
The creation of locks is itself not threadsafe.
This class was taken from dask.utils.py
Copyright (c) 2014-2018, Anaconda, Inc. and contributors
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
Neither the name of Anaconda nor the names of any contributors may be used to
endorse or promote products derived from this software without specific prior
written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
"""
def __init__(self, token=None):
self.token = token or str(uuid.uuid4())
if self.token in SerializableLock._locks:
self.lock = SerializableLock._locks[self.token]
else:
self.lock = Lock()
SerializableLock._locks[self.token] = self.lock
def acquire(self, *args, **kwargs):
# Completion assumed from dask.utils, from which this class is stated to be copied.
return self.lock.acquire(*args, **kwargs)
# Linkpad: A command-line bookmark manager
# https://github.com/tonyduckles/linkpad
# Copyright (c) 2017 <NAME>
# Features:
# =========
# - Supports multiple separate bookmark databases under ~/.linkpad/<dbname>/.
# - Each database is version-controlled via Git, which [aside from version
# control!] provides an easy way to synchronize databases between machines.
#
# Database Structure:
# ===================
# - Bookmarks are stored as a JSON dict at "$dbpath/entries.json".
# - Optional webpage archive is stored at "$dbpath/archive/<$id[0:2]>/<$id[2:-1]>/index.html".
# - Internal schema version stored at "$dbpath/format".
#
# Dependencies:
# =============
# - python 3.x
# - git
# - wget (for archiving)
import os
import sys
import collections
import copy
import click
import yaml
import json
import sh
import datetime
import uuid
import urllib.parse
import bs4
import http.client
import tempfile
import requests
import time
import random
import multiprocessing.dummy
import functools
import tqdm
import configparser
import shlex
# Workaround for "http.client.HTTPException: got more than 100 headers" exceptions.
# Some servers can be misconfigured and can return an unexpected # of headers.
http.client._MAXHEADERS = 1000
VERSION = 1.2
PROGRAM = os.path.basename(sys.argv[0])
USER_AGENT = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36'
LINKPAD_BASEDIR = os.environ.get('LINKPAD_BASEDIR') or os.path.expanduser('~/.linkpad')
# User-editable entry fields
DB_ENTRY_PUBLIC_FIELDS = [ 'url',
'title',
'extended',
'tags',
'created_date' ]
# Internal non-editable entry fields
DB_ENTRY_PRIVATE_FIELDS = [ 'id',
'archived',
'archived_date',
'removed',
'removed_date',
'removed_reason' ]
###
### Misc helpers
###
def datetime_utc_to_local(utc_dt):
""" Convert a UTC datetime to local datetime """
# https://stackoverflow.com/a/13287083
return utc_dt.replace(tzinfo=datetime.timezone.utc).astimezone(tz=None)
def datetime_format_relative(utc_dt):
""" Format date relative to the current time, e.g. "2 hours ago" """
delta = datetime.datetime.now(datetime.timezone.utc) - utc_dt
if delta.days < 2:
seconds = (delta.days * 86400) + delta.seconds
minutes = seconds // 60
hours = minutes // 60
if seconds < 120:
return "{} seconds ago".format(seconds)
if minutes < 120:
return "{} minutes ago".format(minutes)
return "{} hours ago".format(hours)
else:
days = delta.days
weeks = days // 7
months = int(days / (365/12))
years = days // 365
if days < 14:
return "{} days ago".format(days)
if weeks < 8:
return "{} weeks ago".format(weeks)
if years < 1:
return "{} months ago".format(months)
months_mod = months % 12
return "{} years, {} months ago".format(years, months_mod) if months_mod > 0 else "{} years ago".format(years)
def format_colorize(format):
"""
Given a format template string, replace any format mnemonics
with literal ANSI color escape sequences.
Support Tmux-style formatting strings: #[...]
"""
retval=""
if '#[' in format:
pos1=0
pos2=0
pos3=format.find('#[', pos1) # Find first format-start marker
retval += format[pos1:pos3] # Append any text before the first format-start marker
while True:
pos1 = pos3
pos2 = format.find(']', pos1+2) # Find next format-end marker
if pos2 < 0:
retval += format[pos1:] # No counterpart format-end marker, just append remainder of string
break
for style in format[pos1+2:pos2].split(','):
# ANSI styles
if style == 'none': retval += "\x1b[0m"
if style == 'bold': retval += "\x1b[1m"
if style == 'bright': retval += "\x1b[1m"
if style == 'dim': retval += "\x1b[2m"
if style == 'italics': retval += "\x1b[3m"
if style == 'underscore': retval += "\x1b[4m"
if style == 'blink': retval += "\x1b[5m"
if style == 'reverse': retval += "\x1b[7m"
# ANSI foreground
if style == 'fg=black': retval += "\x1b[30m"
if style == 'fg=red': retval += "\x1b[31m"
if style == 'fg=green': retval += "\x1b[32m"
if style == 'fg=yellow': retval += "\x1b[33m"
if style == 'fg=blue': retval += "\x1b[34m"
if style == 'fg=magenta': retval += "\x1b[35m"
if style == 'fg=cyan': retval += "\x1b[36m"
if style == 'fg=white': retval += "\x1b[37m"
if style == 'fg=default': retval += "\x1b[39m"
if style == 'fg=brightblack': retval += "\x1b[90m"
if style == 'fg=brightred': retval += "\x1b[91m"
if style == 'fg=brightgreen': retval += "\x1b[92m"
if style == 'fg=brightyellow': retval += "\x1b[93m"
if style == 'fg=brightblue': retval += "\x1b[94m"
if style == 'fg=brightmagenta': retval += "\x1b[95m"
if style == 'fg=brightcyan': retval += "\x1b[96m"
if style == 'fg=brightwhite': retval += "\x1b[97m"
# ANSI background
if style == 'bg=black': retval += "\x1b[40m"
if style == 'bg=red': retval += "\x1b[41m"
if style == 'bg=green': retval += "\x1b[42m"
if style == 'bg=yellow': retval += "\x1b[43m"
if style == 'bg=blue': retval += "\x1b[44m"
if style == 'bg=magenta': retval += "\x1b[45m"
if style == 'bg=cyan': retval += "\x1b[46m"
if style == 'bg=white': retval += "\x1b[47m"
if style == 'bg=default': retval += "\x1b[49m"
if style == 'bg=brightblack': retval += "\x1b[100m"
if style == 'bg=brightred': retval += "\x1b[101m"
if style == 'bg=brightgreen': retval += "\x1b[102m"
if style == 'bg=brightyellow': retval += "\x1b[103m"
if style == 'bg=brightblue': retval += "\x1b[104m"
if style == 'bg=brightmagenta': retval += "\x1b[105m"
if style == 'bg=brightcyan': retval += "\x1b[106m"
if style == 'bg=brightwhite': retval += "\x1b[107m"
# 256-color (8-bit) palette, e.g. 'fg=color:NNN' [https://en.wikipedia.org/wiki/ANSI_escape_code#8-bit]
if style[0:9] == 'fg=color:': retval += "\x1b[38;5;{}m".format(style[9:])
if style[0:9] == 'bg=color:': retval += "\x1b[48;5;{}m".format(style[9:])
# Truecolor (24-bit) palette, e.g. 'fg=truecolor:RRGGBB' [https://en.wikipedia.org/wiki/ANSI_escape_code#24-bit]
if style[0:13] == 'fg=truecolor:': retval += "\x1b[38;2;{};{};{}m".format(int(style[13:15],16), int(style[15:17],16), int(style[17:19],16))
pos3 = format.find('#[',pos2+1) # Find next format-start marker
retval += format[pos2+1:pos3 if (pos3 > 0) else None] # Append text between current format-end and next format-start marker
if pos3 < 0:
break
else:
retval=format
return retval
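# Hedged usage sketch (not part of the original module): the format string below
# is an arbitrary example of the Tmux-style "#[...]" syntax that format_colorize()
# translates into ANSI escape sequences.
def _demo_format_colorize():
    # Prints "OK" in bold green followed by plain text, assuming an ANSI-capable terminal.
    print(format_colorize('#[fg=green,bold]OK#[none] bookmark saved'))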
def url_open(url, timeout=None):
""" Get a webpage, check if it exists """
headers = requests.utils.default_headers()
headers.update({'User-Agent': USER_AGENT})
page_exists = False
error = None
content = None
try:
response = requests.get(url, headers=headers, timeout=timeout)
if response.ok:
page_exists = True
content = response.content
else:
error = 'HTTP error: {} {}'.format(response.status_code, response.reason.title())
except requests.exceptions.SSLError as e:
error = "SSL error: {}".format(e)
except requests.exceptions.HTTPError as e:
error = 'HTTP error: {}'.format(e)
except requests.exceptions.ConnectionError as e:
# Prettify common connection errors
if hasattr(e, 'args') and len(e.args) > 0:
if type(e.args[0]) == requests.packages.urllib3.exceptions.MaxRetryError:
reason = e.args[0].reason
if type(reason) == requests.packages.urllib3.exceptions.NewConnectionError:
if hasattr(reason, 'args') and len(reason.args) > 0:
if type(reason.args[0]) == str:
message = reason.args[0]
# Filter DNS lookup error from other connection errors
# (until https://github.com/shazow/urllib3/issues/1003 is resolved)
if ("[Errno 11001] getaddrinfo failed" in message or # Windows
"[Errno -2] Name or service not known" in message or # Linux
"[Errno 8] nodename nor servname " in message): # OS X
error = 'Connection error: DNS lookup error'
else:
error = 'Connection error{}'.format(message[message.find(':'):])
if type(reason) == requests.packages.urllib3.exceptions.ConnectTimeoutError:
if hasattr(reason, 'args') and len(reason.args) > 0:
if type(reason.args[0]) == requests.packages.urllib3.connection.HTTPConnection:
conn = reason.args[0]
error = 'Connection error: HTTP connection timeout (connect timeout={})'.format(conn.timeout)
if type(reason.args[0]) == requests.packages.urllib3.connection.VerifiedHTTPSConnection:
conn = reason.args[0]
error = 'Connection error: HTTPS connection timeout (connect timeout={})'.format(conn.timeout)
if error is None:
error = "Connection error: {}".format(e)
except requests.exceptions.MissingSchema as e:
error = "Invalid URL: No schema supplied."
except requests.exceptions.InvalidSchema as e:
error = "Invalid URL: Invalid schema supplied."
except requests.exceptions.RequestException as e:
error = "Error: {}".format(e)
except Exception as e:
error = "Exception: {}".format(e)
return page_exists, error, content
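# Hedged usage sketch (not part of the original module): url_open() reports
# failures through its return tuple instead of raising. The URL is a placeholder.
def _demo_url_open(url='https://example.com'):
    exists, error, content = url_open(url, timeout=10)
    if exists:
        print('fetched {} bytes'.format(len(content or b'')))
    else:
        print('fetch failed: {}'.format(error))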
def is_page_exists(url, timeout=None):
""" Check if a webpage exists """
exists, error, resp = url_open(url, timeout=timeout)
return exists, error
def page_title(url):
""" Get webpage title """
exists, error, resp = url_open(url)
if not exists:
return ''
if error is not None and len(error) > 0:
return ''
try:
page = bs4.BeautifulSoup(resp, "html.parser")
return page.title.string.strip() if page.title else ''
except Exception as e:
return ''
def archive_url(url, archive_dir, verbose=False, throttle_downloads=False):
""" Save an archived version of a webpage, along with all the
required media you'll need to view the page offline """
# Abort early if target url doesn't exist
page_exists, error = is_page_exists(url)
if not page_exists:
click.echo('error: '+error)
return None
# Use 'wget' to download an archive version of the webpage
tmpdir = tempfile.TemporaryDirectory()
wget_args = [
'--no-verbose', # turn off verboseness, without being quiet
'--span-hosts', # go to foreign hosts when recursive
'--timestamping', # don't re-retrieve files unless newer than local
'--convert-links', # make links in downloaded HTML or CSS point to local files
'--page-requisites', # get all images, etc. needed to display HTML page
'--directory-prefix', tmpdir.name, # save files to PREFIX/..
'--user-agent', USER_AGENT ] # identify as AGENT instead of Wget/VERSION
if throttle_downloads:
wget_args.extend([
'--wait=3', # wait SECONDS between retrievals
'--random-wait' ] # wait from 0.5*WAIT...1.5*WAIT secs between retrievals
)
html_file = None
for line in sh.wget(wget_args, url,
#_err_to_out=True,
#_out=sys.stdout,
_ok_code=[ 0, 4, 8 ],
_iter="err"):
# Get the target filename by scraping the wget output
if html_file is None and ' URL:{} ['.format(url) in line:
            base = os.path.join(tmpdir.name,
So, well, let's do
# that.
processor = None
if invoice.processor:
processor = self.get_invoice_processor(invoice, logger=logger)
if not processor:
# get_invoice_processor() has already logged
return (self.RESULT_PROCESSORFAIL, None, None)
try:
with transaction.atomic():
processor.process_invoice_payment(invoice)
except Exception as ex:
logger("Failed to run invoice processor '%s': %s" % (invoice.processor, ex))
return (self.RESULT_PROCESSORFAIL, None, None)
# Generate a PDF receipt for this, since it's now paid
wrapper = InvoiceWrapper(invoice)
invoice.pdf_receipt = base64.b64encode(wrapper.render_pdf_receipt())
# Save and we're done!
invoice.save()
# Create an accounting entry for this invoice. If we have the required
# information on the invoice, we can finalize it. If not, we will
# need to create an open ended one.
accountingtxt = 'Invoice #%s: %s' % (invoice.id, invoice.title)
accrows = [
(incomeaccount, accountingtxt, invoice.total_amount - transcost, None),
]
if transcost > 0:
# If there was a transaction cost known at this point (which
# it typically is with Paypal), make sure we book a row for it.
accrows.append(
(costaccount, accountingtxt, transcost, invoice.accounting_object),
)
if invoice.total_vat:
# If there was VAT on this invoice, create a separate accounting row for this
# part. As there can in theory (though maybe not in practice?) be multiple different
# VATs on the invoice, we need to summarize the rows.
vatsum = defaultdict(int)
for r in invoice.invoicerow_set.all():
if r.vatrate_id:
vatsum[r.vatrate.vataccount.num] += (r.rowamount * r.rowcount * r.vatrate.vatpercent / Decimal(100)).quantize(Decimal('0.01'))
total_vatsum = sum(vatsum.values())
if invoice.total_vat != total_vatsum:
raise Exception("Stored VAT total %s does not match calculated %s" % (invoice.total_vat, total_vatsum))
for accountnum, s in list(vatsum.items()):
accrows.append(
(accountnum, accountingtxt, -s, None),
)
if invoice.accounting_account:
accrows.append(
(invoice.accounting_account, accountingtxt, -(invoice.total_amount - invoice.total_vat), invoice.accounting_object),
)
leaveopen = False
else:
leaveopen = True
urls = ['%s/invoices/%s/' % (settings.SITEBASE, invoice.pk), ]
if extraurls:
urls.extend(extraurls)
create_accounting_entry(date.today(), accrows, leaveopen, urls)
# Send the receipt to the user if possible - that should make
# them happy :)
wrapper.email_receipt()
# Write a log, because it's always nice..
InvoiceHistory(invoice=invoice, txt='Processed payment').save()
InvoiceLog(
message="Processed payment of %s %s for invoice %s (%s)" % (
invoice.total_amount,
settings.CURRENCY_ABBREV,
invoice.pk,
invoice.title),
timestamp=datetime.now()
).save()
return (self.RESULT_OK, invoice, processor)
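    # Illustrative note (not from the original code): each entry in "accrows"
    # above is a (account number, description, amount, accounting object)
    # tuple. When the invoice carries an accounting_account, the income/cost
    # rows and the negative sales/VAT rows sum to zero and the entry can be
    # finalized; otherwise it is created open-ended (leaveopen=True).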
def get_invoice_processor(self, invoice, logger=None):
if invoice.processor:
try:
pieces = invoice.processor.classname.split('.')
modname = '.'.join(pieces[:-1])
classname = pieces[-1]
mod = __import__(modname, fromlist=[classname, ])
return getattr(mod, classname)()
except Exception as ex:
if logger:
logger("Failed to instantiate invoice processor '%s': %s" % (invoice.processor, ex))
return None
else:
raise Exception("Failed to instantiate invoice processor '%s': %s" % (invoice.processor, ex))
else:
return None
# Cancel the specified invoice, calling any processor set on it if necessary
def cancel_invoice(self, invoice, reason, who):
# If this invoice has a processor, we need to start by calling it
processor = self.get_invoice_processor(invoice)
if processor:
try:
with transaction.atomic():
processor.process_invoice_cancellation(invoice)
except Exception as ex:
raise Exception("Failed to run invoice processor '%s': %s" % (invoice.processor, ex))
invoice.deleted = True
invoice.deletion_reason = reason
invoice.save()
InvoiceHistory(invoice=invoice, txt='Canceled by {}'.format(who)).save()
# Send the receipt to the user if possible - that should make
# them happy :)
wrapper = InvoiceWrapper(invoice)
wrapper.email_cancellation(reason)
InvoiceLog(timestamp=datetime.now(), message="Deleted invoice %s (deleted by %s): %s" % (invoice.id, who, invoice.deletion_reason)).save()
def refund_invoice(self, invoice, reason, amount, vatamount, vatrate):
# Initiate a refund of an invoice if there is a payment provider that supports it.
# Otherwise, flag the invoice as refunded, and assume the user took care of it manually.
r = InvoiceRefund(invoice=invoice, reason=reason, amount=amount, vatamount=vatamount, vatrate=vatrate)
r.save()
InvoiceHistory(invoice=invoice,
txt='Registered refund of {0}{1}'.format(settings.CURRENCY_SYMBOL, amount + vatamount)).save()
wrapper = InvoiceWrapper(invoice)
if invoice.can_autorefund:
# Send an initial notice to the user.
wrapper.email_refund_initiated(r)
# Accounting record is created when we send the API call to the
# provider.
InvoiceLog(timestamp=datetime.now(),
message="Initiated refund of {0}{1} of invoice {2}: {3}".format(settings.CURRENCY_SYMBOL, amount + vatamount, invoice.id, reason),
).save()
else:
# No automatic refund, so this is flagging something that has
# already been done. Update accordingly.
r.issued = r.registered
r.completed = r.registered
r.payment_reference = "MANUAL"
r.save()
# Create accounting record, since we flagged it manually. As we
# don't know which account it was refunded from, leave that
# end open.
if invoice.accounting_account:
accountingtxt = 'Refund of invoice #{0}: {1}'.format(invoice.id, invoice.title)
accrows = [
(invoice.accounting_account, accountingtxt, invoice.total_amount - vatamount, invoice.accounting_object),
]
if vatamount:
accrows.append(
(r.vatrate.vataccount.num, accountingtxt, vatamount, None),
)
urls = ['%s/invoices/%s/' % (settings.SITEBASE, invoice.pk), ]
create_accounting_entry(date.today(), accrows, True, urls)
InvoiceHistory(invoice=invoice,
txt='Flagged refund of {0}{1}'.format(settings.CURRENCY_SYMBOL, amount + vatamount)).save()
wrapper.email_refund_sent(r)
InvoiceLog(timestamp=datetime.now(),
message="Flagged invoice {0} as refunded by {1}{2}: {3}".format(invoice.id, settings.CURRENCY_SYMBOL, amount + vatamount, reason),
).save()
return r
def autorefund_invoice(self, refund):
# Send an API call to initiate a refund
if refund.invoice.autorefund(refund):
refund.issued = datetime.now()
refund.save()
InvoiceHistory(invoice=refund.invoice, txt='Sent refund request to provider').save()
return True
else:
InvoiceHistory(invoice=refund.invoice, txt='Failed to send refund request to provider').save()
return False
def complete_refund(self, refundid, refundamount, refundfee, incomeaccount, costaccount, extraurls, method):
# Process notification from payment provider that refund has completed
refund = InvoiceRefund.objects.get(id=refundid)
invoice = refund.invoice
if refund.completed:
raise Exception("Refund {0} has already been completed".format(refundid))
if not refund.issued:
raise Exception("Refund {0} has not been issued, yet signaled completed!".format(refundid))
if refundamount != refund.amount + refund.vatamount:
raise Exception("Refund {0} attempted to process amount {1} but refund should be {2}".format(refundid, refundamount, refund.amount + refund.vatamount))
accountingtxt = 'Refund ({0}) of invoice #{1}'.format(refundid, invoice.id)
accrows = [
(incomeaccount, accountingtxt, -(refundamount - refundfee), None),
]
if refund.vatamount:
accrows.append(
(refund.vatrate.vataccount.num, accountingtxt, refund.vatamount, None),
)
if refundfee != 0:
accrows.append(
(costaccount, accountingtxt, -refundfee, invoice.accounting_object),
)
if invoice.accounting_account:
accrows.append(
(invoice.accounting_account, accountingtxt, refundamount - refund.vatamount, invoice.accounting_object),
)
leaveopen = False
else:
leaveopen = True
urls = ['%s/invoices/%s/' % (settings.SITEBASE, invoice.pk), ]
if extraurls:
urls.extend(extraurls)
create_accounting_entry(date.today(), accrows, leaveopen, urls)
# Also flag the refund as done
refund.completed = datetime.now()
refund.save()
wrapper = InvoiceWrapper(invoice)
wrapper.email_refund_sent(refund)
InvoiceHistory(invoice=invoice, txt='Completed refund {0}'.format(refund.id)).save()
# This creates a complete invoice, and finalizes it
def create_invoice(self,
recipient_user,
recipient_email,
recipient_name,
recipient_address,
title,
invoicedate,
duedate,
invoicerows,
paymentmethods,
processor=None,
processorid=None,
accounting_account=None,
accounting_object=None,
canceltime=None,
reverse_vat=False,
extra_bcc_list=None):
invoice = Invoice(
recipient_email=recipient_email,
recipient_name=recipient_name,
recipient_address=recipient_address,
title=title,
invoicedate=invoicedate,
duedate=duedate,
total_amount=-1,
accounting_account=accounting_account,
accounting_object=accounting_object,
canceltime=canceltime,
reverse_vat=reverse_vat,
extra_bcc_list=extra_bcc_list or '')
if recipient_user:
invoice.recipient_user = recipient_user
if processor:
invoice.processor = processor
if processorid:
invoice.processorid = processorid
# Add our rows. Need to save the invoice first so it has an id.
# But we expect to be in a transaction anyway.
invoice.save()
for r in invoicerows:
invoice.invoicerow_set.add(InvoiceRow(invoice=invoice,
rowtext=_trunc_string(r[0], 100),
rowcount=r[1],
rowamount=r[2],
vatrate=r[3],
), bulk=False)
# Add the ways it can be paid
invoice.allowedmethods = paymentmethods
invoice.save()
# That should be it. Finalize so we get a PDF, and then
# return whatever we have.
wrapper = InvoiceWrapper(invoice)
wrapper.finalizeInvoice()
return invoice
def postpone_invoice_autocancel(self, invoice, mintime, reason, silent=False):
# Extend an invoice to be valid at least mintime into the future. Unless
# silent is set, a notification will be sent to the invoice address if
# this happens. No notification is sent to the end user.
if invoice.paidat:
# Already paid. Could happen if payment notification is delivered concurrently,
# so just ignore it.
return False
if not invoice.canceltime:
return False
if invoice.canceltime > datetime.now() + mintime:
return False
# Else we need to extend it, so do it
oldtime = invoice.canceltime
invoice.canceltime = datetime.now() + mintime
invoice.save()
InvoiceHistory(invoice=invoice, txt='Extended until {0}: {1}'.format(invoice.canceltime, reason)).save()
if not silent:
send_simple_mail(settings.INVOICE_SENDER_EMAIL,
settings.INVOICE_NOTIFICATION_RECEIVER,
"Invoice {0} automatically extended".format(invoice.id),
"""The invoice with id {0} has had it's automatic cancel time extended
from {1} to {2}.
The reason for this was:
{3}
The invoice remains active regardless of the original cancel time, and will
keep getting extended until the process is manually stopped. A new notification
will be sent after each extension.
""".format(invoice.id, oldtime, invoice.canceltime, reason))
# This is purely for testing, obviously
class TestProcessor(object):
def process_invoice_payment(self, invoice):
print("Callback processing invoice with title '%s', for my own id %s" % (invoice.title, invoice.processorid))
def process_invoice_cancellation(self, invoice):
raise Exception("This processor can't cancel invoices.")
def get_return_url(self, invoice):
print("Trying to get the return url, but I can't!")
return "http://unknown.postgresql.eu/"
def get_admin_url(self, invoice):
return None
# Calculate the number of workdays between two datetimes.
def diff_workdays(start, end):
weekdays = len(list(rrule.rrule(rrule.DAILY, byweekday=list(range(0, 5)), dtstart=start, until=end)))
if end.hour < 8:
weekdays -= 1
if start.hour > 17:
weekdays -= 1
    # We
# py_mob/py_mob.py
import numpy, scipy.stats, sklearn.isotonic, sklearn.cluster, lightgbm, tabulate, pkg_resources
def get_data(data):
"""
The function loads a testing dataset.
Parameters:
data : The name of dataset. It is either "hmeq" or "accepts", both of
which are loan performance data.
Returns:
A dict with the dataset.
Example:
data = py_mob.get_data("accepts")
data.keys()
# ['bankruptcy', 'bad', 'app_id', 'tot_derog', 'tot_tr', 'age_oldest_tr',
# 'tot_open_tr', 'tot_rev_tr', 'tot_rev_debt', 'tot_rev_line', 'rev_util',
# 'bureau_score', 'purch_price', 'msrp', 'down_pyt', 'purpose',
# 'loan_term', 'loan_amt', 'ltv', 'tot_income', 'used_ind', 'weight']
py_mob.view_bin(py_mob.qtl_bin(data["ltv"], data["bad"]))
"""
_p = pkg_resources.resource_filename("py_mob", "data/" + data + ".csv")
_d = numpy.recfromcsv(_p, delimiter = ',', names = True, encoding = 'latin-1')
return(dict((_2, [_[_1] for _ in _d]) for _1, _2 in enumerate(_d.dtype.fields)))
########## 01. cal_woe() ##########
def cal_woe(x, bin):
"""
The function applies the woe transformation to a numeric vector based on
the binning outcome.
Parameters:
x : A numeric vector, which can be a list, 1-D numpy array, or pandas
series
bin : An object containing the binning outcome.
Returns:
A list of dictionaries with three keys
Example:
ltv_bin = qtl_bin(ltv, bad)
for x in cal_woe(ltv[:3], ltv_bin):
print(x)
# {'x': 109.0, 'bin': 6, 'woe': 0.2694}
# {'x': 97.0, 'bin': 3, 'woe': 0.0045}
# {'x': 105.0, 'bin': 5, 'woe': 0.1829}
"""
_cut = sorted(bin['cut'] + [numpy.PINF, numpy.NINF])
_dat = [[_1[0], _1[1], _2] for _1, _2 in zip(enumerate(x), ~numpy.isnan(x))]
_m1 = [_[:2] for _ in _dat if _[2] == 0]
_l1 = [_[:2] for _ in _dat if _[2] == 1]
_l2 = [[*_1, _2] for _1, _2 in zip(_l1, numpy.searchsorted(_cut, [_[1] for _ in _l1]).tolist())]
flatten = lambda l: [item for subl in l for item in subl]
_l3 = flatten([[[*l, b['woe']] for l in _l2 if l[2] == b['bin']] for b in bin['tbl'] if b['bin'] > 0])
if len(_m1) > 0:
if len([_ for _ in bin['tbl'] if _['miss'] > 0]) > 0:
_m2 = [l + [_['bin'] for _ in bin['tbl'] if _['miss'] > 0]
+ [_['woe'] for _ in bin['tbl'] if _['miss'] > 0] for l in _m1]
else:
_m2 = [l + [0, 0] for l in _m1]
_l3.extend(_m2)
_key = ["x", "bin", "woe"]
return(list(dict(zip(_key, _[1:])) for _ in sorted(_l3, key = lambda x: x[0])))
########## 02. summ_bin() ##########
def summ_bin(x):
"""
The function summarizes the binning outcome generated from a binning function,
e.g. qtl_bin() or iso_bin().
Parameters:
x: An object containing the binning outcome.
Returns:
A dictionary with statistics derived from the binning outcome
Example:
summ_bin(iso_bin(ltv, bad))
# {'sample size': 5837, 'bad rate': 0.2049, 'iv': 0.185, 'ks': 16.88, 'missing': 0.0002}
"""
_freq = sum(_['freq'] for _ in x['tbl'])
_bads = sum(_['bads'] for _ in x['tbl'])
_miss = sum(_['miss'] for _ in x['tbl'])
_iv = round(sum(_['iv'] for _ in x['tbl']), 4)
_ks = round(max(_["ks"] for _ in x["tbl"]), 2)
_br = round(_bads / _freq, 4)
_mr = round(_miss / _freq, 4)
return({"sample size": _freq, "bad rate": _br, "iv": _iv, "ks": _ks, "missing": _mr})
########## 03. view_bin() ##########
def view_bin(x):
"""
The function displays the binning outcome generated from a binning function,
e.g. qtl_bin() or iso_bin().
Parameters:
x: An object containing the binning outcome.
Returns:
None
Example:
view_bin(qtl_bin(df.ltv, df.bad))
"""
tabulate.PRESERVE_WHITESPACE = True
_sel = ["bin", "freq", "miss", "bads", "rate", "woe", "iv", "ks"]
_tbl = [{**(lambda v: {k: v[k] for k in _sel})(_), "rule": _["rule"].ljust(45)} for _ in x["tbl"]]
print(tabulate.tabulate(_tbl, headers = "keys", tablefmt = "github",
colalign = ["center"] + ["right"] * 7 + ["center"],
floatfmt = (".0f", ".0f", ".0f", ".0f", ".4f", ".4f", ".4f", ".2f")))
########## 04. qcut() ##########
def qcut(x, n):
"""
The function discretizes a numeric vector into n pieces based on quantiles.
Parameters:
x: A numeric vector.
n: An integer indicating the number of categories to discretize.
Returns:
A list of numeric values to divide the vector x into n categories.
Example:
qcut(range(10), 3)
# [3, 6]
"""
_q = numpy.linspace(0, 100, n, endpoint = False)[1:]
_x = [_ for _ in x if not numpy.isnan(_)]
_c = numpy.unique(numpy.percentile(_x, _q, interpolation = "lower"))
return([_ for _ in _c])
########## 05. manual_bin() ##########
def manual_bin(x, y, cuts):
"""
The function discretizes the x vector and then summarizes over the y vector
based on the discretization result.
Parameters:
x : A numeric vector to discretize without missing values,
e.g. numpy.nan or math.nan
y : A numeric vector with binary values of 0/1 and with the same length
of x
cuts : A list of numeric values as cut points to discretize x.
Returns:
A list of dictionaries for the binning outcome.
Example:
for x in manual_bin(scr, bad, [650, 700, 750]):
print(x)
# {'bin': 1, 'freq': 1311, 'miss': 0, 'bads': 520.0, 'minx': 443.0, 'maxx': 650.0}
# {'bin': 2, 'freq': 1688, 'miss': 0, 'bads': 372.0, 'minx': 651.0, 'maxx': 700.0}
# {'bin': 3, 'freq': 1507, 'miss': 0, 'bads': 157.0, 'minx': 701.0, 'maxx': 750.0}
# {'bin': 4, 'freq': 1016, 'miss': 0, 'bads': 42.0, 'minx': 751.0, 'maxx': 848.0}
"""
_x = [_ for _ in x]
_y = [_ for _ in y]
_c = sorted([_ for _ in set(cuts)] + [numpy.NINF, numpy.PINF])
_g = numpy.searchsorted(_c, _x).tolist()
_l1 = sorted(zip(_g, _x, _y), key = lambda x: x[0])
_l2 = zip(set(_g), [[l for l in _l1 if l[0] == g] for g in set(_g)])
return(sorted([dict(zip(["bin", "freq", "miss", "bads", "minx", "maxx"],
[_1, len(_2), 0,
sum([_[2] for _ in _2]),
min([_[1] for _ in _2]),
max([_[1] for _ in _2])])) for _1, _2 in _l2],
key = lambda x: x["bin"]))
########## 06. miss_bin() ##########
def miss_bin(y):
"""
The function summarizes the y vector with binary values of 0/1 and is not
supposed to be called directly by users.
Parameters:
y : A numeric vector with binary values of 0/1.
Returns:
A dictionary.
"""
return({"bin": 0, "freq": len([_ for _ in y]), "miss": len([_ for _ in y]),
"bads": sum([_ for _ in y]), "minx": numpy.nan, "maxx": numpy.nan})
########## 07. gen_rule() ##########
def gen_rule(tbl, pts):
"""
The function generates binning rules based on the binning outcome table and
  a list of cut points and is a utility function that is not supposed to be
called directly by users.
Parameters:
  tbl : An intermediate table of the binning outcome within each binning
        function
  pts : A list of cut points for the binning
Returns:
A list of dictionaries with binning rules
"""
for _ in tbl:
if _["bin"] == 0:
_["rule"] = "numpy.isnan($X$)"
elif _["bin"] == len(pts) + 1:
if _["miss"] == 0:
_["rule"] = "$X$ > " + str(pts[-1])
else:
_["rule"] = "$X$ > " + str(pts[-1]) + " or numpy.isnan($X$)"
elif _["bin"] == 1:
if _["miss"] == 0:
_["rule"] = "$X$ <= " + str(pts[0])
else:
_["rule"] = "$X$ <= " + str(pts[0]) + " or numpy.isnan($X$)"
else:
_["rule"] = "$X$ > " + str(pts[_["bin"] - 2]) + " and $X$ <= " + str(pts[_["bin"] - 1])
_sel = ["bin", "freq", "miss", "bads", "rate", "woe", "iv", "ks", "rule"]
return([{k: _[k] for k in _sel} for _ in tbl])
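# Illustrative note (not part of the original module): the rule strings built by
# gen_rule() keep "$X$" as a placeholder for the variable name, so callers can
# substitute it in, e.g.:
#   rule = "$X$ > 650 and $X$ <= 700".replace("$X$", "bureau_score")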
########## 08. gen_woe() ##########
def gen_woe(x):
"""
The function calculates weight of evidence and information value based on the
  binning outcome within each binning function and is a utility function that
is not supposed to be called directly by users.
Parameters:
x : A list of dictionaries for the binning outcome.
Returns:
A list of dictionaries with additional keys to the input.
"""
_freq = sum(_["freq"] for _ in x)
_bads = sum(_["bads"] for _ in x)
_l1 = sorted([{**_,
"rate": round(_["bads"] / _["freq"], 4),
"woe" : round(numpy.log((_["bads"] / _bads) / ((_["freq"] - _["bads"]) / (_freq - _bads))), 4),
"iv" : round((_["bads"] / _bads - (_["freq"] - _["bads"]) / (_freq - _bads)) *
numpy.log((_["bads"] / _bads) / ((_["freq"] - _["bads"]) / (_freq - _bads))), 4)
} for _ in x], key = lambda _x: _x["bin"])
  cumsum = lambda x: [sum([_ for _ in x][0:(i
import fnmatch
import inspect
import os
from importlib.util import spec_from_file_location
import logging
import sys
__dirname, __init_python_script = os.path.split(os.path.abspath(__file__))
logger = logging.getLogger("SimplePlugins")
formatter = logging.Formatter('%(asctime)s %(name)-12s %(levelname)-8s %(message)s')
console_log_handler = logging.StreamHandler(stream=sys.stdout)
console_log_handler.setFormatter(formatter)
console_log_handler.setLevel(logging.DEBUG)
logger.addHandler(console_log_handler)
logger.setLevel(logging.DEBUG)
def get_files_recursive(path, match='*.py'):
"""
Perform a recursive search to find all the files matching the
specified search criteria.
:param path: Path to begin the recursive search.
:param match: String/Regex used to match files with a pattern.
:return: Full path of all the files found.
:rtype: list
"""
matches = []
for root, dirnames, filenames in os.walk(path):
for filename in fnmatch.filter(filenames, match):
matches.append(os.path.join(root, filename))
return matches
def get_filename(file):
"""
Safe method to retrieve only the name of the file.
:param file: Path of the file to retrieve the name from.
    :return: None if the file is non-existent, otherwise the filename (extension included)
:rtype: None, str
"""
if not os.path.exists(file):
return None
return "%s%s" % os.path.splitext(file)
def import_module_from_file(full_path_to_module):
"""
Import a module given the full path/filename of the .py file
Python 3.4
"""
if inspect.ismodule(full_path_to_module):
return full_path_to_module
module = None
try:
# Get module name and path from full path
module_dir, module_file = os.path.split(full_path_to_module)
module_name, module_ext = os.path.splitext(module_file)
# Get module "spec" from filename
spec = spec_from_file_location(module_name, full_path_to_module)
module = spec.loader.load_module()
except Exception as ec:
# Simple error printing
# Insert "sophisticated" stuff here
print(ec)
finally:
return module
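# Hedged usage sketch (hypothetical path, not part of the original module): load a
# module object straight from a .py file, then inspect it for Plugin subclasses.
# plugin_module = import_module_from_file('/path/to/plugins/my_plugin.py')
# print(plugin_module.__name__ if plugin_module else 'import failed')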
class PluginException(Exception):
pass
class Plugin(object):
"""
Base class that all plugins derive from.
"""
def __init__(self, **kwargs):
self.name = kwargs.pop('name', self.__class__.__name__)
self.version = kwargs.pop('version', "1.0.0")
self.description = kwargs.pop('description', "No Description Available")
self.active = False
def activate(self):
"""
Operations to perform whenever the plugin is activated.
:return:
"""
raise NotImplementedError("Activation for %s is not implemented" % self.name)
def deactivate(self):
"""
Operations to perform whenever the plugin is deactivated.
:return:
"""
raise NotImplementedError("Deactivation for %s is not implemented" % self.name)
def perform(self, **kwargs):
"""
Operations that will be performed when invoked.
This method is where the actual "use" logic of plugins will be defined.
:param kwargs:
:return:
"""
raise NotImplementedError("Perform for %s is not implemented" % self.name)
class PluginManager(object):
"""
Holds instances, and information for each plugin.
Provides functionality to interact, activate, deactivate, perform, and operate with or on plugins.
"""
def __init__(self):
self.plugins = {}
def register(self, plugin=None, plugin_file=None, directory=None, skip_types=None, override=False, activate=True):
"""
Register a plugin, or plugins to be managed and recognized by the plugin manager.
Will take a plugin instance, file where a plugin / plugin(s) reside, parent directory
that holds plugin(s), or sub-folders with plugin(s).
Will optionally "activate" the plugins, and perform any operations defined in their "activate" method.
:param plugin: Plugin Instance to register.
:param plugin_file: str: File (full path) to scan for Plugins.
:param directory: str: Directory to perform a recursive scan on for Plugins.
:param skip_types: list: Types of plugins to skip when found, during a scan / search.
:param override: bool: Whether or not to override registered plugin when it's being registered again.
:param activate: bool: Whether or not to activate the plugins upon registration.
:return: Does not Return.
"""
        # Double verify that there's types to skip. We don't want to register "Base" types (Plugin)
        # Handle the None default before coercing to a list, otherwise the default
        # would become [None, Plugin] and log a spurious debug message.
        if skip_types is None:
            skip_types = [Plugin]
        else:
            if not isinstance(skip_types, list):
                skip_types = [skip_types]
                logger.debug("Skip Types must be a list. Created list with values passed.")
            skip_types.append(Plugin)
# Check if they've passed a method of registration!
if plugin is None and plugin_file is None and directory is None:
raise PluginException("Unable to perform registration without a plugin, module, or directory.")
# First we'll check if they're registering via directory (Scanning)
# as it might be best for bigger applications / apps with many plugins to register them via
# a folder, where plugins are expected!
if directory is not None:
plugins_in_dir = PluginManager.scan_for_plugins(directory)
# Loop through all the plugins in the directory, associated by file -> list[] (or none)
for file, plugins in plugins_in_dir.items():
# If there's no plugins in that file then just continue.
if plugins is None:
continue
for plugin in plugins:
# If there's a duplicate plugin and we're not overriding, then we'll skip it.
if plugin.name in self.plugins:
if not override:
logger.warn("Failed to register %s: Duplicate plugin found!" % plugin.name)
continue
# Now verify if we're supposed to skip the type of the plugin that's being attempted to register.
# Useful when plugins classes extend a base-class (Plugin, for example)
# but you don't want to register the base class.
if type(plugin) in skip_types:
logger.warn(
"Skipping registration of %s, as it's not to be registered." % plugin.__class__.__name__)
continue
# Assign the plugin (via name) to the dictionary of registered plugins
self.plugins[plugin.name] = plugin
# Give a little output of the plugin!
logger.debug("Registered plugin %s from %s in %s" % (plugin.name, file, directory))
# Then if we're going to activate the plugin, do so!
if activate:
self.plugins[plugin.name].activate()
# Now we're going to check if they're registering the plugins
# either by file, or module
if plugin_file is not None:
# If the plugin_file is not a module, then we're going to verify the file actually exists!
if not inspect.ismodule(plugin_file):
# Verify if there's a ~ (Home dir call) inside the path, and if so then expand it.
plugin_file = os.path.expanduser(plugin_file)
# Then verify if the path of the plugin exists, raising an exception if not!
if not os.path.exists(plugin_file):
raise FileNotFoundError("Unable to locate file %s" % plugin_file)
            # Next after verifying, we get all the plugins inside the file or module.
plugins_in_file = PluginManager.get_plugins_in_module(plugin_file)
# If there's no plugins inside, then we're going to throw an exception. There's nothing to register in here.
if plugins_in_file is None or len(plugins_in_file) == 0:
raise PluginException("Unable to locate plugins inside %s" % plugin_file)
# Loop through every plugin inside the file/module and attempt to register it.
for fplugin in plugins_in_file:
# If there's a duplicate plugin and we're not overriding, then we'll skip it.
if fplugin.name in self.plugins:
if not override:
logger.warn("Failed to register %s: Duplicate plugin found!" % fplugin.name)
continue
# Now verify if we're supposed to skip the type of the plugin that's being attempted to register.
# Useful when plugins classes extend a base-class (Plugin, for example)
# but you don't want to register the base class.
if type(fplugin) in skip_types:
logger.warn(
"Skipping registration of %s, as it's not to be registered." % fplugin.__class__.__name__)
continue
# Assign the plugin (via name) to the dictionary of registered plugins
self.plugins[fplugin.name] = fplugin
# Give a little output of the plugin!
logger.debug("Registered plugin %s from %s %s" % (
fplugin.name, "module" if inspect.ismodule(plugin_file) else "file",
get_filename(plugin_file) if not inspect.ismodule(plugin_file) else plugin_file.__name__)
)
# Then if we're going to activate the plugin, do so!
if activate:
self.plugins[fplugin.name].activate()
# Now we're checking if they actually passed a plugin instance to register.
if plugin is not None:
# If it's already in the plugins and we're not overriding, then we'll skip it.
if plugin.name in self.plugins:
if override is False:
return
# Otherwise register the plugin, and (potentially) activate it!
self.plugins[plugin.name] = plugin
logger.debug("Registered plugin %s" % plugin.name)
if activate:
self.plugins[plugin.name].activate()
def activate(self, plugins=[]):
# If there's no plugins passed as a list, then we'll just assume
# That all the plugins are to be registered.
if len(plugins) == 0:
if not self.has_plugin():
raise PluginException("Unable to perform activation as no plugins have been registered")
for plugin in self.get_plugins():
if not plugin.active:
plugin.activate()
return
# Otherwise, we're going to loop through all the values in the list.
for plugin in plugins:
# Check if the value they've passed is a string (plugin name, presumably)
if isinstance(plugin, str):
if plugin not in self.plugins:
continue
pl = self.plugins[plugin]
# We don't want to activate plugins that are already active.
if pl.active:
continue
pl.activate()
elif isinstance(plugin, Plugin):
if plugin.active:
continue
if plugin.name not in self.plugins:
continue
plugin.activate()
def unregister(self, plugin=None, plugin_file=None):
"""
        Unregister all plugins, or a specific plugin, via an instance, or file (path)
floats > 20 or None, default=None
            If a list, the list of delays (in milliseconds) of length n_voices.
            If None, the individual delay parameters are chosen automatically
            to be between 40 and 60 milliseconds.
decays : list of floats or None, default=None
If a list, the list of decays (as a fraction of gain_in) of length
n_voices.
If None, the individual decay parameters are chosen automatically
to be between 0.3 and 0.4.
speeds : list of floats or None, default=None
If a list, the list of modulation speeds (in Hz) of length n_voices
If None, the individual speed parameters are chosen automatically
to be between 0.25 and 0.4 Hz.
depths : list of floats or None, default=None
            If a list, the list of depths (in milliseconds) of length n_voices.
            If None, the individual depth parameters are chosen automatically
            to be between 1 and 3 milliseconds.
        shapes : list of 's' or 't' or None, default=None
If a list, the list of modulation shapes - 's' for sinusoidal or
't' for triangular - of length n_voices.
If None, the individual shapes are chosen automatically.
'''
if not is_number(gain_in) or gain_in <= 0 or gain_in > 1:
raise ValueError("gain_in must be a number between 0 and 1.")
if not is_number(gain_out) or gain_out <= 0 or gain_out > 1:
raise ValueError("gain_out must be a number between 0 and 1.")
if not isinstance(n_voices, int) or n_voices <= 0:
raise ValueError("n_voices must be a positive integer.")
# validate delays
if not (delays is None or isinstance(delays, list)):
raise ValueError("delays must be a list or None")
if delays is not None:
if len(delays) != n_voices:
raise ValueError("the length of delays must equal n_voices")
if any((not is_number(p) or p < 20) for p in delays):
raise ValueError("the elements of delays must be numbers > 20")
else:
delays = [random.uniform(40, 60) for _ in range(n_voices)]
# validate decays
if not (decays is None or isinstance(decays, list)):
raise ValueError("decays must be a list or None")
if decays is not None:
if len(decays) != n_voices:
raise ValueError("the length of decays must equal n_voices")
if any((not is_number(p) or p <= 0 or p > 1) for p in decays):
raise ValueError(
"the elements of decays must be between 0 and 1"
)
else:
decays = [random.uniform(0.3, 0.4) for _ in range(n_voices)]
# validate speeds
if not (speeds is None or isinstance(speeds, list)):
raise ValueError("speeds must be a list or None")
if speeds is not None:
if len(speeds) != n_voices:
raise ValueError("the length of speeds must equal n_voices")
if any((not is_number(p) or p <= 0) for p in speeds):
raise ValueError("the elements of speeds must be numbers > 0")
else:
speeds = [random.uniform(0.25, 0.4) for _ in range(n_voices)]
# validate depths
if not (depths is None or isinstance(depths, list)):
raise ValueError("depths must be a list or None")
if depths is not None:
if len(depths) != n_voices:
raise ValueError("the length of depths must equal n_voices")
if any((not is_number(p) or p <= 0) for p in depths):
raise ValueError("the elements of depths must be numbers > 0")
else:
depths = [random.uniform(1.0, 3.0) for _ in range(n_voices)]
# validate shapes
if not (shapes is None or isinstance(shapes, list)):
raise ValueError("shapes must be a list or None")
if shapes is not None:
if len(shapes) != n_voices:
raise ValueError("the length of shapes must equal n_voices")
if any((p not in ['t', 's']) for p in shapes):
raise ValueError("the elements of shapes must be 's' or 't'")
else:
shapes = [random.choice(['t', 's']) for _ in range(n_voices)]
effect_args = ['chorus', '{}'.format(gain_in), '{}'.format(gain_out)]
for i in range(n_voices):
effect_args.extend([
'{:f}'.format(delays[i]),
'{:f}'.format(decays[i]),
'{:f}'.format(speeds[i]),
'{:f}'.format(depths[i]),
'-{}'.format(shapes[i])
])
self.effects.extend(effect_args)
self.effects_log.append('chorus')
return self
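    # Hedged usage sketch (assuming this method lives on pysox's Transformer
    # class; the file names are placeholders, not part of the original code):
    #   tfm = sox.Transformer()
    #   tfm.chorus(gain_in=0.4, gain_out=0.7, n_voices=3)
    #   tfm.build('input.wav', 'chorus_out.wav')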
def compand(self, attack_time=0.3, decay_time=0.8, soft_knee_db=6.0,
tf_points=[(-70, -70), (-60, -20), (0, 0)],
):
'''Compand (compress or expand) the dynamic range of the audio.
Parameters
----------
attack_time : float, default=0.3
The time in seconds over which the instantaneous level of the input
signal is averaged to determine increases in volume.
decay_time : float, default=0.8
The time in seconds over which the instantaneous level of the input
signal is averaged to determine decreases in volume.
soft_knee_db : float or None, default=6.0
            The amount (in dB) by which the points where adjacent line
            segments on the transfer function meet will be rounded.
If None, no soft_knee is applied.
tf_points : list of tuples
Transfer function points as a list of tuples corresponding to
points in (dB, dB) defining the compander's transfer function.
See Also
--------
mcompand, contrast
'''
if not is_number(attack_time) or attack_time <= 0:
raise ValueError("attack_time must be a positive number.")
if not is_number(decay_time) or decay_time <= 0:
raise ValueError("decay_time must be a positive number.")
if attack_time > decay_time:
logger.warning(
"attack_time is larger than decay_time.\n"
"For most situations, attack_time should be shorter than "
"decay time because the human ear is more sensitive to sudden "
"loud music than sudden soft music."
)
if not (is_number(soft_knee_db) or soft_knee_db is None):
raise ValueError("soft_knee_db must be a number or None.")
if not isinstance(tf_points, list):
raise TypeError("tf_points must be a list.")
if len(tf_points) == 0:
raise ValueError("tf_points must have at least one point.")
if any(not isinstance(pair, tuple) for pair in tf_points):
raise ValueError("elements of tf_points must be pairs")
if any(len(pair) != 2 for pair in tf_points):
raise ValueError("Tuples in tf_points must be length 2")
if any(not (is_number(p[0]) and is_number(p[1])) for p in tf_points):
raise ValueError("Tuples in tf_points must be pairs of numbers.")
if any((p[0] > 0 or p[1] > 0) for p in tf_points):
raise ValueError("Tuple values in tf_points must be <= 0 (dB).")
if len(tf_points) > len(set([p[0] for p in tf_points])):
raise ValueError("Found duplicate x-value in tf_points.")
tf_points = sorted(
tf_points,
key=lambda tf_points: tf_points[0]
)
transfer_list = []
for point in tf_points:
transfer_list.extend([
"{:f}".format(point[0]), "{:f}".format(point[1])
])
effect_args = [
'compand',
"{:f},{:f}".format(attack_time, decay_time)
]
if soft_knee_db is not None:
effect_args.append(
"{:f}:{}".format(soft_knee_db, ",".join(transfer_list))
)
else:
effect_args.append(",".join(transfer_list))
self.effects.extend(effect_args)
self.effects_log.append('compand')
return self
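    # Hedged usage sketch (file names and parameter values are placeholders):
    # a gentle compressor that keeps the default transfer function points.
    #   tfm = sox.Transformer()
    #   tfm.compand(attack_time=0.2, decay_time=1.0, soft_knee_db=6.0)
    #   tfm.build('input.wav', 'compressed.wav')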
def contrast(self, amount=75):
'''Comparable with compression, this effect modifies an audio signal to
make it sound louder.
Parameters
----------
amount : float
Amount of enhancement between 0 and 100.
See Also
--------
compand, mcompand
'''
if not is_number(amount) or amount < 0 or amount > 100:
raise ValueError('amount must be a number between 0 and 100.')
effect_args = ['contrast', '{:f}'.format(amount)]
self.effects.extend(effect_args)
self.effects_log.append('contrast')
return self
def convert(self, samplerate=None, n_channels=None, bitdepth=None):
'''Converts output audio to the specified format.
Parameters
----------
samplerate : float, default=None
Desired samplerate. If None, defaults to the same as input.
n_channels : int, default=None
Desired number of channels. If None, defaults to the same as input.
bitdepth : int, default=None
Desired bitdepth. If None, defaults to the same as input.
See Also
--------
rate
'''
bitdepths = [8, 16, 24, 32, 64]
if bitdepth is not None:
if bitdepth not in bitdepths:
raise ValueError(
"bitdepth must be one of {}.".format(str(bitdepths))
)
self.output_format.extend(['-b', '{}'.format(bitdepth)])
if n_channels is not None:
if not isinstance(n_channels, int) or n_channels <= 0:
raise ValueError(
"n_channels must be a positive integer."
)
self.output_format.extend(['-c', '{}'.format(n_channels)])
if samplerate is not None:
if not is_number(samplerate) or samplerate <= 0:
raise ValueError("samplerate must be a positive number.")
self.rate(samplerate)
return self
def dcshift(self, shift=0.0):
'''Apply a DC shift to the audio.
Parameters
----------
shift : float
Amount to shift audio between -2 and 2. (Audio is between -1 and 1)
See Also
--------
highpass
'''
if not is_number(shift) or shift < -2 or shift > 2:
raise ValueError('shift must be a number between -2 and 2.')
effect_args = ['dcshift', '{:f}'.format(shift)]
self.effects.extend(effect_args)
self.effects_log.append('dcshift')
return self
def deemph(self):
'''Apply Compact Disc (IEC 60908) de-emphasis (a treble attenuation
shelving filter). Pre-emphasis was applied in the mastering of some
CDs issued in the early 1980s. These included many classical music
albums, as well as now sought-after issues of albums by The Beatles,
<NAME> and others. Pre-emphasis should be removed at playback time
        by
import nolds
import numpy as np
import pandas as pd
from pyentrp import entropy as pyentrp
import neurokit2 as nk
"""
For the testing of complexity, we test our implementations against existing and established ones.
However, some of these other implementations are not really packaged in a way
that lets us easily import them. Thus, we directly copied their content in this file
(below the tests).
"""
# =============================================================================
# Some sanity checks
# =============================================================================
def test_complexity_sanity():
signal = np.cos(np.linspace(start=0, stop=30, num=1000))
# Entropy
assert np.allclose(nk.entropy_fuzzy(signal), nk.entropy_sample(signal, fuzzy=True), atol=0.000001)
# Fractal
assert np.allclose(nk.fractal_dfa(signal, windows=np.array([4, 8, 12, 20])), 2.1009048365682133, atol=0.000001)
assert np.allclose(nk.fractal_dfa(signal), 1.957966586191164, atol=0.000001)
assert np.allclose(nk.fractal_dfa(signal, multifractal=True), 1.957966586191164, atol=0.000001)
assert np.allclose(nk.fractal_correlation(signal), 0.7884473170763334, atol=0.000001)
assert np.allclose(nk.fractal_correlation(signal, r="nolds"), nolds.corr_dim(signal, 2), atol=0.0001)
# =============================================================================
# Comparison against R
# =============================================================================
"""
R code:
library(TSEntropies)
library(pracma)
signal <- read.csv("https://raw.githubusercontent.com/neuropsychology/NeuroKit/master/data/bio_eventrelated_100hz.csv")$RSP
r <- 0.2 * sd(signal)
# ApEn --------------------------------------------------------------------
TSEntropies::ApEn(signal, dim=2, lag=1, r=r)
0.04383386
TSEntropies::ApEn(signal, dim=3, lag=2, r=1)
0.0004269369
pracma::approx_entropy(signal[1:200], edim=2, r=r, elag=1)
0.03632554
# SampEn ------------------------------------------------------------------
TSEntropies::SampEn(signal[1:300], dim=2, lag=1, r=r)
0.04777648
TSEntropies::FastSampEn(signal[1:300], dim=2, lag=1, r=r)
0.003490405
pracma::sample_entropy(signal[1:300], edim=2, tau=1, r=r)
0.03784376
pracma::sample_entropy(signal[1:300], edim=3, tau=2, r=r)
0.09185509
"""
def test_complexity_vs_R():
signal = pd.read_csv(
"https://raw.githubusercontent.com/neuropsychology/NeuroKit/master/data/bio_eventrelated_100hz.csv"
)["RSP"].values
r = 0.2 * np.std(signal, ddof=1)
# ApEn
apen = nk.entropy_approximate(signal, dimension=2, r=r)
assert np.allclose(apen, 0.04383386, atol=0.0001)
apen = nk.entropy_approximate(signal, dimension=3, delay=2, r=1)
assert np.allclose(apen, 0.0004269369, atol=0.0001)
apen = nk.entropy_approximate(signal[0:200], dimension=2, delay=1, r=r)
assert np.allclose(apen, 0.03632554, atol=0.0001)
# SampEn
sampen = nk.entropy_sample(signal[0:300], dimension=2, r=r)
assert np.allclose(sampen, nk.entropy_sample(signal[0:300], dimension=2, r=r, distance="infinity"), atol=0.001)
assert np.allclose(sampen, 0.03784376, atol=0.001)
sampen = nk.entropy_sample(signal[0:300], dimension=3, delay=2, r=r)
assert np.allclose(sampen, 0.09185509, atol=0.01)
# =============================================================================
# Comparison against Python implementations
# =============================================================================
def test_complexity_vs_Python():
signal = np.cos(np.linspace(start=0, stop=30, num=100))
# Shannon
shannon = nk.entropy_shannon(signal)
# assert scipy.stats.entropy(shannon, pd.Series(signal).value_counts())
assert np.allclose(shannon - pyentrp.shannon_entropy(signal), 0)
# Approximate
assert np.allclose(nk.entropy_approximate(signal), 0.17364897858477146)
assert np.allclose(
nk.entropy_approximate(signal, dimension=2, r=0.2 * np.std(signal, ddof=1)) - entropy_app_entropy(signal, 2), 0
)
assert nk.entropy_approximate(signal, dimension=2, r=0.2 * np.std(signal, ddof=1)) != pyeeg_ap_entropy(
signal, 2, 0.2 * np.std(signal, ddof=1)
)
# Sample
assert np.allclose(
nk.entropy_sample(signal, dimension=2, r=0.2 * np.std(signal, ddof=1)) - entropy_sample_entropy(signal, 2), 0
)
assert np.allclose(nk.entropy_sample(signal, dimension=2, r=0.2) - nolds.sampen(signal, 2, 0.2), 0)
assert np.allclose(nk.entropy_sample(signal, dimension=2, r=0.2) - entro_py_sampen(signal, 2, 0.2, scale=False), 0)
assert np.allclose(nk.entropy_sample(signal, dimension=2, r=0.2) - pyeeg_samp_entropy(signal, 2, 0.2), 0)
# import sampen
# sampen.sampen2(signal[0:300], mm=2, r=r)
assert nk.entropy_sample(signal, dimension=2, r=0.2) != pyentrp.sample_entropy(signal, 2, 0.2)[1]
assert (
nk.entropy_sample(signal, dimension=2, r=0.2 * np.sqrt(np.var(signal)))
!= MultiscaleEntropy_sample_entropy(signal, 2, 0.2)[0.2][2]
)
# MSE
# assert nk.entropy_multiscale(signal, 2, 0.2*np.sqrt(np.var(signal))) != np.trapz(MultiscaleEntropy_mse(signal, [i+1 for i in range(10)], 2, 0.2, return_type="list"))
# assert nk.entropy_multiscale(signal, 2, 0.2*np.std(signal, ddof=1)) != np.trapz(pyentrp.multiscale_entropy(signal, 2, 0.2, 10))
# Fuzzy
assert np.allclose(
nk.entropy_fuzzy(signal, dimension=2, r=0.2, delay=1) - entro_py_fuzzyen(signal, 2, 0.2, 1, scale=False), 0
)
# DFA
assert nk.fractal_dfa(signal, windows=np.array([4, 8, 12, 20])) != nolds.dfa(
signal, nvals=[4, 8, 12, 20], fit_exp="poly"
)
# =============================================================================
# Wikipedia
# =============================================================================
def wikipedia_sampen(signal, m=2, r=1):
N = len(signal)
B = 0.0
A = 0.0
# Split time series and save all templates of length m
xmi = np.array([signal[i : i + m] for i in range(N - m)])
xmj = np.array([signal[i : i + m] for i in range(N - m + 1)])
# Save all matches minus the self-match, compute B
B = np.sum([np.sum(np.abs(xmii - xmj).max(axis=1) <= r) - 1 for xmii in xmi])
# Similar for computing A
m += 1
xm = np.array([signal[i : i + m] for i in range(N - m + 1)])
A = np.sum([np.sum(np.abs(xmi - xm).max(axis=1) <= r) - 1 for xmi in xm])
# Return SampEn
return -np.log(A / B)
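# Hedged usage sketch (not part of the original tests): the Wikipedia reference
# implementation above can be compared against NeuroKit on the same signal, e.g.:
# sig = np.cos(np.linspace(start=0, stop=30, num=100))
# r = 0.2 * np.std(sig, ddof=1)
# wikipedia_sampen(sig, m=2, r=r)  # compare with nk.entropy_sample(sig, dimension=2, r=r)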
# =============================================================================
# entropy_estimators (https://github.com/paulbrodersen/entropy_estimators)
# =============================================================================
"""
import numpy as np
from entropy_estimators import continuous
x = np.random.randn(10000)
# I don't know what this compute though
continuous.get_h_mvn(x)
continuous.get_h(x, k=5)
"""
# =============================================================================
# Pyeeg
# =============================================================================
def pyeeg_embed_seq(time_series, tau, embedding_dimension):
if not type(time_series) == np.ndarray:
typed_time_series = np.asarray(time_series)
else:
typed_time_series = time_series
shape = (typed_time_series.size - tau * (embedding_dimension - 1), embedding_dimension)
strides = (typed_time_series.itemsize, tau * typed_time_series.itemsize)
return np.lib.stride_tricks.as_strided(typed_time_series, shape=shape, strides=strides)
def pyeeg_bin_power(X, Band, Fs):
C = np.fft.fft(X)
C = abs(C)
Power = np.zeros(len(Band) - 1)
for Freq_Index in range(0, len(Band) - 1):
Freq = float(Band[Freq_Index])
Next_Freq = float(Band[Freq_Index + 1])
Power[Freq_Index] = sum(C[int(np.floor(Freq / Fs * len(X))) : int(np.floor(Next_Freq / Fs * len(X)))])
Power_Ratio = Power / sum(Power)
return Power, Power_Ratio
def pyeeg_ap_entropy(X, M, R):
N = len(X)
Em = pyeeg_embed_seq(X, 1, M)
A = np.tile(Em, (len(Em), 1, 1))
B = np.transpose(A, [1, 0, 2])
D = np.abs(A - B) # D[i,j,k] = |Em[i][k] - Em[j][k]|
InRange = np.max(D, axis=2) <= R
# Probability that random M-sequences are in range
Cm = InRange.mean(axis=0)
# M+1-sequences in range if M-sequences are in range & last values are close
Dp = np.abs(np.tile(X[M:], (N - M, 1)) - np.tile(X[M:], (N - M, 1)).T)
Cmp = np.logical_and(Dp <= R, InRange[:-1, :-1]).mean(axis=0)
Phi_m, Phi_mp = np.sum(np.log(Cm)), np.sum(np.log(Cmp))
Ap_En = (Phi_m - Phi_mp) / (N - M)
return Ap_En
def pyeeg_samp_entropy(X, M, R):
N = len(X)
Em = pyeeg_embed_seq(X, 1, M)[:-1]
A = np.tile(Em, (len(Em), 1, 1))
B = np.transpose(A, [1, 0, 2])
D = np.abs(A - B) # D[i,j,k] = |Em[i][k] - Em[j][k]|
InRange = np.max(D, axis=2) <= R
np.fill_diagonal(InRange, 0) # Don't count self-matches
Cm = InRange.sum(axis=0) # Probability that random M-sequences are in range
Dp = np.abs(np.tile(X[M:], (N - M, 1)) - np.tile(X[M:], (N - M, 1)).T)
Cmp = np.logical_and(Dp <= R, InRange).sum(axis=0)
# Avoid taking log(0)
Samp_En = np.log(np.sum(Cm + 1e-100) / np.sum(Cmp + 1e-100))
return Samp_En
# =============================================================================
# Entropy
# =============================================================================
from sklearn.neighbors import KDTree
def entropy_embed(x, order=3, delay=1):
N = len(x)
if order * delay > N:
raise ValueError("Error: order * delay should be lower than x.size")
if delay < 1:
raise ValueError("Delay has to be at least 1.")
if order < 2:
raise ValueError("Order has to be at least 2.")
Y = np.zeros((order, N - (order - 1) * delay))
for i in range(order):
Y[i] = x[i * delay : i * delay + Y.shape[1]]
return Y.T
def entropy_app_samp_entropy(x, order, metric="chebyshev", approximate=True):
_all_metrics = KDTree.valid_metrics
if metric not in _all_metrics:
raise ValueError(
"The given metric (%s) is not valid. The valid " "metric names are: %s" % (metric, _all_metrics)
)
phi = np.zeros(2)
r = 0.2 * np.std(x, axis=-1, ddof=1)
# compute phi(order, r)
_emb_data1 = entropy_embed(x, order, 1)
if approximate:
emb_data1 = _emb_data1
else:
emb_data1 = _emb_data1[:-1]
count1 = KDTree(emb_data1, metric=metric).query_radius(emb_data1, r, count_only=True).astype(np.float64)
# compute phi(order + 1, r)
emb_data2 = entropy_embed(x, order + 1, 1)
count2 = KDTree(emb_data2, metric=metric).query_radius(emb_data2, r, count_only=True).astype(np.float64)
if approximate:
phi[0] = np.mean(np.log(count1 / emb_data1.shape[0]))
phi[1] = np.mean(np.log(count2 / emb_data2.shape[0]))
else:
phi[0] = np.mean((count1 - 1) / (emb_data1.shape[0] - 1))
phi[1] = np.mean((count2 - 1) / (emb_data2.shape[0] - 1))
return phi
def entropy_app_entropy(x, order=2, metric="chebyshev"):
phi = entropy_app_samp_entropy(x, order=order, metric=metric, approximate=True)
return np.subtract(phi[0], phi[1])
def entropy_sample_entropy(x, order=2, metric="chebyshev"):
x = np.asarray(x, dtype=np.float64)
phi = entropy_app_samp_entropy(x, order=order, metric=metric, approximate=False)
return -np.log(np.divide(phi[1], phi[0]))
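# Illustrative usage sketch: the KDTree-based estimators above default to the
# Chebyshev metric and derive the tolerance (0.2 * std of the signal) internally,
# so only the signal and embedding order need to be supplied; the signal here is
# hypothetical.
def _entropy_demo():
    x = np.random.randn(1000)
    return entropy_app_entropy(x, order=2), entropy_sample_entropy(x, order=2)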
# =============================================================================
# entro-py
# =============================================================================
def entro_py_sampen(x, dim, r, scale=True):
return entro_py_entropy(x, dim, r, scale=scale)
def entro_py_cross_sampen(x1, x2, dim, r, scale=True):
return entro_py_entropy([x1, x2], dim, r, scale)
def entro_py_fuzzyen(x, dim, r, n, scale=True):
return entro_py_entropy(x, dim, r, n=n, scale=scale, remove_baseline=True)
def entro_py_cross_fuzzyen(x1, x2, dim, r, n, scale=True):
return entro_py_entropy([x1, x2], dim, r, n, scale=scale, remove_baseline=True)
def entro_py_pattern_mat(x, m):
x = np.asarray(x).ravel()
if m == 1:
return x
else:
N = len(x)
patterns = np.zeros((m, N - m + 1))
for i in range(m):
patterns[i, :] = x[i : N - m + i + 1]
return patterns
def entro_py_entropy(x, dim, r, n=1, scale=True, remove_baseline=False):
    fuzzy = bool(remove_baseline)
    cross = isinstance(x, list)
N = len(x[0]) if cross else len(x)
if scale:
if cross:
x = [entro_py_scale(np.copy(x[0])), entro_py_scale(np.copy(x[1]))]
else:
x = entro_py_scale(np.copy(x))
phi = [0, 0] # phi(m), phi(m+1)
for j in [0, 1]:
m = dim + j
npat = N - dim # https://github.com/ixjlyons/entro-py/pull/2/files
if cross:
# patterns = [entro_py_pattern_mat(x[0], m), entro_py_pattern_mat(x[1], m)]
patterns = [
entro_py_pattern_mat(x[0], m)[:, :npat],
entro_py_pattern_mat(x[1], m)[:, :npat],
] # https://github.com/ixjlyons/entro-py/pull/2/files
else:
# patterns = entro_py_pattern_mat(x, m)
patterns = entro_py_pattern_mat(x, m)[:, :npat]
if remove_baseline:
if cross:
patterns[0] = entro_py_remove_baseline(patterns[0], axis=0)
patterns[1] = entro_py_remove_baseline(patterns[1], axis=0)
else:
patterns = entro_py_remove_baseline(patterns, axis=0)
# count = np.zeros(N-m) # https://github.com/ixjlyons/entro-py/pull/2/files
# for i in range(N-m): # https://github.com/ixjlyons/entro-py/pull/2/files
count = np.zeros(npat)
for i in range(npat):
if cross:
if m == 1:
sub = | |
"""
@package mi.instrument.seabird.sbe16plus_v2.ctdpf_jb.test.test_driver
@file marine-integrations/mi/instrument/seabird/sbe16plus_v2/ctdpf_jb/driver.py
@author <NAME>
@brief Test cases for ctdpf_jb driver
USAGE:
Make tests verbose and provide stdout
* From the IDK
$ bin/test_driver
$ bin/test_driver -u [-t testname]
$ bin/test_driver -i [-t testname]
$ bin/test_driver -q [-t testname]
"""
__author__ = '<NAME>'
__license__ = 'Apache 2.0'
import time
from nose.plugins.attrib import attr
from mi.core.log import get_logger
log = get_logger()
from mi.idk.unit_test import InstrumentDriverTestCase
from mi.idk.unit_test import ParameterTestConfigKey
from mi.idk.unit_test import AgentCapabilityType
from mi.core.exceptions import InstrumentCommandException
from mi.core.instrument.chunker import StringChunker
from mi.core.instrument.instrument_driver import DriverConfigKey
from mi.instrument.seabird.sbe16plus_v2.ctdpf_jb.driver import InstrumentDriver
from mi.instrument.seabird.sbe16plus_v2.ctdpf_jb.driver import DataParticleType
from mi.instrument.seabird.sbe16plus_v2.ctdpf_jb.driver import ProtocolState
from mi.instrument.seabird.sbe16plus_v2.driver import ProtocolEvent
from mi.instrument.seabird.sbe16plus_v2.driver import Capability
from mi.instrument.seabird.sbe16plus_v2.ctdpf_jb.driver import Parameter
from mi.instrument.seabird.sbe16plus_v2.ctdpf_jb.driver import SendOptodeCommand
from mi.instrument.seabird.sbe16plus_v2.ctdpf_jb.driver import SBE19DataParticleKey
from mi.instrument.seabird.sbe16plus_v2.ctdpf_jb.driver import SBE19ConfigurationParticleKey
from mi.instrument.seabird.sbe16plus_v2.ctdpf_jb.driver import SBE19HardwareParticleKey
from mi.instrument.seabird.sbe16plus_v2.ctdpf_jb.driver import SBE19StatusParticleKey
from mi.instrument.seabird.sbe16plus_v2.ctdpf_jb.driver import SBE19CalibrationParticleKey
from mi.instrument.seabird.sbe16plus_v2.ctdpf_jb.driver import OptodeSettingsParticleKey
from mi.instrument.seabird.sbe16plus_v2.driver import NEWLINE
from mi.instrument.seabird.sbe16plus_v2.test.test_driver import Sbe16plusUnitTestCase, Sbe16plusQualTestCase, Sbe16plusIntegrationTestCase, \
SeaBird16plusMixin
from mi.instrument.seabird.sbe16plus_v2.ctdpf_jb.driver import SBE19Protocol
from mi.core.instrument.instrument_driver import ResourceAgentState
###
# Driver parameters for the tests
###
InstrumentDriverTestCase.initialize(
driver_module='mi.instrument.seabird.sbe16plus_v2.ctdpf_jb.driver',
driver_class="InstrumentDriver",
instrument_agent_resource_id = 'JI22B5',
instrument_agent_name = 'seabird_sbe16plus_v2_ctdpf_jb',
instrument_agent_packet_config = DataParticleType(),
driver_startup_config = {DriverConfigKey.PARAMETERS:
{
Parameter.PTYPE: 1,
Parameter.VOLT0: True,
Parameter.VOLT1: True,
Parameter.VOLT2: False,
Parameter.VOLT3: False,
Parameter.VOLT4: False,
Parameter.VOLT5: False,
Parameter.SBE38: False,
Parameter.WETLABS: False,
Parameter.GTD: False,
Parameter.DUAL_GTD: False,
Parameter.SBE63: False,
Parameter.OPTODE: True,
Parameter.OUTPUT_FORMAT: 0,
Parameter.NUM_AVG_SAMPLES: 4,
Parameter.MIN_COND_FREQ: 500,
Parameter.PUMP_DELAY: 60,
Parameter.AUTO_RUN: False,
Parameter.IGNORE_SWITCH: True}}
)
#################################### RULES ####################################
# #
# Common capabilities in the base class #
# #
# Instrument specific stuff in the derived class #
# #
# Generator spits out either stubs or comments describing test this here, #
# test that there. #
# #
# Qualification tests are driven through the instrument_agent #
# #
###############################################################################
###############################################################################
# DRIVER TEST MIXIN #
# Defines a set of constants and assert methods used for data particle #
# verification #
# #
# In python mixin classes are classes designed such that they wouldn't be #
# able to stand on their own, but are inherited by other classes generally #
# using multiple inheritance. #
# #
# This class defines a configuration structure for testing and common assert #
# methods for validating data particles. #
###############################################################################
class SeaBird19plusMixin(SeaBird16plusMixin):
InstrumentDriver = InstrumentDriver
# Create some short names for the parameter test config
TYPE = ParameterTestConfigKey.TYPE
READONLY = ParameterTestConfigKey.READONLY
STARTUP = ParameterTestConfigKey.STARTUP
DA = ParameterTestConfigKey.DIRECT_ACCESS
VALUE = ParameterTestConfigKey.VALUE
REQUIRED = ParameterTestConfigKey.REQUIRED
DEFAULT = ParameterTestConfigKey.DEFAULT
STATES = ParameterTestConfigKey.STATES
###
# Instrument output (driver input) Definitions
###
VALID_SAMPLE = "04570F0A1E910828FC47BC59F199952C64C9" + NEWLINE
VALID_GETHD_RESPONSE = "" + \
"<HardwareData DeviceType = 'SBE19plus' SerialNumber = '01906914'>" + NEWLINE + \
" <Manufacturer>Sea-Bird Electronics, Inc.</Manufacturer>" + NEWLINE + \
" <FirmwareVersion>2.3</FirmwareVersion>" + NEWLINE + \
" <FirmwareDate>16 March 2011 08:50</FirmwareDate>" + NEWLINE + \
" <CommandSetVersion>1.2</CommandSetVersion>" + NEWLINE + \
" <PCBAssembly PCBSerialNum = '49577' AssemblyNum = '41054H'/>" + NEWLINE + \
" <PCBAssembly PCBSerialNum = '46750' AssemblyNum = '41580B'/>" + NEWLINE + \
" <PCBAssembly PCBSerialNum = '49374' AssemblyNum = '41606'/>" + NEWLINE + \
" <PCBAssembly PCBSerialNum = '38071' AssemblyNum = '41057A'/>" + NEWLINE + \
" <MfgDate>29 SEP 2011</MfgDate>" + NEWLINE + \
" <InternalSensors>" + NEWLINE + \
" <Sensor id = 'Main Temperature'>" + NEWLINE + \
" <type>temperature0</type>" + NEWLINE + \
" <SerialNumber>01906914</SerialNumber>" + NEWLINE + \
" </Sensor>" + NEWLINE + \
" <Sensor id = 'Main Conductivity'>" + NEWLINE + \
" <type>conductivity-0</type>" + NEWLINE + \
" <SerialNumber>01906914</SerialNumber>" + NEWLINE + \
" </Sensor>" + NEWLINE + \
" <Sensor id = 'Main Pressure'>" + NEWLINE + \
" <type>strain-0</type>" + NEWLINE + \
" <SerialNumber>3313899</SerialNumber>" + NEWLINE + \
" </Sensor>" + NEWLINE + \
" </InternalSensors>" + NEWLINE + \
" <ExternalSensors>" + NEWLINE + \
" <Sensor id = 'volt 0'>" + NEWLINE + \
" <type>not assigned</type>" + NEWLINE + \
" <SerialNumber>not assigned</SerialNumber>" + NEWLINE + \
" </Sensor>" + NEWLINE + \
" <Sensor id = 'volt 1'>" + NEWLINE + \
" <type>not assigned</type>" + NEWLINE + \
" <SerialNumber>not assigned</SerialNumber>" + NEWLINE + \
" </Sensor>" + NEWLINE + \
" <Sensor id = 'volt 2'>" + NEWLINE + \
" <type>not assigned</type>" + NEWLINE + \
" <SerialNumber>not assigned</SerialNumber>" + NEWLINE + \
" </Sensor>" + NEWLINE + \
" <Sensor id = 'volt 3'>" + NEWLINE + \
" <type>not assigned</type>" + NEWLINE + \
" <SerialNumber>not assigned</SerialNumber>" + NEWLINE + \
" </Sensor>" + NEWLINE + \
" <Sensor id = 'volt 4'>" + NEWLINE + \
" <type>not assigned</type>" + NEWLINE + \
" <SerialNumber>not assigned</SerialNumber>" + NEWLINE + \
" </Sensor>" + NEWLINE + \
" <Sensor id = 'volt 5'>" + NEWLINE + \
" <type>not assigned</type>" + NEWLINE + \
" <SerialNumber>not assigned</SerialNumber>" + NEWLINE + \
" </Sensor>" + NEWLINE + \
" <Sensor id = 'serial'>" + NEWLINE + \
" <type>not assigned</type>" + NEWLINE + \
" <SerialNumber>not assigned</SerialNumber>" + NEWLINE + \
" </Sensor>" + NEWLINE + \
" </ExternalSensors>" + NEWLINE + \
"</HardwareData>" + NEWLINE
VALID_GETCC_RESPONSE = "" + \
"<CalibrationCoefficients DeviceType = 'SBE19plus' SerialNumber = '01906914'>" + NEWLINE + \
" <Calibration format = 'TEMP1' id = 'Main Temperature'>" + NEWLINE + \
" <SerialNum>01906914</SerialNum>" + NEWLINE + \
" <CalDate>09-Oct-11</CalDate>" + NEWLINE + \
" <TA0>1.254755e-03</TA0>" + NEWLINE + \
" <TA1>2.758871e-04</TA1>" + NEWLINE + \
" <TA2>-1.368268e-06</TA2>" + NEWLINE + \
" <TA3>1.910795e-07</TA3>" + NEWLINE + \
" <TOFFSET>0.000000e+00</TOFFSET>" + NEWLINE + \
" </Calibration>" + NEWLINE + \
" <Calibration format = 'WBCOND0' id = 'Main Conductivity'>" + NEWLINE + \
" <SerialNum>01906914</SerialNum>" + NEWLINE + \
" <CalDate>09-Oct-11</CalDate>" + NEWLINE + \
" <G>-9.761799e-01</G>" + NEWLINE + \
" <H>1.369994e-01</H>" + NEWLINE + \
" <I>-3.523860e-04</I>" + NEWLINE + \
" <J>4.404252e-05</J>" + NEWLINE + \
" <CPCOR>-9.570000e-08</CPCOR>" + NEWLINE + \
" <CTCOR>3.250000e-06</CTCOR>" + NEWLINE + \
" <CSLOPE>1.000000e+00</CSLOPE>" + NEWLINE + \
" </Calibration>" + NEWLINE + \
" <Calibration format = 'STRAIN0' id = 'Main Pressure'>" + NEWLINE + \
" <SerialNum>3313899</SerialNum>" + NEWLINE + \
" <CalDate>06-Oct-11</CalDate>" + NEWLINE + \
" <PA0>-3.689246e-02</PA0>" + NEWLINE + \
" <PA1>1.545570e-03</PA1>" + NEWLINE + \
" <PA2>6.733197e-12</PA2>" + NEWLINE + \
" <PTCA0>5.249034e+05</PTCA0>" + NEWLINE + \
" <PTCA1>1.423189e+00</PTCA1>" + NEWLINE + \
" <PTCA2>-1.206562e-01</PTCA2>" + NEWLINE + \
" <PTCB0>2.501288e+01</PTCB0>" + NEWLINE + \
" <PTCB1>-2.250000e-04</PTCB1>" + NEWLINE + \
" <PTCB2>0.000000e+00</PTCB2>" + NEWLINE + \
" <PTEMPA0>-5.677620e+01</PTEMPA0>" + NEWLINE + \
" <PTEMPA1>5.424624e+01</PTEMPA1>" + NEWLINE + \
" <PTEMPA2>-2.278113e-01</PTEMPA2>" + NEWLINE + \
" <POFFSET>0.000000e+00</POFFSET>" + NEWLINE + \
" <PRANGE>5.080000e+02</PRANGE>" + NEWLINE + \
" </Calibration>" + NEWLINE + \
" <Calibration format = 'VOLT0' id = 'Volt 0'>" + NEWLINE + \
" <OFFSET>-4.650526e-02</OFFSET>" + NEWLINE + \
" <SLOPE>1.246381e+00</SLOPE>" + NEWLINE + \
" </Calibration>" + NEWLINE + \
" <Calibration format = 'VOLT0' id = 'Volt 1'>" + NEWLINE + \
" <OFFSET>-4.618105e-02</OFFSET>" + NEWLINE + \
" <SLOPE>1.247197e+00</SLOPE>" + NEWLINE + \
" </Calibration>" + NEWLINE + \
" <Calibration format = 'VOLT0' id = 'Volt 2'>" + NEWLINE + \
" <OFFSET>-4.659790e-02</OFFSET>" + NEWLINE + \
" <SLOPE>1.247601e+00</SLOPE>" + NEWLINE + \
" </Calibration>" + NEWLINE + \
" <Calibration format = 'VOLT0' id = 'Volt 3'>" + NEWLINE + \
" <OFFSET>-4.502421e-02</OFFSET>" + NEWLINE + \
" <SLOPE>1.246911e+00</SLOPE>" + NEWLINE + \
" </Calibration>" + NEWLINE + \
" <Calibration format = 'VOLT0' id = 'Volt 4'>" + NEWLINE + \
" <OFFSET>-4.589158e-02</OFFSET>" + NEWLINE + \
" <SLOPE>1.246346e+00</SLOPE>" + NEWLINE + \
" </Calibration>" + NEWLINE + \
" <Calibration format = 'VOLT0' id = 'Volt 5'>" + NEWLINE + \
" <OFFSET>-4.609895e-02</OFFSET>" + NEWLINE + \
" <SLOPE>1.247868e+00</SLOPE>" + NEWLINE + \
" </Calibration>" + NEWLINE + \
" <Calibration format = 'FREQ0' id = 'external frequency channel'>" + NEWLINE + \
" <EXTFREQSF>1.000008e+00</EXTFREQSF>" + NEWLINE + \
" </Calibration>" + NEWLINE + \
"</CalibrationCoefficients>" + NEWLINE
VALID_GETCD_RESPONSE = "" + \
"<ConfigurationData DeviceType = 'SBE19plus' SerialNumber = '01906914'>" + NEWLINE + \
" <ProfileMode>" + NEWLINE + \
" <ScansToAverage>4</ScansToAverage>" + NEWLINE + \
" <MinimumCondFreq>2500</MinimumCondFreq>" + NEWLINE + \
" <PumpDelay>15</PumpDelay>" + NEWLINE + \
" <AutoRun>no</AutoRun>" + NEWLINE + \
" <IgnoreSwitch>yes</IgnoreSwitch>" + NEWLINE + \
" </ProfileMode>" + NEWLINE + \
" <Battery>" + NEWLINE + \
" <Type>alkaline</Type>" + NEWLINE + \
" <CutOff>7.5</CutOff>" + NEWLINE + \
" </Battery>" + NEWLINE + \
" <DataChannels>" + NEWLINE + \
" <ExtVolt0>yes</ExtVolt0>" + NEWLINE + \
" <ExtVolt1>yes</ExtVolt1>" + NEWLINE + \
" <ExtVolt2>no</ExtVolt2>" + NEWLINE + \
" <ExtVolt3>no</ExtVolt3>" + NEWLINE + \
" <ExtVolt4>no</ExtVolt4>" + NEWLINE + \
" <ExtVolt5>no</ExtVolt5>" + NEWLINE + \
" <SBE38>no</SBE38>" + NEWLINE + \
" <WETLABS>no</WETLABS>" + NEWLINE + \
" <OPTODE>yes</OPTODE>" + NEWLINE + \
" <SBE63>no</SBE63>" + NEWLINE + \
" <GTD>no</GTD>" + NEWLINE + \
" </DataChannels>" + NEWLINE + \
" <EchoCharacters>yes</EchoCharacters>" + NEWLINE + \
" <OutputExecutedTag>no</OutputExecutedTag>" + NEWLINE + \
" <OutputFormat>raw HEX</OutputFormat>" + NEWLINE + \
"</ConfigurationData>" | |
import tensorflow as tf
import tensorflow.contrib as tf_contrib
import numpy as np
from tensorflow.python.ops import rnn
from utils import pytorch_kaiming_weight_factor
##################################################################################
# Initialization
##################################################################################
# factor, mode, uniform = pytorch_kaiming_weight_factor(a=0.0, uniform=False)
# weight_init = tf_contrib.layers.variance_scaling_initializer(factor=factor, mode=mode, uniform=uniform)
weight_init = tf.random_normal_initializer(mean=0.0, stddev=0.02)
weight_regularizer = None
weight_regularizer_fully = None
##################################################################################
# Layer
##################################################################################
def conv(x, channels, kernel=4, stride=2, pad=0, pad_type='zero', use_bias=True, sn=False, scope='conv_0'):
with tf.variable_scope(scope):
if pad > 0:
h = x.get_shape().as_list()[1]
if h % stride == 0:
pad = pad * 2
else:
pad = max(kernel - (h % stride), 0)
pad_top = pad // 2
pad_bottom = pad - pad_top
pad_left = pad // 2
pad_right = pad - pad_left
if pad_type == 'zero':
x = tf.pad(x, [[0, 0], [pad_top, pad_bottom], [pad_left, pad_right], [0, 0]])
if pad_type == 'reflect':
x = tf.pad(x, [[0, 0], [pad_top, pad_bottom], [pad_left, pad_right], [0, 0]], mode='REFLECT')
if sn:
w = tf.get_variable("kernel", shape=[kernel, kernel, x.get_shape()[-1], channels], initializer=weight_init,
regularizer=weight_regularizer)
x = tf.nn.conv2d(input=x, filter=spectral_norm(w),
strides=[1, stride, stride, 1], padding='VALID')
if use_bias:
bias = tf.get_variable("bias", [channels], initializer=tf.constant_initializer(0.0))
x = tf.nn.bias_add(x, bias)
else:
x = tf.layers.conv2d(inputs=x, filters=channels,
kernel_size=kernel, kernel_initializer=weight_init,
kernel_regularizer=weight_regularizer,
strides=stride, use_bias=use_bias)
return x
def fully_connected(x, units, use_bias=True, sn=False, scope='linear'):
with tf.variable_scope(scope):
x = flatten(x)
shape = x.get_shape().as_list()
channels = shape[-1]
if sn:
w = tf.get_variable("kernel", [channels, units], tf.float32,
initializer=weight_init, regularizer=weight_regularizer_fully)
if use_bias:
bias = tf.get_variable("bias", [units],
initializer=tf.constant_initializer(0.0))
x = tf.matmul(x, spectral_norm(w)) + bias
else:
x = tf.matmul(x, spectral_norm(w))
else:
x = tf.layers.dense(x, units=units, kernel_initializer=weight_init,
kernel_regularizer=weight_regularizer_fully,
use_bias=use_bias)
return x
def flatten(x):
return tf.layers.flatten(x)
def various_rnn(x, n_layer=1, n_hidden=128, dropout_rate=0.5, bidirectional=True, rnn_type='lstm', scope='rnn') :
if rnn_type.lower() == 'lstm' :
cell_type = tf.nn.rnn_cell.LSTMCell
elif rnn_type.lower() == 'gru' :
cell_type = tf.nn.rnn_cell.GRUCell
else :
raise NotImplementedError
with tf.variable_scope(scope):
if bidirectional:
if n_layer > 1 :
fw_cells = [cell_type(n_hidden) for _ in range(n_layer)]
bw_cells = [cell_type(n_hidden) for _ in range(n_layer)]
if dropout_rate > 0.0:
fw_cell = [tf.nn.rnn_cell.DropoutWrapper(cell=fw_cell, output_keep_prob=1 - dropout_rate) for fw_cell in fw_cells]
bw_cell = [tf.nn.rnn_cell.DropoutWrapper(cell=bw_cell, output_keep_prob=1 - dropout_rate) for bw_cell in bw_cells]
fw_cell = tf.nn.rnn_cell.MultiRNNCell(fw_cell)
bw_cell = tf.nn.rnn_cell.MultiRNNCell(bw_cell)
else :
fw_cell = cell_type(n_hidden)
bw_cell = cell_type(n_hidden)
if dropout_rate > 0.0 :
fw_cell = tf.nn.rnn_cell.DropoutWrapper(cell=fw_cell, output_keep_prob=1 - dropout_rate)
bw_cell = tf.nn.rnn_cell.DropoutWrapper(cell=bw_cell, output_keep_prob=1 - dropout_rate)
outputs, states = rnn.bidirectional_dynamic_rnn(cell_fw=fw_cell, cell_bw=bw_cell, inputs=x, dtype=tf.float32)
            # outputs = all states (one per time step)
            # states = last state = output[-1]
output_fw, output_bw = outputs[0], outputs[1] # [bs, seq_len, n_hidden]
state_fw, state_bw = states[0], states[1]
words_emb = tf.concat([output_fw, output_bw], axis=-1) # [bs, seq_len, n_hidden * 2]
# state_fw[0] = cell state
# state_fw[1] = hidden state
if rnn_type.lower() == 'lstm':
sent_emb = tf.concat([state_fw[1], state_bw[1]], axis=-1) # [bs, n_hidden * 2]
elif rnn_type.lower() == 'gru':
sent_emb = tf.concat([state_fw, state_bw], axis=-1) # [bs, n_hidden * 2]
else :
raise NotImplementedError
else :
if n_layer > 1 :
cells = [cell_type(n_hidden) for _ in range(n_layer)]
if dropout_rate > 0.0 :
cell = [tf.nn.rnn_cell.DropoutWrapper(cell=cell, output_keep_prob=1 - dropout_rate) for cell in cells]
cell = tf.nn.rnn_cell.MultiRNNCell(cell)
else :
cell = cell_type(n_hidden)
if dropout_rate > 0.0 :
cell = tf.nn.rnn_cell.DropoutWrapper(cell=cell, output_keep_prob=1 - dropout_rate)
outputs, states = rnn.dynamic_rnn(cell, inputs=x, dtype=tf.float32)
words_emb = outputs # [bs, seq_len, n_hidden]
# states[0] = cell state
# states[1] = hidden state
if rnn_type.lower() == 'lstm' :
sent_emb = states[1] # [bs, n_hidden]
elif rnn_type.lower() == 'gru' :
sent_emb = states # [bs, n_hidden]
else :
raise NotImplementedError
return words_emb, sent_emb
##################################################################################
# Residual-block
##################################################################################
def resblock(x_init, channels, is_training=True, use_bias=True, sn=False, scope='resblock'):
with tf.variable_scope(scope):
with tf.variable_scope('res1'):
x = conv(x_init, channels * 2, kernel=3, stride=1, pad=1, pad_type='reflect', use_bias=use_bias, sn=sn)
x = batch_norm(x, is_training)
x = glu(x)
with tf.variable_scope('res2'):
x = conv(x, channels, kernel=3, stride=1, pad=1, pad_type='reflect', use_bias=use_bias, sn=sn)
x = batch_norm(x, is_training)
return x + x_init
def up_block(x_init, channels, is_training=True, use_bias=True, sn=False, scope='up_block'):
with tf.variable_scope(scope):
x = up_sample(x_init, scale_factor=2)
x = conv(x, channels * 2, kernel=3, stride=1, pad=1, pad_type='reflect', use_bias=use_bias, sn=sn)
x = batch_norm(x, is_training)
x = glu(x)
return x
def down_block(x_init, channels, is_training=True, use_bias=True, sn=False, scope='down_block'):
with tf.variable_scope(scope):
x = conv(x_init, channels, kernel=4, stride=2, pad=1, pad_type='reflect', use_bias=use_bias, sn=sn)
x = batch_norm(x, is_training)
x = lrelu(x, 0.2)
return x
def attention_net(x, sent_vec, word_emb, mask, channels, use_bias=True, sn=False, scope='attention_net'):
with tf.variable_scope(scope):
# channels = x.shape[3], idf
bs, h, w = x.shape[0], x.shape[1], x.shape[2]
hw = h * w # length of query
seq_len = word_emb.shape[1] # length of source
x = tf.reshape(x, shape=[bs, hw, -1])
word_emb = tf.expand_dims(word_emb, axis=1)
word_emb = conv(word_emb, channels, kernel=1, stride=1, use_bias=use_bias, sn=sn, scope='word_conv')
word_emb = tf.squeeze(word_emb, axis=1)
attn = tf.matmul(x, word_emb, transpose_b=True) # [bs, hw, seq_len]
attn = tf.reshape(attn, shape=[bs*hw, seq_len])
mask = tf.tile(mask, multiples=[hw, 1])
attn = tf.where(tf.equal(mask, True), x=tf.constant(-float('inf'), dtype=tf.float32, shape=mask.shape), y=attn)
attn = tf.nn.softmax(attn)
attn = tf.reshape(attn, shape=[bs, hw, seq_len])
weighted_context = tf.matmul(word_emb, attn, transpose_a=True, transpose_b=True)
weighted_context = tf.reshape(weighted_context, shape=[bs, h, w, -1])
word_attn = tf.reshape(attn, shape=[bs, h, w, -1])
# Eq(5) in MirrorGAN: global-level attention
sent_vec = fully_connected(sent_vec, units=channels, use_bias=True, sn=sn, scope='sent_fc')
sent_vec = tf.reshape(sent_vec, shape=[bs, 1, 1, -1])
sent_vec = tf.tile(sent_vec, multiples=[1, h, w, 1])
x = tf.reshape(x, shape=[bs, h, w, -1])
sent_vec_ = x * sent_vec
sent_vec_ = conv(sent_vec_, channels, kernel=1, stride=1, use_bias=use_bias, sn=sn, scope='sent_conv')
sent_attn = tf.nn.softmax(sent_vec_)
weighted_sentence = sent_vec * sent_attn
return weighted_context, weighted_sentence, word_attn, sent_attn
##################################################################################
# Sampling
##################################################################################
def dropout(x, drop_rate=0.5, is_training=True):
return tf.layers.dropout(x, drop_rate, training=is_training)
def up_sample(x, scale_factor=2):
_, h, w, _ = x.get_shape().as_list()
new_size = [h * scale_factor, w * scale_factor]
return tf.image.resize_nearest_neighbor(x, size=new_size)
def resize(x, target_size):
return tf.image.resize_bilinear(x, size=target_size)
def down_sample_avg(x, scale_factor=2):
return tf.layers.average_pooling2d(x, pool_size=3, strides=scale_factor, padding='SAME')
def global_avg_pooling(x):
gap = tf.reduce_mean(x, axis=[1, 2], keepdims=True)
return gap
def reparametrize(mean, logvar):
eps = tf.random_normal(tf.shape(mean), mean=0.0, stddev=1.0, dtype=tf.float32)
return mean + tf.exp(logvar * 0.5) * eps
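# Note on reparametrize(): this is the VAE reparameterization trick,
# z = mean + exp(logvar / 2) * eps with eps ~ N(0, 1), which keeps sampling
# differentiable with respect to mean and logvar.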
##################################################################################
# Activation function
##################################################################################
def lrelu(x, alpha=0.01):
# pytorch alpha is 0.01
return tf.nn.leaky_relu(x, alpha)
def relu(x):
return tf.nn.relu(x)
def tanh(x):
return tf.tanh(x)
def sigmoid(x) :
    return tf.sigmoid(x)
def glu(x) :
    ch = x.shape[-1]
    ch = ch // 2
    n_dim = len(np.shape(x))
    if n_dim == 2:
        return x[:, :ch] * sigmoid(x[:, ch:])
    else : # n_dim = 4
        return x[:, :, :, :ch] * sigmoid(x[:, :, :, ch:])
##################################################################################
# Normalization function
##################################################################################
def batch_norm(x, is_training=False, decay=0.9, scope='batch_norm'):
"""
if x_norm = tf.layers.batch_normalization
# ...
with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)):
train_op = optimizer.minimize(loss)
"""
return tf_contrib.layers.batch_norm(x,
decay=decay, epsilon=1e-05,
center=True, scale=True, updates_collections=None,
is_training=is_training, scope=scope)
# return tf.layers.batch_normalization(x, momentum=0.9, epsilon=1e-05, center=True, scale=True, training=is_training, name=scope)
def spectral_norm(w, iteration=1):
w_shape = w.shape.as_list()
w = tf.reshape(w, [-1, w_shape[-1]])
u = tf.get_variable("u", [1, w_shape[-1]], initializer=tf.random_normal_initializer(), trainable=False)
u_hat = u
v_hat = None
for i in range(iteration):
"""
power iteration
Usually iteration = 1 will be enough
"""
v_ = tf.matmul(u_hat, tf.transpose(w))
v_hat = tf.nn.l2_normalize(v_)
u_ = tf.matmul(v_hat, w)
u_hat = tf.nn.l2_normalize(u_)
u_hat = tf.stop_gradient(u_hat)
v_hat = tf.stop_gradient(v_hat)
sigma = tf.matmul(tf.matmul(v_hat, w), tf.transpose(u_hat))
with tf.control_dependencies([u.assign(u_hat)]):
w_norm = w / sigma
w_norm = tf.reshape(w_norm, w_shape)
return w_norm
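# Note on spectral_norm(): one power-iteration step drives u_hat and v_hat toward the
# dominant singular vectors, so sigma = v_hat * w * u_hat^T estimates the largest
# singular value of w; dividing by sigma keeps the layer's spectral norm near 1,
# as in spectral normalization for GANs.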
##################################################################################
# Loss function
##################################################################################
def L1_loss(x, y):
loss = tf.reduce_mean(tf.abs(x - y)) # [64, h, w, c]
return loss
def discriminator_loss(gan_type, real_logit, fake_logit):
real_loss = 0
fake_loss = 0
if real_logit is None :
if gan_type == 'lsgan':
fake_loss = tf.reduce_mean(tf.square(fake_logit))
if gan_type == 'gan':
fake_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(labels=tf.zeros_like(fake_logit), logits=fake_logit))
if gan_type == 'hinge':
fake_loss = tf.reduce_mean(relu(1 + fake_logit))
else :
if gan_type == 'lsgan':
real_loss = tf.reduce_mean(tf.squared_difference(real_logit, 1.0))
fake_loss = tf.reduce_mean(tf.square(fake_logit))
if gan_type == 'gan':
real_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(labels=tf.ones_like(real_logit), logits=real_logit))
fake_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(labels=tf.zeros_like(fake_logit), logits=fake_logit))
if gan_type == 'hinge':
real_loss = tf.reduce_mean(relu(1 - real_logit))
fake_loss = tf.reduce_mean(relu(1 + fake_logit))
return real_loss, fake_loss
def generator_loss(gan_type, fake_logit):
fake_loss = 0
if gan_type == 'lsgan':
fake_loss = tf.reduce_mean(tf.squared_difference(fake_logit, 1.0))
if gan_type == 'gan':
fake_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(labels=tf.ones_like(fake_logit), logits=fake_logit))
if gan_type == 'hinge':
fake_loss = -tf.reduce_mean(fake_logit)
return fake_loss
def get_inception_feature(x) :
from keras.applications.inception_v3 import preprocess_input as inception_preprocess
from keras.applications.inception_v3 import InceptionV3
from keras.models import Model
x = resize(x, [299, 299])
x = ((x + 1) / 2) * 255.0
x = inception_preprocess(x)
inception_v3_model = InceptionV3(weights='imagenet', include_top=False)
inception_v3_model.trainable = False
mixed_7_feature = Model(inputs=inception_v3_model.input, outputs=inception_v3_model.get_layer('mixed7').output)
mixed_7_features = mixed_7_feature.predict(x)
last_feature = inception_v3_model.predict(x)
return mixed_7_features, last_feature
def regularization_loss(scope_name):
"""
If you want to use "Regularization"
g_loss += regularization_loss('generator')
d_loss += regularization_loss('discriminator')
"""
collection_regularization = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
loss = []
for item in collection_regularization:
if scope_name in item.name:
loss.append(item)
return tf.reduce_sum(loss)
def kl_loss(mean, logvar):
# shape : [batch_size, channel]
# loss = 0.5 * tf.reduce_sum(tf.square(mean) + tf.exp(logvar) - 1 - logvar, axis=-1)
# loss = tf.reduce_mean(loss)
    loss = 0.5 * tf.reduce_mean(tf.square(mean) + tf.exp(logvar) - 1 - logvar)
    return loss
# coding: utf-8
# Copyright 2016 The LUCI Authors. All rights reserved.
# Use of this source code is governed under the Apache License, Version 2.0
# that can be found in the LICENSE file.
import base64
import datetime
import hashlib
import logging
import os
import threading
import time
import traceback
from six.moves import urllib
from utils import net
from bot_code.remote_client_errors import BotCodeError
from bot_code.remote_client_errors import InitializationError
from bot_code.remote_client_errors import InternalError
from bot_code.remote_client_errors import MintOAuthTokenError
from bot_code.remote_client_errors import PollError
# RemoteClient will attempt to refresh the authentication headers once they are
# this close to the expiration.
#
# The total possible delay between the headers are checked and used is the sum:
# 1) FileRefresherThread update interval (15 sec).
# 2) FileReaderThread update interval (15 sec).
# 3) NET_CONNECTION_TIMEOUT_SEC, when resending requests on errors (3 min).
# 4) Various random delays if Swarming bot process is preempted by task
# processes (e.g. heavy tests) that consume 100% of CPU.
#
# AUTH_HEADERS_EXPIRATION_SEC must be larger than this sum.
#
# Additionally, there's an upper limit: AUTH_HEADERS_EXPIRATION_SEC must be less
# than the minimum expiration time of headers produced by bot_config's
# get_authentication_headers hook (otherwise we'll be calling this hook all the
# time). On GCE machines it is usually 10 min.
AUTH_HEADERS_EXPIRATION_SEC = 9*60+30
# How long to wait for a response from the server. Must not be greater than
# AUTH_HEADERS_EXPIRATION_SEC, since otherwise there's a chance auth headers
# will expire while we wait for connection.
NET_CONNECTION_TIMEOUT_SEC = 3*60
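# Worked check of the constraint above (component delays taken from the comment):
# 15 sec (FileRefresherThread) + 15 sec (FileReaderThread) + 180 sec
# (NET_CONNECTION_TIMEOUT_SEC) = 210 sec of worst-case staleness, leaving roughly
# 360 sec of slack under AUTH_HEADERS_EXPIRATION_SEC = 570 sec for scheduling delays.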
def createRemoteClient(server, auth, hostname, work_dir):
return RemoteClientNative(server, auth, hostname, work_dir)
def utcnow():
return datetime.datetime.utcnow()
def make_appengine_id(hostname, work_dir):
"""Generate a value to use in the GOOGAPPUID cookie for AppEngine.
AppEngine looks for this cookie: if it contains a value in the range 0-999,
it is used to split traffic. For more details, see:
https://cloud.google.com/appengine/docs/flexible/python/splitting-traffic
The bot code will send requests with a value generated locally:
GOOGAPPUID = sha1('YYYY-MM-DD-hostname:work_dir') % 1000
(from go/swarming-release-canaries)
This scheme should result in the values being roughly uniformly distributed.
The date is included in the hash to ensure that across different rollouts,
it's not the same set of bots being used as the canary (otherwise we might
be unlucky and get a unrepresentative sample).
Args:
hostname: The short hostname of the bot.
work_dir: The working directory used by the bot.
Returns:
An integer in the range [0, 999].
"""
s = '%s-%s:%s' % (utcnow().strftime('%Y-%m-%d'), hostname, work_dir)
googappuid = int(hashlib.sha1(s.encode('utf-8')).hexdigest(), 16) % 1000
logging.debug('GOOGAPPUID = sha1(%s) %% 1000 = %d', s, googappuid)
return googappuid
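# Illustrative example (hypothetical hostname and work_dir): on 2021-06-01 a bot
# named "swarm-bot-1" with work_dir "/b/swarm" would send
#   GOOGAPPUID = sha1('2021-06-01-swarm-bot-1:/b/swarm') % 1000
# and, since the hash is roughly uniform, about 0.1% of bots share any given value.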
class RemoteClientNative(object):
"""RemoteClientNative knows how to make authenticated calls to the backend.
It also holds in-memory cache of authentication headers and periodically
refreshes them (by calling supplied callback, that usually is implemented in
terms of bot_config.get_authentication_headers() function).
If the callback is None, skips authentication (this is used during initial
stages of the bot bootstrap).
If the callback returns (*, None), disables authentication. This allows
bot_config.py to disable strong authentication on machines that don't have any
credentials (the server uses only IP whitelist check in this case).
If the callback returns (*, 0), effectively disables the caching of headers:
the callback will be called for each request.
"""
def __init__(self, server, auth_headers_callback, hostname, work_dir):
self._server = server
self._auth_headers_callback = auth_headers_callback
self._lock = threading.Lock()
self._headers = None
self._exp_ts = None
self._disabled = not auth_headers_callback
self._bot_hostname = hostname
self._bot_work_dir = work_dir
self._bot_id = None
@property
def server(self):
return self._server
@property
def bot_id(self):
return self._bot_id
@bot_id.setter
def bot_id(self, bid):
self._bot_id = bid
def initialize(self, quit_bit=None):
"""Grabs initial auth headers, retrying on errors a bunch of times.
Disabled authentication (when auth_headers_callback returns None) is not
an error. Retries only real exceptions raised by the callback.
Raises InitializationError if all attempts fail. Aborts attempts and returns
if quit_bit is signaled. If quit_bit is None, retries until success or until
all attempts fail.
"""
attempts = 30
while not quit_bit or not quit_bit.is_set():
try:
logging.info('Fetching initial auth headers')
headers = self._get_headers_or_throw()
logging.info('Got auth headers: %s', headers.keys() or 'none')
return
except Exception as e:
last_error = '%s\n%s' % (e, traceback.format_exc()[-2048:])
logging.exception('Failed to grab initial auth headers')
attempts -= 1
if not attempts:
raise InitializationError(last_error)
time.sleep(2)
@property
def uses_auth(self):
"""Returns True if get_authentication_headers() returns some headers.
If bot_config.get_authentication_headers() is not implement it will return
False.
"""
return bool(self.get_authentication_headers())
def get_headers(self, include_auth=False):
"""Returns the headers to use to send a request.
Args:
include_auth: Whether or not to include authentication headers.
Returns:
A dict of HTTP headers.
"""
googappuid = make_appengine_id(self._bot_hostname, self._bot_work_dir)
headers = {'Cookie': 'GOOGAPPUID=%d' % googappuid}
if self.bot_id:
headers['X-Luci-Swarming-Bot-ID'] = self._bot_id
if include_auth:
headers.update(self.get_authentication_headers())
return headers
def get_authentication_headers(self):
"""Returns a dict with the headers, refreshing them if necessary.
Will always return a dict (perhaps empty if no auth headers are provided by
the callback or it has failed).
"""
try:
return self._get_headers_or_throw()
except Exception:
logging.exception('Failed to refresh auth headers, using cached ones')
return self._headers or {}
@property
def authentication_headers_expiration(self):
"""Returns int unix timestamp of when current cached auth headers expire.
Returns 0 if unknown or None if not using auth at all.
"""
return int(self._exp_ts) if not self._disabled else None
def _get_headers_or_throw(self):
if self._disabled:
return {}
with self._lock:
if (not self._exp_ts or
self._exp_ts - time.time() < AUTH_HEADERS_EXPIRATION_SEC):
self._headers, self._exp_ts = self._auth_headers_callback()
if self._exp_ts is None:
logging.info('Headers callback returned None, disabling auth')
self._disabled = True
self._headers = {}
elif self._exp_ts:
next_check = max(
0, self._exp_ts - AUTH_HEADERS_EXPIRATION_SEC - time.time())
if self._headers:
logging.info(
'Fetched auth headers (%s), they expire in %d sec. '
'Next check in %d sec.', self._headers.keys(),
self._exp_ts - time.time(), next_check)
else:
logging.info(
'No headers available yet, next check in %d sec.', next_check)
else:
logging.info('Using auth headers (%s).', self._headers.keys())
return self._headers or {}
def _url_read_json(self, url_path, data=None):
"""Does POST (if data is not None) or GET request to a JSON endpoint."""
return net.url_read_json(
self._server + url_path,
data=data,
headers=self.get_headers(include_auth=True),
timeout=NET_CONNECTION_TIMEOUT_SEC,
follow_redirects=False)
def _url_retrieve(self, filepath, url_path):
"""Fetches the file from the given URL path on the server."""
return net.url_retrieve(
filepath,
self._server + url_path,
headers=self.get_headers(include_auth=True),
timeout=NET_CONNECTION_TIMEOUT_SEC)
def post_bot_event(self, event_type, message, attributes):
"""Logs bot-specific info to the server"""
data = attributes.copy()
data['event'] = event_type
data['message'] = message
self._url_read_json('/swarming/api/v1/bot/event', data=data)
def post_task_update(self,
task_id,
params,
stdout_and_chunk=None,
exit_code=None):
"""Posts task update to task_update.
Arguments:
      stdout_and_chunk: A (stdout, chunk_start) pair: incremental output since
          the last call, and the total number of stdout bytes previously sent,
          for coherency with the server. None if there is no new output.
params: Default JSON parameters for the POST.
exit_code: if None, this is an intermediate update. If non-None, this is
the final update.
Returns:
False if the task should stop.
Raises:
InternalError if can't contact the server after many attempts or the
server replies with an error.
"""
data = {
'id': self.bot_id,
'task_id': task_id,
}
data.update(params)
# Preserving prior behaviour: empty stdout is not transmitted
if stdout_and_chunk and stdout_and_chunk[0]:
data['output'] = base64.b64encode(stdout_and_chunk[0]).decode()
data['output_chunk_start'] = stdout_and_chunk[1]
    if exit_code is not None:
data['exit_code'] = exit_code
resp = self._url_read_json(
'/swarming/api/v1/bot/task_update/%s' % task_id, data)
logging.debug('post_task_update() = %s', resp)
if not resp or resp.get('error'):
raise InternalError(
resp.get('error') if resp else 'Failed to contact server')
return not resp.get('must_stop', False)
def post_task_error(self, task_id, message):
"""Logs task-specific info to the server"""
data = {
'id': self.bot_id,
'message': message,
'task_id': task_id,
}
resp = self._url_read_json(
'/swarming/api/v1/bot/task_error/%s' % task_id,
data=data)
return resp and resp['resp'] == 1
def do_handshake(self, attributes):
"""Performs the initial handshake. Returns a dict (contents TBD)"""
return self._url_read_json(
'/swarming/api/v1/bot/handshake',
data=attributes)
def poll(self, attributes):
"""Polls for new work or other commands; returns a (cmd, value) pair as
shown below.
Raises:
PollError if can't contact the server after many attempts, the server
replies with an error or the returned dict does not have the correct
values set.
"""
resp = self._url_read_json('/swarming/api/v1/bot/poll', data=attributes)
if not resp or resp.get('error'):
raise PollError(
resp.get('error') if resp else 'Failed to contact server')
cmd = resp['cmd']
if cmd == 'sleep':
return (cmd, resp['duration'])
if cmd == 'terminate':
return (cmd, resp['task_id'])
if cmd == 'run':
return (cmd, resp['manifest'])
if cmd == 'update':
return (cmd, resp['version'])
if cmd in ('restart', 'host_reboot'):
return (cmd, resp['message'])
if cmd == 'bot_restart':
return (cmd, resp['message'])
raise PollError('Unexpected command: %s\n%s' % (cmd, resp))
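  # Illustrative usage sketch (hypothetical caller, simplified from the bot loop;
  # run_manifest is a placeholder, not part of this module):
  #   cmd, value = remote.poll(attributes)
  #   if cmd == 'sleep':
  #       time.sleep(value)      # value is a duration in seconds
  #   elif cmd == 'run':
  #       run_manifest(value)    # value is the task manifest dict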
def get_bot_code(self, new_zip_path, bot_version):
"""Downloads code into the file | |
        if args and args.has_key('hrn'):
hrn = args['hrn']
if credential is None:
raise Exception("Register missing credential")
# Validate user credential
creds = list()
creds.append(credential)
privs = ()
self._cred_verifier.verify_from_strings(user_certstr, creds, None, privs)
# confirm type is Slice or User
if not type:
self.logger.error("Missing type to Resolve")
raise Exception("Missing type to Resolve")
if not type.lower() == 'slice':
self.logger.error("Tried to register type %s" % type)
raise Exception("Can't register non slice %s" % type)
if not urn and hrn is not None:
# Convert hrn to urn
urn = sfa.util.xrn.hrn_to_urn(hrn, "slice")
#raise Exception("hrn to Register not supported")
if not urn or not urn_util.is_valid_urn(urn):
raise Exception("invalid slice urn to create: %s" % urn)
if self.gcf:
return self.CreateSlice(urn)
else:
# Infer owner_id from current user's cert and uuid in there
# pull out slice name from urn
# but what about project_id? look for something after authority before +authority+?
try:
owner_id = str(uuidModule.UUID(int=user_gid.get_uuid()))
except Exception, e:
self.logger.error("Register(urn=%s): Failed to find owner account ID from UUID in user cert: %s", urn, e)
raise
sUrn = urn_util.URN(urn=urn)
slice_name = sUrn.getName()
slice_auth = sUrn.getAuthority()
# Compare that with SLICE_AUTHORITY
project_id = ''
if slice_auth and slice_auth.startswith(SLICE_AUTHORITY) and len(slice_auth) > len(SLICE_AUTHORITY)+1:
project_name = slice_auth[len(SLICE_AUTHORITY)+2:]
self.logger.info("Creating slice in project %s" % project_name)
if project_name.strip() == '':
self.logger.warn("Empty project name will fail")
argsdict = dict(project_name=project_name)
projtriple = None
try:
# CAUTION: untested use of inside cert/key
user_uuid = str(uuidModule.UUID(int=user_gid.get_uuid()))
inside_key, inside_certs = self.getInsideKeys(user_uuid)
projtriple = invokeCH(self.pa_url, "lookup_project",
self.logger, argsdict, inside_certs,
inside_key)
except Exception, e:
self.logger.error("Exception getting project of name %s: %s", project_name, e)
#raise
if projtriple:
projval = getValueFromTriple(projtriple, self.logger, "lookup_project for create_slice", unwrap=True)
project_id = projval['project_id']
argsdict = dict(project_id=project_id, slice_name=slice_name, owner_id=owner_id, project_name=project_name)
slicetriple = None
try:
# CAUTION: untested use of inside cert/key
user_uuid = str(uuidModule.UUID(int=user_gid.get_uuid()))
inside_key, inside_certs = self.getInsideKeys(user_uuid)
slicetriple = invokeCH(self.sa_url, "create_slice", self.logger,
argsdict, inside_certs, inside_key)
except Exception, e:
self.logger.error("Exception creating slice %s: %s" % (urn, e))
raise
# Will raise an exception if triple malformed
slicetriple = getValueFromTriple(slicetriple, self.logger, "create_slice")
if not slicetriple['value']:
self.logger.error("No slice created. Return the triple with the error")
return slicetriple
if slicetriple['code'] != 0:
self.logger.error("Return code != 0. Return the triple")
return slicetriple
sliceval = getValueFromTriple(slicetriple, self.logger, "create_slice", unwrap=True)
# OK, this gives us the info about the slice.
# Now though we need the slice credential
argsdict = dict(experimenter_certificate=user_certstr, slice_id=sliceval['slice_id'])
res = None
try:
# CAUTION: untested use of inside cert/key
user_uuid = str(uuidModule.UUID(int=user_gid.get_uuid()))
inside_key, inside_certs = self.getInsideKeys(user_uuid)
res = invokeCH(self.sa_url, 'get_slice_credential', self.logger,
argsdict, inside_certs, inside_key)
except Exception, e:
self.logger.error("Exception doing get_slice_cred after create_slice: %s" % e)
raise
getValueFromTriple(res, self.logger, "get_slice_credential after create_slice")
if not res['value']:
return res
            if not (isinstance(res['value'], dict) and res['value'].has_key('slice_credential')):
return res
return res['value']['slice_credential']
def RenewSlice(self, args):
# args are credential, expiration
# cred is user cred
# returns renewed slice credential
user_certstr = addMACert(self._server.pem_cert, self.logger, self.macert)
try:
user_gid = gid.GID(string=user_certstr)
except Exception, exc:
self.logger.error("RenewSlice failed to create user_gid from SSL client cert: %s", traceback.format_exc())
raise Exception("Failed to RenewSlice. Cant get user GID from SSL client certificate." % exc)
try:
user_gid.verify_chain(self.trusted_roots)
except Exception, exc:
self.logger.error("RenewSlice got unverifiable experimenter cert: %s", exc)
raise
expiration = None
if args and args.has_key('expiration'):
expiration = args['expiration']
credential = None
if args and args.has_key('credential'):
credential = args['credential']
if credential is None:
self.logger.error("RenewSlice has no slice credential in its arguments")
raise Exception("RenewSlice has no slice credential in its arguments")
# Validate slice credential
creds = list()
creds.append(credential)
privs = ()
self._cred_verifier.verify_from_strings(user_certstr, creds, None, privs)
# get Slice UUID (aka slice_id)
slice_cert = sfa.trust.credential.Credential(string=credential).get_gid_object()
try:
slice_uuid = str(uuidModule.UUID(int=slice_cert.get_uuid()))
self.logger.error("Got UUID from slice cert: %s", slice_uuid)
except Exception, e:
self.logger.error("Failed to get a UUID from slice cert: %s", e)
if self.gcf:
# Pull urn from slice credential
urn = sfa.trust.credential.Credential(string=credential).get_gid_object().get_urn()
if self.RenewSlice(urn, expiration):
# return the new slice credential
return self.slices[urn]
else:
# error
raise "Failed to renew slice %s until %s" % (urn, expiration)
else:
            argsdict = dict(slice_id=slice_uuid, expiration=expiration)
slicetriple = None
try:
# CAUTION: untested use of inside cert/key
user_uuid = str(uuidModule.UUID(int=user_gid.get_uuid()))
inside_key, inside_certs = self.getInsideKeys(user_uuid)
slicetriple = invokeCH(self.sa_url, "renew_slice", self.logger,
argsdict, inside_certs, inside_key)
except Exception, e:
self.logger.error("Exception renewing slice %s: %s" % (urn, e))
raise
# Will raise an exception if triple malformed
slicetriple = getValueFromTriple(slicetriple, self.logger, "renew_slice")
if not slicetriple['value']:
self.logger.error("No slice renewed. Return the triple with the error")
return slicetriple
if slicetriple['code'] != 0:
self.logger.error("Return code != 0. Return the triple")
return slicetriple
sliceval = getValueFromTriple(slicetriple, self.logger, "renew_slice", unwrap=True)
# OK, this gives us the info about the slice.
# Now though we need the _updated_ slice credential
argsdict = dict(experimenter_certificate=user_certstr, slice_id=sliceval['slice_id'])
res = None
try:
# CAUTION: untested use of inside cert/key
user_uuid = str(uuidModule.UUID(int=user_gid.get_uuid()))
inside_key, inside_certs = self.getInsideKeys(user_uuid)
res = invokeCH(self.sa_url, 'get_slice_credential', self.logger,
argsdict, inside_certs, inside_key)
except Exception, e:
self.logger.error("Exception doing get_slice_cred after create_slice: %s" % e)
raise
getValueFromTriple(res, self.logger, "get_slice_credential after create_slice")
if not res['value']:
return res
            if not (isinstance(res['value'], dict) and res['value'].has_key('slice_credential')):
return res
return res['value']['slice_credential']
def GetKeys(self, args):
credential = None
if args and args.has_key('credential'):
credential = args['credential']
# cred is user cred
# return list( of dict(type='ssh', key=$key))
user_certstr = addMACert(self._server.pem_cert, self.logger, self.macert)
try:
user_gid = gid.GID(string=user_certstr)
except Exception, exc:
self.logger.error("GetCredential failed to create user_gid from SSL client cert: %s", traceback.format_exc())
raise Exception("Failed to GetCredential. Cant get user GID from SSL client certificate." % exc)
try:
user_gid.verify_chain(self.trusted_roots)
except Exception, exc:
self.logger.error("GetCredential got unverifiable experimenter cert: %s", exc)
raise
if credential is None:
raise Exception("Resolve missing credential")
# self.logger.info("in delegate getkeys about to do cred verify")
# Validate user credential
creds = list()
creds.append(credential)
privs = ()
# self.logger.info("type of credential: %s. Type of creds: %s", type(credential), type(creds))
self._cred_verifier.verify_from_strings(user_certstr, creds, None, privs)
# self.logger.info("getkeys did cred verify")
# With the real CH, the SSH keys are held by the portal, not the CH
# see db-util.php#fetchSshKeys which queries the ssh_key table in the portal DB
# it takes an account_id
try:
user_uuid = str(uuidModule.UUID(int=user_gid.get_uuid()))
except:
self.logger.error("GetKeys Failed to find user account ID from cert")
raise
user_urn = user_gid.get_urn()
if not user_uuid:
self.logger.warn("GetKeys couldnt get uuid for user from cert with urn %s" % user_urn)
else:
self.logger.info("GetKeys called for user with uuid %s" % user_uuid)
# Use new MA lookup_ssh_keys method
inside_key, inside_certs = self.getInsideKeys(user_uuid)
argsdict=dict(member_id=user_uuid);
keys_triple=invokeCH(self.ma_url, "lookup_ssh_keys", self.logger,
argsdict, inside_certs, inside_key)
self.logger.info("lookup_ssh_keys: " + str(keys_triple));
if not keys_triple['value']:
self.logger.error("No SSH key structure. Return the triple with error");
return keys_triple;
if keys_triple['code'] != 0:
self.logger.error("Error extracting SSH keys");
return keys_triple;
keys = keys_triple['value'];
if (len(keys) == 0):
self.logger.error("No SSH keys found");
return keys;
ret = list();
for key in keys:
ssh_key = key['public_key'];
entry = dict(type='ssh',
key=ssh_key);
self.logger.info("KEYS = %r", entry);
ret.append(entry);
return ret
def ListComponents(self, args):
credential = None
if args and args.has_key('credential'):
credential = args['credential']
# Returns list of CMs (AMs)
# cred is user cred or slice cred - Omni uses user cred
# return list( of dict(gid=<cert>, hrn=<hrn>, url=<AM URL>))
# Matt seems to say hrn is not critical, and can maybe even skip cert
user_certstr = addMACert(self._server.pem_cert, self.logger, self.macert)
try:
user_gid = gid.GID(string=user_certstr)
except Exception, exc:
self.logger.error("GetCredential failed to create user_gid from SSL client cert: %s", traceback.format_exc())
raise Exception("Failed to GetCredential. Cant get user GID from SSL client certificate." % exc)
try:
user_gid.verify_chain(self.trusted_roots)
except Exception, exc:
self.logger.error("GetCredential got unverifiable experimenter cert: %s", exc)
raise
if credential is None:
raise Exception("Resolve missing credential")
# Validate user credential
creds = list()
creds.append(credential)
privs = ()
self._cred_verifier.verify_from_strings(user_certstr, creds, None, privs)
if self.gcf:
ret = list()
for (urn, url) in self.aggs:
# convert urn to hrn
hrn = sfa.util.xrn.urn_to_hrn(urn)
ret.append(dict(gid='amcert', hrn=hrn, url=url, urn=urn))
return ret
else:
argsdict = dict(service_type=0)
amstriple = None
try:
# CAUTION: untested use of inside cert/key
user_uuid = str(uuidModule.UUID(int=user_gid.get_uuid()))
inside_key, inside_certs = self.getInsideKeys(user_uuid)
amstriple = invokeCH(self.sr_url, "get_services_of_type",
self.logger, argsdict, inside_certs,
inside_key)
except Exception, e:
self.logger.error("Exception looking up AMs at SR: %s", e)
raise
self.logger.debug("Got list of ams: %s", amstriple)
if amstriple and amstriple.has_key("value") and amstriple["value"]:
amstriple = getValueFromTriple(amstriple, | |
# rdtools/test/soiling_test.py
import pandas as pd
import numpy as np
from rdtools.soiling import soiling_srr
from rdtools.soiling import SRRAnalysis
from rdtools.soiling import annual_soiling_ratios
from rdtools.soiling import monthly_soiling_rates
from rdtools.soiling import NoValidIntervalError
import pytest
def test_soiling_srr(soiling_normalized_daily, soiling_insolation, soiling_times):
reps = 10
np.random.seed(1977)
sr, sr_ci, soiling_info = soiling_srr(soiling_normalized_daily, soiling_insolation, reps=reps)
assert 0.964369 == pytest.approx(sr, abs=1e-6),\
'Soiling ratio different from expected value'
assert np.array([0.962540, 0.965295]) == pytest.approx(sr_ci, abs=1e-6),\
'Confidence interval different from expected value'
assert 0.960205 == pytest.approx(soiling_info['exceedance_level'], abs=1e-6),\
'Exceedance level different from expected value'
assert 0.984079 == pytest.approx(soiling_info['renormalizing_factor'], abs=1e-6),\
'Renormalizing factor different from expected value'
assert len(soiling_info['stochastic_soiling_profiles']) == reps,\
'Length of soiling_info["stochastic_soiling_profiles"] different than expected'
assert isinstance(soiling_info['stochastic_soiling_profiles'], list),\
'soiling_info["stochastic_soiling_profiles"] is not a list'
# Check soiling_info['soiling_interval_summary']
expected_summary_columns = ['start', 'end', 'soiling_rate', 'soiling_rate_low',
'soiling_rate_high', 'inferred_start_loss', 'inferred_end_loss',
'length', 'valid']
actual_summary_columns = soiling_info['soiling_interval_summary'].columns.values
for x in actual_summary_columns:
assert x in expected_summary_columns,\
f"'{x}' not an expected column in soiling_info['soiling_interval_summary']"
for x in expected_summary_columns:
assert x in actual_summary_columns,\
f"'{x}' was expected as a column, but not in soiling_info['soiling_interval_summary']"
assert isinstance(soiling_info['soiling_interval_summary'], pd.DataFrame),\
'soiling_info["soiling_interval_summary"] not a dataframe'
expected_means = pd.Series({'soiling_rate': -0.002644544,
'soiling_rate_low': -0.002847504,
'soiling_rate_high': -0.002455915,
'inferred_start_loss': 1.020124,
'inferred_end_loss': 0.9566552,
'length': 24.0,
'valid': 1.0})
expected_means = expected_means[['soiling_rate', 'soiling_rate_low', 'soiling_rate_high',
'inferred_start_loss', 'inferred_end_loss',
'length', 'valid']]
actual_means = soiling_info['soiling_interval_summary'][expected_means.index].mean()
pd.testing.assert_series_equal(expected_means, actual_means, check_exact=False)
# Check soiling_info['soiling_ratio_perfect_clean']
pd.testing.assert_index_equal(soiling_info['soiling_ratio_perfect_clean'].index, soiling_times,
check_names=False)
sr_mean = soiling_info['soiling_ratio_perfect_clean'].mean()
assert 0.968265 == pytest.approx(sr_mean, abs=1e-6),\
"The mean of soiling_info['soiling_ratio_perfect_clean'] differs from expected"
assert isinstance(soiling_info['soiling_ratio_perfect_clean'], pd.Series),\
'soiling_info["soiling_ratio_perfect_clean"] not a pandas series'
@pytest.mark.filterwarnings("ignore:.*20% or more of the daily data.*:UserWarning")
@pytest.mark.parametrize('method,expected_sr',
[('random_clean', 0.936177),
('half_norm_clean', 0.915093),
('perfect_clean', 0.977116)])
def test_soiling_srr_consecutive_invalid(soiling_normalized_daily, soiling_insolation,
soiling_times, method, expected_sr):
reps = 10
np.random.seed(1977)
sr, sr_ci, soiling_info = soiling_srr(soiling_normalized_daily, soiling_insolation, reps=reps,
max_relative_slope_error=20.0, method=method)
assert expected_sr == pytest.approx(sr, abs=1e-6),\
f'Soiling ratio different from expected value for {method} with consecutive invalid intervals' # noqa: E501
@pytest.mark.parametrize('clean_criterion,expected_sr',
[('precip_and_shift', 0.982546),
('precip_or_shift', 0.973433),
('precip', 0.976196),
('shift', 0.964369)])
def test_soiling_srr_with_precip(soiling_normalized_daily, soiling_insolation, soiling_times,
clean_criterion, expected_sr):
precip = pd.Series(index=soiling_times, data=0)
precip['2019-01-18 00:00:00-07:00'] = 1
precip['2019-02-20 00:00:00-07:00'] = 1
kwargs = {
'reps': 10,
'precipitation_daily': precip
}
np.random.seed(1977)
sr, sr_ci, soiling_info = soiling_srr(soiling_normalized_daily, soiling_insolation,
clean_criterion=clean_criterion, **kwargs)
assert expected_sr == pytest.approx(sr, abs=1e-6),\
f"Soiling ratio with clean_criterion='{clean_criterion}' different from expected"
def test_soiling_srr_confidence_levels(soiling_normalized_daily, soiling_insolation):
    'Tests SRR with different confidence level settings than the tests above'
np.random.seed(1977)
sr, sr_ci, soiling_info = soiling_srr(soiling_normalized_daily, soiling_insolation,
confidence_level=95, reps=10, exceedance_prob=80.0)
assert np.array([0.959322, 0.966066]) == pytest.approx(sr_ci, abs=1e-6),\
'Confidence interval with confidence_level=95 different than expected'
assert 0.962691 == pytest.approx(soiling_info['exceedance_level'], abs=1e-6),\
'soiling_info["exceedance_level"] different than expected when exceedance_prob=80'
def test_soiling_srr_dayscale(soiling_normalized_daily, soiling_insolation):
'Test that a long dayscale can prevent valid intervals from being found'
with pytest.raises(NoValidIntervalError):
np.random.seed(1977)
sr, sr_ci, soiling_info = soiling_srr(soiling_normalized_daily, soiling_insolation,
confidence_level=68.2, reps=10, day_scale=91)
def test_soiling_srr_clean_threshold(soiling_normalized_daily, soiling_insolation):
'''Test that clean test_soiling_srr_clean_threshold works with a float and
can cause no soiling intervals to be found'''
np.random.seed(1977)
sr, sr_ci, soiling_info = soiling_srr(soiling_normalized_daily, soiling_insolation, reps=10,
clean_threshold=0.01)
assert 0.964369 == pytest.approx(sr, abs=1e-6),\
'Soiling ratio with specified clean_threshold different from expected value'
with pytest.raises(NoValidIntervalError):
np.random.seed(1977)
sr, sr_ci, soiling_info = soiling_srr(soiling_normalized_daily, soiling_insolation,
reps=10, clean_threshold=0.1)
def test_soiling_srr_trim(soiling_normalized_daily, soiling_insolation):
np.random.seed(1977)
sr, sr_ci, soiling_info = soiling_srr(soiling_normalized_daily, soiling_insolation, reps=10,
trim=True)
assert 0.978093 == pytest.approx(sr, abs=1e-6),\
'Soiling ratio with trim=True different from expected value'
assert len(soiling_info['soiling_interval_summary']) == 1,\
'Wrong number of soiling intervals found with trim=True'
@pytest.mark.parametrize('method,expected_sr',
[('random_clean', 0.920444),
('perfect_clean', 0.966912)
])
def test_soiling_srr_method(soiling_normalized_daily, soiling_insolation, method, expected_sr):
np.random.seed(1977)
sr, sr_ci, soiling_info = soiling_srr(soiling_normalized_daily, soiling_insolation, reps=10,
method=method)
assert expected_sr == pytest.approx(sr, abs=1e-6),\
f'Soiling ratio with method="{method}" different from expected value'
def test_soiling_srr_min_interval_length(soiling_normalized_daily, soiling_insolation):
'Test that a long minimum interval length prevents finding shorter intervals'
with pytest.raises(NoValidIntervalError):
np.random.seed(1977)
# normalized_daily intervals are 25 days long, so min=26 should fail:
_ = soiling_srr(soiling_normalized_daily, soiling_insolation, confidence_level=68.2,
reps=10, min_interval_length=26)
# but min=24 should be fine:
_ = soiling_srr(soiling_normalized_daily, soiling_insolation, confidence_level=68.2,
reps=10, min_interval_length=24)
def test_soiling_srr_recenter_false(soiling_normalized_daily, soiling_insolation):
np.random.seed(1977)
sr, sr_ci, soiling_info = soiling_srr(soiling_normalized_daily, soiling_insolation, reps=10,
recenter=False)
assert 1 == soiling_info['renormalizing_factor'],\
'Renormalizing factor != 1 with recenter=False'
assert 0.966387 == pytest.approx(sr, abs=1e-6),\
'Soiling ratio different than expected when recenter=False'
def test_soiling_srr_negative_step(soiling_normalized_daily, soiling_insolation):
stepped_daily = soiling_normalized_daily.copy()
stepped_daily.iloc[37:] = stepped_daily.iloc[37:] - 0.1
np.random.seed(1977)
with pytest.warns(UserWarning, match='20% or more of the daily data'):
sr, sr_ci, soiling_info = soiling_srr(stepped_daily, soiling_insolation, reps=10)
assert list(soiling_info['soiling_interval_summary']['valid'].values) == [True, False, True],\
'Soiling interval validity differs from expected when a large negative step ' \
'is incorporated into the data'
assert 0.936932 == pytest.approx(sr, abs=1e-6),\
'Soiling ratio different from expected when a large negative step is incorporated into the data' # noqa: E501
def test_soiling_srr_max_negative_slope_error(soiling_normalized_daily, soiling_insolation):
np.random.seed(1977)
with pytest.warns(UserWarning, match='20% or more of the daily data'):
sr, sr_ci, soiling_info = soiling_srr(soiling_normalized_daily, soiling_insolation,
reps=10, max_relative_slope_error=45.0)
assert list(soiling_info['soiling_interval_summary']['valid'].values) == [True, True, False],\
'Soiling interval validity differs from expected when max_relative_slope_error=45.0'
assert 0.958761 == pytest.approx(sr, abs=1e-6),\
'Soiling ratio different from expected when max_relative_slope_error=45.0'
def test_soiling_srr_with_nan_interval(soiling_normalized_daily, soiling_insolation):
'''
Previous versions had a bug which would have raised an error when an entire interval
was NaN. See https://github.com/NREL/rdtools/issues/129
'''
reps = 10
normalized_corrupt = soiling_normalized_daily.copy()
normalized_corrupt[26:50] = np.nan
np.random.seed(1977)
with pytest.warns(UserWarning, match='20% or more of the daily data'):
sr, sr_ci, soiling_info = soiling_srr(normalized_corrupt, soiling_insolation, reps=reps)
assert 0.948792 == pytest.approx(sr, abs=1e-6),\
'Soiling ratio different from expected value when an entire interval was NaN'
def test_soiling_srr_outlier_factor(soiling_normalized_daily, soiling_insolation):
_, _, info = soiling_srr(soiling_normalized_daily, soiling_insolation,
reps=1, outlier_factor=8)
assert len(info['soiling_interval_summary']) == 2,\
'Increasing the outlier_factor did not result in the expected number of soiling intervals'
def test_soiling_srr_kwargs(monkeypatch, soiling_normalized_daily, soiling_insolation):
'''
Make sure that all soiling_srr parameters get passed on to SRRAnalysis and
SRRAnalysis.run(), i.e. all necessary inputs to SRRAnalysis are provided by
soiling_srr. Done by removing the SRRAnalysis default param values
and making sure everything still runs.
'''
# the __defaults__ attr is the tuple of default values in py3
monkeypatch.delattr(SRRAnalysis.__init__, "__defaults__")
monkeypatch.delattr(SRRAnalysis.run, "__defaults__")
_ = soiling_srr(soiling_normalized_daily, soiling_insolation, reps=10)
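# Illustrative sketch (not part of the test suite): clearing __defaults__ turns
# every keyword argument with a default into a required argument, which is why
# the monkeypatched call above only succeeds if soiling_srr passes every
# parameter through explicitly.
#
#     def f(a, b=2):
#         return a + b
#
#     f(1)                   # -> 3
#     f.__defaults__ = None  # same effect as monkeypatch.delattr(f, "__defaults__")
#     f(1)                   # TypeError: missing required argument 'b'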
@pytest.mark.parametrize(('start,expected_sr'),
[(18, 0.984779), (17, 0.981258)])
def test_soiling_srr_min_interval_length_default(soiling_normalized_daily, soiling_insolation,
start, expected_sr):
'''
Make sure that the default value of min_interval_length is 7 days by testing
on a cropped version of the example data
'''
reps = 10
np.random.seed(1977)
sr, sr_ci, soiling_info = soiling_srr(soiling_normalized_daily[start:],
soiling_insolation[start:], reps=reps)
assert expected_sr == pytest.approx(sr, abs=1e-6),\
'Soiling ratio different from expected value'
@pytest.mark.parametrize('test_param', ['energy_normalized_daily',
'insolation_daily',
'precipitation_daily'])
def test_soiling_srr_non_daily_inputs(test_param):
'''
Validate the frequency check for input time series
'''
dummy_daily_explicit = pd.Series(0, index=pd.date_range('2019-01-01', periods=10, freq='d'))
dummy_daily_implicit = pd.Series(0, index=pd.date_range('2019-01-01', periods=10, freq='d'))
dummy_daily_implicit.index.freq = None
dummy_nondaily = pd.Series(0, index=dummy_daily_explicit.index[::2])
kwargs = {
'energy_normalized_daily': dummy_daily_explicit,
'insolation_daily': dummy_daily_explicit,
'precipitation_daily': dummy_daily_explicit,
}
# no error for implicit daily inputs
kwargs[test_param] = dummy_daily_implicit
_ = SRRAnalysis(**kwargs)
# yes error for non-daily inputs
kwargs[test_param] = dummy_nondaily
with pytest.raises(ValueError, match='must have daily frequency'):
_ = SRRAnalysis(**kwargs)
def test_soiling_srr_argument_checks(soiling_normalized_daily, soiling_insolation):
'''
Make sure various argument validation warnings and errors are raised
'''
kwargs = {
'energy_normalized_daily': soiling_normalized_daily,
'insolation_daily': soiling_insolation,
'reps': 10
}
with pytest.warns(UserWarning, match='An even value of day_scale was passed'):
_ = soiling_srr(day_scale=12, **kwargs)
with pytest.raises(ValueError, match='clean_criterion must be one of'):
_ = soiling_srr(clean_criterion='bad', **kwargs)
with pytest.raises(ValueError, match='Invalid method specification'):
_ = soiling_srr(method='bad', **kwargs)
# ###########################
# annual_soiling_ratios tests
# ###########################
@pytest.fixture()
def multi_year_profiles():
times = pd.date_range('01-01-2018', '11-30-2019', freq='D')
data = np.array([0]*365 + [10]*334)
profiles = [pd.Series(x + data, times) for x in range(10)]
# make insolation slightly longer to test for proper normalization
times = pd.date_range('01-01-2018', '12-31-2019', freq='D')
insolation = 350*[0.8] + (len(times)-350)*[1]
insolation = pd.Series(insolation, index=times)
return profiles, insolation
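# Note on the expected values in the two tests below (a derivation, assuming
# numpy's default linear percentile interpolation): within each year every
# profile is constant (profile x is x in 2018 and x + 10 in 2019), so
# insolation weighting leaves the per-profile annual ratio unchanged. Across
# the ten profiles the median of 0..9 is 4.5, the 68.2% interval spans the
# 15.9th/84.1th percentiles (0.159 * 9 = 1.431, 0.841 * 9 = 7.569), and the
# 95% interval spans the 2.5th/97.5th percentiles (0.225 and 8.775); the 2019
# row simply adds 10 to each value.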
def test_annual_soiling_ratios(multi_year_profiles):
expected_data = np.array([[2018, 4.5, 1.431, 7.569],
[2019, 14.5, 11.431, 17.569]])
expected = pd.DataFrame(data=expected_data,
columns=['year', 'soiling_ratio_median', 'soiling_ratio_low',
'soiling_ratio_high'])
expected['year'] = expected['year'].astype(int)
srr_profiles, insolation = multi_year_profiles
result = annual_soiling_ratios(srr_profiles, insolation)
pd.testing.assert_frame_equal(result, expected, check_dtype=False)
def test_annual_soiling_ratios_confidence_interval(multi_year_profiles):
expected_data = np.array([[2018, 4.5, 0.225, 8.775],
[2019, 14.5, 10.225, 18.775]])
expected = pd.DataFrame(data=expected_data,
columns=['year', 'soiling_ratio_median', 'soiling_ratio_low',
'soiling_ratio_high'])
expected['year'] = expected['year'].astype(int)
srr_profiles, insolation = multi_year_profiles
result = annual_soiling_ratios(srr_profiles, insolation, confidence_level=95)
pd.testing.assert_frame_equal(result, expected, check_dtype=False)
def test_annual_soiling_ratios_warning(multi_year_profiles):
srr_profiles, insolation = multi_year_profiles
insolation = insolation.iloc[:-200]
match = ('The indexes of stochastic_soiling_profiles are not entirely contained '
'within the index of insolation_daily. Every day in stochastic_soiling_profiles '
'should be represented in insolation_daily. This may cause erroneous results.')
with pytest.warns(UserWarning, match=match):
_ = annual_soiling_ratios(srr_profiles, insolation)
# ###########################
# monthly_soiling_rates tests
# ###########################
@pytest.fixture()
def soiling_interval_summary():
starts = ['2019/01/01', '2019/01/16', '2019/02/08', '2019/03/06']
starts = pd.to_datetime(starts).tz_localize('America/Denver')
ends = ['2019/01/15', '2019/02/07', '2019/03/05', '2019/04/07']
ends = pd.to_datetime(ends).tz_localize('America/Denver')
slopes = [-0.005, -0.002, -0.001, -0.002]
slopes_low = [-0.0055, -0.0025, -0.0015, -0.003]
slopes_high = [-0.004, 0, 0, -0.001]
valids = [True, True, False, True]
soiling_interval_summary = pd.DataFrame()
soiling_interval_summary['start'] = starts
soiling_interval_summary['end'] = ends
soiling_interval_summary['soiling_rate'] = slopes
soiling_interval_summary['soiling_rate_low'] = slopes_low
soiling_interval_summary['soiling_rate_high'] = slopes_high
soiling_interval_summary['inferred_start_loss'] =
# -*- coding: utf-8 -*-
from __future__ import unicode_literals, absolute_import
import functools
from functools import partial
from decimal import Decimal
from uuid import UUID
import logging
import os
import sys
try:
    from collections.abc import Iterable, Sized
except ImportError:  # Python 2 fallback; collections.abc is required on Python 3.10+
    from collections import Iterable, Sized
from itertools import chain
import django
from django.conf import settings, LazySettings # type: ignore
from django.core.exceptions import ImproperlyConfigured # type: ignore
from django.utils import six
try:
from django.urls import path, re_path
except ImportError:
from django.conf.urls import url as re_path
path = None
from django.utils.six import integer_types, string_types # type: ignore
from django.utils.functional import SimpleLazyObject as SLO # type: ignore
from django.utils.module_loading import import_string # type: ignore
from environ import Env # type: ignore
try:
# noinspection PyUnresolvedReferences
from typing import (
TYPE_CHECKING,
Any,
Set,
AnyStr,
Union,
List,
Dict,
Tuple,
Optional,
Type,
Sequence,
)
except ImportError:
TYPE_CHECKING = False
try:
from json import JSONDecodeError # type: ignore
except ImportError:
class JSONDecodeError(NotImplementedError): # type: ignore
pass
try:
from importlib.abc import MetaPathFinder
except ImportError:
MetaPathFinder = object # type: ignore
__version_info__ = '0.1.1'
__version__ = '0.1.1'
version = '0.1.1'
VERSION = '0.1.1'
def get_version():
return version
__all__ = ["app", "config", "run", "env", "urlconf", "routes", "setup", "get_version"]
logger = logging.getLogger(__name__)
# logging without having yet called basicConfig (or setting up
# django's logging ... which won't necessarily have happened yet either) just
# spews out jazz about no configured handlers instead of printing anything.
logger.setLevel(logging.INFO)
logger.addHandler(logging.StreamHandler())
class TrackedEnv(Env):
def __init__(self, **scheme):
# type: (dict) -> None
super(TrackedEnv, self).__init__(**scheme)
self.seen_settings = set() # type: Set[str]
def get_value(self, var, *args, **kwargs): # type: ignore
val = super(TrackedEnv, self).get_value(var, *args, **kwargs)
if var in self.ENVIRON:
self.seen_settings.add(var)
return val
def __str__(self): # type: ignore
return ", ".join(sorted(self.seen_settings))
def __unicode__(self): # type: ignore
# noinspection PyUnresolvedReferences
return unicode(self.__str__())
def __repr__(self): # type: ignore
return "<Env of {0!s}>".format(self)
def __bool__(self):
return len(self.seen_settings) > 0
env = TrackedEnv()
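# Illustrative usage (hypothetical variable name): TrackedEnv only records
# variables that were actually present in os.environ, so `env` is truthy (and
# gets logged by app()) only when at least one setting was overridden.
#
#     os.environ['DEBUG'] = 'off'
#     env.bool('DEBUG', default=True)   # -> False, and 'DEBUG' is recorded
#     str(env)                          # -> 'DEBUG'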
# noinspection PyClassHasNoInit
class SimpleLazyObject(SLO):
def __str__(self): # type: ignore
name = self._setupfunc.__name__
main = getattr(self._setupfunc, "__code__", None)
main = getattr(main, "co_filename", "__main__")
return "{prefix!s} -> {func!s}()".format(func=name, prefix=main)
def __unicode__(self): # type: ignore
# noinspection PyUnresolvedReferences
return unicode(self.__str__())
def __hash__(self): # type: ignore
return hash(tuple(self))
def flatten(items):
""" https://stackoverflow.com/a/40857703 """
for x in items:
if isinstance(x, Iterable) and not (
isinstance(x, six.string_types) or isinstance(x, six.binary_type)
):
for item in flatten(x):
yield item
else:
yield x
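# Illustrative example: strings and bytes are treated as atoms, every other
# iterable is flattened recursively.
#
#     tuple(flatten([1, [2, (3, 4)], "ab"]))   # -> (1, 2, 3, 4, 'ab')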
def urlconf(dotted_path):
# type: (str) -> partial[Any]
lazy = functools.partial(import_string, dotted_path)
lazy.__name__ = "microscope.urlconf('{}')".format(dotted_path) # type: ignore
lazy.__doc__ = "Deferred importer for another set of urlpatterns"
return lazy
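# Illustrative usage (hypothetical dotted path): defer importing a conventional
# urls module until Django actually needs ROOT_URLCONF.
#
#     config(ROOT_URLCONF=urlconf('myproject.urls'), DEBUG=True)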
class Routes(list):
def add(self, item):
self.append(item)
def __call__(self):
return tuple(self)
def _decorator(self, handler, url, view, name=None, kwargs=None):
@functools.wraps(view)
def decorator(view):
if hasattr(view, "as_view") and callable(view.as_view):
view = view.as_view()
decorated = handler(url, view, name=name, kwargs=kwargs)
self.add(decorated)
return view
return decorator
if path is None:
def path(self, url, view=None, name=None, kwargs=None):
raise NotImplementedError(
"This version of Django doesn't have django.urls.path(...)"
)
else:
def path(self, url, view=None, name=None, kwargs=None):
if callable(url) and name is None and kwargs is None:
raise ValueError(
"Used @routes.path instead of @routes.path('path/', 'viewname', kwargs={...})"
)
return self._decorator(
url=url, name=name, view=view, kwargs=kwargs, handler=path
)
def regex(self, url, view=None, name=None, kwargs=None):
if callable(url) and name is None and kwargs is None:
raise ValueError(
"Used @routes.regex instead of @routes.regex('^path$', 'viewname', kwargs={...})"
)
return self._decorator(
url=url, name=name, view=view, kwargs=kwargs, handler=re_path
)
routes = Routes()
routes.__name__ = "microscope.routes"
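# Illustrative usage (hypothetical view): register views against the shared
# `routes` collector and pass `routes` itself as ROOT_URLCONF; calling it
# yields the tuple of collected urlpatterns.
#
#     @routes.regex(r'^$', name='index')
#     def index(request):
#         from django.http import HttpResponse
#         return HttpResponse('hello')
#
#     config(ROOT_URLCONF=routes, DEBUG=True)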
# noinspection PyPep8Naming
def config(name_hint=None, file_hint=None, **DEFAULTS):
# type: (Optional[str], Optional[str], Dict[str, Any]) -> LazySettings
if settings.configured:
raise RuntimeError(
"config() has already been called, OR django.conf.settings.configure() was already called"
)
setup(name_hint, file_hint)
options = {} # type: Dict[str, Any]
try:
intended_urls = DEFAULTS.pop("ROOT_URLCONF")
except KeyError:
raise ImproperlyConfigured("I need a ROOT_URLCONF to work properly ...")
if not callable(intended_urls):
raise ImproperlyConfigured(
"I need a function or whatever for ROOT_URLCONF, currently"
)
urlpatterns = SimpleLazyObject(intended_urls)
options["ROOT_URLCONF"] = urlpatterns
def cant_handle_complex(var, default):
# type: (Any, Any) -> Any
logger.error("Can't currently read %s from the env", var)
return default
for key, value in DEFAULTS.items():
option_type = type(value)
if isinstance(value, bool):
env_func = env.bool
elif isinstance(value, string_types):
env_func = env.str
elif isinstance(value, integer_types):
env_func = env.int
elif isinstance(value, float):
env_func = env.float
elif isinstance(value, Decimal):
env_func = partial(env.get_value, cast=Decimal)
elif isinstance(value, UUID):
env_func = partial(env.get_value, cast=UUID)
elif isinstance(value, Iterable) and isinstance(value, Sized):
# can't be a string now.
# noinspection PyTypeChecker
flattened = tuple(flatten(value))
if len(value) != len(flattened):
env_func = env.json # changed length, must be nested or whatever.
else:
if issubclass(option_type, list):
env_func = env.list
elif issubclass(option_type, tuple):
env_func = env.tuple
elif issubclass(option_type, dict):
env_func = env.dict # type: ignore
elif issubclass(option_type, set):
env_func = partial(env.get_value, cast=frozenset)
else:
env_func = env.json
else:
env_func = cant_handle_complex # type: ignore
value = env_func(var=key, default=value)
if not isinstance(value, bool) and not value:
logger.warning("config value %s=%s evaluates as falsey", key, value)
options[key] = value
del env_func, value
settings.configure(**options)
del options
return settings
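# Minimal sketch of the intended call (setting names are Django's, values are
# hypothetical): each DEFAULT can be overridden from the environment using a
# parser chosen from the type of the default, e.g. DEBUG=off or
# ALLOWED_HOSTS=example.com,example.org.
#
#     settings = config(
#         DEBUG=True,
#         ALLOWED_HOSTS=['localhost'],
#         ROOT_URLCONF=routes,
#     )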
class BoundaryWarning(MetaPathFinder):
# http://xion.org.pl/2012/05/06/hacking-python-imports/
__slots__ = (
"root_location",
"app_location",
"root_location_length",
"app_location_length",
"already_warned",
"ok_roots",
)
def __init__(self, root_location, this_app):
# type: (str, str) -> None
self.root_location = root_location
self.root_location_length = len(root_location)
self.app_location = this_app
self.app_location_length = len(this_app)
self.already_warned = set() # type: Set[Tuple[str, str, str]]
self.ok_roots = tuple(
{
self.app_location,
# site-packages?
os.path.dirname(os.path.dirname(django.__file__)),
# stdlib/builtin *module*
os.path.dirname(os.__file__),
# stdlib/builtin *package*
os.path.dirname(os.path.dirname(logging.__file__)),
}
)
def find_module(self, fullname, path=None):
if path is None:
return None
for package_path in path:
# Check our expected roots to see if we're within a sanctioned location.
# Under py2, this may yield more results than desired for packages outside
# the roots, if they don't use `from __future__ import absolute_import`
# as they'll look for package local files first...
if package_path.startswith(self.ok_roots):
continue
else:
msgparts = (fullname, "".join(path), self.app_location)
if msgparts not in self.already_warned:
logger.error(
"Attempted import `%s` (%s) which is outside of %s", *msgparts
)
self.already_warned.add(msgparts)
return None
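# Note: BoundaryWarning never blocks anything; find_module() always returns
# None, so the regular import machinery still performs the import and the
# finder only logs when a module is loaded from outside the sanctioned roots.
# It is installed by Setup.determine_if_in_app_root() below via
# sys.meta_path.insert(0, ...).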
class Setup(object):
__slots__ = ("name", "runner", "in_app", "done")
def __init__(self):
self.name = None # type: Optional[str]
self.runner = None # type: Optional[str]
self.in_app = False # type: bool
self.done = False # type: bool
def __call__(self, name, runner):
# type: (Optional[str], Optional[str]) -> Tuple[str, str]
if self.done is True:
assert self.name is not None
assert self.runner is not None
return self.name, self.runner
self.name, self.runner = self.get_name_runner(name, runner)
assert (
self.runner is not None
), "Couldn't figure out which file had __name__ == '__main__'"
assert self.name is not None, "Couldn't figure out if __name__ == '__main__'"
self.in_app = self.determine_if_in_app_root(self.runner)
self.done = True
return self.name, self.runner
def determine_if_in_app_root(self, runner):
# type: (str) -> bool
join = os.path.join
exists = os.path.exists
abspath = os.path.abspath
dirname = os.path.dirname
root = abspath(runner)
app_dir = abspath(dirname(root))
app_parent = abspath(dirname(app_dir))
# If it looks like a normal Django app, and it might be downstream of a
# project folder (eg: project/appname) then we may need to add the
# parent dir (eg: project) to the path.
# To try and make sure this doesn't bring in a whole load of
# inter-dependencies, we insert a class which raises a warning if an import
# into another app within the parent dir (eg: project/app2) occurs.
app_heuristics = ("admin", "apps", "forms", "models", "views", "urls")
appish_files = (
join(app_dir, "{0!s}.py".format(heuristic)) for heuristic in app_heuristics
)
appish_dirs = (
join(app_dir, heuristic, "__init__.py") for heuristic in app_heuristics
)
in_app = any(exists(path) for path in chain(appish_files, appish_dirs))
if in_app:
sys.meta_path.insert(0, BoundaryWarning(app_parent, app_dir))
if app_parent not in sys.path:
sys.path.insert(0, app_parent)
return True
return False
def get_name_runner(self, name=None, runner=None):
# type: (Optional[str], Optional[str]) -> Tuple[Optional[str], Optional[str]]
if name is None or runner is None:
runner = None
name = None
parent_frame = sys._getframe()
while parent_frame.f_locals:
if "__name__" in parent_frame.f_locals:
runner = parent_frame.f_code.co_filename
name = parent_frame.f_locals["__name__"]
break
parent_frame = parent_frame.f_back
return name, runner
setup = Setup()
def app(name_hint=None, file_hint=None):
# type: (Optional[str], Optional[str]) -> Optional['django.core.handlers.wsgi.WSGIHandler']
if not settings.configured:
raise RuntimeError("config() has not been called")
name, runner = setup(name_hint, file_hint)
if env:
logger.info("Read %s from environment variables", env)
if name == "__main__":
if len(sys.argv) > 1 and sys.argv[1] == "diffsettings":
sys.stderr.write(
"Yeah, that doesn't work, see https://code.djangoproject.com/ticket/29236\n"
)
sys.exit(1)
from django.core.management import execute_from_command_line # type: ignore
execute_from_command_line(sys.argv)
return None
from django.core.wsgi import get_wsgi_application # type: ignore
return get_wsgi_application()
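# Illustrative single-file entry point (hypothetical script): after config(),
# ending the module with
#
#     application = app()
#
# makes `python script.py runserver` dispatch to the management commands,
# while a WSGI server imports `application` from the same file.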
# noinspection PyPep8Naming
def run(name_hint=None, file_hint=None, **DEFAULTS):
# type: (Optional[str], Optional[str], Dict[str, Any]) -> Optional['django.core.handlers.wsgi.WSGIHandler']
name, runner = setup(name_hint, file_hint)
config(name_hint=name,
431 540 541 431
536 431 430 540 431
537 432 541 542 432
538 432 431 541 432
539 446 530 420 446
540 446 529 530 446
541 452 542 543 452
542 452 432 542 452
543 602 554 603 602
544 602 553 554 602
545 604 556 605 604
546 604 555 556 604
547 603 555 604 603
548 603 554 555 603
549 472 553 602 472
550 472 473 553 472
551 611 522 523 611
552 611 562 522 611
553 608 560 609 608
554 608 559 560 608
555 609 561 610 609
556 609 560 561 609
557 610 562 611 610
558 610 561 562 610
559 607 559 608 607
560 607 558 559 607
561 606 558 607 606
562 606 557 558 606
563 605 557 606 605
564 605 556 557 605
565 505 634 504 505
566 632 506 631 632
567 632 633 506 632
568 633 505 506 633
569 633 634 505 633
570 634 628 504 634
571 631 507 629 631
572 631 506 507 631
573 628 503 504 628
574 507 508 629 507
575 637 633 636 637
576 637 634 633 637
577 633 632 636 633
578 635 632 631 635
579 635 636 632 635
580 627 634 637 627
581 627 628 634 627
582 636 569 637 636
583 570 636 635 570
584 570 569 636 570
585 568 637 569 568
586 568 627 637 568
587 571 635 630 571
588 571 570 635 571
589 630 631 629 630
590 630 635 631 630
591 469 638 468 469
592 469 572 638 469
593 572 630 638 572
594 572 571 630 572
595 638 629 468 638
596 638 630 629 638
597 629 467 468 629
598 629 508 467 629
599 508 466 467 508
600 641 628 627 641
601 642 640 641 642
602 642 643 640 642
603 643 639 640 643
604 643 644 639 643
605 640 628 641 640
606 644 552 639 644
607 644 551 552 644
608 643 564 644 643
609 643 565 564 643
610 564 563 644 564
611 551 563 550 551
612 551 644 563 551
613 642 565 643 642
614 642 566 565 642
615 641 566 642 641
616 641 567 566 641
617 627 567 641 627
618 627 568 567 627
619 640 503 628 640
620 640 502 503 640
621 640 639 502 640
622 639 501 502 639
623 639 552 501 639
624 555 650 556 555
625 555 649 650 555
626 650 651 556 650
627 554 649 555 554
628 554 647 649 554
629 557 651 646 557
630 557 556 651 557
631 654 511 653 654
632 654 655 511 654
633 653 512 652 653
634 653 511 512 653
635 512 513 652 512
636 513 645 652 513
637 655 510 511 655
638 655 648 510 655
639 513 514 645 513
640 648 509 510 648
641 647 655 649 647
642 647 648 655 647
643 649 654 650 649
644 649 655 654 649
645 654 653 650 654
646 651 653 652 651
647 651 650 653 651
648 646 652 645 646
649 646 651 652 646
650 553 647 554 553
651 553 656 647 553
652 473 656 553 473
653 473 474 656 473
654 656 648 647 656
655 656 474 648 656
656 474 475 648 474
657 475 509 648 475
658 660 560 559 660
659 660 661 560 660
660 646 558 557 646
661 646 659 558 646
662 659 559 558 659
663 659 660 559 659
664 521 562 662 521
665 521 522 562 521
666 561 662 562 561
667 661 561 560 661
668 661 662 561 661
669 662 520 521 662
670 660 657 661 660
671 660 659 657 660
672 657 658 661 657
673 658 662 661 658
674 659 645 657 659
675 658 520 662 658
676 659 646 645 659
677 658 516 520 658
678 658 515 516 658
679 657 515 658 657
680 657 514 515 657
681 657 645 514 657
682 475 476 509 475
683 663 494 498 663
684 664 494 663 664
685 664 493 494 664
686 665 492 493 665
687 665 666 492 665
688 666 667 492 666
689 493 664 665 493
690 675 669 670 675
691 675 491 669 675
692 675 490 491 675
693 675 670 490 675
694 676 492 667 676
695 676 491 492 676
696 676 668 491 676
697 676 667 668 676
698 668 669 491 668
699 671 490 670 671
700 672 490 671 672
701 672 489 490 672
702 672 673 489 672
703 673 488 489 673
704 673 674 488 673
705 674 519 488 674
706 684 667 683 684
707 684 696 667 684
708 684 685 696 684
709 696 668 667 696
710 696 685 668 696
711 685 686 668 685
712 697 669 687 697
713 697 670 669 697
714 669 686 687 669
715 688 697 687 688
716 688 670 697 688
717 688 689 670 688
718 669 668 686 669
719 681 665 680 681
720 681 682 665 681
721 682 666 665 682
722 682 683 666 682
723 678 663 677 678
724 678 679 663 678
725 679 664 663 679
726 679 680 664 679
727 663 498 677 663
728 498 499 677 498
729 680 665 664 680
730 683 667 666 683
731 694 674 693 694
732 694 695 674 694
733 674 673 693 674
734 673 692 693 673
735 695 519 674 695
736 695 518 519 695
737 673 672 692 673
738 690 671 689 690
739 690 672 671 690
740 691 672 690 691
741 691 692 672 691
742 671 670 689 671
743 722 518 695 722
744 722 517 518 722
745 719 693 718 719
746 719 720 693 719
747 720 694 693 720
748 720 721 694 720
749 721 695 694 721
750 721 722 695 721
751 718 692 717 718
752 718 693 692 718
753 691 717 692 691
754 716 691 690 716
755 716 717 691 716
756 710 727 709 710
757 710 711 727 710
758 711 726 727 711
759 727 728 709 727
760 711 712 726 711
761 712 725 726 712
762 728 708 709 728
763 728 729 708 728
764 707 729 724 707
765 707 708 729 707
766 713 725 712 713
767 713 723 725 713
768 727 686 728 727
769 727 726 686 727
770 726 687 686 726
771 686 685 728 686
772 726 725 687 726
773 725 688 687 725
774 685 729 728 685
775 685 684 729 685
776 723 688 725 723
777 723 689 688 723
778 724 684 683 724
779 724 729 684 724
780 706 730 705 706
781 706 724 730 706
782 706 707 724 706
783 724 683 730 724
784 705 682 704 705
785 705 730 682 705
786 730 683 682 730
787 714 723 713 714
788 714 731 723 714
789 714 715 731 714
790 731 689 723 731
791
import logging
import os
import subprocess
import sys
from contextlib import contextmanager
from pathlib import Path
from textwrap import dedent
import pytest
from quicken._internal.cli.cli import get_arg_parser, parse_file
from quicken._internal.constants import (
DEFAULT_IDLE_TIMEOUT,
ENV_IDLE_TIMEOUT,
ENV_LOG_FILE,
)
from .utils import (
captured_std_streams,
chdir,
env,
isolated_filesystem,
load_json,
local_module,
write_text,
)
from .utils.process import contained_children
from .utils.pytest import non_windows
from .utils.subprocess_helper import track_state
logger = logging.getLogger(__name__)
pytestmark = non_windows
@contextmanager
def sys_path(path):
current_sys_path = sys.path
sys.path = sys.path.copy()
sys.path.append(path)
try:
yield
finally:
sys.path = current_sys_path
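# Illustrative usage (hypothetical directory): temporarily extend the import
# path for a test; the original sys.path is restored on exit.
#
#     with sys_path('/tmp/extra_modules'):
#         import helper_module  # resolvable only inside the block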
def test_args_passthru():
parser = get_arg_parser()
args = parser.parse_args(["run", "--file", "./script.py", "--", "--help"])
assert args.action == "run"
assert args.file == "./script.py"
assert args.args == ["--", "--help"]
# def test_args_module_passthru():
# _, args = parse_args(['-m', 'pytest', '--', '-s', '-ra'])
# assert args.m == 'pytest'
# assert args.args == ['-s', '-ra']
def test_file_args_passthru():
parser = get_arg_parser()
args = parser.parse_args(["stop", "--file", "foo"])
assert args.action == "stop"
assert args.file == "foo"
def test_file_evaluation():
# Given a package hello with
#
# hello/
# __init__.py
# foo.py
#
# # hello/__init__.py
# foo = 1
#
# # script.py
# from hello import foo
# import hello.foo
#
# if __name__ == '__main__':
# print(foo)
#
# should print 1
with local_module():
module = Path("hello")
module.mkdir()
write_text(module / "__init__.py", "foo = 1")
write_text(module / "foo.py", "")
write_text(
Path("script.py"),
"""
from hello import foo
import hello.foo
if __name__ == '__main__':
print(foo)
""",
)
prelude, main = parse_file("script.py")
prelude()
with captured_std_streams() as (stdin, stdout, stderr):
main()
output = stdout.read()
assert output == "1\n"
def pytest_exception_location(exc_info):
entry = exc_info.traceback[1]
# The pytest traceback information line number is one less than actual.
return str(entry.path), entry.lineno + 1
def test_file_prelude_backtrace_line_numbering():
# Given a file `script.py` that raises an exception in its prelude
# And the file is parsed
# When the prelude section is executed
# Then the backtrace should have the correct exception
# And the line number should match the line in the file
with isolated_filesystem():
write_text(
Path("script.py"),
"""\
import os
raise RuntimeError('example')
if __name__ == '__main__':
raise RuntimeError('example2')
""",
)
prelude, main = parse_file("script.py")
with pytest.raises(RuntimeError) as e:
prelude()
assert "example" in str(e)
filename, lineno = pytest_exception_location(e)
assert filename == str(Path("script.py").absolute())
assert lineno == 2
def test_file_main_backtrace_line_numbering():
# Given a file `script.py` that raises an exception in its main part
# And the file is parsed
# When the prelude section is executed
# Then the backtrace should have the correct exception
# And the line number should match the line in the file
with isolated_filesystem():
write_text(
Path("script.py"),
"""\
import os
if __name__ == '__main__':
os.getpid
raise RuntimeError('example')
""",
)
prelude, main = parse_file("script.py")
prelude()
with pytest.raises(RuntimeError) as e:
main()
filename, lineno = pytest_exception_location(e)
assert filename == str(Path("script.py").absolute())
assert lineno == 5
def test_python_sets_file_path_using_argument():
# Given a script, a/script.py
# And a symlink a/foo pointing to script.py
# When python executes <target> from <cwd>
# Then __file__ should be the script path exactly as passed on the command line
with isolated_filesystem() as path:
parent = path / "a"
parent.mkdir()
script = parent / "script.py"
write_text(
script,
"""
print(__file__)
""",
)
symlink = parent / "foo"
symlink.symlink_to(script.name)
cases = [
["a", symlink.name],
["a", symlink],
["a", script.name],
["a", script],
[".", f"a/{symlink.name}"],
[".", symlink],
[".", f"a/{script.name}"],
[".", script],
]
for cwd, file in cases:
result = subprocess.run(
[sys.executable, file], stdout=subprocess.PIPE, cwd=cwd
)
output = result.stdout.decode("utf-8").strip()
assert output == str(file)
def test_file_path_set_absolute():
# Given a file `script.py`
# And the code is split into prelude and main
# When executed with the results of parse_file
# Then __file__ should be the full, resolved path to the file
with isolated_filesystem() as path:
script = path / "script.py"
write_text(
script,
"""
print(__file__)
if __name__ == '__main__':
print(__file__)
""",
)
prelude, main = parse_file(str(script))
with captured_std_streams() as (stdin, stdout, stderr):
prelude()
assert stdout.read().strip() == str(script)
with captured_std_streams() as (stdin, stdout, stderr):
main()
assert stdout.read().strip() == str(script)
def test_file_path_symlink_uses_resolved_path():
# Given a file `script.py`
# And a symlink `foo` that points to it
# When executed with the results of parse_file
# Then __file__ should be the full, resolved path to the file
with isolated_filesystem() as path:
script = path / "script.py"
write_text(
script,
"""
print(__file__)
if __name__ == '__main__':
print(__file__)
""",
)
symlink = path / "foo"
symlink.symlink_to(script.name)
prelude, main = parse_file(str(script))
with captured_std_streams() as (stdin, stdout, stderr):
prelude()
assert stdout.read().strip() == str(script)
with captured_std_streams() as (stdin, stdout, stderr):
main()
assert stdout.read().strip() == str(script)
@pytest.fixture
def quicken_script(quicken_venv):
path = os.environ["PATH"]
bin_dir = quicken_venv.path / "bin"
with env(PATH=f"{bin_dir}:{path}"):
yield
@pytest.fixture
def logged(log_file_path):
with env(**{ENV_LOG_FILE: str(log_file_path.absolute())}):
yield
def test_file_argv_set(quicken_script, logged):
# Given a file `script.py`
# sys.argv should start with `script.py` and be followed by any
# other arguments
with isolated_filesystem():
Path("script.py").write_text(
dedent(
"""
import sys
if __name__ == '__main__':
print(sys.argv[0])
print(sys.argv[1])
"""
)
)
args = ["hello"]
with contained_children():
result = subprocess.run(
["quicken", "run", "--file", "script.py", "hello"],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
assert result.returncode == 0, f"process must succeed: {result}"
assert result.stdout.decode("utf-8") == f"script.py\n{args[0]}\n"
def test_file_server_name_uses_absolute_resolved_path(quicken_script, logged):
# Given a file `a/script.py`
# And a symlink `a/foo` pointing to `script.py`
# And a server started from `a/script.py`
# When `quicken -f a/script.py` is executed from `.`
# And `quicken -f a/foo` is executed from `.`
# And `quicken -f script.py` is executed from `a`
# And `quicken -f foo` is executed from `a`
# Then the same server should be used to handle all of them
with isolated_filesystem():
base_dir = Path("a")
base_dir.mkdir()
script = base_dir / "script.py"
write_text(
script,
"""
import __test_helper__
if __name__ == '__main__':
__test_helper__.record()
""",
)
symlink = base_dir / "foo"
symlink.symlink_to(script.name)
with contained_children():
with track_state() as run1:
result = subprocess.run(["quicken", "run", "--file", str(script)])
assert result.returncode == 0
run1.assert_unrelated_to_current_process()
with track_state() as run2:
result = subprocess.run(["quicken", "run", "--file", str(symlink)])
assert result.returncode == 0
run2.assert_same_parent_as(run1)
with chdir("a"):
with track_state() as run3:
result = subprocess.run(["quicken", "run", "--file", script.name])
assert result.returncode == 0
run3.assert_same_parent_as(run1)
with track_state() as run4:
result = subprocess.run(["quicken", "run", "--file", symlink.name])
assert result.returncode == 0
run4.assert_same_parent_as(run1)
def test_file_path_symlink_modified(quicken_script, logged):
# Given a file `script.py`
# And a symlink `foo` that points to it
# And the server is already up, having been executed via the symlink
# And `script.py` is updated
# When the script is executed again via the symlink
# Then the server will be reloaded
with isolated_filesystem():
base_dir = Path("a")
base_dir.mkdir()
script = base_dir / "script.py"
write_text(
script,
"""
import __test_helper__
if __name__ == '__main__':
__test_helper__.record()
""",
)
symlink = base_dir / "foo"
symlink.symlink_to(script.name)
def update_file_mtime(path):
result = os.stat(path)
new_times = (result.st_atime, result.st_mtime + 1)
os.utime(path, new_times)
with contained_children():
with track_state() as run1:
result = subprocess.run(["quicken", "run", "--file", str(symlink)])
assert result.returncode == 0
run1.assert_unrelated_to_current_process()
update_file_mtime(script)
with track_state() as run2:
result = subprocess.run(["quicken", "run", "--file", str(symlink)])
assert result.returncode == 0
run2.assert_unrelated_to_current_process()
run2.assert_unrelated_to(run1)
def test_default_idle_timeout_is_used_cli(quicken_script, logged):
# Given a script
# And no QUICKEN_IDLE_TIMEOUT is set
# When the server is started
# Then it will have the default idle timeout
with isolated_filesystem():
script = Path("script.py")
write_text(
script,
"""
import __test_helper__
if __name__ == '__main__':
__test_helper__.record()
""",
)
with contained_children():
with track_state() as run1:
result = subprocess.run(["quicken", "run", "--file", str(script)])
assert result.returncode == 0
run1.assert_unrelated_to_current_process()
result = subprocess.run(
["quicken", "status", "--json", "--file", str(script)],
stdout=subprocess.PIPE,
)
assert result.returncode == 0
stdout = result.stdout.decode("utf-8")
server_state = load_json(stdout)
assert server_state["status"] == "up"
assert server_state["idle_timeout"] == DEFAULT_IDLE_TIMEOUT
def test_idle_timeout_is_used_cli(quicken_script, logged):
# Given a script
# And no QUICKEN_IDLE_TIMEOUT is set
# When the server is started
# Then it will have the specified idle timeout
with isolated_filesystem():
script = Path("script.py")
write_text(
script,
"""
import __test_helper__
if __name__ == '__main__':
__test_helper__.record()
""",
)
test_idle_timeout = 100
with env(**{ENV_IDLE_TIMEOUT: str(test_idle_timeout)}):
print(os.environ[ENV_IDLE_TIMEOUT])
with contained_children():
with track_state() as run1:
result = subprocess.run(["quicken", "run", "--file", str(script)])
assert result.returncode == 0
run1.assert_unrelated_to_current_process()
result = subprocess.run(
["quicken", "status", "--json", "--file", str(script)],
stdout=subprocess.PIPE,
)
assert result.returncode == 0
stdout = result.stdout.decode("utf-8")
server_state = load_json(stdout)
assert server_state["status"] == "up"
assert server_state["idle_timeout"] == test_idle_timeout
def test_log_file_unwritable_fails_fast_cli(quicken_script):
# Given a QUICKEN_LOG path pointing to a location that is not writable
# When the CLI is executed
# Then it should fail with a nonzero exit code and reasonable message
with isolated_filesystem():
script = Path("script.py")
write_text(
script,
"""
if __name__