edited_code (string, lengths 17–978k) | original_code (string, lengths 17–978k)
---|---
import torch
import re
import copy
import numpy
from torch.utils.data.dataloader import default_collate
from netdissect import nethook, imgviz, tally, unravelconv, upsample
def acts_image(model, dataset,
layer=None, unit=None,
thumbsize=None,
cachedir=None,
return_as='strip', # or individual, or tensor
k=100, r=4096, q=0.01,
batch_size=10,
sample_size=None,
num_workers=30):
assert return_as in ['strip', 'individual', 'tensor']
topk, rq, run = acts_stats(model, dataset, layer=layer, unit=unit,
k=max(200, k), r=r, batch_size=batch_size, num_workers=num_workers,
sample_size=sample_size, cachedir=cachedir)
result = window_images(dataset, topk, rq, run,
thumbsize=thumbsize, return_as=return_as, k=k, q=q,
cachedir=cachedir)
if unit is not None and not hasattr(unit, '__len__'):
result = result[0]
return result
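# Example usage of the *_image entry points in this file (a minimal sketch; the
# model, dataset, layer name, and cache path below are illustrative assumptions,
# not part of this module). grad_image, update_image, and proj_image share the
# same calling convention; the gradient-based variants need (image, label) pairs.
#
#   import torchvision
#   model = torchvision.models.resnet18(pretrained=True).eval()
#   strip = acts_image(model, dataset,           # dataset yields (image, label)
#                      layer='layer4',           # module name inside the model
#                      unit=12,                  # single unit -> single strip
#                      k=20, thumbsize=64,
#                      cachedir='results/resnet18')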
def grad_image(model, dataset,
layer=None, unit=None,
thumbsize=None,
cachedir=None,
return_as='strip', # or individual, or tensor
k=100, r=4096, q=0.01,
batch_size=10,
sample_size=None,
num_workers=30):
assert return_as in ['strip', 'individual', 'tensor']
topk, botk, rq, run = grad_stats(model, dataset, layer=layer, unit=unit,
k=max(200, k), r=r,
batch_size=batch_size, num_workers=num_workers,
sample_size=sample_size, cachedir=cachedir)
result = window_images(dataset, topk, rq, run,
thumbsize=thumbsize, return_as=return_as, k=k, q=q,
cachedir=cachedir)
if unit is not None and not hasattr(unit, '__len__'):
result = result[0]
return result
def update_image(model, dataset,
layer=None, unit=None,
thumbsize=None,
cachedir=None,
return_as='strip', # or individual, or tensor
k=100, r=4096, q=0.01,
cinv=None,
batch_size=10,
sample_size=None,
num_workers=30):
assert return_as in ['strip', 'individual', 'tensor']
topk, botk, rq, run = update_stats(model, dataset, layer=layer, unit=unit,
k=max(200, k), r=r, cinv=cinv,
batch_size=batch_size, num_workers=num_workers,
sample_size=sample_size, cachedir=cachedir)
result = window_images(dataset, topk, rq, run,
thumbsize=thumbsize, return_as=return_as, k=k, q=q,
cachedir=cachedir)
if unit is not None and not hasattr(unit, '__len__'):
result = result[0]
return result
def proj_image(model, dataset,
layer=None, unit=None,
thumbsize=None,
cachedir=None,
return_as='strip', # or individual, or tensor
k=100, r=4096, q=0.01,
batch_size=10,
sample_size=None,
num_workers=30):
assert return_as in ['strip', 'individual', 'tensor']
topk, botk, rq, run = proj_stats(model, dataset, layer=layer, unit=unit,
k=max(200, k), r=r, batch_size=batch_size, num_workers=num_workers,
sample_size=sample_size, cachedir=cachedir)
result = window_images(dataset, topk, rq, run,
thumbsize=thumbsize, return_as=return_as, k=k, q=q,
cachedir=cachedir)
if unit is not None and not hasattr(unit, '__len__'):
result = result[0]
return result
def acts_stats(model, dataset,
layer=None, unit=None,
cachedir=None,
k=100, r=4096,
batch_size=10,
sample_size=None,
num_workers=30):
assert not model.training
if unit is not None:
if not hasattr(unit, '__len__'):
unit = [unit]
assert unit is None or len(unit) > 0
if layer is not None:
module = nethook.get_module(model, layer)
else:
module = model
device = next(model.parameters()).device
pin_memory = (device.type != 'cpu')
def run(x, *args):
with nethook.Trace(module, stop=True) as ret, torch.no_grad():
model(x.to(device))
r = ret.output
if unit is not None:
r = r[:, unit]
return r
run.name = 'acts'
def compute_samples(batch, *args):
r = run(batch)
flat_r = r.view(r.shape[0], r.shape[1], -1)
top_r = flat_r.max(2)[0]
all_r = r.permute(0, 2, 3, 1).reshape(-1, r.shape[1])
return top_r, all_r
topk, rq = tally.tally_topk_and_quantile(
compute_samples, dataset, k=k, r=r,
batch_size=batch_size,
num_workers=num_workers, pin_memory=pin_memory,
sample_size=sample_size,
cachefile=f'{cachedir}/acts_topk_rq.npz' if cachedir else None)
return topk, rq, run
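# The three values returned above are consumed by window_images(): `topk` ranks
# the highest-activating images per unit, `rq` tracks running quantiles (used as
# a mask threshold via rq.quantiles(...)), and `run` recomputes activations when
# rendering. A minimal sketch (the layer name is an illustrative assumption):
#   topk, rq, run = acts_stats(model, dataset, layer='features.10')
#   level = rq.quantiles(0.99)   # per-unit activation threshold for masking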
def grad_stats(model, dataset, layer,
unit=None,
cachedir=None,
k=100, r=4096,
batch_size=10,
sample_size=None,
num_workers=30,
):
assert not model.training
if unit is not None:
if not hasattr(unit, '__len__'):
unit = [unit]
assert unit is None or len(unit) > 0
# Make a copy so we can disable grad on parameters
cloned_model = copy.deepcopy(model)
nethook.set_requires_grad(False, cloned_model)
if layer is not None:
module = nethook.get_module(cloned_model, layer)
else:
module = cloned_model
device = next(cloned_model.parameters()).device
pin_memory = (device.type != 'cpu')
def run(x, y, *args):
with nethook.Trace(module, retain_grad=True) as ret, (
torch.enable_grad()):
out = cloned_model(x.to(device))
r = ret.output
loss = torch.nn.functional.cross_entropy(out, y.to(device))
loss.backward()
r = -r.grad
if unit is not None:
r = r[:, unit]
return r
run.name = 'grad'
def compute_samples(x, y, *args):
r = run(x, y)
flat_r = r.view(r.shape[0], r.shape[1], -1)
top_r = flat_r.max(2)[0]
bot_r = flat_r.min(2)[0]
all_r = r.permute(0, 2, 3, 1).reshape(-1, r.shape[1])
return top_r, bot_r, all_r
topk, botk, rq = tally.tally_extremek_and_quantile(
compute_samples, dataset, k=k, r=r,
batch_size=batch_size,
num_workers=num_workers, pin_memory=pin_memory,
sample_size=sample_size,
cachefile=f'{cachedir}/grad_exk_rq.npz' if cachedir else None)
return topk, botk, rq, run
def weight_grad(model, dataset, layer,
unit=None,
cachedir=None,
batch_size=10,
sample_size=None,
num_workers=30):
# Make a copy so we can disable grad on parameters
cloned_model = copy.deepcopy(model)
nethook.set_requires_grad(False, cloned_model)
module = nethook.get_module(cloned_model, layer)
nethook.set_requires_grad(True, module)
device = next(cloned_model.parameters()).device
pin_memory = (device.type != 'cpu')
def accumulate_grad(x, y, *args):
with torch.enable_grad():
out = cloned_model(x.to(device))
loss = torch.nn.functional.cross_entropy(out, y.to(device))
loss.backward()
def weight_grad():
return dict(wgrad=module.weight.grad)
module.weight.grad = None
wg = tally.tally_each(accumulate_grad, dataset, summarize=weight_grad,
batch_size=batch_size,
num_workers=num_workers, pin_memory=pin_memory,
sample_size=sample_size,
cachefile=f'{cachedir}/weight_grad.npz' if cachedir else None)['wgrad']
return wg
def update_stats(model, dataset, layer,
unit=None,
cachedir=None,
k=100, r=4096,
batch_size=10,
cinv=None,
sample_size=None,
num_workers=30,
):
assert not model.training
if unit is not None:
if not hasattr(unit, '__len__'):
unit = [unit]
assert unit is None or len(unit) > 0
# get weight grad (assumes layer has a weight param)
wg = weight_grad(model, dataset, layer,
cachedir=cachedir,
batch_size=batch_size,
sample_size=sample_size,
num_workers=num_workers)
if cinv is not None:
wg = torch.mm(wg.view(-1,
cinv.shape[0]).cpu(),
cinv.cpu()).view(wg.shape)
# copy the model so we can change its weights.
cloned_model = copy.deepcopy(model)
nethook.set_requires_grad(False, cloned_model)
module = nethook.get_module(cloned_model, layer)
device = next(cloned_model.parameters()).device
pin_memory = (device.type != 'cpu')
with torch.no_grad():
module.weight[...] = -wg.to(device)
if hasattr(module, 'bias') and module.bias is not None:
module.bias[...] = 0
def run(x, *args):
with nethook.Trace(module, stop=True) as ret, torch.no_grad():
cloned_model(x.to(device))
r = ret.output
if unit is not None:
r = r[:, unit]
return r
run.name = 'update' if cinv is None else 'proj'
def compute_samples(batch, *args):
r = run(batch)
flat_r = r.view(r.shape[0], r.shape[1], -1)
top_r = flat_r.max(2)[0]
bot_r = flat_r.min(2)[0]
all_r = r.permute(0, 2, 3, 1).reshape(-1, r.shape[1])
return top_r, bot_r, all_r
topk, botk, rq = tally.tally_extremek_and_quantile(
compute_samples, dataset, k=k, r=r,
batch_size=batch_size,
num_workers=num_workers, pin_memory=pin_memory,
sample_size=sample_size,
cachefile=f'{cachedir}/{run.name}_exk_rq.npz' if cachedir else None)
return topk, botk, rq, run
def proj_c2m(model, dataset, layer,
cachedir=None,
batch_size=10,
sample_size=None,
num_workers=30,
):
assert not model.training
device = next(model.parameters()).device
pin_memory = (device.type != 'cpu')
cloned_model = copy.deepcopy(model)
module = nethook.get_module(cloned_model, layer)
assert isinstance(module, torch.nn.Conv2d)
nethook.set_requires_grad(False, cloned_model)
unraveled = unravelconv.unravel_left_conv2d(module)
unraveled.wconv.weight.requires_grad = True
unraveled.wconv.weight.grad = None
nethook.replace_module(cloned_model, layer, unraveled)
tconv = unraveled.tconv
def ex_run(x, *args):
with nethook.Trace(tconv, stop=True) as unrav:
cloned_model(x.to(device))
return unrav.output
def ex_sample(x, *args):
r = ex_run(x, *args)
return r.permute(0, 2, 3, 1).reshape(-1, r.shape[1])
c2m = tally.tally_second_moment(ex_sample,
dataset,
batch_size=batch_size,
num_workers=num_workers, pin_memory=pin_memory,
sample_size=sample_size,
cachefile=f'{cachedir}/input_cov_moment.npz' if cachedir else None)
return c2m, ex_run
def proj_stats(model, dataset, layer,
unit=None,
cachedir=None,
k=100, r=4096,
batch_size=10,
sample_size=None,
num_workers=30,
):
c2m, ex_run = proj_c2m(model, dataset, layer,
batch_size=batch_size, sample_size=sample_size,
cachedir=cachedir)
# old obsolete method - not stable.
# Cinv = c2m.momentPSD().cholesky_inverse()
moment = c2m.moment()
# TODO: consider uncommenting the following, which uses
# correlation for a better-conditioned inverse.
# Change 2.0 to 3.0 to reduce amplifying near-zero feats.
# rn = moment.diag().clamp(1e-30).pow(-1/2.0)
# moment = moment * rn[None,:] * rn[:,None]
# The following is standard regularization, to try.
# moment.diagonal.add_(1e-3)
Cinv = moment.pinverse()
return update_stats(model, dataset, layer, unit=unit,
cinv=Cinv,
k=k, r=r, batch_size=batch_size, sample_size=sample_size,
cachedir=cachedir)
def window_images(dataset, topk, rq, run,
thumbsize=None,
return_as='strip', # or individual, or tensor
k=None, q=0.01,
border_color=None,
vizname=None,
cachedir=None):
assert return_as in ['strip', 'individual', 'tensor']
input_sample = default_collate([dataset[0]])
r_sample = run(*input_sample)
x_size = tuple(input_sample[0].shape[2:])
if thumbsize is None:
thumbsize = x_size
if not isinstance(thumbsize, (list, tuple)):
thumbsize = (thumbsize, thumbsize)
if topk is None:
topk = tally.range_topk(r_sample.size(1), size=(k or 1))
default_vizname = 'top' if topk.largest else 'bot'
if border_color in ['red', 'green', 'yellow']:
default_vizname += border_color
border_color = dict(red=[255.0, 0.0, 0.0], green=[0.0, 255.0, 0.0],
yellow=[255.0, 255.0, 0.0])[border_color]
if vizname is None:
vizname = default_vizname
iv = imgviz.ImageVisualizer(
thumbsize, image_size=x_size, source=dataset,
level=rq.quantiles((1.0 - q) if topk.largest else q))
func = dict(
strip=iv.masked_images_for_topk,
individual=iv.individual_masked_images_for_topk,
tensor=iv.masked_image_grid_for_topk)[return_as]
acts_images = func(run, dataset, topk, k=k, largest=topk.largest,
border_color=border_color,
cachefile=f'{cachedir}/{vizname}{k or ""}images.npz' if cachedir else None)
return acts_images
def label_stats(dataset_with_seg, num_seglabels,
run, level, upfn=None,
negate=False,
cachedir=None,
batch_size=10,
sample_size=None,
num_workers=30):
# Create upfn
data_sample = default_collate([dataset_with_seg[0]])
input_sample = data_sample[:-2] + data_sample[-1:]
seg_sample = data_sample[-2]
r_sample = run(*input_sample)
r_size = tuple(r_sample.shape[2:])
seg_size = tuple(seg_sample.shape[2:])
device = r_sample.device
pin_memory = (device.type != 'cpu')
if upfn is None:
upfn = upsample.upsampler(seg_size, r_size)
def compute_concept_pair(batch, seg, *args):
seg = seg.to(device)
acts = run(batch, *args)
hacts = upfn(acts)
iacts = (hacts < level if negate else hacts > level) # indicator
iseg = torch.zeros(seg.shape[0], num_seglabels,
seg.shape[2], seg.shape[3],
dtype=torch.bool, device=seg.device)
iseg.scatter_(dim=1, index=seg, value=1)
flat_segs = iseg.permute(0, 2, 3, 1).reshape(-1, iseg.shape[1])
flat_acts = iacts.permute(0, 2, 3, 1).reshape(-1, iacts.shape[1])
return flat_segs, flat_acts
neg = 'neg' if negate else ''
iu99 = tally.tally_all_intersection_and_union(
compute_concept_pair,
dataset_with_seg,
sample_size=sample_size,
num_workers=num_workers, pin_memory=pin_memory,
cachefile=f'{cachedir}/{neg}{run.name}_iu.npz' if cachedir else None)
return iu99
def topk_label_stats(dataset_with_seg, num_seglabels,
run, level, topk, k=None,
upfn=None,
negate=False,
cachedir=None,
batch_size=10,
sample_size=None,
num_workers=30):
# Create upfn
data_sample = default_collate([dataset_with_seg[0]])
input_sample = data_sample[:-2] + data_sample[-1:]
seg_sample = data_sample[-2]
r_sample = run(*input_sample)
r_size = tuple(r_sample.shape[2:])
seg_size = tuple(seg_sample.shape[2:])
device = r_sample.device
num_units = r_sample.shape[1]
pin_memory = (device.type != 'cpu')
if upfn is None:
upfn = upsample.upsampler(seg_size, r_size)
intersections = torch.zeros(num_units, num_seglabels).to(device)
unions = torch.zeros(num_units, num_seglabels).to(device)
def collate_unit_iou(units, imgs, seg, labels):
seg = seg.to(device)
acts = run(imgs, labels)
hacts = upfn(acts)
iacts = (hacts > level) # indicator
iseg = torch.zeros(seg.shape[0], num_seglabels,
seg.shape[2], seg.shape[3],
dtype=torch.bool, device=seg.device)
iseg.scatter_(dim=1, index=seg, value=1)
for i in range(len(imgs)):
ulist = units[i]
for unit, _ in ulist:
im_i = (iacts[i, unit][None] & iseg[i]).view(
num_seglabels, -1).float().sum(1)
im_u = (iacts[i, unit][None] | iseg[i]).view(
num_seglabels, -1).float().sum(1)
intersections[unit] += im_i
unions[unit] += im_u
return []
tally.gather_topk(collate_unit_iou, dataset_with_seg, topk, k=100)
return intersections / (unions + 1e-20)
### Experiment below - find the best representative with gradient in the consensus direction.
# 1. Tally weight grad over the dataset.
# 2. For each unit, find the topk images with gradients in the same direction as this
# consensus weight grad.
def wgrad_stats(model, dataset, layer, cachedir=None,
k=100, r=4096,
batch_size=10,
sample_size=None,
num_workers=30,
):
assert not model.training
if layer is not None:
module = nethook.get_module(model, layer)
else:
module = model
device = next(model.parameters()).device
pin_memory = (device.type != 'cpu')
cloned_model = copy.deepcopy(model)
nethook.set_requires_grad(False, cloned_model)
module = nethook.get_module(cloned_model, layer)
module.weight.requires_grad = True
module.weight.grad = None
wg = weight_grad(model, dataset, layer,
cachedir=cachedir,
batch_size=batch_size,
sample_size=sample_size,
num_workers=num_workers)
wg = wg.to(device)
module.weight.requires_grad = False
ks = module.kernel_size
unfolder = torch.nn.Conv2d(
in_channels=module.in_channels, out_channels=module.out_channels,
kernel_size=ks, padding=module.padding,
dilation=module.dilation, stride=module.stride,
bias=False)
nethook.set_requires_grad(False, unfolder)
unfolder.to(device)
unfolder.weight[...] = wg
def run(x, y, *args, return_details=False):
with nethook.Trace(module, retain_grad=True, retain_input=True) as ret, (
torch.enable_grad()):
out = cloned_model(x.to(device))
r = ret.output
inp = ret.input
loss = torch.nn.functional.cross_entropy(out, y.to(device))
loss.backward()
# The contribution to the weight gradient from every patch.
# If we were to sum unfgrad.sum(dim=(0,5,6)) it would equal module.weight.grad
# Now to reduce things, we need to score it per-patch somehow. We will dot-product
# the average grad per-unit to see which patches push most in the consensus direction.
# This gives a per-unit score at every patch.
score = unfolder(inp) * r.grad
# Hack: it is interesting to separate the cases where r.grad is positive
# (the patch should look more like this to decrease the loss) from cases
# where it is negative (the patch should look less like this). So we drop
# cases where the score is negative, and then negate the score when
# r.grad is negative.
signed_score = score.clamp(0) * (r.grad.sign())
if return_details:
return {k: v.detach().cpu() for k, v in dict(
model_output=out,
loss=loss,
layer_output=r,
layer_output_grad=r.grad,
layer_input=inp,
layer_input_by_Edw=unfolder(inp),
weight_grad=wg,
score=score,
signed_score=signed_score).items()}
return signed_score
# Equivalent unrolled code below.
# scores = []
# for i in range(0, len(unf), 2):
# ug = unf[i:i+2,None,:,:,:,:,:] * r.grad[i:i+2,:,None,None,None,:,:]
# # Now to reduce things, we need to score it per-patch somehow. We will dot-product
# # the average grad per-unit to see which patches push most in the consensus direction.
# # This gives a per-unit score at every patch.
# score = (ug * wg[None,:,:,:,:,None,None]
# ).view(ug.shape[0], ug.shape[1], -1, ug.shape[5], ug.shape[6]).sum(2)
# scores.append(score)
# return torch.cat(scores)
run.name = 'wgrad'
def compute_samples(batch, labels, *args):
score = run(batch, labels)
flat_score = score.view(score.shape[0], score.shape[1], -1)
top_score = flat_score.max(2)[0]
bot_score = flat_score.min(2)[0]
all_score = score.permute(0, 2, 3, 1).reshape(-1, score.shape[1])
return top_score, bot_score, all_score
topk, botk, rq = tally.tally_extremek_and_quantile(
compute_samples, dataset, k=k, r=r,
batch_size=batch_size,
num_workers=num_workers, pin_memory=pin_memory,
sample_size=sample_size,
cachefile=f'{cachedir}/swgrad_exk_rq.npz' if cachedir else None)
return topk, botk, rq, run
### Experiment below:
# tally (p - y) times every post-relu activation in a layer,
# and also sum up every activation.
# This is intended to measure how well a (simple linear) model
# of the given feature can help correct the error p - y.
def sep_stats(model, dataset, layer=None, cachedir=None,
batch_size=10, sample_size=None, num_workers=30):
assert not model.training
if layer is not None:
module = nethook.get_module(model, layer)
else:
module = model
device = next(model.parameters()).device
pin_memory = (device.type != 'cpu')
def run(x, labels, *args):
with nethook.Trace(module) as ret, torch.no_grad():
logits = model(x.to(device))
labels = labels.to(device)
r = ret.output
p = torch.nn.functional.softmax(logits, dim=1)
y = torch.zeros_like(p)
y.scatter_(1, labels[:,None], 1)
return r, p, y
def compute_samples(batch, labels, *args):
r, p, y = run(batch, labels)
err = p-y
sep_t = torch.cat((err, y, torch.ones(err.shape[0], 1, device=device)), dim=1)
flat_r = r.view(r.shape[0], r.shape[1], -1).mean(2)[:,:,None]
r_times_sep_t = flat_r * sep_t[:,None,:]
# Number of stats to track is units * (classes + 1)
sep_data = r_times_sep_t.view(len(batch), -1)
return sep_data
sepmv = tally.tally_mean(
compute_samples, dataset,
batch_size=batch_size,
num_workers=num_workers, pin_memory=pin_memory,
sample_size=sample_size,
cachefile=f'{cachedir}/sep_stats.npz' if cachedir else None)
return sepmv
| import torch
import re
import copy
import numpy
from torch.utils.data.dataloader import default_collate
from netdissect import nethook, imgviz, tally, unravelconv, upsample
def acts_image(model, dataset,
layer=None, unit=None,
thumbsize=None,
cachedir=None,
return_as='strip', # or individual, or tensor
k=100, r=4096, q=0.01,
batch_size=10,
sample_size=None,
num_workers=30):
assert return_as in ['strip', 'individual', 'tensor']
topk, rq, run = acts_stats(model, dataset, layer=layer, unit=unit,
k=max(200, k), r=r, batch_size=batch_size, num_workers=num_workers,
sample_size=sample_size, cachedir=cachedir)
result = window_images(dataset, topk, rq, run,
thumbsize=thumbsize, return_as=return_as, k=k, q=q,
cachedir=cachedir)
if unit is not None and not hasattr(unit, '__len__'):
result = result[0]
return result
def grad_image(model, dataset,
layer=None, unit=None,
thumbsize=None,
cachedir=None,
return_as='strip', # or individual, or tensor
k=100, r=4096, q=0.01,
batch_size=10,
sample_size=None,
num_workers=30):
assert return_as in ['strip', 'individual', 'tensor']
topk, botk, rq, run = grad_stats(model, dataset, layer=layer, unit=unit,
k=max(200, k), r=r,
batch_size=batch_size, num_workers=num_workers,
sample_size=sample_size, cachedir=cachedir)
result = window_images(dataset, topk, rq, run,
thumbsize=thumbsize, return_as=return_as, k=k, q=q,
cachedir=cachedir)
if unit is not None and not hasattr(unit, '__len__'):
result = result[0]
return result
def update_image(model, dataset,
layer=None, unit=None,
thumbsize=None,
cachedir=None,
return_as='strip', # or individual, or tensor
k=100, r=4096, q=0.01,
cinv=None,
batch_size=10,
sample_size=None,
num_workers=30):
assert return_as in ['strip', 'individual', 'tensor']
topk, botk, rq, run = update_stats(model, dataset, layer=layer, unit=unit,
k=max(200, k), r=r, cinv=cinv,
batch_size=batch_size, num_workers=num_workers,
sample_size=sample_size, cachedir=cachedir)
result = window_images(dataset, topk, rq, run,
thumbsize=thumbsize, return_as=return_as, k=k, q=q,
cachedir=cachedir)
if unit is not None and not hasattr(unit, '__len__'):
result = result[0]
return result
def proj_image(model, dataset,
layer=None, unit=None,
thumbsize=None,
cachedir=None,
return_as='strip', # or individual, or tensor
k=100, r=4096, q=0.01,
batch_size=10,
sample_size=None,
num_workers=30):
assert return_as in ['strip', 'individual', 'tensor']
topk, botk, rq, run = proj_stats(model, dataset, layer=layer, unit=unit,
k=max(200, k), r=r, batch_size=batch_size, num_workers=num_workers,
sample_size=sample_size, cachedir=cachedir)
result = window_images(dataset, topk, rq, run,
thumbsize=thumbsize, return_as=return_as, k=k, q=q,
cachedir=cachedir)
if unit is not None and not hasattr(unit, '__len__'):
result = result[0]
return result
def acts_stats(model, dataset,
layer=None, unit=None,
cachedir=None,
k=100, r=4096,
batch_size=10,
sample_size=None,
num_workers=30):
assert not model.training
if unit is not None:
if not hasattr(unit, '__len__'):
unit = [unit]
assert unit is None or len(unit) > 0
if layer is not None:
module = nethook.get_module(model, layer)
else:
module = model
device = next(model.parameters()).device
pin_memory = (device.type != 'cpu')
def run(x, *args):
with nethook.Trace(module, stop=True) as ret, torch.no_grad():
model(x.to(device))
r = ret.output
if unit is not None:
r = r[:, unit]
return r
run.name = 'acts'
def compute_samples(batch, *args):
r = run(batch)
flat_r = r.view(r.shape[0], r.shape[1], -1)
top_r = flat_r.max(2)[0]
all_r = r.permute(0, 2, 3, 1).reshape(-1, r.shape[1])
return top_r, all_r
topk, rq = tally.tally_topk_and_quantile(
compute_samples, dataset, k=k, r=r,
batch_size=batch_size,
num_workers=num_workers, pin_memory=pin_memory,
sample_size=sample_size,
cachefile=f'{cachedir}/acts_topk_rq.npz' if cachedir else None)
return topk, rq, run
def grad_stats(model, dataset, layer,
unit=None,
cachedir=None,
k=100, r=4096,
batch_size=10,
sample_size=None,
num_workers=30,
):
assert not model.training
if unit is not None:
if not hasattr(unit, '__len__'):
unit = [unit]
assert unit is None or len(unit) > 0
# Make a copy so we can disable grad on parameters
cloned_model = copy.deepcopy(model)
nethook.set_requires_grad(False, cloned_model)
if layer is not None:
module = nethook.get_module(cloned_model, layer)
else:
module = cloned_model
device = next(cloned_model.parameters()).device
pin_memory = (device.type != 'cpu')
def run(x, y, *args):
with nethook.Trace(module, retain_grad=True) as ret, (
torch.enable_grad()):
out = cloned_model(x.to(device))
r = ret.output
loss = torch.nn.functional.cross_entropy(out, y.to(device))
loss.backward()
r = -r.grad
if unit is not None:
r = r[:, unit]
return r
run.name = 'grad'
def compute_samples(x, y, *args):
r = run(x, y)
flat_r = r.view(r.shape[0], r.shape[1], -1)
top_r = flat_r.max(2)[0]
bot_r = flat_r.min(2)[0]
all_r = r.permute(0, 2, 3, 1).reshape(-1, r.shape[1])
return top_r, bot_r, all_r
topk, botk, rq = tally.tally_extremek_and_quantile(
compute_samples, dataset, k=k, r=r,
batch_size=batch_size,
num_workers=num_workers, pin_memory=pin_memory,
sample_size=sample_size,
cachefile=f'{cachedir}/grad_exk_rq.npz' if cachedir else None)
return topk, botk, rq, run
def weight_grad(model, dataset, layer,
unit=None,
cachedir=None,
batch_size=10,
sample_size=None,
num_workers=30):
# Make a copy so we can disable grad on parameters
cloned_model = copy.deepcopy(model)
nethook.set_requires_grad(False, cloned_model)
module = nethook.get_module(cloned_model, layer)
nethook.set_requires_grad(True, module)
device = next(cloned_model.parameters()).device
pin_memory = (device.type != 'cpu')
def accumulate_grad(x, y, *args):
with torch.enable_grad():
out = cloned_model(x.to(device))
loss = torch.nn.functional.cross_entropy(out, y.to(device))
loss.backward()
def weight_grad():
return dict(wgrad=module.weight.grad)
module.weight.grad = None
wg = tally.tally_each(accumulate_grad, dataset, summarize=weight_grad,
batch_size=batch_size,
num_workers=num_workers, pin_memory=pin_memory,
sample_size=sample_size,
cachefile=f'{cachedir}/weight_grad.npz' if cachedir else None)['wgrad']
return wg
def update_stats(model, dataset, layer,
unit=None,
cachedir=None,
k=100, r=4096,
batch_size=10,
cinv=None,
sample_size=None,
num_workers=30,
):
assert not model.training
if unit is not None:
if not hasattr(unit, '__len__'):
unit = [unit]
assert unit is None or len(unit) > 0
# get weight grad (assumes layer has a weight param)
wg = weight_grad(model, dataset, layer,
cachedir=cachedir,
batch_size=batch_size,
sample_size=sample_size,
num_workers=num_workers)
if cinv is not None:
wg = torch.mm(wg.view(-1,
cinv.shape[0]).cpu(),
cinv.cpu()).view(wg.shape)
# copy the model so we can change its weights.
cloned_model = copy.deepcopy(model)
nethook.set_requires_grad(False, cloned_model)
module = nethook.get_module(cloned_model, layer)
device = next(cloned_model.parameters()).device
pin_memory = (device.type != 'cpu')
with torch.no_grad():
module.weight[...] = -wg.to(device)
if hasattr(module, 'bias') and module.bias is not None:
module.bias[...] = 0
def run(x, *args):
with nethook.Trace(module, stop=True) as ret, torch.no_grad():
cloned_model(x.to(device))
r = ret.output
if unit is not None:
r = r[:, unit]
return r
run.name = 'update' if cinv is None else 'proj'
def compute_samples(batch, *args):
r = run(batch)
flat_r = r.view(r.shape[0], r.shape[1], -1)
top_r = flat_r.max(2)[0]
bot_r = flat_r.min(2)[0]
all_r = r.permute(0, 2, 3, 1).reshape(-1, r.shape[1])
return top_r, bot_r, all_r
topk, botk, rq = tally.tally_extremek_and_quantile(
compute_samples, dataset, k=k, r=r,
batch_size=batch_size,
num_workers=num_workers, pin_memory=pin_memory,
sample_size=sample_size,
cachefile=f'{cachedir}/{run.name}_exk_rq.npz' if cachedir else None)
return topk, botk, rq, run
def proj_c2m(model, dataset, layer,
cachedir=None,
batch_size=10,
sample_size=None,
num_workers=30,
):
assert not model.training
device = next(model.parameters()).device
pin_memory = (device.type != 'cpu')
cloned_model = copy.deepcopy(model)
module = nethook.get_module(cloned_model, layer)
assert isinstance(module, torch.nn.Conv2d)
nethook.set_requires_grad(False, cloned_model)
unraveled = unravelconv.unravel_left_conv2d(module)
unraveled.wconv.weight.requires_grad = True
unraveled.wconv.weight.grad = None
nethook.replace_module(cloned_model, layer, unraveled)
tconv = unraveled.tconv
def ex_run(x, *args):
with nethook.Trace(tconv, stop=True) as unrav:
cloned_model(x.to(device))
return unrav.output
def ex_sample(x, *args):
r = ex_run(x, *args)
return r.permute(0, 2, 3, 1).reshape(-1, r.shape[1])
c2m = tally.tally_second_moment(ex_sample,
dataset,
batch_size=batch_size,
num_workers=num_workers, pin_memory=pin_memory,
sample_size=sample_size,
cachefile=f'{cachedir}/input_cov_moment.npz' if cachedir else None)
return c2m, ex_run
def proj_stats(model, dataset, layer,
unit=None,
cachedir=None,
k=100, r=4096,
batch_size=10,
sample_size=None,
num_workers=30,
):
c2m, ex_run = proj_c2m(model, dataset, layer,
batch_size=batch_size, sample_size=sample_size,
cachedir=cachedir)
# old obsolete method - not stable.
# Cinv = c2m.momentPSD().cholesky_inverse()
moment = c2m.moment()
# TODO: consider uncommenting the following, which uses
# correlation for a better-conditioned inverse.
# Change 2.0 to 3.0 to reduce amplifying near-zero feats.
# rn = moment.diag().clamp(1e-30).pow(-1/2.0)
# moment = moment * rn[None,:] * rn[:,None]
# The following is standard regularization, to try.
# moment.diagonal.add_(1e-3)
Cinv = moment.pinverse()
return update_stats(model, dataset, layer, unit=unit,
cinv=Cinv,
k=k, r=r, batch_size=batch_size, sample_size=sample_size,
cachedir=cachedir)
def window_images(dataset, topk, rq, run,
thumbsize=None,
return_as='strip', # or individual, or tensor
k=None, q=0.01,
border_color=None,
vizname=None,
cachedir=None):
assert return_as in ['strip', 'individual', 'tensor']
input_sample = default_collate([dataset[0]])
r_sample = run(*input_sample)
x_size = tuple(input_sample[0].shape[2:])
if thumbsize is None:
thumbsize = x_size
if not isinstance(thumbsize, (list, tuple)):
thumbsize = (thumbsize, thumbsize)
if topk is None:
topk = tally.range_topk(r_sample.size(1), size=(k or 1))
default_vizname = 'top' if topk.largest else 'bot'
if border_color in ['red', 'green', 'yellow']:
default_vizname += border_color
border_color = dict(red=[255.0, 0.0, 0.0], green=[0.0, 255.0, 0.0],
yellow=[255.0, 255.0, 0.0])[border_color]
if vizname is None:
vizname = default_vizname
iv = imgviz.ImageVisualizer(
thumbsize, image_size=x_size, source=dataset,
level=rq.quantiles((1.0 - q) if topk.largest else q))
func = dict(
strip=iv.masked_images_for_topk,
individual=iv.individual_masked_images_for_topk,
tensor=iv.masked_image_grid_for_topk)[return_as]
acts_images = func(run, dataset, topk, k=k, largest=topk.largest,
border_color=border_color,
cachefile=f'{cachedir}/{vizname}{k or ""}images.npz' if cachedir else None)
return acts_images
def label_stats(dataset_with_seg, num_seglabels,
run, level, upfn=None,
negate=False,
cachedir=None,
batch_size=10,
sample_size=None,
num_workers=30):
# Create upfn
data_sample = default_collate([dataset_with_seg[0]])
input_sample = data_sample[:-2] + data_sample[-1:]
seg_sample = data_sample[-2]
r_sample = run(*input_sample)
r_size = tuple(r_sample.shape[2:])
seg_size = tuple(seg_sample.shape[2:])
device = r_sample.device
pin_memory = (device.type != 'cpu')
if upfn is None:
upfn = upsample.upsampler(seg_size, r_size)
def compute_concept_pair(batch, seg, *args):
seg = seg.to(device)
acts = run(batch, *args)
hacts = upfn(acts)
iacts = (hacts < level if negate else hacts > level) # indicator
iseg = torch.zeros(seg.shape[0], num_seglabels,
seg.shape[2], seg.shape[3],
dtype=torch.bool, device=seg.device)
iseg.scatter_(dim=1, index=seg, value=1)
flat_segs = iseg.permute(0, 2, 3, 1).reshape(-1, iseg.shape[1])
flat_acts = iacts.permute(0, 2, 3, 1).reshape(-1, iacts.shape[1])
return flat_segs, flat_acts
neg = 'neg' if negate else ''
iu99 = tally.tally_all_intersection_and_union(
compute_concept_pair,
dataset_with_seg,
sample_size=sample_size,
num_workers=num_workers, pin_memory=pin_memory,
cachefile=f'{cachedir}/{neg}{run.name}_iu.npz' if cachedir else None)
return iu99
def topk_label_stats(dataset_with_seg, num_seglabels,
run, level, topk, k=None,
upfn=None,
negate=False,
cachedir=None,
batch_size=10,
sample_size=None,
num_workers=30):
# Create upfn
data_sample = default_collate([dataset_with_seg[0]])
input_sample = data_sample[:-2] + data_sample[-1:]
seg_sample = data_sample[-2]
r_sample = run(*input_sample)
r_size = tuple(r_sample.shape[2:])
seg_size = tuple(seg_sample.shape[2:])
device = r_sample.device
num_units = r_sample.shape[1]
pin_memory = (device.type != 'cpu')
if upfn is None:
upfn = upsample.upsampler(seg_size, r_size)
intersections = torch.zeros(num_units, num_seglabels).to(device)
unions = torch.zeros(num_units, num_seglabels).to(device)
def collate_unit_iou(units, imgs, seg, labels):
seg = seg.to(device)
acts = run(imgs, labels)
hacts = upfn(acts)
iacts = (hacts > level) # indicator
iseg = torch.zeros(seg.shape[0], num_seglabels,
seg.shape[2], seg.shape[3],
dtype=torch.bool, device=seg.device)
iseg.scatter_(dim=1, index=seg, value=1)
for i in range(len(imgs)):
ulist = units[i]
for unit, _ in ulist:
im_i = (iacts[i, unit][None] & iseg[i]).view(
num_seglabels, -1).float().sum(1)
im_u = (iacts[i, unit][None] | iseg[i]).view(
num_seglabels, -1).float().sum(1)
intersections[unit] += im_i
unions[unit] += im_u
return []
tally.gather_topk(collate_unit_iou, dataset_with_seg, topk, k=100)
return intersections / (unions + 1e-20)
### Experiment below - find the best representative with gradient in the consensus direction.
# 1. Tally weight grad over the dataset.
# 2. For each unit, find the topk images with gradients in the same direction as this
# consensus weight grad.
def wgrad_stats(model, dataset, layer, cachedir=None,
k=100, r=4096,
batch_size=10,
sample_size=None,
num_workers=30,
):
assert not model.training
if layer is not None:
module = nethook.get_module(model, layer)
else:
module = model
device = next(model.parameters()).device
pin_memory = (device.type != 'cpu')
cloned_model = copy.deepcopy(model)
nethook.set_requires_grad(False, cloned_model)
module = nethook.get_module(cloned_model, layer)
module.weight.requires_grad = True
module.weight.grad = None
wg = weight_grad(model, dataset, layer,
cachedir=cachedir,
batch_size=batch_size,
sample_size=sample_size,
num_workers=num_workers)
wg = wg.to(device)
module.weight.requires_grad = False
ks = module.kernel_size
unfolder = torch.nn.Conv2d(
in_channels=module.in_channels, out_channels=module.out_channels,
kernel_size=ks, padding=module.padding,
dilation=module.dilation, stride=module.stride,
bias=False)
nethook.set_requires_grad(False, unfolder)
unfolder.to(device)
unfolder.weight[...] = wg
def run(x, y, *args, return_details=False):
with nethook.Trace(module, retain_grad=True, retain_input=True) as ret, (
torch.enable_grad()):
out = cloned_model(x.to(device))
r = ret.output
inp = ret.input
loss = torch.nn.functional.cross_entropy(out, y.to(device))
loss.backward()
# The contribution to the weight gradient from every patch.
# If we were to sum unfgrad.sum(dim=(0,5,6)) it would equal module.weight.grad
# Now to reduce things, we need to score it per-patch somehow. We will dot-product
# the average grad per-unit to see which patches push most in the consensus direction.
# This gives a per-unit score at every patch.
score = unfolder(inp) * r.grad
# Hack: it is interesting to separate the cases where r.grad is positive
# (the patch should look more like this to decrease the loss) from cases
# where it is negative (the patch should look less like this). So we drop
# cases where the score is negative, and then negate the score when
# r.grad is negative.
signed_score = score.clamp(0) * (r.grad.sign())
if return_details:
return {k: v.detach().cpu() for k, v in dict(
model_output=out,
loss=loss,
layer_output=r,
layer_output_grad=r.grad,
layer_input=inp,
layer_input_by_Edw=unfolder(inp),
weight_grad=wg,
score=score,
signed_score=signed_score).items()}
return signed_score
# Equivalent unrolled code below.
# scores = []
# for i in range(0, len(unf), 2):
# ug = unf[i:i+2,None,:,:,:,:,:] * r.grad[i:i+2,:,None,None,None,:,:]
# # Now to reduce things, we need to score it per-patch somehow. We will dot-product
# # the average grad per-unit to see which patches push most in the consensus direction.
# # This gives a per-unit score at every patch.
# score = (ug * wg[None,:,:,:,:,None,None]
# ).view(ug.shape[0], ug.shape[1], -1, ug.shape[5], ug.shape[6]).sum(2)
# scores.append(score)
# return torch.cat(scores)
run.name = 'wgrad'
def compute_samples(batch, labels, *args):
score = run(batch, labels)
flat_score = score.view(score.shape[0], score.shape[1], -1)
top_score = flat_score.max(2)[0]
bot_score = flat_score.min(2)[0]
all_score = score.permute(0, 2, 3, 1).reshape(-1, score.shape[1])
return top_score, bot_score, all_score
topk, botk, rq = tally.tally_extremek_and_quantile(
compute_samples, dataset, k=k, r=r,
batch_size=batch_size,
num_workers=num_workers, pin_memory=pin_memory,
sample_size=sample_size,
cachefile=f'{cachedir}/swgrad_exk_rq.npz' if cachedir else None)
return topk, botk, rq, run
### Experiment below:
# tally (p - y) times every post-relu activation in a layer,
# and also sum up every activation.
# This is intended to measure how well a (simple linear) model
# of the given feature can help correct the error p - y.
def sep_stats(model, dataset, layer=None, cachedir=None,
batch_size=10, sample_size=None, num_workers=30):
assert not model.training
if layer is not None:
module = nethook.get_module(model, layer)
else:
module = model
device = next(model.parameters()).device
pin_memory = (device.type != 'cpu')
def run(x, labels, *args):
with nethook.Trace(module) as ret, torch.no_grad():
logits = model(x.to(device))
labels = labels.to(device)
r = ret.output
p = torch.nn.functional.softmax(logits, dim=1)
y = torch.zeros_like(p)
y.scatter_(1, labels[:,None], 1)
return r, p, y
def compute_samples(batch, labels, *args):
r, p, y = run(batch, labels)
err = p-y
sep_t = torch.cat((err, y, torch.ones(err.shape[0], 1, device=device)), dim=1)
flat_r = r.view(r.shape[0], r.shape[1], -1).mean(2)[:,:,None]
r_times_sep_t = flat_r * sep_t[:,None,:]
# Number of stats to track is units * (classes + 1)
sep_data = r_times_sep_t.view(len(batch), -1)
return sep_data
sepmv = tally.tally_mean(
compute_samples, dataset,
batch_size=batch_size,
num_workers=num_workers, pin_memory=pin_memory,
sample_size=sample_size,
cachefile=f'{cachedir}/sep_stats.npz' if cachedir else None)
return sepmv
|
import django_filters
import json
from django import forms
from django.db.models import Count
from django.conf import settings
from django.contrib.auth.models import User
from django.forms import BoundField
from django.urls import reverse
from taggit.forms import TagField
from .constants import *
from .fields import ColorSelect, CommentField, SlugField
from .models import ObjectChange, Tag
def add_blank_choice(choices):
"""
Add a blank choice to the beginning of a choices list.
"""
return ((None, "---------"),) + tuple(choices)
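# Example (illustrative, not from this module):
#   add_blank_choice([(1, "Active"), (2, "Offline")])
# returns ((None, "---------"), (1, "Active"), (2, "Offline")), which is handy
# as the choices of an optional ChoiceField.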
class BulkEditForm(forms.Form):
"""
Base form for editing several objects at the same time.
"""
def __init__(self, model, parent_object=None, *args, **kwargs):
super().__init__(*args, **kwargs)
self.model = model
self.parent_object = parent_object
self.nullable_fields = []
if hasattr(self.Meta, "nullable_fields"):
self.nullable_fields = self.Meta.nullable_fields
class BootstrapMixin(forms.BaseForm):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
custom_widgets = [forms.CheckboxInput, forms.RadioSelect]
for field_name, field in self.fields.items():
if field.widget.__class__ in custom_widgets:
css = field.widget.attrs.get("class", "")
field.widget.attrs["class"] = " ".join(
[css, "custom-control-input"]
).strip()
else:
css = field.widget.attrs.get("class", "")
field.widget.attrs["class"] = " ".join([css, "form-control"]).strip()
if field.required:
field.widget.attrs["required"] = "required"
if "placeholder" not in field.widget.attrs:
field.widget.attrs["placeholder"] = field.label
class ConfirmationForm(BootstrapMixin, forms.Form):
"""
A generic confirmation form. The form is not valid unless the confirm field
is checked.
"""
confirm = forms.BooleanField(
required=True, widget=forms.HiddenInput(), initial=True
)
class TableConfigurationForm(BootstrapMixin, forms.Form):
"""
Form used to configure a table and store the result in the user's preferences.
"""
columns = forms.MultipleChoiceField(
choices=[],
widget=forms.SelectMultiple(attrs={"size": 10}),
help_text="Use the buttons below to arrange columns in the desired order, then select all columns to display.",
)
def __init__(self, table, *args, **kwargs):
super().__init__(*args, **kwargs)
self.fields["columns"].choices = table.configurable_columns
self.fields["columns"].initial = table.visible_columns
class SmallTextarea(forms.Textarea):
"""
Just to be used as a small text area.
"""
pass
class APISelect(forms.Select):
"""
Select widget using API calls to populate its choices.
"""
def __init__(
self,
api_url=None,
display_field=None,
value_field=None,
disabled_indicator=None,
filter_for=None,
conditional_query_params=None,
additional_query_params=None,
null_option=False,
full=False,
*args,
**kwargs,
):
super().__init__(*args, **kwargs)
self.attrs["class"] = "custom-select2-api"
if api_url:
self.attrs["data-url"] = f"/{settings.BASE_PATH}{api_url.lstrip("/")}"
if full:
self.attrs["data-full"] = full
if display_field:
self.attrs["display-field"] = display_field
if value_field:
self.attrs["value-field"] = value_field
if disabled_indicator:
self.attrs["disabled-indicator"] = disabled_indicator
if filter_for:
for key, value in filter_for.items():
self.add_filter_for(key, value)
if conditional_query_params:
for key, value in conditional_query_params.items():
self.add_conditional_query_param(key, value)
if additional_query_params:
for key, value in additional_query_params.items():
self.add_additional_query_param(key, value)
if null_option:
self.attrs["data-null-option"] = 1
def add_filter_for(self, name, value):
"""
Add details for an additional query param in the form of a data-filter-for-*
attribute.
"""
self.attrs[f"data-filter-for-{name}"] = value
def add_additional_query_param(self, name, value):
"""
Add details for an additional query param in the form of a data-* JSON-encoded
list attribute.
"""
key = f"data-additional-query-param-{name}"
values = json.loads(self.attrs.get(key, "[]"))
values.append(value)
self.attrs[key] = json.dumps(values)
def add_conditional_query_param(self, condition, value):
"""
Add details for a URL query string to append to the URL if the condition is
met. The condition is specified in the form `<field_name>__<field_value>`.
"""
self.attrs[f"data-conditional-query-param-{condition}"] = value
class APISelectMultiple(APISelect, forms.SelectMultiple):
"""
Same API select widget using select2 but allowing multiple choices.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.attrs["data-multiple"] = 1
self.attrs["data-close-on-select"] = 0
class StaticSelect(forms.Select):
"""
Select widget for static choices leveraging the select2 component.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.attrs["class"] = "custom-select2-static"
class StaticSelectMultiple(StaticSelect, forms.SelectMultiple):
"""
Same static select widget using select2 but allowing multiple choices.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.attrs["data-multiple"] = 1
self.attrs["data-close-on-select"] = 0
class CustomNullBooleanSelect(StaticSelect):
"""
Do not enforce True/False when not selecting an option.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.choices = (("unknown", "---------"), ("true", "Yes"), ("false", "No"))
class DynamicModelChoiceMixin(object):
filter = django_filters.ModelChoiceFilter
widget = APISelect
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def get_bound_field(self, form, field_name):
bound_field = BoundField(form, self, field_name)
# Modify the QuerySet of the field before we return it. Limit choices to any
# data already bound: Options will be populated on-demand via the APISelect
# widget
data = self.prepare_value(bound_field.data or bound_field.initial)
if data:
filter = self.filter(
field_name=self.to_field_name or "pk", queryset=self.queryset
)
self.queryset = filter.filter(self.queryset, data)
else:
self.queryset = self.queryset.none()
# Set the data URL on the APISelect widget (if not already set)
widget = bound_field.field.widget
if not widget.attrs.get("data-url"):
data_url = reverse(
f"{self.queryset.model._meta.app_label}-api:{self.queryset.model._meta.model_name}-list"
)
widget.attrs["data-url"] = data_url
return bound_field
class DynamicModelChoiceField(DynamicModelChoiceMixin, forms.ModelChoiceField):
"""
Override get_bound_field() to avoid pre-populating field choices with a SQL query.
The field will be rendered only with choices set via bound data.
Choices are populated on-demand via the APISelect widget.
"""
pass
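# A minimal usage sketch (the Site model and DeviceFilterForm below are
# hypothetical, not part of this module). The field renders with no pre-fetched
# choices; the APISelect widget resolves its data-url via
# reverse("<app_label>-api:<model_name>-list") and loads options on demand:
#
#   class DeviceFilterForm(BootstrapMixin, forms.Form):
#       site = DynamicModelChoiceField(required=False, queryset=Site.objects.all())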
class DynamicModelMultipleChoiceField(
DynamicModelChoiceMixin, forms.ModelMultipleChoiceField
):
"""
A multiple-choice version of DynamicModelChoiceField.
"""
filter = django_filters.ModelMultipleChoiceFilter
widget = APISelectMultiple
class ObjectChangeFilterForm(BootstrapMixin, forms.Form):
model = ObjectChange
q = forms.CharField(required=False, label="Search")
time_after = forms.DateTimeField(
label="After",
required=False,
widget=forms.TextInput(attrs={"placeholder": "YYYY-MM-DD hh:mm:ss"}),
)
time_before = forms.DateTimeField(
label="Before",
required=False,
widget=forms.TextInput(attrs={"placeholder": "YYYY-MM-DD hh:mm:ss"}),
)
action = forms.ChoiceField(
required=False,
choices=OBJECT_CHANGE_ACTION_CHOICES,
widget=StaticSelectMultiple,
)
user = forms.ModelChoiceField(
required=False,
queryset=User.objects.order_by("username"),
widget=StaticSelectMultiple,
)
class TagBulkEditForm(BootstrapMixin, BulkEditForm):
pk = forms.ModelMultipleChoiceField(
queryset=Tag.objects.all(), widget=forms.MultipleHiddenInput
)
color = forms.CharField(max_length=6, required=False, widget=ColorSelect())
class Meta:
nullable_fields = ["comments"]
class TagFilterForm(BootstrapMixin, forms.Form):
model = Tag
q = forms.CharField(required=False, label="Search")
class TagForm(BootstrapMixin, forms.ModelForm):
slug = SlugField()
comments = CommentField()
class Meta:
model = Tag
fields = ["name", "slug", "color", "comments"]
class AddRemoveTagsForm(forms.Form):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.fields["add_tags"] = TagField(required=False)
self.fields["remove_tags"] = TagField(required=False)
class TagFilterField(forms.MultipleChoiceField):
"""
A filter field for the tags of a model.
Only the tags used by a model are displayed.
"""
widget = StaticSelectMultiple
def __init__(self, model, *args, **kwargs):
def get_choices():
tags = model.tags.annotate(count=Count("utils_taggeditem_items")).order_by(
"name"
)
return [(str(tag.slug), f"{tag.name} ({tag.count})") for tag in tags]
# Choices are fetched each time the form is initialized
super().__init__(
label="Tags", choices=get_choices, required=False, *args, **kwargs
)
| import django_filters
import json
from django import forms
from django.db.models import Count
from django.conf import settings
from django.contrib.auth.models import User
from django.forms import BoundField
from django.urls import reverse
from taggit.forms import TagField
from .constants import *
from .fields import ColorSelect, CommentField, SlugField
from .models import ObjectChange, Tag
def add_blank_choice(choices):
"""
Add a blank choice to the beginning of a choices list.
"""
return ((None, "---------"),) + tuple(choices)
class BulkEditForm(forms.Form):
"""
Base form for editing several objects at the same time.
"""
def __init__(self, model, parent_object=None, *args, **kwargs):
super().__init__(*args, **kwargs)
self.model = model
self.parent_object = parent_object
self.nullable_fields = []
if hasattr(self.Meta, "nullable_fields"):
self.nullable_fields = self.Meta.nullable_fields
class BootstrapMixin(forms.BaseForm):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
custom_widgets = [forms.CheckboxInput, forms.RadioSelect]
for field_name, field in self.fields.items():
if field.widget.__class__ in custom_widgets:
css = field.widget.attrs.get("class", "")
field.widget.attrs["class"] = " ".join(
[css, "custom-control-input"]
).strip()
else:
css = field.widget.attrs.get("class", "")
field.widget.attrs["class"] = " ".join([css, "form-control"]).strip()
if field.required:
field.widget.attrs["required"] = "required"
if "placeholder" not in field.widget.attrs:
field.widget.attrs["placeholder"] = field.label
class ConfirmationForm(BootstrapMixin, forms.Form):
"""
A generic confirmation form. The form is not valid unless the confirm field
is checked.
"""
confirm = forms.BooleanField(
required=True, widget=forms.HiddenInput(), initial=True
)
class TableConfigurationForm(BootstrapMixin, forms.Form):
"""
Form used to configure a table and store the result in the user's preferences.
"""
columns = forms.MultipleChoiceField(
choices=[],
widget=forms.SelectMultiple(attrs={"size": 10}),
help_text="Use the buttons below to arrange columns in the desired order, then select all columns to display.",
)
def __init__(self, table, *args, **kwargs):
super().__init__(*args, **kwargs)
self.fields["columns"].choices = table.configurable_columns
self.fields["columns"].initial = table.visible_columns
class SmallTextarea(forms.Textarea):
"""
Just to be used as a small text area.
"""
pass
class APISelect(forms.Select):
"""
Select widget using API calls to populate its choices.
"""
def __init__(
self,
api_url=None,
display_field=None,
value_field=None,
disabled_indicator=None,
filter_for=None,
conditional_query_params=None,
additional_query_params=None,
null_option=False,
full=False,
*args,
**kwargs,
):
super().__init__(*args, **kwargs)
self.attrs["class"] = "custom-select2-api"
if api_url:
self.attrs["data-url"] = f"/{settings.BASE_PATH}{api_url.lstrip('/')}"
if full:
self.attrs["data-full"] = full
if display_field:
self.attrs["display-field"] = display_field
if value_field:
self.attrs["value-field"] = value_field
if disabled_indicator:
self.attrs["disabled-indicator"] = disabled_indicator
if filter_for:
for key, value in filter_for.items():
self.add_filter_for(key, value)
if conditional_query_params:
for key, value in conditional_query_params.items():
self.add_conditional_query_param(key, value)
if additional_query_params:
for key, value in additional_query_params.items():
self.add_additional_query_param(key, value)
if null_option:
self.attrs["data-null-option"] = 1
def add_filter_for(self, name, value):
"""
Add details for an additional query param in the form of a data-filter-for-*
attribute.
"""
self.attrs[f"data-filter-for-{name}"] = value
def add_additional_query_param(self, name, value):
"""
Add details for an additional query param in the form of a data-* JSON-encoded
list attribute.
"""
key = f"data-additional-query-param-{name}"
values = json.loads(self.attrs.get(key, "[]"))
values.append(value)
self.attrs[key] = json.dumps(values)
def add_conditional_query_param(self, condition, value):
"""
Add details for a URL query string to append to the URL if the condition is
met. The condition is specified in the form `<field_name>__<field_value>`.
"""
self.attrs[f"data-conditional-query-param-{condition}"] = value
class APISelectMultiple(APISelect, forms.SelectMultiple):
"""
Same API select widget using select2 but allowing multiple choices.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.attrs["data-multiple"] = 1
self.attrs["data-close-on-select"] = 0
class StaticSelect(forms.Select):
"""
Select widget for static choices leveraging the select2 component.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.attrs["class"] = "custom-select2-static"
class StaticSelectMultiple(StaticSelect, forms.SelectMultiple):
"""
Same static select widget using select2 but allowing multiple choices.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.attrs["data-multiple"] = 1
self.attrs["data-close-on-select"] = 0
class CustomNullBooleanSelect(StaticSelect):
"""
Do not enforce True/False when not selecting an option.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.choices = (("unknown", "---------"), ("true", "Yes"), ("false", "No"))
class DynamicModelChoiceMixin(object):
filter = django_filters.ModelChoiceFilter
widget = APISelect
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def get_bound_field(self, form, field_name):
bound_field = BoundField(form, self, field_name)
# Modify the QuerySet of the field before we return it. Limit choices to any
# data already bound: Options will be populated on-demand via the APISelect
# widget
data = self.prepare_value(bound_field.data or bound_field.initial)
if data:
filter = self.filter(
field_name=self.to_field_name or "pk", queryset=self.queryset
)
self.queryset = filter.filter(self.queryset, data)
else:
self.queryset = self.queryset.none()
# Set the data URL on the APISelect widget (if not already set)
widget = bound_field.field.widget
if not widget.attrs.get("data-url"):
data_url = reverse(
f"{self.queryset.model._meta.app_label}-api:{self.queryset.model._meta.model_name}-list"
)
widget.attrs["data-url"] = data_url
return bound_field
class DynamicModelChoiceField(DynamicModelChoiceMixin, forms.ModelChoiceField):
"""
Override get_bound_field() to avoid pre-populating field choices with a SQL query.
The field will be rendered only with choices set via bound data.
Choices are populated on-demand via the APISelect widget.
"""
pass
class DynamicModelMultipleChoiceField(
DynamicModelChoiceMixin, forms.ModelMultipleChoiceField
):
"""
A multiple-choice version of DynamicModelChoiceField.
"""
filter = django_filters.ModelMultipleChoiceFilter
widget = APISelectMultiple
class ObjectChangeFilterForm(BootstrapMixin, forms.Form):
model = ObjectChange
q = forms.CharField(required=False, label="Search")
time_after = forms.DateTimeField(
label="After",
required=False,
widget=forms.TextInput(attrs={"placeholder": "YYYY-MM-DD hh:mm:ss"}),
)
time_before = forms.DateTimeField(
label="Before",
required=False,
widget=forms.TextInput(attrs={"placeholder": "YYYY-MM-DD hh:mm:ss"}),
)
action = forms.ChoiceField(
required=False,
choices=OBJECT_CHANGE_ACTION_CHOICES,
widget=StaticSelectMultiple,
)
user = forms.ModelChoiceField(
required=False,
queryset=User.objects.order_by("username"),
widget=StaticSelectMultiple,
)
class TagBulkEditForm(BootstrapMixin, BulkEditForm):
pk = forms.ModelMultipleChoiceField(
queryset=Tag.objects.all(), widget=forms.MultipleHiddenInput
)
color = forms.CharField(max_length=6, required=False, widget=ColorSelect())
class Meta:
nullable_fields = ["comments"]
class TagFilterForm(BootstrapMixin, forms.Form):
model = Tag
q = forms.CharField(required=False, label="Search")
class TagForm(BootstrapMixin, forms.ModelForm):
slug = SlugField()
comments = CommentField()
class Meta:
model = Tag
fields = ["name", "slug", "color", "comments"]
class AddRemoveTagsForm(forms.Form):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.fields["add_tags"] = TagField(required=False)
self.fields["remove_tags"] = TagField(required=False)
class TagFilterField(forms.MultipleChoiceField):
"""
A filter field for the tags of a model.
Only the tags used by a model are displayed.
"""
widget = StaticSelectMultiple
def __init__(self, model, *args, **kwargs):
def get_choices():
tags = model.tags.annotate(count=Count("utils_taggeditem_items")).order_by(
"name"
)
return [(str(tag.slug), f"{tag.name} ({tag.count})") for tag in tags]
# Choices are fetched each time the form is initialized
super().__init__(
label="Tags", choices=get_choices, required=False, *args, **kwargs
)
|
import difflib
import os
from functools import partial
ANSI_ESCAPE_CODES = {
"green": "\x1b[32m",
"red": "\x1b[31m",
"reset": "\x1b[39m",
}
# Recipe from https://github.com/ActiveState/
# code/recipes/Python/577452_memoize_decorator_instance/recipe-577452.py
class memoize:
"""cache the return value of a method
This class is meant to be used as a decorator of methods. The return value
from a given method invocation will be cached on the instance whose method
was invoked. All arguments passed to a method decorated with memoize must
be hashable.
If a memoized method is invoked directly on its class the result will not
be cached. Instead the method will be invoked like a static method:
class Obj:
@memoize
def add_to(self, arg):
return self + arg
Obj.add_to(1) # not enough arguments
Obj.add_to(1, 2) # returns 3, result is not cached
"""
def __init__(self, func):
self.func = func
def __get__(self, obj, objtype=None):
if obj is None:
return self.func
return partial(self, obj)
def __call__(self, *args, **kw):
obj = args[0]
try:
cache = obj.__cache
except AttributeError:
cache = obj.__cache = {}
key = (self.func, args[1:], frozenset(kw.items()))
try:
res = cache[key]
except KeyError:
res = cache[key] = self.func(*args, **kw)
return res
def colorize(text, color):
color_code = ANSI_ESCAPE_CODES[color]
return f"{color_code}{text}{ANSI_ESCAPE_CODES['reset']}"
def red(text):
return colorize(text, "red")
def green(text):
return colorize(text, "green")
def rangify(number_list):
"""Assumes the list is sorted."""
if not number_list:
return number_list
ranges = []
range_start = prev_num = number_list[0]
for num in number_list[1:]:
if num != (prev_num + 1):
ranges.append((range_start, prev_num))
range_start = num
prev_num = num
ranges.append((range_start, prev_num))
return ranges
def stringify(number_list):
"""Assumes the list is sorted."""
rangified_list = rangify(number_list)
stringified_list = [
f"{line_start}" if line_start == line_stop else f"{line_start}-{line_stop}"
for line_start, line_stop in rangified_list
]
return ", ".join(stringified_list)
def extrapolate_coverage(lines_w_status):
"""
Given the following input:
>>> lines_w_status = [
(1, True),
(4, True),
(7, False),
(9, False),
]
Return expanded lines with their extrapolated line status.
>>> extrapolate_coverage(lines_w_status) == [
(1, True),
(2, True),
(3, True),
(4, True),
(5, None),
(6, None),
(7, False),
(8, False),
(9, False),
]
"""
lines = []
prev_lineno = 0
prev_status = True
for lineno, status in lines_w_status:
while (lineno - prev_lineno) > 1:
prev_lineno += 1
if prev_status is status:
lines.append((prev_lineno, status))
else:
lines.append((prev_lineno, None))
lines.append((lineno, status))
prev_lineno = lineno
prev_status = status
return lines
def reconcile_lines(lines1, lines2):
"""
Return a dict `{lineno1: lineno2}` which reconciles line numbers `lineno1`
of list `lines1` to line numbers `lineno2` of list `lines2`. Only lines
that are common in both sets are present in the dict, lines unique to one
of the sets are omitted.
"""
differ = difflib.Differ()
diff = differ.compare(lines1, lines2)
SAME = " "
ADDED = "+ "
REMOVED = "- "
INFO = "? "
lineno_map = {} # {lineno1: lineno2, ...}
lineno1_offset = 0
lineno2 = 1
for diffline in diff:
if diffline.startswith(INFO):
continue
if diffline.startswith(SAME):
lineno1 = lineno2 + lineno1_offset
lineno_map[lineno1] = lineno2
elif diffline.startswith(ADDED):
lineno1_offset -= 1
elif diffline.startswith(REMOVED):
lineno1_offset += 1
continue
lineno2 += 1
return lineno_map
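# Illustrative behaviour (a sketch): when a line is removed from the first list,
# the remaining common lines are shifted accordingly.
#
#     reconcile_lines(["a", "b", "c"], ["a", "c"])  # {1: 1, 3: 2}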
def hunkify_lines(lines, context=3):
"""
Return a list of line hunks given a list of lines `lines`. The number of
    context lines can be controlled with `context`, which will return line hunks
surrounded with `context` lines before and after the code change.
"""
# Find contiguous line changes
ranges = []
range_start = None
for i, line in enumerate(lines):
if line.status is not None:
if range_start is None:
range_start = i
continue
elif range_start is not None:
range_stop = i
ranges.append((range_start, range_stop))
range_start = None
else:
# Append the last range
if range_start is not None:
range_stop = i
ranges.append((range_start, range_stop))
# add context
ranges_w_context = []
for range_start, range_stop in ranges:
range_start = range_start - context
range_start = range_start if range_start >= 0 else 0
range_stop = range_stop + context
ranges_w_context.append((range_start, range_stop))
# merge overlapping hunks
merged_ranges = ranges_w_context[:1]
for range_start, range_stop in ranges_w_context[1:]:
prev_start, prev_stop = merged_ranges[-1]
if range_start <= prev_stop:
range_start = prev_start
merged_ranges[-1] = (range_start, range_stop)
else:
merged_ranges.append((range_start, range_stop))
# build final hunks
hunks = []
for range_start, range_stop in merged_ranges:
hunk = lines[range_start:range_stop]
hunks.append(hunk)
return hunks
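# Illustrative behaviour (a sketch; assumes each element of `lines` exposes a
# `.status` attribute): with 20 lines where only lines 10 and 11 carry a non-None
# status and context=3, a single hunk covering lines 7-14 is returned.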
def get_dir_from_file_path(file_path):
return os.path.dirname(file_path) or "."
| import difflib
import os
from functools import partial
ANSI_ESCAPE_CODES = {
"green": "\x1b[32m",
"red": "\x1b[31m",
"reset": "\x1b[39m",
}
# Recipe from https://github.com/ActiveState/
# code/recipes/Python/577452_memoize_decorator_instance/recipe-577452.py
class memoize:
"""cache the return value of a method
This class is meant to be used as a decorator of methods. The return value
from a given method invocation will be cached on the instance whose method
was invoked. All arguments passed to a method decorated with memoize must
be hashable.
If a memoized method is invoked directly on its class the result will not
be cached. Instead the method will be invoked like a static method:
class Obj:
@memoize
def add_to(self, arg):
return self + arg
Obj.add_to(1) # not enough arguments
Obj.add_to(1, 2) # returns 3, result is not cached
"""
def __init__(self, func):
self.func = func
def __get__(self, obj, objtype=None):
if obj is None:
return self.func
return partial(self, obj)
def __call__(self, *args, **kw):
obj = args[0]
try:
cache = obj.__cache
except AttributeError:
cache = obj.__cache = {}
key = (self.func, args[1:], frozenset(kw.items()))
try:
res = cache[key]
except KeyError:
res = cache[key] = self.func(*args, **kw)
return res
def colorize(text, color):
color_code = ANSI_ESCAPE_CODES[color]
return f'{color_code}{text}{ANSI_ESCAPE_CODES["reset"]}'
def red(text):
return colorize(text, "red")
def green(text):
return colorize(text, "green")
def rangify(number_list):
"""Assumes the list is sorted."""
if not number_list:
return number_list
ranges = []
range_start = prev_num = number_list[0]
for num in number_list[1:]:
if num != (prev_num + 1):
ranges.append((range_start, prev_num))
range_start = num
prev_num = num
ranges.append((range_start, prev_num))
return ranges
def stringify(number_list):
"""Assumes the list is sorted."""
rangified_list = rangify(number_list)
stringified_list = [
f"{line_start}" if line_start == line_stop else f"{line_start}-{line_stop}"
for line_start, line_stop in rangified_list
]
return ", ".join(stringified_list)
def extrapolate_coverage(lines_w_status):
"""
Given the following input:
>>> lines_w_status = [
(1, True),
(4, True),
(7, False),
(9, False),
]
Return expanded lines with their extrapolated line status.
>>> extrapolate_coverage(lines_w_status) == [
(1, True),
(2, True),
(3, True),
(4, True),
(5, None),
(6, None),
(7, False),
(8, False),
(9, False),
]
"""
lines = []
prev_lineno = 0
prev_status = True
for lineno, status in lines_w_status:
while (lineno - prev_lineno) > 1:
prev_lineno += 1
if prev_status is status:
lines.append((prev_lineno, status))
else:
lines.append((prev_lineno, None))
lines.append((lineno, status))
prev_lineno = lineno
prev_status = status
return lines
def reconcile_lines(lines1, lines2):
"""
Return a dict `{lineno1: lineno2}` which reconciles line numbers `lineno1`
of list `lines1` to line numbers `lineno2` of list `lines2`. Only lines
that are common in both sets are present in the dict, lines unique to one
of the sets are omitted.
"""
differ = difflib.Differ()
diff = differ.compare(lines1, lines2)
SAME = " "
ADDED = "+ "
REMOVED = "- "
INFO = "? "
lineno_map = {} # {lineno1: lineno2, ...}
lineno1_offset = 0
lineno2 = 1
for diffline in diff:
if diffline.startswith(INFO):
continue
if diffline.startswith(SAME):
lineno1 = lineno2 + lineno1_offset
lineno_map[lineno1] = lineno2
elif diffline.startswith(ADDED):
lineno1_offset -= 1
elif diffline.startswith(REMOVED):
lineno1_offset += 1
continue
lineno2 += 1
return lineno_map
def hunkify_lines(lines, context=3):
"""
Return a list of line hunks given a list of lines `lines`. The number of
    context lines can be controlled with `context`, which will return line hunks
surrounded with `context` lines before and after the code change.
"""
# Find contiguous line changes
ranges = []
range_start = None
for i, line in enumerate(lines):
if line.status is not None:
if range_start is None:
range_start = i
continue
elif range_start is not None:
range_stop = i
ranges.append((range_start, range_stop))
range_start = None
else:
# Append the last range
if range_start is not None:
range_stop = i
ranges.append((range_start, range_stop))
# add context
ranges_w_context = []
for range_start, range_stop in ranges:
range_start = range_start - context
range_start = range_start if range_start >= 0 else 0
range_stop = range_stop + context
ranges_w_context.append((range_start, range_stop))
# merge overlapping hunks
merged_ranges = ranges_w_context[:1]
for range_start, range_stop in ranges_w_context[1:]:
prev_start, prev_stop = merged_ranges[-1]
if range_start <= prev_stop:
range_start = prev_start
merged_ranges[-1] = (range_start, range_stop)
else:
merged_ranges.append((range_start, range_stop))
# build final hunks
hunks = []
for range_start, range_stop in merged_ranges:
hunk = lines[range_start:range_stop]
hunks.append(hunk)
return hunks
def get_dir_from_file_path(file_path):
return os.path.dirname(file_path) or "."
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
""" Handles the exchange data and communication """
import logging
import inspect
import time
import os
import json
from distutils.util import strtobool
from . import errors
log = logging.getLogger('crypto-exporter')
def short_msg(msg, chars=75):
""" Truncates the message to {chars} characters and adds three dots at the end """
return (str(msg)[:chars] + '..') if len(str(msg)) > chars else str(msg)
def ddos_protection_handler(error, sleep=1, shortify=True):
""" Prints a warning and sleeps """
caller = inspect.stack()[1].function
if shortify:
error = short_msg(error)
log.warning(f'({caller}) Rate limit has been reached. Sleeping for {sleep}s. The exception: {error}')
time.sleep(sleep) # don't hit the rate limit
def exchange_not_available_handler(error, sleep=10, shortify=True):
""" Prints an error and sleeps """
caller = inspect.stack()[1].function
if shortify:
error = short_msg(error)
log.error(f'({caller}) The exchange API could not be reached. Sleeping for {sleep}s. The error: {error}')
time.sleep(sleep) # don't hit the rate limit
def authentication_error_handler(error, nonce='', shortify=True):
""" Logs hints about the authentication error """
caller = inspect.stack()[1].function
if shortify:
error = short_msg(error)
message = f"({caller}) Can't authenticate to read the accounts."
if 'request timestamp expired' in str(error):
if nonce == 'milliseconds':
message += ' Set NONCE to `seconds` and try again.'
elif nonce == 'seconds':
message += ' Set NONCE to `milliseconds` and try again.'
else:
message += f' Check your API_KEY/API_SECRET/API_UID/API_PASS. Disabling the credentials. The exception: {error}'
log.error(message)
def permission_denied_handler(error, shortify=True):
""" Prints error and gives hints about the cause """
caller = inspect.stack()[1].function
if shortify:
error = short_msg(error)
log.error(f'({caller}) The exchange reports "permission denied": {error} Check the API token permissions')
def generic_error_handler(error, shortify=True):
""" Handler for generic errors """
caller = inspect.stack()[1].function
if shortify:
error = short_msg(error)
log.error(f'({caller}) A generic error occurred: {error}')
def gather_environ(keys=None) -> dict:
"""
Return a dict of environment variables correlating to the keys dict
:param keys: The environ keys to use, each of them correlating to `int`, `list`, `json`, `string` or `bool`.
The format of the values should be key = {'key_type': type, 'default': value, 'mandatory': bool}
:return: A dict of found environ values
"""
environs = {}
for key, key_details in keys.items():
environment_key = os.environ.get(key.upper())
if environment_key:
environs.update({key: environment_key})
if key_details['key_type'] == 'int':
environs[key] = int(environment_key)
if key_details['key_type'] == 'list':
environs[key] = environs[key].split(',')
if key_details['key_type'] == 'json':
try:
environs[key] = json.loads(environment_key)
except (TypeError, json.decoder.JSONDecodeError):
log.warning((
f"{key.upper()} does not contain a valid JSON object."
f" Setting to: {key_details["default"]}."
))
environs[key] = key_details['default']
if key_details['key_type'] == 'bool':
try:
environs[key] = strtobool(environment_key)
except ValueError:
log.warning(f"Invalid value for {key.upper()}. Setting to: {key_details["default"]}.")
environs[key] = key_details['default']
if key_details.get('redact'):
log.debug(f"{key.upper()} set to ***REDACTED***")
else:
log.debug(f"{key.upper()} set to {environs[key]}")
elif key_details['mandatory']:
raise errors.EnvironmentMissing(f'{key.upper()} is mandatory')
else:
environs[key] = key_details['default']
log.debug(f"{key.upper()} is not set. Using default: {key_details["default"]}")
return environs
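# Illustrative call (a sketch): with the environment variable TIMEOUT=30 set,
#
#     gather_environ({
#         "timeout": {"key_type": "int", "default": 10, "mandatory": False},
#     })
#
# returns {"timeout": 30}; if TIMEOUT is unset, the default value 10 is used instead.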
| #!/usr/bin/env python3
# -*- coding: utf-8 -*-
""" Handles the exchange data and communication """
import logging
import inspect
import time
import os
import json
from distutils.util import strtobool
from . import errors
log = logging.getLogger('crypto-exporter')
def short_msg(msg, chars=75):
""" Truncates the message to {chars} characters and adds three dots at the end """
return (str(msg)[:chars] + '..') if len(str(msg)) > chars else str(msg)
def ddos_protection_handler(error, sleep=1, shortify=True):
""" Prints a warning and sleeps """
caller = inspect.stack()[1].function
if shortify:
error = short_msg(error)
log.warning(f'({caller}) Rate limit has been reached. Sleeping for {sleep}s. The exception: {error}')
time.sleep(sleep) # don't hit the rate limit
def exchange_not_available_handler(error, sleep=10, shortify=True):
""" Prints an error and sleeps """
caller = inspect.stack()[1].function
if shortify:
error = short_msg(error)
log.error(f'({caller}) The exchange API could not be reached. Sleeping for {sleep}s. The error: {error}')
time.sleep(sleep) # don't hit the rate limit
def authentication_error_handler(error, nonce='', shortify=True):
""" Logs hints about the authentication error """
caller = inspect.stack()[1].function
if shortify:
error = short_msg(error)
message = f"({caller}) Can't authenticate to read the accounts."
if 'request timestamp expired' in str(error):
if nonce == 'milliseconds':
message += ' Set NONCE to `seconds` and try again.'
elif nonce == 'seconds':
message += ' Set NONCE to `milliseconds` and try again.'
else:
message += f' Check your API_KEY/API_SECRET/API_UID/API_PASS. Disabling the credentials. The exception: {error}'
log.error(message)
def permission_denied_handler(error, shortify=True):
""" Prints error and gives hints about the cause """
caller = inspect.stack()[1].function
if shortify:
error = short_msg(error)
log.error(f'({caller}) The exchange reports "permission denied": {error} Check the API token permissions')
def generic_error_handler(error, shortify=True):
""" Handler for generic errors """
caller = inspect.stack()[1].function
if shortify:
error = short_msg(error)
log.error(f'({caller}) A generic error occurred: {error}')
def gather_environ(keys=None) -> dict:
"""
Return a dict of environment variables correlating to the keys dict
:param keys: The environ keys to use, each of them correlating to `int`, `list`, `json`, `string` or `bool`.
The format of the values should be key = {'key_type': type, 'default': value, 'mandatory': bool}
:return: A dict of found environ values
"""
environs = {}
for key, key_details in keys.items():
environment_key = os.environ.get(key.upper())
if environment_key:
environs.update({key: environment_key})
if key_details['key_type'] == 'int':
environs[key] = int(environment_key)
if key_details['key_type'] == 'list':
environs[key] = environs[key].split(',')
if key_details['key_type'] == 'json':
try:
environs[key] = json.loads(environment_key)
except (TypeError, json.decoder.JSONDecodeError):
log.warning((
f"{key.upper()} does not contain a valid JSON object."
f" Setting to: {key_details['default']}."
))
environs[key] = key_details['default']
if key_details['key_type'] == 'bool':
try:
environs[key] = strtobool(environment_key)
except ValueError:
log.warning(f"Invalid value for {key.upper()}. Setting to: {key_details['default']}.")
environs[key] = key_details['default']
if key_details.get('redact'):
log.debug(f"{key.upper()} set to ***REDACTED***")
else:
log.debug(f"{key.upper()} set to {environs[key]}")
elif key_details['mandatory']:
raise errors.EnvironmentMissing(f'{key.upper()} is mandatory')
else:
environs[key] = key_details['default']
log.debug(f"{key.upper()} is not set. Using default: {key_details['default']}")
return environs
|
# -*- coding: utf-8 -*-
"""Dimensional reduction and batch correction using Harmony."""
if __name__ == "__main__":
import anndata as ad
import matplotlib.pyplot as plt
import scanpy as sc
import scanpy.external as sce
from helpers.logs.get_logger import get_logger
from helpers.logs.sc_logs import set_sc_log
from helpers.select_pcs import select_pcs
LOG = snakemake.log[0] # noqa: F821
PARAMS = snakemake.params # noqa: F821
INPUT = snakemake.input # noqa: F821
OUTPUT = snakemake.output # noqa: F821
THREADS = snakemake.threads # noqa: F821
logger = get_logger(__name__, LOG)
sc.settings = set_sc_log(sc.settings, logfile=LOG)
sc.settings.n_jobs = THREADS
# Concatenate samples
adata = ad.concat(
[sc.read_h5ad(path) for path in INPUT["data"]],
join="outer",
merge="same",
label=None,
)
adata.obs_names_make_unique()
logger.info(f"Adata read from {INPUT["data"]}")
logger.info(f"Input data: {adata}")
# HVGs
# Before normalisation as seurat_v3 expects raw counts
if not PARAMS["nHVG"]:
nHVG = max(min(len(adata.obs) / 2, 10000), 1000)
logger.info(f"nHVG not provided. Using {nHVG}.")
sc.pp.highly_variable_genes(
adata,
n_top_genes=nHVG,
flavor="seurat_v3",
batch_key="lane",
subset=False,
)
_ = sc.pl.highly_variable_genes(
adata,
log=False,
show=False,
save=False,
)
plt.savefig(OUTPUT["hvg"], dpi=300, bbox_inches="tight")
plt.close()
# Normalise
# Exclude highly expressed to prevent skew of normalisation
sc.pp.normalize_total(adata, exclude_highly_expressed=True)
sc.pp.log1p(adata)
# Save raw and filter
adata.raw = adata
# Regress and scale
# No batch - covered with bbknn
sc.pp.regress_out(adata, ["total_counts", "pct_counts_mt"], n_jobs=None)
sc.pp.scale(adata, max_value=10)
# PCA
sc.tl.pca(adata, n_comps=50, use_highly_variable=True)
_ = sc.pl.pca_variance_ratio(adata, n_pcs=50, show=False, save=False)
plt.savefig(OUTPUT["elbow"], dpi=300, bbox_inches="tight")
plt.close()
# Harmony for batch correction
    # As Harmony runs on all the PCs included, we must first filter to the desired number
npc, adata = select_pcs(adata, threshold=PARAMS["var_thresh"])
logger.info(f"{npc} PCs used.")
sce.pp.harmony_integrate(
adata,
key="lane",
adjusted_basis="X_harmony",
max_iter_harmony=50,
)
# And save
adata.write_h5ad(OUTPUT["data"])
| # -*- coding: utf-8 -*-
"""Dimensional reduction and batch correction using Harmony."""
if __name__ == "__main__":
import anndata as ad
import matplotlib.pyplot as plt
import scanpy as sc
import scanpy.external as sce
from helpers.logs.get_logger import get_logger
from helpers.logs.sc_logs import set_sc_log
from helpers.select_pcs import select_pcs
LOG = snakemake.log[0] # noqa: F821
PARAMS = snakemake.params # noqa: F821
INPUT = snakemake.input # noqa: F821
OUTPUT = snakemake.output # noqa: F821
THREADS = snakemake.threads # noqa: F821
logger = get_logger(__name__, LOG)
sc.settings = set_sc_log(sc.settings, logfile=LOG)
sc.settings.n_jobs = THREADS
# Concatenate samples
adata = ad.concat(
[sc.read_h5ad(path) for path in INPUT["data"]],
join="outer",
merge="same",
label=None,
)
adata.obs_names_make_unique()
logger.info(f"Adata read from {INPUT['data']}")
logger.info(f"Input data: {adata}")
# HVGs
# Before normalisation as seurat_v3 expects raw counts
if not PARAMS["nHVG"]:
nHVG = max(min(len(adata.obs) / 2, 10000), 1000)
logger.info(f"nHVG not provided. Using {nHVG}.")
sc.pp.highly_variable_genes(
adata,
n_top_genes=nHVG,
flavor="seurat_v3",
batch_key="lane",
subset=False,
)
_ = sc.pl.highly_variable_genes(
adata,
log=False,
show=False,
save=False,
)
plt.savefig(OUTPUT["hvg"], dpi=300, bbox_inches="tight")
plt.close()
# Normalise
# Exclude highly expressed to prevent skew of normalisation
sc.pp.normalize_total(adata, exclude_highly_expressed=True)
sc.pp.log1p(adata)
# Save raw and filter
adata.raw = adata
# Regress and scale
# No batch - covered with bbknn
sc.pp.regress_out(adata, ["total_counts", "pct_counts_mt"], n_jobs=None)
sc.pp.scale(adata, max_value=10)
# PCA
sc.tl.pca(adata, n_comps=50, use_highly_variable=True)
_ = sc.pl.pca_variance_ratio(adata, n_pcs=50, show=False, save=False)
plt.savefig(OUTPUT["elbow"], dpi=300, bbox_inches="tight")
plt.close()
# Harmony for batch correction
    # As Harmony runs on all the PCs included, we must first filter to the desired number
npc, adata = select_pcs(adata, threshold=PARAMS["var_thresh"])
logger.info(f"{npc} PCs used.")
sce.pp.harmony_integrate(
adata,
key="lane",
adjusted_basis="X_harmony",
max_iter_harmony=50,
)
# And save
adata.write_h5ad(OUTPUT["data"])
|
import random
from collections import Counter
from typing import Optional
import discord
from discord.ext import commands
class Plural:
"""Converts a text to plural when used in a f string
Examples
--------
>>> f"{Plural(1):time}"
'1 time'
>>> f"{Plural(5):time}"
'5 times'
>>> f"{Plural(1):match|es}"
'1 match'
>>> f"{Plural(5):match|es}"
'5 matches'
"""
def __init__(self, value):
self.value = value
def __format__(self, format_spec):
v = self.value
singular, sep, plural = format_spec.partition("|")
plural = plural or f"{singular}s"
if abs(v) != 1:
return f"{v} {plural}"
return f"{v} {singular}"
class Random(commands.Cog):
"""Random commands"""
def __init__(self, bot):
self.bot = bot
    @commands.command(aliases=["cbo"])
async def choosebestof(self, ctx, times: Optional[int], *choices: commands.clean_content):
"""Chooses between multiple choices N times."""
if len(choices) < 2:
return await ctx.send("Not enough choices to pick from.")
if times is None:
times = (len(choices) ** 2) + 1
        # Clamp the number of times between a minimum of 1 and a maximum of 10001
times = min(10001, max(1, times))
results = Counter(random.choice(choices) for i in range(times))
builder = []
if len(results) > 10:
builder.append("Only showing top 10 results...")
for index, (elem, count) in enumerate(results.most_common(10), start=1):
builder.append(f"{index}. {elem} ({Plural(count):time}, {count / times:.2%})")
await ctx.send("\n".join(builder))
@commands.command(
name="8ball",
aliases=["eightball", "eight ball", "question", "answer", "8b"],
)
async def _8ball(self, ctx, *, question: commands.clean_content):
"""The user asks a yes-no question to the ball, then the bot reveals an answer."""
answers = [
"It is certain",
"It is decidedly so",
"Without a doubt",
"Yes - definitely",
"You may rely on it",
"As I see it, yes",
"Most likely",
"Outlook good",
"Yes Signs point to yes",
"Reply hazy",
"try again",
"Ask again later",
"Better not tell you now",
"Cannot predict now",
"Concentrate and ask again",
"Don't count on it",
"My reply is no",
"My sources say no",
"Outlook not so good",
"Very doubtful",
]
await ctx.send(f"`Question:` {question}\n`Answer:` {random.choice(answers)}")
@commands.command(aliases=["pick", "choice", "ch"])
async def choose(self, ctx, *, choices):
"""Chooses a random item from a list of items."""
        # We split on commas and strip any surrounding whitespace from each choice
        choices = [choice.strip() for choice in choices.split(",")]
# We generate the embed
embed = discord.Embed(
title="Chosen",
description=f"__Choices__: {", ".join(choices)}\n__Chosen__: {random.choice(choices)}"
)
# We send the embed
await ctx.send(embed=embed)
@commands.command(aliases=["rcmd"])
async def randomcommand(self, ctx):
"""Sends a random command for you to try"""
await ctx.send_help(random.choice(list(self.bot.commands)))
def setup(bot):
"""Adds the cog to the bot"""
bot.add_cog(Random(bot))
| import random
from collections import Counter
from typing import Optional
import discord
from discord.ext import commands
class Plural:
"""Converts a text to plural when used in a f string
Examples
--------
>>> f"{Plural(1):time}"
'1 time'
>>> f"{Plural(5):time}"
'5 times'
>>> f"{Plural(1):match|es}"
'1 match'
>>> f"{Plural(5):match|es}"
'5 matches'
"""
def __init__(self, value):
self.value = value
def __format__(self, format_spec):
v = self.value
singular, sep, plural = format_spec.partition("|")
plural = plural or f"{singular}s"
if abs(v) != 1:
return f"{v} {plural}"
return f"{v} {singular}"
class Random(commands.Cog):
"""Random commands"""
def __init__(self, bot):
self.bot = bot
    @commands.command(aliases=["cbo"])
async def choosebestof(self, ctx, times: Optional[int], *choices: commands.clean_content):
"""Chooses between multiple choices N times."""
if len(choices) < 2:
return await ctx.send("Not enough choices to pick from.")
if times is None:
times = (len(choices) ** 2) + 1
        # Clamp the number of times between a minimum of 1 and a maximum of 10001
times = min(10001, max(1, times))
results = Counter(random.choice(choices) for i in range(times))
builder = []
if len(results) > 10:
builder.append("Only showing top 10 results...")
for index, (elem, count) in enumerate(results.most_common(10), start=1):
builder.append(f"{index}. {elem} ({Plural(count):time}, {count / times:.2%})")
await ctx.send("\n".join(builder))
@commands.command(
name="8ball",
aliases=["eightball", "eight ball", "question", "answer", "8b"],
)
async def _8ball(self, ctx, *, question: commands.clean_content):
"""The user asks a yes-no question to the ball, then the bot reveals an answer."""
answers = [
"It is certain",
"It is decidedly so",
"Without a doubt",
"Yes - definitely",
"You may rely on it",
"As I see it, yes",
"Most likely",
"Outlook good",
"Yes Signs point to yes",
"Reply hazy",
"try again",
"Ask again later",
"Better not tell you now",
"Cannot predict now",
"Concentrate and ask again",
"Don't count on it",
"My reply is no",
"My sources say no",
"Outlook not so good",
"Very doubtful",
]
await ctx.send(f"`Question:` {question}\n`Answer:` {random.choice(answers)}")
@commands.command(aliases=["pick", "choice", "ch"])
async def choose(self, ctx, *, choices):
"""Chooses a random item from a list of items."""
        # We split on commas and strip any surrounding whitespace from each choice
        choices = [choice.strip() for choice in choices.split(",")]
# We generate the embed
embed = discord.Embed(
title="Chosen",
description=f"__Choices__: {', '.join(choices)}\n__Chosen__: {random.choice(choices)}"
)
# We send the embed
await ctx.send(embed=embed)
@commands.command(aliases=["rcmd"])
async def randomcommand(self, ctx):
"""Sends a random command for you to try"""
await ctx.send_help(random.choice(list(self.bot.commands)))
def setup(bot):
"""Adds the cog to the bot"""
bot.add_cog(Random(bot))
|
"""Reads vehicle status from BMW connected drive portal."""
from __future__ import annotations
import logging
from bimmer_connected.account import ConnectedDriveAccount
from bimmer_connected.country_selector import get_region_from_name
import voluptuous as vol
from homeassistant.components.notify import DOMAIN as NOTIFY_DOMAIN
from homeassistant.config_entries import SOURCE_IMPORT, ConfigEntry
from homeassistant.const import (
ATTR_ATTRIBUTION,
CONF_NAME,
CONF_PASSWORD,
CONF_REGION,
CONF_USERNAME,
)
from homeassistant.core import HomeAssistant, callback
from homeassistant.exceptions import ConfigEntryNotReady
from homeassistant.helpers import discovery
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import DeviceInfo, Entity
from homeassistant.helpers.event import track_utc_time_change
from homeassistant.util import slugify
import homeassistant.util.dt as dt_util
from .const import (
ATTRIBUTION,
CONF_ACCOUNT,
CONF_ALLOWED_REGIONS,
CONF_READ_ONLY,
CONF_USE_LOCATION,
DATA_ENTRIES,
DATA_HASS_CONFIG,
)
_LOGGER = logging.getLogger(__name__)
DOMAIN = "bmw_connected_drive"
ATTR_VIN = "vin"
ACCOUNT_SCHEMA = vol.Schema(
{
vol.Required(CONF_USERNAME): cv.string,
vol.Required(CONF_PASSWORD): cv.string,
vol.Required(CONF_REGION): vol.In(CONF_ALLOWED_REGIONS),
vol.Optional(CONF_READ_ONLY): cv.boolean,
}
)
CONFIG_SCHEMA = vol.Schema({DOMAIN: {cv.string: ACCOUNT_SCHEMA}}, extra=vol.ALLOW_EXTRA)
SERVICE_SCHEMA = vol.Schema({vol.Required(ATTR_VIN): cv.string})
DEFAULT_OPTIONS = {
CONF_READ_ONLY: False,
CONF_USE_LOCATION: False,
}
PLATFORMS = ["binary_sensor", "device_tracker", "lock", "notify", "sensor"]
UPDATE_INTERVAL = 5 # in minutes
SERVICE_UPDATE_STATE = "update_state"
_SERVICE_MAP = {
"light_flash": "trigger_remote_light_flash",
"sound_horn": "trigger_remote_horn",
"activate_air_conditioning": "trigger_remote_air_conditioning",
"find_vehicle": "trigger_remote_vehicle_finder",
}
UNDO_UPDATE_LISTENER = "undo_update_listener"
async def async_setup(hass: HomeAssistant, config: dict):
"""Set up the BMW Connected Drive component from configuration.yaml."""
hass.data.setdefault(DOMAIN, {})
hass.data[DOMAIN][DATA_HASS_CONFIG] = config
if DOMAIN in config:
for entry_config in config[DOMAIN].values():
hass.async_create_task(
hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_IMPORT}, data=entry_config
)
)
return True
@callback
def _async_migrate_options_from_data_if_missing(hass, entry):
data = dict(entry.data)
options = dict(entry.options)
if CONF_READ_ONLY in data or list(options) != list(DEFAULT_OPTIONS):
options = dict(DEFAULT_OPTIONS, **options)
options[CONF_READ_ONLY] = data.pop(CONF_READ_ONLY, False)
hass.config_entries.async_update_entry(entry, data=data, options=options)
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
"""Set up BMW Connected Drive from a config entry."""
hass.data.setdefault(DOMAIN, {})
hass.data[DOMAIN].setdefault(DATA_ENTRIES, {})
_async_migrate_options_from_data_if_missing(hass, entry)
try:
account = await hass.async_add_executor_job(
setup_account, entry, hass, entry.data[CONF_USERNAME]
)
except OSError as ex:
raise ConfigEntryNotReady from ex
async def _async_update_all(service_call=None):
"""Update all BMW accounts."""
await hass.async_add_executor_job(_update_all)
def _update_all() -> None:
"""Update all BMW accounts."""
for entry in hass.data[DOMAIN][DATA_ENTRIES].copy().values():
entry[CONF_ACCOUNT].update()
# Add update listener for config entry changes (options)
undo_listener = entry.add_update_listener(update_listener)
hass.data[DOMAIN][DATA_ENTRIES][entry.entry_id] = {
CONF_ACCOUNT: account,
UNDO_UPDATE_LISTENER: undo_listener,
}
# Service to manually trigger updates for all accounts.
hass.services.async_register(DOMAIN, SERVICE_UPDATE_STATE, _async_update_all)
await _async_update_all()
hass.config_entries.async_setup_platforms(
entry, [platform for platform in PLATFORMS if platform != NOTIFY_DOMAIN]
)
# set up notify platform, no entry support for notify platform yet,
# have to use discovery to load platform.
hass.async_create_task(
discovery.async_load_platform(
hass,
NOTIFY_DOMAIN,
DOMAIN,
{CONF_NAME: DOMAIN},
hass.data[DOMAIN][DATA_HASS_CONFIG],
)
)
return True
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry):
"""Unload a config entry."""
unload_ok = await hass.config_entries.async_unload_platforms(
entry, [platform for platform in PLATFORMS if platform != NOTIFY_DOMAIN]
)
# Only remove services if it is the last account and not read only
if (
len(hass.data[DOMAIN][DATA_ENTRIES]) == 1
and not hass.data[DOMAIN][DATA_ENTRIES][entry.entry_id][CONF_ACCOUNT].read_only
):
services = list(_SERVICE_MAP) + [SERVICE_UPDATE_STATE]
for service in services:
hass.services.async_remove(DOMAIN, service)
for vehicle in hass.data[DOMAIN][DATA_ENTRIES][entry.entry_id][
CONF_ACCOUNT
].account.vehicles:
hass.services.async_remove(NOTIFY_DOMAIN, slugify(f"{DOMAIN}_{vehicle.name}"))
if unload_ok:
hass.data[DOMAIN][DATA_ENTRIES][entry.entry_id][UNDO_UPDATE_LISTENER]()
hass.data[DOMAIN][DATA_ENTRIES].pop(entry.entry_id)
return unload_ok
async def update_listener(hass, config_entry):
"""Handle options update."""
await hass.config_entries.async_reload(config_entry.entry_id)
def setup_account(entry: ConfigEntry, hass, name: str) -> BMWConnectedDriveAccount:
"""Set up a new BMWConnectedDriveAccount based on the config."""
username = entry.data[CONF_USERNAME]
password = entry.data[CONF_PASSWORD]
region = entry.data[CONF_REGION]
read_only = entry.options[CONF_READ_ONLY]
use_location = entry.options[CONF_USE_LOCATION]
_LOGGER.debug("Adding new account %s", name)
pos = (
(hass.config.latitude, hass.config.longitude) if use_location else (None, None)
)
cd_account = BMWConnectedDriveAccount(
username, password, region, name, read_only, *pos
)
def execute_service(call):
"""Execute a service for a vehicle."""
vin = call.data[ATTR_VIN]
vehicle = None
# Double check for read_only accounts as another account could create the services
for entry_data in [
e
for e in hass.data[DOMAIN][DATA_ENTRIES].values()
if not e[CONF_ACCOUNT].read_only
]:
vehicle = entry_data[CONF_ACCOUNT].account.get_vehicle(vin)
if vehicle:
break
if not vehicle:
_LOGGER.error("Could not find a vehicle for VIN %s", vin)
return
function_name = _SERVICE_MAP[call.service]
function_call = getattr(vehicle.remote_services, function_name)
function_call()
if not read_only:
# register the remote services
for service in _SERVICE_MAP:
hass.services.register(
DOMAIN, service, execute_service, schema=SERVICE_SCHEMA
)
# update every UPDATE_INTERVAL minutes, starting now
# this should even out the load on the servers
now = dt_util.utcnow()
track_utc_time_change(
hass,
cd_account.update,
minute=range(now.minute % UPDATE_INTERVAL, 60, UPDATE_INTERVAL),
second=now.second,
)
# Initialize
cd_account.update()
return cd_account
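# Illustrative effect (a sketch, derived from _SERVICE_MAP and SERVICE_SCHEMA above):
# for an account that is not read-only, the services bmw_connected_drive.light_flash,
# sound_horn, activate_air_conditioning and find_vehicle are registered, each
# expecting a "vin" entry in the service data to select the target vehicle.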
class BMWConnectedDriveAccount:
"""Representation of a BMW vehicle."""
def __init__(
self,
username: str,
password: str,
region_str: str,
name: str,
read_only: bool,
lat=None,
lon=None,
) -> None:
"""Initialize account."""
region = get_region_from_name(region_str)
self.read_only = read_only
self.account = ConnectedDriveAccount(username, password, region)
self.name = name
self._update_listeners = []
# Set observer position once for older cars to be in range for
# GPS position (pre-7/2014, <2km) and get new data from API
if lat and lon:
self.account.set_observer_position(lat, lon)
self.account.update_vehicle_states()
def update(self, *_):
"""Update the state of all vehicles.
Notify all listeners about the update.
"""
_LOGGER.debug(
"Updating vehicle state for account %s, notifying %d listeners",
self.name,
len(self._update_listeners),
)
try:
self.account.update_vehicle_states()
for listener in self._update_listeners:
listener()
except OSError as exception:
_LOGGER.error(
"Could not connect to the BMW Connected Drive portal. "
"The vehicle state could not be updated"
)
_LOGGER.exception(exception)
def add_update_listener(self, listener):
"""Add a listener for update notifications."""
self._update_listeners.append(listener)
class BMWConnectedDriveBaseEntity(Entity):
"""Common base for BMW entities."""
def __init__(self, account, vehicle):
"""Initialize sensor."""
self._account = account
self._vehicle = vehicle
self._attrs = {
"car": self._vehicle.name,
"vin": self._vehicle.vin,
ATTR_ATTRIBUTION: ATTRIBUTION,
}
@property
def device_info(self) -> DeviceInfo:
"""Return info for device registry."""
return {
"identifiers": {(DOMAIN, self._vehicle.vin)},
"name": f'{self._vehicle.attributes.get('brand')} {self._vehicle.name}',
"model": self._vehicle.name,
"manufacturer": self._vehicle.attributes.get("brand"),
}
@property
def extra_state_attributes(self):
"""Return the state attributes of the sensor."""
return self._attrs
@property
def should_poll(self):
"""Do not poll this class.
Updates are triggered from BMWConnectedDriveAccount.
"""
return False
def update_callback(self):
"""Schedule a state update."""
self.schedule_update_ha_state(True)
async def async_added_to_hass(self):
"""Add callback after being added to hass.
Show latest data after startup.
"""
self._account.add_update_listener(self.update_callback)
| """Reads vehicle status from BMW connected drive portal."""
from __future__ import annotations
import logging
from bimmer_connected.account import ConnectedDriveAccount
from bimmer_connected.country_selector import get_region_from_name
import voluptuous as vol
from homeassistant.components.notify import DOMAIN as NOTIFY_DOMAIN
from homeassistant.config_entries import SOURCE_IMPORT, ConfigEntry
from homeassistant.const import (
ATTR_ATTRIBUTION,
CONF_NAME,
CONF_PASSWORD,
CONF_REGION,
CONF_USERNAME,
)
from homeassistant.core import HomeAssistant, callback
from homeassistant.exceptions import ConfigEntryNotReady
from homeassistant.helpers import discovery
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import DeviceInfo, Entity
from homeassistant.helpers.event import track_utc_time_change
from homeassistant.util import slugify
import homeassistant.util.dt as dt_util
from .const import (
ATTRIBUTION,
CONF_ACCOUNT,
CONF_ALLOWED_REGIONS,
CONF_READ_ONLY,
CONF_USE_LOCATION,
DATA_ENTRIES,
DATA_HASS_CONFIG,
)
_LOGGER = logging.getLogger(__name__)
DOMAIN = "bmw_connected_drive"
ATTR_VIN = "vin"
ACCOUNT_SCHEMA = vol.Schema(
{
vol.Required(CONF_USERNAME): cv.string,
vol.Required(CONF_PASSWORD): cv.string,
vol.Required(CONF_REGION): vol.In(CONF_ALLOWED_REGIONS),
vol.Optional(CONF_READ_ONLY): cv.boolean,
}
)
CONFIG_SCHEMA = vol.Schema({DOMAIN: {cv.string: ACCOUNT_SCHEMA}}, extra=vol.ALLOW_EXTRA)
SERVICE_SCHEMA = vol.Schema({vol.Required(ATTR_VIN): cv.string})
DEFAULT_OPTIONS = {
CONF_READ_ONLY: False,
CONF_USE_LOCATION: False,
}
PLATFORMS = ["binary_sensor", "device_tracker", "lock", "notify", "sensor"]
UPDATE_INTERVAL = 5 # in minutes
SERVICE_UPDATE_STATE = "update_state"
_SERVICE_MAP = {
"light_flash": "trigger_remote_light_flash",
"sound_horn": "trigger_remote_horn",
"activate_air_conditioning": "trigger_remote_air_conditioning",
"find_vehicle": "trigger_remote_vehicle_finder",
}
UNDO_UPDATE_LISTENER = "undo_update_listener"
async def async_setup(hass: HomeAssistant, config: dict):
"""Set up the BMW Connected Drive component from configuration.yaml."""
hass.data.setdefault(DOMAIN, {})
hass.data[DOMAIN][DATA_HASS_CONFIG] = config
if DOMAIN in config:
for entry_config in config[DOMAIN].values():
hass.async_create_task(
hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_IMPORT}, data=entry_config
)
)
return True
@callback
def _async_migrate_options_from_data_if_missing(hass, entry):
data = dict(entry.data)
options = dict(entry.options)
if CONF_READ_ONLY in data or list(options) != list(DEFAULT_OPTIONS):
options = dict(DEFAULT_OPTIONS, **options)
options[CONF_READ_ONLY] = data.pop(CONF_READ_ONLY, False)
hass.config_entries.async_update_entry(entry, data=data, options=options)
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
"""Set up BMW Connected Drive from a config entry."""
hass.data.setdefault(DOMAIN, {})
hass.data[DOMAIN].setdefault(DATA_ENTRIES, {})
_async_migrate_options_from_data_if_missing(hass, entry)
try:
account = await hass.async_add_executor_job(
setup_account, entry, hass, entry.data[CONF_USERNAME]
)
except OSError as ex:
raise ConfigEntryNotReady from ex
async def _async_update_all(service_call=None):
"""Update all BMW accounts."""
await hass.async_add_executor_job(_update_all)
def _update_all() -> None:
"""Update all BMW accounts."""
for entry in hass.data[DOMAIN][DATA_ENTRIES].copy().values():
entry[CONF_ACCOUNT].update()
# Add update listener for config entry changes (options)
undo_listener = entry.add_update_listener(update_listener)
hass.data[DOMAIN][DATA_ENTRIES][entry.entry_id] = {
CONF_ACCOUNT: account,
UNDO_UPDATE_LISTENER: undo_listener,
}
# Service to manually trigger updates for all accounts.
hass.services.async_register(DOMAIN, SERVICE_UPDATE_STATE, _async_update_all)
await _async_update_all()
hass.config_entries.async_setup_platforms(
entry, [platform for platform in PLATFORMS if platform != NOTIFY_DOMAIN]
)
# set up notify platform, no entry support for notify platform yet,
# have to use discovery to load platform.
hass.async_create_task(
discovery.async_load_platform(
hass,
NOTIFY_DOMAIN,
DOMAIN,
{CONF_NAME: DOMAIN},
hass.data[DOMAIN][DATA_HASS_CONFIG],
)
)
return True
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry):
"""Unload a config entry."""
unload_ok = await hass.config_entries.async_unload_platforms(
entry, [platform for platform in PLATFORMS if platform != NOTIFY_DOMAIN]
)
# Only remove services if it is the last account and not read only
if (
len(hass.data[DOMAIN][DATA_ENTRIES]) == 1
and not hass.data[DOMAIN][DATA_ENTRIES][entry.entry_id][CONF_ACCOUNT].read_only
):
services = list(_SERVICE_MAP) + [SERVICE_UPDATE_STATE]
for service in services:
hass.services.async_remove(DOMAIN, service)
for vehicle in hass.data[DOMAIN][DATA_ENTRIES][entry.entry_id][
CONF_ACCOUNT
].account.vehicles:
hass.services.async_remove(NOTIFY_DOMAIN, slugify(f"{DOMAIN}_{vehicle.name}"))
if unload_ok:
hass.data[DOMAIN][DATA_ENTRIES][entry.entry_id][UNDO_UPDATE_LISTENER]()
hass.data[DOMAIN][DATA_ENTRIES].pop(entry.entry_id)
return unload_ok
async def update_listener(hass, config_entry):
"""Handle options update."""
await hass.config_entries.async_reload(config_entry.entry_id)
def setup_account(entry: ConfigEntry, hass, name: str) -> BMWConnectedDriveAccount:
"""Set up a new BMWConnectedDriveAccount based on the config."""
username = entry.data[CONF_USERNAME]
password = entry.data[CONF_PASSWORD]
region = entry.data[CONF_REGION]
read_only = entry.options[CONF_READ_ONLY]
use_location = entry.options[CONF_USE_LOCATION]
_LOGGER.debug("Adding new account %s", name)
pos = (
(hass.config.latitude, hass.config.longitude) if use_location else (None, None)
)
cd_account = BMWConnectedDriveAccount(
username, password, region, name, read_only, *pos
)
def execute_service(call):
"""Execute a service for a vehicle."""
vin = call.data[ATTR_VIN]
vehicle = None
# Double check for read_only accounts as another account could create the services
for entry_data in [
e
for e in hass.data[DOMAIN][DATA_ENTRIES].values()
if not e[CONF_ACCOUNT].read_only
]:
vehicle = entry_data[CONF_ACCOUNT].account.get_vehicle(vin)
if vehicle:
break
if not vehicle:
_LOGGER.error("Could not find a vehicle for VIN %s", vin)
return
function_name = _SERVICE_MAP[call.service]
function_call = getattr(vehicle.remote_services, function_name)
function_call()
if not read_only:
# register the remote services
for service in _SERVICE_MAP:
hass.services.register(
DOMAIN, service, execute_service, schema=SERVICE_SCHEMA
)
# update every UPDATE_INTERVAL minutes, starting now
# this should even out the load on the servers
now = dt_util.utcnow()
track_utc_time_change(
hass,
cd_account.update,
minute=range(now.minute % UPDATE_INTERVAL, 60, UPDATE_INTERVAL),
second=now.second,
)
# Initialize
cd_account.update()
return cd_account
class BMWConnectedDriveAccount:
"""Representation of a BMW vehicle."""
def __init__(
self,
username: str,
password: str,
region_str: str,
name: str,
read_only: bool,
lat=None,
lon=None,
) -> None:
"""Initialize account."""
region = get_region_from_name(region_str)
self.read_only = read_only
self.account = ConnectedDriveAccount(username, password, region)
self.name = name
self._update_listeners = []
# Set observer position once for older cars to be in range for
# GPS position (pre-7/2014, <2km) and get new data from API
if lat and lon:
self.account.set_observer_position(lat, lon)
self.account.update_vehicle_states()
def update(self, *_):
"""Update the state of all vehicles.
Notify all listeners about the update.
"""
_LOGGER.debug(
"Updating vehicle state for account %s, notifying %d listeners",
self.name,
len(self._update_listeners),
)
try:
self.account.update_vehicle_states()
for listener in self._update_listeners:
listener()
except OSError as exception:
_LOGGER.error(
"Could not connect to the BMW Connected Drive portal. "
"The vehicle state could not be updated"
)
_LOGGER.exception(exception)
def add_update_listener(self, listener):
"""Add a listener for update notifications."""
self._update_listeners.append(listener)
class BMWConnectedDriveBaseEntity(Entity):
"""Common base for BMW entities."""
def __init__(self, account, vehicle):
"""Initialize sensor."""
self._account = account
self._vehicle = vehicle
self._attrs = {
"car": self._vehicle.name,
"vin": self._vehicle.vin,
ATTR_ATTRIBUTION: ATTRIBUTION,
}
@property
def device_info(self) -> DeviceInfo:
"""Return info for device registry."""
return {
"identifiers": {(DOMAIN, self._vehicle.vin)},
"name": f'{self._vehicle.attributes.get("brand")} {self._vehicle.name}',
"model": self._vehicle.name,
"manufacturer": self._vehicle.attributes.get("brand"),
}
@property
def extra_state_attributes(self):
"""Return the state attributes of the sensor."""
return self._attrs
@property
def should_poll(self):
"""Do not poll this class.
Updates are triggered from BMWConnectedDriveAccount.
"""
return False
def update_callback(self):
"""Schedule a state update."""
self.schedule_update_ha_state(True)
async def async_added_to_hass(self):
"""Add callback after being added to hass.
Show latest data after startup.
"""
self._account.add_update_listener(self.update_callback)
|
import functools
import logging.config
import multiprocessing
import os
from datetime import date
from datetime import datetime as dt
from pathlib import Path
from typing import List, Mapping, Optional, Tuple
from cdsapi import Client
from miranda.scripting import LOGGING_CONFIG
logging.config.dictConfig(LOGGING_CONFIG)
__all__ = ["request_era5"]
def request_era5(
variables: Optional[Mapping[str, str]] = None,
projects: List[str] = None,
domain: str = "AMNO",
year_start: int = 1981,
year_end: Optional[int] = None,
processes: int = 4,
) -> None:
"""Request ERA5/ERA5-Land from Copernicus Data Store in NetCDF4 format.
Parameters
----------
variables: Mapping[str, str], optional
projects : List[{"era5", "era5-land"}]
domain : {"GLOBAL", "AMNO", "CAN", "QC"}
year_start : int
year_end : int, optional
processes : int
Returns
-------
None
"""
# Variables of interest
variable_reference = dict(
pr="total_precipitation",
vas="10m_v_component_of_wind",
uas="10m_u_component_of_wind",
td="2m_dewpoint_temperature",
tas="2m_temperature",
potevap="potential evaporation",
snd="snow_depth",
prsn="snowfall",
)
v_requested = dict()
if variables:
for v in variables:
if v in variable_reference:
v_requested[v] = variable_reference[v]
else:
v_requested = variable_reference
if year_end is None:
year_end = date.today().year
years = range(year_start, year_end)
    months = [str(d).zfill(2) for d in range(1, 13)]
yearmonth = list()
for y in years:
for m in months:
yearmonth.append((y, m))
project_names = list()
if "era5" in projects:
project_names.append("reanalysis-era5-single-levels")
if "era5-land" in projects:
project_names.append("reanalysis-era5-land")
product = project_names[0].split("-")[0]
target = Path().cwd().joinpath("downloaded")
Path(target).mkdir(exist_ok=True)
os.chdir(target)
for p in projects:
proc = multiprocessing.Pool(processes=processes)
func = functools.partial(_request_direct_era, v_requested, p, domain, product)
logging.info([func, dt.now().strftime("%Y-%m-%d %X")])
proc.map(func, yearmonth)
proc.close()
proc.join()
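# Illustrative call (a sketch; the argument values are hypothetical, not defaults):
# request hourly 2m temperature and total precipitation over Canada for 1990.
#
#     request_era5(
#         variables=["tas", "pr"],
#         projects=["era5"],
#         domain="CAN",
#         year_start=1990,
#         year_end=1991,
#     )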
def _request_direct_era(
variables: Mapping[str, str],
project: str,
domain: str,
product: str,
yearmonth: Tuple[int, str],
):
"""Launch formatted request."""
year, month = yearmonth
    days = [str(d).zfill(2) for d in range(1, 32)]
times = ["{}:00".format(str(t).zfill(2)) for t in range(24)]
if domain.upper() == "GLOBAL":
region = "90/-180/-90/180"
elif domain.upper() == "AMNO":
region = "90/-180/10/-10"
elif domain.upper() == "CAN":
region = "83.5/-141/41.5/-52.5"
elif domain.upper() == "QC":
region = "63/-80/44.5/-57"
else:
        raise ValueError(f"Unsupported domain: {domain}")
c = Client()
for var in variables.keys():
netcdf_name = f"{var}_{"-".join(project.split("-")[1:])}_{product}_hourly_{year}{month}_NAM.nc"
if Path(netcdf_name).exists():
continue
request_kwargs = dict(
variable=variables[var],
year=year,
month=month,
day=days,
time=times,
area=region,
format="netcdf",
)
if project == "reanalysis-era5-single-levels":
request_kwargs.update(dict(product_type=product))
c.retrieve(
project,
request_kwargs,
netcdf_name,
)
| import functools
import logging.config
import multiprocessing
import os
from datetime import date
from datetime import datetime as dt
from pathlib import Path
from typing import List, Mapping, Optional, Tuple
from cdsapi import Client
from miranda.scripting import LOGGING_CONFIG
logging.config.dictConfig(LOGGING_CONFIG)
__all__ = ["request_era5"]
def request_era5(
variables: Optional[Mapping[str, str]] = None,
projects: List[str] = None,
domain: str = "AMNO",
year_start: int = 1981,
year_end: Optional[int] = None,
processes: int = 4,
) -> None:
"""Request ERA5/ERA5-Land from Copernicus Data Store in NetCDF4 format.
Parameters
----------
variables: Mapping[str, str], optional
projects : List[{"era5", "era5-land"}]
domain : {"GLOBAL", "AMNO", "CAN", "QC"}
year_start : int
year_end : int, optional
processes : int
Returns
-------
None
"""
# Variables of interest
variable_reference = dict(
pr="total_precipitation",
vas="10m_v_component_of_wind",
uas="10m_u_component_of_wind",
td="2m_dewpoint_temperature",
tas="2m_temperature",
potevap="potential evaporation",
snd="snow_depth",
prsn="snowfall",
)
v_requested = dict()
if variables:
for v in variables:
if v in variable_reference:
v_requested[v] = variable_reference[v]
else:
v_requested = variable_reference
if year_end is None:
year_end = date.today().year
years = range(year_start, year_end)
    months = [str(d).zfill(2) for d in range(1, 13)]
yearmonth = list()
for y in years:
for m in months:
yearmonth.append((y, m))
project_names = list()
if "era5" in projects:
project_names.append("reanalysis-era5-single-levels")
if "era5-land" in projects:
project_names.append("reanalysis-era5-land")
product = project_names[0].split("-")[0]
target = Path().cwd().joinpath("downloaded")
Path(target).mkdir(exist_ok=True)
os.chdir(target)
for p in projects:
proc = multiprocessing.Pool(processes=processes)
func = functools.partial(_request_direct_era, v_requested, p, domain, product)
logging.info([func, dt.now().strftime("%Y-%m-%d %X")])
proc.map(func, yearmonth)
proc.close()
proc.join()
def _request_direct_era(
variables: Mapping[str, str],
project: str,
domain: str,
product: str,
yearmonth: Tuple[int, str],
):
"""Launch formatted request."""
year, month = yearmonth
    days = [str(d).zfill(2) for d in range(1, 32)]
times = ["{}:00".format(str(t).zfill(2)) for t in range(24)]
if domain.upper() == "GLOBAL":
region = "90/-180/-90/180"
elif domain.upper() == "AMNO":
region = "90/-180/10/-10"
elif domain.upper() == "CAN":
region = "83.5/-141/41.5/-52.5"
elif domain.upper() == "QC":
region = "63/-80/44.5/-57"
else:
        raise ValueError(f"Unsupported domain: {domain}")
c = Client()
for var in variables.keys():
netcdf_name = f"{var}_{'-'.join(project.split('-')[1:])}_{product}_hourly_{year}{month}_NAM.nc"
if Path(netcdf_name).exists():
continue
request_kwargs = dict(
variable=variables[var],
year=year,
month=month,
day=days,
time=times,
area=region,
format="netcdf",
)
if project == "reanalysis-era5-single-levels":
request_kwargs.update(dict(product_type=product))
c.retrieve(
project,
request_kwargs,
netcdf_name,
)
|
students = []
filepath = "./datastore/students.txt"
def print_students_titlecase():
for student in students:
stud = {"roll_number": student['roll_number'],
"name": student['name'], "student_age": student['student_age']}
print(stud)
def save_file(student):
try:
f = open(filepath, "a")
f.write(
f'{student['roll_number']} {student['name']} {student['student_age']}\n')
f.close()
student = {"roll_number": student["roll_number"],
"name": student["name"], "student_age": student["student_age"]}
students.append(student)
except Exception as error:
print("Could not save file ", error)
def read_file():
try:
f = open(filepath, "r")
for student in f.readlines():
values = student.split()
# add_student(values[0], values[1], values[2])
f.close()
except Exception:
print("Could not read file")
# -------------------------------------------
read_file()
print_students_titlecase()
roll_number = input("Enter student ID: ")
student_name = input("Enter student name: ")
student_age = input("Enter student age: ")
# student = add_student(student_name, roll_number, student_age)
student = {"roll_number": roll_number,
"name": student_name, "student_age": student_age}
save_file(student)
| students = []
filepath = "./datastore/students.txt"
def print_students_titlecase():
for student in students:
stud = {"roll_number": student['roll_number'],
"name": student['name'], "student_age": student['student_age']}
print(stud)
def save_file(student):
try:
f = open(filepath, "a")
f.write(
f'{student["roll_number"]} {student["name"]} {student["student_age"]}\n')
f.close()
student = {"roll_number": student["roll_number"],
"name": student["name"], "student_age": student["student_age"]}
students.append(student)
except Exception as error:
print("Could not save file ", error)
def read_file():
try:
f = open(filepath, "r")
for student in f.readlines():
values = student.split()
# add_student(values[0], values[1], values[2])
f.close()
except Exception:
print("Could not read file")
# -------------------------------------------
read_file()
print_students_titlecase()
roll_number = input("Enter student ID: ")
student_name = input("Enter student name: ")
student_age = input("Enter student age: ")
# student = add_student(student_name, roll_number, student_age)
student = {"roll_number": roll_number,
"name": student_name, "student_age": student_age}
save_file(student)
|
import functools
import json
import logging
import os
import re
import socket
import ssl
import threading
from asyncio.selector_events import BaseSelectorEventLoop
from typing import Dict, List, Match, Optional, Union
from urllib.parse import parse_qs, unquote, urlencode, urlparse
import requests
from flask_cors import CORS
from flask_cors.core import (
ACL_ALLOW_HEADERS,
ACL_EXPOSE_HEADERS,
ACL_METHODS,
ACL_ORIGIN,
ACL_REQUEST_HEADERS,
)
from requests.models import Request, Response
from werkzeug.exceptions import HTTPException
from localstack import config
from localstack.config import (
EXTRA_CORS_ALLOWED_HEADERS,
EXTRA_CORS_ALLOWED_ORIGINS,
EXTRA_CORS_EXPOSE_HEADERS,
)
from localstack.constants import (
APPLICATION_JSON,
AWS_REGION_US_EAST_1,
BIND_HOST,
HEADER_LOCALSTACK_REQUEST_URL,
)
from localstack.services.messages import Headers, MessagePayload
from localstack.services.messages import Request as RoutingRequest
from localstack.services.messages import Response as RoutingResponse
from localstack.utils.aws import aws_stack
from localstack.utils.aws.aws_responses import LambdaResponse
from localstack.utils.aws.aws_stack import is_internal_call_context
from localstack.utils.aws.request_context import RequestContextManager, get_proxy_request_for_thread
from localstack.utils.crypto import generate_ssl_cert
from localstack.utils.functions import empty_context_manager
from localstack.utils.json import json_safe
from localstack.utils.net import wait_for_port_open
from localstack.utils.server import http2_server
from localstack.utils.serving import Server
from localstack.utils.strings import to_bytes, to_str
from localstack.utils.threads import start_thread
from localstack.utils.urls import path_from_url
# set up logger
LOG = logging.getLogger(__name__)
# path for test certificate
SERVER_CERT_PEM_FILE = "server.test.pem"
# CORS constants below
CORS_ALLOWED_HEADERS = [
"authorization",
"cache-control",
"content-length",
"content-md5",
"content-type",
"etag",
"location",
"x-amz-acl",
"x-amz-content-sha256",
"x-amz-date",
"x-amz-request-id",
"x-amz-security-token",
"x-amz-tagging",
"x-amz-target",
"x-amz-user-agent",
"x-amz-version-id",
"x-amzn-requestid",
"x-localstack-target",
# for AWS SDK v3
"amz-sdk-invocation-id",
"amz-sdk-request",
]
if EXTRA_CORS_ALLOWED_HEADERS:
CORS_ALLOWED_HEADERS += EXTRA_CORS_ALLOWED_HEADERS.split(",")
CORS_ALLOWED_METHODS = ("HEAD", "GET", "PUT", "POST", "DELETE", "OPTIONS", "PATCH")
CORS_EXPOSE_HEADERS = (
"etag",
"x-amz-version-id",
)
if EXTRA_CORS_EXPOSE_HEADERS:
CORS_EXPOSE_HEADERS += tuple(EXTRA_CORS_EXPOSE_HEADERS.split(","))
ALLOWED_CORS_RESPONSE_HEADERS = [
"Access-Control-Allow-Origin",
"Access-Control-Allow-Methods",
"Access-Control-Allow-Headers",
"Access-Control-Max-Age",
"Access-Control-Allow-Credentials",
"Access-Control-Expose-Headers",
]
ALLOWED_CORS_ORIGINS = [
"https://app.localstack.cloud",
"http://app.localstack.cloud",
f"https://localhost:{config.EDGE_PORT}",
f"http://localhost:{config.EDGE_PORT}",
f"https://localhost.localstack.cloud:{config.EDGE_PORT}",
f"http://localhost.localstack.cloud:{config.EDGE_PORT}",
"https://localhost",
"https://localhost.localstack.cloud",
]
if EXTRA_CORS_ALLOWED_ORIGINS:
ALLOWED_CORS_ORIGINS += EXTRA_CORS_ALLOWED_ORIGINS.split(",")
class ProxyListener(object):
# List of `ProxyListener` instances that are enabled by default for all requests.
# For inbound flows, the default listeners are applied *before* forwarding requests
# to the backend; for outbound flows, the default listeners are applied *after* the
# response has been received from the backend service.
DEFAULT_LISTENERS = []
def forward_request(
self, method: str, path: str, data: MessagePayload, headers: Headers
) -> Union[int, Response, Request, dict, bool]:
"""This interceptor method is called by the proxy when receiving a new request
(*before* forwarding the request to the backend service). It receives details
of the incoming request, and returns either of the following results:
* True if the request should be forwarded to the backend service as-is (default).
* An integer (e.g., 200) status code to return directly to the client without
calling the backend service.
* An instance of requests.models.Response to return directly to the client without
calling the backend service.
* An instance of requests.models.Request which represents a new/modified request
that will be forwarded to the backend service.
        * Any other value, in which case a 503 (Service Unavailable) response is returned
          to the client without calling the backend service.
"""
return True
def return_response(
self,
method: str,
path: str,
data: MessagePayload,
headers: Headers,
response: Response,
) -> Optional[Response]:
"""This interceptor method is called by the proxy when returning a response
(*after* having forwarded the request and received a response from the backend
service). It receives details of the incoming request as well as the response
from the backend service, and returns either of the following results:
* An instance of requests.models.Response to return to the client instead of the
actual response returned from the backend service.
* Any other value, in which case the response from the backend service is
returned to the client.
"""
return None
def get_forward_url(self, method: str, path: str, data, headers):
"""Return a custom URL to forward the given request to. If a falsy value is returned,
then the default URL will be used.
"""
return None
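# ----------------------------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): a minimal ProxyListener implementation,
# assuming the forward_request() contract documented above - return True to pass the request
# through, an int or requests.models.Response to short-circuit, or a requests.models.Request to
# forward a modified request. The "/_example-health" path is a hypothetical example.
class _ExampleShortCircuitListener(ProxyListener):
    """Example listener answering a hypothetical health-check path without calling the backend."""
    def forward_request(self, method, path, data, headers):
        if path.startswith("/_example-health"):
            response = Response()
            response.status_code = 200
            response._content = b'{"status": "ok"}'
            response.headers["Content-Type"] = APPLICATION_JSON
            return response
        # all other requests are forwarded to the backend unchanged
        return True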
class MessageModifyingProxyListener(ProxyListener):
# Special handler that can be used to modify an inbound/outbound message
# and forward it to the next handler in the chain (instead of forwarding
# to the backend directly, which is the default for regular ProxyListeners)
# TODO: to be replaced with listener chain in ASF Gateway, once integrated
def forward_request(
self, method: str, path: str, data: MessagePayload, headers: Headers
) -> Optional[RoutingRequest]:
"""Return a RoutingRequest with modified request data, or None to forward the request
unmodified"""
return None
def return_response(
self,
method: str,
path: str,
data: MessagePayload,
headers: Headers,
response: Response,
) -> Optional[RoutingResponse]:
"""Return a RoutingResponse with modified response data, or None to forward the response
unmodified"""
return None
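# ----------------------------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): a minimal MessageModifyingProxyListener,
# assuming the contract above - return a RoutingRequest/RoutingResponse to hand the modified
# message to the *next* listener in the chain, or None to leave it untouched. The header name
# "x-example-injected" is a hypothetical example.
class _ExampleHeaderInjectingListener(MessageModifyingProxyListener):
    def forward_request(self, method, path, data, headers):
        modified_headers = dict(headers or {})
        modified_headers["x-example-injected"] = "1"
        return RoutingRequest(method=method, path=path, data=data, headers=modified_headers)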
class ArnPartitionRewriteListener(MessageModifyingProxyListener):
"""
Intercepts requests and responses and tries to adjust the partitions in ARNs within the
intercepted requests.
For incoming requests, the default partition is set ("aws").
For outgoing responses, the partition is adjusted based on the region in the ARN, or by the
default region
if the ARN does not contain a region.
This listener is used to support other partitions than the default "aws" partition (f.e.
aws-us-gov) without
rewriting all the cases where the ARN is parsed or constructed within LocalStack or moto.
In other words, this listener makes sure that internally the ARNs are always in the partition
"aws", while the
client gets ARNs with the proper partition.
"""
# Partition which should be statically set for incoming requests
DEFAULT_INBOUND_PARTITION = "aws"
class InvalidRegionException(Exception):
"""An exception indicating that a region could not be matched to a partition."""
pass
arn_regex = re.compile(
r"arn:" # Prefix
r"(?P<Partition>(aws|aws-cn|aws-iso|aws-iso-b|aws-us-gov)*):" # Partition
r"(?P<Service>[\w-]*):" # Service (lambda, s3, ecs,...)
r"(?P<Region>[\w-]*):" # Region (us-east-1, us-gov-west-1,...)
r"(?P<AccountID>[\w-]*):" # AccountID
r"(?P<ResourcePath>" # Combine the resource type and id to the ResourcePath
r"((?P<ResourceType>[\w-]*)[:/])?" # ResourceType (optional, f.e. S3 bucket name)
r"(?P<ResourceID>[\w\-/*]*)" # Resource ID (f.e. file name in S3)
r")"
)
def forward_request(
self, method: str, path: str, data: MessagePayload, headers: Headers
) -> Optional[RoutingRequest]:
return RoutingRequest(
method=method,
path=self._adjust_partition_in_path(path, self.DEFAULT_INBOUND_PARTITION),
data=self._adjust_partition(data, self.DEFAULT_INBOUND_PARTITION),
headers=self._adjust_partition(headers, self.DEFAULT_INBOUND_PARTITION),
)
def return_response(
self,
method: str,
path: str,
data: MessagePayload,
headers: Headers,
response: Response,
) -> Optional[RoutingResponse]:
# Only handle responses for calls from external clients
if is_internal_call_context(headers):
return None
return RoutingResponse(
status_code=response.status_code,
content=self._adjust_partition(response.content),
headers=self._adjust_partition(response.headers),
)
def _adjust_partition_in_path(self, path: str, static_partition: str = None):
"""Adjusts the (still url encoded) URL path"""
parsed_url = urlparse(path)
        # Make sure to keep blank values, otherwise we would drop query params which do not
        # have a value (e.g. "/?policy")
decoded_query = parse_qs(qs=parsed_url.query, keep_blank_values=True)
adjusted_path = self._adjust_partition(parsed_url.path, static_partition)
adjusted_query = self._adjust_partition(decoded_query, static_partition)
encoded_query = urlencode(adjusted_query, doseq=True)
# Make sure to avoid empty equals signs (in between and in the end)
encoded_query = encoded_query.replace("=&", "&")
encoded_query = re.sub(r"=$", "", encoded_query)
return f"{adjusted_path}{("?" + encoded_query) if encoded_query else ""}"
def _adjust_partition(self, source, static_partition: str = None):
# Call this function recursively if we get a dictionary or a list
if isinstance(source, dict):
result = {}
for k, v in source.items():
result[k] = self._adjust_partition(v, static_partition)
return result
if isinstance(source, list):
result = []
for v in source:
result.append(self._adjust_partition(v, static_partition))
return result
elif isinstance(source, bytes):
try:
decoded = unquote(to_str(source))
adjusted = self._adjust_partition(decoded, static_partition)
return to_bytes(adjusted)
except UnicodeDecodeError:
# If the body can't be decoded to a string, we return the initial source
return source
elif not isinstance(source, str):
# Ignore any other types
return source
return self.arn_regex.sub(lambda m: self._adjust_match(m, static_partition), source)
def _adjust_match(self, match: Match, static_partition: str = None):
region = match.group("Region")
partition = self._partition_lookup(region) if static_partition is None else static_partition
service = match.group("Service")
account_id = match.group("AccountID")
resource_path = match.group("ResourcePath")
return f"arn:{partition}:{service}:{region}:{account_id}:{resource_path}"
def _partition_lookup(self, region: str):
try:
partition = self._get_partition_for_region(region)
except ArnPartitionRewriteListener.InvalidRegionException:
try:
                # If the region is not properly set (e.g. because it is set to a wildcard),
# the partition is determined based on the default region.
partition = self._get_partition_for_region(config.DEFAULT_REGION)
except self.InvalidRegionException:
# If it also fails with the DEFAULT_REGION, we use us-east-1 as a fallback
partition = self._get_partition_for_region(AWS_REGION_US_EAST_1)
return partition
def _get_partition_for_region(self, region: str) -> str:
# Region-Partition matching is based on the "regionRegex" definitions in the endpoints.json
# in the botocore package.
if region.startswith("us-gov-"):
return "aws-us-gov"
elif region.startswith("us-iso-"):
return "aws-iso"
elif region.startswith("us-isob-"):
return "aws-iso-b"
elif region.startswith("cn-"):
return "aws-cn"
elif re.match(r"^(us|eu|ap|sa|ca|me|af)-\w+-\d+$", region):
return "aws"
else:
raise ArnPartitionRewriteListener.InvalidRegionException(
f"Region ({region}) could not be matched to a partition."
)
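# ----------------------------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): rough examples of what the ARN rewrite
# above does; all ARN values are hypothetical.
#
#   listener = ArnPartitionRewriteListener()
#   # inbound: the partition is forced to the static default partition ("aws")
#   listener._adjust_partition("arn:aws-us-gov:s3:us-gov-west-1:000000000000:mybucket", "aws")
#   # -> "arn:aws:s3:us-gov-west-1:000000000000:mybucket"
#   # outbound: the partition is derived from the region embedded in the ARN
#   listener._adjust_partition("arn:aws:s3:us-gov-west-1:000000000000:mybucket")
#   # -> "arn:aws-us-gov:s3:us-gov-west-1:000000000000:mybucket"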
# -------------------
# BASE BACKEND UTILS
# -------------------
class RegionBackend(object):
"""Base class for region-specific backends for the different APIs.
RegionBackend lookup methods are not thread safe."""
REGIONS: Dict[str, "RegionBackend"]
name: str # name of the region
@classmethod
def get(cls, region: str = None) -> "RegionBackend":
region = region or cls.get_current_request_region()
regions = cls.regions()
backend = regions.get(region)
if not backend:
backend = cls()
backend.name = region
regions[region] = backend
return regions[region]
@classmethod
def regions(cls) -> Dict[str, "RegionBackend"]:
if not hasattr(cls, "REGIONS"):
# maps region name to region backend instance
cls.REGIONS = {}
return cls.REGIONS
@classmethod
def get_current_request_region(cls):
return aws_stack.get_region()
@classmethod
def reset(cls):
"""Reset the (in-memory) state of this service region backend."""
# for now, simply reset the regions and discard all existing region instances
cls.REGIONS = {}
return cls.regions()
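# ----------------------------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): a minimal RegionBackend subclass,
# assuming the lookup semantics above - get() lazily creates one backend instance per region.
# The "queues" attribute is a hypothetical piece of per-region state.
class _ExampleServiceBackend(RegionBackend):
    def __init__(self):
        self.queues = {}  # hypothetical per-region state
# hypothetical usage:
#   backend = _ExampleServiceBackend.get("eu-west-1")
#   backend.queues["my-queue"] = {}
#   assert _ExampleServiceBackend.get("eu-west-1") is backend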
# ---------------------
# PROXY LISTENER UTILS
# ---------------------
def append_cors_headers(request_headers=None, response=None):
# Note: Use "response is not None" here instead of "not response"!
headers = {} if response is None else response.headers
# In case we have LambdaResponse copy multivalue headers to regular headers, since
# CaseInsensitiveDict does not support "__contains__" and it's easier to deal with
# a single headers object
if isinstance(response, LambdaResponse):
for key in response.multi_value_headers.keys():
headers_list = list(response.multi_value_headers[key]) + [response.headers.get(key)]
headers_list = [str(h) for h in headers_list if h is not None]
headers[key] = ",".join(headers_list)
response.multi_value_headers = {}
if ACL_ORIGIN not in headers:
headers[ACL_ORIGIN] = (
request_headers["origin"]
if request_headers.get("origin") and not config.DISABLE_CORS_CHECKS
else "*"
)
if ACL_METHODS not in headers:
headers[ACL_METHODS] = ",".join(CORS_ALLOWED_METHODS)
if ACL_ALLOW_HEADERS not in headers:
requested_headers = headers.get(ACL_REQUEST_HEADERS, "")
requested_headers = re.split(r"[,\s]+", requested_headers) + CORS_ALLOWED_HEADERS
headers[ACL_ALLOW_HEADERS] = ",".join([h for h in requested_headers if h])
if ACL_EXPOSE_HEADERS not in headers:
headers[ACL_EXPOSE_HEADERS] = ",".join(CORS_EXPOSE_HEADERS)
for header in ALLOWED_CORS_RESPONSE_HEADERS:
if headers.get(header) == "":
del headers[header]
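# ----------------------------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): hedged usage of append_cors_headers();
# the origin value is a hypothetical example.
#
#   response = Response()
#   response.status_code = 200
#   append_cors_headers(request_headers={"origin": "https://app.localstack.cloud"}, response=response)
#   # response.headers now carries Access-Control-Allow-Origin/-Methods/-Headers and
#   # Access-Control-Expose-Headers entries, unless they were already present.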
def http_exception_to_response(e: HTTPException):
"""Convert a werkzeug HTTP exception to a requests.Response object"""
response = Response()
response.status_code = e.code
response.headers.update(dict(e.get_headers()))
body = e.get_body()
response.headers["Content-Length"] = str(len(str(body or "")))
response._content = body
return response
def cors_error_response():
response = Response()
response.status_code = 403
return response
def _is_in_allowed_origins(allowed_origins, origin):
for allowed_origin in allowed_origins:
if allowed_origin == "*" or origin == allowed_origin:
return True
return False
def is_cors_origin_allowed(headers, allowed_origins=None):
"""Returns true if origin is allowed to perform cors requests, false otherwise"""
allowed_origins = ALLOWED_CORS_ORIGINS if allowed_origins is None else allowed_origins
origin = headers.get("origin")
referer = headers.get("referer")
if origin:
return _is_in_allowed_origins(allowed_origins, origin)
elif referer:
referer_uri = "{uri.scheme}://{uri.netloc}".format(uri=urlparse(referer))
return _is_in_allowed_origins(allowed_origins, referer_uri)
# If both headers are not set, let it through (awscli etc. do not send these headers)
return True
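# ----------------------------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): behavior of is_cors_origin_allowed(),
# assuming the default ALLOWED_CORS_ORIGINS above; header values are hypothetical.
#
#   is_cors_origin_allowed({"origin": "https://app.localstack.cloud"})    # -> True
#   is_cors_origin_allowed({"referer": "https://evil.example.com/page"})  # -> False
#   is_cors_origin_allowed({})  # -> True (no Origin/Referer header, e.g. awscli)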
def should_enforce_self_managed_service(method, path, headers, data):
if config.DISABLE_CUSTOM_CORS_S3 and config.DISABLE_CUSTOM_CORS_APIGATEWAY:
return True
# allow only certain api calls without checking origin
import localstack.services.edge
api, _ = localstack.services.edge.get_api_from_custom_rules(method, path, data, headers) or (
"",
None,
)
if not config.DISABLE_CUSTOM_CORS_S3 and api == "s3":
return False
if not config.DISABLE_CUSTOM_CORS_APIGATEWAY and api == "apigateway":
return False
return True
def update_path_in_url(base_url: str, path: str) -> str:
"""Construct a URL from the given base URL and path"""
parsed = urlparse(base_url)
path = path or ""
path = path if path.startswith("/") else f"/{path}"
protocol = f"{parsed.scheme}:" if parsed.scheme else ""
return f"{protocol}//{parsed.netloc}{path}"
def with_context():
"""
Decorator wraps function in a request context manager
:return:
"""
def context_manager(method=None, path=None, data_bytes=None, headers=None, *args, **kwargs):
req_context = get_proxy_request_for_thread()
ctx_manager = empty_context_manager()
if not req_context:
req_context = Request(url=path, data=data_bytes, headers=headers, method=method)
ctx_manager = RequestContextManager(req_context)
return ctx_manager
def decorator(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
ctx_manager = context_manager(*args, **kwargs)
with ctx_manager:
value = func(*args, **kwargs)
return value
return wrapper
return decorator
@with_context()
def modify_and_forward(
method: str = None,
path: str = None,
data_bytes: bytes = None,
headers: Headers = None,
forward_base_url: str = None,
listeners: List[ProxyListener] = None,
client_address: str = None,
server_address: str = None,
):
"""This is the central function that coordinates the incoming/outgoing messages
with the proxy listeners (message interceptors)."""
from localstack.services.edge import ProxyListenerEdge
# Check origin / referer header before anything else happens.
if (
not config.DISABLE_CORS_CHECKS
and should_enforce_self_managed_service(method, path, headers, data_bytes)
and not is_cors_origin_allowed(headers)
):
LOG.info(
"Blocked CORS request from forbidden origin %s",
headers.get("origin") or headers.get("referer"),
)
return cors_error_response()
listeners = [lis for lis in listeners or [] if lis]
default_listeners = list(ProxyListener.DEFAULT_LISTENERS)
# ensure that MessageModifyingProxyListeners are not applied in the edge proxy request chain
# TODO: find a better approach for this!
is_edge_request = [lis for lis in listeners if isinstance(lis, ProxyListenerEdge)]
if is_edge_request:
default_listeners = [
lis for lis in default_listeners if not isinstance(lis, MessageModifyingProxyListener)
]
listeners_inbound = default_listeners + listeners
listeners_outbound = listeners + default_listeners
data = data_bytes
original_request = RoutingRequest(method=method, path=path, data=data, headers=headers)
def is_full_url(url):
return re.match(r"[a-zA-Z]+://.+", url)
def get_proxy_backend_url(_path, original_url=None, run_listeners=False):
if is_full_url(_path):
_path = _path.split("://", 1)[1]
_path = "/%s" % (_path.split("/", 1)[1] if "/" in _path else "")
base_url = forward_base_url or original_url
result = update_path_in_url(base_url, _path)
if run_listeners:
for listener in listeners_inbound:
result = listener.get_forward_url(method, path, data, headers) or result
return result
target_url = path
if not is_full_url(target_url):
target_url = "%s%s" % (forward_base_url, target_url)
# update original "Host" header (moto s3 relies on this behavior)
if not headers.get("Host"):
headers["host"] = urlparse(target_url).netloc
headers["X-Forwarded-For"] = build_x_forwarded_for(headers, client_address, server_address)
response = None
handler_chain_request = original_request.copy()
modified_request_to_backend = None
# run inbound handlers (pre-invocation)
for listener in listeners_inbound:
try:
listener_result = listener.forward_request(
method=handler_chain_request.method,
path=handler_chain_request.path,
data=handler_chain_request.data,
headers=handler_chain_request.headers,
)
except HTTPException as e:
# TODO: implement properly using exception handlers
return http_exception_to_response(e)
if isinstance(listener, MessageModifyingProxyListener):
if isinstance(listener_result, RoutingRequest):
# update the modified request details, then call next listener
handler_chain_request.method = (
listener_result.method or handler_chain_request.method
)
handler_chain_request.path = listener_result.path or handler_chain_request.path
if listener_result.data is not None:
handler_chain_request.data = listener_result.data
if listener_result.headers is not None:
handler_chain_request.headers = listener_result.headers
continue
if isinstance(listener_result, Response):
response = listener_result
break
if isinstance(listener_result, LambdaResponse):
response = listener_result
break
if isinstance(listener_result, dict):
response = Response()
response._content = json.dumps(json_safe(listener_result))
response.headers["Content-Type"] = APPLICATION_JSON
response.status_code = 200
break
elif isinstance(listener_result, Request):
# TODO: unify modified_request_to_backend (requests.Request) and
# handler_chain_request (ls.routing.Request)
modified_request_to_backend = listener_result
break
elif http2_server.get_async_generator_result(listener_result):
return listener_result
elif listener_result is not True:
            # use the integer listener result as the status code, or default to 503 (Service Unavailable)
code = listener_result if isinstance(listener_result, int) else 503
response = Response()
response.status_code = code
response._content = ""
response.headers["Content-Length"] = "0"
append_cors_headers(request_headers=headers, response=response)
return response
# perform the actual invocation of the backend service
headers_to_send = None
data_to_send = None
method_to_send = None
if response is None:
headers_to_send = handler_chain_request.headers
headers_to_send["Connection"] = headers_to_send.get("Connection") or "close"
data_to_send = handler_chain_request.data
method_to_send = handler_chain_request.method
request_url = get_proxy_backend_url(handler_chain_request.path, run_listeners=True)
if modified_request_to_backend:
if modified_request_to_backend.url:
request_url = get_proxy_backend_url(
modified_request_to_backend.url, original_url=request_url
)
data_to_send = modified_request_to_backend.data
if modified_request_to_backend.method:
method_to_send = modified_request_to_backend.method
# make sure we drop "chunked" transfer encoding from the headers to be forwarded
headers_to_send.pop("Transfer-Encoding", None)
response = requests.request(
method_to_send,
url=request_url,
data=data_to_send,
headers=headers_to_send,
stream=True,
verify=False,
)
# prevent requests from processing response body (e.g., to pass-through gzip encoded content
# unmodified)
not_consumed = not getattr(response, "_content_consumed", True)
pass_raw = not_consumed or response.headers.get("content-encoding") in ["gzip"]
if pass_raw and getattr(response, "raw", None):
new_content = response.raw.read()
if new_content:
response._content = new_content
# run outbound handlers (post-invocation)
for listener in listeners_outbound:
updated_response = listener.return_response(
method=method_to_send or handler_chain_request.method,
path=handler_chain_request.path,
data=data_to_send or handler_chain_request.data,
headers=headers_to_send or handler_chain_request.headers,
response=response,
)
message_modifier = isinstance(listener, MessageModifyingProxyListener)
if message_modifier and isinstance(updated_response, RoutingResponse):
# update the fields from updated_response in final response
response.status_code = updated_response.status_code or response.status_code
response.headers = updated_response.headers or response.headers
if isinstance(updated_response.content, (str, bytes)):
response._content = updated_response.content
if isinstance(updated_response, Response):
response = updated_response
# allow pre-flight CORS headers by default
from localstack.services.s3.s3_listener import ProxyListenerS3
is_s3_listener = any(
isinstance(service_listener, ProxyListenerS3) for service_listener in listeners
)
if not is_s3_listener:
append_cors_headers(request_headers=headers, response=response)
return response
def build_x_forwarded_for(headers, client_address, server_address):
x_forwarded_for = headers.get("X-Forwarded-For")
if x_forwarded_for:
x_forwarded_for_list = (x_forwarded_for, client_address, server_address)
else:
x_forwarded_for_list = (client_address, server_address)
return ", ".join(x_forwarded_for_list)
class DuplexSocket(ssl.SSLSocket):
"""Simple duplex socket wrapper that allows serving HTTP/HTTPS over the same port."""
def accept(self):
newsock, addr = socket.socket.accept(self)
if DuplexSocket.is_ssl_socket(newsock) is not False:
newsock = self.context.wrap_socket(
newsock,
do_handshake_on_connect=self.do_handshake_on_connect,
suppress_ragged_eofs=self.suppress_ragged_eofs,
server_side=True,
)
return newsock, addr
@staticmethod
def is_ssl_socket(newsock):
"""Returns True/False if the socket uses SSL or not, or None if the status cannot be
determined"""
def peek_ssl_header():
peek_bytes = 5
first_bytes = newsock.recv(peek_bytes, socket.MSG_PEEK)
if len(first_bytes or "") != peek_bytes:
return
first_byte = first_bytes[0]
return first_byte < 32 or first_byte >= 127
try:
return peek_ssl_header()
except Exception:
# Fix for "[Errno 11] Resource temporarily unavailable" - This can
# happen if we're using a non-blocking socket in a blocking thread.
newsock.setblocking(1)
newsock.settimeout(1)
try:
return peek_ssl_header()
except Exception:
return False
# set globally defined SSL socket implementation class
ssl.SSLContext.sslsocket_class = DuplexSocket
class GenericProxy(object):
# TODO: move methods to different class?
@classmethod
def create_ssl_cert(cls, serial_number=None):
cert_pem_file = get_cert_pem_file_path()
return generate_ssl_cert(cert_pem_file, serial_number=serial_number)
@classmethod
def get_flask_ssl_context(cls, serial_number=None):
if config.USE_SSL:
_, cert_file_name, key_file_name = cls.create_ssl_cert(serial_number=serial_number)
return cert_file_name, key_file_name
return None
class UrlMatchingForwarder(ProxyListener):
"""
ProxyListener that matches URLs to a base url pattern, and if the request url matches the
pattern, forwards it to
a forward_url. See TestUrlMatchingForwarder for how it behaves.
"""
def __init__(self, base_url: str, forward_url: str) -> None:
super().__init__()
self.base_url = urlparse(base_url)
self.forward_url = urlparse(forward_url)
def forward_request(self, method, path, data, headers):
host = headers.get("Host", "")
if not self.matches(host, path):
return True
# build forward url
forward_url = self.build_forward_url(host, path)
# update headers
headers["Host"] = forward_url.netloc
# TODO: set proxy headers like x-forwarded-for?
return self.do_forward(method, forward_url.geturl(), headers, data)
def do_forward(self, method, url, headers, data):
return requests.request(method, url, data=data, headers=headers, stream=True, verify=False)
def matches(self, host, path):
        # TODO: refine matching of default ports (80, or 443 if the scheme is https). Example:
        #  "http://localhost:80" should match "http://localhost" in the host rule check below.
        #  This can lead to problems with the 443-4566 edge proxy forwarding if not enabled.
if self.base_url.netloc:
stripped_netloc, _, port = self.base_url.netloc.rpartition(":")
if host != self.base_url.netloc and (
host != stripped_netloc or port not in ["80", "443"]
):
return False
# check path components
if self.base_url.path == "/":
if path.startswith("/"):
return True
path_parts = path.split("/")
base_path_parts = self.base_url.path.split("/")
if len(base_path_parts) > len(path_parts):
return False
for i, component in enumerate(base_path_parts):
if component != path_parts[i]:
return False
return True
def build_forward_url(self, host, path):
# build forward url
if self.forward_url.hostname:
forward_host = self.forward_url.scheme + "://" + self.forward_url.netloc
else:
forward_host = host
forward_path_root = self.forward_url.path
forward_path = path[len(self.base_url.path) :] # strip base path
# avoid double slashes
if forward_path and not forward_path_root.endswith("/"):
if not forward_path.startswith("/"):
forward_path = "/" + forward_path
forward_url = forward_host + forward_path_root + forward_path
return urlparse(forward_url)
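# ----------------------------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): how UrlMatchingForwarder matches and
# rewrites URLs; hosts and paths below are hypothetical.
#
#   forwarder = UrlMatchingForwarder(
#       base_url="http://api.example.com/base", forward_url="http://localhost:8080"
#   )
#   forwarder.matches("api.example.com", "/base/users")  # -> True
#   forwarder.build_forward_url("api.example.com", "/base/users").geturl()
#   # -> "http://localhost:8080/users"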
class EndpointProxy(ProxyListener):
def __init__(self, base_url: str, forward_url: str) -> None:
super().__init__()
self.forwarder = UrlMatchingForwarder(
base_url=base_url,
forward_url=forward_url,
)
def forward_request(self, method, path, data, headers):
return self.forwarder.forward_request(method, path, data, headers)
def register(self):
ProxyListener.DEFAULT_LISTENERS.append(self)
def unregister(self):
try:
ProxyListener.DEFAULT_LISTENERS.remove(self)
except ValueError:
pass
class FakeEndpointProxyServer(Server):
"""
Makes an EndpointProxy behave like a Server. You can use this to create transparent
multiplexing behavior.
"""
endpoint: EndpointProxy
def __init__(self, endpoint: EndpointProxy) -> None:
self.endpoint = endpoint
self._shutdown_event = threading.Event()
self._url = self.endpoint.forwarder.base_url
super().__init__(self._url.port, self._url.hostname)
@property
def url(self):
return self._url.geturl()
def do_run(self):
self.endpoint.register()
try:
self._shutdown_event.wait()
finally:
self.endpoint.unregister()
def do_shutdown(self):
self._shutdown_event.set()
self.endpoint.unregister()
async def _accept_connection2(self, protocol_factory, conn, extra, sslcontext, *args, **kwargs):
is_ssl_socket = DuplexSocket.is_ssl_socket(conn)
if is_ssl_socket is False:
sslcontext = None
result = await _accept_connection2_orig(
self, protocol_factory, conn, extra, sslcontext, *args, **kwargs
)
return result
# patch asyncio server to accept SSL and non-SSL traffic over same port
if hasattr(BaseSelectorEventLoop, "_accept_connection2") and not hasattr(
BaseSelectorEventLoop, "_ls_patched"
):
_accept_connection2_orig = BaseSelectorEventLoop._accept_connection2
BaseSelectorEventLoop._accept_connection2 = _accept_connection2
BaseSelectorEventLoop._ls_patched = True
def get_cert_pem_file_path():
return os.path.join(config.dirs.cache, SERVER_CERT_PEM_FILE)
def start_proxy_server(
port,
bind_address=None,
forward_url=None,
use_ssl=None,
update_listener: Optional[Union[ProxyListener, List[ProxyListener]]] = None,
quiet=False,
params=None, # TODO: not being used - should be investigated/removed
asynchronous=True,
check_port=True,
max_content_length: int = None,
send_timeout: int = None,
):
bind_address = bind_address if bind_address else BIND_HOST
if update_listener is None:
listeners = []
elif isinstance(update_listener, list):
listeners = update_listener
else:
listeners = [update_listener]
def handler(request, data):
parsed_url = urlparse(request.url)
path_with_params = path_from_url(request.url)
method = request.method
headers = request.headers
headers[HEADER_LOCALSTACK_REQUEST_URL] = str(request.url)
response = modify_and_forward(
method=method,
path=path_with_params,
data_bytes=data,
headers=headers,
forward_base_url=forward_url,
listeners=listeners,
client_address=request.remote_addr,
server_address=parsed_url.netloc,
)
return response
ssl_creds = (None, None)
if use_ssl:
install_predefined_cert_if_available()
_, cert_file_name, key_file_name = GenericProxy.create_ssl_cert(serial_number=port)
ssl_creds = (cert_file_name, key_file_name)
result = http2_server.run_server(
port,
bind_address,
handler=handler,
asynchronous=asynchronous,
ssl_creds=ssl_creds,
max_content_length=max_content_length,
send_timeout=send_timeout,
)
if asynchronous and check_port:
wait_for_port_open(port, sleep_time=0.2, retries=12)
return result
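# ----------------------------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): wiring a listener into a proxy server;
# the port, backend URL, and listener class are hypothetical examples.
#
#   listener = _ExampleShortCircuitListener()
#   start_proxy_server(port=4567, forward_url="http://localhost:4566", update_listener=listener)
#   # every request to :4567 now runs through modify_and_forward() and the listener chain
#   # before being forwarded to the backend URL.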
def install_predefined_cert_if_available():
try:
from localstack_ext.bootstrap import install
if config.SKIP_SSL_CERT_DOWNLOAD:
LOG.debug("Skipping download of local SSL cert, as SKIP_SSL_CERT_DOWNLOAD=1")
return
install.setup_ssl_cert()
except Exception:
pass
def serve_flask_app(app, port, host=None, cors=True, asynchronous=False):
if cors:
CORS(app)
if not config.DEBUG:
logging.getLogger("werkzeug").setLevel(logging.ERROR)
if not host:
host = "0.0.0.0"
ssl_context = None
if not config.FORWARD_EDGE_INMEM:
ssl_context = GenericProxy.get_flask_ssl_context(serial_number=port)
app.config["ENV"] = "development"
def noecho(*args, **kwargs):
pass
try:
import click
click.echo = noecho
except Exception:
pass
def _run(*_):
app.run(port=int(port), threaded=True, host=host, ssl_context=ssl_context)
return app
if asynchronous:
return start_thread(_run)
return _run()
| import functools
import json
import logging
import os
import re
import socket
import ssl
import threading
from asyncio.selector_events import BaseSelectorEventLoop
from typing import Dict, List, Match, Optional, Union
from urllib.parse import parse_qs, unquote, urlencode, urlparse
import requests
from flask_cors import CORS
from flask_cors.core import (
ACL_ALLOW_HEADERS,
ACL_EXPOSE_HEADERS,
ACL_METHODS,
ACL_ORIGIN,
ACL_REQUEST_HEADERS,
)
from requests.models import Request, Response
from werkzeug.exceptions import HTTPException
from localstack import config
from localstack.config import (
EXTRA_CORS_ALLOWED_HEADERS,
EXTRA_CORS_ALLOWED_ORIGINS,
EXTRA_CORS_EXPOSE_HEADERS,
)
from localstack.constants import (
APPLICATION_JSON,
AWS_REGION_US_EAST_1,
BIND_HOST,
HEADER_LOCALSTACK_REQUEST_URL,
)
from localstack.services.messages import Headers, MessagePayload
from localstack.services.messages import Request as RoutingRequest
from localstack.services.messages import Response as RoutingResponse
from localstack.utils.aws import aws_stack
from localstack.utils.aws.aws_responses import LambdaResponse
from localstack.utils.aws.aws_stack import is_internal_call_context
from localstack.utils.aws.request_context import RequestContextManager, get_proxy_request_for_thread
from localstack.utils.crypto import generate_ssl_cert
from localstack.utils.functions import empty_context_manager
from localstack.utils.json import json_safe
from localstack.utils.net import wait_for_port_open
from localstack.utils.server import http2_server
from localstack.utils.serving import Server
from localstack.utils.strings import to_bytes, to_str
from localstack.utils.threads import start_thread
from localstack.utils.urls import path_from_url
# set up logger
LOG = logging.getLogger(__name__)
# path for test certificate
SERVER_CERT_PEM_FILE = "server.test.pem"
# CORS constants below
CORS_ALLOWED_HEADERS = [
"authorization",
"cache-control",
"content-length",
"content-md5",
"content-type",
"etag",
"location",
"x-amz-acl",
"x-amz-content-sha256",
"x-amz-date",
"x-amz-request-id",
"x-amz-security-token",
"x-amz-tagging",
"x-amz-target",
"x-amz-user-agent",
"x-amz-version-id",
"x-amzn-requestid",
"x-localstack-target",
# for AWS SDK v3
"amz-sdk-invocation-id",
"amz-sdk-request",
]
if EXTRA_CORS_ALLOWED_HEADERS:
CORS_ALLOWED_HEADERS += EXTRA_CORS_ALLOWED_HEADERS.split(",")
CORS_ALLOWED_METHODS = ("HEAD", "GET", "PUT", "POST", "DELETE", "OPTIONS", "PATCH")
CORS_EXPOSE_HEADERS = (
"etag",
"x-amz-version-id",
)
if EXTRA_CORS_EXPOSE_HEADERS:
CORS_EXPOSE_HEADERS += tuple(EXTRA_CORS_EXPOSE_HEADERS.split(","))
ALLOWED_CORS_RESPONSE_HEADERS = [
"Access-Control-Allow-Origin",
"Access-Control-Allow-Methods",
"Access-Control-Allow-Headers",
"Access-Control-Max-Age",
"Access-Control-Allow-Credentials",
"Access-Control-Expose-Headers",
]
ALLOWED_CORS_ORIGINS = [
"https://app.localstack.cloud",
"http://app.localstack.cloud",
f"https://localhost:{config.EDGE_PORT}",
f"http://localhost:{config.EDGE_PORT}",
f"https://localhost.localstack.cloud:{config.EDGE_PORT}",
f"http://localhost.localstack.cloud:{config.EDGE_PORT}",
"https://localhost",
"https://localhost.localstack.cloud",
]
if EXTRA_CORS_ALLOWED_ORIGINS:
ALLOWED_CORS_ORIGINS += EXTRA_CORS_ALLOWED_ORIGINS.split(",")
class ProxyListener(object):
# List of `ProxyListener` instances that are enabled by default for all requests.
# For inbound flows, the default listeners are applied *before* forwarding requests
# to the backend; for outbound flows, the default listeners are applied *after* the
# response has been received from the backend service.
DEFAULT_LISTENERS = []
def forward_request(
self, method: str, path: str, data: MessagePayload, headers: Headers
) -> Union[int, Response, Request, dict, bool]:
"""This interceptor method is called by the proxy when receiving a new request
(*before* forwarding the request to the backend service). It receives details
of the incoming request, and returns either of the following results:
* True if the request should be forwarded to the backend service as-is (default).
* An integer (e.g., 200) status code to return directly to the client without
calling the backend service.
* An instance of requests.models.Response to return directly to the client without
calling the backend service.
* An instance of requests.models.Request which represents a new/modified request
that will be forwarded to the backend service.
* Any other value, in which case a 503 Bad Gateway is returned to the client
without calling the backend service.
"""
return True
def return_response(
self,
method: str,
path: str,
data: MessagePayload,
headers: Headers,
response: Response,
) -> Optional[Response]:
"""This interceptor method is called by the proxy when returning a response
(*after* having forwarded the request and received a response from the backend
service). It receives details of the incoming request as well as the response
from the backend service, and returns either of the following results:
* An instance of requests.models.Response to return to the client instead of the
actual response returned from the backend service.
* Any other value, in which case the response from the backend service is
returned to the client.
"""
return None
def get_forward_url(self, method: str, path: str, data, headers):
"""Return a custom URL to forward the given request to. If a falsy value is returned,
then the default URL will be used.
"""
return None
class MessageModifyingProxyListener(ProxyListener):
# Special handler that can be used to modify an inbound/outbound message
# and forward it to the next handler in the chain (instead of forwarding
# to the backend directly, which is the default for regular ProxyListeners)
# TODO: to be replaced with listener chain in ASF Gateway, once integrated
def forward_request(
self, method: str, path: str, data: MessagePayload, headers: Headers
) -> Optional[RoutingRequest]:
"""Return a RoutingRequest with modified request data, or None to forward the request
unmodified"""
return None
def return_response(
self,
method: str,
path: str,
data: MessagePayload,
headers: Headers,
response: Response,
) -> Optional[RoutingResponse]:
"""Return a RoutingResponse with modified response data, or None to forward the response
unmodified"""
return None
class ArnPartitionRewriteListener(MessageModifyingProxyListener):
"""
Intercepts requests and responses and tries to adjust the partitions in ARNs within the
intercepted requests.
For incoming requests, the default partition is set ("aws").
For outgoing responses, the partition is adjusted based on the region in the ARN, or by the
default region
if the ARN does not contain a region.
This listener is used to support other partitions than the default "aws" partition (f.e.
aws-us-gov) without
rewriting all the cases where the ARN is parsed or constructed within LocalStack or moto.
In other words, this listener makes sure that internally the ARNs are always in the partition
"aws", while the
client gets ARNs with the proper partition.
"""
# Partition which should be statically set for incoming requests
DEFAULT_INBOUND_PARTITION = "aws"
class InvalidRegionException(Exception):
"""An exception indicating that a region could not be matched to a partition."""
pass
arn_regex = re.compile(
r"arn:" # Prefix
r"(?P<Partition>(aws|aws-cn|aws-iso|aws-iso-b|aws-us-gov)*):" # Partition
r"(?P<Service>[\w-]*):" # Service (lambda, s3, ecs,...)
r"(?P<Region>[\w-]*):" # Region (us-east-1, us-gov-west-1,...)
r"(?P<AccountID>[\w-]*):" # AccountID
r"(?P<ResourcePath>" # Combine the resource type and id to the ResourcePath
r"((?P<ResourceType>[\w-]*)[:/])?" # ResourceType (optional, f.e. S3 bucket name)
r"(?P<ResourceID>[\w\-/*]*)" # Resource ID (f.e. file name in S3)
r")"
)
def forward_request(
self, method: str, path: str, data: MessagePayload, headers: Headers
) -> Optional[RoutingRequest]:
return RoutingRequest(
method=method,
path=self._adjust_partition_in_path(path, self.DEFAULT_INBOUND_PARTITION),
data=self._adjust_partition(data, self.DEFAULT_INBOUND_PARTITION),
headers=self._adjust_partition(headers, self.DEFAULT_INBOUND_PARTITION),
)
def return_response(
self,
method: str,
path: str,
data: MessagePayload,
headers: Headers,
response: Response,
) -> Optional[RoutingResponse]:
# Only handle responses for calls from external clients
if is_internal_call_context(headers):
return None
return RoutingResponse(
status_code=response.status_code,
content=self._adjust_partition(response.content),
headers=self._adjust_partition(response.headers),
)
def _adjust_partition_in_path(self, path: str, static_partition: str = None):
"""Adjusts the (still url encoded) URL path"""
parsed_url = urlparse(path)
# Make sure to keep blank values, otherwise we drop query params which do not have a
# value (f.e. "/?policy")
decoded_query = parse_qs(qs=parsed_url.query, keep_blank_values=True)
adjusted_path = self._adjust_partition(parsed_url.path, static_partition)
adjusted_query = self._adjust_partition(decoded_query, static_partition)
encoded_query = urlencode(adjusted_query, doseq=True)
# Make sure to avoid empty equals signs (in between and in the end)
encoded_query = encoded_query.replace("=&", "&")
encoded_query = re.sub(r"=$", "", encoded_query)
return f"{adjusted_path}{('?' + encoded_query) if encoded_query else ''}"
def _adjust_partition(self, source, static_partition: str = None):
# Call this function recursively if we get a dictionary or a list
if isinstance(source, dict):
result = {}
for k, v in source.items():
result[k] = self._adjust_partition(v, static_partition)
return result
if isinstance(source, list):
result = []
for v in source:
result.append(self._adjust_partition(v, static_partition))
return result
elif isinstance(source, bytes):
try:
decoded = unquote(to_str(source))
adjusted = self._adjust_partition(decoded, static_partition)
return to_bytes(adjusted)
except UnicodeDecodeError:
# If the body can't be decoded to a string, we return the initial source
return source
elif not isinstance(source, str):
# Ignore any other types
return source
return self.arn_regex.sub(lambda m: self._adjust_match(m, static_partition), source)
def _adjust_match(self, match: Match, static_partition: str = None):
region = match.group("Region")
partition = self._partition_lookup(region) if static_partition is None else static_partition
service = match.group("Service")
account_id = match.group("AccountID")
resource_path = match.group("ResourcePath")
return f"arn:{partition}:{service}:{region}:{account_id}:{resource_path}"
def _partition_lookup(self, region: str):
try:
partition = self._get_partition_for_region(region)
except ArnPartitionRewriteListener.InvalidRegionException:
try:
# If the region is not properly set (f.e. because it is set to a wildcard),
# the partition is determined based on the default region.
partition = self._get_partition_for_region(config.DEFAULT_REGION)
except self.InvalidRegionException:
# If it also fails with the DEFAULT_REGION, we use us-east-1 as a fallback
partition = self._get_partition_for_region(AWS_REGION_US_EAST_1)
return partition
def _get_partition_for_region(self, region: str) -> str:
# Region-Partition matching is based on the "regionRegex" definitions in the endpoints.json
# in the botocore package.
if region.startswith("us-gov-"):
return "aws-us-gov"
elif region.startswith("us-iso-"):
return "aws-iso"
elif region.startswith("us-isob-"):
return "aws-iso-b"
elif region.startswith("cn-"):
return "aws-cn"
elif re.match(r"^(us|eu|ap|sa|ca|me|af)-\w+-\d+$", region):
return "aws"
else:
raise ArnPartitionRewriteListener.InvalidRegionException(
f"Region ({region}) could not be matched to a partition."
)
# -------------------
# BASE BACKEND UTILS
# -------------------
class RegionBackend(object):
"""Base class for region-specific backends for the different APIs.
RegionBackend lookup methods are not thread safe."""
REGIONS: Dict[str, "RegionBackend"]
name: str # name of the region
@classmethod
def get(cls, region: str = None) -> "RegionBackend":
region = region or cls.get_current_request_region()
regions = cls.regions()
backend = regions.get(region)
if not backend:
backend = cls()
backend.name = region
regions[region] = backend
return regions[region]
@classmethod
def regions(cls) -> Dict[str, "RegionBackend"]:
if not hasattr(cls, "REGIONS"):
# maps region name to region backend instance
cls.REGIONS = {}
return cls.REGIONS
@classmethod
def get_current_request_region(cls):
return aws_stack.get_region()
@classmethod
def reset(cls):
"""Reset the (in-memory) state of this service region backend."""
# for now, simply reset the regions and discard all existing region instances
cls.REGIONS = {}
return cls.regions()
# ---------------------
# PROXY LISTENER UTILS
# ---------------------
def append_cors_headers(request_headers=None, response=None):
# Note: Use "response is not None" here instead of "not response"!
headers = {} if response is None else response.headers
# In case we have LambdaResponse copy multivalue headers to regular headers, since
# CaseInsensitiveDict does not support "__contains__" and it's easier to deal with
# a single headers object
if isinstance(response, LambdaResponse):
for key in response.multi_value_headers.keys():
headers_list = list(response.multi_value_headers[key]) + [response.headers.get(key)]
headers_list = [str(h) for h in headers_list if h is not None]
headers[key] = ",".join(headers_list)
response.multi_value_headers = {}
if ACL_ORIGIN not in headers:
headers[ACL_ORIGIN] = (
request_headers["origin"]
if request_headers.get("origin") and not config.DISABLE_CORS_CHECKS
else "*"
)
if ACL_METHODS not in headers:
headers[ACL_METHODS] = ",".join(CORS_ALLOWED_METHODS)
if ACL_ALLOW_HEADERS not in headers:
requested_headers = headers.get(ACL_REQUEST_HEADERS, "")
requested_headers = re.split(r"[,\s]+", requested_headers) + CORS_ALLOWED_HEADERS
headers[ACL_ALLOW_HEADERS] = ",".join([h for h in requested_headers if h])
if ACL_EXPOSE_HEADERS not in headers:
headers[ACL_EXPOSE_HEADERS] = ",".join(CORS_EXPOSE_HEADERS)
for header in ALLOWED_CORS_RESPONSE_HEADERS:
if headers.get(header) == "":
del headers[header]
def http_exception_to_response(e: HTTPException):
"""Convert a werkzeug HTTP exception to a requests.Response object"""
response = Response()
response.status_code = e.code
response.headers.update(dict(e.get_headers()))
body = e.get_body()
response.headers["Content-Length"] = str(len(str(body or "")))
response._content = body
return response
def cors_error_response():
response = Response()
response.status_code = 403
return response
def _is_in_allowed_origins(allowed_origins, origin):
for allowed_origin in allowed_origins:
if allowed_origin == "*" or origin == allowed_origin:
return True
return False
def is_cors_origin_allowed(headers, allowed_origins=None):
"""Returns true if origin is allowed to perform cors requests, false otherwise"""
allowed_origins = ALLOWED_CORS_ORIGINS if allowed_origins is None else allowed_origins
origin = headers.get("origin")
referer = headers.get("referer")
if origin:
return _is_in_allowed_origins(allowed_origins, origin)
elif referer:
referer_uri = "{uri.scheme}://{uri.netloc}".format(uri=urlparse(referer))
return _is_in_allowed_origins(allowed_origins, referer_uri)
# If both headers are not set, let it through (awscli etc. do not send these headers)
return True
def should_enforce_self_managed_service(method, path, headers, data):
if config.DISABLE_CUSTOM_CORS_S3 and config.DISABLE_CUSTOM_CORS_APIGATEWAY:
return True
# allow only certain api calls without checking origin
import localstack.services.edge
api, _ = localstack.services.edge.get_api_from_custom_rules(method, path, data, headers) or (
"",
None,
)
if not config.DISABLE_CUSTOM_CORS_S3 and api == "s3":
return False
if not config.DISABLE_CUSTOM_CORS_APIGATEWAY and api == "apigateway":
return False
return True
def update_path_in_url(base_url: str, path: str) -> str:
"""Construct a URL from the given base URL and path"""
parsed = urlparse(base_url)
path = path or ""
path = path if path.startswith("/") else f"/{path}"
protocol = f"{parsed.scheme}:" if parsed.scheme else ""
return f"{protocol}//{parsed.netloc}{path}"
def with_context():
"""
Decorator wraps function in a request context manager
:return:
"""
def context_manager(method=None, path=None, data_bytes=None, headers=None, *args, **kwargs):
req_context = get_proxy_request_for_thread()
ctx_manager = empty_context_manager()
if not req_context:
req_context = Request(url=path, data=data_bytes, headers=headers, method=method)
ctx_manager = RequestContextManager(req_context)
return ctx_manager
def decorator(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
ctx_manager = context_manager(*args, **kwargs)
with ctx_manager:
value = func(*args, **kwargs)
return value
return wrapper
return decorator
@with_context()
def modify_and_forward(
method: str = None,
path: str = None,
data_bytes: bytes = None,
headers: Headers = None,
forward_base_url: str = None,
listeners: List[ProxyListener] = None,
client_address: str = None,
server_address: str = None,
):
"""This is the central function that coordinates the incoming/outgoing messages
with the proxy listeners (message interceptors)."""
from localstack.services.edge import ProxyListenerEdge
# Check origin / referer header before anything else happens.
if (
not config.DISABLE_CORS_CHECKS
and should_enforce_self_managed_service(method, path, headers, data_bytes)
and not is_cors_origin_allowed(headers)
):
LOG.info(
"Blocked CORS request from forbidden origin %s",
headers.get("origin") or headers.get("referer"),
)
return cors_error_response()
listeners = [lis for lis in listeners or [] if lis]
default_listeners = list(ProxyListener.DEFAULT_LISTENERS)
# ensure that MessageModifyingProxyListeners are not applied in the edge proxy request chain
# TODO: find a better approach for this!
is_edge_request = [lis for lis in listeners if isinstance(lis, ProxyListenerEdge)]
if is_edge_request:
default_listeners = [
lis for lis in default_listeners if not isinstance(lis, MessageModifyingProxyListener)
]
listeners_inbound = default_listeners + listeners
listeners_outbound = listeners + default_listeners
data = data_bytes
original_request = RoutingRequest(method=method, path=path, data=data, headers=headers)
def is_full_url(url):
return re.match(r"[a-zA-Z]+://.+", url)
def get_proxy_backend_url(_path, original_url=None, run_listeners=False):
if is_full_url(_path):
_path = _path.split("://", 1)[1]
_path = "/%s" % (_path.split("/", 1)[1] if "/" in _path else "")
base_url = forward_base_url or original_url
result = update_path_in_url(base_url, _path)
if run_listeners:
for listener in listeners_inbound:
result = listener.get_forward_url(method, path, data, headers) or result
return result
target_url = path
if not is_full_url(target_url):
target_url = "%s%s" % (forward_base_url, target_url)
# update original "Host" header (moto s3 relies on this behavior)
if not headers.get("Host"):
headers["host"] = urlparse(target_url).netloc
headers["X-Forwarded-For"] = build_x_forwarded_for(headers, client_address, server_address)
response = None
handler_chain_request = original_request.copy()
modified_request_to_backend = None
# run inbound handlers (pre-invocation)
for listener in listeners_inbound:
try:
listener_result = listener.forward_request(
method=handler_chain_request.method,
path=handler_chain_request.path,
data=handler_chain_request.data,
headers=handler_chain_request.headers,
)
except HTTPException as e:
# TODO: implement properly using exception handlers
return http_exception_to_response(e)
if isinstance(listener, MessageModifyingProxyListener):
if isinstance(listener_result, RoutingRequest):
# update the modified request details, then call next listener
handler_chain_request.method = (
listener_result.method or handler_chain_request.method
)
handler_chain_request.path = listener_result.path or handler_chain_request.path
if listener_result.data is not None:
handler_chain_request.data = listener_result.data
if listener_result.headers is not None:
handler_chain_request.headers = listener_result.headers
continue
if isinstance(listener_result, Response):
response = listener_result
break
if isinstance(listener_result, LambdaResponse):
response = listener_result
break
if isinstance(listener_result, dict):
response = Response()
response._content = json.dumps(json_safe(listener_result))
response.headers["Content-Type"] = APPLICATION_JSON
response.status_code = 200
break
elif isinstance(listener_result, Request):
# TODO: unify modified_request_to_backend (requests.Request) and
# handler_chain_request (ls.routing.Request)
modified_request_to_backend = listener_result
break
elif http2_server.get_async_generator_result(listener_result):
return listener_result
elif listener_result is not True:
# get status code from response, or use Bad Gateway status code
code = listener_result if isinstance(listener_result, int) else 503
response = Response()
response.status_code = code
response._content = ""
response.headers["Content-Length"] = "0"
append_cors_headers(request_headers=headers, response=response)
return response
# perform the actual invocation of the backend service
headers_to_send = None
data_to_send = None
method_to_send = None
if response is None:
headers_to_send = handler_chain_request.headers
headers_to_send["Connection"] = headers_to_send.get("Connection") or "close"
data_to_send = handler_chain_request.data
method_to_send = handler_chain_request.method
request_url = get_proxy_backend_url(handler_chain_request.path, run_listeners=True)
if modified_request_to_backend:
if modified_request_to_backend.url:
request_url = get_proxy_backend_url(
modified_request_to_backend.url, original_url=request_url
)
data_to_send = modified_request_to_backend.data
if modified_request_to_backend.method:
method_to_send = modified_request_to_backend.method
# make sure we drop "chunked" transfer encoding from the headers to be forwarded
headers_to_send.pop("Transfer-Encoding", None)
response = requests.request(
method_to_send,
url=request_url,
data=data_to_send,
headers=headers_to_send,
stream=True,
verify=False,
)
# prevent requests from processing response body (e.g., to pass-through gzip encoded content
# unmodified)
not_consumed = not getattr(response, "_content_consumed", True)
pass_raw = not_consumed or response.headers.get("content-encoding") in ["gzip"]
if pass_raw and getattr(response, "raw", None):
new_content = response.raw.read()
if new_content:
response._content = new_content
# run outbound handlers (post-invocation)
for listener in listeners_outbound:
updated_response = listener.return_response(
method=method_to_send or handler_chain_request.method,
path=handler_chain_request.path,
data=data_to_send or handler_chain_request.data,
headers=headers_to_send or handler_chain_request.headers,
response=response,
)
message_modifier = isinstance(listener, MessageModifyingProxyListener)
if message_modifier and isinstance(updated_response, RoutingResponse):
# update the fields from updated_response in final response
response.status_code = updated_response.status_code or response.status_code
response.headers = updated_response.headers or response.headers
if isinstance(updated_response.content, (str, bytes)):
response._content = updated_response.content
if isinstance(updated_response, Response):
response = updated_response
# allow pre-flight CORS headers by default
from localstack.services.s3.s3_listener import ProxyListenerS3
is_s3_listener = any(
isinstance(service_listener, ProxyListenerS3) for service_listener in listeners
)
if not is_s3_listener:
append_cors_headers(request_headers=headers, response=response)
return response
def build_x_forwarded_for(headers, client_address, server_address):
x_forwarded_for = headers.get("X-Forwarded-For")
if x_forwarded_for:
x_forwarded_for_list = (x_forwarded_for, client_address, server_address)
else:
x_forwarded_for_list = (client_address, server_address)
return ", ".join(x_forwarded_for_list)
class DuplexSocket(ssl.SSLSocket):
"""Simple duplex socket wrapper that allows serving HTTP/HTTPS over the same port."""
def accept(self):
newsock, addr = socket.socket.accept(self)
if DuplexSocket.is_ssl_socket(newsock) is not False:
newsock = self.context.wrap_socket(
newsock,
do_handshake_on_connect=self.do_handshake_on_connect,
suppress_ragged_eofs=self.suppress_ragged_eofs,
server_side=True,
)
return newsock, addr
@staticmethod
def is_ssl_socket(newsock):
"""Returns True/False if the socket uses SSL or not, or None if the status cannot be
determined"""
def peek_ssl_header():
peek_bytes = 5
first_bytes = newsock.recv(peek_bytes, socket.MSG_PEEK)
if len(first_bytes or "") != peek_bytes:
return
first_byte = first_bytes[0]
return first_byte < 32 or first_byte >= 127
try:
return peek_ssl_header()
except Exception:
# Fix for "[Errno 11] Resource temporarily unavailable" - This can
# happen if we're using a non-blocking socket in a blocking thread.
newsock.setblocking(1)
newsock.settimeout(1)
try:
return peek_ssl_header()
except Exception:
return False
# set globally defined SSL socket implementation class
ssl.SSLContext.sslsocket_class = DuplexSocket
class GenericProxy(object):
# TODO: move methods to different class?
@classmethod
def create_ssl_cert(cls, serial_number=None):
cert_pem_file = get_cert_pem_file_path()
return generate_ssl_cert(cert_pem_file, serial_number=serial_number)
@classmethod
def get_flask_ssl_context(cls, serial_number=None):
if config.USE_SSL:
_, cert_file_name, key_file_name = cls.create_ssl_cert(serial_number=serial_number)
return cert_file_name, key_file_name
return None
class UrlMatchingForwarder(ProxyListener):
"""
ProxyListener that matches URLs to a base url pattern, and if the request url matches the
pattern, forwards it to
a forward_url. See TestUrlMatchingForwarder for how it behaves.
"""
def __init__(self, base_url: str, forward_url: str) -> None:
super().__init__()
self.base_url = urlparse(base_url)
self.forward_url = urlparse(forward_url)
def forward_request(self, method, path, data, headers):
host = headers.get("Host", "")
if not self.matches(host, path):
return True
# build forward url
forward_url = self.build_forward_url(host, path)
# update headers
headers["Host"] = forward_url.netloc
# TODO: set proxy headers like x-forwarded-for?
return self.do_forward(method, forward_url.geturl(), headers, data)
def do_forward(self, method, url, headers, data):
return requests.request(method, url, data=data, headers=headers, stream=True, verify=False)
def matches(self, host, path):
# TODO: refine matching default ports (80, 443 if scheme is https). Example:
# http://localhost:80 matches
# http://localhost) check host rule. Can lead to problems with 443-4566 edge proxy
# forwarding if not enabled
if self.base_url.netloc:
stripped_netloc, _, port = self.base_url.netloc.rpartition(":")
if host != self.base_url.netloc and (
host != stripped_netloc or port not in ["80", "443"]
):
return False
# check path components
if self.base_url.path == "/":
if path.startswith("/"):
return True
path_parts = path.split("/")
base_path_parts = self.base_url.path.split("/")
if len(base_path_parts) > len(path_parts):
return False
for i, component in enumerate(base_path_parts):
if component != path_parts[i]:
return False
return True
def build_forward_url(self, host, path):
# build forward url
if self.forward_url.hostname:
forward_host = self.forward_url.scheme + "://" + self.forward_url.netloc
else:
forward_host = host
forward_path_root = self.forward_url.path
forward_path = path[len(self.base_url.path) :] # strip base path
# avoid double slashes
if forward_path and not forward_path_root.endswith("/"):
if not forward_path.startswith("/"):
forward_path = "/" + forward_path
forward_url = forward_host + forward_path_root + forward_path
return urlparse(forward_url)
class EndpointProxy(ProxyListener):
def __init__(self, base_url: str, forward_url: str) -> None:
super().__init__()
self.forwarder = UrlMatchingForwarder(
base_url=base_url,
forward_url=forward_url,
)
def forward_request(self, method, path, data, headers):
return self.forwarder.forward_request(method, path, data, headers)
def register(self):
ProxyListener.DEFAULT_LISTENERS.append(self)
def unregister(self):
try:
ProxyListener.DEFAULT_LISTENERS.remove(self)
except ValueError:
pass
class FakeEndpointProxyServer(Server):
"""
Makes an EndpointProxy behave like a Server. You can use this to create transparent
multiplexing behavior.
"""
endpoint: EndpointProxy
def __init__(self, endpoint: EndpointProxy) -> None:
self.endpoint = endpoint
self._shutdown_event = threading.Event()
self._url = self.endpoint.forwarder.base_url
super().__init__(self._url.port, self._url.hostname)
@property
def url(self):
return self._url.geturl()
def do_run(self):
self.endpoint.register()
try:
self._shutdown_event.wait()
finally:
self.endpoint.unregister()
def do_shutdown(self):
self._shutdown_event.set()
self.endpoint.unregister()
async def _accept_connection2(self, protocol_factory, conn, extra, sslcontext, *args, **kwargs):
is_ssl_socket = DuplexSocket.is_ssl_socket(conn)
if is_ssl_socket is False:
sslcontext = None
result = await _accept_connection2_orig(
self, protocol_factory, conn, extra, sslcontext, *args, **kwargs
)
return result
# patch asyncio server to accept SSL and non-SSL traffic over same port
if hasattr(BaseSelectorEventLoop, "_accept_connection2") and not hasattr(
BaseSelectorEventLoop, "_ls_patched"
):
_accept_connection2_orig = BaseSelectorEventLoop._accept_connection2
BaseSelectorEventLoop._accept_connection2 = _accept_connection2
BaseSelectorEventLoop._ls_patched = True
def get_cert_pem_file_path():
return os.path.join(config.dirs.cache, SERVER_CERT_PEM_FILE)
def start_proxy_server(
port,
bind_address=None,
forward_url=None,
use_ssl=None,
update_listener: Optional[Union[ProxyListener, List[ProxyListener]]] = None,
quiet=False,
params=None, # TODO: not being used - should be investigated/removed
asynchronous=True,
check_port=True,
max_content_length: int = None,
send_timeout: int = None,
):
bind_address = bind_address if bind_address else BIND_HOST
if update_listener is None:
listeners = []
elif isinstance(update_listener, list):
listeners = update_listener
else:
listeners = [update_listener]
def handler(request, data):
parsed_url = urlparse(request.url)
path_with_params = path_from_url(request.url)
method = request.method
headers = request.headers
headers[HEADER_LOCALSTACK_REQUEST_URL] = str(request.url)
response = modify_and_forward(
method=method,
path=path_with_params,
data_bytes=data,
headers=headers,
forward_base_url=forward_url,
listeners=listeners,
client_address=request.remote_addr,
server_address=parsed_url.netloc,
)
return response
ssl_creds = (None, None)
if use_ssl:
install_predefined_cert_if_available()
_, cert_file_name, key_file_name = GenericProxy.create_ssl_cert(serial_number=port)
ssl_creds = (cert_file_name, key_file_name)
result = http2_server.run_server(
port,
bind_address,
handler=handler,
asynchronous=asynchronous,
ssl_creds=ssl_creds,
max_content_length=max_content_length,
send_timeout=send_timeout,
)
if asynchronous and check_port:
wait_for_port_open(port, sleep_time=0.2, retries=12)
return result
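# Illustrative sketch (not part of the original module, never called): starting a plain proxy on a
# made-up port with a listener attached. Returning True from forward_request (as in
# UrlMatchingForwarder above) lets the default forwarding to forward_url proceed unchanged.
def _example_start_proxy_server():
    class _LoggingListener(ProxyListener):
        def forward_request(self, method, path, data, headers):
            LOG.debug("proxying %s %s", method, path)
            return True
    start_proxy_server(
        4567,
        forward_url="http://localhost:8080",
        update_listener=_LoggingListener(),
        use_ssl=False,
        asynchronous=True,  # returns immediately; the HTTP server keeps running in the background
    )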
def install_predefined_cert_if_available():
try:
from localstack_ext.bootstrap import install
if config.SKIP_SSL_CERT_DOWNLOAD:
LOG.debug("Skipping download of local SSL cert, as SKIP_SSL_CERT_DOWNLOAD=1")
return
install.setup_ssl_cert()
except Exception:
pass
def serve_flask_app(app, port, host=None, cors=True, asynchronous=False):
if cors:
CORS(app)
if not config.DEBUG:
logging.getLogger("werkzeug").setLevel(logging.ERROR)
if not host:
host = "0.0.0.0"
ssl_context = None
if not config.FORWARD_EDGE_INMEM:
ssl_context = GenericProxy.get_flask_ssl_context(serial_number=port)
app.config["ENV"] = "development"
def noecho(*args, **kwargs):
pass
try:
import click
click.echo = noecho
except Exception:
pass
def _run(*_):
app.run(port=int(port), threaded=True, host=host, ssl_context=ssl_context)
return app
if asynchronous:
return start_thread(_run)
return _run()
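# Illustrative sketch (not part of the original module, never called): serving a minimal Flask app
# through the helper above; the port and host are made up.
def _example_serve_flask_app():
    from flask import Flask  # local import, only needed for this sketch
    app = Flask("example")
    @app.route("/health")
    def _health():
        return "ok"
    # asynchronous=True starts the app in a background thread and returns the thread handle
    serve_flask_app(app, port=5000, host="127.0.0.1", asynchronous=True)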
|
# -*- coding: utf-8 -*-
# ======================================================================================================================
# Copyright (©) 2015-2021 LCS - Laboratoire Catalyse et Spectrochimie, Caen, France.
# =
# CeCILL-B FREE SOFTWARE LICENSE AGREEMENT - See full LICENSE agreement in the root directory
# =
# ======================================================================================================================
"""
This module defines the `application` on which the API relies.
It also defines the default application preferences and the IPython magic functions.
"""
__all__ = []
import re
import sys
import logging
import subprocess
import datetime
import warnings
import pprint
import json
from os import environ
from pathlib import Path
import threading
from pkg_resources import parse_version, get_distribution, DistributionNotFound
import requests
from setuptools_scm import get_version
from traitlets.config.configurable import Config
from traitlets.config.application import Application
from traitlets import (
Bool,
Unicode,
List,
Integer,
Enum,
Union,
HasTraits,
Instance,
default,
observe,
)
from traitlets.config.manager import BaseJSONConfigManager
import matplotlib as mpl
from matplotlib import pyplot as plt
from IPython import get_ipython
from IPython.core.interactiveshell import InteractiveShell
from IPython.core.magic import Magics, magics_class, line_cell_magic
from IPython.core.magics.code import extract_symbols
from IPython.core.error import UsageError
from IPython.utils.text import get_text_list
from IPython.display import publish_display_data, clear_output
from jinja2 import Template
from spectrochempy.utils import MetaConfigurable, pathclean, get_pkg_path
from .plot_preferences import PlotPreferences
# set the default style
plt.style.use(["classic"])
# ----------------------------------------------------------------------------------------------------------------------
# Log levels
# ----------------------------------------------------------------------------------------------------------------------
DEBUG = logging.DEBUG
INFO = logging.INFO
WARNING = logging.WARNING
ERROR = logging.ERROR
CRITICAL = logging.CRITICAL
# ----------------------------------------------------------------------------------------------------------------------
# logo / copyright display
# ----------------------------------------------------------------------------------------------------------------------
def display_info_string(**kwargs): # pragma: no cover
_template = """
{{widgetcss}}
<table><tr><td>
{% if logo %}
<img src='data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABgAAAAYCAYAAADgdz34AAAAAXNSR0IArs4c6QAAAAlw
SFlzAAAJOgAACToB8GSSSgAAAetpVFh0WE1MOmNvbS5hZG9iZS54bXAAAAAAADx4OnhtcG1ldGEgeG1sbnM6eD0iYWRvYmU6bnM6
bWV0YS8iIHg6eG1wdGs9IlhNUCBDb3JlIDUuNC4wIj4KICAgPHJkZjpSREYgeG1sbnM6cmRmPSJodHRwOi8vd3d3LnczLm9yZy8x
OTk5LzAyLzIyLXJkZi1zeW50YXgtbnMjIj4KICAgICAgPHJkZjpEZXNjcmlwdGlvbiByZGY6YWJvdXQ9IiIKICAgICAgICAgICAg
eG1sbnM6eG1wPSJodHRwOi8vbnMuYWRvYmUuY29tL3hhcC8xLjAvIgogICAgICAgICAgICB4bWxuczp0aWZmPSJodHRwOi8vbnMu
YWRvYmUuY29tL3RpZmYvMS4wLyI+CiAgICAgICAgIDx4bXA6Q3JlYXRvclRvb2w+bWF0cGxvdGxpYiB2ZXJzaW9uIDIuMS4wLCBo
dHRwOi8vbWF0cGxvdGxpYi5vcmcvPC94bXA6Q3JlYXRvclRvb2w+CiAgICAgICAgIDx0aWZmOk9yaWVudGF0aW9uPjE8L3RpZmY6
T3JpZW50YXRpb24+CiAgICAgIDwvcmRmOkRlc2NyaXB0aW9uPgogICA8L3JkZjpSREY+CjwveDp4bXBtZXRhPgqNQaNYAAAGiUlE
QVRIDY1We4xU1Rn/3XPuYx47u8w+hnU38hTcuoUEt/6D2y4RB0ME1BoEd9taJaKh9CFiN7YGp7appUAMNmktMZFoJTYVLVQ0smsy
26CN0SU1QgsuFAaW3WVmx33N677O6XfuyoIxTXqSO/fec+75fd93vt/3/UbDV0aKSZmCpkFMLz3T9utuu2N+o98aDSMBKVAo89z5
y+zEz3ZafcCOfvWdlGCalqKn1Bf71CygTd+mf1esSOnpdMpTb+vWpTZuWVfe3jLPa5tzHYNm0T5N0gpdkkHaDBeGBU6d1/t/fyS8
+/CbqdfUvmsx1PuMgc2bNxv79u1zgd31r+7JH1jbIZKxWRXAcYUQ8IWvBfBXNjEuJWPgMA02NR7C3/pYT9fjdZ3A9tGrWF8YSJHn
qcDz3y7q2T967PZv+gnYJdd1mEZ+62zGDQV/dQgKhmLzDNOXCEWM3j6eTT5Y3w78dOBKJLR1PQf+4ivPj76UPZnssBN+wbM9Aet/
AV81Mf1EEULXYfOobvX2WWQk0aoioXwwSmirOlioY0mu8BIouzYl7P8GV3vpqCCEZvlFz769w08oLDWvyKIyL1asSm28d6WfzA97
ztvvV1kexUMsmhlkULEkuGYmFYC6AvfUrITnwUKl5K79lkjeSSRRTCTbQPd95e1WzMbZSya74XoXAxctCllCnbECMOjZNGRwvzIX
nD85wbkMmKK+U045Dtdi8Qp+SAxU2GTg2bYlC9224pgvmSb54vkVTBQYyhUt2KjAMyMmPjwRQW5Mh2WKwJhlBh6jVGagFM84wZnQ
4bpC0Rt4pk1PbSt0NDcxDA5xryosDHWgtbM0DGZDWLSoiDMDYeQnGVrmOThxLozB0RAaahzkJzjKNqcIQBymJFMkOlN8Dqjpg0XY
Tx5xO/QbmmUrqIjGJznq47TqTaClKYfjp+PInLMwnOdYvtQBZ2XcunQY+VwIo4U4muoFEjVEFE6lQyEUKzHYfgQG9ylCyngU+Cxj
tOqxCDGHcCsOMCs6iQul5ZiStdATYxjMZXDLTUVwLY8Jey4uOh2IxjwsrP8UXJYxUrkZrghBahzV5iXU6gNkq0Z1EzIsUBUSCV2n
EOHo0LVxHCpuxabJJdhi5PFnvw5vLXwXIfNZvD/+JNo/X40NegE54sUaazl+UL8XD1x+FB9Ijjt4EQfdGN6J/x131LwIV9ap/AYs
0x1fz1ZKFbh6A7qKy/By9Dg6G36Ep91vUJJ15Cqr0Z67E8/HzmBrw1OwxWyM+3Mo6BAuSB17oyfx0Oyl2DN0Hqs/70Cx6hBCvESF
UY1ShWXZZEE7OTAYxZzaPH4TuoiusZvRnunFy2NbiHYuBp2vB66srX4vMEjpRKPxKXmnoQ4+Mn4DPiv8CYcrs3GfNUXJLtM+alSO
hrMj/KT+wBNW3+E/2liywNO3iSflbaFva/+stGDTxE0E9Sjaox8HBhxpEamzMGSEaFKg+mjEddzDh1MxTDq3YV1kGBsjfwW3S9Cq
anjmko+ndlb1UR3s6K8JlfphNWq9Ew/7c61T2BB/EbcaNkb8GBaE0tANH7/M34PLdhJDzjIcL9xPbdTG6zyM72Y+wXPHmvB489No
fm0b5HnbQ9Rgp/7DSSd29AeVvPeNyK6JcYl/yQVi5dBjuGvoV/gaJe47s45QUxrDmcYX0MBsdF7egvXZ7+O0vZA4X8QmOQWjlSK7
RDz5wIM30gp9UbWcGjXxhzdDu1SiNSpx6kcQB57rPnr/3dlkZarWLnlRq5oPET1dOCIOk4wALib9eeS5iygfhkd09H0DWphB/+gs
+PcOAS+ssrFmmXXgVfR0de9cpbAJfH3Q1jofW9DZk56dDcVsq9YcsoUMEd1qyLoT3BX1YiyHMJuk97hyjqIoE91t+NcTLeN0ZrfM
oXatZbu6G0h4VG+ibqq0IJVK6cAjo6serG3vSUezCMct0yQeSOFJSUImqb2qbknUpDqlZxE0QZ+ZUpSlZx79h4Nda6zef9dlk121
JDjbR5XggPRZlRnS6bRQRtLpn4++cuie/Yvn2svmNxuLw9WCcYIl4fEoTEGiSTUqJdfgU+8ROqf1iMkLzS389YtNPXc/PH8l8ONB
JZkHD+4JtD04HmVEDWWErmBhzV2/2LB1bemJG6krzv2S6NOHUgtEP0Oif5pE/3fHoruP7N8RiP61GArzSwbUhJJQpXJKiKbfr/3b
IhKq76sKPUdF9NW/LSqfSn6vjv8C45H/6FSgvZQAAAAASUVORK5CYII='
style='height:25px; border-radius:12px; display:inline-block; float:left; vertical-align:middle'></img>
{% endif %}
</td><td>
{% if message %}
<span style='font-size:12px'>{{ message }}</span>
{% endif %}
</td></tr></table>
</div>
"""
clear_output()
logo = kwargs.get("logo", True)
message = kwargs.get("message", "info ")
template = Template(_template)
html = template.render(
{"logo": logo, "message": message.strip().replace("\n", "<br/>")}
)
publish_display_data(data={"text/html": html})
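# Illustrative sketch (not part of the original module, never called): this helper is what the
# application's _start() method uses to publish the startup banner in notebooks.
def _example_display_info_string():
    display_info_string(message="SpectroChemPy's API\n© Copyright 2014-2021", logo=True)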
# ----------------------------------------------------------------------------------------------------------------------
# Version
# ----------------------------------------------------------------------------------------------------------------------
try:
__release__ = get_distribution("spectrochempy").version.split("+")[0]
"Release version string of this package"
except DistributionNotFound: # pragma: no cover
# package is not installed
__release__ = "--not set--"
try:
__version__ = get_version(root="..", relative_to=__file__)
"Version string of this package"
except LookupError: # pragma: no cover
__version__ = __release__
# ............................................................................
def _get_copyright():
current_year = datetime.date.today().year
right = "2014-{}".format(current_year)
right += " - A.Travert & C.Fernandez @ LCS"
return right
__copyright__ = _get_copyright()
"Copyright string of this package"
# .............................................................................
def _get_release_date():
return subprocess.getoutput("git log -1 --tags --date=short --format='%ad'")
__release_date__ = _get_release_date()
"Last release date of this package"
def _check_for_updates(*args, **kwargs):
# Get version
conda_url = "https://anaconda.org/spectrocat/spectrochempy/files"
try:
response = requests.get(conda_url)
except requests.exceptions.RequestException: # pragma: no cover
return None
regex = (
r"\/\d{1,2}\.\d{1,2}\.\d{1,2}\/download\/noarch"
r"\/spectrochempy-(\d{1,2}\.\d{1,2}\.\d{1,2})\-(dev\d{1,2}|stable).tar.bz2"
)
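    # Illustrative note (added comment): the pattern is meant to pull the version and channel out
    # of anaconda.org download links; a (made-up) link such as
    #   /0.2.14/download/noarch/spectrochempy-0.2.14-stable.tar.bz2
    # yields match[1] == "0.2.14" and match[2] == "stable"; only "stable" builds are kept below.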
matches = re.finditer(regex, response.text, re.MULTILINE)
vavailables = []
for matchNum, match in enumerate(matches):
v = match[1]
if match[2] == "stable":
vavailables.append(v)
old = parse_version(__version__)
new_version = None
for key in vavailables:
new = parse_version(key)
if new > old: # pragma: no cover
new_version = key
fi = Path.home() / ".scpy_update"
if new_version: # pragma: no cover
fi.write_text(
f"\n\n\tYou are running SpectrocChemPy-{__version__} but version {new_version} is available."
f"\n\tPlease consider updating for bug fixes and new features! "
)
else: # pragma: no cover
if fi.exists():
fi.unlink()
CHECK_UPDATE = threading.Thread(target=_check_for_updates, args=(1,))
CHECK_UPDATE.start()
# other info
# ............................................................................
__url__ = "https://www.spectrochempy.fr"
"URL for the documentation of this package"
__author__ = "C. Fernandez & A. Travert"
"First authors(s) of this package"
__contributor__ = "A. Ait Blal, W. Guérin"
"contributor(s) to this package"
__license__ = "CeCILL-B license"
"Licence of this package"
__cite__ = (
f"Arnaud Travert & Christian Fernandez (2021) SpectroChemPy (version"
f" {".".join(__version__.split(".")[0:2])}). "
f"Zenodo. https://doi.org/10.5281/zenodo.3823841"
)
"How to cite this package"
# ..................................................................................................................
def _find_or_create_spectrochempy_dir():
directory = Path.home() / ".spectrochempy"
    directory.mkdir(exist_ok=True)  # Create the directory only if it does not exist
if directory.is_file(): # pragma: no cover
msg = "Intended SpectroChemPy directory `{0}` is " "actually a file."
raise IOError(msg.format(directory))
return directory
# ======================================================================================================================
# Magic ipython function
# ======================================================================================================================
@magics_class
class SpectroChemPyMagics(Magics):
"""
This class implements the addscript ipython magic function.
"""
@line_cell_magic
def addscript(self, pars="", cell=None):
"""
This works both as **%addscript** and as **%%addscript**
This magic command can either take a local filename, element in the
namespace or history range (see %history),
or the current cell content
Usage:
%addscript -p project n1-n2 n3-n4 ... n5 .. n6 ...
or
%%addscript -p project
...code lines ...
Options:
-p <string> Name of the project where the script will be stored.
If not provided, a project with a standard
name : `proj` is searched.
-o <string> script name
-s <symbols> Specify function or classes to load from python
source.
-a append to the current script instead of
overwriting it.
-n search symbol in the current namespace
Examples
--------
.. sourcecode:: ipython
In[1]: %addscript myscript.py
In[2]: %addscript 7-27
In[3]: %addscript -s MyClass,myfunction myscript.py
In[4]: %addscript MyClass
In[5]: %addscript mymodule.myfunction
"""
opts, args = self.parse_options(pars, "p:o:s:n:a")
# append = 'a' in opts
# mode = 'a' if append else 'w'
search_ns = "n" in opts
if not args and not cell and not search_ns: # pragma: no cover
            raise UsageError(
                "Missing filename, input history range, "
                "or element in the user namespace.\n "
                "If no arguments are given, then the cell content should not be empty."
            )
name = "script"
if "o" in opts:
name = opts["o"]
proj = "proj"
if "p" in opts:
proj = opts["p"]
if proj not in self.shell.user_ns: # pragma: no cover
raise ValueError(
"Cannot find any project with name `{}` in the "
"namespace.".format(proj)
)
# get the proj object
projobj = self.shell.user_ns[proj]
contents = ""
if search_ns:
contents += (
"\n" + self.shell.find_user_code(opts["n"], search_ns=search_ns) + "\n"
)
args = " ".join(args)
if args.strip():
contents += (
"\n" + self.shell.find_user_code(args, search_ns=search_ns) + "\n"
)
if "s" in opts: # pragma: no cover
try:
blocks, not_found = extract_symbols(contents, opts["s"])
except SyntaxError:
# non python code
logging.error("Unable to parse the input as valid Python code")
return
if len(not_found) == 1:
warnings.warn("The symbol `%s` was not found" % not_found[0])
elif len(not_found) > 1:
warnings.warn(
"The symbols %s were not found"
% get_text_list(not_found, wrap_item_with="`")
)
contents = "\n".join(blocks)
if cell:
contents += "\n" + cell
# import delayed to avoid circular import error
from spectrochempy.core.scripts.script import Script
script = Script(name, content=contents)
projobj[name] = script
return "Script {} created.".format(name)
    # @line_magic
    # def runscript(self, pars=''):
    #     opts, args = self.parse_options(pars, '')
    #     if not args:
    #         raise UsageError('Missing script name')
    #     return args
# ======================================================================================================================
# DataDir class
# ======================================================================================================================
class DataDir(HasTraits):
"""A class used to determine the path to the testdata directory."""
path = Instance(Path)
@default("path")
def _get_path_default(self, **kwargs): # pragma: no cover
super().__init__(**kwargs)
# create a directory testdata in .spectrochempy to avoid an error if the following do not work
path = _find_or_create_spectrochempy_dir() / "testdata"
path.mkdir(exist_ok=True)
# try to use the conda installed testdata (spectrochempy_data package)
try:
conda_env = environ["CONDA_PREFIX"]
path = Path(conda_env) / "share" / "spectrochempy_data" / "testdata"
if not path.exists():
path = (
Path(conda_env) / "share" / "spectrochempy_data"
) # depending on the version of spectrochempy_data
except KeyError:
pass
return path
def listing(self):
"""
Create a str representing a listing of the testdata folder.
Returns
-------
listing : str
Display of the datadir content
"""
strg = f"{self.path.name}\n" # os.path.basename(self.path) + "\n"
def _listdir(s, initial, ns):
ns += 1
for f in pathclean(initial).glob(
"*"
): # glob.glob(os.path.join(initial, '*')):
fb = f.name # os.path.basename(f)
if fb.startswith("."): # pragma: no cover
continue
if (
not fb.startswith("acqu")
and not fb.startswith("pulse")
and fb not in ["ser", "fid"]
):
s += " " * ns + "|__" + "%s\n" % fb
if f.is_dir():
s = _listdir(s, f, ns)
return s
return _listdir(strg, self.path, -1)
@classmethod
def class_print_help(cls):
# to work with --help-all
"""""" # TODO: make some useful help
def __str__(self):
return self.listing()
def _repr_html_(self): # pragma: no cover
# _repr_html is needed to output in notebooks
        return self.listing().replace("\n", "<br/>").replace(" ", "&nbsp;")
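# Illustrative sketch (not part of the original module, never called): resolving the testdata
# directory and printing its tree listing.
def _example_datadir_listing():
    datadir = DataDir()
    print(datadir.path)       # ~/.spectrochempy/testdata, or the conda-installed spectrochempy_data folder
    print(datadir.listing())  # indented tree view, also used by __str__ and _repr_html_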
# ======================================================================================================================
# General Preferences
# ======================================================================================================================
class GeneralPreferences(MetaConfigurable):
"""
    Preferences that apply to the |scpy| application in general.
    They should be accessible from the main API.
"""
name = Unicode("GeneralPreferences")
description = Unicode("General options for the SpectroChemPy application")
updated = Bool(False)
# ------------------------------------------------------------------------------------------------------------------
# Configuration entries
# ------------------------------------------------------------------------------------------------------------------
# NON GUI
show_info_on_loading = Bool(True, help="Display info on loading").tag(config=True)
use_qt = Bool(
False,
help="Use QT for dialog instead of TK which is the default. "
"If True the PyQt libraries must be installed",
).tag(config=True)
# GUI
databases_directory = Union(
(Instance(Path), Unicode()),
help="Directory where to look for database files such as csv",
).tag(config=True, gui=True, kind="folder")
datadir = Union(
(Instance(Path), Unicode()), help="Directory where to look for data by default"
).tag(config=True, gui=True, kind="folder")
workspace = Union(
(Instance(Path), Unicode()), help="Workspace directory by default"
).tag(config=True, gui=True, kind="folder")
# ------------------------------------------------------------------------------------------------------------------
# Configuration entries
# ------------------------------------------------------------------------------------------------------------------
autoload_project = Bool(
True, help="Automatic loading of the last project at startup"
).tag(config=True, gui=True)
autosave_project = Bool(True, help="Automatic saving of the current project").tag(
config=True, gui=True
)
project_directory = Union(
(Instance(Path), Unicode()),
help="Directory where projects are stored by default",
).tag(config=True, kind="folder")
last_project = Union(
(Instance(Path, allow_none=True), Unicode()), help="Last used project"
).tag(config=True, gui=True, kind="file")
show_close_dialog = Bool(
True,
help="Display the close project dialog project changing or on application exit",
).tag(config=True, gui=True)
csv_delimiter = Enum(
[",", ";", r"\t", " "], default_value=",", help="CSV data delimiter"
).tag(config=True, gui=True)
@default("project_directory")
def _get_default_project_directory(self):
# Determines the SpectroChemPy project directory name and creates the directory if it doesn't exist.
        # This directory is typically ``$HOME/.spectrochempy/projects``, but if the SCP_PROJECTS_HOME environment
# variable is set and the `$SCP_PROJECTS_HOME` directory exists, it will be that directory.
# If neither exists, the former will be created.
# first look for SCP_PROJECTS_HOME
pscp = environ.get("SCP_PROJECTS_HOME")
if pscp is not None and Path(pscp).exists():
return Path(pscp)
pscp = Path.home() / ".spectrochempy" / "projects"
pscp.mkdir(exist_ok=True)
if pscp.is_file():
raise IOError("Intended Projects directory is actually a file.")
return pscp
# ..................................................................................................................
@default("workspace")
def _get_workspace_default(self):
# the spectra path in package data
return Path.home()
# ..................................................................................................................
@default("databases_directory")
def _get_databases_directory_default(self):
# the spectra path in package data
return Path(get_pkg_path("databases", "scp_data"))
# ..................................................................................................................
@default("datadir")
def _get_default_datadir(self):
return self.parent.datadir.path
# ..................................................................................................................
@observe("datadir")
def _datadir_changed(self, change):
self.parent.datadir.path = pathclean(change["new"])
# ..................................................................................................................
@property
def log_level(self):
"""
int - logging level
"""
return self.parent.log_level
# ..................................................................................................................
@log_level.setter
def log_level(self, value):
if isinstance(value, str):
value = getattr(logging, value, None)
if value is None: # pragma: no cover
warnings.warn(
"Log level not changed: invalid value given\n"
"string values must be DEBUG, INFO, WARNING, "
"or ERROR"
)
self.parent.log_level = value
# ..................................................................................................................
def __init__(self, **kwargs):
super().__init__(jsonfile="GeneralPreferences", **kwargs)
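# Illustrative sketch (not part of the original module, never called): how these preferences are
# typically read and changed once the application defined below has built its `preferences`
# attribute; the `app` argument is hypothetical.
def _example_general_preferences(app):
    prefs = app.preferences
    print(prefs.datadir)         # delegates to app.datadir.path by default
    prefs.csv_delimiter = ";"    # any trait tagged config=True above can be set this way
    prefs.log_level = "WARNING"  # property setter, forwarded to the parent application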
# ======================================================================================================================
# Application
# ======================================================================================================================
class SpectroChemPy(Application):
"""
This class SpectroChemPy is the main class, containing most of the setup,
configuration and more.
"""
icon = Unicode("scpy.png")
"Icon for the application"
running = Bool(False)
"Running status of the |scpy| application"
name = Unicode("SpectroChemPy")
"Running name of the application"
description = Unicode(
"SpectroChemPy is a framework for processing, analysing and modelling Spectroscopic data for "
"Chemistry with Python."
)
"Short description of the |scpy| application"
long_description = Unicode()
"Long description of the |scpy| application"
@default("long_description")
def _get_long_description(self):
desc = """
<p><strong>SpectroChemPy</strong> is a framework for processing, analysing and modelling
<strong>Spectro</strong>scopic data for <strong>Chem</strong>istry with <strong>Py</strong>thon.
It is a cross platform software, running on Linux, Windows or OS X.</p><br><br>
<strong>Version:</strong> {version}<br>
<strong>Authors:</strong> {authors}<br>
<strong>License:</strong> {license}<br>
<div class='warning'> SpectroChemPy is still experimental and under active development. Its current design and
functionalities are subject to major changes, reorganizations, bugs and crashes! Please report any issues
to the <a href='https://github.com/spectrochempy/spectrochempy/issues'>Issue Tracker</a>
</div><br><br>
When using <strong>SpectroChemPy</strong> for your own work, you are kindly requested to cite it this way:
<pre>{cite}
</pre></p>""".format(
version=__release__, authors=__author__, license=__license__, cite=__cite__
)
return desc
# ------------------------------------------------------------------------------------------------------------------
# Configuration parameters
# ------------------------------------------------------------------------------------------------------------------
# Config file setting
# ------------------------------------------------------------------------------------------------------------------
_loaded_config_files = List()
reset_config = Bool(False, help="Should we restore a default configuration ?").tag(
config=True
)
"""Flag: True if one wants to reset settings to the original config defaults"""
config_file_name = Unicode(None, help="Configuration file name").tag(config=True)
"""Configuration file name"""
@default("config_file_name")
def _get_config_file_name_default(self):
return str(self.name).lower() + "_cfg"
config_dir = Instance(Path, help="Set the configuration directory location").tag(
config=True
)
"""Configuration directory"""
@default("config_dir")
def _get_config_dir_default(self):
return self.get_config_dir()
config_manager = Instance(BaseJSONConfigManager)
@default("config_manager")
def _get_default_config_manager(self):
return BaseJSONConfigManager(config_dir=str(self.config_dir))
log_format = Unicode(
"%(highlevel)s %(message)s",
help="The Logging format template",
).tag(config=True)
debug = Bool(True, help="Set DEBUG mode, with full outputs").tag(config=True)
"""Flag to set debugging mode"""
info = Bool(False, help="Set INFO mode, with msg outputs").tag(config=True)
"""Flag to set info mode"""
quiet = Bool(False, help="Set Quiet mode, with minimal outputs").tag(config=True)
"""Flag to set in fully quite mode (even no warnings)"""
nodisplay = Bool(False, help="Set NO DISPLAY mode, i.e., no graphics outputs").tag(
config=True
)
"""Flag to set in NO DISPLAY mode """
# last_project = Unicode('', help='Last used project').tag(config=True, type='project')
# """Last used project"""
#
# @observe('last_project')
# def _last_project_changed(self, change):
# if change.name in self.traits(config=True):
# self.config_manager.update(self.config_file_name, {self.__class__.__name__: {change.name: change.new, }})
show_config = Bool(help="Dump configuration to stdout at startup").tag(config=True)
@observe("show_config")
def _show_config_changed(self, change):
if change.new:
self._save_start = self.start
self.start = self.start_show_config
show_config_json = Bool(help="Dump configuration to stdout (as JSON)").tag(
config=True
)
@observe("show_config_json")
def _show_config_json_changed(self, change):
self.show_config = change.new
test = Bool(False, help="test flag").tag(config=True)
"""Flag to set the application in testing mode"""
port = Integer(7000, help="Dash server port").tag(config=True)
"""Dash server port"""
# Command line interface
# ------------------------------------------------------------------------------------------------------------------
aliases = dict(
test="SpectroChemPy.test",
project="SpectroChemPy.last_project",
f="SpectroChemPy.startup_filename",
port="SpectroChemPy.port",
)
flags = dict(
debug=(
{"SpectroChemPy": {"log_level": DEBUG}},
"Set log_level to DEBUG - most verbose mode",
),
info=(
{"SpectroChemPy": {"log_level": INFO}},
"Set log_level to INFO - verbose mode",
),
quiet=(
{"SpectroChemPy": {"log_level": ERROR}},
"Set log_level to ERROR - no verbosity at all",
),
nodisplay=(
{"SpectroChemPy": {"nodisplay": True}},
"Set NO DISPLAY mode to true - no graphics at all",
),
reset_config=(
{"SpectroChemPy": {"reset_config": True}},
"Reset config to default",
),
show_config=(
{
"SpectroChemPy": {
"show_config": True,
}
},
"Show the application's configuration (human-readable " "format)",
),
show_config_json=(
{
"SpectroChemPy": {
"show_config_json": True,
}
},
"Show the application's configuration (json " "format)",
),
)
classes = List(
[
GeneralPreferences,
PlotPreferences,
DataDir,
]
)
# ------------------------------------------------------------------------------------------------------------------
# Initialisation of the application
# ------------------------------------------------------------------------------------------------------------------
def __init__(self, **kwargs):
super().__init__(**kwargs)
        self.logs = (
            self.log
        )  # alias the traitlets logger as `logs` to avoid a later name clash with numpy.log
self.initialize()
def initialize(self, argv=None):
"""
Initialisation function for the API applications
Parameters
----------
        argv : list, optional
            List of configuration parameters.
"""
# parse the argv
# --------------------------------------------------------------------
# if we are running this under ipython and jupyter notebooks
# deactivate potential command line arguments
# (such that those from jupyter which cause problems here)
IN_IPYTHON = False
if InteractiveShell.initialized():
IN_IPYTHON = True
if not IN_IPYTHON:
# remove argument not known by spectrochempy
if "make.py" in sys.argv[0] or "pytest" in sys.argv[0]: # building docs
options = []
for item in sys.argv[:]:
for k in list(self.flags.keys()):
if item.startswith("--" + k) or k in ["--help", "--help-all"]:
options.append(item)
continue
for k in list(self.aliases.keys()):
if item.startswith("-" + k) or k in [
"h",
]:
options.append(item)
self.parse_command_line(options)
else: # pragma: no cover
self.parse_command_line(sys.argv)
# Get preferences from the config file and init everything
# ---------------------------------------------------------------------
self._init_all_preferences()
# we catch warnings and error for a lighter display to the end-user.
# except if we are in debugging mode
# warning handler
# --------------------------------------------------------------------
        def send_warnings_to_log(message, category, *args, **kwargs):
            # warnings.showwarning also passes filename, lineno, file and line; they are ignored here
            self.logs.warning(f"{category.__name__} - {message}")
warnings.showwarning = send_warnings_to_log
# exception handler
# --------------------------------------------------------------------
if IN_IPYTHON: # pragma: no cover
ip = get_ipython()
def _custom_exc(shell, etype, evalue, tb, tb_offset=None):
if self.log_level == logging.DEBUG:
shell.showtraceback((etype, evalue, tb), tb_offset=tb_offset)
else:
self.logs.error(f"{etype.__name__}: {evalue}")
ip.set_custom_exc((Exception,), _custom_exc)
# load our custom magic extensions
# --------------------------------------------------------------------
if ip is not None:
ip.register_magics(SpectroChemPyMagics)
def _init_all_preferences(self):
# Get preferences from the config file
# ---------------------------------------------------------------------
if not self.config:
self.config = Config()
configfiles = []
if self.config_file_name:
config_file = self.config_dir / self.config_file_name
configfiles.append(config_file)
lis = self.config_dir.iterdir()
for f in lis:
if f.suffix == ".json":
jsonname = self.config_dir / f
if self.reset_config or f == "PlotPreferences.json":
# remove the user json file to reset to defaults
jsonname.unlink()
else:
configfiles.append(jsonname)
for cfgname in configfiles:
self.load_config_file(cfgname)
if cfgname not in self._loaded_config_files:
self._loaded_config_files.append(cfgname)
        # Write the default config file if needed
# --------------------------------------
self._make_default_config_file()
self.datadir = (
DataDir()
) # config=self.config) -- passing args deprecated in traitlets 4.2
self.preferences = GeneralPreferences(config=self.config, parent=self)
self.plot_preferences = PlotPreferences(config=self.config, parent=self)
# ..................................................................................................................
@staticmethod
def get_config_dir():
"""
Determines the SpectroChemPy configuration directory name and
creates the directory if it doesn't exist.
This directory is typically ``$HOME/.spectrochempy/config``,
but if the
SCP_CONFIG_HOME environment variable is set and the
``$SCP_CONFIG_HOME`` directory exists, it will be that
directory.
If neither exists, the former will be created.
Returns
-------
config_dir : str
The absolute path to the configuration directory.
"""
# first look for SCP_CONFIG_HOME
scp = environ.get("SCP_CONFIG_HOME")
if scp is not None and Path(scp).exists():
return Path(scp)
config = _find_or_create_spectrochempy_dir() / "config"
if not config.exists():
config.mkdir(exist_ok=True)
return config
def start_show_config(self, **kwargs):
"""start function used when show_config is True"""
config = self.config.copy()
# exclude show_config flags from displayed config
for cls in self.__class__.mro():
if cls.__name__ in config:
cls_config = config[cls.__name__]
cls_config.pop("show_config", None)
cls_config.pop("show_config_json", None)
if self.show_config_json:
json.dump(config, sys.stdout, indent=1, sort_keys=True, default=repr)
# add trailing newlines
sys.stdout.write("\n")
print()
return self._start()
if self._loaded_config_files:
print("Loaded config files:")
for f in self._loaded_config_files:
print(" " + f)
print()
for classname in sorted(config):
class_config = config[classname]
if not class_config:
continue
print(classname)
pformat_kwargs = dict(indent=4)
if sys.version_info >= (3, 4):
# use compact pretty-print on Pythons that support it
pformat_kwargs["compact"] = True
for traitname in sorted(class_config):
value = class_config[traitname]
print(
" .{} = {}".format(
traitname,
pprint.pformat(value, **pformat_kwargs),
)
)
print()
# now run the actual start function
return self._start()
def reset_preferences(self):
"""
Reset all preferences to default
"""
self.reset_config = True
self._init_all_preferences()
self.reset_config = False
# ------------------------------------------------------------------------------------------------------------------
# start the application
# ------------------------------------------------------------------------------------------------------------------
def start(self):
"""
Start the |scpy| API
All configuration must have been done before calling this function
"""
# print(f'{sys.argv}')
return self._start()
# ------------------------------------------------------------------------------------------------------------------
# Private methods
# ------------------------------------------------------------------------------------------------------------------
def _start(self):
if self.running:
# API already started. Nothing done!
return
if self.preferences.show_info_on_loading:
info_string = "SpectroChemPy's API - v.{}\n" "© Copyright {}".format(
__version__, __copyright__
)
ip = get_ipython()
if ip is not None and "TerminalInteractiveShell" not in str(ip):
display_info_string(message=info_string.strip())
else:
if "/bin/scpy" not in sys.argv[0]: # deactivate for console scripts
print(info_string.strip())
# force update of rcParams
for rckey in mpl.rcParams.keys():
key = rckey.replace("_", "__").replace(".", "_").replace("-", "___")
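            # illustrative note (added comment): e.g. "axes.formatter.use_mathtext" becomes
            # "axes_formatter_use__mathtext", the attribute name looked up on plot_preferences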
try:
mpl.rcParams[rckey] = getattr(self.plot_preferences, key)
except ValueError:
mpl.rcParams[rckey] = getattr(self.plot_preferences, key).replace(
"'", ""
)
except AttributeError:
# print(f'{e} -> you may want to add it to PlotPreferences.py')
pass
self.plot_preferences.set_latex_font(self.plot_preferences.font_family)
self.running = True
# display needs for update
# time.sleep(1)
fi = Path.home() / ".scpy_update"
if fi.exists():
try:
msg = fi.read_text()
self.logs.warning(msg)
except Exception:
pass
return True
# ..................................................................................................................
def _make_default_config_file(self):
"""auto generate default config file."""
fname = self.config_dir / self.config_file_name
fname = fname.with_suffix(".py")
if not fname.exists() or self.reset_config:
s = self.generate_config_file()
self.logs.info("Generating default config file: %r" % fname)
with open(fname, "w") as f:
f.write(s)
# ------------------------------------------------------------------------------------------------------------------
# Events from Application
# ------------------------------------------------------------------------------------------------------------------
@observe("log_level")
def _log_level_changed(self, change):
self.log_format = "%(message)s"
if change.new == DEBUG:
self.log_format = "[%(filename)s-%(funcName)s %(levelname)s] %(" "message)s"
self.logs._cache = {}
self.logs.level = self.log_level
for handler in self.logs.handlers:
handler.level = self.log_level
self.logs.info(
"changed default log_level to {}".format(logging.getLevelName(change.new))
)
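# Illustrative sketch (not part of the original module, never called): the typical bootstrap
# sequence — building the application (whose __init__ already calls initialize()) and starting it.
def _example_bootstrap_application():
    app = SpectroChemPy()
    app.start()                 # shows the info banner unless preferences.show_info_on_loading is False
    app.logs.info("API ready")  # `logs` is the application logger aliased in __init__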
# ======================================================================================================================
if __name__ == "__main__":
pass
| # -*- coding: utf-8 -*-
# ======================================================================================================================
# Copyright (©) 2015-2021 LCS - Laboratoire Catalyse et Spectrochimie, Caen, France.
# =
# CeCILL-B FREE SOFTWARE LICENSE AGREEMENT - See full LICENSE agreement in the root directory
# =
# ======================================================================================================================
"""
This module define the `application` on which the API rely.
It also define
the default application preferences and IPython magic functions.
"""
__all__ = []
import re
import sys
import logging
import subprocess
import datetime
import warnings
import pprint
import json
from os import environ
from pathlib import Path
import threading
from pkg_resources import parse_version, get_distribution, DistributionNotFound
import requests
from setuptools_scm import get_version
from traitlets.config.configurable import Config
from traitlets.config.application import Application
from traitlets import (
Bool,
Unicode,
List,
Integer,
Enum,
Union,
HasTraits,
Instance,
default,
observe,
)
from traitlets.config.manager import BaseJSONConfigManager
import matplotlib as mpl
from matplotlib import pyplot as plt
from IPython import get_ipython
from IPython.core.interactiveshell import InteractiveShell
from IPython.core.magic import Magics, magics_class, line_cell_magic
from IPython.core.magics.code import extract_symbols
from IPython.core.error import UsageError
from IPython.utils.text import get_text_list
from IPython.display import publish_display_data, clear_output
from jinja2 import Template
from spectrochempy.utils import MetaConfigurable, pathclean, get_pkg_path
from .plot_preferences import PlotPreferences
# set the default style
plt.style.use(["classic"])
# ----------------------------------------------------------------------------------------------------------------------
# Log levels
# ----------------------------------------------------------------------------------------------------------------------
DEBUG = logging.DEBUG
INFO = logging.INFO
WARNING = logging.WARNING
ERROR = logging.ERROR
CRITICAL = logging.CRITICAL
# ----------------------------------------------------------------------------------------------------------------------
# logo / copyright display
# ----------------------------------------------------------------------------------------------------------------------
def display_info_string(**kwargs): # pragma: no cover
_template = """
{{widgetcss}}
<table><tr><td>
{% if logo %}
<img src='data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABgAAAAYCAYAAADgdz34AAAAAXNSR0IArs4c6QAAAAlw
SFlzAAAJOgAACToB8GSSSgAAAetpVFh0WE1MOmNvbS5hZG9iZS54bXAAAAAAADx4OnhtcG1ldGEgeG1sbnM6eD0iYWRvYmU6bnM6
bWV0YS8iIHg6eG1wdGs9IlhNUCBDb3JlIDUuNC4wIj4KICAgPHJkZjpSREYgeG1sbnM6cmRmPSJodHRwOi8vd3d3LnczLm9yZy8x
OTk5LzAyLzIyLXJkZi1zeW50YXgtbnMjIj4KICAgICAgPHJkZjpEZXNjcmlwdGlvbiByZGY6YWJvdXQ9IiIKICAgICAgICAgICAg
eG1sbnM6eG1wPSJodHRwOi8vbnMuYWRvYmUuY29tL3hhcC8xLjAvIgogICAgICAgICAgICB4bWxuczp0aWZmPSJodHRwOi8vbnMu
YWRvYmUuY29tL3RpZmYvMS4wLyI+CiAgICAgICAgIDx4bXA6Q3JlYXRvclRvb2w+bWF0cGxvdGxpYiB2ZXJzaW9uIDIuMS4wLCBo
dHRwOi8vbWF0cGxvdGxpYi5vcmcvPC94bXA6Q3JlYXRvclRvb2w+CiAgICAgICAgIDx0aWZmOk9yaWVudGF0aW9uPjE8L3RpZmY6
T3JpZW50YXRpb24+CiAgICAgIDwvcmRmOkRlc2NyaXB0aW9uPgogICA8L3JkZjpSREY+CjwveDp4bXBtZXRhPgqNQaNYAAAGiUlE
QVRIDY1We4xU1Rn/3XPuYx47u8w+hnU38hTcuoUEt/6D2y4RB0ME1BoEd9taJaKh9CFiN7YGp7appUAMNmktMZFoJTYVLVQ0smsy
26CN0SU1QgsuFAaW3WVmx33N677O6XfuyoIxTXqSO/fec+75fd93vt/3/UbDV0aKSZmCpkFMLz3T9utuu2N+o98aDSMBKVAo89z5
y+zEz3ZafcCOfvWdlGCalqKn1Bf71CygTd+mf1esSOnpdMpTb+vWpTZuWVfe3jLPa5tzHYNm0T5N0gpdkkHaDBeGBU6d1/t/fyS8
+/CbqdfUvmsx1PuMgc2bNxv79u1zgd31r+7JH1jbIZKxWRXAcYUQ8IWvBfBXNjEuJWPgMA02NR7C3/pYT9fjdZ3A9tGrWF8YSJHn
qcDz3y7q2T967PZv+gnYJdd1mEZ+62zGDQV/dQgKhmLzDNOXCEWM3j6eTT5Y3w78dOBKJLR1PQf+4ivPj76UPZnssBN+wbM9Aet/
AV81Mf1EEULXYfOobvX2WWQk0aoioXwwSmirOlioY0mu8BIouzYl7P8GV3vpqCCEZvlFz769w08oLDWvyKIyL1asSm28d6WfzA97
ztvvV1kexUMsmhlkULEkuGYmFYC6AvfUrITnwUKl5K79lkjeSSRRTCTbQPd95e1WzMbZSya74XoXAxctCllCnbECMOjZNGRwvzIX
nD85wbkMmKK+U045Dtdi8Qp+SAxU2GTg2bYlC9224pgvmSb54vkVTBQYyhUt2KjAMyMmPjwRQW5Mh2WKwJhlBh6jVGagFM84wZnQ
4bpC0Rt4pk1PbSt0NDcxDA5xryosDHWgtbM0DGZDWLSoiDMDYeQnGVrmOThxLozB0RAaahzkJzjKNqcIQBymJFMkOlN8Dqjpg0XY
Tx5xO/QbmmUrqIjGJznq47TqTaClKYfjp+PInLMwnOdYvtQBZ2XcunQY+VwIo4U4muoFEjVEFE6lQyEUKzHYfgQG9ylCyngU+Cxj
tOqxCDGHcCsOMCs6iQul5ZiStdATYxjMZXDLTUVwLY8Jey4uOh2IxjwsrP8UXJYxUrkZrghBahzV5iXU6gNkq0Z1EzIsUBUSCV2n
EOHo0LVxHCpuxabJJdhi5PFnvw5vLXwXIfNZvD/+JNo/X40NegE54sUaazl+UL8XD1x+FB9Ijjt4EQfdGN6J/x131LwIV9ap/AYs
0x1fz1ZKFbh6A7qKy/By9Dg6G36Ep91vUJJ15Cqr0Z67E8/HzmBrw1OwxWyM+3Mo6BAuSB17oyfx0Oyl2DN0Hqs/70Cx6hBCvESF
UY1ShWXZZEE7OTAYxZzaPH4TuoiusZvRnunFy2NbiHYuBp2vB66srX4vMEjpRKPxKXmnoQ4+Mn4DPiv8CYcrs3GfNUXJLtM+alSO
hrMj/KT+wBNW3+E/2liywNO3iSflbaFva/+stGDTxE0E9Sjaox8HBhxpEamzMGSEaFKg+mjEddzDh1MxTDq3YV1kGBsjfwW3S9Cq
anjmko+ndlb1UR3s6K8JlfphNWq9Ew/7c61T2BB/EbcaNkb8GBaE0tANH7/M34PLdhJDzjIcL9xPbdTG6zyM72Y+wXPHmvB489No
fm0b5HnbQ9Rgp/7DSSd29AeVvPeNyK6JcYl/yQVi5dBjuGvoV/gaJe47s45QUxrDmcYX0MBsdF7egvXZ7+O0vZA4X8QmOQWjlSK7
RDz5wIM30gp9UbWcGjXxhzdDu1SiNSpx6kcQB57rPnr/3dlkZarWLnlRq5oPET1dOCIOk4wALib9eeS5iygfhkd09H0DWphB/+gs
+PcOAS+ssrFmmXXgVfR0de9cpbAJfH3Q1jofW9DZk56dDcVsq9YcsoUMEd1qyLoT3BX1YiyHMJuk97hyjqIoE91t+NcTLeN0ZrfM
oXatZbu6G0h4VG+ibqq0IJVK6cAjo6serG3vSUezCMct0yQeSOFJSUImqb2qbknUpDqlZxE0QZ+ZUpSlZx79h4Nda6zef9dlk121
JDjbR5XggPRZlRnS6bRQRtLpn4++cuie/Yvn2svmNxuLw9WCcYIl4fEoTEGiSTUqJdfgU+8ROqf1iMkLzS389YtNPXc/PH8l8ONB
JZkHD+4JtD04HmVEDWWErmBhzV2/2LB1bemJG6krzv2S6NOHUgtEP0Oif5pE/3fHoruP7N8RiP61GArzSwbUhJJQpXJKiKbfr/3b
IhKq76sKPUdF9NW/LSqfSn6vjv8C45H/6FSgvZQAAAAASUVORK5CYII='
style='height:25px; border-radius:12px; display:inline-block; float:left; vertical-align:middle'></img>
{% endif %}
</td><td>
{% if message %}
<span style='font-size:12px'>{{ message }}</span>
{% endif %}
</td></tr></table>
</div>
"""
clear_output()
logo = kwargs.get("logo", True)
message = kwargs.get("message", "info ")
template = Template(_template)
html = template.render(
{"logo": logo, "message": message.strip().replace("\n", "<br/>")}
)
publish_display_data(data={"text/html": html})
# ----------------------------------------------------------------------------------------------------------------------
# Version
# ----------------------------------------------------------------------------------------------------------------------
try:
__release__ = get_distribution("spectrochempy").version.split("+")[0]
"Release version string of this package"
except DistributionNotFound: # pragma: no cover
# package is not installed
__release__ = "--not set--"
try:
__version__ = get_version(root="..", relative_to=__file__)
"Version string of this package"
except LookupError: # pragma: no cover
__version__ = __release__
# ............................................................................
def _get_copyright():
current_year = datetime.date.today().year
right = "2014-{}".format(current_year)
right += " - A.Travert & C.Fernandez @ LCS"
return right
__copyright__ = _get_copyright()
"Copyright string of this package"
# .............................................................................
def _get_release_date():
return subprocess.getoutput("git log -1 --tags --date=short --format='%ad'")
__release_date__ = _get_release_date()
"Last release date of this package"
def _check_for_updates(*args, **kwargs):
# Get version
conda_url = "https://anaconda.org/spectrocat/spectrochempy/files"
try:
response = requests.get(conda_url)
except requests.exceptions.RequestException: # pragma: no cover
return None
regex = (
r"\/\d{1,2}\.\d{1,2}\.\d{1,2}\/download\/noarch"
r"\/spectrochempy-(\d{1,2}\.\d{1,2}\.\d{1,2})\-(dev\d{1,2}|stable).tar.bz2"
)
matches = re.finditer(regex, response.text, re.MULTILINE)
vavailables = []
for matchNum, match in enumerate(matches):
v = match[1]
if match[2] == "stable":
vavailables.append(v)
old = parse_version(__version__)
new_version = None
for key in vavailables:
new = parse_version(key)
if new > old: # pragma: no cover
new_version = key
fi = Path.home() / ".scpy_update"
if new_version: # pragma: no cover
fi.write_text(
f"\n\n\tYou are running SpectrocChemPy-{__version__} but version {new_version} is available."
f"\n\tPlease consider updating for bug fixes and new features! "
)
else: # pragma: no cover
if fi.exists():
fi.unlink()
CHECK_UPDATE = threading.Thread(target=_check_for_updates, args=(1,))
CHECK_UPDATE.start()
# other info
# ............................................................................
__url__ = "https://www.spectrochempy.fr"
"URL for the documentation of this package"
__author__ = "C. Fernandez & A. Travert"
"First authors(s) of this package"
__contributor__ = "A. Ait Blal, W. Guérin"
"contributor(s) to this package"
__license__ = "CeCILL-B license"
"Licence of this package"
__cite__ = (
f"Arnaud Travert & Christian Fernandez (2021) SpectroChemPy (version"
f" {'.'.join(__version__.split('.')[0:2])}). "
f"Zenodo. https://doi.org/10.5281/zenodo.3823841"
)
"How to cite this package"
# ..................................................................................................................
def _find_or_create_spectrochempy_dir():
directory = Path.home() / ".spectrochempy"
directory.mkdir(exist_ok=True) # Create directory only if it do not exist
if directory.is_file(): # pragma: no cover
msg = "Intended SpectroChemPy directory `{0}` is " "actually a file."
raise IOError(msg.format(directory))
return directory
# ======================================================================================================================
# Magic ipython function
# ======================================================================================================================
@magics_class
class SpectroChemPyMagics(Magics):
"""
This class implements the addscript ipython magic function.
"""
@line_cell_magic
def addscript(self, pars="", cell=None):
"""
This works both as **%addscript** and as **%%addscript**
This magic command can either take a local filename, element in the
namespace or history range (see %history),
or the current cell content
Usage:
%addscript -p project n1-n2 n3-n4 ... n5 .. n6 ...
or
%%addscript -p project
...code lines ...
Options:
-p <string> Name of the project where the script will be stored.
If not provided, a project with a standard
name : `proj` is searched.
-o <string> script name
-s <symbols> Specify function or classes to load from python
source.
-a append to the current script instead of
overwriting it.
-n search symbol in the current namespace
Examples
--------
.. sourcecode:: ipython
In[1]: %addscript myscript.py
In[2]: %addscript 7-27
In[3]: %addscript -s MyClass,myfunction myscript.py
In[4]: %addscript MyClass
In[5]: %addscript mymodule.myfunction
"""
opts, args = self.parse_options(pars, "p:o:s:n:a")
# append = 'a' in opts
# mode = 'a' if append else 'w'
search_ns = "n" in opts
if not args and not cell and not search_ns: # pragma: no cover
raise UsageError(
"Missing filename, input history range, "
"or element in the user namespace.\n "
"If no argument are given then the cell content "
"should "
"not be empty"
)
name = "script"
if "o" in opts:
name = opts["o"]
proj = "proj"
if "p" in opts:
proj = opts["p"]
if proj not in self.shell.user_ns: # pragma: no cover
raise ValueError(
"Cannot find any project with name `{}` in the "
"namespace.".format(proj)
)
# get the proj object
projobj = self.shell.user_ns[proj]
contents = ""
if search_ns:
contents += (
"\n" + self.shell.find_user_code(opts["n"], search_ns=search_ns) + "\n"
)
args = " ".join(args)
if args.strip():
contents += (
"\n" + self.shell.find_user_code(args, search_ns=search_ns) + "\n"
)
if "s" in opts: # pragma: no cover
try:
blocks, not_found = extract_symbols(contents, opts["s"])
except SyntaxError:
# non python code
logging.error("Unable to parse the input as valid Python code")
return
if len(not_found) == 1:
warnings.warn("The symbol `%s` was not found" % not_found[0])
elif len(not_found) > 1:
warnings.warn(
"The symbols %s were not found"
% get_text_list(not_found, wrap_item_with="`")
)
contents = "\n".join(blocks)
if cell:
contents += "\n" + cell
# import delayed to avoid circular import error
from spectrochempy.core.scripts.script import Script
script = Script(name, content=contents)
projobj[name] = script
return "Script {} created.".format(name)
# @line_magic # def runscript(self, pars=''): # """ # # # """ # opts,
# args = self.parse_options(pars, '') # # if # not args: # raise UsageError('Missing script
# name') # # # return args
# ======================================================================================================================
# DataDir class
# ======================================================================================================================
class DataDir(HasTraits):
"""A class used to determine the path to the testdata directory."""
path = Instance(Path)
@default("path")
def _get_path_default(self, **kwargs): # pragma: no cover
super().__init__(**kwargs)
# create a directory testdata in .spectrochempy to avoid an error if the following do not work
path = _find_or_create_spectrochempy_dir() / "testdata"
path.mkdir(exist_ok=True)
# try to use the conda installed testdata (spectrochempy_data package)
try:
conda_env = environ["CONDA_PREFIX"]
path = Path(conda_env) / "share" / "spectrochempy_data" / "testdata"
if not path.exists():
path = (
Path(conda_env) / "share" / "spectrochempy_data"
) # depending on the version of spectrochempy_data
except KeyError:
pass
return path
def listing(self):
"""
Create a str representing a listing of the testdata folder.
Returns
-------
listing : str
Display of the datadir content
"""
strg = f"{self.path.name}\n" # os.path.basename(self.path) + "\n"
def _listdir(s, initial, ns):
ns += 1
for f in pathclean(initial).glob(
"*"
): # glob.glob(os.path.join(initial, '*')):
fb = f.name # os.path.basename(f)
if fb.startswith("."): # pragma: no cover
continue
if (
not fb.startswith("acqu")
and not fb.startswith("pulse")
and fb not in ["ser", "fid"]
):
s += " " * ns + "|__" + "%s\n" % fb
if f.is_dir():
s = _listdir(s, f, ns)
return s
return _listdir(strg, self.path, -1)
@classmethod
def class_print_help(cls):
# to work with --help-all
"""""" # TODO: make some useful help
def __str__(self):
return self.listing()
def _repr_html_(self): # pragma: no cover
# _repr_html is needed to output in notebooks
return self.listing().replace("\n", "<br/>").replace(" ", " ")
# ======================================================================================================================
# General Preferences
# ======================================================================================================================
class GeneralPreferences(MetaConfigurable):
"""
Preferences that apply to the |scpy| application in general
They should be accessible from the main API
"""
name = Unicode("GeneralPreferences")
description = Unicode("General options for the SpectroChemPy application")
updated = Bool(False)
# ------------------------------------------------------------------------------------------------------------------
# Configuration entries
# ------------------------------------------------------------------------------------------------------------------
# NON GUI
show_info_on_loading = Bool(True, help="Display info on loading").tag(config=True)
use_qt = Bool(
False,
help="Use QT for dialog instead of TK which is the default. "
"If True the PyQt libraries must be installed",
).tag(config=True)
# GUI
databases_directory = Union(
(Instance(Path), Unicode()),
help="Directory where to look for database files such as csv",
).tag(config=True, gui=True, kind="folder")
datadir = Union(
(Instance(Path), Unicode()), help="Directory where to look for data by default"
).tag(config=True, gui=True, kind="folder")
workspace = Union(
(Instance(Path), Unicode()), help="Workspace directory by default"
).tag(config=True, gui=True, kind="folder")
# ------------------------------------------------------------------------------------------------------------------
# Configuration entries
# ------------------------------------------------------------------------------------------------------------------
autoload_project = Bool(
True, help="Automatic loading of the last project at startup"
).tag(config=True, gui=True)
autosave_project = Bool(True, help="Automatic saving of the current project").tag(
config=True, gui=True
)
project_directory = Union(
(Instance(Path), Unicode()),
help="Directory where projects are stored by default",
).tag(config=True, kind="folder")
last_project = Union(
(Instance(Path, allow_none=True), Unicode()), help="Last used project"
).tag(config=True, gui=True, kind="file")
show_close_dialog = Bool(
True,
help="Display the close project dialog project changing or on application exit",
).tag(config=True, gui=True)
csv_delimiter = Enum(
[",", ";", r"\t", " "], default_value=",", help="CSV data delimiter"
).tag(config=True, gui=True)
@default("project_directory")
def _get_default_project_directory(self):
# Determines the SpectroChemPy project directory name and creates the directory if it doesn't exist.
# This directory is typically ``$HOME/spectrochempy/projects``, but if the SCP_PROJECTS_HOME environment
# variable is set and the `$SCP_PROJECTS_HOME` directory exists, it will be that directory.
# If neither exists, the former will be created.
# first look for SCP_PROJECTS_HOME
pscp = environ.get("SCP_PROJECTS_HOME")
if pscp is not None and Path(pscp).exists():
return Path(pscp)
pscp = Path.home() / ".spectrochempy" / "projects"
pscp.mkdir(exist_ok=True)
if pscp.is_file():
raise IOError("Intended Projects directory is actually a file.")
return pscp
# ..................................................................................................................
@default("workspace")
def _get_workspace_default(self):
# the spectra path in package data
return Path.home()
# ..................................................................................................................
@default("databases_directory")
def _get_databases_directory_default(self):
# the spectra path in package data
return Path(get_pkg_path("databases", "scp_data"))
# ..................................................................................................................
@default("datadir")
def _get_default_datadir(self):
return self.parent.datadir.path
# ..................................................................................................................
@observe("datadir")
def _datadir_changed(self, change):
self.parent.datadir.path = pathclean(change["new"])
# ..................................................................................................................
@property
def log_level(self):
"""
int - logging level
"""
return self.parent.log_level
# ..................................................................................................................
@log_level.setter
def log_level(self, value):
if isinstance(value, str):
value = getattr(logging, value, None)
if value is None: # pragma: no cover
warnings.warn(
"Log level not changed: invalid value given\n"
"string values must be DEBUG, INFO, WARNING, "
"or ERROR"
)
self.parent.log_level = value
# ..................................................................................................................
def __init__(self, **kwargs):
super().__init__(jsonfile="GeneralPreferences", **kwargs)
# ======================================================================================================================
# Application
# ======================================================================================================================
class SpectroChemPy(Application):
"""
This class SpectroChemPy is the main class, containing most of the setup,
configuration and more.
"""
icon = Unicode("scpy.png")
"Icon for the application"
running = Bool(False)
"Running status of the |scpy| application"
name = Unicode("SpectroChemPy")
"Running name of the application"
description = Unicode(
"SpectroChemPy is a framework for processing, analysing and modelling Spectroscopic data for "
"Chemistry with Python."
)
"Short description of the |scpy| application"
long_description = Unicode()
"Long description of the |scpy| application"
@default("long_description")
def _get_long_description(self):
desc = """
<p><strong>SpectroChemPy</strong> is a framework for processing, analysing and modelling
        <strong>Spectro</strong>scopic data for <strong>Chem</strong>istry with <strong>Py</strong>thon.
It is a cross platform software, running on Linux, Windows or OS X.</p><br><br>
<strong>Version:</strong> {version}<br>
<strong>Authors:</strong> {authors}<br>
<strong>License:</strong> {license}<br>
        <div class='warning'> SpectroChemPy is still experimental and under active development. Its current design and
        functionalities are subject to major changes, reorganizations, bugs and crashes! Please report any issues
        to the <a href='https://github.com/spectrochempy/spectrochempy/issues'>Issue Tracker</a>.
        </div><br><br>
When using <strong>SpectroChemPy</strong> for your own work, you are kindly requested to cite it this way:
<pre>{cite}
</pre></p>""".format(
version=__release__, authors=__author__, license=__license__, cite=__cite__
)
return desc
# ------------------------------------------------------------------------------------------------------------------
# Configuration parameters
# ------------------------------------------------------------------------------------------------------------------
# Config file setting
# ------------------------------------------------------------------------------------------------------------------
_loaded_config_files = List()
reset_config = Bool(False, help="Should we restore a default configuration ?").tag(
config=True
)
"""Flag: True if one wants to reset settings to the original config defaults"""
config_file_name = Unicode(None, help="Configuration file name").tag(config=True)
"""Configuration file name"""
@default("config_file_name")
def _get_config_file_name_default(self):
return str(self.name).lower() + "_cfg"
config_dir = Instance(Path, help="Set the configuration directory location").tag(
config=True
)
"""Configuration directory"""
@default("config_dir")
def _get_config_dir_default(self):
return self.get_config_dir()
config_manager = Instance(BaseJSONConfigManager)
@default("config_manager")
def _get_default_config_manager(self):
return BaseJSONConfigManager(config_dir=str(self.config_dir))
log_format = Unicode(
"%(highlevel)s %(message)s",
help="The Logging format template",
).tag(config=True)
debug = Bool(True, help="Set DEBUG mode, with full outputs").tag(config=True)
"""Flag to set debugging mode"""
info = Bool(False, help="Set INFO mode, with msg outputs").tag(config=True)
"""Flag to set info mode"""
    quiet = Bool(False, help="Set Quiet mode, with minimal outputs").tag(config=True)
    """Flag to set fully quiet mode (even no warnings)"""
nodisplay = Bool(False, help="Set NO DISPLAY mode, i.e., no graphics outputs").tag(
config=True
)
"""Flag to set in NO DISPLAY mode """
# last_project = Unicode('', help='Last used project').tag(config=True, type='project')
# """Last used project"""
#
# @observe('last_project')
# def _last_project_changed(self, change):
# if change.name in self.traits(config=True):
# self.config_manager.update(self.config_file_name, {self.__class__.__name__: {change.name: change.new, }})
show_config = Bool(help="Dump configuration to stdout at startup").tag(config=True)
@observe("show_config")
def _show_config_changed(self, change):
if change.new:
self._save_start = self.start
self.start = self.start_show_config
show_config_json = Bool(help="Dump configuration to stdout (as JSON)").tag(
config=True
)
@observe("show_config_json")
def _show_config_json_changed(self, change):
self.show_config = change.new
test = Bool(False, help="test flag").tag(config=True)
"""Flag to set the application in testing mode"""
port = Integer(7000, help="Dash server port").tag(config=True)
"""Dash server port"""
# Command line interface
# ------------------------------------------------------------------------------------------------------------------
aliases = dict(
test="SpectroChemPy.test",
project="SpectroChemPy.last_project",
f="SpectroChemPy.startup_filename",
port="SpectroChemPy.port",
)
flags = dict(
debug=(
{"SpectroChemPy": {"log_level": DEBUG}},
"Set log_level to DEBUG - most verbose mode",
),
info=(
{"SpectroChemPy": {"log_level": INFO}},
"Set log_level to INFO - verbose mode",
),
quiet=(
{"SpectroChemPy": {"log_level": ERROR}},
"Set log_level to ERROR - no verbosity at all",
),
nodisplay=(
{"SpectroChemPy": {"nodisplay": True}},
"Set NO DISPLAY mode to true - no graphics at all",
),
reset_config=(
{"SpectroChemPy": {"reset_config": True}},
"Reset config to default",
),
show_config=(
{
"SpectroChemPy": {
"show_config": True,
}
},
"Show the application's configuration (human-readable " "format)",
),
show_config_json=(
{
"SpectroChemPy": {
"show_config_json": True,
}
},
"Show the application's configuration (json " "format)",
),
)
classes = List(
[
GeneralPreferences,
PlotPreferences,
DataDir,
]
)
# ------------------------------------------------------------------------------------------------------------------
# Initialisation of the application
# ------------------------------------------------------------------------------------------------------------------
def __init__(self, **kwargs):
super().__init__(**kwargs)
        # alias the logger under another name to avoid a later conflict with numpy.log
        self.logs = self.log
self.initialize()
def initialize(self, argv=None):
"""
        Initialisation function for the API application.
        Parameters
        ----------
        argv : list, optional
            List of configuration parameters.
"""
# parse the argv
# --------------------------------------------------------------------
        # if we are running under IPython or in Jupyter notebooks,
        # deactivate potential command line arguments
        # (such as those from jupyter, which cause problems here)
IN_IPYTHON = False
if InteractiveShell.initialized():
IN_IPYTHON = True
if not IN_IPYTHON:
            # remove arguments not known by spectrochempy
            if "make.py" in sys.argv[0] or "pytest" in sys.argv[0]:  # building docs or running tests
options = []
for item in sys.argv[:]:
for k in list(self.flags.keys()):
if item.startswith("--" + k) or k in ["--help", "--help-all"]:
options.append(item)
continue
for k in list(self.aliases.keys()):
if item.startswith("-" + k) or k in [
"h",
]:
options.append(item)
self.parse_command_line(options)
else: # pragma: no cover
self.parse_command_line(sys.argv)
# Get preferences from the config file and init everything
# ---------------------------------------------------------------------
self._init_all_preferences()
# we catch warnings and error for a lighter display to the end-user.
# except if we are in debugging mode
# warning handler
# --------------------------------------------------------------------
def send_warnings_to_log(message, category):
self.logs.warning(f"{category.__name__} - {message}")
return
warnings.showwarning = send_warnings_to_log
# exception handler
# --------------------------------------------------------------------
        ip = None
        if IN_IPYTHON:  # pragma: no cover
            ip = get_ipython()
def _custom_exc(shell, etype, evalue, tb, tb_offset=None):
if self.log_level == logging.DEBUG:
shell.showtraceback((etype, evalue, tb), tb_offset=tb_offset)
else:
self.logs.error(f"{etype.__name__}: {evalue}")
ip.set_custom_exc((Exception,), _custom_exc)
# load our custom magic extensions
# --------------------------------------------------------------------
if ip is not None:
ip.register_magics(SpectroChemPyMagics)
def _init_all_preferences(self):
# Get preferences from the config file
# ---------------------------------------------------------------------
if not self.config:
self.config = Config()
configfiles = []
if self.config_file_name:
config_file = self.config_dir / self.config_file_name
configfiles.append(config_file)
lis = self.config_dir.iterdir()
for f in lis:
if f.suffix == ".json":
jsonname = self.config_dir / f
                if self.reset_config or f.name == "PlotPreferences.json":
# remove the user json file to reset to defaults
jsonname.unlink()
else:
configfiles.append(jsonname)
for cfgname in configfiles:
self.load_config_file(cfgname)
if cfgname not in self._loaded_config_files:
self._loaded_config_files.append(cfgname)
# Eventually write the default config file
# --------------------------------------
self._make_default_config_file()
self.datadir = (
DataDir()
) # config=self.config) -- passing args deprecated in traitlets 4.2
self.preferences = GeneralPreferences(config=self.config, parent=self)
self.plot_preferences = PlotPreferences(config=self.config, parent=self)
# ..................................................................................................................
@staticmethod
def get_config_dir():
"""
Determines the SpectroChemPy configuration directory name and
creates the directory if it doesn't exist.
        This directory is typically ``$HOME/.spectrochempy/config``, but if the
        SCP_CONFIG_HOME environment variable is set and the ``$SCP_CONFIG_HOME``
        directory exists, it will be that directory.
        If neither exists, the former will be created.
        Returns
        -------
        config_dir : Path
The absolute path to the configuration directory.
"""
# first look for SCP_CONFIG_HOME
scp = environ.get("SCP_CONFIG_HOME")
if scp is not None and Path(scp).exists():
return Path(scp)
config = _find_or_create_spectrochempy_dir() / "config"
if not config.exists():
config.mkdir(exist_ok=True)
return config
def start_show_config(self, **kwargs):
"""start function used when show_config is True"""
config = self.config.copy()
# exclude show_config flags from displayed config
for cls in self.__class__.mro():
if cls.__name__ in config:
cls_config = config[cls.__name__]
cls_config.pop("show_config", None)
cls_config.pop("show_config_json", None)
if self.show_config_json:
json.dump(config, sys.stdout, indent=1, sort_keys=True, default=repr)
# add trailing newlines
sys.stdout.write("\n")
print()
return self._start()
if self._loaded_config_files:
print("Loaded config files:")
for f in self._loaded_config_files:
                print(" " + str(f))
print()
for classname in sorted(config):
class_config = config[classname]
if not class_config:
continue
print(classname)
            # compact pretty-print (supported on all Python 3 versions in use)
            pformat_kwargs = dict(indent=4, compact=True)
for traitname in sorted(class_config):
value = class_config[traitname]
print(
" .{} = {}".format(
traitname,
pprint.pformat(value, **pformat_kwargs),
)
)
print()
# now run the actual start function
return self._start()
def reset_preferences(self):
"""
Reset all preferences to default
"""
self.reset_config = True
self._init_all_preferences()
self.reset_config = False
# ------------------------------------------------------------------------------------------------------------------
# start the application
# ------------------------------------------------------------------------------------------------------------------
def start(self):
"""
Start the |scpy| API
All configuration must have been done before calling this function
"""
# print(f'{sys.argv}')
return self._start()
# ------------------------------------------------------------------------------------------------------------------
# Private methods
# ------------------------------------------------------------------------------------------------------------------
def _start(self):
if self.running:
# API already started. Nothing done!
return
if self.preferences.show_info_on_loading:
info_string = "SpectroChemPy's API - v.{}\n" "© Copyright {}".format(
__version__, __copyright__
)
ip = get_ipython()
if ip is not None and "TerminalInteractiveShell" not in str(ip):
display_info_string(message=info_string.strip())
else:
if "/bin/scpy" not in sys.argv[0]: # deactivate for console scripts
print(info_string.strip())
# force update of rcParams
for rckey in mpl.rcParams.keys():
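                # map the matplotlib rc key to the corresponding PlotPreferences trait
                # name (in trait names, "." is encoded as "_", "_" as "__" and "-" as "___")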
key = rckey.replace("_", "__").replace(".", "_").replace("-", "___")
try:
mpl.rcParams[rckey] = getattr(self.plot_preferences, key)
except ValueError:
mpl.rcParams[rckey] = getattr(self.plot_preferences, key).replace(
"'", ""
)
except AttributeError:
# print(f'{e} -> you may want to add it to PlotPreferences.py')
pass
self.plot_preferences.set_latex_font(self.plot_preferences.font_family)
self.running = True
        # display a pending "needs update" message, if any
# time.sleep(1)
fi = Path.home() / ".scpy_update"
if fi.exists():
try:
msg = fi.read_text()
self.logs.warning(msg)
except Exception:
pass
return True
# ..................................................................................................................
def _make_default_config_file(self):
"""auto generate default config file."""
fname = self.config_dir / self.config_file_name
fname = fname.with_suffix(".py")
if not fname.exists() or self.reset_config:
s = self.generate_config_file()
self.logs.info("Generating default config file: %r" % fname)
with open(fname, "w") as f:
f.write(s)
# ------------------------------------------------------------------------------------------------------------------
# Events from Application
# ------------------------------------------------------------------------------------------------------------------
@observe("log_level")
def _log_level_changed(self, change):
self.log_format = "%(message)s"
if change.new == DEBUG:
            self.log_format = "[%(filename)s-%(funcName)s %(levelname)s] %(message)s"
self.logs._cache = {}
self.logs.level = self.log_level
for handler in self.logs.handlers:
handler.level = self.log_level
self.logs.info(
"changed default log_level to {}".format(logging.getLevelName(change.new))
)
# ======================================================================================================================
if __name__ == "__main__":
pass
|
import time
import logging
import functools
import slack_sdk
from slack_sdk.web.async_client import AsyncWebClient
from typing import Any, Dict, List, Optional
from tenacity import TryAgain, retry, retry_if_exception_type, stop_after_attempt
from .config import SlackConversationConfiguration
log = logging.getLogger(__name__)
def create_slack_client(config: SlackConversationConfiguration, run_async: bool = False):
"""Creates a Slack Web API client."""
if not run_async:
return slack_sdk.WebClient(token=config.api_bot_token.get_secret_value())
return AsyncWebClient(token=config.api_bot_token.get_secret_value())
def resolve_user(client: Any, user_id: str):
    """Attempts to resolve a user object whether given an email, an id, or a prefix."""
if "@" in user_id:
return get_user_info_by_email(client, user_id)
return {"id": user_id}
def chunks(ids, n):
    """Yield successive n-sized chunks from ids."""
for i in range(0, len(ids), n):
yield ids[i : i + n]
def paginated(data_key):
def decorator(func):
@functools.wraps(func)
def decorated_function(*args, **kwargs):
results = []
while True:
response = func(*args, **kwargs)
results += response[data_key]
# stop if we hit an empty string
next_cursor = response["response_metadata"]["next_cursor"]
if not next_cursor:
break
kwargs.update({"cursor": next_cursor})
return results
return decorated_function
return decorator
def time_pagination(data_key):
def decorator(func):
@functools.wraps(func)
def decorated_function(*args, **kwargs):
results = []
while True:
response = func(*args, **kwargs)
results += response[data_key]
                # stop when Slack reports there are no more messages
if not response["has_more"]:
break
kwargs.update({"latest": response["messages"][0]["ts"]})
return results
return decorated_function
return decorator
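# --- Illustrative sketch (added, not part of the original module) ---------------------
# ``paginated`` follows Slack's cursor-based pagination, while ``time_pagination``
# keeps calling a history-style endpoint until ``has_more`` is false. A hedged example
# of applying ``time_pagination``; the wrapper name is hypothetical, and ``make_call``
# (defined further below) is only resolved at call time, so the ordering is safe.
@time_pagination("messages")
def _example_full_history(client, conversation_id: str, **kwargs):
    """Sketch: accumulate a conversation's message history across pages."""
    return make_call(client, "conversations.history", channel=conversation_id, **kwargs)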
# NOTE I don't like this but slack client is annoying (kglisson)
SLACK_GET_ENDPOINTS = [
"conversations.history",
"conversations.info",
"users.conversations",
"users.info",
"users.lookupByEmail",
"users.profile.get",
]
@retry(stop=stop_after_attempt(5), retry=retry_if_exception_type(TryAgain))
def make_call(client: Any, endpoint: str, **kwargs):
    """Make a Slack client API call."""
try:
if endpoint in SLACK_GET_ENDPOINTS:
response = client.api_call(endpoint, http_verb="GET", params=kwargs)
else:
response = client.api_call(endpoint, json=kwargs)
except slack_sdk.errors.SlackApiError as e:
log.error(f"SlackError. Response: {e.response} Endpoint: {endpoint} kwargs: {kwargs}")
# NOTE we've seen some eventual consistency problems with channel creation
if e.response["error"] == "channel_not_found":
raise TryAgain
# NOTE we've seen some eventual consistency problems after adding users to a channel
if e.response["error"] == "user_not_in_channel":
raise TryAgain
# NOTE we've experienced a wide range of issues when Slack's performance is degraded
if e.response["error"] == "fatal_error":
# we wait 5 minutes before trying again, as performance issues take time to troubleshoot and fix
time.sleep(300)
raise TryAgain
if e.response.headers.get("Retry-After"):
wait = int(e.response.headers["Retry-After"])
log.info(f"SlackError: Rate limit hit. Waiting {wait} seconds.")
time.sleep(wait)
raise TryAgain
else:
raise e
return response
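# A hedged usage sketch (added, not part of the original module): endpoints listed in
# SLACK_GET_ENDPOINTS are sent as GET requests with query parameters, everything else
# is sent as a JSON body, and transient errors are retried via tenacity. The channel
# id below is a placeholder.
def _example_calls(client):
    """Sketch: one GET-style and one POST-style call through make_call."""
    info = make_call(client, "conversations.info", channel="C0PLACEHOLDER")
    make_call(client, "chat.postMessage", channel="C0PLACEHOLDER", text="hello")
    return info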
async def make_call_async(client: Any, endpoint: str, **kwargs):
    """Make a Slack client API call asynchronously."""
try:
if endpoint in SLACK_GET_ENDPOINTS:
response = await client.api_call(endpoint, http_verb="GET", params=kwargs)
else:
response = await client.api_call(endpoint, json=kwargs)
except slack_sdk.errors.SlackApiError as e:
log.error(f"SlackError. Response: {e.response} Endpoint: {endpoint} kwargs: {kwargs}")
if e.response.headers.get("Retry-After"):
wait = int(e.response.headers["Retry-After"])
log.info(f"SlackError: Rate limit hit. Waiting {wait} seconds.")
time.sleep(wait)
raise TryAgain
else:
raise e
return response
@paginated("channels")
def list_conversations(client: Any, **kwargs):
return make_call(client, "conversations.list", types="private_channel", **kwargs)
# @time_pagination("messages")
def list_conversation_messages(client: Any, conversation_id: str, **kwargs):
"""Returns a list of conversation messages."""
return make_call(client, "conversations.history", channel=conversation_id, **kwargs)
@functools.lru_cache()
def get_user_info_by_id(client: Any, user_id: str):
"""Gets profile information about a user by id."""
return make_call(client, "users.info", user=user_id)["user"]
# @functools.lru_cache()
async def get_user_info_by_id_async(client: Any, user_id: str):
"""Gets profile information about a user by id."""
user_info = await make_call_async(client, "users.info", user=user_id)
return user_info["user"]
@functools.lru_cache()
def get_user_info_by_email(client: Any, email: str):
"""Gets profile information about a user by email."""
return make_call(client, "users.lookupByEmail", email=email)["user"]
@functools.lru_cache()
def get_user_profile_by_email(client: Any, email: str):
"""Gets extended profile information about a user by email."""
user = make_call(client, "users.lookupByEmail", email=email)["user"]
profile = make_call(client, "users.profile.get", user=user["id"])["profile"]
profile["tz"] = user["tz"]
return profile
def get_user_email(client: Any, user_id: str):
"""Gets the user's email."""
user_info = get_user_info_by_id(client, user_id)
return user_info["profile"]["email"]
async def get_user_email_async(client: Any, user_id: str):
"""Gets the user's email."""
user_info = await get_user_info_by_id_async(client, user_id)
return user_info["profile"]["email"]
def get_user_username(client: Any, user_id: str):
"""Gets the user's username."""
return get_user_email(client, user_id).split("@")[0]
def get_user_avatar_url(client: Any, email: str):
"""Gets the user's avatar url."""
return get_user_info_by_email(client, email)["profile"]["image_512"]
# @functools.lru_cache()
async def get_conversations_by_user_id_async(client: Any, user_id: str):
"""Gets the list of public and private conversations a user is a member of."""
result = await make_call_async(
client,
"users.conversations",
user=user_id,
types="public_channel",
exclude_archived="true",
)
public_conversations = [c["name"] for c in result["channels"]]
result = await make_call_async(
client,
"users.conversations",
user=user_id,
types="private_channel",
exclude_archived="true",
)
private_conversations = [c["name"] for c in result["channels"]]
return public_conversations, private_conversations
# note this will get slower over time, we might exclude archived to make it sane
def get_conversation_by_name(client: Any, name: str):
"""Fetches a conversation by name."""
for c in list_conversations(client):
if c["name"] == name:
return c
async def get_conversation_name_by_id_async(client: Any, conversation_id: str):
"""Fetches a conversation by id and returns its name."""
try:
return (await make_call_async(client, "conversations.info", channel=conversation_id))[
"channel"
]["name"]
except slack_sdk.errors.SlackApiError as e:
if e.response["error"] == "channel_not_found":
return None
else:
raise e
def set_conversation_topic(client: Any, conversation_id: str, topic: str):
"""Sets the topic of the specified conversation."""
return make_call(client, "conversations.setTopic", channel=conversation_id, topic=topic)
def create_conversation(client: Any, name: str, is_private: bool = False):
"""Make a new Slack conversation."""
response = make_call(
client,
"conversations.create",
        name=name.lower(),  # slack disallows uppercase channel names
is_group=is_private,
is_private=is_private,
)["channel"]
    return {
        "id": response["id"],
        "name": response["name"],
        "weblink": f"https://slack.com/app_redirect?channel={response['id']}",
}
def archive_conversation(client: Any, conversation_id: str):
"""Archives an existing conversation."""
return make_call(client, "conversations.archive", channel=conversation_id)
def unarchive_conversation(client: Any, conversation_id: str):
"""Unarchives an existing conversation."""
try:
return make_call(client, "conversations.unarchive", channel=conversation_id)
except slack_sdk.errors.SlackApiError as e:
        # if the channel isn't archived, that's okay
if e.response["error"] != "not_archived":
raise e
def add_users_to_conversation(client: Any, conversation_id: str, user_ids: List[str]):
"""Add users to conversation."""
# NOTE this will trigger a member_joined_channel event, which we will capture and run the incident.incident_add_or_reactivate_participant_flow() as a result
for c in chunks(user_ids, 30): # NOTE api only allows 30 at a time.
try:
make_call(client, "conversations.invite", users=c, channel=conversation_id)
except slack_sdk.errors.SlackApiError as e:
# sometimes slack sends duplicate member_join events that result in folks already existing in the channel.
if e.response["error"] == "already_in_channel":
pass
def send_message(
client: Any,
conversation_id: str,
text: str = None,
blocks: List[Dict] = None,
persist: bool = False,
):
"""Sends a message to the given conversation."""
response = make_call(
client, "chat.postMessage", channel=conversation_id, text=text, blocks=blocks
)
if persist:
add_pin(client, response["channel"], response["ts"])
return {"id": response["channel"], "timestamp": response["ts"]}
def send_ephemeral_message(
client: Any,
conversation_id: str,
user_id: str,
text: str,
blocks: Optional[List] = None,
thread_ts: Optional[str] = None,
):
"""Sends an ephemeral message to a user in a channel."""
if thread_ts:
response = make_call(
client,
"chat.postEphemeral",
channel=conversation_id,
user=user_id,
text=text,
thread_ts=thread_ts,
blocks=blocks,
)
else:
response = make_call(
client,
"chat.postEphemeral",
channel=conversation_id,
user=user_id,
text=text,
blocks=blocks,
)
return {"id": response["channel"], "timestamp": response["ts"]}
def add_pin(client: Any, conversation_id: str, timestamp: str):
"""Adds a pin to a conversation."""
return make_call(client, "pins.add", channel=conversation_id, timestamp=timestamp)
def message_filter(message):
"""Some messages are not useful, we filter them here."""
    if not message.get("text"):  # file uploads sometimes have no text, only files
return
if message["type"] != "message":
return
if message.get("subtype"):
return
if message.get("bot_id"):
return
return message
def is_user(config: SlackConversationConfiguration, slack_user: str):
    """Returns True if it's a regular user, False if it's the dispatch bot."""
return slack_user != config.app_user_slug
def open_dialog_with_user(client: Any, trigger_id: str, dialog: dict):
"""Opens a dialog with a user."""
return make_call(client, "dialog.open", trigger_id=trigger_id, dialog=dialog)
def open_modal_with_user(client: Any, trigger_id: str, modal: dict):
"""Opens a modal with a user."""
    # the keyword must be named "view" in the call, since the slack api expects "view"
return make_call(client, "views.open", trigger_id=trigger_id, view=modal)
def update_modal_with_user(client: Any, trigger_id: str, view_id: str, modal: dict):
"""Updates a modal with a user."""
return make_call(client, "views.update", trigger_id=trigger_id, view_id=view_id, view=modal)
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import logging
import re
import textwrap
import time
from collections import defaultdict, deque
from contextlib import closing
from datetime import datetime
from distutils.version import StrictVersion
from typing import Any, cast, Dict, List, Optional, Tuple, TYPE_CHECKING
from urllib import parse
import pandas as pd
import simplejson as json
from sqlalchemy import Column, literal_column
from sqlalchemy.engine.base import Engine
from sqlalchemy.engine.reflection import Inspector
from sqlalchemy.engine.result import RowProxy
from sqlalchemy.engine.url import URL
from sqlalchemy.orm import Session
from sqlalchemy.sql.expression import ColumnClause, Select
from superset import app, cache, is_feature_enabled, security_manager
from superset.db_engine_specs.base import BaseEngineSpec
from superset.exceptions import SupersetTemplateException
from superset.models.sql_lab import Query
from superset.models.sql_types.presto_sql_types import type_map as presto_type_map
from superset.sql_parse import ParsedQuery
from superset.utils import core as utils
if TYPE_CHECKING:
# prevent circular imports
from superset.models.core import Database # pylint: disable=unused-import
QueryStatus = utils.QueryStatus
config = app.config
logger = logging.getLogger(__name__)
def get_children(column: Dict[str, str]) -> List[Dict[str, str]]:
"""
Get the children of a complex Presto type (row or array).
For arrays, we return a single list with the base type:
    >>> get_children(dict(name="a", type="ARRAY(BIGINT)"))
    [{'name': 'a', 'type': 'BIGINT'}]
For rows, we return a list of the columns:
>>> get_children(dict(name="a", type="ROW(BIGINT,FOO VARCHAR)"))
[{'name': 'a._col0', 'type': 'BIGINT'}, {'name': 'a.foo', 'type': 'VARCHAR'}]
:param column: dictionary representing a Presto column
:return: list of dictionaries representing children columns
"""
pattern = re.compile(r"(?P<type>\w+)\((?P<children>.*)\)")
match = pattern.match(column["type"])
if not match:
        raise Exception(f"Unable to parse column type {column['type']}")
group = match.groupdict()
type_ = group["type"].upper()
children_type = group["children"]
if type_ == "ARRAY":
return [{"name": column["name"], "type": children_type}]
elif type_ == "ROW":
nameless_columns = 0
columns = []
for child in utils.split(children_type, ","):
parts = list(utils.split(child.strip(), " "))
if len(parts) == 2:
name, type_ = parts
name = name.strip('"')
else:
name = f"_col{nameless_columns}"
type_ = parts[0]
nameless_columns += 1
            columns.append({"name": f"{column['name']}.{name.lower()}", "type": type_})
return columns
else:
raise Exception(f"Unknown type {type_}!")
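# --- Illustrative sketch (added, not part of the original module) ---------------------
# ``get_children`` only unwraps one level of nesting: a nested type comes back still
# wrapped, and callers such as ``PrestoEngineSpec.expand_data`` below re-apply it level
# by level. A hedged, self-contained check of that behaviour (the helper name is made up):
def _example_get_children_one_level() -> None:
    """Sketch: ARRAY(ROW(...)) yields the inner ROW type, not its fields."""
    children = get_children({"name": "a", "type": "ARRAY(ROW(B BIGINT))"})
    assert children == [{"name": "a", "type": "ROW(B BIGINT)"}]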
class PrestoEngineSpec(BaseEngineSpec):
engine = "presto"
_time_grain_expressions = {
None: "{col}",
"PT1S": "date_trunc('second', CAST({col} AS TIMESTAMP))",
"PT1M": "date_trunc('minute', CAST({col} AS TIMESTAMP))",
"PT1H": "date_trunc('hour', CAST({col} AS TIMESTAMP))",
"P1D": "date_trunc('day', CAST({col} AS TIMESTAMP))",
"P1W": "date_trunc('week', CAST({col} AS TIMESTAMP))",
"P1M": "date_trunc('month', CAST({col} AS TIMESTAMP))",
"P0.25Y": "date_trunc('quarter', CAST({col} AS TIMESTAMP))",
"P1Y": "date_trunc('year', CAST({col} AS TIMESTAMP))",
"P1W/1970-01-03T00:00:00Z": "date_add('day', 5, date_trunc('week', "
"date_add('day', 1, CAST({col} AS TIMESTAMP))))",
"1969-12-28T00:00:00Z/P1W": "date_add('day', -1, date_trunc('week', "
"date_add('day', 1, CAST({col} AS TIMESTAMP))))",
}
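    # Note (added for clarity, not part of the original class): the keys are ISO 8601
    # durations; the two week-anchored entries appear to correspond to "week ending
    # Saturday" ("P1W/1970-01-03T00:00:00Z") and "week starting Sunday"
    # ("1969-12-28T00:00:00Z/P1W").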
@classmethod
def get_allow_cost_estimate(cls, version: Optional[str] = None) -> bool:
return version is not None and StrictVersion(version) >= StrictVersion("0.319")
@classmethod
def get_table_names(
cls, database: "Database", inspector: Inspector, schema: Optional[str]
) -> List[str]:
tables = super().get_table_names(database, inspector, schema)
if not is_feature_enabled("PRESTO_SPLIT_VIEWS_FROM_TABLES"):
return tables
views = set(cls.get_view_names(database, inspector, schema))
actual_tables = set(tables) - views
return list(actual_tables)
@classmethod
def get_view_names(
cls, database: "Database", inspector: Inspector, schema: Optional[str]
    ) -> List[str]:
        """Returns view names (or an empty list by default).
        get_table_names() returns both table and view names, while get_view_names()
        is not implemented in sqlalchemy_presto.py
        (https://github.com/dropbox/PyHive/blob/e25fc8440a0686bbb7a5db5de7cb1a77bdb4167a/pyhive/sqlalchemy_presto.py).
        Unless the PRESTO_SPLIT_VIEWS_FROM_TABLES feature flag is enabled, this
        therefore returns an empty list; with the flag enabled, view names are read
        from information_schema.views.
        """
if not is_feature_enabled("PRESTO_SPLIT_VIEWS_FROM_TABLES"):
return []
if schema:
sql = (
"SELECT table_name FROM information_schema.views "
"WHERE table_schema=%(schema)s"
)
params = {"schema": schema}
else:
sql = "SELECT table_name FROM information_schema.views"
params = {}
engine = cls.get_engine(database, schema=schema)
with closing(engine.raw_connection()) as conn:
with closing(conn.cursor()) as cursor:
cursor.execute(sql, params)
results = cursor.fetchall()
return [row[0] for row in results]
@classmethod
def _create_column_info(cls, name: str, data_type: str) -> dict:
"""
Create column info object
:param name: column name
:param data_type: column data type
:return: column info object
"""
return {"name": name, "type": f"{data_type}"}
@classmethod
def _get_full_name(cls, names: List[Tuple[str, str]]) -> str:
"""
Get the full column name
:param names: list of all individual column names
:return: full column name
"""
return ".".join(column[0] for column in names if column[0])
@classmethod
def _has_nested_data_types(cls, component_type: str) -> bool:
"""
Check if string contains a data type. We determine if there is a data type by
whitespace or multiple data types by commas
:param component_type: data type
:return: boolean
"""
comma_regex = r",(?=(?:[^\"]*\"[^\"]*\")*[^\"]*$)"
white_space_regex = r"\s(?=(?:[^\"]*\"[^\"]*\")*[^\"]*$)"
return (
re.search(comma_regex, component_type) is not None
or re.search(white_space_regex, component_type) is not None
)
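    # Illustrative note (added, not part of the original class): with the quote-aware
    # regexes above, _has_nested_data_types("varchar") is False, while both
    # _has_nested_data_types("x varchar") and _has_nested_data_types("varchar, int")
    # are True, because the separator sits outside any double quotes.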
@classmethod
def _split_data_type(cls, data_type: str, delimiter: str) -> List[str]:
"""
Split data type based on given delimiter. Do not split the string if the
delimiter is enclosed in quotes
:param data_type: data type
:param delimiter: string separator (i.e. open parenthesis, closed parenthesis,
comma, whitespace)
:return: list of strings after breaking it by the delimiter
"""
return re.split(
r"{}(?=(?:[^\"]*\"[^\"]*\")*[^\"]*$)".format(delimiter), data_type
)
@classmethod
def _parse_structural_column( # pylint: disable=too-many-locals,too-many-branches
cls, parent_column_name: str, parent_data_type: str, result: List[dict]
) -> None:
"""
        Parse a row or array column
        :param parent_column_name: name of the column that has a row or array type
        :param parent_data_type: string representation of the row or array type
        :param result: list tracking the results
"""
formatted_parent_column_name = parent_column_name
# Quote the column name if there is a space
if " " in parent_column_name:
formatted_parent_column_name = f'"{parent_column_name}"'
full_data_type = f"{formatted_parent_column_name} {parent_data_type}"
original_result_len = len(result)
# split on open parenthesis ( to get the structural
# data type and its component types
data_types = cls._split_data_type(full_data_type, r"\(")
stack: List[Tuple[str, str]] = []
for data_type in data_types:
# split on closed parenthesis ) to track which component
# types belong to what structural data type
inner_types = cls._split_data_type(data_type, r"\)")
for inner_type in inner_types:
# We have finished parsing multiple structural data types
if not inner_type and stack:
stack.pop()
elif cls._has_nested_data_types(inner_type):
# split on comma , to get individual data types
single_fields = cls._split_data_type(inner_type, ",")
for single_field in single_fields:
single_field = single_field.strip()
# If component type starts with a comma, the first single field
# will be an empty string. Disregard this empty string.
if not single_field:
continue
# split on whitespace to get field name and data type
field_info = cls._split_data_type(single_field, r"\s")
# check if there is a structural data type within
# overall structural data type
if field_info[1] == "array" or field_info[1] == "row":
stack.append((field_info[0], field_info[1]))
full_parent_path = cls._get_full_name(stack)
result.append(
cls._create_column_info(
full_parent_path, presto_type_map[field_info[1]]()
)
)
else: # otherwise this field is a basic data type
full_parent_path = cls._get_full_name(stack)
column_name = "{}.{}".format(
full_parent_path, field_info[0]
)
result.append(
cls._create_column_info(
column_name, presto_type_map[field_info[1]]()
)
)
# If the component type ends with a structural data type, do not pop
# the stack. We have run across a structural data type within the
# overall structural data type. Otherwise, we have completely parsed
# through the entire structural data type and can move on.
if not (inner_type.endswith("array") or inner_type.endswith("row")):
stack.pop()
# We have an array of row objects (i.e. array(row(...)))
elif inner_type == "array" or inner_type == "row":
# Push a dummy object to represent the structural data type
stack.append(("", inner_type))
                # We have an array of a basic data type (i.e. array(varchar)).
                elif stack:
                    # Because it is an array of a basic data type, we have finished
                    # parsing the structural data type and can move on.
stack.pop()
# Unquote the column name if necessary
if formatted_parent_column_name != parent_column_name:
for index in range(original_result_len, len(result)):
result[index]["name"] = result[index]["name"].replace(
formatted_parent_column_name, parent_column_name
)
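    # Illustrative note (added, not part of the original class): for example
    #     result = []
    #     PrestoEngineSpec._parse_structural_column("a", "row(b bigint)", result)
    # leaves ``result`` with two entries, one for "a" (a ROW type object) and one for
    # "a.b" (a BIGINT type object), which is how nested fields become addressable as
    # columns of their own.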
@classmethod
def _show_columns(
cls, inspector: Inspector, table_name: str, schema: Optional[str]
) -> List[RowProxy]:
"""
Show presto column names
:param inspector: object that performs database schema inspection
:param table_name: table name
:param schema: schema name
:return: list of column objects
"""
quote = inspector.engine.dialect.identifier_preparer.quote_identifier
full_table = quote(table_name)
if schema:
full_table = "{}.{}".format(quote(schema), full_table)
columns = inspector.bind.execute("SHOW COLUMNS FROM {}".format(full_table))
return columns
@classmethod
def get_columns(
cls, inspector: Inspector, table_name: str, schema: Optional[str]
) -> List[Dict[str, Any]]:
"""
Get columns from a Presto data source. This includes handling row and
array data types
:param inspector: object that performs database schema inspection
:param table_name: table name
:param schema: schema name
:return: a list of results that contain column info
(i.e. column name and data type)
"""
columns = cls._show_columns(inspector, table_name, schema)
result: List[dict] = []
for column in columns:
try:
# parse column if it is a row or array
if is_feature_enabled("PRESTO_EXPAND_DATA") and (
"array" in column.Type or "row" in column.Type
):
structural_column_index = len(result)
cls._parse_structural_column(column.Column, column.Type, result)
result[structural_column_index]["nullable"] = getattr(
column, "Null", True
)
result[structural_column_index]["default"] = None
continue
else: # otherwise column is a basic data type
column_type = presto_type_map[column.Type]()
except KeyError:
logger.info(
"Did not recognize type {} of column {}".format( # pylint: disable=logging-format-interpolation
column.Type, column.Column
)
)
column_type = "OTHER"
column_info = cls._create_column_info(column.Column, column_type)
column_info["nullable"] = getattr(column, "Null", True)
column_info["default"] = None
result.append(column_info)
return result
@classmethod
def _is_column_name_quoted(cls, column_name: str) -> bool:
"""
Check if column name is in quotes
:param column_name: column name
:return: boolean
"""
return column_name.startswith('"') and column_name.endswith('"')
@classmethod
def _get_fields(cls, cols: List[dict]) -> List[ColumnClause]:
"""
Format column clauses where names are in quotes and labels are specified
:param cols: columns
:return: column clauses
"""
column_clauses = []
# Column names are separated by periods. This regex will find periods in a
# string if they are not enclosed in quotes because if a period is enclosed in
# quotes, then that period is part of a column name.
dot_pattern = r"""\. # split on period
(?= # look ahead
(?: # create non-capture group
[^\"]*\"[^\"]*\" # two quotes
)*[^\"]*$) # end regex"""
dot_regex = re.compile(dot_pattern, re.VERBOSE)
for col in cols:
# get individual column names
col_names = re.split(dot_regex, col["name"])
            # quote each column name if it is not already quoted
            quoted_col_name = ".".join(
                col_name if cls._is_column_name_quoted(col_name) else f'"{col_name}"'
                for col_name in col_names
            )
# create column clause in the format "name"."name" AS "name.name"
column_clause = literal_column(quoted_col_name).label(col["name"])
column_clauses.append(column_clause)
return column_clauses
@classmethod
def select_star( # pylint: disable=too-many-arguments
cls,
database: "Database",
table_name: str,
engine: Engine,
schema: Optional[str] = None,
limit: int = 100,
show_cols: bool = False,
indent: bool = True,
latest_partition: bool = True,
cols: Optional[List[Dict[str, Any]]] = None,
) -> str:
"""
Include selecting properties of row objects. We cannot easily break arrays into
rows, so render the whole array in its own row and skip columns that correspond
to an array's contents.
"""
cols = cols or []
presto_cols = cols
if is_feature_enabled("PRESTO_EXPAND_DATA") and show_cols:
dot_regex = r"\.(?=(?:[^\"]*\"[^\"]*\")*[^\"]*$)"
presto_cols = [
col for col in presto_cols if not re.search(dot_regex, col["name"])
]
return super().select_star(
database,
table_name,
engine,
schema,
limit,
show_cols,
indent,
latest_partition,
presto_cols,
)
@classmethod
def estimate_statement_cost( # pylint: disable=too-many-locals
cls, statement: str, database: "Database", cursor: Any, user_name: str
) -> Dict[str, Any]:
"""
Run a SQL query that estimates the cost of a given statement.
:param statement: A single SQL statement
:param database: Database instance
:param cursor: Cursor instance
        :param user_name: Effective username
:return: JSON response from Presto
"""
parsed_query = ParsedQuery(statement)
sql = parsed_query.stripped()
sql_query_mutator = config["SQL_QUERY_MUTATOR"]
if sql_query_mutator:
sql = sql_query_mutator(sql, user_name, security_manager, database)
sql = f"EXPLAIN (TYPE IO, FORMAT JSON) {sql}"
cursor.execute(sql)
# the output from Presto is a single column and a single row containing
# JSON:
#
# {
# ...
# "estimate" : {
# "outputRowCount" : 8.73265878E8,
# "outputSizeInBytes" : 3.41425774958E11,
# "cpuCost" : 3.41425774958E11,
# "maxMemory" : 0.0,
# "networkCost" : 3.41425774958E11
# }
# }
result = json.loads(cursor.fetchone()[0])
return result
@classmethod
def query_cost_formatter(
cls, raw_cost: List[Dict[str, Any]]
) -> List[Dict[str, str]]:
"""
Format cost estimate.
:param raw_cost: JSON estimate from Presto
:return: Human readable cost estimate
"""
def humanize(value: Any, suffix: str) -> str:
try:
value = int(value)
except ValueError:
return str(value)
prefixes = ["K", "M", "G", "T", "P", "E", "Z", "Y"]
prefix = ""
to_next_prefix = 1000
while value > to_next_prefix and prefixes:
prefix = prefixes.pop(0)
value //= to_next_prefix
return f"{value} {prefix}{suffix}"
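        # Illustrative note (added, not part of the original method): for example,
        # humanize(10500, "B") returns "10 KB"; non-numeric estimates are returned
        # unchanged as strings.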
cost = []
columns = [
("outputRowCount", "Output count", " rows"),
("outputSizeInBytes", "Output size", "B"),
("cpuCost", "CPU cost", ""),
("maxMemory", "Max memory", "B"),
("networkCost", "Network cost", ""),
]
for row in raw_cost:
estimate: Dict[str, float] = row.get("estimate", {})
statement_cost = {}
for key, label, suffix in columns:
if key in estimate:
statement_cost[label] = humanize(estimate[key], suffix).strip()
cost.append(statement_cost)
return cost
@classmethod
def adjust_database_uri(
cls, uri: URL, selected_schema: Optional[str] = None
) -> None:
database = uri.database
if selected_schema and database:
selected_schema = parse.quote(selected_schema, safe="")
if "/" in database:
database = database.split("/")[0] + "/" + selected_schema
else:
database += "/" + selected_schema
uri.database = database
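    # Illustrative note (added, not part of the original class): a URI database of
    # "hive/default" with selected_schema "sales" becomes "hive/sales", and a bare
    # "hive" also becomes "hive/sales".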
@classmethod
def convert_dttm(cls, target_type: str, dttm: datetime) -> Optional[str]:
tt = target_type.upper()
if tt == "DATE":
return f"""from_iso8601_date('{dttm.date().isoformat()}')"""
if tt == "TIMESTAMP":
return f"""from_iso8601_timestamp('{dttm.isoformat(timespec="microseconds")}')""" # pylint: disable=line-too-long
return None
@classmethod
def epoch_to_dttm(cls) -> str:
return "from_unixtime({col})"
@classmethod
def get_all_datasource_names(
cls, database: "Database", datasource_type: str
) -> List[utils.DatasourceName]:
datasource_df = database.get_df(
"SELECT table_schema, table_name FROM INFORMATION_SCHEMA.{}S "
"ORDER BY concat(table_schema, '.', table_name)".format(
datasource_type.upper()
),
None,
)
datasource_names: List[utils.DatasourceName] = []
for _unused, row in datasource_df.iterrows():
datasource_names.append(
utils.DatasourceName(
schema=row["table_schema"], table=row["table_name"]
)
)
return datasource_names
@classmethod
def expand_data( # pylint: disable=too-many-locals
cls, columns: List[dict], data: List[dict]
) -> Tuple[List[dict], List[dict], List[dict]]:
"""
We do not immediately display rows and arrays clearly in the data grid. This
method separates out nested fields and data values to help clearly display
structural columns.
Example: ColumnA is a row(nested_obj varchar) and ColumnB is an array(int)
Original data set = [
{'ColumnA': ['a1'], 'ColumnB': [1, 2]},
{'ColumnA': ['a2'], 'ColumnB': [3, 4]},
]
Expanded data set = [
{'ColumnA': ['a1'], 'ColumnA.nested_obj': 'a1', 'ColumnB': 1},
{'ColumnA': '', 'ColumnA.nested_obj': '', 'ColumnB': 2},
{'ColumnA': ['a2'], 'ColumnA.nested_obj': 'a2', 'ColumnB': 3},
{'ColumnA': '', 'ColumnA.nested_obj': '', 'ColumnB': 4},
]
:param columns: columns selected in the query
:param data: original data set
:return: list of all columns(selected columns and their nested fields),
expanded data set, listed of nested fields
"""
if not is_feature_enabled("PRESTO_EXPAND_DATA"):
return columns, data, []
# process each column, unnesting ARRAY types and
# expanding ROW types into new columns
to_process = deque((column, 0) for column in columns)
all_columns: List[dict] = []
expanded_columns = []
current_array_level = None
while to_process:
column, level = to_process.popleft()
if column["name"] not in [column["name"] for column in all_columns]:
all_columns.append(column)
            # When unnesting arrays we need to keep track of how many extra rows
            # were added for each original row. This is necessary when we expand
            # multiple arrays, so that the arrays after the first reuse the rows
            # added by the first. Every time we change a level in the nested arrays
            # we reinitialize this counter.
if level != current_array_level:
unnested_rows: Dict[int, int] = defaultdict(int)
current_array_level = level
name = column["name"]
if column["type"].startswith("ARRAY("):
# keep processing array children; we append to the right so that
# multiple nested arrays are processed breadth-first
to_process.append((get_children(column)[0], level + 1))
# unnest array objects data into new rows
i = 0
while i < len(data):
row = data[i]
values = row.get(name)
if values:
                        # how many extra rows do we need to unnest the data?
extra_rows = len(values) - 1
# how many rows were already added for this row?
current_unnested_rows = unnested_rows[i]
# add any necessary rows
missing = extra_rows - current_unnested_rows
for _ in range(missing):
data.insert(i + current_unnested_rows + 1, {})
unnested_rows[i] += 1
# unnest array into rows
for j, value in enumerate(values):
data[i + j][name] = value
# skip newly unnested rows
i += unnested_rows[i]
i += 1
if column["type"].startswith("ROW("):
# expand columns; we append them to the left so they are added
# immediately after the parent
expanded = get_children(column)
to_process.extendleft((column, level) for column in expanded)
expanded_columns.extend(expanded)
# expand row objects into new columns
for row in data:
for value, col in zip(row.get(name) or [], expanded):
row[col["name"]] = value
data = [
{k["name"]: row.get(k["name"], "") for k in all_columns} for row in data
]
return all_columns, data, expanded_columns
@classmethod
def extra_table_metadata(
cls, database: "Database", table_name: str, schema_name: str
) -> Dict[str, Any]:
metadata = {}
indexes = database.get_indexes(table_name, schema_name)
if indexes:
cols = indexes[0].get("column_names", [])
full_table_name = table_name
if schema_name and "." not in table_name:
full_table_name = "{}.{}".format(schema_name, table_name)
pql = cls._partition_query(full_table_name, database)
col_names, latest_parts = cls.latest_partition(
table_name, schema_name, database, show_first=True
)
if not latest_parts:
latest_parts = tuple([None] * len(col_names)) # type: ignore
metadata["partitions"] = {
"cols": cols,
"latest": dict(zip(col_names, latest_parts)), # type: ignore
"partitionQuery": pql,
}
# flake8 is not matching `Optional[str]` to `Any` for some reason...
metadata["view"] = cast(
Any, cls.get_create_view(database, schema_name, table_name)
)
return metadata
@classmethod
def get_create_view(
cls, database: "Database", schema: str, table: str
) -> Optional[str]:
"""
Return a CREATE VIEW statement, or `None` if not a view.
:param database: Database instance
:param schema: Schema name
:param table: Table (view) name
"""
from pyhive.exc import DatabaseError
engine = cls.get_engine(database, schema)
with closing(engine.raw_connection()) as conn:
with closing(conn.cursor()) as cursor:
sql = f"SHOW CREATE VIEW {schema}.{table}"
try:
cls.execute(cursor, sql)
polled = cursor.poll()
while polled:
time.sleep(0.2)
polled = cursor.poll()
except DatabaseError: # not a VIEW
return None
rows = cls.fetch_data(cursor, 1)
return rows[0][0]
@classmethod
def handle_cursor(cls, cursor: Any, query: Query, session: Session) -> None:
"""Updates progress information"""
query_id = query.id
logger.info(f"Query {query_id}: Polling the cursor for progress")
polled = cursor.poll()
# poll returns dict -- JSON status information or ``None``
# if the query is done
# https://github.com/dropbox/PyHive/blob/
# b34bdbf51378b3979eaf5eca9e956f06ddc36ca0/pyhive/presto.py#L178
while polled:
# Update the object and wait for the kill signal.
stats = polled.get("stats", {})
query = session.query(type(query)).filter_by(id=query_id).one()
if query.status in [QueryStatus.STOPPED, QueryStatus.TIMED_OUT]:
cursor.cancel()
break
if stats:
state = stats.get("state")
# if already finished, then stop polling
if state == "FINISHED":
break
completed_splits = float(stats.get("completedSplits"))
total_splits = float(stats.get("totalSplits"))
if total_splits and completed_splits:
progress = 100 * (completed_splits / total_splits)
logger.info(
"Query {} progress: {} / {} " # pylint: disable=logging-format-interpolation
"splits".format(query_id, completed_splits, total_splits)
)
if progress > query.progress:
query.progress = progress
session.commit()
time.sleep(1)
logger.info(f"Query {query_id}: Polling the cursor for progress")
polled = cursor.poll()
@classmethod
def _extract_error_message(cls, e: Exception) -> Optional[str]:
if (
hasattr(e, "orig")
and type(e.orig).__name__ == "DatabaseError" # type: ignore
and isinstance(e.orig[0], dict) # type: ignore
):
error_dict = e.orig[0] # type: ignore
return "{} at {}: {}".format(
error_dict.get("errorName"),
error_dict.get("errorLocation"),
error_dict.get("message"),
)
if type(e).__name__ == "DatabaseError" and hasattr(e, "args") and e.args:
error_dict = e.args[0]
return error_dict.get("message")
return utils.error_msg_from_exception(e)
@classmethod
def _partition_query( # pylint: disable=too-many-arguments,too-many-locals
cls,
table_name: str,
database: "Database",
limit: int = 0,
order_by: Optional[List[Tuple[str, bool]]] = None,
filters: Optional[Dict[Any, Any]] = None,
) -> str:
"""Returns a partition query
:param table_name: the name of the table to get partitions from
:type table_name: str
:param limit: the number of partitions to be returned
:type limit: int
:param order_by: a list of tuples of field name and a boolean
that determines if that field should be sorted in descending
order
:type order_by: list of (str, bool) tuples
:param filters: dict of field name and filter value combinations
"""
limit_clause = "LIMIT {}".format(limit) if limit else ""
order_by_clause = ""
if order_by:
l = []
for field, desc in order_by:
l.append(field + " DESC" if desc else "")
order_by_clause = "ORDER BY " + ", ".join(l)
where_clause = ""
if filters:
l = []
for field, value in filters.items():
l.append(f"{field} = '{value}'")
where_clause = "WHERE " + " AND ".join(l)
presto_version = database.get_extra().get("version")
# Partition select syntax changed in v0.199, so check here.
# Default to the new syntax if version is unset.
partition_select_clause = (
f'SELECT * FROM "{table_name}$partitions"'
if not presto_version
or StrictVersion(presto_version) >= StrictVersion("0.199")
else f"SHOW PARTITIONS FROM {table_name}"
)
sql = textwrap.dedent(
f"""\
{partition_select_clause}
{where_clause}
{order_by_clause}
{limit_clause}
"""
)
return sql
@classmethod
def where_latest_partition( # pylint: disable=too-many-arguments
cls,
table_name: str,
schema: Optional[str],
database: "Database",
query: Select,
columns: Optional[List] = None,
) -> Optional[Select]:
try:
col_names, values = cls.latest_partition(
table_name, schema, database, show_first=True
)
except Exception: # pylint: disable=broad-except
# table is not partitioned
return None
if values is None:
return None
column_names = {column.get("name") for column in columns or []}
for col_name, value in zip(col_names, values):
if col_name in column_names:
query = query.where(Column(col_name) == value)
return query
@classmethod
def _latest_partition_from_df( # pylint: disable=invalid-name
cls, df: pd.DataFrame
) -> Optional[List[str]]:
if not df.empty:
return df.to_records(index=False)[0].item()
return None
@classmethod
def latest_partition(
cls,
table_name: str,
schema: Optional[str],
database: "Database",
show_first: bool = False,
) -> Tuple[List[str], Optional[List[str]]]:
"""Returns col name and the latest (max) partition value for a table
:param table_name: the name of the table
:param schema: schema / database / namespace
:param database: database query will be run against
:type database: models.Database
:param show_first: displays the value for the first partitioning key
if there are many partitioning keys
:type show_first: bool
>>> latest_partition('foo_table')
(['ds'], ('2018-01-01',))
"""
indexes = database.get_indexes(table_name, schema)
if not indexes:
raise SupersetTemplateException(
f"Error getting partition for {schema}.{table_name}. "
"Verify that this table has a partition."
)
if len(indexes[0]["column_names"]) < 1:
raise SupersetTemplateException(
"The table should have one partitioned field"
)
elif not show_first and len(indexes[0]["column_names"]) > 1:
raise SupersetTemplateException(
"The table should have a single partitioned field "
"to use this function. You may want to use "
"`presto.latest_sub_partition`"
)
column_names = indexes[0]["column_names"]
part_fields = [(column_name, True) for column_name in column_names]
sql = cls._partition_query(table_name, database, 1, part_fields)
df = database.get_df(sql, schema)
return column_names, cls._latest_partition_from_df(df)
@classmethod
def latest_sub_partition(
cls, table_name: str, schema: Optional[str], database: "Database", **kwargs: Any
) -> Any:
"""Returns the latest (max) partition value for a table
A filtering criteria should be passed for all fields that are
partitioned except for the field to be returned. For example,
if a table is partitioned by (``ds``, ``event_type`` and
``event_category``) and you want the latest ``ds``, you'll want
to provide a filter as keyword arguments for both
``event_type`` and ``event_category`` as in
``latest_sub_partition('my_table',
event_category='page', event_type='click')``
:param table_name: the name of the table, can be just the table
name or a fully qualified table name as ``schema_name.table_name``
:type table_name: str
:param schema: schema / database / namespace
:type schema: str
:param database: database query will be run against
:type database: models.Database
:param kwargs: keyword arguments define the filtering criteria
on the partition list. There can be many of these.
:type kwargs: str
>>> latest_sub_partition('sub_partition_table', event_type='click')
'2018-01-01'
"""
indexes = database.get_indexes(table_name, schema)
part_fields = indexes[0]["column_names"]
for k in kwargs.keys(): # pylint: disable=consider-iterating-dictionary
if k not in k in part_fields:
msg = "Field [{k}] is not part of the portioning key"
raise SupersetTemplateException(msg)
if len(kwargs.keys()) != len(part_fields) - 1:
msg = (
"A filter needs to be specified for {} out of the " "{} fields."
).format(len(part_fields) - 1, len(part_fields))
raise SupersetTemplateException(msg)
for field in part_fields:
if field not in kwargs.keys():
field_to_return = field
sql = cls._partition_query(
table_name, database, 1, [(field_to_return, True)], kwargs
)
df = database.get_df(sql, schema)
if df.empty:
return ""
return df.to_dict()[field_to_return][0]
@classmethod
@cache.memoize()
def get_function_names(cls, database: "Database") -> List[str]:
"""
Get a list of function names that are able to be called on the database.
Used for SQL Lab autocomplete.
:param database: The database to get functions for
:return: A list of function names useable in the database
"""
return database.get_df("SHOW FUNCTIONS")["Function"].tolist()
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import logging
import re
import textwrap
import time
from collections import defaultdict, deque
from contextlib import closing
from datetime import datetime
from distutils.version import StrictVersion
from typing import Any, cast, Dict, List, Optional, Tuple, TYPE_CHECKING
from urllib import parse
import pandas as pd
import simplejson as json
from sqlalchemy import Column, literal_column
from sqlalchemy.engine.base import Engine
from sqlalchemy.engine.reflection import Inspector
from sqlalchemy.engine.result import RowProxy
from sqlalchemy.engine.url import URL
from sqlalchemy.orm import Session
from sqlalchemy.sql.expression import ColumnClause, Select
from superset import app, cache, is_feature_enabled, security_manager
from superset.db_engine_specs.base import BaseEngineSpec
from superset.exceptions import SupersetTemplateException
from superset.models.sql_lab import Query
from superset.models.sql_types.presto_sql_types import type_map as presto_type_map
from superset.sql_parse import ParsedQuery
from superset.utils import core as utils
if TYPE_CHECKING:
# prevent circular imports
from superset.models.core import Database # pylint: disable=unused-import
QueryStatus = utils.QueryStatus
config = app.config
logger = logging.getLogger(__name__)
def get_children(column: Dict[str, str]) -> List[Dict[str, str]]:
"""
Get the children of a complex Presto type (row or array).
For arrays, we return a single list with the base type:
>>> get_children(dict(name="a", type="ARRAY(BIGINT)"))
[{"name": "a", "type": "BIGINT"}]
For rows, we return a list of the columns:
>>> get_children(dict(name="a", type="ROW(BIGINT,FOO VARCHAR)"))
[{'name': 'a._col0', 'type': 'BIGINT'}, {'name': 'a.foo', 'type': 'VARCHAR'}]
:param column: dictionary representing a Presto column
:return: list of dictionaries representing children columns
"""
pattern = re.compile(r"(?P<type>\w+)\((?P<children>.*)\)")
match = pattern.match(column["type"])
if not match:
raise Exception(f"Unable to parse column type {column['type']}")
group = match.groupdict()
type_ = group["type"].upper()
children_type = group["children"]
if type_ == "ARRAY":
return [{"name": column["name"], "type": children_type}]
elif type_ == "ROW":
nameless_columns = 0
columns = []
for child in utils.split(children_type, ","):
parts = list(utils.split(child.strip(), " "))
if len(parts) == 2:
name, type_ = parts
name = name.strip('"')
else:
name = f"_col{nameless_columns}"
type_ = parts[0]
nameless_columns += 1
columns.append({"name": f"{column['name']}.{name.lower()}", "type": type_})
return columns
else:
raise Exception(f"Unknown type {type_}!")
class PrestoEngineSpec(BaseEngineSpec):
engine = "presto"
_time_grain_expressions = {
None: "{col}",
"PT1S": "date_trunc('second', CAST({col} AS TIMESTAMP))",
"PT1M": "date_trunc('minute', CAST({col} AS TIMESTAMP))",
"PT1H": "date_trunc('hour', CAST({col} AS TIMESTAMP))",
"P1D": "date_trunc('day', CAST({col} AS TIMESTAMP))",
"P1W": "date_trunc('week', CAST({col} AS TIMESTAMP))",
"P1M": "date_trunc('month', CAST({col} AS TIMESTAMP))",
"P0.25Y": "date_trunc('quarter', CAST({col} AS TIMESTAMP))",
"P1Y": "date_trunc('year', CAST({col} AS TIMESTAMP))",
"P1W/1970-01-03T00:00:00Z": "date_add('day', 5, date_trunc('week', "
"date_add('day', 1, CAST({col} AS TIMESTAMP))))",
"1969-12-28T00:00:00Z/P1W": "date_add('day', -1, date_trunc('week', "
"date_add('day', 1, CAST({col} AS TIMESTAMP))))",
}
@classmethod
def get_allow_cost_estimate(cls, version: Optional[str] = None) -> bool:
return version is not None and StrictVersion(version) >= StrictVersion("0.319")
@classmethod
def get_table_names(
cls, database: "Database", inspector: Inspector, schema: Optional[str]
) -> List[str]:
tables = super().get_table_names(database, inspector, schema)
if not is_feature_enabled("PRESTO_SPLIT_VIEWS_FROM_TABLES"):
return tables
views = set(cls.get_view_names(database, inspector, schema))
actual_tables = set(tables) - views
return list(actual_tables)
@classmethod
def get_view_names(
cls, database: "Database", inspector: Inspector, schema: Optional[str]
) -> List[str]:
"""Returns an empty list
get_table_names() function returns all table names and view names,
and get_view_names() is not implemented in sqlalchemy_presto.py
https://github.com/dropbox/PyHive/blob/e25fc8440a0686bbb7a5db5de7cb1a77bdb4167a/pyhive/sqlalchemy_presto.py
"""
if not is_feature_enabled("PRESTO_SPLIT_VIEWS_FROM_TABLES"):
return []
if schema:
sql = (
"SELECT table_name FROM information_schema.views "
"WHERE table_schema=%(schema)s"
)
params = {"schema": schema}
else:
sql = "SELECT table_name FROM information_schema.views"
params = {}
engine = cls.get_engine(database, schema=schema)
with closing(engine.raw_connection()) as conn:
with closing(conn.cursor()) as cursor:
cursor.execute(sql, params)
results = cursor.fetchall()
return [row[0] for row in results]
@classmethod
def _create_column_info(cls, name: str, data_type: str) -> dict:
"""
Create column info object
:param name: column name
:param data_type: column data type
:return: column info object
"""
return {"name": name, "type": f"{data_type}"}
@classmethod
def _get_full_name(cls, names: List[Tuple[str, str]]) -> str:
"""
Get the full column name
:param names: list of all individual column names
:return: full column name
"""
return ".".join(column[0] for column in names if column[0])
@classmethod
def _has_nested_data_types(cls, component_type: str) -> bool:
"""
        Check if the string contains nested data types. A single nested field is
        detected by unquoted whitespace, and multiple nested fields by unquoted commas
:param component_type: data type
:return: boolean
"""
comma_regex = r",(?=(?:[^\"]*\"[^\"]*\")*[^\"]*$)"
white_space_regex = r"\s(?=(?:[^\"]*\"[^\"]*\")*[^\"]*$)"
return (
re.search(comma_regex, component_type) is not None
or re.search(white_space_regex, component_type) is not None
)
@classmethod
def _split_data_type(cls, data_type: str, delimiter: str) -> List[str]:
"""
Split data type based on given delimiter. Do not split the string if the
delimiter is enclosed in quotes
:param data_type: data type
:param delimiter: string separator (i.e. open parenthesis, closed parenthesis,
comma, whitespace)
:return: list of strings after breaking it by the delimiter
"""
return re.split(
r"{}(?=(?:[^\"]*\"[^\"]*\")*[^\"]*$)".format(delimiter), data_type
)
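    # Worked example (added for illustration; the input is hypothetical): the
    # lookahead in the split pattern only matches when an even number of quotes
    # follow, so delimiters inside quoted identifiers are preserved:
    #   cls._split_data_type('"a,b" varchar, c bigint', ",")
    #   -> ['"a,b" varchar', ' c bigint']
    # The comma inside "a,b" is left untouched.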
@classmethod
def _parse_structural_column( # pylint: disable=too-many-locals,too-many-branches
cls, parent_column_name: str, parent_data_type: str, result: List[dict]
) -> None:
"""
Parse a row or array column
:param result: list tracking the results
"""
formatted_parent_column_name = parent_column_name
# Quote the column name if there is a space
if " " in parent_column_name:
formatted_parent_column_name = f'"{parent_column_name}"'
full_data_type = f"{formatted_parent_column_name} {parent_data_type}"
original_result_len = len(result)
# split on open parenthesis ( to get the structural
# data type and its component types
data_types = cls._split_data_type(full_data_type, r"\(")
stack: List[Tuple[str, str]] = []
for data_type in data_types:
# split on closed parenthesis ) to track which component
# types belong to what structural data type
inner_types = cls._split_data_type(data_type, r"\)")
for inner_type in inner_types:
# We have finished parsing multiple structural data types
if not inner_type and stack:
stack.pop()
elif cls._has_nested_data_types(inner_type):
# split on comma , to get individual data types
single_fields = cls._split_data_type(inner_type, ",")
for single_field in single_fields:
single_field = single_field.strip()
# If component type starts with a comma, the first single field
# will be an empty string. Disregard this empty string.
if not single_field:
continue
# split on whitespace to get field name and data type
field_info = cls._split_data_type(single_field, r"\s")
# check if there is a structural data type within
# overall structural data type
if field_info[1] == "array" or field_info[1] == "row":
stack.append((field_info[0], field_info[1]))
full_parent_path = cls._get_full_name(stack)
result.append(
cls._create_column_info(
full_parent_path, presto_type_map[field_info[1]]()
)
)
else: # otherwise this field is a basic data type
full_parent_path = cls._get_full_name(stack)
column_name = "{}.{}".format(
full_parent_path, field_info[0]
)
result.append(
cls._create_column_info(
column_name, presto_type_map[field_info[1]]()
)
)
# If the component type ends with a structural data type, do not pop
# the stack. We have run across a structural data type within the
# overall structural data type. Otherwise, we have completely parsed
# through the entire structural data type and can move on.
if not (inner_type.endswith("array") or inner_type.endswith("row")):
stack.pop()
# We have an array of row objects (i.e. array(row(...)))
elif inner_type == "array" or inner_type == "row":
# Push a dummy object to represent the structural data type
stack.append(("", inner_type))
# We have an array of a basic data types(i.e. array(varchar)).
elif stack:
# Because it is an array of a basic data type. We have finished
# parsing the structural data type and can move on.
stack.pop()
# Unquote the column name if necessary
if formatted_parent_column_name != parent_column_name:
for index in range(original_result_len, len(result)):
result[index]["name"] = result[index]["name"].replace(
formatted_parent_column_name, parent_column_name
)
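    # Rough worked example (added for illustration; the exact type objects come
    # from presto_type_map): parsing a simple row column
    #   result = []
    #   cls._parse_structural_column("a", "row(b bigint, c varchar)", result)
    # leaves result with three entries: "a" (ROW), "a.b" (BIGINT) and
    # "a.c" (VARCHAR), i.e. the parent column followed by its flattened fields.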
@classmethod
def _show_columns(
cls, inspector: Inspector, table_name: str, schema: Optional[str]
) -> List[RowProxy]:
"""
Show presto column names
:param inspector: object that performs database schema inspection
:param table_name: table name
:param schema: schema name
:return: list of column objects
"""
quote = inspector.engine.dialect.identifier_preparer.quote_identifier
full_table = quote(table_name)
if schema:
full_table = "{}.{}".format(quote(schema), full_table)
columns = inspector.bind.execute("SHOW COLUMNS FROM {}".format(full_table))
return columns
@classmethod
def get_columns(
cls, inspector: Inspector, table_name: str, schema: Optional[str]
) -> List[Dict[str, Any]]:
"""
Get columns from a Presto data source. This includes handling row and
array data types
:param inspector: object that performs database schema inspection
:param table_name: table name
:param schema: schema name
:return: a list of results that contain column info
(i.e. column name and data type)
"""
columns = cls._show_columns(inspector, table_name, schema)
result: List[dict] = []
for column in columns:
try:
# parse column if it is a row or array
if is_feature_enabled("PRESTO_EXPAND_DATA") and (
"array" in column.Type or "row" in column.Type
):
structural_column_index = len(result)
cls._parse_structural_column(column.Column, column.Type, result)
result[structural_column_index]["nullable"] = getattr(
column, "Null", True
)
result[structural_column_index]["default"] = None
continue
else: # otherwise column is a basic data type
column_type = presto_type_map[column.Type]()
except KeyError:
logger.info(
"Did not recognize type {} of column {}".format( # pylint: disable=logging-format-interpolation
column.Type, column.Column
)
)
column_type = "OTHER"
column_info = cls._create_column_info(column.Column, column_type)
column_info["nullable"] = getattr(column, "Null", True)
column_info["default"] = None
result.append(column_info)
return result
@classmethod
def _is_column_name_quoted(cls, column_name: str) -> bool:
"""
Check if column name is in quotes
:param column_name: column name
:return: boolean
"""
return column_name.startswith('"') and column_name.endswith('"')
@classmethod
def _get_fields(cls, cols: List[dict]) -> List[ColumnClause]:
"""
Format column clauses where names are in quotes and labels are specified
:param cols: columns
:return: column clauses
"""
column_clauses = []
# Column names are separated by periods. This regex will find periods in a
# string if they are not enclosed in quotes because if a period is enclosed in
# quotes, then that period is part of a column name.
dot_pattern = r"""\. # split on period
(?= # look ahead
(?: # create non-capture group
[^\"]*\"[^\"]*\" # two quotes
)*[^\"]*$) # end regex"""
dot_regex = re.compile(dot_pattern, re.VERBOSE)
for col in cols:
# get individual column names
col_names = re.split(dot_regex, col["name"])
# quote each column name if it is not already quoted
for index, col_name in enumerate(col_names):
if not cls._is_column_name_quoted(col_name):
col_names[index] = '"{}"'.format(col_name)
quoted_col_name = ".".join(
col_name if cls._is_column_name_quoted(col_name) else f'"{col_name}"'
for col_name in col_names
)
# create column clause in the format "name"."name" AS "name.name"
column_clause = literal_column(quoted_col_name).label(col["name"])
column_clauses.append(column_clause)
return column_clauses
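    # Example of the quoting behaviour (illustrative only): a selected column
    # named "a.b" becomes the clause  "a"."b" AS "a.b" , so Presto addresses the
    # nested field while the result set keeps the dotted display name.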
@classmethod
def select_star( # pylint: disable=too-many-arguments
cls,
database: "Database",
table_name: str,
engine: Engine,
schema: Optional[str] = None,
limit: int = 100,
show_cols: bool = False,
indent: bool = True,
latest_partition: bool = True,
cols: Optional[List[Dict[str, Any]]] = None,
) -> str:
"""
Include selecting properties of row objects. We cannot easily break arrays into
rows, so render the whole array in its own row and skip columns that correspond
to an array's contents.
"""
cols = cols or []
presto_cols = cols
if is_feature_enabled("PRESTO_EXPAND_DATA") and show_cols:
dot_regex = r"\.(?=(?:[^\"]*\"[^\"]*\")*[^\"]*$)"
presto_cols = [
col for col in presto_cols if not re.search(dot_regex, col["name"])
]
return super().select_star(
database,
table_name,
engine,
schema,
limit,
show_cols,
indent,
latest_partition,
presto_cols,
)
@classmethod
def estimate_statement_cost( # pylint: disable=too-many-locals
cls, statement: str, database: "Database", cursor: Any, user_name: str
) -> Dict[str, Any]:
"""
Run a SQL query that estimates the cost of a given statement.
:param statement: A single SQL statement
:param database: Database instance
:param cursor: Cursor instance
        :param user_name: Effective username
:return: JSON response from Presto
"""
parsed_query = ParsedQuery(statement)
sql = parsed_query.stripped()
sql_query_mutator = config["SQL_QUERY_MUTATOR"]
if sql_query_mutator:
sql = sql_query_mutator(sql, user_name, security_manager, database)
sql = f"EXPLAIN (TYPE IO, FORMAT JSON) {sql}"
cursor.execute(sql)
# the output from Presto is a single column and a single row containing
# JSON:
#
# {
# ...
# "estimate" : {
# "outputRowCount" : 8.73265878E8,
# "outputSizeInBytes" : 3.41425774958E11,
# "cpuCost" : 3.41425774958E11,
# "maxMemory" : 0.0,
# "networkCost" : 3.41425774958E11
# }
# }
result = json.loads(cursor.fetchone()[0])
return result
@classmethod
def query_cost_formatter(
cls, raw_cost: List[Dict[str, Any]]
) -> List[Dict[str, str]]:
"""
Format cost estimate.
:param raw_cost: JSON estimate from Presto
:return: Human readable cost estimate
"""
def humanize(value: Any, suffix: str) -> str:
try:
value = int(value)
except ValueError:
return str(value)
prefixes = ["K", "M", "G", "T", "P", "E", "Z", "Y"]
prefix = ""
to_next_prefix = 1000
while value > to_next_prefix and prefixes:
prefix = prefixes.pop(0)
value //= to_next_prefix
return f"{value} {prefix}{suffix}"
cost = []
columns = [
("outputRowCount", "Output count", " rows"),
("outputSizeInBytes", "Output size", "B"),
("cpuCost", "CPU cost", ""),
("maxMemory", "Max memory", "B"),
("networkCost", "Network cost", ""),
]
for row in raw_cost:
estimate: Dict[str, float] = row.get("estimate", {})
statement_cost = {}
for key, label, suffix in columns:
if key in estimate:
statement_cost[label] = humanize(estimate[key], suffix).strip()
cost.append(statement_cost)
return cost
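    # Illustrative values (not from a real Presto response): an estimate of
    # 8.73265878E8 output rows renders as "873 M rows" and 3.41425774958E11
    # bytes as "341 GB", since humanize() repeatedly divides by 1000 and picks
    # the matching prefix.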
@classmethod
def adjust_database_uri(
cls, uri: URL, selected_schema: Optional[str] = None
) -> None:
database = uri.database
if selected_schema and database:
selected_schema = parse.quote(selected_schema, safe="")
if "/" in database:
database = database.split("/")[0] + "/" + selected_schema
else:
database += "/" + selected_schema
uri.database = database
@classmethod
def convert_dttm(cls, target_type: str, dttm: datetime) -> Optional[str]:
tt = target_type.upper()
if tt == "DATE":
return f"""from_iso8601_date('{dttm.date().isoformat()}')"""
if tt == "TIMESTAMP":
return f"""from_iso8601_timestamp('{dttm.isoformat(timespec="microseconds")}')""" # pylint: disable=line-too-long
return None
@classmethod
def epoch_to_dttm(cls) -> str:
return "from_unixtime({col})"
@classmethod
def get_all_datasource_names(
cls, database: "Database", datasource_type: str
) -> List[utils.DatasourceName]:
datasource_df = database.get_df(
"SELECT table_schema, table_name FROM INFORMATION_SCHEMA.{}S "
"ORDER BY concat(table_schema, '.', table_name)".format(
datasource_type.upper()
),
None,
)
datasource_names: List[utils.DatasourceName] = []
for _unused, row in datasource_df.iterrows():
datasource_names.append(
utils.DatasourceName(
schema=row["table_schema"], table=row["table_name"]
)
)
return datasource_names
@classmethod
def expand_data( # pylint: disable=too-many-locals
cls, columns: List[dict], data: List[dict]
) -> Tuple[List[dict], List[dict], List[dict]]:
"""
We do not immediately display rows and arrays clearly in the data grid. This
method separates out nested fields and data values to help clearly display
structural columns.
Example: ColumnA is a row(nested_obj varchar) and ColumnB is an array(int)
Original data set = [
{'ColumnA': ['a1'], 'ColumnB': [1, 2]},
{'ColumnA': ['a2'], 'ColumnB': [3, 4]},
]
Expanded data set = [
{'ColumnA': ['a1'], 'ColumnA.nested_obj': 'a1', 'ColumnB': 1},
{'ColumnA': '', 'ColumnA.nested_obj': '', 'ColumnB': 2},
{'ColumnA': ['a2'], 'ColumnA.nested_obj': 'a2', 'ColumnB': 3},
{'ColumnA': '', 'ColumnA.nested_obj': '', 'ColumnB': 4},
]
:param columns: columns selected in the query
:param data: original data set
        :return: list of all columns (selected columns and their nested fields),
            expanded data set, list of nested fields
"""
if not is_feature_enabled("PRESTO_EXPAND_DATA"):
return columns, data, []
# process each column, unnesting ARRAY types and
# expanding ROW types into new columns
to_process = deque((column, 0) for column in columns)
all_columns: List[dict] = []
expanded_columns = []
current_array_level = None
while to_process:
column, level = to_process.popleft()
if column["name"] not in [column["name"] for column in all_columns]:
all_columns.append(column)
# When unnesting arrays we need to keep track of how many extra rows
# were added, for each original row. This is necessary when we expand
# multiple arrays, so that the arrays after the first reuse the rows
            # added by the first. Every time we change a level in the nested arrays
# we reinitialize this.
if level != current_array_level:
unnested_rows: Dict[int, int] = defaultdict(int)
current_array_level = level
name = column["name"]
if column["type"].startswith("ARRAY("):
# keep processing array children; we append to the right so that
# multiple nested arrays are processed breadth-first
to_process.append((get_children(column)[0], level + 1))
# unnest array objects data into new rows
i = 0
while i < len(data):
row = data[i]
values = row.get(name)
if values:
# how many extra rows we need to unnest the data?
extra_rows = len(values) - 1
# how many rows were already added for this row?
current_unnested_rows = unnested_rows[i]
# add any necessary rows
missing = extra_rows - current_unnested_rows
for _ in range(missing):
data.insert(i + current_unnested_rows + 1, {})
unnested_rows[i] += 1
# unnest array into rows
for j, value in enumerate(values):
data[i + j][name] = value
# skip newly unnested rows
i += unnested_rows[i]
i += 1
if column["type"].startswith("ROW("):
# expand columns; we append them to the left so they are added
# immediately after the parent
expanded = get_children(column)
to_process.extendleft((column, level) for column in expanded)
expanded_columns.extend(expanded)
# expand row objects into new columns
for row in data:
for value, col in zip(row.get(name) or [], expanded):
row[col["name"]] = value
data = [
{k["name"]: row.get(k["name"], "") for k in all_columns} for row in data
]
return all_columns, data, expanded_columns
@classmethod
def extra_table_metadata(
cls, database: "Database", table_name: str, schema_name: str
) -> Dict[str, Any]:
metadata = {}
indexes = database.get_indexes(table_name, schema_name)
if indexes:
cols = indexes[0].get("column_names", [])
full_table_name = table_name
if schema_name and "." not in table_name:
full_table_name = "{}.{}".format(schema_name, table_name)
pql = cls._partition_query(full_table_name, database)
col_names, latest_parts = cls.latest_partition(
table_name, schema_name, database, show_first=True
)
if not latest_parts:
latest_parts = tuple([None] * len(col_names)) # type: ignore
metadata["partitions"] = {
"cols": cols,
"latest": dict(zip(col_names, latest_parts)), # type: ignore
"partitionQuery": pql,
}
# flake8 is not matching `Optional[str]` to `Any` for some reason...
metadata["view"] = cast(
Any, cls.get_create_view(database, schema_name, table_name)
)
return metadata
@classmethod
def get_create_view(
cls, database: "Database", schema: str, table: str
) -> Optional[str]:
"""
Return a CREATE VIEW statement, or `None` if not a view.
:param database: Database instance
:param schema: Schema name
:param table: Table (view) name
"""
from pyhive.exc import DatabaseError
engine = cls.get_engine(database, schema)
with closing(engine.raw_connection()) as conn:
with closing(conn.cursor()) as cursor:
sql = f"SHOW CREATE VIEW {schema}.{table}"
try:
cls.execute(cursor, sql)
polled = cursor.poll()
while polled:
time.sleep(0.2)
polled = cursor.poll()
except DatabaseError: # not a VIEW
return None
rows = cls.fetch_data(cursor, 1)
return rows[0][0]
@classmethod
def handle_cursor(cls, cursor: Any, query: Query, session: Session) -> None:
"""Updates progress information"""
query_id = query.id
logger.info(f"Query {query_id}: Polling the cursor for progress")
polled = cursor.poll()
# poll returns dict -- JSON status information or ``None``
# if the query is done
# https://github.com/dropbox/PyHive/blob/
# b34bdbf51378b3979eaf5eca9e956f06ddc36ca0/pyhive/presto.py#L178
while polled:
# Update the object and wait for the kill signal.
stats = polled.get("stats", {})
query = session.query(type(query)).filter_by(id=query_id).one()
if query.status in [QueryStatus.STOPPED, QueryStatus.TIMED_OUT]:
cursor.cancel()
break
if stats:
state = stats.get("state")
# if already finished, then stop polling
if state == "FINISHED":
break
completed_splits = float(stats.get("completedSplits"))
total_splits = float(stats.get("totalSplits"))
if total_splits and completed_splits:
progress = 100 * (completed_splits / total_splits)
logger.info(
"Query {} progress: {} / {} " # pylint: disable=logging-format-interpolation
"splits".format(query_id, completed_splits, total_splits)
)
if progress > query.progress:
query.progress = progress
session.commit()
time.sleep(1)
logger.info(f"Query {query_id}: Polling the cursor for progress")
polled = cursor.poll()
@classmethod
def _extract_error_message(cls, e: Exception) -> Optional[str]:
if (
hasattr(e, "orig")
and type(e.orig).__name__ == "DatabaseError" # type: ignore
and isinstance(e.orig[0], dict) # type: ignore
):
error_dict = e.orig[0] # type: ignore
return "{} at {}: {}".format(
error_dict.get("errorName"),
error_dict.get("errorLocation"),
error_dict.get("message"),
)
if type(e).__name__ == "DatabaseError" and hasattr(e, "args") and e.args:
error_dict = e.args[0]
return error_dict.get("message")
return utils.error_msg_from_exception(e)
@classmethod
def _partition_query( # pylint: disable=too-many-arguments,too-many-locals
cls,
table_name: str,
database: "Database",
limit: int = 0,
order_by: Optional[List[Tuple[str, bool]]] = None,
filters: Optional[Dict[Any, Any]] = None,
) -> str:
"""Returns a partition query
:param table_name: the name of the table to get partitions from
:type table_name: str
:param limit: the number of partitions to be returned
:type limit: int
:param order_by: a list of tuples of field name and a boolean
that determines if that field should be sorted in descending
order
:type order_by: list of (str, bool) tuples
:param filters: dict of field name and filter value combinations
"""
limit_clause = "LIMIT {}".format(limit) if limit else ""
order_by_clause = ""
if order_by:
l = []
for field, desc in order_by:
l.append(field + " DESC" if desc else "")
order_by_clause = "ORDER BY " + ", ".join(l)
where_clause = ""
if filters:
l = []
for field, value in filters.items():
l.append(f"{field} = '{value}'")
where_clause = "WHERE " + " AND ".join(l)
presto_version = database.get_extra().get("version")
# Partition select syntax changed in v0.199, so check here.
# Default to the new syntax if version is unset.
partition_select_clause = (
f'SELECT * FROM "{table_name}$partitions"'
if not presto_version
or StrictVersion(presto_version) >= StrictVersion("0.199")
else f"SHOW PARTITIONS FROM {table_name}"
)
sql = textwrap.dedent(
f"""\
{partition_select_clause}
{where_clause}
{order_by_clause}
{limit_clause}
"""
)
return sql
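    # Example output (illustrative; table and column names are hypothetical):
    #   cls._partition_query("my_table", database, 1, [("ds", True)])
    # renders, on Presto >= 0.199, roughly:
    #   SELECT * FROM "my_table$partitions"
    #   ORDER BY ds DESC
    #   LIMIT 1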
@classmethod
def where_latest_partition( # pylint: disable=too-many-arguments
cls,
table_name: str,
schema: Optional[str],
database: "Database",
query: Select,
columns: Optional[List] = None,
) -> Optional[Select]:
try:
col_names, values = cls.latest_partition(
table_name, schema, database, show_first=True
)
except Exception: # pylint: disable=broad-except
# table is not partitioned
return None
if values is None:
return None
column_names = {column.get("name") for column in columns or []}
for col_name, value in zip(col_names, values):
if col_name in column_names:
query = query.where(Column(col_name) == value)
return query
@classmethod
def _latest_partition_from_df( # pylint: disable=invalid-name
cls, df: pd.DataFrame
) -> Optional[List[str]]:
if not df.empty:
return df.to_records(index=False)[0].item()
return None
@classmethod
def latest_partition(
cls,
table_name: str,
schema: Optional[str],
database: "Database",
show_first: bool = False,
) -> Tuple[List[str], Optional[List[str]]]:
"""Returns col name and the latest (max) partition value for a table
:param table_name: the name of the table
:param schema: schema / database / namespace
:param database: database query will be run against
:type database: models.Database
:param show_first: displays the value for the first partitioning key
if there are many partitioning keys
:type show_first: bool
>>> latest_partition('foo_table')
(['ds'], ('2018-01-01',))
"""
indexes = database.get_indexes(table_name, schema)
if not indexes:
raise SupersetTemplateException(
f"Error getting partition for {schema}.{table_name}. "
"Verify that this table has a partition."
)
if len(indexes[0]["column_names"]) < 1:
raise SupersetTemplateException(
"The table should have one partitioned field"
)
elif not show_first and len(indexes[0]["column_names"]) > 1:
raise SupersetTemplateException(
"The table should have a single partitioned field "
"to use this function. You may want to use "
"`presto.latest_sub_partition`"
)
column_names = indexes[0]["column_names"]
part_fields = [(column_name, True) for column_name in column_names]
sql = cls._partition_query(table_name, database, 1, part_fields)
df = database.get_df(sql, schema)
return column_names, cls._latest_partition_from_df(df)
@classmethod
def latest_sub_partition(
cls, table_name: str, schema: Optional[str], database: "Database", **kwargs: Any
) -> Any:
"""Returns the latest (max) partition value for a table
A filtering criteria should be passed for all fields that are
partitioned except for the field to be returned. For example,
if a table is partitioned by (``ds``, ``event_type`` and
``event_category``) and you want the latest ``ds``, you'll want
to provide a filter as keyword arguments for both
``event_type`` and ``event_category`` as in
``latest_sub_partition('my_table',
event_category='page', event_type='click')``
:param table_name: the name of the table, can be just the table
name or a fully qualified table name as ``schema_name.table_name``
:type table_name: str
:param schema: schema / database / namespace
:type schema: str
:param database: database query will be run against
:type database: models.Database
:param kwargs: keyword arguments define the filtering criteria
on the partition list. There can be many of these.
:type kwargs: str
>>> latest_sub_partition('sub_partition_table', event_type='click')
'2018-01-01'
"""
indexes = database.get_indexes(table_name, schema)
part_fields = indexes[0]["column_names"]
for k in kwargs.keys(): # pylint: disable=consider-iterating-dictionary
            if k not in part_fields:
                msg = f"Field [{k}] is not part of the partitioning key"
raise SupersetTemplateException(msg)
if len(kwargs.keys()) != len(part_fields) - 1:
msg = (
"A filter needs to be specified for {} out of the " "{} fields."
).format(len(part_fields) - 1, len(part_fields))
raise SupersetTemplateException(msg)
for field in part_fields:
if field not in kwargs.keys():
field_to_return = field
sql = cls._partition_query(
table_name, database, 1, [(field_to_return, True)], kwargs
)
df = database.get_df(sql, schema)
if df.empty:
return ""
return df.to_dict()[field_to_return][0]
@classmethod
@cache.memoize()
def get_function_names(cls, database: "Database") -> List[str]:
"""
Get a list of function names that are able to be called on the database.
Used for SQL Lab autocomplete.
:param database: The database to get functions for
:return: A list of function names useable in the database
"""
return database.get_df("SHOW FUNCTIONS")["Function"].tolist()
#Get everything that the base depends on.
import math
from numpy.lib.utils import source
from workers.worker_base import *
import sqlalchemy as s
import time
#This is a worker base subclass that adds the ability to query github/gitlab with the api key
class WorkerGitInterfaceable(Worker):
def __init__(self, worker_type, config={}, given=[], models=[], data_tables=[], operations_tables=[], platform="github"):
super().__init__(worker_type, config, given, models, data_tables, operations_tables)
self.config.update({
'gh_api_key': self.augur_config.get_value('Database', 'key'),
'gitlab_api_key': self.augur_config.get_value('Database', 'gitlab_api_key')
})
#Fix loose attribute definition
self.headers = None
self.platform = platform
self.given = given
self.models = models
self.specs = {
'id': self.config['id'], # what the broker knows this worker as
'location': self.config['location'], # host + port worker is running on (so broker can send tasks here)
'qualifications': [
{
'given': self.given, # type of repo this worker can be given as a task
'models': self.models # models this worker can fill for a repo as a task
}
],
'config': self.config
}
# Send broker hello message
# Attempts to determine if these attributes exist
# If not, it creates them with default values
try:
self.tool_source
self.tool_version
self.data_source
except AttributeError:
self.tool_source = 'Augur Worker Testing'
self.tool_version = '0.0.0'
self.data_source = 'Augur Worker Testing'
#database interface, additional functionality with github interface.
def initialize_database_connections(self):
super().initialize_database_connections()
# Organize different api keys/oauths available
self.logger.info("Initializing API key.")
if 'gh_api_key' in self.config or 'gitlab_api_key' in self.config:
try:
self.init_oauths(self.platform)
except AttributeError:
self.logger.error("Worker not configured to use API key!")
else:
self.oauths = [{'oauth_id': 0}]
def find_id_from_login(self, login, platform='github'):
""" Retrieves our contributor table primary key value for the contributor with
the given GitHub login credentials, if this contributor is not there, then
they get inserted.
:param login: String, the GitHub login username to find the primary key id for
:return: Integer, the id of the row in our database with the matching GitHub login
"""
idSQL = s.sql.text("""
SELECT cntrb_id FROM contributors WHERE cntrb_login = '{}' \
AND LOWER(data_source) = '{} api'
""".format(login, platform))
rs = pd.read_sql(idSQL, self.db, params={})
data_list = [list(row) for row in rs.itertuples(index=False)]
try:
return data_list[0][0]
except:
self.logger.info('contributor needs to be added...')
if platform == 'github':
cntrb_url = ("https://api.github.com/users/" + login)
elif platform == 'gitlab':
cntrb_url = ("https://gitlab.com/api/v4/users?username=" + login )
self.logger.info("Hitting endpoint: {} ...\n".format(cntrb_url))
# Possible infinite loop if this request never succeeds?
while True:
try:
r = requests.get(url=cntrb_url, headers=self.headers)
break
except TimeoutError as e:
self.logger.info("Request timed out. Sleeping 10 seconds and trying again...\n")
time.sleep(30)
self.update_rate_limit(r)
contributor = r.json()
# Used primarily for the Gitlab block below
company = None
location = None
email = None
if 'company' in contributor:
company = contributor['company']
if 'location' in contributor:
location = contributor['location']
if 'email' in contributor:
email = contributor['email']
''' Because a user name nan exists, we needed to cast any string where their
user name appears as a string, because otherwise python was casting it as a float '''
if platform == 'github':
cntrb = {
'cntrb_login': str(contributor['login']) if 'login' in contributor else None,
'cntrb_email': str(contributor['email']) if 'email' in contributor else None,
'cntrb_company': str(contributor['company']) if 'company' in contributor else None,
'cntrb_location': str(contributor['location']) if 'location' in contributor else None,
'cntrb_created_at': contributor['created_at'] if 'created_at' in contributor else None,
'cntrb_canonical': str(contributor['email']) if 'email' in contributor else None,
'gh_user_id': contributor['id'] if 'id' in contributor else None,
'gh_login': str(contributor['login']) if 'login' in contributor else None,
'gh_url': str(contributor['url']) if 'url' in contributor else None,
'gh_html_url': str(contributor['html_url']) if 'html_url' in contributor else None,
'gh_node_id': contributor['node_id'] if 'node_id' in contributor else None,
'gh_avatar_url': str(contributor['avatar_url']) if 'avatar_url' in contributor else None,
'gh_gravatar_id': str(contributor['gravatar_id']) if 'gravatar_id' in contributor else None,
'gh_followers_url': str(contributor['followers_url']) if 'followers_url' in contributor else None,
'gh_following_url': str(contributor['following_url']) if 'following_url' in contributor else None,
'gh_gists_url': str(contributor['gists_url']) if 'gists_url' in contributor else None,
'gh_starred_url': str(contributor['starred_url']) if 'starred_url' in contributor else None,
'gh_subscriptions_url': str(contributor['subscriptions_url']) if 'subscriptions_url' in contributor else None,
'gh_organizations_url': str(contributor['organizations_url']) if 'organizations_url' in contributor else None,
'gh_repos_url': str(contributor['repos_url']) if 'repos_url' in contributor else None,
'gh_events_url': str(contributor['events_url']) if 'events_url' in contributor else None,
'gh_received_events_url': str(contributor['received_events_url']) if 'received_events_url' in contributor else None,
'gh_type': contributor['type'] if 'type' in contributor else None,
'gh_site_admin': contributor['site_admin'] if 'site_admin' in contributor else None,
'tool_source': self.tool_source,
'tool_version': self.tool_version,
'data_source': self.data_source
}
elif platform == 'gitlab':
cntrb = {
'cntrb_login': contributor[0]['username'] if 'username' in contributor[0] else None,
'cntrb_email': email,
'cntrb_company': company,
'cntrb_location': location,
'cntrb_created_at': contributor[0]['created_at'] if 'created_at' in contributor[0] else None,
'cntrb_canonical': email,
'gh_user_id': contributor[0]['id'],
'gh_login': contributor[0]['username'],
'gh_url': contributor[0]['web_url'],
'gh_html_url': None,
'gh_node_id': None,
'gh_avatar_url': contributor[0]['avatar_url'],
'gh_gravatar_id': None,
'gh_followers_url': None,
'gh_following_url': None,
'gh_gists_url': None,
'gh_starred_url': None,
'gh_subscriptions_url': None,
'gh_organizations_url': None,
'gh_repos_url': None,
'gh_events_url': None,
'gh_received_events_url': None,
'gh_type': None,
'gh_site_admin': None,
'tool_source': self.tool_source,
'tool_version': self.tool_version,
'data_source': self.data_source
}
result = self.db.execute(self.contributors_table.insert().values(cntrb))
self.logger.info("Primary key inserted into the contributors table: " + str(result.inserted_primary_key))
self.results_counter += 1
self.cntrb_id_inc = int(result.inserted_primary_key[0])
self.logger.info(f"Inserted contributor: {cntrb["cntrb_login"]}\n")
return self.find_id_from_login(login, platform)
def init_oauths(self, platform='github'):
self.oauths = []
self.headers = None
self.logger.info("Trying initialization.")
        # Make a list of the API keys in the config combined with keys stored in the database
# Select endpoint to hit solely to retrieve rate limit
# information from headers of the response
# Adjust header keys needed to fetch rate limit information from the API responses
if platform == 'github':
self.logger.debug("in Github block")
url = "https://api.github.com/users/sgoggins"
oauthSQL = s.sql.text("""
SELECT * FROM worker_oauth WHERE access_token <> '{}' and platform = 'github'
""".format(self.config['gh_api_key']))
key_name = 'gh_api_key'
rate_limit_header_key = "X-RateLimit-Remaining"
rate_limit_reset_header_key = "X-RateLimit-Reset"
self.logger.debug('end of github block.')
elif platform == 'gitlab':
url = "https://gitlab.com/api/v4/version"
oauthSQL = s.sql.text("""
SELECT * FROM worker_oauth WHERE access_token <> '{}' and platform = 'gitlab'
""".format(self.config['gitlab_api_key']))
key_name = 'gitlab_api_key'
rate_limit_header_key = 'ratelimit-remaining'
rate_limit_reset_header_key = 'ratelimit-reset'
for oauth in [{'oauth_id': 0, 'access_token': self.config[key_name]}] + json.loads(
pd.read_sql(oauthSQL, self.helper_db, params={}).to_json(orient="records")
):
if platform == 'github':
self.headers = {'Authorization': 'token %s' % oauth['access_token']}
elif platform == 'gitlab':
self.headers = {'Authorization': 'Bearer %s' % oauth['access_token']}
response = requests.get(url=url, headers=self.headers, timeout=180)
self.oauths.append({
'oauth_id': oauth['oauth_id'],
'access_token': oauth['access_token'],
'rate_limit': int(response.headers[rate_limit_header_key]),
'seconds_to_reset': (
datetime.datetime.fromtimestamp(
int(response.headers[rate_limit_reset_header_key])
) - datetime.datetime.now()
).total_seconds()
})
self.logger.debug("Found OAuth available for use: {}".format(self.oauths[-1]))
if len(self.oauths) == 0:
self.logger.info(
"No API keys detected, please include one in your config or in the "
"worker_oauths table in the augur_operations schema of your database."
)
# First key to be used will be the one specified in the config (first element in
# self.oauths array will always be the key in use)
if platform == 'github':
self.headers = {'Authorization': 'token %s' % self.oauths[0]['access_token']}
elif platform == 'gitlab':
self.headers = {'Authorization': 'Bearer %s' % self.oauths[0]['access_token']}
self.logger.info("OAuth initialized\n")
def enrich_cntrb_id(
self, data, key, action_map_additions={'insert': {'source': [], 'augur': []}},
platform='github', prefix=''
):
if not len(data):
self.logger.info(f"Enrich contrib data is empty for {len(data)}, for the key {key}.")
raise ValueError
self.logger.info(f"Enriching contributor ids for {len(data)} data points...")
source_df = pd.DataFrame(data)
expanded_source_df = self._add_nested_columns(
source_df.copy(), [key] + action_map_additions['insert']['source']
)
# Insert cntrbs that are not in db
cntrb_action_map = {
'insert': {
'source': [key] + action_map_additions['insert']['source'] + [f'{prefix}id'],
'augur': ['cntrb_login'] + action_map_additions['insert']['augur'] + ['gh_user_id']
}
}
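        # Shape of the action map (comment added for clarity): 'source' lists the
        # keys expected in the incoming platform payload and 'augur' the matching
        # contributors-table columns. For a hypothetical call with
        # key='user.login' and prefix='user.' this pairs
        #   ['user.login', 'user.id']  with  ['cntrb_login', 'gh_user_id'].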
table_values_cntrb = self.db.execute(
s.sql.select(self.get_relevant_columns(self.contributors_table,cntrb_action_map))
).fetchall()
source_data = expanded_source_df.to_dict(orient='records')
#Filter out bad data where we can't even hit the api.
source_data = [data for data in source_data if f'{prefix}login' in data and data[f'{prefix}login'] != None and type(data[f'{prefix}login']) is str]
self.logger.info(f"table_values_cntrb keys: {table_values_cntrb[0].keys()}")
# self.logger.info(f"source_data keys: {source_data[0].keys()}")
#We can't use this because of worker collisions
        # TODO: separate this method into its own worker.
#cntrb_id_offset = self.get_max_id(self.contributors_table, 'cntrb_id') - 1
self.logger.debug(f"Enriching {len(source_data)} contributors.")
# source_data = source_data.loc[data[f'{prefix}login'] != 'nan']
# loop through data to test if it is already in the database
for index, data in enumerate(source_data):
if data[f'{prefix}login'] == 'nan':
self.logger.debug("Nan user found continuing")
continue
#removed this log because it was generating a lot of data.
#self.logger.info(f"Enriching {index} of {len(source_data)}")
user_unique_ids = []
#Allow for alt identifiers to be checked if user.id is not present in source_data
try:
#This will trigger a KeyError if data has alt identifier.
data[f'{prefix}id']
for row in table_values_cntrb:
# removed checks for nan user in this block because this is getting all the gh_user_ids that are
# already in the database so it doesn't need to be filtered from the database, it needs to be
# filtered out so it is never inserted into the database
# Andrew Brain 12/21/2021
user_unique_ids.append(row['gh_user_id'])
            except KeyError as e:
self.print_traceback("Enrich_cntrb_id, data doesn't have user.id. Using node_id instead", e, True)
finally:
for row in table_values_cntrb:
try:
user_unique_ids.append(row['gh_node_id'])
except Exception as e:
self.logger.info(f"Error adding gh_node_id: {e}. Row: {row}")
self.print_traceback("", e, True)
#self.logger.info(f"gh_user_ids: {gh_user_ids}")
# self.logger.info(f"Users gh_user_id: {data["user.id"]}")
# in_user_ids = False
# if data['user.id'] in gh_user_ids:
# in_user_ids = True
# self.logger.info(f"{data["user.id"]} is in gh_user_ids")
# self.logger.info(f"table_values_cntrb len: {len(table_values_cntrb)}")
#Deal with if data
#See if we can check using the user.id
source_data_id = None
try:
source_data_id = data[f'{prefix}id']
except KeyError:
source_data_id = data[f'{prefix}node_id']
#if user.id is in the database then there is no need to add the contributor
if source_data_id in user_unique_ids:
self.logger.info("{} found in database".format(source_data_id))
user_id_row = []
try:
data[f'{prefix}id']
#gets the dict from the table_values_cntrb that contains data['user.id']
user_id_row = list(filter(lambda x: x['gh_user_id'] == source_data_id, table_values_cntrb))[0]
#### Andrew: in a small number of cases, using data on contributors originally gathered in late 2019, there
#### is a mismatch .. the gh_user_id for a login is different. I suspect this rare case to be one
#### where a person did something that changed their gh_user_id ... I am unsure how this can happen ...
except KeyError:
user_id_row = list(filter(lambda x: x['gh_node_id'] == source_data_id, table_values_cntrb))[0]
#pass # 12/3/2021 SPG ... added pass to try to get past this key error in large inserts.
continue # 12/3/2021 SPG ... may be better inside a loop
#assigns the cntrb_id to the source data to be returned to the workers
data['cntrb_id'] = user_id_row['cntrb_id']
self.logger.info(f"cntrb_id {data["cntrb_id"]} found in database and assigned to enriched data")
#contributor is not in the database
else:
self.logger.info("{} not in database, making api call".format(source_data_id))
self.logger.info("login: {}".format(data[f'{prefix}login']))
try:
url = ("https://api.github.com/users/" + str(data[f'{prefix}login']))
except Exception as e:
self.logger.info(f"Error when creating url: {e}. Data: {data}")
#pass # changed continue to pass 12/3/2021 SPG
continue # changed back 12/3/2021 SPG
attempts = 0
contributor = None
success = False
while attempts < 10:
self.logger.info(f"Hitting endpoint: {url} ...\n")
try:
response = requests.get(url=url, headers=self.headers)
except TimeoutError:
self.logger.info(f"User data request for enriching contributor data failed with {attempts} attempts! Trying again...")
time.sleep(10)
#pass # changed continue to pass 12/3/2021 SPG
continue # changed back 12/3/2021 SPG
self.update_rate_limit(response,platform=platform)
try:
contributor = response.json()
except:
contributor = json.loads(json.dumps(response.text))
continue # added continue 12/3/2021 SPG
if type(contributor) == dict:
self.logger.info("Request returned a dict!")
self.logger.info(f"Contributor data: {contributor}")
# contributor['gh_login'] = str(contributor['gh_login']) ## cast as string by SPG on 11/28/2021 due to `nan` user
success = True
break
elif type(contributor) == list:
self.logger.warning("Wrong type returned trying again...")
self.logger.info(f"Contributor data: {contributor}")
elif type(contributor) == str:
self.logger.info(f"Warning! page_data was string: {contributor}\n")
if "<!DOCTYPE html>" in contributor:
self.logger.info("HTML was returned, trying again...\n")
elif len(contributor) == 0:
self.logger.warning("Empty string, trying again...\n")
else:
try:
contributor = json.loads(contributor)
# contributor['gh_login'] = str(contributor['gh_login']) ## cast as string by SPG on 11/28/2021 due to `nan` user
success = True
break
except:
pass
attempts += 1
if not success:
break
self.logger.info(f"Contributor data: {contributor}")
cntrb = {
"cntrb_login": contributor['login'],
"cntrb_created_at": contributor['created_at'],
"cntrb_email": contributor['email'] if 'email' in contributor else None,
"cntrb_company": contributor['company'] if 'company' in contributor else None,
"cntrb_location": contributor['location'] if 'location' in contributor else None,
# "cntrb_type": , dont have a use for this as of now ... let it default to null
"cntrb_canonical": contributor['email'] if 'email' in contributor else None,
"gh_user_id": contributor['id'],
"gh_login": str(contributor['login']), ## cast as string by SPG on 11/28/2021 due to `nan` user
"gh_url": contributor['url'],
"gh_html_url": contributor['html_url'],
"gh_node_id": contributor['node_id'],
"gh_avatar_url": contributor['avatar_url'],
"gh_gravatar_id": contributor['gravatar_id'],
"gh_followers_url": contributor['followers_url'],
"gh_following_url": contributor['following_url'],
"gh_gists_url": contributor['gists_url'],
"gh_starred_url": contributor['starred_url'],
"gh_subscriptions_url": contributor['subscriptions_url'],
"gh_organizations_url": contributor['organizations_url'],
"gh_repos_url": contributor['repos_url'],
"gh_events_url": contributor['events_url'],
"gh_received_events_url": contributor['received_events_url'],
"gh_type": contributor['type'],
"gh_site_admin": contributor['site_admin'],
"cntrb_last_used" : None if 'updated_at' not in contributor else contributor['updated_at'],
"cntrb_full_name" : None if 'name' not in contributor else contributor['name'],
"tool_source": self.tool_source,
"tool_version": self.tool_version,
"data_source": self.data_source
}
#insert new contributor into database
# TODO: make this method its own worker. This errors because of collisions between github_worker and pull_request_worker.
#We can solve this by making another worker with a queue. It wouldn't have to be too complicated.
try:
self.db.execute(self.contributors_table.insert().values(cntrb))
except s.exc.IntegrityError:
self.logger.info(f"there was a collision caught ....")
self.logger.info(traceback.format_exc())
#pass # added by sean 11/29/2021 ... think it might be blocking comment insertion otherwise
continue # changed to continue on 12/3/2021
except Exception as e:
self.logger.info(f"Contributor was unable to be added to table! Attempting to get cntrb_id from table anyway because of possible collision. Error: {e}")
#pass # added by sean 11/29/2021 ... think it might be blocking comment insertion otherwise
continue
#Get the contributor id from the newly inserted contributor.
cntrb_id_row = self.db.execute(
s.sql.select(self.get_relevant_columns(self.contributors_table,cntrb_action_map)).where(
self.contributors_table.c.gh_user_id==cntrb["gh_user_id"]
)
).fetchall()
#Handle and log rare failure cases. If this part errors something is very wrong.
if len(cntrb_id_row) == 1:
data['cntrb_id'] = cntrb_id_row[0]['cntrb_id']
self.logger.info(f"cntrb_id {data["cntrb_id"]} found in database and assigned to enriched data")
elif len(cntrb_id_row) == 0:
self.logger.error("Couldn't find contributor in database. Something has gone very wrong. Augur ran into a contributor that is unable to be inserted into the contributors table but is also not present in that table.")
else:
self.logger.info(f"There are more than one contributors in the table with gh_user_id={cntrb["gh_user_id"]}")
cntrb_data = {
'cntrb_id': int(data['cntrb_id']), # came through as a float. Fixed 11/28/2021, SPG
'gh_node_id': cntrb['gh_node_id'],
'cntrb_login': str(cntrb['cntrb_login']), # NaN user issue. Fixed 11/28/2021, SPG
'gh_user_id': int(cntrb['gh_user_id']) # came through as a float. Fixed 11/28/2021, SPG
}
#This updates our list of who is already in the database as we iterate to avoid duplicates.
#People who make changes tend to make more than one in a row.
table_values_cntrb.append(cntrb_data)
self.logger.info(
"Contributor id enrichment successful, result has "
f"{len(source_data)} data points.\n"
)
for data in source_data:
self.logger.debug("User login type: " + str(type(data[f'{prefix}login'])) + ". Login: " + str(data[f'{prefix}login']))
try:
data['cntrb_id']
except:
self.logger.debug(f"AB ERROR: data exiting enrich_cntrb_id without cntrb_id, login is: " + str(data[f'{prefix}login']))
return source_data
# Try to construct the best url to ping GitHub's API for a username given an email.
"""
I changed this because of the following note on the API site: With the in qualifier you can
restrict your search to the username (login), full name, public email, or any combination of
these. When you omit this qualifier, only the username and email address are searched. For
privacy reasons, you cannot search by email domain name.
https://docs.github.com/en/github/searching-for-information-on-github/searching-on-github/searching-users#search-only-users-or-organizations
"""
def create_endpoint_from_email(self, email):
self.logger.info(f"Trying to resolve contributor from email: {email}")
# Note: I added "+type:user" to avoid having user owned organizations be returned
# Also stopped splitting per note above.
url = 'https://api.github.com/search/users?q={}+in:email+type:user'.format(
email)
return url
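# For example, a hypothetical email of "jane@example.com" would yield:
#   https://api.github.com/search/users?q=jane@example.com+in:email+type:user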
def query_github_contributors(self, entry_info, repo_id):
""" Data collection function
Query the GitHub API for contributors
"""
self.logger.info(f"Querying contributors with given entry info: {entry_info}\n")
## It absolutely doesn't matter if the contributor has already contributed to a repo. It only matters that they exist in our table, and
## if they DO, then we DO NOT want to insert them again in any GitHub method.
github_url = entry_info['given']['github_url'] if 'github_url' in entry_info['given'] else entry_info['given']['git_url']
# Extract owner/repo from the url for the endpoint
try:
owner, name = self.get_owner_repo(github_url)
except IndexError as e:
self.logger.error(f"Encountered bad entry info: {entry_info}")
return
# Set the base of the url and place to hold contributors to insert
contributors_url = (
f"https://api.github.com/repos/{owner}/{name}/" +
"contributors?per_page=100&page={}"
)
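# The trailing '{}' placeholder is filled in with a page number by self.paginate below.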
# Get contributors that we already have stored
# Set our duplicate and update column map keys (something other than PK) to
# check duplicates/needed column updates with
table = 'contributors'
table_pkey = 'cntrb_id'
update_col_map = {'cntrb_email': 'email'}
duplicate_col_map = {'cntrb_login': 'login'}
#list to hold contributors needing insertion or update
contributors = self.paginate(contributors_url, duplicate_col_map, update_col_map, table, table_pkey)
self.logger.info("Count of contributors needing insertion: " + str(len(contributors)) + "\n")
for repo_contributor in contributors:
try:
# Need to hit this single contributor endpoint to get extra data,
# including `created_at`.
cntrb_url = ("https://api.github.com/users/" + repo_contributor['login'])
self.logger.info("Hitting endpoint: " + cntrb_url + " ...\n")
r = requests.get(url=cntrb_url, headers=self.headers)
self.update_gh_rate_limit(r)
contributor = r.json()
company = None
location = None
email = None
if 'company' in contributor:
company = contributor['company']
if 'location' in contributor:
location = contributor['location']
if 'email' in contributor:
email = contributor['email']
canonical_email = contributor['email']
cntrb = {
"cntrb_login": contributor['login'],
"cntrb_created_at": contributor['created_at'],
"cntrb_email": email,
"cntrb_company": company,
"cntrb_location": location,
# "cntrb_type": , dont have a use for this as of now ... let it default to null
"cntrb_canonical": canonical_email,
"gh_user_id": contributor['id'],
"gh_login": contributor['login'],
"gh_url": contributor['url'],
"gh_html_url": contributor['html_url'],
"gh_node_id": contributor['node_id'], #This is what we are dup checking
"gh_avatar_url": contributor['avatar_url'],
"gh_gravatar_id": contributor['gravatar_id'],
"gh_followers_url": contributor['followers_url'],
"gh_following_url": contributor['following_url'],
"gh_gists_url": contributor['gists_url'],
"gh_starred_url": contributor['starred_url'],
"gh_subscriptions_url": contributor['subscriptions_url'],
"gh_organizations_url": contributor['organizations_url'],
"gh_repos_url": contributor['repos_url'],
"gh_events_url": contributor['events_url'],
"gh_received_events_url": contributor['received_events_url'],
"gh_type": contributor['type'],
"gh_site_admin": contributor['site_admin'],
"cntrb_last_used" : None if 'updated_at' not in contributor else contributor['updated_at'],
"cntrb_full_name" : None if 'name' not in contributor else contributor['name'],
"tool_source": self.tool_source,
"tool_version": self.tool_version,
"data_source": self.data_source
}
#dup check
#TODO: add additional fields to check if needed.
existingMatchingContributors = self.db.execute(
s.sql.select(
[self.contributors_table.c.gh_node_id]
).where(
self.contributors_table.c.gh_node_id==cntrb["gh_node_id"]
)
).fetchall()
if len(existingMatchingContributors) > 0:
break #if contributor already exists in table
# Commit insertion to table
if repo_contributor['flag'] == 'need_update':
result = self.db.execute(self.contributors_table.update().where(
self.contributors_table.c.cntrb_email==email).values(cntrb))
self.logger.info("Updated tuple in the contributors table with existing email: {}".format(email))
self.cntrb_id_inc = repo_contributor['pkey']
elif repo_contributor['flag'] == 'need_insertion':
result = self.db.execute(self.contributors_table.insert().values(cntrb))
self.logger.info("Primary key inserted into the contributors table: {}".format(result.inserted_primary_key))
#For workers that aren't an interface.
if self.worker_type != "Contributor_interface":
self.results_counter += 1
self.logger.info("Inserted contributor: " + contributor['login'] + "\n")
# Increment our global track of the cntrb id for the possibility of it being used as a FK
self.cntrb_id_inc = int(result.inserted_primary_key[0])
except Exception as e:
self.logger.error("Caught exception: {}".format(e))
self.logger.error(f"Traceback: {traceback.format_exc()}")
self.logger.error("Cascading Contributor Anomalie from missing repo contributor data: {} ...\n".format(cntrb_url))
continue
# Hit the endpoint specified by the url and return the parsed json payload if it is a dict.
# Returns None on failure.
def request_dict_from_endpoint(self, url, timeout_wait=10):
self.logger.info(f"Hitting endpoint: {url}")
attempts = 0
response_data = None
success = False
# This borrows the logic to safely hit an endpoint from paginate_endpoint.
while attempts < 10:
try:
response = requests.get(url=url, headers=self.headers)
except TimeoutError:
self.logger.info(
f"User data request for enriching contributor data failed with {attempts} attempts! Trying again...")
time.sleep(timeout_wait)
continue
# Make sure we know how many requests our api tokens have.
self.update_rate_limit(response, platform="github")
try:
response_data = response.json()
except:
response_data = json.loads(json.dumps(response.text))
if type(response_data) == dict:
# Sometimes GitHub sends us an error message in a dict instead of a string.
# While a bit annoying, it is easy to work around.
if 'message' in response_data:
try:
assert 'API rate limit exceeded' not in response_data['message']
except AssertionError as e:
self.logger.info(
f"Detected error in response data from gitHub. Trying again... Error: {e}")
attempts += 1
continue
# self.logger.info(f"Returned dict: {response_data}")
success = True
break
elif type(response_data) == list:
self.logger.warning("Wrong type returned, trying again...")
self.logger.info(f"Returned list: {response_data}")
elif type(response_data) == str:
self.logger.info(
f"Warning! page_data was string: {response_data}")
if "<!DOCTYPE html>" in response_data:
self.logger.info("HTML was returned, trying again...\n")
elif len(response_data) == 0:
self.logger.warning("Empty string, trying again...\n")
else:
try:
# Sometimes raw text can be converted to a dict
response_data = json.loads(response_data)
success = True
break
except:
pass
attempts += 1
if not success:
return None
return response_data
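# Illustrative sketch (not from the original source): a caller resolving a login from an
# email might combine this with create_endpoint_from_email above, e.g.
#   url = self.create_endpoint_from_email("jane@example.com")  # hypothetical email
#   login_json = self.request_dict_from_endpoint(url)
#   if login_json is not None and login_json.get('total_count', 0) > 0:
#       login = login_json['items'][0]['login']
# The 'total_count'/'items' keys follow GitHub's user-search response format.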
#probably a better version of query_github_contributors but uses bulk_insert which is a bit shaky right now.
def query_github_contributors_bulk(self, entry_info, repo_id):
""" Data collection function
Query the GitHub API for contributors.
Uses paginate_endpoint rather than paginate, and supports stagger for larger repos.
"""
self.logger.info(f"Querying contributors with given entry info: {entry_info}\n")
github_url = entry_info['given']['github_url'] if 'github_url' in entry_info['given'] else entry_info['given']['git_url']
owner, name = self.get_owner_repo(github_url)
contributors_url = (f"https://api.github.com/repos/{owner}/{name}/" +
"contributors?per_page=100&page={}")
action_map = {
'insert': {
'source': ['login'],
'augur': ['cntrb_login']
},
'update': {
'source': ['email'],
'augur': ['cntrb_email']
}
}
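# The action_map drives the dedupe/update check in paginate_endpoint: source rows whose
# 'login' is not yet stored as cntrb_login are queued for insertion, and stored rows whose
# cntrb_email differs from the source 'email' are queued for an update.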
source_contributors = self.paginate_endpoint(contributors_url, action_map=action_map,
table=self.contributors_table)
contributors_insert = []
for repo_contributor in source_contributors['insert']:
# Need to hit this single contributor endpoint to get extra data
cntrb_url = (f"https://api.github.com/users/{repo_contributor["login"]}")
self.logger.info(f"Hitting endpoint: {cntrb_url} ...\n")
r = requests.get(url=cntrb_url, headers=self.headers)
self.update_gh_rate_limit(r)
contributor = r.json()
contributors_insert.append({
'cntrb_login': contributor['login'],
'cntrb_created_at': contributor['created_at'],
'cntrb_email': contributor['email'] if 'email' in contributor else None,
'cntrb_company': contributor['company'] if 'company' in contributor else None,
'cntrb_location': contributor['location'] if 'location' in contributor else None,
'cntrb_canonical': contributor['email'] if 'email' in contributor else None,
'gh_user_id': contributor['id'],
'gh_login': contributor['login'],
'gh_url': contributor['url'],
'gh_html_url': contributor['html_url'],
'gh_node_id': contributor['node_id'],
'gh_avatar_url': contributor['avatar_url'],
'gh_gravatar_id': contributor['gravatar_id'],
'gh_followers_url': contributor['followers_url'],
'gh_following_url': contributor['following_url'],
'gh_gists_url': contributor['gists_url'],
'gh_starred_url': contributor['starred_url'],
'gh_subscriptions_url': contributor['subscriptions_url'],
'gh_organizations_url': contributor['organizations_url'],
'gh_repos_url': contributor['repos_url'],
'gh_events_url': contributor['events_url'],
'gh_received_events_url': contributor['received_events_url'],
'gh_type': contributor['type'],
'gh_site_admin': contributor['site_admin'],
'tool_source': self.tool_source,
'tool_version': self.tool_version,
'data_source': self.data_source
})
contributors_insert_result, contributors_update_result = self.bulk_insert(self.contributors_table,
update=source_contributors['update'], unique_columns=action_map['insert']['augur'],
insert=contributors_insert, update_columns=action_map['update']['augur'])
def query_gitlab_contributors(self, entry_info, repo_id):
gitlab_url = (
entry_info['given']['gitlab_url'] if 'gitlab_url' in entry_info['given']
else entry_info['given']['git_url']
)
self.logger.info("Querying contributors with given entry info: " + str(entry_info) + "\n")
path = urlparse(gitlab_url)
split = path[2].split('/')
owner = split[1]
name = split[2]
# Handles git url case by removing the extension
if ".git" in name:
name = name[:-4]
url_encoded_format = quote(owner + '/' + name, safe='')
table = 'contributors'
table_pkey = 'cntrb_id'
### Here we are adding gitlab user information from the API
### Following Gabe's rework of the contributor worker.
### The GitLab API will NEVER give you an email. It will let you
### Query an email, but never give you one.
### ## Gitlab email api: https://gitlab.com/api/v4/users?search=s@goggins.com
### We don't need to update right now, so commenting out.
### TODO: SOLVE LOGIC.
# update_col_map = {'cntrb_email': 'email'}
update_col_map = {}
duplicate_col_map = {'gl_username': 'username'}
# list to hold contributors needing insertion or update
contributors = self.paginate("https://gitlab.com/api/v4/projects/" + url_encoded_format + "/repository/contributors?per_page=100&page={}", duplicate_col_map, update_col_map, table, table_pkey, platform='gitlab')
for repo_contributor in contributors:
try:
cntrb_compressed_url = ("https://gitlab.com/api/v4/users?search=" + repo_contributor['email'])
self.logger.info("Hitting endpoint: " + cntrb_compressed_url + " ...\n")
r = requests.get(url=cntrb_compressed_url, headers=self.headers)
contributor_compressed = r.json()
email = repo_contributor['email']
self.logger.info(contributor_compressed)
if len(contributor_compressed) == 0 or type(contributor_compressed) is dict or "id" not in contributor_compressed[0]:
continue
self.logger.info("Fetching for user: " + str(contributor_compressed[0]["id"]))
cntrb_url = ("https://gitlab.com/api/v4/users/" + str(contributor_compressed[0]["id"]))
self.logger.info("Hitting end point to get complete contributor info now: " + cntrb_url + "...\n")
r = requests.get(url=cntrb_url, headers=self.headers)
contributor = r.json()
cntrb = {
"gl_id": contributor.get('gl_id', None),
"gl_full_name": contributor.get('full_name', None),
"gl_username": contributor.get('username', None),
"gl_state": contributor.get('state', None),
"gl_avatar_url": contributor.get('avatar_url', None),
"gl_web_url": contributor.get('web_url', None),
#"cntrb_login": contributor.get('username', None),
#"cntrb_created_at": contributor.get('created_at', None),
"cntrb_email": ('email', None),
#"cntrb_company": contributor.get('organization', None),
#"cntrb_location": contributor.get('location', None),
# "cntrb_type": , dont have a use for this as of now ... let it default to null
#"cntrb_canonical": contributor.get('public_email', None),
#"gh_user_id": contributor.get('id', None),
#"gh_login": contributor.get('username', None),
#"gh_url": contributor.get('web_url', None),
#"gh_html_url": contributor.get('web_url', None),
#"gh_node_id": None,
#"gh_avatar_url": contributor.get('avatar_url', None),
#"gh_gravatar_id": None,
#"gh_followers_url": None,
#"gh_following_url": None,
#"gh_gists_url": None,
#"gh_starred_url": None,
#"gh_subscriptions_url": None,
#"gh_organizations_url": None,
#"gh_repos_url": None,
#"gh_events_url": None,
#"gh_received_events_url": None,
#"gh_type": None,
#"gh_site_admin": None,
"tool_source": self.tool_source,
"tool_version": self.tool_version,
"data_source": self.data_source
}
# Commit insertion to table
if repo_contributor['flag'] == 'need_update':
result = self.db.execute(self.contributors_table.update().where(
self.contributors_table.c.cntrb_email == email).values(cntrb))
self.logger.info("Updated tuple in the contributors table with existing email: {}".format(email))
self.cntrb_id_inc = repo_contributor['pkey']
elif repo_contributor['flag'] == 'need_insertion':
result = self.db.execute(self.contributors_table.insert().values(cntrb))
self.logger.info("Primary key inserted into the contributors table: {}".format(result.inserted_primary_key))
self.results_counter += 1
self.logger.info("Inserted contributor: " + contributor['username'] + "\n")
# Increment our global track of the cntrb id for the possibility of it being used as a FK
self.cntrb_id_inc = int(result.inserted_primary_key[0])
except Exception as e:
self.logger.info("Caught exception: {}".format(e))
self.logger.info("Cascading Contributor Anomalie from missing repo contributor data: {} ...\n".format(cntrb_url))
continue
def update_gitlab_rate_limit(self, response, bad_credentials=False, temporarily_disable=False):
# Try to get rate limit from request headers, sometimes it does not work (GH's issue)
# In that case we just decrement from the last received header count
if bad_credentials and len(self.oauths) > 1:
self.logger.info(
f"Removing oauth with bad credentials from consideration: {self.oauths[0]}"
)
del self.oauths[0]
if temporarily_disable:
self.logger.info("Gitlab rate limit reached. Temp. disabling...")
self.oauths[0]['rate_limit'] = 0
else:
try:
self.oauths[0]['rate_limit'] = int(response.headers['RateLimit-Remaining'])
except:
self.oauths[0]['rate_limit'] -= 1
self.logger.info("Updated rate limit, you have: " +
str(self.oauths[0]['rate_limit']) + " requests remaining.")
if self.oauths[0]['rate_limit'] <= 0:
try:
reset_time = response.headers['RateLimit-Reset']
except Exception as e:
self.logger.info(f"Could not get reset time from headers because of error: {e}")
reset_time = 3600
time_diff = datetime.datetime.fromtimestamp(int(reset_time)) - datetime.datetime.now()
self.logger.info("Rate limit exceeded, checking for other available keys to use.")
# Find the oauth with the highest rate limit remaining out of our list of oauths
new_oauth = self.oauths[0]
# Endpoint to hit solely to retrieve rate limit information from headers of the response
url = "https://gitlab.com/api/v4/version"
other_oauths = self.oauths[0:] if len(self.oauths) > 1 else []
for oauth in other_oauths:
# self.logger.info("Inspecting rate limit info for oauth: {}\n".format(oauth))
self.headers = {"PRIVATE-TOKEN" : oauth['access_token']}
response = requests.get(url=url, headers=self.headers)
oauth['rate_limit'] = int(response.headers['RateLimit-Remaining'])
oauth['seconds_to_reset'] = (
datetime.datetime.fromtimestamp(
int(response.headers['RateLimit-Reset'])
) - datetime.datetime.now()
).total_seconds()
# Update oauth to switch to if a higher limit is found
if oauth['rate_limit'] > new_oauth['rate_limit']:
self.logger.info(f"Higher rate limit found in oauth: {oauth}")
new_oauth = oauth
elif (
oauth['rate_limit'] == new_oauth['rate_limit']
and oauth['seconds_to_reset'] < new_oauth['seconds_to_reset']
):
self.logger.info(
f"Lower wait time found in oauth with same rate limit: {oauth}"
)
new_oauth = oauth
if new_oauth['rate_limit'] <= 0 and new_oauth['seconds_to_reset'] > 0:
self.logger.info(
"No oauths with >0 rate limit were found, waiting for oauth with "
f"smallest wait time: {new_oauth}\n"
)
time.sleep(new_oauth['seconds_to_reset'])
# Make new oauth the 0th element in self.oauths so we know which one is in use
index = self.oauths.index(new_oauth)
self.oauths[0], self.oauths[index] = self.oauths[index], self.oauths[0]
self.logger.info("Using oauth: {}\n".format(self.oauths[0]))
# Change headers to be using the new oauth's key
self.headers = {"PRIVATE-TOKEN" : self.oauths[0]['access_token']}
def update_gh_rate_limit(self, response, bad_credentials=False, temporarily_disable=False):
# Try to get rate limit from request headers, sometimes it does not work (GH's issue)
# In that case we just decrement from the last received header count
if bad_credentials and len(self.oauths) > 1:
self.logger.warning(
f"Removing oauth with bad credentials from consideration: {self.oauths[0]}"
)
del self.oauths[0]
if temporarily_disable:
self.logger.debug(
"Github thinks we are abusing their api. Preventing use "
"of this key until its rate limit resets..."
)
self.oauths[0]['rate_limit'] = 0
else:
try:
self.oauths[0]['rate_limit'] = int(response.headers['X-RateLimit-Remaining'])
# self.logger.info("Recieved rate limit from headers\n")
except:
self.oauths[0]['rate_limit'] -= 1
self.logger.info("Headers did not work, had to decrement")
time.sleep(30)
self.logger.info(
f"Updated rate limit, you have: {self.oauths[0]["rate_limit"]} requests remaining."
)
#Stalls after here for some reason.
if self.oauths[0]['rate_limit'] <= 0:
try:
reset_time = response.headers['X-RateLimit-Reset']
except Exception as e:
self.logger.error(f"Could not get reset time from headers because of error: {e}")
reset_time = 3600
time_diff = datetime.datetime.fromtimestamp(int(reset_time)) - datetime.datetime.now()
self.logger.info("Rate limit exceeded, checking for other available keys to use.")
# Find the oauth with the highest rate limit remaining out of our list of oauths
new_oauth = self.oauths[0]
# Endpoint to hit solely to retrieve rate limit information from headers of the response
url = "https://api.github.com/users/sgoggins"
other_oauths = self.oauths[0:] if len(self.oauths) > 1 else []
for oauth in other_oauths:
# self.logger.info("Inspecting rate limit info for oauth: {}\n".format(oauth))
self.headers = {'Authorization': 'token %s' % oauth['access_token']}
attempts = 3
success = False
while attempts > 0 and not success:
response = requests.get(url=url, headers=self.headers)
try:
oauth['rate_limit'] = int(response.headers['X-RateLimit-Remaining'])
oauth['seconds_to_reset'] = (
datetime.datetime.fromtimestamp(
int(response.headers['X-RateLimit-Reset'])
) - datetime.datetime.now()
).total_seconds()
success = True
except Exception as e:
self.logger.info(
f"oath method ran into error getting info from headers: {e}\n"
)
self.logger.info(f"{self.headers}\n{url}\n")
attempts -= 1
if not success:
continue
# Update oauth to switch to if a higher limit is found
if oauth['rate_limit'] > new_oauth['rate_limit']:
self.logger.info("Higher rate limit found in oauth: {}\n".format(oauth))
new_oauth = oauth
elif (
oauth['rate_limit'] == new_oauth['rate_limit']
and oauth['seconds_to_reset'] < new_oauth['seconds_to_reset']
):
self.logger.info(
f"Lower wait time found in oauth with same rate limit: {oauth}\n"
)
new_oauth = oauth
if new_oauth['rate_limit'] <= 0 and new_oauth['seconds_to_reset'] > 0:
self.logger.info(
"No oauths with >0 rate limit were found, waiting for oauth with "
f"smallest wait time: {new_oauth}\n"
)
time.sleep(new_oauth['seconds_to_reset'])
# Make new oauth the 0th element in self.oauths so we know which one is in use
index = self.oauths.index(new_oauth)
self.oauths[0], self.oauths[index] = self.oauths[index], self.oauths[0]
self.logger.info("Using oauth: {}\n".format(self.oauths[0]))
# Change headers to be using the new oauth's key
self.headers = {'Authorization': 'token %s' % self.oauths[0]['access_token']}
def update_rate_limit(
self, response, bad_credentials=False, temporarily_disable=False, platform="gitlab"
):
if platform == 'gitlab':
return self.update_gitlab_rate_limit(
response, bad_credentials=bad_credentials, temporarily_disable=temporarily_disable
)
elif platform == 'github':
return self.update_gh_rate_limit(
response, bad_credentials=bad_credentials, temporarily_disable=temporarily_disable
)
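# For example, self.update_rate_limit(response, platform='github') delegates to
# update_gh_rate_limit(response), while the default platform='gitlab' delegates to
# update_gitlab_rate_limit(response).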
#insertion_method and stagger are arguments that allow paginate_endpoint to insert periodically (roughly every insertion_threshold pages) instead of holding everything until the end.
def paginate_endpoint(
self, url, action_map={}, table=None, where_clause=True, platform='github', in_memory=True, stagger=False, insertion_method=None, insertion_threshold=1000
):
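""" Paginate the given GitHub/GitLab endpoint and sort the results against `table`.
Returns a dict with 'insert' (rows not yet in the table), 'update' (rows needing an
update according to action_map) and 'all' (every row returned by the endpoint).
When stagger is True and an insertion_method is supplied, accumulated rows are flushed
through insertion_method roughly every insertion_threshold pages to limit memory use.
"""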
#Get augur columns using the action map along with the primary key
table_values = self.db.execute(
s.sql.select(self.get_relevant_columns(table, action_map)).where(where_clause)
).fetchall()
page_number = 1
multiple_pages = False
need_insertion = []
need_update = []
#Stores sum of page data
all_data = []
forward_pagination = True
backwards_activation = False
last_page_number = -1
#Block to handle page queries and retry at least 10 times
while True:
# Multiple attempts to hit endpoint
num_attempts = 0
success = False
while num_attempts < 10:
self.logger.info(f"Hitting endpoint: {url.format(page_number)}...\n")
try:
response = requests.get(url=url.format(page_number), headers=self.headers)
except TimeoutError as e:
self.logger.info("Request timed out. Sleeping 10 seconds and trying again...\n")
time.sleep(10)
continue
self.update_rate_limit(response, platform=platform)
try:
page_data = response.json()
except:
page_data = json.loads(json.dumps(response.text))
if type(page_data) == list:
success = True
break
elif type(page_data) == dict:
self.logger.info("Request returned a dict: {}\n".format(page_data))
if page_data['message'] == "Not Found":
self.logger.warning(
"Github repo was not found or does not exist for endpoint: "
f"{url.format(page_number)}\n"
)
break
if "You have exceeded a secondary rate limit. Please wait a few minutes before you try again" in page_data['message']:
num_attempts -= 1
self.logger.info('\n\n\n\nSleeping for 100 seconds due to secondary rate limit issue.\n\n\n\n')
time.sleep(100)
if "You have triggered an abuse detection mechanism." in page_data['message']:
num_attempts -= 1
self.update_rate_limit(response, temporarily_disable=True,platform=platform)
if page_data['message'] == "Bad credentials":
self.logger.info("\n\n\n\n\n\n\n POSSIBLY BAD TOKEN \n\n\n\n\n\n\n")
self.update_rate_limit(response, bad_credentials=True, platform=platform)
elif type(page_data) == str:
self.logger.info(f"Warning! page_data was string: {page_data}\n")
if "<!DOCTYPE html>" in page_data:
self.logger.info("HTML was returned, trying again...\n")
elif len(page_data) == 0:
self.logger.warning("Empty string, trying again...\n")
else:
try:
page_data = json.loads(page_data)
success = True
break
except:
pass
num_attempts += 1
if not success:
break
# Success
# Determine if continued pagination is needed
if len(page_data) == 0:
self.logger.info("Response was empty, breaking from pagination.\n")
break
all_data += page_data
if not forward_pagination:
# Checking contents of requests with what we already have in the db
page_insertions, page_updates = self.organize_needed_data(
page_data, table_values, list(table.primary_key)[0].name,
action_map
)
# Reached a page where we already have all tuples
if len(need_insertion) == 0 and len(need_update) == 0 and \
backwards_activation:
self.logger.info(
"No more pages with unknown tuples, breaking from pagination.\n"
)
break
need_insertion += page_insertions
need_update += page_updates
# Find last page so we can decrement from there
if 'last' in response.links and last_page_number == -1:
if platform == 'github':
last_page_number = int(response.links['last']['url'][-6:].split('=')[1])
elif platform == 'gitlab':
last_page_number = int(response.links['last']['url'].split('&')[2].split('=')[1])
if not forward_pagination and not backwards_activation:
page_number = last_page_number
backwards_activation = True
self.logger.info("Analyzation of page {} of {} complete\n".format(page_number,
int(last_page_number) if last_page_number != -1 else "*last page not known*"))
if (page_number <= 1 and not forward_pagination) or \
(page_number >= last_page_number and forward_pagination):
self.logger.info("No more pages to check, breaking from pagination.\n")
break
#This is where we insert periodically rather than all at once.
#Makes sure that stagger is enabled, we have an insertion method, and the insertion happens every insertion_threshold pages.
if stagger and insertion_method != None and page_number % insertion_threshold == 0:
#call insertion method passed as argument.
staggered_source_prs = {
'insert' : need_insertion,
'update' : need_update,
'all' : all_data
}
#Use the method the subclass needs in order to insert the data.
insertion_method(staggered_source_prs,action_map)
#clear the data from memory and avoid duplicate insertions.
need_insertion = []
need_update = []
all_data = []
page_number = page_number + 1 if forward_pagination else page_number - 1
if forward_pagination:
need_insertion, need_update = self.organize_needed_data(
all_data, table_values, list(table.primary_key)[0].name, action_map
)
return {
'insert': need_insertion,
'update': need_update,
'all': all_data
}
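# Illustrative sketch of a call (mirroring query_github_contributors_bulk above):
#   source_contributors = self.paginate_endpoint(
#       contributors_url,
#       action_map={'insert': {'source': ['login'], 'augur': ['cntrb_login']},
#                   'update': {'source': ['email'], 'augur': ['cntrb_email']}},
#       table=self.contributors_table)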
#TODO: deprecated but still used by many other methods
def paginate(self, url, duplicate_col_map, update_col_map, table, table_pkey, where_clause="", value_update_col_map={}, platform="github"):
""" DEPRECATED
Paginate either backwards or forwards (depending on the value of the worker's
finishing_task attribute) through all the GitHub or GitLab api endpoint pages.
:param url: String, the url of the API endpoint we are paginating through, expects
a curly brace string formatter within the string to format the Integer
representing the page number that is wanted to be returned
:param duplicate_col_map: Dictionary, maps database field names to the column names
of the source data for columns that should be checked for duplicates (if the source
data value == the value in an existing database row, then this element is a
duplicate and would not need an insertion). Key is the database field name,
value is the source data column name. Example: {'gh_issue_id': 'id'}
:param update_col_map: Dictionary, maps database field names to the column names
of the source data for columns that should be checked for updates (if the source
data value != the value in an existing database row, then an update is needed).
Key is the database field name, value is the source data column name.
Example: {'gh_issue_id': 'id'}
:param table: String, the name of the table that holds the values to check for
duplicates/updates against
:param table_pkey: String, the field name of the primary key of the table in
the database that we are getting the values for to cross-reference to check
for duplicates.
:param where_clause: String, optional where clause to filter the values
that are queried when preparing the values that will be cross-referenced
for duplicates/updates
:param value_update_col_map: Dictionary, sometimes we add a new field to a table,
and we want to trigger an update of that row in the database even if all of the
data values are the same and would not need an update ordinarily. Checking for
a specific existing value in the database field allows us to do this. The key is the
name of the field in the database we are checking for a specific value to trigger
an update, the value is the value we are checking for equality to trigger an update.
Example: {'cntrb_id': None}
:return: List of dictionaries, all data points from the pages of the specified API endpoint
each with a 'flag' key-value pair representing the required action to take with that
data point (i.e. 'need_insertion', 'need_update', 'none')
"""
update_keys = list(update_col_map.keys()) if update_col_map else []
update_keys += list(value_update_col_map.keys()) if value_update_col_map else []
cols_to_query = list(duplicate_col_map.keys()) + update_keys + [table_pkey]
table_values = self.get_table_values(cols_to_query, [table], where_clause)
i = 1
multiple_pages = False
tuples = []
while True:
num_attempts = 0
success = False
while num_attempts < 3:
self.logger.info(f'Hitting endpoint: {url.format(i)}...\n')
r = requests.get(url=url.format(i), headers=self.headers)
self.update_rate_limit(r, platform=platform)
if 'last' not in r.links:
last_page = None
else:
if platform == "github":
last_page = r.links['last']['url'][-6:].split('=')[1]
elif platform == "gitlab":
last_page = r.links['last']['url'].split('&')[2].split("=")[1]
self.logger.info("Analyzing page {} of {}\n".format(i, int(last_page) + 1 if last_page is not None else '*last page not known*'))
try:
j = r.json()
except:
j = json.loads(json.dumps(r.text))
if type(j) != dict and type(j) != str:
success = True
break
elif type(j) == dict:
self.logger.info("Request returned a dict: {}\n".format(j))
if j['message'] == 'Not Found':
self.logger.warning("Github repo was not found or does not exist for endpoint: {}\n".format(url))
break
if j['message'] == 'You have triggered an abuse detection mechanism. Please wait a few minutes before you try again.':
num_attempts -= 1
self.logger.info("rate limit update code goes here")
self.update_rate_limit(r, temporarily_disable=True,platform=platform)
if j['message'] == 'Bad credentials':
self.logger.info("rate limit update code goes here")
self.update_rate_limit(r, bad_credentials=True, platform=platform)
elif type(j) == str:
self.logger.info(f'J was string: {j}\n')
if '<!DOCTYPE html>' in j:
self.logger.info('HTML was returned, trying again...\n')
elif len(j) == 0:
self.logger.warning('Empty string, trying again...\n')
else:
try:
j = json.loads(j)
success = True
break
except:
pass
num_attempts += 1
if not success:
break
# Find last page so we can decrement from there
if 'last' in r.links and not multiple_pages and not self.finishing_task:
if platform == "github":
param = r.links['last']['url'][-6:]
i = int(param.split('=')[1]) + 1
elif platform == "gitlab":
i = int(r.links['last']['url'].split('&')[2].split("=")[1]) + 1
self.logger.info("Multiple pages of request, last page is " + str(i - 1) + "\n")
multiple_pages = True
elif not multiple_pages and not self.finishing_task:
self.logger.info("Only 1 page of request\n")
elif self.finishing_task:
self.logger.info("Finishing a previous task, paginating forwards ..."
" excess rate limit requests will be made\n")
if len(j) == 0:
self.logger.info("Response was empty, breaking from pagination.\n")
break
# Checking contents of requests with what we already have in the db
j = self.assign_tuple_action(j, table_values, update_col_map, duplicate_col_map, table_pkey, value_update_col_map)
if not j:
self.logger.error("Assigning tuple action failed, moving to next page.\n")
i = i + 1 if self.finishing_task else i - 1
continue
try:
to_add = [obj for obj in j if obj not in tuples and (obj['flag'] != 'none')]
except Exception as e:
self.logger.error("Failure accessing data of page: {}. Moving to next page.\n".format(e))
i = i + 1 if self.finishing_task else i - 1
continue
if len(to_add) == 0 and multiple_pages and 'last' in r.links:
self.logger.info("{}".format(r.links['last']))
if platform == "github":
page_number = int(r.links['last']['url'][-6:].split('=')[1])
elif platform == "gitlab":
page_number = int(r.links['last']['url'].split('&')[2].split("=")[1])
if i - 1 != page_number:
self.logger.info("No more pages with unknown tuples, breaking from pagination.\n")
break
tuples += to_add
i = i + 1 if self.finishing_task else i - 1
# Since we already would've checked the first page... break
if (i == 1 and multiple_pages and not self.finishing_task) or i < 1 or len(j) == 0:
self.logger.info("No more pages to check, breaking from pagination.\n")
break
return tuples
def new_paginate_endpoint(
self, url, action_map={}, table=None, where_clause=True, platform='github'
):
page_number = 1
multiple_pages = False
need_insertion = []
need_update = []
all_data = []
forward_pagination = True
backwards_activation = False
last_page_number = -1
while True:
# Multiple attempts to hit endpoint
num_attempts = 0
success = False
while num_attempts < 10:
self.logger.info("hitting an endpiont")
# f"Hitting endpoint: ...\n"
# f"{url.format(page_number)} on page number. \n")
try:
response = requests.get(url=url.format(page_number), headers=self.headers)
except TimeoutError as e:
self.logger.info("Request timed out. Sleeping 10 seconds and trying again...\n")
time.sleep(10)
continue
self.update_rate_limit(response, platform=platform)
try:
page_data = response.json()
except:
page_data = json.loads(json.dumps(response.text))
if type(page_data) == list:
success = True
break
elif type(page_data) == dict:
self.logger.info("Request returned a dict: {}\n".format(page_data))
if page_data['message'] == "Not Found":
self.logger.warning(
"Github repo was not found or does not exist for endpoint: "
f"{url.format(page_number)}\n"
)
break
if "You have triggered an abuse detection mechanism." in page_data['message']:
num_attempts -= 1
self.update_rate_limit(response, temporarily_disable=True,platform=platform)
if page_data['message'] == "Bad credentials":
self.update_rate_limit(response, bad_credentials=True, platform=platform)
elif type(page_data) == str:
self.logger.info(f"Warning! page_data was string: {page_data}\n")
if "<!DOCTYPE html>" in page_data:
self.logger.info("HTML was returned, trying again...\n")
elif len(page_data) == 0:
self.logger.warning("Empty string, trying again...\n")
else:
try:
page_data = json.loads(page_data)
success = True
break
except:
pass
num_attempts += 1
if not success:
break
# Success
# Determine if continued pagination is needed
if len(page_data) == 0:
self.logger.info("Response was empty, breaking from pagination.\n")
break
all_data += page_data
if not forward_pagination:
# Checking contents of requests with what we already have in the db
page_insertions, page_updates = self.new_organize_needed_data(
page_data, augur_table=table, action_map=action_map
)
# Reached a page where we already have all tuples
if len(need_insertion) == 0 and len(need_update) == 0 and \
backwards_activation:
self.logger.info(
"No more pages with unknown tuples, breaking from pagination.\n"
)
break
need_insertion += page_insertions
need_update += page_updates
# Find last page so we can decrement from there
if 'last' in response.links and last_page_number == -1:
if platform == 'github':
last_page_number = int(response.links['last']['url'][-6:].split('=')[1])
elif platform == 'gitlab':
last_page_number = int(response.links['last']['url'].split('&')[2].split('=')[1])
if not forward_pagination and not backwards_activation:
page_number = last_page_number
backwards_activation = True
self.logger.info("Analyzation of page {} of {} complete\n".format(page_number,
int(last_page_number) if last_page_number != -1 else "*last page not known*"))
if (page_number <= 1 and not forward_pagination) or \
(page_number >= last_page_number and forward_pagination):
self.logger.info("No more pages to check, breaking from pagination.\n")
break
page_number = page_number + 1 if forward_pagination else page_number - 1
if forward_pagination:
need_insertion, need_update = self.new_organize_needed_data(
all_data, augur_table=table, action_map=action_map
)
return {
'insert': need_insertion,
'update': need_update,
'all': all_data
}
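# Note: new_paginate_endpoint mirrors paginate_endpoint but defers to
# new_organize_needed_data and omits the staggered-insertion path.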
#Get everything that the base depends on.
import math
from numpy.lib.utils import source
from workers.worker_base import *
import sqlalchemy as s
import time
#This is a worker base subclass that adds the ability to query github/gitlab with the api key
class WorkerGitInterfaceable(Worker):
def __init__(self, worker_type, config={}, given=[], models=[], data_tables=[], operations_tables=[], platform="github"):
super().__init__(worker_type, config, given, models, data_tables, operations_tables)
self.config.update({
'gh_api_key': self.augur_config.get_value('Database', 'key'),
'gitlab_api_key': self.augur_config.get_value('Database', 'gitlab_api_key')
})
#Fix loose attribute definition
self.headers = None
self.platform = platform
self.given = given
self.models = models
self.specs = {
'id': self.config['id'], # what the broker knows this worker as
'location': self.config['location'], # host + port worker is running on (so broker can send tasks here)
'qualifications': [
{
'given': self.given, # type of repo this worker can be given as a task
'models': self.models # models this worker can fill for a repo as a task
}
],
'config': self.config
}
# Send broker hello message
# Attempts to determine if these attributes exist
# If not, it creates them with default values
try:
self.tool_source
self.tool_version
self.data_source
except AttributeError:
self.tool_source = 'Augur Worker Testing'
self.tool_version = '0.0.0'
self.data_source = 'Augur Worker Testing'
#database interface, additional functionality with github interface.
def initialize_database_connections(self):
super().initialize_database_connections()
# Organize different api keys/oauths available
self.logger.info("Initializing API key.")
if 'gh_api_key' in self.config or 'gitlab_api_key' in self.config:
try:
self.init_oauths(self.platform)
except AttributeError:
self.logger.error("Worker not configured to use API key!")
else:
self.oauths = [{'oauth_id': 0}]
def find_id_from_login(self, login, platform='github'):
""" Retrieves our contributor table primary key value for the contributor with
the given GitHub login credentials, if this contributor is not there, then
they get inserted.
:param login: String, the GitHub login username to find the primary key id for
:return: Integer, the id of the row in our database with the matching GitHub login
"""
idSQL = s.sql.text("""
SELECT cntrb_id FROM contributors WHERE cntrb_login = '{}' \
AND LOWER(data_source) = '{} api'
""".format(login, platform))
rs = pd.read_sql(idSQL, self.db, params={})
data_list = [list(row) for row in rs.itertuples(index=False)]
try:
return data_list[0][0]
except:
self.logger.info('contributor needs to be added...')
if platform == 'github':
cntrb_url = ("https://api.github.com/users/" + login)
elif platform == 'gitlab':
cntrb_url = ("https://gitlab.com/api/v4/users?username=" + login )
self.logger.info("Hitting endpoint: {} ...\n".format(cntrb_url))
# Possible infinite loop if this request never succeeds?
while True:
try:
r = requests.get(url=cntrb_url, headers=self.headers)
break
except TimeoutError as e:
self.logger.info("Request timed out. Sleeping 10 seconds and trying again...\n")
time.sleep(30)
self.update_rate_limit(r)
contributor = r.json()
# Used primarily for the Gitlab block below
company = None
location = None
email = None
if 'company' in contributor:
company = contributor['company']
if 'location' in contributor:
location = contributor['location']
if 'email' in contributor:
email = contributor['email']
''' Because a user named 'nan' exists, we need to cast any field where their
user name appears to a string, because otherwise python was casting it as a float '''
if platform == 'github':
cntrb = {
'cntrb_login': str(contributor['login']) if 'login' in contributor else None,
'cntrb_email': str(contributor['email']) if 'email' in contributor else None,
'cntrb_company': str(contributor['company']) if 'company' in contributor else None,
'cntrb_location': str(contributor['location']) if 'location' in contributor else None,
'cntrb_created_at': contributor['created_at'] if 'created_at' in contributor else None,
'cntrb_canonical': str(contributor['email']) if 'email' in contributor else None,
'gh_user_id': contributor['id'] if 'id' in contributor else None,
'gh_login': str(contributor['login']) if 'login' in contributor else None,
'gh_url': str(contributor['url']) if 'url' in contributor else None,
'gh_html_url': str(contributor['html_url']) if 'html_url' in contributor else None,
'gh_node_id': contributor['node_id'] if 'node_id' in contributor else None,
'gh_avatar_url': str(contributor['avatar_url']) if 'avatar_url' in contributor else None,
'gh_gravatar_id': str(contributor['gravatar_id']) if 'gravatar_id' in contributor else None,
'gh_followers_url': str(contributor['followers_url']) if 'followers_url' in contributor else None,
'gh_following_url': str(contributor['following_url']) if 'following_url' in contributor else None,
'gh_gists_url': str(contributor['gists_url']) if 'gists_url' in contributor else None,
'gh_starred_url': str(contributor['starred_url']) if 'starred_url' in contributor else None,
'gh_subscriptions_url': str(contributor['subscriptions_url']) if 'subscriptions_url' in contributor else None,
'gh_organizations_url': str(contributor['organizations_url']) if 'organizations_url' in contributor else None,
'gh_repos_url': str(contributor['repos_url']) if 'repos_url' in contributor else None,
'gh_events_url': str(contributor['events_url']) if 'events_url' in contributor else None,
'gh_received_events_url': str(contributor['received_events_url']) if 'received_events_url' in contributor else None,
'gh_type': contributor['type'] if 'type' in contributor else None,
'gh_site_admin': contributor['site_admin'] if 'site_admin' in contributor else None,
'tool_source': self.tool_source,
'tool_version': self.tool_version,
'data_source': self.data_source
}
elif platform == 'gitlab':
cntrb = {
'cntrb_login': contributor[0]['username'] if 'username' in contributor[0] else None,
'cntrb_email': email,
'cntrb_company': company,
'cntrb_location': location,
'cntrb_created_at': contributor[0]['created_at'] if 'created_at' in contributor[0] else None,
'cntrb_canonical': email,
'gh_user_id': contributor[0]['id'],
'gh_login': contributor[0]['username'],
'gh_url': contributor[0]['web_url'],
'gh_html_url': None,
'gh_node_id': None,
'gh_avatar_url': contributor[0]['avatar_url'],
'gh_gravatar_id': None,
'gh_followers_url': None,
'gh_following_url': None,
'gh_gists_url': None,
'gh_starred_url': None,
'gh_subscriptions_url': None,
'gh_organizations_url': None,
'gh_repos_url': None,
'gh_events_url': None,
'gh_received_events_url': None,
'gh_type': None,
'gh_site_admin': None,
'tool_source': self.tool_source,
'tool_version': self.tool_version,
'data_source': self.data_source
}
result = self.db.execute(self.contributors_table.insert().values(cntrb))
self.logger.info("Primary key inserted into the contributors table: " + str(result.inserted_primary_key))
self.results_counter += 1
self.cntrb_id_inc = int(result.inserted_primary_key[0])
self.logger.info(f"Inserted contributor: {cntrb['cntrb_login']}\n")
return self.find_id_from_login(login, platform)
def init_oauths(self, platform='github'):
self.oauths = []
self.headers = None
self.logger.info("Trying initialization.")
# Make a list of api key in the config combined w keys stored in the database
# Select endpoint to hit solely to retrieve rate limit
# information from headers of the response
# Adjust header keys needed to fetch rate limit information from the API responses
if platform == 'github':
self.logger.debug("in Github block")
url = "https://api.github.com/users/sgoggins"
oauthSQL = s.sql.text("""
SELECT * FROM worker_oauth WHERE access_token <> '{}' and platform = 'github'
""".format(self.config['gh_api_key']))
key_name = 'gh_api_key'
rate_limit_header_key = "X-RateLimit-Remaining"
rate_limit_reset_header_key = "X-RateLimit-Reset"
self.logger.debug('end of github block.')
elif platform == 'gitlab':
url = "https://gitlab.com/api/v4/version"
oauthSQL = s.sql.text("""
SELECT * FROM worker_oauth WHERE access_token <> '{}' and platform = 'gitlab'
""".format(self.config['gitlab_api_key']))
key_name = 'gitlab_api_key'
rate_limit_header_key = 'ratelimit-remaining'
rate_limit_reset_header_key = 'ratelimit-reset'
for oauth in [{'oauth_id': 0, 'access_token': self.config[key_name]}] + json.loads(
pd.read_sql(oauthSQL, self.helper_db, params={}).to_json(orient="records")
):
if platform == 'github':
self.headers = {'Authorization': 'token %s' % oauth['access_token']}
elif platform == 'gitlab':
self.headers = {'Authorization': 'Bearer %s' % oauth['access_token']}
response = requests.get(url=url, headers=self.headers, timeout=180)
self.oauths.append({
'oauth_id': oauth['oauth_id'],
'access_token': oauth['access_token'],
'rate_limit': int(response.headers[rate_limit_header_key]),
'seconds_to_reset': (
datetime.datetime.fromtimestamp(
int(response.headers[rate_limit_reset_header_key])
) - datetime.datetime.now()
).total_seconds()
})
self.logger.debug("Found OAuth available for use: {}".format(self.oauths[-1]))
if len(self.oauths) == 0:
self.logger.info(
"No API keys detected, please include one in your config or in the "
"worker_oauths table in the augur_operations schema of your database."
)
# First key to be used will be the one specified in the config (first element in
# self.oauths array will always be the key in use)
if platform == 'github':
self.headers = {'Authorization': 'token %s' % self.oauths[0]['access_token']}
elif platform == 'gitlab':
self.headers = {'Authorization': 'Bearer %s' % self.oauths[0]['access_token']}
self.logger.info("OAuth initialized\n")
def enrich_cntrb_id(
self, data, key, action_map_additions={'insert': {'source': [], 'augur': []}},
platform='github', prefix=''
):
if not len(data):
self.logger.info(f"Enrich contrib data is empty for {len(data)}, for the key {key}.")
raise ValueError
self.logger.info(f"Enriching contributor ids for {len(data)} data points...")
source_df = pd.DataFrame(data)
expanded_source_df = self._add_nested_columns(
source_df.copy(), [key] + action_map_additions['insert']['source']
)
# Insert cntrbs that are not in db
cntrb_action_map = {
'insert': {
'source': [key] + action_map_additions['insert']['source'] + [f'{prefix}id'],
'augur': ['cntrb_login'] + action_map_additions['insert']['augur'] + ['gh_user_id']
}
}
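# cntrb_action_map selects the contributor columns (cntrb_id plus the mapped 'augur'
# columns) pulled from the contributors table below; those rows are then used to decide
# whether a source row already exists, matched on gh_user_id or gh_node_id.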
table_values_cntrb = self.db.execute(
s.sql.select(self.get_relevant_columns(self.contributors_table,cntrb_action_map))
).fetchall()
source_data = expanded_source_df.to_dict(orient='records')
#Filter out bad data where we can't even hit the api.
source_data = [data for data in source_data if f'{prefix}login' in data and data[f'{prefix}login'] is not None and type(data[f'{prefix}login']) is str]
self.logger.info(f"table_values_cntrb keys: {table_values_cntrb[0].keys()}")
# self.logger.info(f"source_data keys: {source_data[0].keys()}")
#We can't use this because of worker collisions
#TODO: separate this method into its own worker.
#cntrb_id_offset = self.get_max_id(self.contributors_table, 'cntrb_id') - 1
self.logger.debug(f"Enriching {len(source_data)} contributors.")
# source_data = source_data.loc[data[f'{prefix}login'] != 'nan']
# loop through data to test if it is already in the database
for index, data in enumerate(source_data):
if data[f'{prefix}login'] == 'nan':
self.logger.debug("Nan user found continuing")
continue
#removed this log because it was generating a lot of data.
#self.logger.info(f"Enriching {index} of {len(source_data)}")
user_unique_ids = []
#Allow for alt identifiers to be checked if user.id is not present in source_data
try:
#This will trigger a KeyError if data has alt identifier.
data[f'{prefix}id']
for row in table_values_cntrb:
# removed checks for nan user in this block because this is getting all the gh_user_ids that are
# already in the database so it doesn't need to be filtered from the database, it needs to be
# filtered out so it is never inserted into the database
# Andrew Brain 12/21/2021
user_unique_ids.append(row['gh_user_id'])
except KeyError as e:
self.print_traceback("Enrich_cntrb_id, data doesn't have user.id. Using node_id instead", e, True)
finally:
for row in table_values_cntrb:
try:
user_unique_ids.append(row['gh_node_id'])
except Exception as e:
self.logger.info(f"Error adding gh_node_id: {e}. Row: {row}")
self.print_traceback("", e, True)
#self.logger.info(f"gh_user_ids: {gh_user_ids}")
# self.logger.info(f"Users gh_user_id: {data['user.id']}")
# in_user_ids = False
# if data['user.id'] in gh_user_ids:
# in_user_ids = True
# self.logger.info(f"{data['user.id']} is in gh_user_ids")
# self.logger.info(f"table_values_cntrb len: {len(table_values_cntrb)}")
#Deal with if data
#See if we can check using the user.id
source_data_id = None
try:
source_data_id = data[f'{prefix}id']
except KeyError:
source_data_id = data[f'{prefix}node_id']
#if user.id is in the database then there is no need to add the contributor
if source_data_id in user_unique_ids:
self.logger.info("{} found in database".format(source_data_id))
user_id_row = []
try:
data[f'{prefix}id']
#gets the dict from the table_values_cntrb that contains data['user.id']
user_id_row = list(filter(lambda x: x['gh_user_id'] == source_data_id, table_values_cntrb))[0]
#### Andrew: in a small number of cases, using data on contributors originally gathered in late 2019, there
#### is a mismatch .. the gh_user_id for a login is different. I suspect this rare case to be one
#### where a person did something that changed their gh_user_id ... I am unsure how this can happen ...
except KeyError:
user_id_row = list(filter(lambda x: x['gh_node_id'] == source_data_id, table_values_cntrb))[0]
#pass # 12/3/2021 SPG ... added pass to try to get past this key error in large inserts.
continue # 12/3/2021 SPG ... may be better inside a loop
#assigns the cntrb_id to the source data to be returned to the workers
data['cntrb_id'] = user_id_row['cntrb_id']
self.logger.info(f"cntrb_id {data['cntrb_id']} found in database and assigned to enriched data")
#contributor is not in the database
else:
self.logger.info("{} not in database, making api call".format(source_data_id))
self.logger.info("login: {}".format(data[f'{prefix}login']))
try:
url = ("https://api.github.com/users/" + str(data[f'{prefix}login']))
except Exception as e:
self.logger.info(f"Error when creating url: {e}. Data: {data}")
#pass # changed continue to pass 12/3/2021 SPG
continue # changed back 12/3/2021 SPG
attempts = 0
contributor = None
success = False
while attempts < 10:
self.logger.info(f"Hitting endpoint: {url} ...\n")
try:
response = requests.get(url=url, headers=self.headers)
except TimeoutError:
self.logger.info(f"User data request for enriching contributor data failed with {attempts} attempts! Trying again...")
time.sleep(10)
#pass # changed continue to pass 12/3/2021 SPG
continue # changed back 12/3/2021 SPG
self.update_rate_limit(response,platform=platform)
try:
contributor = response.json()
except:
contributor = json.loads(json.dumps(response.text))
continue # added continue 12/3/2021 SPG
if type(contributor) == dict:
self.logger.info("Request returned a dict!")
self.logger.info(f"Contributor data: {contributor}")
# contributor['gh_login'] = str(contributor['gh_login']) ## cast as string by SPG on 11/28/2021 due to `nan` user
success = True
break
elif type(contributor) == list:
self.logger.warning("Wrong type returned trying again...")
self.logger.info(f"Contributor data: {contributor}")
elif type(contributor) == str:
self.logger.info(f"Warning! page_data was string: {contributor}\n")
if "<!DOCTYPE html>" in contributor:
self.logger.info("HTML was returned, trying again...\n")
elif len(contributor) == 0:
self.logger.warning("Empty string, trying again...\n")
else:
try:
contributor = json.loads(contributor)
# contributor['gh_login'] = str(contributor['gh_login']) ## cast as string by SPG on 11/28/2021 due to `nan` user
success = True
break
except:
pass
attempts += 1
if not success:
break
self.logger.info(f"Contributor data: {contributor}")
cntrb = {
"cntrb_login": contributor['login'],
"cntrb_created_at": contributor['created_at'],
"cntrb_email": contributor['email'] if 'email' in contributor else None,
"cntrb_company": contributor['company'] if 'company' in contributor else None,
"cntrb_location": contributor['location'] if 'location' in contributor else None,
# "cntrb_type": , dont have a use for this as of now ... let it default to null
"cntrb_canonical": contributor['email'] if 'email' in contributor else None,
"gh_user_id": contributor['id'],
"gh_login": str(contributor['login']), ## cast as string by SPG on 11/28/2021 due to `nan` user
"gh_url": contributor['url'],
"gh_html_url": contributor['html_url'],
"gh_node_id": contributor['node_id'],
"gh_avatar_url": contributor['avatar_url'],
"gh_gravatar_id": contributor['gravatar_id'],
"gh_followers_url": contributor['followers_url'],
"gh_following_url": contributor['following_url'],
"gh_gists_url": contributor['gists_url'],
"gh_starred_url": contributor['starred_url'],
"gh_subscriptions_url": contributor['subscriptions_url'],
"gh_organizations_url": contributor['organizations_url'],
"gh_repos_url": contributor['repos_url'],
"gh_events_url": contributor['events_url'],
"gh_received_events_url": contributor['received_events_url'],
"gh_type": contributor['type'],
"gh_site_admin": contributor['site_admin'],
"cntrb_last_used" : None if 'updated_at' not in contributor else contributor['updated_at'],
"cntrb_full_name" : None if 'name' not in contributor else contributor['name'],
"tool_source": self.tool_source,
"tool_version": self.tool_version,
"data_source": self.data_source
}
#insert new contributor into database
# TODO: make this method its own worker. This errors because of collisions between github_worker and pull_request_worker.
# We can solve this by making another worker with a queue. It wouldn't have to be too complicated.
try:
self.db.execute(self.contributors_table.insert().values(cntrb))
except s.exc.IntegrityError:
self.logger.info(f"there was a collision caught ....")
self.logger.info(traceback.format_exc())
#pass # added by sean 11/29/2021 ... think it might be blocking comment insertion otherwise
continue # changed to continue on 12/3/2021
except Exception as e:
self.logger.info(f"Contributor was unable to be added to table! Attempting to get cntrb_id from table anyway because of possible collision. Error: {e}")
#pass # added by sean 11/29/2021 ... think it might be blocking comment insertion otherwise
continue
#Get the contributor id from the newly inserted contributor.
cntrb_id_row = self.db.execute(
s.sql.select(self.get_relevant_columns(self.contributors_table,cntrb_action_map)).where(
self.contributors_table.c.gh_user_id==cntrb["gh_user_id"]
)
).fetchall()
# Handle and log rare failure cases. If this part errors, something is very wrong.
if len(cntrb_id_row) == 1:
data['cntrb_id'] = cntrb_id_row[0]['cntrb_id']
self.logger.info(f"cntrb_id {data['cntrb_id']} found in database and assigned to enriched data")
elif len(cntrb_id_row) == 0:
self.logger.error("Couldn't find contributor in database. Something has gone very wrong. Augur ran into a contributor that is unable to be inserted into the contributors table but is also not present in that table.")
else:
self.logger.info(f"There are more than one contributors in the table with gh_user_id={cntrb['gh_user_id']}")
cntrb_data = {
'cntrb_id': int(data['cntrb_id']), # came through as a float. Fixed 11/28/2021, SPG
'gh_node_id': cntrb['gh_node_id'],
'cntrb_login': str(cntrb['cntrb_login']), # NaN user issue. Fixed 11/28/2021, SPG
'gh_user_id': int(cntrb['gh_user_id']) # came through as a float. Fixed 11/28/2021, SPG
}
#This updates our list of who is already in the database as we iterate to avoid duplicates.
#People who make changes tend to make more than one in a row.
table_values_cntrb.append(cntrb_data)
self.logger.info(
"Contributor id enrichment successful, result has "
f"{len(source_data)} data points.\n"
)
for data in source_data:
self.logger.debug("User login type: " + str(type(data[f'{prefix}login'])) + ". Login: " + str(data[f'{prefix}login']))
try:
data['cntrb_id']
except:
self.logger.debug(f"AB ERROR: data exiting enrich_cntrb_id without cntrb_id, login is: " + str(data[f'{prefix}login']))
return source_data
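# Hedged summary of the method above (descriptive only): every dict in source_data
# should leave enrich_cntrb_id with a 'cntrb_id' key when the contributor could be
# matched or freshly inserted; records that hit an unrecoverable error are skipped
# via `continue` and surfaced by the debug loop right before the return.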
# Try to construct the best url to ping GitHub's API for a username given an email.
"""
I changed this because of the following note on the API site: with the "in" qualifier
you can restrict your search to the username (login), full name, public email, or any
combination of these. When you omit this qualifier, only the username and email address
are searched. For privacy reasons, you cannot search by email domain name.
https://docs.github.com/en/github/searching-for-information-on-github/searching-on-github/searching-users#search-only-users-or-organizations
"""
def create_endpoint_from_email(self, email):
self.logger.info(f"Trying to resolve contributor from email: {email}")
# Note: I added "+type:user" to keep user-owned organizations from being returned
# Also stopped splitting per note above.
url = 'https://api.github.com/search/users?q={}+in:email+type:user'.format(
email)
return url
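# Illustrative sketch (the address below is hypothetical): calling
#   self.create_endpoint_from_email("jane@example.com")
# would return
#   "https://api.github.com/search/users?q=jane@example.com+in:email+type:user"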
def query_github_contributors(self, entry_info, repo_id):
""" Data collection function
Query the GitHub API for contributors
"""
self.logger.info(f"Querying contributors with given entry info: {entry_info}\n")
## It absolutely doesn't matter if the contributor has already contributed to a repo. It only matters that they exist in our table, and
## if they DO, then we DO NOT want to insert them again in any GitHub method.
github_url = entry_info['given']['github_url'] if 'github_url' in entry_info['given'] else entry_info['given']['git_url']
# Extract owner/repo from the url for the endpoint
try:
owner, name = self.get_owner_repo(github_url)
except IndexError as e:
self.logger.error(f"Encountered bad entry info: {entry_info}")
return
# Set the base of the url and place to hold contributors to insert
contributors_url = (
f"https://api.github.com/repos/{owner}/{name}/" +
"contributors?per_page=100&page={}"
)
# Get contributors that we already have stored
# Set our duplicate and update column map keys (something other than PK) to
# check duplicates/needed column updates with
table = 'contributors'
table_pkey = 'cntrb_id'
update_col_map = {'cntrb_email': 'email'}
duplicate_col_map = {'cntrb_login': 'login'}
#list to hold contributors needing insertion or update
contributors = self.paginate(contributors_url, duplicate_col_map, update_col_map, table, table_pkey)
self.logger.info("Count of contributors needing insertion: " + str(len(contributors)) + "\n")
for repo_contributor in contributors:
try:
# Need to hit this single contributor endpoint to get extra data, including...
# `created_at`
# I think that's it
cntrb_url = ("https://api.github.com/users/" + repo_contributor['login'])
self.logger.info("Hitting endpoint: " + cntrb_url + " ...\n")
r = requests.get(url=cntrb_url, headers=self.headers)
self.update_gh_rate_limit(r)
contributor = r.json()
company = None
location = None
email = None
if 'company' in contributor:
company = contributor['company']
if 'location' in contributor:
location = contributor['location']
if 'email' in contributor:
email = contributor['email']
canonical_email = contributor['email']
cntrb = {
"cntrb_login": contributor['login'],
"cntrb_created_at": contributor['created_at'],
"cntrb_email": email,
"cntrb_company": company,
"cntrb_location": location,
# "cntrb_type": , dont have a use for this as of now ... let it default to null
"cntrb_canonical": canonical_email,
"gh_user_id": contributor['id'],
"gh_login": contributor['login'],
"gh_url": contributor['url'],
"gh_html_url": contributor['html_url'],
"gh_node_id": contributor['node_id'], #This is what we are dup checking
"gh_avatar_url": contributor['avatar_url'],
"gh_gravatar_id": contributor['gravatar_id'],
"gh_followers_url": contributor['followers_url'],
"gh_following_url": contributor['following_url'],
"gh_gists_url": contributor['gists_url'],
"gh_starred_url": contributor['starred_url'],
"gh_subscriptions_url": contributor['subscriptions_url'],
"gh_organizations_url": contributor['organizations_url'],
"gh_repos_url": contributor['repos_url'],
"gh_events_url": contributor['events_url'],
"gh_received_events_url": contributor['received_events_url'],
"gh_type": contributor['type'],
"gh_site_admin": contributor['site_admin'],
"cntrb_last_used" : None if 'updated_at' not in contributor else contributor['updated_at'],
"cntrb_full_name" : None if 'name' not in contributor else contributor['name'],
"tool_source": self.tool_source,
"tool_version": self.tool_version,
"data_source": self.data_source
}
#dup check
#TODO: add additional fields to check if needed.
existingMatchingContributors = self.db.execute(
s.sql.select(
[self.contributors_table.c.gh_node_id]
).where(
self.contributors_table.c.gh_node_id==cntrb["gh_node_id"]
)
).fetchall()
if len(existingMatchingContributors) > 0:
break #if contributor already exists in table
# Commit insertion to table
if repo_contributor['flag'] == 'need_update':
result = self.db.execute(self.contributors_table.update().where(
self.worker_history_table.c.cntrb_email==email).values(cntrb))
self.logger.info("Updated tuple in the contributors table with existing email: {}".format(email))
self.cntrb_id_inc = repo_contributor['pkey']
elif repo_contributor['flag'] == 'need_insertion':
result = self.db.execute(self.contributors_table.insert().values(cntrb))
self.logger.info("Primary key inserted into the contributors table: {}".format(result.inserted_primary_key))
#For workers that aren't an interface.
if self.worker_type != "Contributor_interface":
self.results_counter += 1
self.logger.info("Inserted contributor: " + contributor['login'] + "\n")
# Increment our global track of the cntrb id for the possibility of it being used as a FK
self.cntrb_id_inc = int(result.inserted_primary_key[0])
except Exception as e:
self.logger.error("Caught exception: {}".format(e))
self.logger.error(f"Traceback: {traceback.format_exc()}")
self.logger.error("Cascading Contributor Anomalie from missing repo contributor data: {} ...\n".format(cntrb_url))
continue
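# Hedged usage sketch for query_github_contributors (the repo URL and id below are
# illustrative assumptions, not real Augur data):
#   self.query_github_contributors(
#       {'given': {'github_url': 'https://github.com/owner/repo'}}, repo_id=1
#   )
# paginates the repo's /contributors endpoint and inserts or updates rows in the
# contributors table as shown above.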
# Hit the endpoint specified by the url and return the parsed JSON if it comes back as a dict.
# Returns None on failure.
def request_dict_from_endpoint(self, url, timeout_wait=10):
self.logger.info(f"Hitting endpoint: {url}")
attempts = 0
response_data = None
success = False
# This borrows the logic to safely hit an endpoint from paginate_endpoint.
while attempts < 10:
try:
response = requests.get(url=url, headers=self.headers)
except TimeoutError:
self.logger.info(
f"User data request for enriching contributor data failed with {attempts} attempts! Trying again...")
time.sleep(timeout_wait)
continue
# Make sure we know how many requests our api tokens have.
self.update_rate_limit(response, platform="github")
try:
response_data = response.json()
except:
response_data = json.loads(json.dumps(response.text))
if type(response_data) == dict:
# Sometimes GitHub sends us an error message in a dict instead of a string.
# While a bit annoying, it is easy to work around.
if 'message' in response_data:
try:
assert 'API rate limit exceeded' not in response_data['message']
except AssertionError as e:
self.logger.info(
f"Detected error in response data from gitHub. Trying again... Error: {e}")
attempts += 1
continue
# self.logger.info(f"Returned dict: {response_data}")
success = True
break
elif type(response_data) == list:
self.logger.warning("Wrong type returned, trying again...")
self.logger.info(f"Returned list: {response_data}")
elif type(response_data) == str:
self.logger.info(
f"Warning! page_data was string: {response_data}")
if "<!DOCTYPE html>" in response_data:
self.logger.info("HTML was returned, trying again...\n")
elif len(response_data) == 0:
self.logger.warning("Empty string, trying again...\n")
else:
try:
# Sometimes raw text can be converted to a dict
response_data = json.loads(response_data)
success = True
break
except:
pass
attempts += 1
if not success:
return None
return response_data
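# Hedged usage sketch (illustrative URL; assumes self.headers already carries a
# valid token):
#   user = self.request_dict_from_endpoint("https://api.github.com/users/octocat")
#   if user is not None:
#       self.logger.info(f"Fetched login: {user['login']}")
# A None return means no usable dict came back after 10 attempts.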
# Probably a better version of query_github_contributors, but it uses bulk_insert, which is a bit shaky right now.
def query_github_contributors_bulk(self, entry_info, repo_id):
""" Data collection function
Query the GitHub API for contributors.
Uses paginate_endpoint rather than paginate, and supports stagger for larger repos.
"""
self.logger.info(f"Querying contributors with given entry info: {entry_info}\n")
github_url = entry_info['given']['github_url'] if 'github_url' in entry_info['given'] else entry_info['given']['git_url']
owner, name = self.get_owner_repo(github_url)
contributors_url = (f"https://api.github.com/repos/{owner}/{name}/" +
"contributors?per_page=100&page={}")
action_map = {
'insert': {
'source': ['login'],
'augur': ['cntrb_login']
},
'update': {
'source': ['email'],
'augur': ['cntrb_email']
}
}
source_contributors = self.paginate_endpoint(contributors_url, action_map=action_map,
table=self.contributors_table)
contributors_insert = []
for repo_contributor in source_contributors['insert']:
# Need to hit this single contributor endpoint to get extra data
cntrb_url = (f"https://api.github.com/users/{repo_contributor['login']}")
self.logger.info(f"Hitting endpoint: {cntrb_url} ...\n")
r = requests.get(url=cntrb_url, headers=self.headers)
self.update_gh_rate_limit(r)
contributor = r.json()
contributors_insert.append({
'cntrb_login': contributor['login'],
'cntrb_created_at': contributor['created_at'],
'cntrb_email': contributor['email'] if 'email' in contributor else None,
'cntrb_company': contributor['company'] if 'company' in contributor else None,
'cntrb_location': contributor['location'] if 'location' in contributor else None,
'cntrb_canonical': contributor['email'] if 'email' in contributor else None,
'gh_user_id': contributor['id'],
'gh_login': contributor['login'],
'gh_url': contributor['url'],
'gh_html_url': contributor['html_url'],
'gh_node_id': contributor['node_id'],
'gh_avatar_url': contributor['avatar_url'],
'gh_gravatar_id': contributor['gravatar_id'],
'gh_followers_url': contributor['followers_url'],
'gh_following_url': contributor['following_url'],
'gh_gists_url': contributor['gists_url'],
'gh_starred_url': contributor['starred_url'],
'gh_subscriptions_url': contributor['subscriptions_url'],
'gh_organizations_url': contributor['organizations_url'],
'gh_repos_url': contributor['repos_url'],
'gh_events_url': contributor['events_url'],
'gh_received_events_url': contributor['received_events_url'],
'gh_type': contributor['type'],
'gh_site_admin': contributor['site_admin'],
'tool_source': self.tool_source,
'tool_version': self.tool_version,
'data_source': self.data_source
})
contributors_insert_result, contributors_update_result = self.bulk_insert(self.contributors_table,
update=source_contributors['update'], unique_columns=action_map['insert']['augur'],
insert=contributors_insert, update_columns=action_map['update']['augur'])
def query_gitlab_contributors(self, entry_info, repo_id):
gitlab_url = (
entry_info['given']['gitlab_url'] if 'gitlab_url' in entry_info['given']
else entry_info['given']['git_url']
)
self.logger.info("Querying contributors with given entry info: " + str(entry_info) + "\n")
path = urlparse(gitlab_url)
split = path[2].split('/')
owner = split[1]
name = split[2]
# Handles git url case by removing the extension
if ".git" in name:
name = name[:-4]
url_encoded_format = quote(owner + '/' + name, safe='')
table = 'contributors'
table_pkey = 'cntrb_id'
### Here we are adding GitLab user information from the API,
### following Gabe's rework of the contributor worker.
### The GitLab API will NEVER give you an email. It will let you
### query an email, but never give you one.
### GitLab email api: https://gitlab.com/api/v4/users?search=s@goggins.com
### We don't need to update right now, so commenting out.
### TODO: SOLVE LOGIC.
# update_col_map = {'cntrb_email': 'email'}
update_col_map = {}
duplicate_col_map = {'gl_username': 'username'}
# list to hold contributors needing insertion or update
contributors = self.paginate("https://gitlab.com/api/v4/projects/" + url_encoded_format + "/repository/contributors?per_page=100&page={}", duplicate_col_map, update_col_map, table, table_pkey, platform='gitlab')
for repo_contributor in contributors:
try:
cntrb_compressed_url = ("https://gitlab.com/api/v4/users?search=" + repo_contributor['email'])
self.logger.info("Hitting endpoint: " + cntrb_compressed_url + " ...\n")
r = requests.get(url=cntrb_compressed_url, headers=self.headers)
contributor_compressed = r.json()
email = repo_contributor['email']
self.logger.info(contributor_compressed)
if len(contributor_compressed) == 0 or type(contributor_compressed) is dict or "id" not in contributor_compressed[0]:
continue
self.logger.info("Fetching for user: " + str(contributor_compressed[0]["id"]))
cntrb_url = ("https://gitlab.com/api/v4/users/" + str(contributor_compressed[0]["id"]))
self.logger.info("Hitting end point to get complete contributor info now: " + cntrb_url + "...\n")
r = requests.get(url=cntrb_url, headers=self.headers)
contributor = r.json()
cntrb = {
"gl_id": contributor.get('gl_id', None),
"gl_full_name": contributor.get('full_name', None),
"gl_username": contributor.get('username', None),
"gl_state": contributor.get('state', None),
"gl_avatar_url": contributor.get('avatar_url', None),
"gl_web_url": contributor.get('web_url', None),
#"cntrb_login": contributor.get('username', None),
#"cntrb_created_at": contributor.get('created_at', None),
"cntrb_email": ('email', None),
#"cntrb_company": contributor.get('organization', None),
#"cntrb_location": contributor.get('location', None),
# "cntrb_type": , dont have a use for this as of now ... let it default to null
#"cntrb_canonical": contributor.get('public_email', None),
#"gh_user_id": contributor.get('id', None),
#"gh_login": contributor.get('username', None),
#"gh_url": contributor.get('web_url', None),
#"gh_html_url": contributor.get('web_url', None),
#"gh_node_id": None,
#"gh_avatar_url": contributor.get('avatar_url', None),
#"gh_gravatar_id": None,
#"gh_followers_url": None,
#"gh_following_url": None,
#"gh_gists_url": None,
#"gh_starred_url": None,
#"gh_subscriptions_url": None,
#"gh_organizations_url": None,
#"gh_repos_url": None,
#"gh_events_url": None,
#"gh_received_events_url": None,
#"gh_type": None,
#"gh_site_admin": None,
"tool_source": self.tool_source,
"tool_version": self.tool_version,
"data_source": self.data_source
}
# Commit insertion to table
if repo_contributor['flag'] == 'need_update':
result = self.db.execute(self.contributors_table.update().where(
self.worker_history_table.c.cntrb_email == email).values(cntrb))
self.logger.info("Updated tuple in the contributors table with existing email: {}".format(email))
self.cntrb_id_inc = repo_contributor['pkey']
elif repo_contributor['flag'] == 'need_insertion':
result = self.db.execute(self.contributors_table.insert().values(cntrb))
self.logger.info("Primary key inserted into the contributors table: {}".format(result.inserted_primary_key))
self.results_counter += 1
self.logger.info("Inserted contributor: " + contributor['username'] + "\n")
# Increment our global track of the cntrb id for the possibility of it being used as a FK
self.cntrb_id_inc = int(result.inserted_primary_key[0])
except Exception as e:
self.logger.info("Caught exception: {}".format(e))
self.logger.info("Cascading Contributor Anomalie from missing repo contributor data: {} ...\n".format(cntrb_url))
continue
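# Hedged note on the GitLab flow above (descriptive only): contributors from the
# repository endpoint expose an email but no user id, so the worker first searches
# /api/v4/users?search=<email> and then fetches the full /api/v4/users/<id> record
# before building the cntrb row.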
def update_gitlab_rate_limit(self, response, bad_credentials=False, temporarily_disable=False):
# Try to get rate limit from request headers, sometimes it does not work (GH's issue)
# In that case we just decrement from last received header count
if bad_credentials and len(self.oauths) > 1:
self.logger.info(
f"Removing oauth with bad credentials from consideration: {self.oauths[0]}"
)
del self.oauths[0]
if temporarily_disable:
self.logger.info("Gitlab rate limit reached. Temp. disabling...")
self.oauths[0]['rate_limit'] = 0
else:
try:
self.oauths[0]['rate_limit'] = int(response.headers['RateLimit-Remaining'])
except:
self.oauths[0]['rate_limit'] -= 1
self.logger.info("Updated rate limit, you have: " +
str(self.oauths[0]['rate_limit']) + " requests remaining.")
if self.oauths[0]['rate_limit'] <= 0:
try:
reset_time = response.headers['RateLimit-Reset']
except Exception as e:
self.logger.info(f"Could not get reset time from headers because of error: {e}")
reset_time = 3600
time_diff = datetime.datetime.fromtimestamp(int(reset_time)) - datetime.datetime.now()
self.logger.info("Rate limit exceeded, checking for other available keys to use.")
# Find the oauth in our list with the most rate limit remaining
new_oauth = self.oauths[0]
# Endpoint to hit solely to retrieve rate limit information from headers of the response
url = "https://gitlab.com/api/v4/version"
other_oauths = self.oauths[0:] if len(self.oauths) > 1 else []
for oauth in other_oauths:
# self.logger.info("Inspecting rate limit info for oauth: {}\n".format(oauth))
self.headers = {"PRIVATE-TOKEN" : oauth['access_token']}
response = requests.get(url=url, headers=self.headers)
oauth['rate_limit'] = int(response.headers['RateLimit-Remaining'])
oauth['seconds_to_reset'] = (
datetime.datetime.fromtimestamp(
int(response.headers['RateLimit-Reset'])
) - datetime.datetime.now()
).total_seconds()
# Update oauth to switch to if a higher limit is found
if oauth['rate_limit'] > new_oauth['rate_limit']:
self.logger.info(f"Higher rate limit found in oauth: {oauth}")
new_oauth = oauth
elif (
oauth['rate_limit'] == new_oauth['rate_limit']
and oauth['seconds_to_reset'] < new_oauth['seconds_to_reset']
):
self.logger.info(
f"Lower wait time found in oauth with same rate limit: {oauth}"
)
new_oauth = oauth
if new_oauth['rate_limit'] <= 0 and new_oauth['seconds_to_reset'] > 0:
self.logger.info(
"No oauths with >0 rate limit were found, waiting for oauth with "
f"smallest wait time: {new_oauth}\n"
)
time.sleep(new_oauth['seconds_to_reset'])
# Make new oauth the 0th element in self.oauths so we know which one is in use
index = self.oauths.index(new_oauth)
self.oauths[0], self.oauths[index] = self.oauths[index], self.oauths[0]
self.logger.info("Using oauth: {}\n".format(self.oauths[0]))
# Change headers to be using the new oauth's key
self.headers = {"PRIVATE-TOKEN" : self.oauths[0]['access_token']}
def update_gh_rate_limit(self, response, bad_credentials=False, temporarily_disable=False):
# Try to get rate limit from request headers, sometimes it does not work (GH's issue)
# In that case we just decrement from last received header count
if bad_credentials and len(self.oauths) > 1:
self.logger.warning(
f"Removing oauth with bad credentials from consideration: {self.oauths[0]}"
)
del self.oauths[0]
if temporarily_disable:
self.logger.debug(
"Github thinks we are abusing their api. Preventing use "
"of this key until its rate limit resets..."
)
self.oauths[0]['rate_limit'] = 0
else:
try:
self.oauths[0]['rate_limit'] = int(response.headers['X-RateLimit-Remaining'])
# self.logger.info("Recieved rate limit from headers\n")
except:
self.oauths[0]['rate_limit'] -= 1
self.logger.info("Headers did not work, had to decrement")
time.sleep(30)
self.logger.info(
f"Updated rate limit, you have: {self.oauths[0]['rate_limit']} requests remaining."
)
#Stalls after here for some reason.
if self.oauths[0]['rate_limit'] <= 0:
try:
reset_time = response.headers['X-RateLimit-Reset']
except Exception as e:
self.logger.error(f"Could not get reset time from headers because of error: {e}")
reset_time = 3600
time_diff = datetime.datetime.fromtimestamp(int(reset_time)) - datetime.datetime.now()
self.logger.info("Rate limit exceeded, checking for other available keys to use.")
# Find the oauth in our list with the most rate limit remaining
new_oauth = self.oauths[0]
# Endpoint to hit solely to retrieve rate limit information from headers of the response
url = "https://api.github.com/users/sgoggins"
other_oauths = self.oauths[0:] if len(self.oauths) > 1 else []
for oauth in other_oauths:
# self.logger.info("Inspecting rate limit info for oauth: {}\n".format(oauth))
self.headers = {'Authorization': 'token %s' % oauth['access_token']}
attempts = 3
success = False
while attempts > 0 and not success:
response = requests.get(url=url, headers=self.headers)
try:
oauth['rate_limit'] = int(response.headers['X-RateLimit-Remaining'])
oauth['seconds_to_reset'] = (
datetime.datetime.fromtimestamp(
int(response.headers['X-RateLimit-Reset'])
) - datetime.datetime.now()
).total_seconds()
success = True
except Exception as e:
self.logger.info(
f"oath method ran into error getting info from headers: {e}\n"
)
self.logger.info(f"{self.headers}\n{url}\n")
attempts -= 1
if not success:
continue
# Update oauth to switch to if a higher limit is found
if oauth['rate_limit'] > new_oauth['rate_limit']:
self.logger.info("Higher rate limit found in oauth: {}\n".format(oauth))
new_oauth = oauth
elif (
oauth['rate_limit'] == new_oauth['rate_limit']
and oauth['seconds_to_reset'] < new_oauth['seconds_to_reset']
):
self.logger.info(
f"Lower wait time found in oauth with same rate limit: {oauth}\n"
)
new_oauth = oauth
if new_oauth['rate_limit'] <= 0 and new_oauth['seconds_to_reset'] > 0:
self.logger.info(
"No oauths with >0 rate limit were found, waiting for oauth with "
f"smallest wait time: {new_oauth}\n"
)
time.sleep(new_oauth['seconds_to_reset'])
# Make new oauth the 0th element in self.oauths so we know which one is in use
index = self.oauths.index(new_oauth)
self.oauths[0], self.oauths[index] = self.oauths[index], self.oauths[0]
self.logger.info("Using oauth: {}\n".format(self.oauths[0]))
# Change headers to be using the new oauth's key
self.headers = {'Authorization': 'token %s' % self.oauths[0]['access_token']}
def update_rate_limit(
self, response, bad_credentials=False, temporarily_disable=False, platform="gitlab"
):
if platform == 'gitlab':
return self.update_gitlab_rate_limit(
response, bad_credentials=bad_credentials, temporarily_disable=temporarily_disable
)
elif platform == 'github':
return self.update_gh_rate_limit(
response, bad_credentials=bad_credentials, temporarily_disable=temporarily_disable
)
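# Hedged usage sketch: workers call update_rate_limit right after each request so
# key bookkeeping stays current, e.g.
#   response = requests.get(url=url, headers=self.headers)
#   self.update_rate_limit(response, platform="github")
# Note that platform defaults to "gitlab", so GitHub callers must pass it explicitly.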
# insertion_method and stagger are arguments that allow paginate_endpoint to insert every insertion_threshold pages instead of waiting until the end.
def paginate_endpoint(
self, url, action_map={}, table=None, where_clause=True, platform='github', in_memory=True, stagger=False, insertion_method=None, insertion_threshold=1000
):
#Get augur columns using the action map along with the primary key
table_values = self.db.execute(
s.sql.select(self.get_relevant_columns(table, action_map)).where(where_clause)
).fetchall()
page_number = 1
multiple_pages = False
need_insertion = []
need_update = []
#Stores sum of page data
all_data = []
forward_pagination = True
backwards_activation = False
last_page_number = -1
# Block to handle page queries, retrying up to 10 times
while True:
# Multiple attempts to hit endpoint
num_attempts = 0
success = False
while num_attempts < 10:
self.logger.info(f"Hitting endpoint: {url.format(page_number)}...\n")
try:
response = requests.get(url=url.format(page_number), headers=self.headers)
except TimeoutError as e:
self.logger.info("Request timed out. Sleeping 10 seconds and trying again...\n")
time.sleep(10)
continue
self.update_rate_limit(response, platform=platform)
try:
page_data = response.json()
except:
page_data = json.loads(json.dumps(response.text))
if type(page_data) == list:
success = True
break
elif type(page_data) == dict:
self.logger.info("Request returned a dict: {}\n".format(page_data))
if page_data['message'] == "Not Found":
self.logger.warning(
"Github repo was not found or does not exist for endpoint: "
f"{url.format(page_number)}\n"
)
break
if "You have exceeded a secondary rate limit. Please wait a few minutes before you try again" in page_data['message']:
num_attempts -=1
self.logger.info('\n\n\n\nSleeping for 100 seconds due to secondary rate limit issue.\n\n\n\n')
time.sleep(100)
if "You have triggered an abuse detection mechanism." in page_data['message']:
num_attempts -= 1
self.update_rate_limit(response, temporarily_disable=True,platform=platform)
if page_data['message'] == "Bad credentials":
self.logger.info("\n\n\n\n\n\n\n POSSIBLY BAD TOKEN \n\n\n\n\n\n\n")
self.update_rate_limit(response, bad_credentials=True, platform=platform)
elif type(page_data) == str:
self.logger.info(f"Warning! page_data was string: {page_data}\n")
if "<!DOCTYPE html>" in page_data:
self.logger.info("HTML was returned, trying again...\n")
elif len(page_data) == 0:
self.logger.warning("Empty string, trying again...\n")
else:
try:
page_data = json.loads(page_data)
success = True
break
except:
pass
num_attempts += 1
if not success:
break
# Success
# Determine if continued pagination is needed
if len(page_data) == 0:
self.logger.info("Response was empty, breaking from pagination.\n")
break
all_data += page_data
if not forward_pagination:
# Checking contents of requests with what we already have in the db
page_insertions, page_updates = self.organize_needed_data(
page_data, table_values, list(table.primary_key)[0].name,
action_map
)
# Reached a page where we already have all tuples
if len(need_insertion) == 0 and len(need_update) == 0 and \
backwards_activation:
self.logger.info(
"No more pages with unknown tuples, breaking from pagination.\n"
)
break
need_insertion += page_insertions
need_update += page_updates
# Find last page so we can decrement from there
if 'last' in response.links and last_page_number == -1:
if platform == 'github':
last_page_number = int(response.links['last']['url'][-6:].split('=')[1])
elif platform == 'gitlab':
last_page_number = int(response.links['last']['url'].split('&')[2].split('=')[1])
if not forward_pagination and not backwards_activation:
page_number = last_page_number
backwards_activation = True
self.logger.info("Analyzation of page {} of {} complete\n".format(page_number,
int(last_page_number) if last_page_number != -1 else "*last page not known*"))
if (page_number <= 1 and not forward_pagination) or \
(page_number >= last_page_number and forward_pagination):
self.logger.info("No more pages to check, breaking from pagination.\n")
break
# This is where we insert early (roughly every insertion_threshold pages) instead of waiting until the end.
# Makes sure that stagger is enabled, we have an insertion method, and the insertion happens every insertion_threshold pages.
if stagger and insertion_method != None and page_number % insertion_threshold == 0:
#call insertion method passed as argument.
staggered_source_prs = {
'insert' : need_insertion,
'update' : need_update,
'all' : all_data
}
#Use the method the subclass needs in order to insert the data.
insertion_method(staggered_source_prs,action_map)
#clear the data from memory and avoid duplicate insertions.
need_insertion = []
need_update = []
all_data = []
page_number = page_number + 1 if forward_pagination else page_number - 1
if forward_pagination:
need_insertion, need_update = self.organize_needed_data(
all_data, table_values, list(table.primary_key)[0].name, action_map
)
return {
'insert': need_insertion,
'update': need_update,
'all': all_data
}
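# Hedged usage sketch of paginate_endpoint (mirrors query_github_contributors_bulk
# above; the action_map pairs source fields with augur columns):
#   action_map = {
#       'insert': {'source': ['login'], 'augur': ['cntrb_login']},
#       'update': {'source': ['email'], 'augur': ['cntrb_email']}
#   }
#   source = self.paginate_endpoint(contributors_url, action_map=action_map,
#                                   table=self.contributors_table)
#   # source['insert'], source['update'], and source['all'] are then available.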
#TODO: deprecated but still used by many other methods
def paginate(self, url, duplicate_col_map, update_col_map, table, table_pkey, where_clause="", value_update_col_map={}, platform="github"):
""" DEPRECATED
Paginate either backwards or forwards (depending on the value of the worker's
finishing_task attribute) through all the GitHub or GitLab api endpoint pages.
:param url: String, the url of the API endpoint we are paginating through, expects
a curly brace string formatter within the string to format the Integer
representing the page number that is wanted to be returned
:param duplicate_col_map: Dictionary, maps the column names of the source data
to the field names in our database for columns that should be checked for
duplicates (if source data value == value in existing database row, then this
element is a duplicate and would not need an insertion). Key is source data
column name, value is database field name. Example: {'id': 'gh_issue_id'}
:param update_col_map: Dictionary, maps the column names of the source data
to the field names in our database for columns that should be checked for
updates (if source data value != value in existing database row, then an
update is needed). Key is source data column name, value is database field name.
Example: {'id': 'gh_issue_id'}
:param table: String, the name of the table that holds the values to check for
duplicates/updates against
:param table_pkey: String, the field name of the primary key of the table in
the database that we are getting the values for to cross-reference to check
for duplicates.
:param where_clause: String, optional where clause to filter the values
that are queried when preparing the values that will be cross-referenced
for duplicates/updates
:param value_update_col_map: Dictionary, sometimes we add a new field to a table,
and we want to trigger an update of that row in the database even if all of the
data values are the same and would not need an update ordinarily. Checking for
a specific existing value in the database field allows us to do this. The key is the
name of the field in the database we are checking for a specific value to trigger
an update, the value is the value we are checking for equality to trigger an update.
Example: {'cntrb_id': None}
:return: List of dictionaries, all data points from the pages of the specified API endpoint
each with a 'flag' key-value pair representing the required action to take with that
data point (i.e. 'need_insertion', 'need_update', 'none')
"""
update_keys = list(update_col_map.keys()) if update_col_map else []
update_keys += list(value_update_col_map.keys()) if value_update_col_map else []
cols_to_query = list(duplicate_col_map.keys()) + update_keys + [table_pkey]
table_values = self.get_table_values(cols_to_query, [table], where_clause)
i = 1
multiple_pages = False
tuples = []
while True:
num_attempts = 0
success = False
while num_attempts < 3:
self.logger.info(f'Hitting endpoint: {url.format(i)}...\n')
r = requests.get(url=url.format(i), headers=self.headers)
self.update_rate_limit(r, platform=platform)
if 'last' not in r.links:
last_page = None
else:
if platform == "github":
last_page = r.links['last']['url'][-6:].split('=')[1]
elif platform == "gitlab":
last_page = r.links['last']['url'].split('&')[2].split("=")[1]
self.logger.info("Analyzing page {} of {}\n".format(i, int(last_page) + 1 if last_page is not None else '*last page not known*'))
try:
j = r.json()
except:
j = json.loads(json.dumps(r.text))
if type(j) != dict and type(j) != str:
success = True
break
elif type(j) == dict:
self.logger.info("Request returned a dict: {}\n".format(j))
if j['message'] == 'Not Found':
self.logger.warning("Github repo was not found or does not exist for endpoint: {}\n".format(url))
break
if j['message'] == 'You have triggered an abuse detection mechanism. Please wait a few minutes before you try again.':
num_attempts -= 1
self.logger.info("rate limit update code goes here")
self.update_rate_limit(r, temporarily_disable=True,platform=platform)
if j['message'] == 'Bad credentials':
self.logger.info("rate limit update code goes here")
self.update_rate_limit(r, bad_credentials=True, platform=platform)
elif type(j) == str:
self.logger.info(f'J was string: {j}\n')
if '<!DOCTYPE html>' in j:
self.logger.info('HTML was returned, trying again...\n')
elif len(j) == 0:
self.logger.warning('Empty string, trying again...\n')
else:
try:
j = json.loads(j)
success = True
break
except:
pass
num_attempts += 1
if not success:
break
# Find last page so we can decrement from there
if 'last' in r.links and not multiple_pages and not self.finishing_task:
if platform == "github":
param = r.links['last']['url'][-6:]
i = int(param.split('=')[1]) + 1
elif platform == "gitlab":
i = int(r.links['last']['url'].split('&')[2].split("=")[1]) + 1
self.logger.info("Multiple pages of request, last page is " + str(i - 1) + "\n")
multiple_pages = True
elif not multiple_pages and not self.finishing_task:
self.logger.info("Only 1 page of request\n")
elif self.finishing_task:
self.logger.info("Finishing a previous task, paginating forwards ..."
" excess rate limit requests will be made\n")
if len(j) == 0:
self.logger.info("Response was empty, breaking from pagination.\n")
break
# Checking contents of requests with what we already have in the db
j = self.assign_tuple_action(j, table_values, update_col_map, duplicate_col_map, table_pkey, value_update_col_map)
if not j:
self.logger.error("Assigning tuple action failed, moving to next page.\n")
i = i + 1 if self.finishing_task else i - 1
continue
try:
to_add = [obj for obj in j if obj not in tuples and (obj['flag'] != 'none')]
except Exception as e:
self.logger.error("Failure accessing data of page: {}. Moving to next page.\n".format(e))
i = i + 1 if self.finishing_task else i - 1
continue
if len(to_add) == 0 and multiple_pages and 'last' in r.links:
self.logger.info("{}".format(r.links['last']))
if platform == "github":
page_number = int(r.links['last']['url'][-6:].split('=')[1])
elif platform == "gitlab":
page_number = int(r.links['last']['url'].split('&')[2].split("=")[1])
if i - 1 != page_number:
self.logger.info("No more pages with unknown tuples, breaking from pagination.\n")
break
tuples += to_add
i = i + 1 if self.finishing_task else i - 1
# Since we already would've checked the first page... break
if (i == 1 and multiple_pages and not self.finishing_task) or i < 1 or len(j) == 0:
self.logger.info("No more pages to check, breaking from pagination.\n")
break
return tuples
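# Hedged usage sketch of the deprecated paginate() (mirrors query_github_contributors
# above):
#   contributors = self.paginate(contributors_url, {'cntrb_login': 'login'},
#                                {'cntrb_email': 'email'}, 'contributors', 'cntrb_id')
# Each returned dict carries a 'flag' key describing the action needed, per the docstring above.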
def new_paginate_endpoint(
self, url, action_map={}, table=None, where_clause=True, platform='github'
):
page_number = 1
multiple_pages = False
need_insertion = []
need_update = []
all_data = []
forward_pagination = True
backwards_activation = False
last_page_number = -1
while True:
# Multiple attempts to hit endpoint
num_attempts = 0
success = False
while num_attempts < 10:
self.logger.info("hitting an endpiont")
# f"Hitting endpoint: ...\n"
# f"{url.format(page_number)} on page number. \n")
try:
response = requests.get(url=url.format(page_number), headers=self.headers)
except TimeoutError as e:
self.logger.info("Request timed out. Sleeping 10 seconds and trying again...\n")
time.sleep(10)
continue
self.update_rate_limit(response, platform=platform)
try:
page_data = response.json()
except:
page_data = json.loads(json.dumps(response.text))
if type(page_data) == list:
success = True
break
elif type(page_data) == dict:
self.logger.info("Request returned a dict: {}\n".format(page_data))
if page_data['message'] == "Not Found":
self.logger.warning(
"Github repo was not found or does not exist for endpoint: "
f"{url.format(page_number)}\n"
)
break
if "You have triggered an abuse detection mechanism." in page_data['message']:
num_attempts -= 1
self.update_rate_limit(response, temporarily_disable=True,platform=platform)
if page_data['message'] == "Bad credentials":
self.update_rate_limit(response, bad_credentials=True, platform=platform)
elif type(page_data) == str:
self.logger.info(f"Warning! page_data was string: {page_data}\n")
if "<!DOCTYPE html>" in page_data:
self.logger.info("HTML was returned, trying again...\n")
elif len(page_data) == 0:
self.logger.warning("Empty string, trying again...\n")
else:
try:
page_data = json.loads(page_data)
success = True
break
except:
pass
num_attempts += 1
if not success:
break
# Success
# Determine if continued pagination is needed
if len(page_data) == 0:
self.logger.info("Response was empty, breaking from pagination.\n")
break
all_data += page_data
if not forward_pagination:
# Checking contents of requests with what we already have in the db
page_insertions, page_updates = self.new_organize_needed_data(
page_data, augur_table=table, action_map=action_map
)
# Reached a page where we already have all tuples
if len(need_insertion) == 0 and len(need_update) == 0 and \
backwards_activation:
self.logger.info(
"No more pages with unknown tuples, breaking from pagination.\n"
)
break
need_insertion += page_insertions
need_update += page_updates
# Find last page so we can decrement from there
if 'last' in response.links and last_page_number == -1:
if platform == 'github':
last_page_number = int(response.links['last']['url'][-6:].split('=')[1])
elif platform == 'gitlab':
last_page_number = int(response.links['last']['url'].split('&')[2].split('=')[1])
if not forward_pagination and not backwards_activation:
page_number = last_page_number
backwards_activation = True
self.logger.info("Analyzation of page {} of {} complete\n".format(page_number,
int(last_page_number) if last_page_number != -1 else "*last page not known*"))
if (page_number <= 1 and not forward_pagination) or \
(page_number >= last_page_number and forward_pagination):
self.logger.info("No more pages to check, breaking from pagination.\n")
break
page_number = page_number + 1 if forward_pagination else page_number - 1
if forward_pagination:
need_insertion, need_update = self.new_organize_needed_data(
all_data, augur_table=table, action_map=action_map
)
return {
'insert': need_insertion,
'update': need_update,
'all': all_data
}
|
#!/usr/bin/env python3
import os
from urllib.parse import urljoin
import requests
from requests.auth import HTTPBasicAuth
def Run(user, key, contribution):
repo_names_zup = [
"charlescd",
"charlescd-docs"
]
insights = []
contributors = []
base_url_zup = f"https://api.github.com/repos/ZupIT/"
print(f"🐙 Getting insights for ZupIT's repos:")
for repo in repo_names_zup:
print(f"\t- github.com/ZupIT/{repo}/")
repo_url_zup = urljoin(base_url_zup, repo + "/")
traffic = requests.get(
urljoin(repo_url_zup, "traffic/views",), auth=HTTPBasicAuth(user, key),
).json()
clones = requests.get(
urljoin(repo_url_zup, "traffic/clones",), auth=HTTPBasicAuth(user, key),
).json()
contributors = requests.get(
urljoin(repo_url_zup, "contributors",), auth=HTTPBasicAuth(user, key),
).json()
url = f"https://api.github.com/repos/ZupIT/{repo}"
repo_stats = requests.get(
url, auth=HTTPBasicAuth(user, key),
).json()
try:
clones = clones["count"]
except (IndexError, KeyError) :
clones = "-"
try:
forks = repo_stats["forks_count"]
except (IndexError, KeyError):
forks = "-"
try:
stars = repo_stats["stargazers_count"]
except (IndexError, KeyError):
stars = "-"
try:
watchers = repo_stats["subscribers_count"]
except (IndexError, KeyError):
watchers = "-"
try:
views = traffic["count"]
except (IndexError, KeyError):
views = "-"
try:
uniques = traffic["uniques"]
except (IndexError, KeyError):
uniques = "-"
insights.append(
{
"repo": repo,
"views": views,
"uniques": uniques,
"clones": clones,
"contributors": len(contributors),
"contributors_list": contributors,
"forks": forks,
"stars": stars,
"watchers": watchers,
}
)
print("\n-------------------------------------------------------------------------------------------------------")
print(f'{'Repository':25} {'Views':^10} {'Uniques':^10} {'Clones':^10} {'Contributors':^10} {'Forks':^10} {'Stars':^10} {'Watchers':^10}')
print("-------------------------------------------------------------------------------------------------------")
for insight in insights:
print(
f'{insight['repo']:25} {insight['views']:^10} {insight['uniques']:^10} {insight['clones']:^10} {insight['contributors']:^12} {insight['forks']:^10} {insight['stars']:^10} {insight['watchers']:^10}'
)
if contribution == "yes" :
print_contribution(insights, user, key)
def print_contribution(insights, user, key):
for insight in insights:
print(f"\nRepository: https://github.com/ZupIT/" + insight["repo"] + "/")
print("----------------------------------------------------------------------------------------------------------------------------")
print(f'{'Github ID':10} {'Username':^20} {'Name':^35} {'Email':^40} {'Contributions':^10}')
print("----------------------------------------------------------------------------------------------------------------------------")
try:
for contributor in insight["contributors_list"]:
get_contributor_details(user, key, contributor)
print(
f'{contributor['id']:^10} {contributor['login']:^20} {contributor['name']:^35} {contributor['email']:^40} {contributor['contributions']:^10}'
)
except(TypeError):
print("🚫 Sorry: We could't retrieve the contributors list for this repository...\n")
def get_contributor_details(user, key, contributor):
github_user = requests.get(
('https://api.github.com/users/%s' % (contributor["login"])), auth=HTTPBasicAuth(user, key),
).json()
if "message" in github_user and github_user["message"] == "Not Found":
print ("Github User does not exist.")
else:
contributor["email"] = github_user["email"]
contributor["name"] = github_user["name"]
if contributor["email"] is None or contributor["name"] is None:
events = requests.get(
('https://api.github.com/users/%s/events?per_page=100' % (contributor["login"])), auth=HTTPBasicAuth(user, key),
).json()
if contributor["name"] is None:
contributor["name"] = get_name(events, contributor["login"])
if contributor["email"] is None:
contributor["email"] = get_email(events, contributor["login"], contributor["name"])
def get_email(events, login, name):
email = "-"
found_email = False
for event in events:
if not found_email and event["type"] == "PushEvent" and event["payload"] is not None:
payload = event["payload"]
for commit in payload["commits"]:
if not found_email and commit["author"] is not None:
author = commit["author"]
if not found_email and author["name"] in login and "github" not in author["email"]:
email = author["email"]
found_email = True
if not found_email and author["name"] in name and "github" not in author["email"]:
email = author["email"]
found_email = True
if not found_email and name.split()[0].lower() in author["name"] and "github" not in author["email"]:
email = author["email"] + " *" # The * represents an email that is related but not necessary from this user account.
return email
def get_name(events, login):
name = "-"
found_name = False
for event in events:
if not found_name and event["type"] == "PushEvent" and event["actor"]is not None and event["payload"] is not None:
actor = event["actor"]
if actor["login"] == login:
payload = event["payload"]
if len(payload["commits"]) == 1:
for commit in payload["commits"]:
if not found_name and commit["author"] is not None:
author = commit["author"]
if not found_name and author["email"] is not None and "github" not in author["email"]:
name = author["name"]
found_name = True
return name
| #!/usr/bin/env python3
import os
from urllib.parse import urljoin
import requests
from requests.auth import HTTPBasicAuth
def Run(user, key, contribution):
repo_names_zup = [
"charlescd",
"charlescd-docs"
]
insights = []
contributors = []
base_url_zup = f"https://api.github.com/repos/ZupIT/"
print(f"🐙 Getting insights for ZupIT's repos:")
for repo in repo_names_zup:
print(f"\t- github.com/ZupIT/{repo}/")
repo_url_zup = urljoin(base_url_zup, repo + "/")
traffic = requests.get(
urljoin(repo_url_zup, "traffic/views",), auth=HTTPBasicAuth(user, key),
).json()
clones = requests.get(
urljoin(repo_url_zup, "traffic/clones",), auth=HTTPBasicAuth(user, key),
).json()
contributors = requests.get(
urljoin(repo_url_zup, "contributors",), auth=HTTPBasicAuth(user, key),
).json()
url = f"https://api.github.com/repos/ZupIT/{repo}"
repo_stats = requests.get(
url, auth=HTTPBasicAuth(user, key),
).json()
try:
clones = clones["count"]
except (IndexError, KeyError) :
clones = "-"
try:
forks = repo_stats["forks_count"]
except (IndexError, KeyError):
forks = "-"
try:
stars = repo_stats["stargazers_count"]
except (IndexError, KeyError):
stars = "-"
try:
watchers = repo_stats["subscribers_count"]
except (IndexError, KeyError):
watchers = "-"
try:
views = traffic["count"]
except (IndexError, KeyError):
views = "-"
try:
uniques = traffic["uniques"]
except (IndexError, KeyError):
uniques = "-"
insights.append(
{
"repo": repo,
"views": views,
"uniques": uniques,
"clones": clones,
"contributors": len(contributors),
"contributors_list": contributors,
"forks": forks,
"stars": stars,
"watchers": watchers,
}
)
print("\n-------------------------------------------------------------------------------------------------------")
print(f'{"Repository":25} {"Views":^10} {"Uniques":^10} {"Clones":^10} {"Contributors":^10} {"Forks":^10} {"Stars":^10} {"Watchers":^10}')
print("-------------------------------------------------------------------------------------------------------")
for insight in insights:
print(
f'{insight["repo"]:25} {insight["views"]:^10} {insight["uniques"]:^10} {insight["clones"]:^10} {insight["contributors"]:^12} {insight["forks"]:^10} {insight["stars"]:^10} {insight["watchers"]:^10}'
)
if contribution == "yes" :
print_contribution(insights, user, key)
def print_contribution(insights, user, key):
for insight in insights:
print(f"\nRepository: https://github.com/ZupIT/" + insight["repo"] + "/")
print("----------------------------------------------------------------------------------------------------------------------------")
print(f'{"Github ID":10} {"Username":^20} {"Name":^35} {"Email":^40} {"Contributions":^10}')
print("----------------------------------------------------------------------------------------------------------------------------")
try:
for contributor in insight["contributors_list"]:
get_contributor_details(user, key, contributor)
print(
f'{contributor["id"]:^10} {contributor["login"]:^20} {contributor["name"]:^35} {contributor["email"]:^40} {contributor["contributions"]:^10}'
)
except(TypeError):
print("🚫 Sorry: We could't retrieve the contributors list for this repository...\n")
def get_contributor_details(user, key, contributor):
github_user = requests.get(
('https://api.github.com/users/%s' % (contributor["login"])), auth=HTTPBasicAuth(user, key),
).json()
if "message" in github_user and github_user["message"] == "Not Found":
print ("Github User does not exist.")
else:
contributor["email"] = github_user["email"]
contributor["name"] = github_user["name"]
if contributor["email"] is None or contributor["name"] is None:
events = requests.get(
('https://api.github.com/users/%s/events?per_page=100' % (contributor["login"])), auth=HTTPBasicAuth(user, key),
).json()
if contributor["name"] is None:
contributor["name"] = get_name(events, contributor["login"])
if contributor["email"] is None:
contributor["email"] = get_email(events, contributor["login"], contributor["name"])
def get_email(events, login, name):
email = "-"
found_email = False
for event in events:
if not found_email and event["type"] == "PushEvent" and event["payload"] is not None:
payload = event["payload"]
for commit in payload["commits"]:
if not found_email and commit["author"] is not None:
author = commit["author"]
if not found_email and author["name"] in login and "github" not in author["email"]:
email = author["email"]
found_email = True
if not found_email and author["name"] in name and "github" not in author["email"]:
email = author["email"]
found_email = True
if not found_email and name.split()[0].lower() in author["name"] and "github" not in author["email"]:
email = author["email"] + " *" # The * represents an email that is related but not necessary from this user account.
return email
def get_name(events, login):
name = "-"
found_name = False
for event in events:
if not found_name and event["type"] == "PushEvent" and event["actor"]is not None and event["payload"] is not None:
actor = event["actor"]
if actor["login"] == login:
payload = event["payload"]
if len(payload["commits"]) == 1:
for commit in payload["commits"]:
if not found_name and commit["author"] is not None:
author = commit["author"]
if not found_name and author["email"] is not None and "github" not in author["email"]:
name = author["name"]
found_name = True
return name
|
import json
from ssl import SSLContext
from typing import Optional, Dict, Any, Union, List
import ast
import status
from urllib3 import Retry, HTTPResponse
from urllib3.exceptions import HTTPError, MaxRetryError
from urllib3.poolmanager import PoolManager
from urllib3.util import parse_url
from bxcommon import constants
from bxutils import log_messages
from bxutils import logging
from bxutils.encoding import json_encoder
# recursive types are not supported: https://github.com/python/typing/issues/182
JT = Union[Dict[str, Any], List[Any]]
logger = logging.get_logger(__name__)
_url = constants.SDN_ROOT_URL
_ssl_context: Optional[SSLContext] = None
METHODS_WHITELIST = frozenset(["HEAD", "GET", "PUT", "DELETE", "OPTIONS", "TRACE", "POST", "PATCH"])
def set_root_url(sdn_url: str):
# pylint: disable=global-statement
global _url
_url = sdn_url
def update_http_ssl_context(context: Optional[SSLContext] = None):
# pylint: disable=global-statement
global _ssl_context
_ssl_context = context
def ssl_context() -> Optional[SSLContext]:
return _ssl_context
def post_json(endpoint: str, payload=None) -> Optional[JT]:
return _http_request(
"POST", endpoint, body=json_encoder.to_json(payload), headers=constants.HTTP_HEADERS
)
def patch_json(endpoint: str, payload=None) -> Optional[JT]:
return _http_request(
"PATCH", endpoint, body=json_encoder.to_json(payload), headers=constants.HTTP_HEADERS
)
def delete_json(endpoint: str, payload=None) -> Optional[JT]:
return _http_request(
"DELETE", endpoint, body=json_encoder.to_json(payload), headers=constants.HTTP_HEADERS,
)
def get_json(endpoint: str) -> Optional[JT]:
return _http_request("GET", endpoint, headers=constants.HTTP_HEADERS)
def get_json_with_payload(endpoint: str, payload=None) -> Optional[JT]:
return _http_request(
"GET", endpoint, body=json_encoder.to_json(payload), headers=constants.HTTP_HEADERS
)
def build_url(endpoint: str) -> str:
if not endpoint or not isinstance(endpoint, str):
raise ValueError("Missing or invalid URL")
return _url + endpoint
def raise_for_status(res: HTTPResponse) -> None:
if status.is_client_error(res.status) or status.is_server_error(res.status):
raise HTTPError(f"{res.status}:{res.reason}. {ast.literal_eval(res.data.decode("utf-8"))}")
def _http_request(method: str, endpoint: str, **kwargs) -> Optional[JT]:
url = build_url(endpoint)
parsed_url = parse_url(url)
pm_args = {
"num_pools": constants.HTTP_POOL_MANAGER_COUNT,
"host": parsed_url.host,
"port": parsed_url.port,
"retries": Retry(
connect=constants.HTTP_REQUEST_RETRIES_COUNT,
read=constants.HTTP_REQUEST_RETRIES_COUNT,
redirect=constants.HTTP_REQUEST_RETRIES_COUNT,
backoff_factor=constants.HTTP_REQUEST_BACKOFF_FACTOR,
method_whitelist=METHODS_WHITELIST,
),
"ssl_context": _ssl_context,
}
if _ssl_context is not None and url.startswith("https"):
pm_args["assert_hostname"] = False
http_pool_manager: PoolManager = PoolManager(**pm_args)
try:
logger.trace("HTTP {0} to {1}", method, url)
response = http_pool_manager.request(
method=method, url=parsed_url.url, timeout=constants.HTTP_REQUEST_TIMEOUT, **kwargs
)
raise_for_status(response)
except MaxRetryError as e:
logger.info("{} to {} failed due to: {}.", method, url, e)
return None
except Exception as e: # pylint: disable=broad-except
logger.error(log_messages.HTTP_REQUEST_RETURNED_ERROR, method, url, e)
return None
return json.loads(response.data)
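# Hedged usage sketch for this module (the URL below is an assumption, not a real
# SDN endpoint):
#   set_root_url("https://sdn.example.com")
#   node = get_json("/nodes/1")   # parsed JSON on success, None on failure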
| import json
from ssl import SSLContext
from typing import Optional, Dict, Any, Union, List
import ast
import status
from urllib3 import Retry, HTTPResponse
from urllib3.exceptions import HTTPError, MaxRetryError
from urllib3.poolmanager import PoolManager
from urllib3.util import parse_url
from bxcommon import constants
from bxutils import log_messages
from bxutils import logging
from bxutils.encoding import json_encoder
# recursive types are not supported: https://github.com/python/typing/issues/182
JT = Union[Dict[str, Any], List[Any]]
logger = logging.get_logger(__name__)
_url = constants.SDN_ROOT_URL
_ssl_context: Optional[SSLContext] = None
METHODS_WHITELIST = frozenset(["HEAD", "GET", "PUT", "DELETE", "OPTIONS", "TRACE", "POST", "PATCH"])
def set_root_url(sdn_url: str):
# pylint: disable=global-statement
global _url
_url = sdn_url
def update_http_ssl_context(context: Optional[SSLContext] = None):
# pylint: disable=global-statement
global _ssl_context
_ssl_context = context
def ssl_context() -> Optional[SSLContext]:
return _ssl_context
def post_json(endpoint: str, payload=None) -> Optional[JT]:
return _http_request(
"POST", endpoint, body=json_encoder.to_json(payload), headers=constants.HTTP_HEADERS
)
def patch_json(endpoint: str, payload=None) -> Optional[JT]:
return _http_request(
"PATCH", endpoint, body=json_encoder.to_json(payload), headers=constants.HTTP_HEADERS
)
def delete_json(endpoint: str, payload=None) -> Optional[JT]:
return _http_request(
"DELETE", endpoint, body=json_encoder.to_json(payload), headers=constants.HTTP_HEADERS,
)
def get_json(endpoint: str) -> Optional[JT]:
return _http_request("GET", endpoint, headers=constants.HTTP_HEADERS)
def get_json_with_payload(endpoint: str, payload=None) -> Optional[JT]:
return _http_request(
"GET", endpoint, body=json_encoder.to_json(payload), headers=constants.HTTP_HEADERS
)
def build_url(endpoint: str) -> str:
if not endpoint or not isinstance(endpoint, str):
raise ValueError("Missing or invalid URL")
return _url + endpoint
def raise_for_status(res: HTTPResponse) -> None:
if status.is_client_error(res.status) or status.is_server_error(res.status):
raise HTTPError(f"{res.status}:{res.reason}. {ast.literal_eval(res.data.decode('utf-8'))}")
def _http_request(method: str, endpoint: str, **kwargs) -> Optional[JT]:
url = build_url(endpoint)
parsed_url = parse_url(url)
pm_args = {
"num_pools": constants.HTTP_POOL_MANAGER_COUNT,
"host": parsed_url.host,
"port": parsed_url.port,
"retries": Retry(
connect=constants.HTTP_REQUEST_RETRIES_COUNT,
read=constants.HTTP_REQUEST_RETRIES_COUNT,
redirect=constants.HTTP_REQUEST_RETRIES_COUNT,
backoff_factor=constants.HTTP_REQUEST_BACKOFF_FACTOR,
method_whitelist=METHODS_WHITELIST,
),
"ssl_context": _ssl_context,
}
if _ssl_context is not None and url.startswith("https"):
pm_args["assert_hostname"] = False
http_pool_manager: PoolManager = PoolManager(**pm_args)
try:
logger.trace("HTTP {0} to {1}", method, url)
response = http_pool_manager.request(
method=method, url=parsed_url.url, timeout=constants.HTTP_REQUEST_TIMEOUT, **kwargs
)
raise_for_status(response)
except MaxRetryError as e:
logger.info("{} to {} failed due to: {}.", method, url, e)
return None
except Exception as e: # pylint: disable=broad-except
logger.error(log_messages.HTTP_REQUEST_RETURNED_ERROR, method, url, e)
return None
return json.loads(response.data)
|
import asyncio
import os
import math
from typing import Optional, Dict, List, Tuple
import aiohttp
from PIL import Image, ImageDraw, ImageFont, ImageFilter
from src.libraries.maimaidx_music import total_list
scoreRank = 'D C B BB BBB A AA AAA S S+ SS SS+ SSS SSS+'.split(' ')
combo = ' FC FC+ AP AP+'.split(' ')
diffs = 'Basic Advanced Expert Master Re:Master'.split(' ')
class ChartInfo(object):
def __init__(self, idNum:str, diff:int, tp:str, achievement:float, ra:int, comboId:int, scoreId:int,
title:str, ds:float, lv:str):
self.idNum = idNum
self.diff = diff
self.tp = tp
self.achievement = achievement
self.ra = computeRa(ds,achievement)
self.comboId = comboId
self.scoreId = scoreId
self.title = title
self.ds = ds
self.lv = lv
def __str__(self):
return '%-50s' % f'{self.title} [{self.tp}]' + f'{self.ds}\t{diffs[self.diff]}\t{self.ra}'
def __eq__(self, other):
return self.ra == other.ra
def __lt__(self, other):
return self.ra < other.ra
@classmethod
def from_json(cls, data):
rate = ['d', 'c', 'b', 'bb', 'bbb', 'a', 'aa', 'aaa', 's', 'sp', 'ss', 'ssp', 'sss', 'sssp']
ri = rate.index(data["rate"])
fc = ['', 'fc', 'fcp', 'ap', 'app']
fi = fc.index(data["fc"])
return cls(
idNum=total_list.by_title(data["title"]).id,
title=data["title"],
diff=data["level_index"],
ra=data["ra"],
ds=data["ds"],
comboId=fi,
scoreId=ri,
lv=data["level"],
achievement=data["achievements"],
tp=data["type"]
)
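# Editorial note: ChartInfo.from_json consumes one chart record from the
# diving-fish player-data response. The keys read above are: title,
# level_index, ra, ds, rate, fc, level, achievements, type. An illustrative
# record (values are made up) looks like:
#   {"title": "...", "level_index": 3, "ra": 272, "ds": 13.2, "rate": "sss",
#    "fc": "fc", "level": "13+", "achievements": 100.1234, "type": "DX"}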
class BestList(object):
def __init__(self, size:int):
self.data = []
self.size = size
def push(self, elem:ChartInfo):
if len(self.data) >= self.size and elem < self.data[-1]:
return
self.data.append(elem)
self.data.sort()
self.data.reverse()
while(len(self.data) > self.size):
del self.data[-1]
def pop(self):
del self.data[-1]
def __str__(self):
return '[\n\t' + ', \n\t'.join([str(ci) for ci in self.data]) + '\n]'
def __len__(self):
return len(self.data)
def __getitem__(self, index):
return self.data[index]
class DrawBest(object):
def __init__(self, sdBest:BestList, dxBest:BestList, userName:str):
self.sdBest = sdBest
self.dxBest = dxBest
self.userName = self._stringQ2B(userName)
self.sdRating = 0
self.dxRating = 0
for sd in sdBest:
self.sdRating += computeRa(sd.ds, sd.achievement)
for dx in dxBest:
self.dxRating += computeRa(dx.ds, dx.achievement)
self.playerRating = self.sdRating + self.dxRating
self.pic_dir = 'src/static/mai/pic/'
self.cover_dir = 'src/static/mai/cover/'
self.img = Image.open(self.pic_dir + 'UI_TTR_BG_Base_Plus.png').convert('RGBA')
self.ROWS_IMG = [2]
for i in range(6):
self.ROWS_IMG.append(116 + 96 * i)
self.COLOUMS_IMG = []
for i in range(8):
self.COLOUMS_IMG.append(2 + 138 * i)
for i in range(4):
self.COLOUMS_IMG.append(988 + 138 * i)
self.draw()
    def _Q2B(self, uchar):
        """Convert a single full-width character to its half-width equivalent."""
inside_code = ord(uchar)
if inside_code == 0x3000:
inside_code = 0x0020
else:
inside_code -= 0xfee0
        if inside_code < 0x0020 or inside_code > 0x7e:  # not a half-width character after conversion; return the original character
return uchar
return chr(inside_code)
    def _stringQ2B(self, ustring):
        """Convert all full-width characters in a string to half-width."""
return "".join([self._Q2B(uchar) for uchar in ustring])
def _getCharWidth(self, o) -> int:
widths = [
(126, 1), (159, 0), (687, 1), (710, 0), (711, 1), (727, 0), (733, 1), (879, 0), (1154, 1), (1161, 0),
(4347, 1), (4447, 2), (7467, 1), (7521, 0), (8369, 1), (8426, 0), (9000, 1), (9002, 2), (11021, 1),
(12350, 2), (12351, 1), (12438, 2), (12442, 0), (19893, 2), (19967, 1), (55203, 2), (63743, 1),
(64106, 2), (65039, 1), (65059, 0), (65131, 2), (65279, 1), (65376, 2), (65500, 1), (65510, 2),
(120831, 1), (262141, 2), (1114109, 1),
]
if o == 0xe or o == 0xf:
return 0
for num, wid in widths:
if o <= num:
return wid
return 1
def _coloumWidth(self, s:str):
res = 0
for ch in s:
res += self._getCharWidth(ord(ch))
return res
def _changeColumnWidth(self, s:str, len:int) -> str:
res = 0
sList = []
for ch in s:
res += self._getCharWidth(ord(ch))
if res <= len:
sList.append(ch)
return ''.join(sList)
def _resizePic(self, img:Image.Image, time:float):
return img.resize((int(img.size[0] * time), int(img.size[1] * time)))
def _findRaPic(self) -> str:
num = '10'
if self.playerRating < 1000:
num = '01'
elif self.playerRating < 2000:
num = '02'
elif self.playerRating < 4000:
num = '03'
elif self.playerRating < 7000:
num = '04'
elif self.playerRating < 10000:
num = '05'
elif self.playerRating < 12000:
num = '06'
elif self.playerRating < 13000:
num = '07'
elif self.playerRating < 14500:
num = '08'
elif self.playerRating < 15000:
num = '09'
return f'UI_CMN_DXRating_S_{num}.png'
def _drawRating(self, ratingBaseImg:Image.Image):
COLOUMS_RATING = [86, 100, 115, 130, 145]
theRa = self.playerRating
i = 4
while theRa:
digit = theRa % 10
theRa = theRa // 10
digitImg = Image.open(self.pic_dir + f'UI_NUM_Drating_{digit}.png').convert('RGBA')
digitImg = self._resizePic(digitImg, 0.6)
ratingBaseImg.paste(digitImg, (COLOUMS_RATING[i] - 2, 9), mask=digitImg.split()[3])
i = i - 1
return ratingBaseImg
def _drawBestList(self, img:Image.Image, sdBest:BestList, dxBest:BestList):
itemW = 131
itemH = 88
Color = [(69, 193, 36), (255, 186, 1), (255, 90, 102), (134, 49, 200), (217, 197, 233)]
levelTriagle = [(itemW, 0), (itemW - 27, 0), (itemW, 27)]
rankPic = 'D C B BB BBB A AA AAA S Sp SS SSp SSS SSSp'.split(' ')
comboPic = ' FC FCp AP APp'.split(' ')
imgDraw = ImageDraw.Draw(img)
titleFontName = 'src/static/adobe_simhei.otf'
for num in range(0, len(sdBest)):
i = num // 7
j = num % 7
chartInfo = sdBest[num]
pngPath = self.cover_dir + f'{chartInfo.idNum}.jpg'
if not os.path.exists(pngPath):
pngPath = self.cover_dir + '1000.png'
temp = Image.open(pngPath).convert('RGB')
temp = self._resizePic(temp, itemW / temp.size[0])
temp = temp.crop((0, (temp.size[1] - itemH) / 2, itemW, (temp.size[1] + itemH) / 2))
temp = temp.filter(ImageFilter.GaussianBlur(3))
temp = temp.point(lambda p: p * 0.72)
tempDraw = ImageDraw.Draw(temp)
tempDraw.polygon(levelTriagle, Color[chartInfo.diff])
font = ImageFont.truetype(titleFontName, 16, encoding='utf-8')
title = chartInfo.title
if self._coloumWidth(title) > 15:
title = self._changeColumnWidth(title, 12) + '...'
tempDraw.text((8, 8), title, 'white', font)
font = ImageFont.truetype(titleFontName, 12, encoding='utf-8')
            tempDraw.text((7, 28), f'{"%.4f" % chartInfo.achievement}%', 'white', font)
rankImg = Image.open(self.pic_dir + f'UI_GAM_Rank_{rankPic[chartInfo.scoreId]}.png').convert('RGBA')
rankImg = self._resizePic(rankImg, 0.3)
temp.paste(rankImg, (72, 28), rankImg.split()[3])
if chartInfo.comboId:
comboImg = Image.open(self.pic_dir + f'UI_MSS_MBase_Icon_{comboPic[chartInfo.comboId]}_S.png').convert('RGBA')
comboImg = self._resizePic(comboImg, 0.45)
temp.paste(comboImg, (103, 27), comboImg.split()[3])
font = ImageFont.truetype('src/static/adobe_simhei.otf', 12, encoding='utf-8')
tempDraw.text((8, 44), f'Base: {chartInfo.ds} -> {computeRa(chartInfo.ds, chartInfo.achievement)}', 'white', font)
font = ImageFont.truetype('src/static/adobe_simhei.otf', 18, encoding='utf-8')
tempDraw.text((8, 60), f'#{num + 1}', 'white', font)
recBase = Image.new('RGBA', (itemW, itemH), 'black')
recBase = recBase.point(lambda p: p * 0.8)
img.paste(recBase, (self.COLOUMS_IMG[j] + 5, self.ROWS_IMG[i + 1] + 5))
img.paste(temp, (self.COLOUMS_IMG[j] + 4, self.ROWS_IMG[i + 1] + 4))
for num in range(len(sdBest), sdBest.size):
i = num // 7
j = num % 7
temp = Image.open(self.cover_dir + f'1000.png').convert('RGB')
temp = self._resizePic(temp, itemW / temp.size[0])
temp = temp.crop((0, (temp.size[1] - itemH) / 2, itemW, (temp.size[1] + itemH) / 2))
temp = temp.filter(ImageFilter.GaussianBlur(1))
img.paste(temp, (self.COLOUMS_IMG[j] + 4, self.ROWS_IMG[i + 1] + 4))
for num in range(0, len(dxBest)):
i = num // 3
j = num % 3
chartInfo = dxBest[num]
pngPath = self.cover_dir + f'{int(chartInfo.idNum)}.jpg'
if not os.path.exists(pngPath):
pngPath = self.cover_dir + f'{int(chartInfo.idNum)}.png'
if not os.path.exists(pngPath):
pngPath = self.cover_dir + '1000.png'
temp = Image.open(pngPath).convert('RGB')
temp = self._resizePic(temp, itemW / temp.size[0])
temp = temp.crop((0, (temp.size[1] - itemH) / 2, itemW, (temp.size[1] + itemH) / 2))
temp = temp.filter(ImageFilter.GaussianBlur(3))
temp = temp.point(lambda p: p * 0.72)
tempDraw = ImageDraw.Draw(temp)
tempDraw.polygon(levelTriagle, Color[chartInfo.diff])
font = ImageFont.truetype(titleFontName, 14, encoding='utf-8')
title = chartInfo.title
if self._coloumWidth(title) > 13:
title = self._changeColumnWidth(title, 12) + '...'
tempDraw.text((8, 8), title, 'white', font)
font = ImageFont.truetype(titleFontName, 12, encoding='utf-8')
            tempDraw.text((7, 28), f'{"%.4f" % chartInfo.achievement}%', 'white', font)
rankImg = Image.open(self.pic_dir + f'UI_GAM_Rank_{rankPic[chartInfo.scoreId]}.png').convert('RGBA')
rankImg = self._resizePic(rankImg, 0.3)
temp.paste(rankImg, (72, 28), rankImg.split()[3])
if chartInfo.comboId:
comboImg = Image.open(self.pic_dir + f'UI_MSS_MBase_Icon_{comboPic[chartInfo.comboId]}_S.png').convert(
'RGBA')
comboImg = self._resizePic(comboImg, 0.45)
temp.paste(comboImg, (103, 27), comboImg.split()[3])
font = ImageFont.truetype('src/static/adobe_simhei.otf', 12, encoding='utf-8')
tempDraw.text((8, 44), f'Base: {chartInfo.ds} -> {chartInfo.ra}', 'white', font)
font = ImageFont.truetype('src/static/adobe_simhei.otf', 18, encoding='utf-8')
tempDraw.text((8, 60), f'#{num + 1}', 'white', font)
recBase = Image.new('RGBA', (itemW, itemH), 'black')
recBase = recBase.point(lambda p: p * 0.8)
img.paste(recBase, (self.COLOUMS_IMG[j + 8] + 5, self.ROWS_IMG[i + 1] + 5))
img.paste(temp, (self.COLOUMS_IMG[j + 8] + 4, self.ROWS_IMG[i + 1] + 4))
for num in range(len(dxBest), dxBest.size):
i = num // 3
j = num % 3
temp = Image.open(self.cover_dir + f'1000.png').convert('RGB')
temp = self._resizePic(temp, itemW / temp.size[0])
temp = temp.crop((0, (temp.size[1] - itemH) / 2, itemW, (temp.size[1] + itemH) / 2))
temp = temp.filter(ImageFilter.GaussianBlur(1))
img.paste(temp, (self.COLOUMS_IMG[j + 8] + 4, self.ROWS_IMG[i + 1] + 4))
def draw(self):
splashLogo = Image.open(self.pic_dir + 'UI_CMN_TabTitle_MaimaiTitle_Ver214.png').convert('RGBA')
splashLogo = self._resizePic(splashLogo, 0.65)
self.img.paste(splashLogo, (10, 10), mask=splashLogo.split()[3])
ratingBaseImg = Image.open(self.pic_dir + self._findRaPic()).convert('RGBA')
ratingBaseImg = self._drawRating(ratingBaseImg)
ratingBaseImg = self._resizePic(ratingBaseImg, 0.85)
self.img.paste(ratingBaseImg, (240, 8), mask=ratingBaseImg.split()[3])
namePlateImg = Image.open(self.pic_dir + 'UI_TST_PlateMask.png').convert('RGBA')
namePlateImg = namePlateImg.resize((285, 40))
namePlateDraw = ImageDraw.Draw(namePlateImg)
font1 = ImageFont.truetype('src/static/msyh.ttc', 28, encoding='unic')
namePlateDraw.text((12, 4), ' '.join(list(self.userName)), 'black', font1)
nameDxImg = Image.open(self.pic_dir + 'UI_CMN_Name_DX.png').convert('RGBA')
nameDxImg = self._resizePic(nameDxImg, 0.9)
namePlateImg.paste(nameDxImg, (230, 4), mask=nameDxImg.split()[3])
self.img.paste(namePlateImg, (240, 40), mask=namePlateImg.split()[3])
shougouImg = Image.open(self.pic_dir + 'UI_CMN_Shougou_Rainbow.png').convert('RGBA')
shougouDraw = ImageDraw.Draw(shougouImg)
font2 = ImageFont.truetype('src/static/adobe_simhei.otf', 14, encoding='utf-8')
playCountInfo = f'SD: {self.sdRating} + DX: {self.dxRating} = {self.playerRating}'
shougouImgW, shougouImgH = shougouImg.size
playCountInfoW, playCountInfoH = shougouDraw.textsize(playCountInfo, font2)
textPos = ((shougouImgW - playCountInfoW - font2.getoffset(playCountInfo)[0]) / 2, 5)
shougouDraw.text((textPos[0] - 1, textPos[1]), playCountInfo, 'black', font2)
shougouDraw.text((textPos[0] + 1, textPos[1]), playCountInfo, 'black', font2)
shougouDraw.text((textPos[0], textPos[1] - 1), playCountInfo, 'black', font2)
shougouDraw.text((textPos[0], textPos[1] + 1), playCountInfo, 'black', font2)
shougouDraw.text((textPos[0] - 1, textPos[1] - 1), playCountInfo, 'black', font2)
shougouDraw.text((textPos[0] + 1, textPos[1] - 1), playCountInfo, 'black', font2)
shougouDraw.text((textPos[0] - 1, textPos[1] + 1), playCountInfo, 'black', font2)
shougouDraw.text((textPos[0] + 1, textPos[1] + 1), playCountInfo, 'black', font2)
shougouDraw.text(textPos, playCountInfo, 'white', font2)
shougouImg = self._resizePic(shougouImg, 1.05)
self.img.paste(shougouImg, (240, 83), mask=shougouImg.split()[3])
self._drawBestList(self.img, self.sdBest, self.dxBest)
authorBoardImg = Image.open(self.pic_dir + 'UI_CMN_MiniDialog_01.png').convert('RGBA')
authorBoardImg = self._resizePic(authorBoardImg, 0.35)
authorBoardDraw = ImageDraw.Draw(authorBoardImg)
authorBoardDraw.text((31, 28), ' Generated By\nXybBot & Chiyuki', 'black', font2)
self.img.paste(authorBoardImg, (1224, 19), mask=authorBoardImg.split()[3])
dxImg = Image.open(self.pic_dir + 'UI_RSL_MBase_Parts_01.png').convert('RGBA')
self.img.paste(dxImg, (988, 65), mask=dxImg.split()[3])
sdImg = Image.open(self.pic_dir + 'UI_RSL_MBase_Parts_02.png').convert('RGBA')
self.img.paste(sdImg, (865, 65), mask=sdImg.split()[3])
# self.img.show()
def getDir(self):
return self.img
def computeRa(ds: float, achievement: float) -> int:
baseRa = 22.4
if achievement < 50:
baseRa = 7.0
elif achievement < 60:
baseRa = 8.0
elif achievement < 70:
baseRa = 9.6
elif achievement < 75:
baseRa = 11.2
elif achievement < 80:
baseRa = 12.0
elif achievement < 90:
baseRa = 13.6
elif achievement < 94:
baseRa = 15.2
elif achievement < 97:
baseRa = 16.8
elif achievement < 98:
baseRa = 20.0
elif achievement < 99:
baseRa = 20.3
elif achievement < 99.5:
baseRa = 20.8
elif achievement < 100:
baseRa = 21.1
elif achievement < 100.5:
baseRa = 21.6
return math.floor(ds * (min(100.5, achievement) / 100) * baseRa)
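# Worked example for computeRa (editorial): a 13.2 chart played at 99.1000%
# falls in the "achievement < 99.5" band, so baseRa = 20.8 and
# ra = floor(13.2 * 0.991 * 20.8) = floor(272.08896) = 272.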
async def generate50(payload: Dict) -> Tuple[Optional[Image.Image], int]:
async with aiohttp.request("POST", "https://www.diving-fish.com/api/maimaidxprober/query/player", json=payload) as resp:
if resp.status == 400:
return None, 400
if resp.status == 403:
return None, 403
sd_best = BestList(35)
dx_best = BestList(15)
obj = await resp.json()
dx: List[Dict] = obj["charts"]["dx"]
sd: List[Dict] = obj["charts"]["sd"]
for c in sd:
sd_best.push(ChartInfo.from_json(c))
for c in dx:
dx_best.push(ChartInfo.from_json(c))
pic = DrawBest(sd_best, dx_best, obj["nickname"]).getDir()
return pic, 0
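# --- Editorial usage sketch (not part of the original module) ----------------
# generate50 is a coroutine, so it must be driven by an event loop, e.g.
# asyncio.run(_example_render_best50()). The payload keys below are assumptions;
# the diving-fish prober API defines the real ones.
async def _example_render_best50() -> None:
    pic, code = await generate50({"username": "some_player", "b50": True})
    if code != 0 or pic is None:
        return  # non-zero codes are the HTTP statuses passed through above (400/403)
    pic.save("best50.png")  # Pillow image assembled by DrawBest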
| import asyncio
import os
import math
from typing import Optional, Dict, List, Tuple
import aiohttp
from PIL import Image, ImageDraw, ImageFont, ImageFilter
from src.libraries.maimaidx_music import total_list
scoreRank = 'D C B BB BBB A AA AAA S S+ SS SS+ SSS SSS+'.split(' ')
combo = ' FC FC+ AP AP+'.split(' ')
diffs = 'Basic Advanced Expert Master Re:Master'.split(' ')
class ChartInfo(object):
def __init__(self, idNum:str, diff:int, tp:str, achievement:float, ra:int, comboId:int, scoreId:int,
title:str, ds:float, lv:str):
self.idNum = idNum
self.diff = diff
self.tp = tp
self.achievement = achievement
self.ra = computeRa(ds,achievement)
self.comboId = comboId
self.scoreId = scoreId
self.title = title
self.ds = ds
self.lv = lv
def __str__(self):
return '%-50s' % f'{self.title} [{self.tp}]' + f'{self.ds}\t{diffs[self.diff]}\t{self.ra}'
def __eq__(self, other):
return self.ra == other.ra
def __lt__(self, other):
return self.ra < other.ra
@classmethod
def from_json(cls, data):
rate = ['d', 'c', 'b', 'bb', 'bbb', 'a', 'aa', 'aaa', 's', 'sp', 'ss', 'ssp', 'sss', 'sssp']
ri = rate.index(data["rate"])
fc = ['', 'fc', 'fcp', 'ap', 'app']
fi = fc.index(data["fc"])
return cls(
idNum=total_list.by_title(data["title"]).id,
title=data["title"],
diff=data["level_index"],
ra=data["ra"],
ds=data["ds"],
comboId=fi,
scoreId=ri,
lv=data["level"],
achievement=data["achievements"],
tp=data["type"]
)
class BestList(object):
def __init__(self, size:int):
self.data = []
self.size = size
def push(self, elem:ChartInfo):
if len(self.data) >= self.size and elem < self.data[-1]:
return
self.data.append(elem)
self.data.sort()
self.data.reverse()
while(len(self.data) > self.size):
del self.data[-1]
def pop(self):
del self.data[-1]
def __str__(self):
return '[\n\t' + ', \n\t'.join([str(ci) for ci in self.data]) + '\n]'
def __len__(self):
return len(self.data)
def __getitem__(self, index):
return self.data[index]
class DrawBest(object):
def __init__(self, sdBest:BestList, dxBest:BestList, userName:str):
self.sdBest = sdBest
self.dxBest = dxBest
self.userName = self._stringQ2B(userName)
self.sdRating = 0
self.dxRating = 0
for sd in sdBest:
self.sdRating += computeRa(sd.ds, sd.achievement)
for dx in dxBest:
self.dxRating += computeRa(dx.ds, dx.achievement)
self.playerRating = self.sdRating + self.dxRating
self.pic_dir = 'src/static/mai/pic/'
self.cover_dir = 'src/static/mai/cover/'
self.img = Image.open(self.pic_dir + 'UI_TTR_BG_Base_Plus.png').convert('RGBA')
self.ROWS_IMG = [2]
for i in range(6):
self.ROWS_IMG.append(116 + 96 * i)
self.COLOUMS_IMG = []
for i in range(8):
self.COLOUMS_IMG.append(2 + 138 * i)
for i in range(4):
self.COLOUMS_IMG.append(988 + 138 * i)
self.draw()
    def _Q2B(self, uchar):
        """Convert a single full-width character to its half-width equivalent."""
inside_code = ord(uchar)
if inside_code == 0x3000:
inside_code = 0x0020
else:
inside_code -= 0xfee0
        if inside_code < 0x0020 or inside_code > 0x7e:  # not a half-width character after conversion; return the original character
return uchar
return chr(inside_code)
    def _stringQ2B(self, ustring):
        """Convert all full-width characters in a string to half-width."""
return "".join([self._Q2B(uchar) for uchar in ustring])
def _getCharWidth(self, o) -> int:
widths = [
(126, 1), (159, 0), (687, 1), (710, 0), (711, 1), (727, 0), (733, 1), (879, 0), (1154, 1), (1161, 0),
(4347, 1), (4447, 2), (7467, 1), (7521, 0), (8369, 1), (8426, 0), (9000, 1), (9002, 2), (11021, 1),
(12350, 2), (12351, 1), (12438, 2), (12442, 0), (19893, 2), (19967, 1), (55203, 2), (63743, 1),
(64106, 2), (65039, 1), (65059, 0), (65131, 2), (65279, 1), (65376, 2), (65500, 1), (65510, 2),
(120831, 1), (262141, 2), (1114109, 1),
]
if o == 0xe or o == 0xf:
return 0
for num, wid in widths:
if o <= num:
return wid
return 1
def _coloumWidth(self, s:str):
res = 0
for ch in s:
res += self._getCharWidth(ord(ch))
return res
def _changeColumnWidth(self, s:str, len:int) -> str:
res = 0
sList = []
for ch in s:
res += self._getCharWidth(ord(ch))
if res <= len:
sList.append(ch)
return ''.join(sList)
def _resizePic(self, img:Image.Image, time:float):
return img.resize((int(img.size[0] * time), int(img.size[1] * time)))
def _findRaPic(self) -> str:
num = '10'
if self.playerRating < 1000:
num = '01'
elif self.playerRating < 2000:
num = '02'
elif self.playerRating < 4000:
num = '03'
elif self.playerRating < 7000:
num = '04'
elif self.playerRating < 10000:
num = '05'
elif self.playerRating < 12000:
num = '06'
elif self.playerRating < 13000:
num = '07'
elif self.playerRating < 14500:
num = '08'
elif self.playerRating < 15000:
num = '09'
return f'UI_CMN_DXRating_S_{num}.png'
def _drawRating(self, ratingBaseImg:Image.Image):
COLOUMS_RATING = [86, 100, 115, 130, 145]
theRa = self.playerRating
i = 4
while theRa:
digit = theRa % 10
theRa = theRa // 10
digitImg = Image.open(self.pic_dir + f'UI_NUM_Drating_{digit}.png').convert('RGBA')
digitImg = self._resizePic(digitImg, 0.6)
ratingBaseImg.paste(digitImg, (COLOUMS_RATING[i] - 2, 9), mask=digitImg.split()[3])
i = i - 1
return ratingBaseImg
def _drawBestList(self, img:Image.Image, sdBest:BestList, dxBest:BestList):
itemW = 131
itemH = 88
Color = [(69, 193, 36), (255, 186, 1), (255, 90, 102), (134, 49, 200), (217, 197, 233)]
levelTriagle = [(itemW, 0), (itemW - 27, 0), (itemW, 27)]
rankPic = 'D C B BB BBB A AA AAA S Sp SS SSp SSS SSSp'.split(' ')
comboPic = ' FC FCp AP APp'.split(' ')
imgDraw = ImageDraw.Draw(img)
titleFontName = 'src/static/adobe_simhei.otf'
for num in range(0, len(sdBest)):
i = num // 7
j = num % 7
chartInfo = sdBest[num]
pngPath = self.cover_dir + f'{chartInfo.idNum}.jpg'
if not os.path.exists(pngPath):
pngPath = self.cover_dir + '1000.png'
temp = Image.open(pngPath).convert('RGB')
temp = self._resizePic(temp, itemW / temp.size[0])
temp = temp.crop((0, (temp.size[1] - itemH) / 2, itemW, (temp.size[1] + itemH) / 2))
temp = temp.filter(ImageFilter.GaussianBlur(3))
temp = temp.point(lambda p: p * 0.72)
tempDraw = ImageDraw.Draw(temp)
tempDraw.polygon(levelTriagle, Color[chartInfo.diff])
font = ImageFont.truetype(titleFontName, 16, encoding='utf-8')
title = chartInfo.title
if self._coloumWidth(title) > 15:
title = self._changeColumnWidth(title, 12) + '...'
tempDraw.text((8, 8), title, 'white', font)
font = ImageFont.truetype(titleFontName, 12, encoding='utf-8')
tempDraw.text((7, 28), f'{"%.4f" % chartInfo.achievement}%', 'white', font)
rankImg = Image.open(self.pic_dir + f'UI_GAM_Rank_{rankPic[chartInfo.scoreId]}.png').convert('RGBA')
rankImg = self._resizePic(rankImg, 0.3)
temp.paste(rankImg, (72, 28), rankImg.split()[3])
if chartInfo.comboId:
comboImg = Image.open(self.pic_dir + f'UI_MSS_MBase_Icon_{comboPic[chartInfo.comboId]}_S.png').convert('RGBA')
comboImg = self._resizePic(comboImg, 0.45)
temp.paste(comboImg, (103, 27), comboImg.split()[3])
font = ImageFont.truetype('src/static/adobe_simhei.otf', 12, encoding='utf-8')
tempDraw.text((8, 44), f'Base: {chartInfo.ds} -> {computeRa(chartInfo.ds, chartInfo.achievement)}', 'white', font)
font = ImageFont.truetype('src/static/adobe_simhei.otf', 18, encoding='utf-8')
tempDraw.text((8, 60), f'#{num + 1}', 'white', font)
recBase = Image.new('RGBA', (itemW, itemH), 'black')
recBase = recBase.point(lambda p: p * 0.8)
img.paste(recBase, (self.COLOUMS_IMG[j] + 5, self.ROWS_IMG[i + 1] + 5))
img.paste(temp, (self.COLOUMS_IMG[j] + 4, self.ROWS_IMG[i + 1] + 4))
for num in range(len(sdBest), sdBest.size):
i = num // 7
j = num % 7
temp = Image.open(self.cover_dir + f'1000.png').convert('RGB')
temp = self._resizePic(temp, itemW / temp.size[0])
temp = temp.crop((0, (temp.size[1] - itemH) / 2, itemW, (temp.size[1] + itemH) / 2))
temp = temp.filter(ImageFilter.GaussianBlur(1))
img.paste(temp, (self.COLOUMS_IMG[j] + 4, self.ROWS_IMG[i + 1] + 4))
for num in range(0, len(dxBest)):
i = num // 3
j = num % 3
chartInfo = dxBest[num]
pngPath = self.cover_dir + f'{int(chartInfo.idNum)}.jpg'
if not os.path.exists(pngPath):
pngPath = self.cover_dir + f'{int(chartInfo.idNum)}.png'
if not os.path.exists(pngPath):
pngPath = self.cover_dir + '1000.png'
temp = Image.open(pngPath).convert('RGB')
temp = self._resizePic(temp, itemW / temp.size[0])
temp = temp.crop((0, (temp.size[1] - itemH) / 2, itemW, (temp.size[1] + itemH) / 2))
temp = temp.filter(ImageFilter.GaussianBlur(3))
temp = temp.point(lambda p: p * 0.72)
tempDraw = ImageDraw.Draw(temp)
tempDraw.polygon(levelTriagle, Color[chartInfo.diff])
font = ImageFont.truetype(titleFontName, 14, encoding='utf-8')
title = chartInfo.title
if self._coloumWidth(title) > 13:
title = self._changeColumnWidth(title, 12) + '...'
tempDraw.text((8, 8), title, 'white', font)
font = ImageFont.truetype(titleFontName, 12, encoding='utf-8')
tempDraw.text((7, 28), f'{"%.4f" % chartInfo.achievement}%', 'white', font)
rankImg = Image.open(self.pic_dir + f'UI_GAM_Rank_{rankPic[chartInfo.scoreId]}.png').convert('RGBA')
rankImg = self._resizePic(rankImg, 0.3)
temp.paste(rankImg, (72, 28), rankImg.split()[3])
if chartInfo.comboId:
comboImg = Image.open(self.pic_dir + f'UI_MSS_MBase_Icon_{comboPic[chartInfo.comboId]}_S.png').convert(
'RGBA')
comboImg = self._resizePic(comboImg, 0.45)
temp.paste(comboImg, (103, 27), comboImg.split()[3])
font = ImageFont.truetype('src/static/adobe_simhei.otf', 12, encoding='utf-8')
tempDraw.text((8, 44), f'Base: {chartInfo.ds} -> {chartInfo.ra}', 'white', font)
font = ImageFont.truetype('src/static/adobe_simhei.otf', 18, encoding='utf-8')
tempDraw.text((8, 60), f'#{num + 1}', 'white', font)
recBase = Image.new('RGBA', (itemW, itemH), 'black')
recBase = recBase.point(lambda p: p * 0.8)
img.paste(recBase, (self.COLOUMS_IMG[j + 8] + 5, self.ROWS_IMG[i + 1] + 5))
img.paste(temp, (self.COLOUMS_IMG[j + 8] + 4, self.ROWS_IMG[i + 1] + 4))
for num in range(len(dxBest), dxBest.size):
i = num // 3
j = num % 3
temp = Image.open(self.cover_dir + f'1000.png').convert('RGB')
temp = self._resizePic(temp, itemW / temp.size[0])
temp = temp.crop((0, (temp.size[1] - itemH) / 2, itemW, (temp.size[1] + itemH) / 2))
temp = temp.filter(ImageFilter.GaussianBlur(1))
img.paste(temp, (self.COLOUMS_IMG[j + 8] + 4, self.ROWS_IMG[i + 1] + 4))
def draw(self):
splashLogo = Image.open(self.pic_dir + 'UI_CMN_TabTitle_MaimaiTitle_Ver214.png').convert('RGBA')
splashLogo = self._resizePic(splashLogo, 0.65)
self.img.paste(splashLogo, (10, 10), mask=splashLogo.split()[3])
ratingBaseImg = Image.open(self.pic_dir + self._findRaPic()).convert('RGBA')
ratingBaseImg = self._drawRating(ratingBaseImg)
ratingBaseImg = self._resizePic(ratingBaseImg, 0.85)
self.img.paste(ratingBaseImg, (240, 8), mask=ratingBaseImg.split()[3])
namePlateImg = Image.open(self.pic_dir + 'UI_TST_PlateMask.png').convert('RGBA')
namePlateImg = namePlateImg.resize((285, 40))
namePlateDraw = ImageDraw.Draw(namePlateImg)
font1 = ImageFont.truetype('src/static/msyh.ttc', 28, encoding='unic')
namePlateDraw.text((12, 4), ' '.join(list(self.userName)), 'black', font1)
nameDxImg = Image.open(self.pic_dir + 'UI_CMN_Name_DX.png').convert('RGBA')
nameDxImg = self._resizePic(nameDxImg, 0.9)
namePlateImg.paste(nameDxImg, (230, 4), mask=nameDxImg.split()[3])
self.img.paste(namePlateImg, (240, 40), mask=namePlateImg.split()[3])
shougouImg = Image.open(self.pic_dir + 'UI_CMN_Shougou_Rainbow.png').convert('RGBA')
shougouDraw = ImageDraw.Draw(shougouImg)
font2 = ImageFont.truetype('src/static/adobe_simhei.otf', 14, encoding='utf-8')
playCountInfo = f'SD: {self.sdRating} + DX: {self.dxRating} = {self.playerRating}'
shougouImgW, shougouImgH = shougouImg.size
playCountInfoW, playCountInfoH = shougouDraw.textsize(playCountInfo, font2)
textPos = ((shougouImgW - playCountInfoW - font2.getoffset(playCountInfo)[0]) / 2, 5)
shougouDraw.text((textPos[0] - 1, textPos[1]), playCountInfo, 'black', font2)
shougouDraw.text((textPos[0] + 1, textPos[1]), playCountInfo, 'black', font2)
shougouDraw.text((textPos[0], textPos[1] - 1), playCountInfo, 'black', font2)
shougouDraw.text((textPos[0], textPos[1] + 1), playCountInfo, 'black', font2)
shougouDraw.text((textPos[0] - 1, textPos[1] - 1), playCountInfo, 'black', font2)
shougouDraw.text((textPos[0] + 1, textPos[1] - 1), playCountInfo, 'black', font2)
shougouDraw.text((textPos[0] - 1, textPos[1] + 1), playCountInfo, 'black', font2)
shougouDraw.text((textPos[0] + 1, textPos[1] + 1), playCountInfo, 'black', font2)
shougouDraw.text(textPos, playCountInfo, 'white', font2)
shougouImg = self._resizePic(shougouImg, 1.05)
self.img.paste(shougouImg, (240, 83), mask=shougouImg.split()[3])
self._drawBestList(self.img, self.sdBest, self.dxBest)
authorBoardImg = Image.open(self.pic_dir + 'UI_CMN_MiniDialog_01.png').convert('RGBA')
authorBoardImg = self._resizePic(authorBoardImg, 0.35)
authorBoardDraw = ImageDraw.Draw(authorBoardImg)
authorBoardDraw.text((31, 28), ' Generated By\nXybBot & Chiyuki', 'black', font2)
self.img.paste(authorBoardImg, (1224, 19), mask=authorBoardImg.split()[3])
dxImg = Image.open(self.pic_dir + 'UI_RSL_MBase_Parts_01.png').convert('RGBA')
self.img.paste(dxImg, (988, 65), mask=dxImg.split()[3])
sdImg = Image.open(self.pic_dir + 'UI_RSL_MBase_Parts_02.png').convert('RGBA')
self.img.paste(sdImg, (865, 65), mask=sdImg.split()[3])
# self.img.show()
def getDir(self):
return self.img
def computeRa(ds: float, achievement: float) -> int:
baseRa = 22.4
if achievement < 50:
baseRa = 7.0
elif achievement < 60:
baseRa = 8.0
elif achievement < 70:
baseRa = 9.6
elif achievement < 75:
baseRa = 11.2
elif achievement < 80:
baseRa = 12.0
elif achievement < 90:
baseRa = 13.6
elif achievement < 94:
baseRa = 15.2
elif achievement < 97:
baseRa = 16.8
elif achievement < 98:
baseRa = 20.0
elif achievement < 99:
baseRa = 20.3
elif achievement < 99.5:
baseRa = 20.8
elif achievement < 100:
baseRa = 21.1
elif achievement < 100.5:
baseRa = 21.6
return math.floor(ds * (min(100.5, achievement) / 100) * baseRa)
async def generate50(payload: Dict) -> Tuple[Optional[Image.Image], int]:
async with aiohttp.request("POST", "https://www.diving-fish.com/api/maimaidxprober/query/player", json=payload) as resp:
if resp.status == 400:
return None, 400
if resp.status == 403:
return None, 403
sd_best = BestList(35)
dx_best = BestList(15)
obj = await resp.json()
dx: List[Dict] = obj["charts"]["dx"]
sd: List[Dict] = obj["charts"]["sd"]
for c in sd:
sd_best.push(ChartInfo.from_json(c))
for c in dx:
dx_best.push(ChartInfo.from_json(c))
pic = DrawBest(sd_best, dx_best, obj["nickname"]).getDir()
return pic, 0
|
import pytest
from mock import MagicMock
from zmon_worker_monitor.zmon_worker.common.http import get_user_agent
from zmon_worker_monitor.zmon_worker.notifications.slack import NotifySlack, NotificationError
URL = 'http://slack-webhook'
HEADERS = {
'User-agent': get_user_agent(),
'Content-type': 'application/json',
}
def test_slack_notification(monkeypatch):
post = MagicMock()
monkeypatch.setattr('requests.post', post)
alert = {'changed': True, 'is_alert': True, 'alert_def': {'id': 123, 'name': 'alert'}, 'entity': {'id': 'e-1'}}
NotifySlack._config = {'notifications.slack.webhook': URL}
r = NotifySlack.notify(alert, message='ALERT')
data = {
'username': 'ZMON',
'channel': '#general',
'text': 'ALERT',
'icon_emoji': ':bar_chart:',
}
assert r == 0
post.assert_called_with(URL, json=data, headers=HEADERS, timeout=5)
def test_slack_notification_url_error(monkeypatch):
alert = {'changed': True, 'is_alert': True, 'alert_def': {'id': 123, 'name': 'alert'}, 'entity': {'id': 'e-1'}}
NotifySlack._config = {}
with pytest.raises(NotificationError):
NotifySlack.notify(alert, message='ALERT')
def test_slack_notification_error(monkeypatch):
post = MagicMock()
post.side_effect = Exception
monkeypatch.setattr('requests.post', post)
alert = {'changed': True, 'is_alert': True, 'alert_def': {'id': 123, 'name': 'alert'}, 'entity': {'id': 'e-1'}}
NotifySlack._config = {'notifications.slack.webhook': URL}
r = NotifySlack.notify(alert, message='ALERT')
assert r == 0
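# Editorial note: the tests above replace requests.post with a MagicMock via
# monkeypatch, so no network traffic is generated. They document that
# NotifySlack.notify() returns 0 both on success and when the POST raises,
# and raises NotificationError only when no webhook URL is configured.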
| import pytest
from mock import MagicMock
from zmon_worker_monitor.zmon_worker.common.http import get_user_agent
from zmon_worker_monitor.zmon_worker.notifications.slack import NotifySlack, NotificationError
URL = 'http://slack-webhook'
HEADERS = {
'User-agent': get_user_agent(),
'Content-type': 'application/json',
}
def test_slack_notification(monkeypatch):
post = MagicMock()
monkeypatch.setattr('requests.post', post)
alert = {'changed': True, 'is_alert': True, 'alert_def': {'id': 123, 'name': 'alert'}, 'entity': {'id': 'e-1'}}
NotifySlack._config = {'notifications.slack.webhook': URL}
r = NotifySlack.notify(alert, message='ALERT')
data = {
'username': 'ZMON',
'channel': '#general',
'text': 'ALERT',
'icon_emoji': ':bar_chart:',
}
assert r == 0
post.assert_called_with(URL, json=data, headers=HEADERS, timeout=5)
def test_slack_notification_url_error(monkeypatch):
alert = {'changed': True, 'is_alert': True, 'alert_def': {'id': 123, 'name': 'alert'}, 'entity': {'id': 'e-1'}}
NotifySlack._config = {}
with pytest.raises(NotificationError):
NotifySlack.notify(alert, message='ALERT')
def test_slack_notification_error(monkeypatch):
post = MagicMock()
post.side_effect = Exception
monkeypatch.setattr('requests.post', post)
alert = {'changed': True, 'is_alert': True, 'alert_def': {'id': 123, 'name': 'alert'}, 'entity': {'id': 'e-1'}}
NotifySlack._config = {'notifications.slack.webhook': URL}
r = NotifySlack.notify(alert, message='ALERT')
assert r == 0
|
import logging
import tkinter
from tkinter import ttk, font as font, messagebox, colorchooser, _setit  # colorchooser must be imported explicitly for tkinter.colorchooser.askcolor below
from typing import Union, List, Tuple, Dict, Mapping
import mouse
import lifxlan
import win32api
from lifxlan import (
ORANGE,
YELLOW,
GREEN,
CYAN,
BLUE,
PURPLE,
PINK,
WHITE,
COLD_WHITE,
WARM_WHITE,
GOLD,
)
from lifx_control_panel import RED, FRAME_PERIOD_MS
from lifx_control_panel.ui.colorscale import ColorScale
from lifx_control_panel.ui.settings import config
from lifx_control_panel.utilities import color_thread
from lifx_control_panel.utilities.color_thread import getScreenAsImage, normalizeRects
from lifx_control_panel.utilities.utils import (
Color,
tuple2hex,
HSBKtoRGB,
hueToRGB,
kelvinToRGB,
get_primary_monitor,
str2list,
str2tuple,
getDisplayRects,
)
MAX_KELVIN_DEFAULT = 9000
MIN_KELVIN_DEFAULT = 1500
class LightFrame(ttk.Labelframe): # pylint: disable=too-many-ancestors
""" Holds control and state information about a single device. """
label: str
target: Union[lifxlan.Group, lifxlan.Device]
###
screen_region_lf: ttk.LabelFrame
screen_region_entries: Dict[str, tkinter.Entry]
avg_screen_btn: tkinter.Button
dominant_screen_btn: tkinter.Button
music_button: tkinter.Button
preset_colors_lf: ttk.LabelFrame
color_var: tkinter.StringVar
default_colors: Mapping[str, Color]
preset_dropdown: tkinter.OptionMenu
uservar: tkinter.StringVar
user_dropdown: tkinter.OptionMenu
current_color: tkinter.Canvas
hsbk: Tuple[tkinter.IntVar, tkinter.IntVar, tkinter.IntVar, tkinter.IntVar]
hsbk_labels: Tuple[tkinter.Label, tkinter.Label, tkinter.Label, tkinter.Label]
hsbk_scale: Tuple[ColorScale, ColorScale, ColorScale, ColorScale]
hsbk_display: Tuple[tkinter.Canvas, tkinter.Canvas, tkinter.Canvas, tkinter.Canvas]
threads: Dict[str, color_thread.ColorThreadRunner]
powervar: tkinter.BooleanVar
option_on: tkinter.Radiobutton
option_off: tkinter.Radiobutton
logger: logging.Logger
min_kelvin: int = MIN_KELVIN_DEFAULT
max_kelvin: int = MAX_KELVIN_DEFAULT
def __init__(self, master, target: Union[lifxlan.Group, lifxlan.Light]):
super().__init__(
master,
padding="3 3 12 12",
labelwidget=tkinter.Label(
master,
text="<LABEL_ERR>",
font=font.Font(size=12),
fg="#0046d5",
relief=tkinter.RIDGE,
),
)
self.icon_update_flag: bool = True
# Initialize LightFrames
bulb_power, init_color = self.get_light_info(target)
# Reconfigure label with correct name
self.configure(
labelwidget=tkinter.Label(
master,
text=self.label,
font=font.Font(size=12),
fg="#0046d5",
relief=tkinter.RIDGE,
)
)
self.grid(column=1, row=0, sticky=(tkinter.N, tkinter.W, tkinter.E, tkinter.S))
self.columnconfigure(0, weight=1)
self.rowconfigure(0, weight=1)
self.target = target
# Setup logger
self.setup_logger()
# Initialize vars to hold on/off state
self.setup_power_controls(bulb_power)
# Initialize vars to hold and display bulb color
self.setup_color_controls(init_color)
# Add buttons for pre-made colors
self.setup_color_dropdowns()
# Add buttons for special routines
self.special_functions_lf = ttk.LabelFrame(
self, text="Special Functions", padding="3 3 12 12"
)
####
self.setup_special_functions()
####
# Add custom screen region (real ugly)
self.setup_screen_region_select()
# Start update loop
self.update_status_from_bulb()
def get_light_info(
self, target: Union[lifxlan.Group, lifxlan.Light, lifxlan.MultiZoneLight]
) -> Tuple[int, Color]:
bulb_power: int = 0
init_color: Color = Color(*lifxlan.WARM_WHITE)
try:
self.label = target.get_label()
bulb_power = target.get_power()
if target.supports_multizone():
init_color = Color(*target.get_color_zones()[0])
else:
init_color = Color(*target.get_color())
self.min_kelvin = (
target.product_features.get("min_kelvin") or MIN_KELVIN_DEFAULT
)
self.max_kelvin = (
target.product_features.get("max_kelvin") or MAX_KELVIN_DEFAULT
)
except lifxlan.WorkflowException as exc:
messagebox.showerror(
"Error building {}".format(self.__class__.__name__),
"Error thrown when trying to get label from bulb:\n{}".format(exc),
)
self.master.on_closing()
# TODO Let this fail safely and try again later
return bulb_power, init_color
def setup_screen_region_select(self):
self.screen_region_lf = ttk.LabelFrame(
self, text="Screen Avg. Region", padding="3 3 12 12"
)
self.screen_region_entries = {
"left": tkinter.Entry(self.screen_region_lf, width=6),
"width": tkinter.Entry(self.screen_region_lf, width=6),
"top": tkinter.Entry(self.screen_region_lf, width=6),
"height": tkinter.Entry(self.screen_region_lf, width=6),
}
region = config["AverageColor"][
self.label
if self.label in config["AverageColor"].keys()
else "defaultmonitor"
]
if region == "full":
region = ["full"] * 4
elif region[:19] == "get_primary_monitor":
region = get_primary_monitor()
else:
region = str2list(region, int)
self.screen_region_entries["left"].insert(tkinter.END, region[0])
self.screen_region_entries["top"].insert(tkinter.END, region[1])
self.screen_region_entries["width"].insert(tkinter.END, region[2])
self.screen_region_entries["height"].insert(tkinter.END, region[3])
tkinter.Label(self.screen_region_lf, text="left").grid(
row=7, column=0, sticky="e"
)
self.screen_region_entries["left"].grid(row=7, column=1, padx=(0, 10))
tkinter.Label(self.screen_region_lf, text="width").grid(row=7, column=2)
self.screen_region_entries["width"].grid(row=7, column=3)
tkinter.Label(self.screen_region_lf, text="top").grid(
row=8, column=0, sticky="e"
)
self.screen_region_entries["top"].grid(row=8, column=1, padx=(0, 10))
tkinter.Label(self.screen_region_lf, text="height").grid(row=8, column=2)
self.screen_region_entries["height"].grid(row=8, column=3)
tkinter.Button(
self.screen_region_lf, text="Save", command=self.save_monitor_bounds
).grid(row=9, column=1, sticky="w")
self.screen_region_lf.grid(row=7, columnspan=4)
def setup_special_functions(self):
# Screen Avg.
self.threads["screen"] = color_thread.ColorThreadRunner(
self.target,
color_thread.avg_screen_color,
self,
func_bounds=self.get_monitor_bounds,
)
def start_screen_avg():
""" Allow the screen avg. to be run in a separate thread. Also turns button green while running. """
self.avg_screen_btn.config(bg="Green")
self.threads["screen"].start()
self.avg_screen_btn = tkinter.Button(
self.special_functions_lf,
text="Avg. Screen Color",
command=start_screen_avg,
)
self.avg_screen_btn.grid(row=6, column=0)
tkinter.Button(
self.special_functions_lf,
text="Pick Color",
command=self.get_color_from_palette,
).grid(row=6, column=1)
# Screen Dominant
self.threads["dominant"] = color_thread.ColorThreadRunner(
self.target,
color_thread.dominant_screen_color,
self,
func_bounds=self.get_monitor_bounds,
)
def start_screen_dominant():
self.dominant_screen_btn.config(bg="Green")
self.threads["dominant"].start()
self.dominant_screen_btn = tkinter.Button(
self.special_functions_lf,
text="Dominant Screen Color",
command=start_screen_dominant,
)
self.dominant_screen_btn.grid(row=7, column=0)
# Audio
self.threads["audio"] = color_thread.ColorThreadRunner(
self.target, self.master.audio_interface.get_music_color, self
)
def start_audio():
""" Allow the audio to be run in a separate thread. Also turns button green while running. """
self.music_button.config(bg="Green")
self.threads["audio"].start()
self.music_button = tkinter.Button(
self.special_functions_lf,
text="Music Color",
command=start_audio,
state="normal" if self.master.audio_interface.initialized else "disabled",
)
self.music_button.grid(row=8, column=0)
self.threads["eyedropper"] = color_thread.ColorThreadRunner(
self.target, self.eyedropper, self, continuous=False
)
tkinter.Button(
self.special_functions_lf,
text="Color Eyedropper",
command=self.threads["eyedropper"].start,
).grid(row=7, column=1)
tkinter.Button(
self.special_functions_lf, text="Stop effects", command=self.stop_threads
).grid(row=8, column=1)
self.special_functions_lf.grid(row=6, columnspan=4)
def setup_color_dropdowns(self):
self.preset_colors_lf = ttk.LabelFrame(
self, text="Preset Colors", padding="3 3 12 12"
)
self.color_var = tkinter.StringVar(self, value="Presets")
self.default_colors = {
"RED": RED,
"ORANGE": ORANGE,
"YELLOW": YELLOW,
"GREEN": GREEN,
"CYAN": CYAN,
"BLUE": BLUE,
"PURPLE": PURPLE,
"PINK": PINK,
"WHITE": WHITE,
"COLD_WHITE": COLD_WHITE,
"WARM_WHITE": WARM_WHITE,
"GOLD": GOLD,
}
self.preset_dropdown = tkinter.OptionMenu(
self.preset_colors_lf, self.color_var, *self.default_colors
)
self.preset_dropdown.grid(row=0, column=0)
self.preset_dropdown.configure(width=13)
self.color_var.trace("w", self.change_preset_dropdown)
self.uservar = tkinter.StringVar(self, value="User Presets")
self.user_dropdown = tkinter.OptionMenu(
self.preset_colors_lf,
self.uservar,
*(
[*config["PresetColors"].keys()]
if any(config["PresetColors"].keys())
else [None]
),
)
self.user_dropdown.grid(row=0, column=1)
self.user_dropdown.config(width=13)
self.uservar.trace("w", self.change_user_dropdown)
self.preset_colors_lf.grid(row=5, columnspan=4)
def setup_color_controls(self, init_color):
self.logger.info("Initial light color HSBK: %s", init_color)
self.current_color = tkinter.Canvas(
self,
background=tuple2hex(HSBKtoRGB(init_color)),
width=40,
height=20,
borderwidth=3,
relief=tkinter.GROOVE,
)
self.current_color.grid(row=0, column=2)
self.hsbk = (
tkinter.IntVar(self, init_color.hue, "Hue"),
tkinter.IntVar(self, init_color.saturation, "Saturation"),
tkinter.IntVar(self, init_color.brightness, "Brightness"),
tkinter.IntVar(self, init_color.kelvin, "Kelvin"),
)
for i in self.hsbk:
i.trace("w", self.trigger_icon_update)
self.hsbk_labels: Tuple[
tkinter.Label, tkinter.Label, tkinter.Label, tkinter.Label
] = (
tkinter.Label(self, text="%.3g" % (360 * (self.hsbk[0].get() / 65535))),
tkinter.Label(
self, text=str("%.3g" % (100 * self.hsbk[1].get() / 65535)) + "%"
),
tkinter.Label(
self, text=str("%.3g" % (100 * self.hsbk[2].get() / 65535)) + "%"
),
tkinter.Label(self, text=str(self.hsbk[3].get()) + " K"),
)
self.hsbk_scale: Tuple[ColorScale, ColorScale, ColorScale, ColorScale] = (
ColorScale(
self,
to=65535.0,
variable=self.hsbk[0],
command=self.update_color_from_ui,
),
ColorScale(
self,
from_=0,
to=65535,
variable=self.hsbk[1],
command=self.update_color_from_ui,
gradient="wb",
),
ColorScale(
self,
from_=0,
to=65535,
variable=self.hsbk[2],
command=self.update_color_from_ui,
gradient="bw",
),
ColorScale(
self,
from_=self.min_kelvin,
to=self.max_kelvin,
variable=self.hsbk[3],
command=self.update_color_from_ui,
gradient="kelvin",
),
)
relief = tkinter.GROOVE
self.hsbk_display: Tuple[
tkinter.Canvas, tkinter.Canvas, tkinter.Canvas, tkinter.Canvas
] = (
tkinter.Canvas(
self,
background=tuple2hex(hueToRGB(360 * (init_color.hue / 65535))),
width=20,
height=20,
borderwidth=3,
relief=relief,
),
tkinter.Canvas(
self,
background=tuple2hex(
(
int(255 * (init_color.saturation / 65535)),
int(255 * (init_color.saturation / 65535)),
int(255 * (init_color.saturation / 65535)),
)
),
width=20,
height=20,
borderwidth=3,
relief=relief,
),
tkinter.Canvas(
self,
background=tuple2hex(
(
int(255 * (init_color.brightness / 65535)),
int(255 * (init_color.brightness / 65535)),
int(255 * (init_color.brightness / 65535)),
)
),
width=20,
height=20,
borderwidth=3,
relief=relief,
),
tkinter.Canvas(
self,
background=tuple2hex(kelvinToRGB(init_color.kelvin)),
width=20,
height=20,
borderwidth=3,
relief=relief,
),
)
scale: ColorScale
for key, scale in enumerate(self.hsbk_scale):
tkinter.Label(self, text=self.hsbk[key]).grid(row=key + 1, column=0)
scale.grid(row=key + 1, column=1)
self.hsbk_labels[key].grid(row=key + 1, column=2)
self.hsbk_display[key].grid(row=key + 1, column=3)
self.threads: Dict[str, color_thread.ColorThreadRunner] = {}
def setup_power_controls(self, bulb_power):
self.powervar = tkinter.BooleanVar(self)
self.powervar.set(bulb_power)
self.option_on = tkinter.Radiobutton(
self,
text="On",
variable=self.powervar,
value=65535,
command=self.update_power,
)
self.option_off = tkinter.Radiobutton(
self, text="Off", variable=self.powervar, value=0, command=self.update_power
)
if self.powervar.get() == 0:
# Light is off
self.option_off.select()
self.option_on.selection_clear()
else:
self.option_on.select()
self.option_off.selection_clear()
self.option_on.grid(row=0, column=0)
self.option_off.grid(row=0, column=1)
def setup_logger(self):
self.logger = logging.getLogger(
self.master.logger.name
+ "."
+ self.__class__.__name__
+ "({})".format(self.label)
)
self.logger.setLevel(logging.DEBUG)
self.logger.info(
"%s logger initialized: %s // Device: %s",
self.__class__.__name__,
self.logger.name,
self.label,
)
def restart(self):
""" Get updated information for the bulb when clicked. """
self.update_status_from_bulb()
self.logger.info("Light frame Restarted.")
def get_label(self):
""" Getter method for the label attribute. Often is monkey-patched. """
return self.label
def trigger_icon_update(self, *_, **__):
""" Just sets a flag for now. Could be more advanced in the future. """
self.icon_update_flag = True
def get_color_values_hsbk(self):
""" Get color values entered into GUI"""
return Color(*tuple(v.get() for v in self.hsbk))
def stop_threads(self):
""" Stop all ColorRunner threads """
self.music_button.config(bg="SystemButtonFace")
self.avg_screen_btn.config(bg="SystemButtonFace")
self.dominant_screen_btn.config(bg="SystemButtonFace")
for thread in self.threads.values():
thread.stop()
def update_power(self):
""" Send new power state to bulb when UI is changed. """
self.stop_threads()
self.target.set_power(self.powervar.get())
def update_color_from_ui(self, *_, **__):
""" Send new color state to bulb when UI is changed. """
self.stop_threads()
self.set_color(self.get_color_values_hsbk(), rapid=True)
def set_color(self, color, rapid=False):
""" Should be called whenever the bulb wants to change color. Sends bulb command and updates UI accordingly. """
self.stop_threads()
try:
self.target.set_color(
color,
duration=0
if rapid
else float(config["AverageColor"]["duration"]) * 1000,
rapid=rapid,
)
except lifxlan.WorkflowException as exc:
if rapid: # If we're going fast we don't care if we miss a packet.
pass
else:
raise exc
if not rapid:
self.logger.debug(
"Color changed to HSBK: %s", color
) # Don't pollute log with rapid color changes
def update_label(self, key: int):
""" Update scale labels, formatted accordingly. """
return [
self.hsbk_labels[0].config(
text=str("%.3g" % (360 * (self.hsbk[0].get() / 65535)))
),
self.hsbk_labels[1].config(
text=str("%.3g" % (100 * (self.hsbk[1].get() / 65535))) + "%"
),
self.hsbk_labels[2].config(
text=str("%.3g" % (100 * (self.hsbk[2].get() / 65535))) + "%"
),
self.hsbk_labels[3].config(text=str(self.hsbk[3].get()) + " K"),
][key]
def update_display(self, key):
""" Update color swatches to match current device state """
h, s, b, k = self.get_color_values_hsbk() # pylint: disable=invalid-name
if key == 0:
self.hsbk_display[0].config(
background=tuple2hex(hueToRGB(360 * (h / 65535)))
)
elif key == 1:
s = 65535 - s # pylint: disable=invalid-name
self.hsbk_display[1].config(
background=tuple2hex(
(
int(255 * (s / 65535)),
int(255 * (s / 65535)),
int(255 * (s / 65535)),
)
)
)
elif key == 2:
self.hsbk_display[2].config(
background=tuple2hex(
(
int(255 * (b / 65535)),
int(255 * (b / 65535)),
int(255 * (b / 65535)),
)
)
)
elif key == 3:
self.hsbk_display[3].config(background=tuple2hex(kelvinToRGB(k)))
def get_color_from_palette(self):
""" Asks users for color selection using standard color palette dialog. """
color = tkinter.colorchooser.askcolor(
initialcolor=HSBKtoRGB(self.get_color_values_hsbk())
)[0]
if color:
# RGBtoHBSK sometimes returns >65535, so we have to truncate
hsbk = [min(c, 65535) for c in lifxlan.RGBtoHSBK(color, self.hsbk[3].get())]
self.set_color(hsbk)
self.logger.info("Color set to HSBK %s from palette.", hsbk)
def update_status_from_bulb(self, run_once=False):
"""
Periodically update status from the bulb to keep UI in sync.
:param run_once: Don't call `after` statement at end. Keeps a million workers from being instanced.
"""
require_icon_update = False
if not self.master.bulb_interface.power_queue[self.label].empty():
power = self.master.bulb_interface.power_queue[self.label].get()
require_icon_update = True
self.powervar.set(power)
if self.powervar.get() == 0:
# Light is off
self.option_off.select()
self.option_on.selection_clear()
else:
self.option_on.select()
self.option_off.selection_clear()
if not self.master.bulb_interface.color_queue[self.label].empty():
hsbk = self.master.bulb_interface.color_queue[self.label].get()
require_icon_update = True
for key, _ in enumerate(self.hsbk):
self.hsbk[key].set(hsbk[key])
self.update_label(key)
self.update_display(key)
self.current_color.config(background=tuple2hex(HSBKtoRGB(hsbk)))
if require_icon_update:
self.trigger_icon_update()
if not run_once:
self.after(FRAME_PERIOD_MS, self.update_status_from_bulb)
def eyedropper(self, *_, **__):
""" Allows user to select a color pixel from the screen. """
self.master.master.withdraw() # Hide window
state_left = win32api.GetKeyState(
0x01
        )  # Left mouse button down = 0 or 1; button up = -127 or -128
        while True:
            action = win32api.GetKeyState(0x01)
            if action != state_left:  # mouse button state changed
                state_left = action
                if action < 0:  # button pressed down
                    pass
                else:  # button released
                    break
            lifxlan.sleep(0.001)
        # button released: grab the screen and sample the pixel under the cursor
screen_img = getScreenAsImage()
cursor_pos = mouse.get_position()
# Convert display coords to image coords
cursor_pos = normalizeRects(
getDisplayRects() + [(cursor_pos[0], cursor_pos[1], 0, 0)]
)[-1][:2]
color = screen_img.getpixel(cursor_pos)
self.master.master.deiconify() # Reshow window
self.logger.info("Eyedropper color found RGB %s", color)
return lifxlan.RGBtoHSBK(color, temperature=self.get_color_values_hsbk().kelvin)
def change_preset_dropdown(self, *_, **__):
""" Change device color to selected preset option. """
color = Color(*globals()[self.color_var.get()])
self.preset_dropdown.config(
bg=tuple2hex(HSBKtoRGB(color)), activebackground=tuple2hex(HSBKtoRGB(color))
)
self.set_color(color, False)
def change_user_dropdown(self, *_, **__):
""" Change device color to selected user-defined option. """
color = str2tuple(config["PresetColors"][self.uservar.get()], int)
self.user_dropdown.config(
bg=tuple2hex(HSBKtoRGB(color)), activebackground=tuple2hex(HSBKtoRGB(color))
)
self.set_color(color, rapid=False)
def update_user_dropdown(self):
""" Add newly defined color to the user color dropdown menu. """
# self.uservar.set('')
self.user_dropdown["menu"].delete(0, "end")
for choice in config["PresetColors"]:
self.user_dropdown["menu"].add_command(
label=choice, command=_setit(self.uservar, choice)
)
def get_monitor_bounds(self):
""" Return the 4 rectangle coordinates from the entry boxes in the UI """
        return (
            f"[{self.screen_region_entries['left'].get()}, {self.screen_region_entries['top'].get()}, "
            f"{self.screen_region_entries['width'].get()}, {self.screen_region_entries['height'].get()}]"
        )
def save_monitor_bounds(self):
""" Write monitor bounds entered in the UI into the config file. """
config["AverageColor"][self.label] = self.get_monitor_bounds()
# Write to config file
with open("config.ini", "w") as cfg:
config.write(cfg)
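# Editorial sketch: LightFrame reads two sections of config.ini. The keys below
# are the ones referenced above; the concrete values are illustrative
# assumptions only.
#
#   [AverageColor]
#   defaultmonitor = [0, 0, 1920, 1080]   # or "full", or "get_primary_monitor"
#   duration = 0.5                        # multiplied by 1000 in set_color()
#
#   [PresetColors]
#   Desk = (0, 0, 65535, 3500)            # parsed by str2tuple into an HSBK tuple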
class GroupFrame(LightFrame):
def get_light_info(self, target) -> Tuple[int, Color]:
bulb_power: int = 0
init_color: Color = Color(*lifxlan.WARM_WHITE)
try:
devices: List[
Union[lifxlan.Group, lifxlan.Light, lifxlan.MultiZoneLight]
] = target.get_device_list()
if len(devices) == 0:
logging.error("No devices found in group list")
self.label = "<No Group Found>"
self.min_kelvin, self.max_kelvin = 0, 99999 # arbitrary range
return 0, Color(0, 0, 0, 0)
self.label = devices[0].get_group_label()
bulb_power = devices[0].get_power()
# Find an init_color- ensure device has color attribute, otherwise fallback
color_devices: List[
Union[lifxlan.Group, lifxlan.Light, lifxlan.MultiZoneLight]
] = list(filter(lambda d: d.supports_color(), devices))
if len(color_devices) > 0 and hasattr(color_devices[0], "get_color"):
init_color = Color(*color_devices[0].get_color())
self.min_kelvin = min(
[
device.product_features.get("min_kelvin") or MIN_KELVIN_DEFAULT
for device in target.get_device_list()
]
)
self.max_kelvin = max(
[
device.product_features.get("max_kelvin") or MAX_KELVIN_DEFAULT
for device in target.get_device_list()
]
)
except lifxlan.WorkflowException as exc:
messagebox.showerror(
"Error building {}".format(self.__class__.__name__),
"Error thrown when trying to get label from bulb:\n{}".format(exc),
)
self.master.on_closing()
# TODO Let this fail safely and try again later
return bulb_power, init_color
def update_status_from_bulb(self, run_once=False):
return
class MultiZoneFrame(LightFrame):
pass
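# --- Editorial sketch (not part of the original module) ----------------------
# The scale labels above map raw 16-bit HSBK values to human-readable units.
# The same conversion as a standalone helper, for reference:
def _hsbk_to_display(hsbk: Tuple[int, int, int, int]) -> Tuple[float, float, float, int]:
    hue, saturation, brightness, kelvin = hsbk
    return (
        360 * (hue / 65535),         # hue in degrees on the color wheel
        100 * (saturation / 65535),  # saturation as a percentage
        100 * (brightness / 65535),  # brightness as a percentage
        kelvin,                      # kelvin is already in Kelvin
    )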
| import logging
import tkinter
from tkinter import ttk, font as font, messagebox, _setit
from typing import Union, List, Tuple, Dict, Mapping
import mouse
import lifxlan
import win32api
from lifxlan import (
ORANGE,
YELLOW,
GREEN,
CYAN,
BLUE,
PURPLE,
PINK,
WHITE,
COLD_WHITE,
WARM_WHITE,
GOLD,
)
from lifx_control_panel import RED, FRAME_PERIOD_MS
from lifx_control_panel.ui.colorscale import ColorScale
from lifx_control_panel.ui.settings import config
from lifx_control_panel.utilities import color_thread
from lifx_control_panel.utilities.color_thread import getScreenAsImage, normalizeRects
from lifx_control_panel.utilities.utils import (
Color,
tuple2hex,
HSBKtoRGB,
hueToRGB,
kelvinToRGB,
get_primary_monitor,
str2list,
str2tuple,
getDisplayRects,
)
MAX_KELVIN_DEFAULT = 9000
MIN_KELVIN_DEFAULT = 1500
class LightFrame(ttk.Labelframe): # pylint: disable=too-many-ancestors
""" Holds control and state information about a single device. """
label: str
target: Union[lifxlan.Group, lifxlan.Device]
###
screen_region_lf: ttk.LabelFrame
screen_region_entries: Dict[str, tkinter.Entry]
avg_screen_btn: tkinter.Button
dominant_screen_btn: tkinter.Button
music_button: tkinter.Button
preset_colors_lf: ttk.LabelFrame
color_var: tkinter.StringVar
default_colors: Mapping[str, Color]
preset_dropdown: tkinter.OptionMenu
uservar: tkinter.StringVar
user_dropdown: tkinter.OptionMenu
current_color: tkinter.Canvas
hsbk: Tuple[tkinter.IntVar, tkinter.IntVar, tkinter.IntVar, tkinter.IntVar]
hsbk_labels: Tuple[tkinter.Label, tkinter.Label, tkinter.Label, tkinter.Label]
hsbk_scale: Tuple[ColorScale, ColorScale, ColorScale, ColorScale]
hsbk_display: Tuple[tkinter.Canvas, tkinter.Canvas, tkinter.Canvas, tkinter.Canvas]
threads: Dict[str, color_thread.ColorThreadRunner]
powervar: tkinter.BooleanVar
option_on: tkinter.Radiobutton
option_off: tkinter.Radiobutton
logger: logging.Logger
min_kelvin: int = MIN_KELVIN_DEFAULT
max_kelvin: int = MAX_KELVIN_DEFAULT
def __init__(self, master, target: Union[lifxlan.Group, lifxlan.Light]):
super().__init__(
master,
padding="3 3 12 12",
labelwidget=tkinter.Label(
master,
text="<LABEL_ERR>",
font=font.Font(size=12),
fg="#0046d5",
relief=tkinter.RIDGE,
),
)
self.icon_update_flag: bool = True
# Initialize LightFrames
bulb_power, init_color = self.get_light_info(target)
# Reconfigure label with correct name
self.configure(
labelwidget=tkinter.Label(
master,
text=self.label,
font=font.Font(size=12),
fg="#0046d5",
relief=tkinter.RIDGE,
)
)
self.grid(column=1, row=0, sticky=(tkinter.N, tkinter.W, tkinter.E, tkinter.S))
self.columnconfigure(0, weight=1)
self.rowconfigure(0, weight=1)
self.target = target
# Setup logger
self.setup_logger()
# Initialize vars to hold on/off state
self.setup_power_controls(bulb_power)
# Initialize vars to hold and display bulb color
self.setup_color_controls(init_color)
# Add buttons for pre-made colors
self.setup_color_dropdowns()
# Add buttons for special routines
self.special_functions_lf = ttk.LabelFrame(
self, text="Special Functions", padding="3 3 12 12"
)
####
self.setup_special_functions()
####
# Add custom screen region (real ugly)
self.setup_screen_region_select()
# Start update loop
self.update_status_from_bulb()
def get_light_info(
self, target: Union[lifxlan.Group, lifxlan.Light, lifxlan.MultiZoneLight]
) -> Tuple[int, Color]:
bulb_power: int = 0
init_color: Color = Color(*lifxlan.WARM_WHITE)
try:
self.label = target.get_label()
bulb_power = target.get_power()
if target.supports_multizone():
init_color = Color(*target.get_color_zones()[0])
else:
init_color = Color(*target.get_color())
self.min_kelvin = (
target.product_features.get("min_kelvin") or MIN_KELVIN_DEFAULT
)
self.max_kelvin = (
target.product_features.get("max_kelvin") or MAX_KELVIN_DEFAULT
)
except lifxlan.WorkflowException as exc:
messagebox.showerror(
"Error building {}".format(self.__class__.__name__),
"Error thrown when trying to get label from bulb:\n{}".format(exc),
)
self.master.on_closing()
# TODO Let this fail safely and try again later
return bulb_power, init_color
def setup_screen_region_select(self):
self.screen_region_lf = ttk.LabelFrame(
self, text="Screen Avg. Region", padding="3 3 12 12"
)
self.screen_region_entries = {
"left": tkinter.Entry(self.screen_region_lf, width=6),
"width": tkinter.Entry(self.screen_region_lf, width=6),
"top": tkinter.Entry(self.screen_region_lf, width=6),
"height": tkinter.Entry(self.screen_region_lf, width=6),
}
region = config["AverageColor"][
self.label
if self.label in config["AverageColor"].keys()
else "defaultmonitor"
]
if region == "full":
region = ["full"] * 4
elif region[:19] == "get_primary_monitor":
region = get_primary_monitor()
else:
region = str2list(region, int)
self.screen_region_entries["left"].insert(tkinter.END, region[0])
self.screen_region_entries["top"].insert(tkinter.END, region[1])
self.screen_region_entries["width"].insert(tkinter.END, region[2])
self.screen_region_entries["height"].insert(tkinter.END, region[3])
tkinter.Label(self.screen_region_lf, text="left").grid(
row=7, column=0, sticky="e"
)
self.screen_region_entries["left"].grid(row=7, column=1, padx=(0, 10))
tkinter.Label(self.screen_region_lf, text="width").grid(row=7, column=2)
self.screen_region_entries["width"].grid(row=7, column=3)
tkinter.Label(self.screen_region_lf, text="top").grid(
row=8, column=0, sticky="e"
)
self.screen_region_entries["top"].grid(row=8, column=1, padx=(0, 10))
tkinter.Label(self.screen_region_lf, text="height").grid(row=8, column=2)
self.screen_region_entries["height"].grid(row=8, column=3)
tkinter.Button(
self.screen_region_lf, text="Save", command=self.save_monitor_bounds
).grid(row=9, column=1, sticky="w")
self.screen_region_lf.grid(row=7, columnspan=4)
def setup_special_functions(self):
# Screen Avg.
self.threads["screen"] = color_thread.ColorThreadRunner(
self.target,
color_thread.avg_screen_color,
self,
func_bounds=self.get_monitor_bounds,
)
def start_screen_avg():
""" Allow the screen avg. to be run in a separate thread. Also turns button green while running. """
self.avg_screen_btn.config(bg="Green")
self.threads["screen"].start()
self.avg_screen_btn = tkinter.Button(
self.special_functions_lf,
text="Avg. Screen Color",
command=start_screen_avg,
)
self.avg_screen_btn.grid(row=6, column=0)
tkinter.Button(
self.special_functions_lf,
text="Pick Color",
command=self.get_color_from_palette,
).grid(row=6, column=1)
# Screen Dominant
self.threads["dominant"] = color_thread.ColorThreadRunner(
self.target,
color_thread.dominant_screen_color,
self,
func_bounds=self.get_monitor_bounds,
)
def start_screen_dominant():
self.dominant_screen_btn.config(bg="Green")
self.threads["dominant"].start()
self.dominant_screen_btn = tkinter.Button(
self.special_functions_lf,
text="Dominant Screen Color",
command=start_screen_dominant,
)
self.dominant_screen_btn.grid(row=7, column=0)
# Audio
self.threads["audio"] = color_thread.ColorThreadRunner(
self.target, self.master.audio_interface.get_music_color, self
)
def start_audio():
""" Allow the audio to be run in a separate thread. Also turns button green while running. """
self.music_button.config(bg="Green")
self.threads["audio"].start()
self.music_button = tkinter.Button(
self.special_functions_lf,
text="Music Color",
command=start_audio,
state="normal" if self.master.audio_interface.initialized else "disabled",
)
self.music_button.grid(row=8, column=0)
self.threads["eyedropper"] = color_thread.ColorThreadRunner(
self.target, self.eyedropper, self, continuous=False
)
tkinter.Button(
self.special_functions_lf,
text="Color Eyedropper",
command=self.threads["eyedropper"].start,
).grid(row=7, column=1)
tkinter.Button(
self.special_functions_lf, text="Stop effects", command=self.stop_threads
).grid(row=8, column=1)
self.special_functions_lf.grid(row=6, columnspan=4)
def setup_color_dropdowns(self):
self.preset_colors_lf = ttk.LabelFrame(
self, text="Preset Colors", padding="3 3 12 12"
)
self.color_var = tkinter.StringVar(self, value="Presets")
self.default_colors = {
"RED": RED,
"ORANGE": ORANGE,
"YELLOW": YELLOW,
"GREEN": GREEN,
"CYAN": CYAN,
"BLUE": BLUE,
"PURPLE": PURPLE,
"PINK": PINK,
"WHITE": WHITE,
"COLD_WHITE": COLD_WHITE,
"WARM_WHITE": WARM_WHITE,
"GOLD": GOLD,
}
self.preset_dropdown = tkinter.OptionMenu(
self.preset_colors_lf, self.color_var, *self.default_colors
)
self.preset_dropdown.grid(row=0, column=0)
self.preset_dropdown.configure(width=13)
self.color_var.trace("w", self.change_preset_dropdown)
self.uservar = tkinter.StringVar(self, value="User Presets")
self.user_dropdown = tkinter.OptionMenu(
self.preset_colors_lf,
self.uservar,
*(
[*config["PresetColors"].keys()]
if any(config["PresetColors"].keys())
else [None]
),
)
self.user_dropdown.grid(row=0, column=1)
self.user_dropdown.config(width=13)
self.uservar.trace("w", self.change_user_dropdown)
self.preset_colors_lf.grid(row=5, columnspan=4)
def setup_color_controls(self, init_color):
self.logger.info("Initial light color HSBK: %s", init_color)
self.current_color = tkinter.Canvas(
self,
background=tuple2hex(HSBKtoRGB(init_color)),
width=40,
height=20,
borderwidth=3,
relief=tkinter.GROOVE,
)
self.current_color.grid(row=0, column=2)
self.hsbk = (
tkinter.IntVar(self, init_color.hue, "Hue"),
tkinter.IntVar(self, init_color.saturation, "Saturation"),
tkinter.IntVar(self, init_color.brightness, "Brightness"),
tkinter.IntVar(self, init_color.kelvin, "Kelvin"),
)
for i in self.hsbk:
i.trace("w", self.trigger_icon_update)
self.hsbk_labels: Tuple[
tkinter.Label, tkinter.Label, tkinter.Label, tkinter.Label
] = (
tkinter.Label(self, text="%.3g" % (360 * (self.hsbk[0].get() / 65535))),
tkinter.Label(
self, text=str("%.3g" % (100 * self.hsbk[1].get() / 65535)) + "%"
),
tkinter.Label(
self, text=str("%.3g" % (100 * self.hsbk[2].get() / 65535)) + "%"
),
tkinter.Label(self, text=str(self.hsbk[3].get()) + " K"),
)
self.hsbk_scale: Tuple[ColorScale, ColorScale, ColorScale, ColorScale] = (
ColorScale(
self,
to=65535.0,
variable=self.hsbk[0],
command=self.update_color_from_ui,
),
ColorScale(
self,
from_=0,
to=65535,
variable=self.hsbk[1],
command=self.update_color_from_ui,
gradient="wb",
),
ColorScale(
self,
from_=0,
to=65535,
variable=self.hsbk[2],
command=self.update_color_from_ui,
gradient="bw",
),
ColorScale(
self,
from_=self.min_kelvin,
to=self.max_kelvin,
variable=self.hsbk[3],
command=self.update_color_from_ui,
gradient="kelvin",
),
)
relief = tkinter.GROOVE
self.hsbk_display: Tuple[
tkinter.Canvas, tkinter.Canvas, tkinter.Canvas, tkinter.Canvas
] = (
tkinter.Canvas(
self,
background=tuple2hex(hueToRGB(360 * (init_color.hue / 65535))),
width=20,
height=20,
borderwidth=3,
relief=relief,
),
tkinter.Canvas(
self,
background=tuple2hex(
(
int(255 * (init_color.saturation / 65535)),
int(255 * (init_color.saturation / 65535)),
int(255 * (init_color.saturation / 65535)),
)
),
width=20,
height=20,
borderwidth=3,
relief=relief,
),
tkinter.Canvas(
self,
background=tuple2hex(
(
int(255 * (init_color.brightness / 65535)),
int(255 * (init_color.brightness / 65535)),
int(255 * (init_color.brightness / 65535)),
)
),
width=20,
height=20,
borderwidth=3,
relief=relief,
),
tkinter.Canvas(
self,
background=tuple2hex(kelvinToRGB(init_color.kelvin)),
width=20,
height=20,
borderwidth=3,
relief=relief,
),
)
scale: ColorScale
for key, scale in enumerate(self.hsbk_scale):
tkinter.Label(self, text=self.hsbk[key]).grid(row=key + 1, column=0)
scale.grid(row=key + 1, column=1)
self.hsbk_labels[key].grid(row=key + 1, column=2)
self.hsbk_display[key].grid(row=key + 1, column=3)
self.threads: Dict[str, color_thread.ColorThreadRunner] = {}
def setup_power_controls(self, bulb_power):
self.powervar = tkinter.BooleanVar(self)
self.powervar.set(bulb_power)
self.option_on = tkinter.Radiobutton(
self,
text="On",
variable=self.powervar,
value=65535,
command=self.update_power,
)
self.option_off = tkinter.Radiobutton(
self, text="Off", variable=self.powervar, value=0, command=self.update_power
)
if self.powervar.get() == 0:
# Light is off
self.option_off.select()
self.option_on.selection_clear()
else:
self.option_on.select()
self.option_off.selection_clear()
self.option_on.grid(row=0, column=0)
self.option_off.grid(row=0, column=1)
def setup_logger(self):
self.logger = logging.getLogger(
self.master.logger.name
+ "."
+ self.__class__.__name__
+ "({})".format(self.label)
)
self.logger.setLevel(logging.DEBUG)
self.logger.info(
"%s logger initialized: %s // Device: %s",
self.__class__.__name__,
self.logger.name,
self.label,
)
def restart(self):
""" Get updated information for the bulb when clicked. """
self.update_status_from_bulb()
self.logger.info("Light frame Restarted.")
def get_label(self):
""" Getter method for the label attribute. Often is monkey-patched. """
return self.label
def trigger_icon_update(self, *_, **__):
""" Just sets a flag for now. Could be more advanced in the future. """
self.icon_update_flag = True
def get_color_values_hsbk(self):
""" Get color values entered into GUI"""
return Color(*tuple(v.get() for v in self.hsbk))
def stop_threads(self):
""" Stop all ColorRunner threads """
self.music_button.config(bg="SystemButtonFace")
self.avg_screen_btn.config(bg="SystemButtonFace")
self.dominant_screen_btn.config(bg="SystemButtonFace")
for thread in self.threads.values():
thread.stop()
def update_power(self):
""" Send new power state to bulb when UI is changed. """
self.stop_threads()
self.target.set_power(self.powervar.get())
def update_color_from_ui(self, *_, **__):
""" Send new color state to bulb when UI is changed. """
self.stop_threads()
self.set_color(self.get_color_values_hsbk(), rapid=True)
def set_color(self, color, rapid=False):
""" Should be called whenever the bulb wants to change color. Sends bulb command and updates UI accordingly. """
self.stop_threads()
try:
self.target.set_color(
color,
duration=0
if rapid
else float(config["AverageColor"]["duration"]) * 1000,
rapid=rapid,
)
except lifxlan.WorkflowException as exc:
if rapid: # If we're going fast we don't care if we miss a packet.
pass
else:
raise exc
if not rapid:
self.logger.debug(
"Color changed to HSBK: %s", color
) # Don't pollute log with rapid color changes
def update_label(self, key: int):
""" Update scale labels, formatted accordingly. """
return [
self.hsbk_labels[0].config(
text=str("%.3g" % (360 * (self.hsbk[0].get() / 65535)))
),
self.hsbk_labels[1].config(
text=str("%.3g" % (100 * (self.hsbk[1].get() / 65535))) + "%"
),
self.hsbk_labels[2].config(
text=str("%.3g" % (100 * (self.hsbk[2].get() / 65535))) + "%"
),
self.hsbk_labels[3].config(text=str(self.hsbk[3].get()) + " K"),
][key]
def update_display(self, key):
""" Update color swatches to match current device state """
h, s, b, k = self.get_color_values_hsbk() # pylint: disable=invalid-name
if key == 0:
self.hsbk_display[0].config(
background=tuple2hex(hueToRGB(360 * (h / 65535)))
)
elif key == 1:
s = 65535 - s # pylint: disable=invalid-name
self.hsbk_display[1].config(
background=tuple2hex(
(
int(255 * (s / 65535)),
int(255 * (s / 65535)),
int(255 * (s / 65535)),
)
)
)
elif key == 2:
self.hsbk_display[2].config(
background=tuple2hex(
(
int(255 * (b / 65535)),
int(255 * (b / 65535)),
int(255 * (b / 65535)),
)
)
)
elif key == 3:
self.hsbk_display[3].config(background=tuple2hex(kelvinToRGB(k)))
def get_color_from_palette(self):
""" Asks users for color selection using standard color palette dialog. """
color = tkinter.colorchooser.askcolor(
initialcolor=HSBKtoRGB(self.get_color_values_hsbk())
)[0]
if color:
# RGBtoHSBK sometimes returns >65535, so we have to truncate
hsbk = [min(c, 65535) for c in lifxlan.RGBtoHSBK(color, self.hsbk[3].get())]
self.set_color(hsbk)
self.logger.info("Color set to HSBK %s from palette.", hsbk)
def update_status_from_bulb(self, run_once=False):
"""
Periodically update status from the bulb to keep UI in sync.
:param run_once: Don't schedule another `after` call at the end. Prevents an unbounded number of update callbacks from being created.
"""
require_icon_update = False
if not self.master.bulb_interface.power_queue[self.label].empty():
power = self.master.bulb_interface.power_queue[self.label].get()
require_icon_update = True
self.powervar.set(power)
if self.powervar.get() == 0:
# Light is off
self.option_off.select()
self.option_on.selection_clear()
else:
self.option_on.select()
self.option_off.selection_clear()
if not self.master.bulb_interface.color_queue[self.label].empty():
hsbk = self.master.bulb_interface.color_queue[self.label].get()
require_icon_update = True
for key, _ in enumerate(self.hsbk):
self.hsbk[key].set(hsbk[key])
self.update_label(key)
self.update_display(key)
self.current_color.config(background=tuple2hex(HSBKtoRGB(hsbk)))
if require_icon_update:
self.trigger_icon_update()
if not run_once:
self.after(FRAME_PERIOD_MS, self.update_status_from_bulb)
def eyedropper(self, *_, **__):
""" Allows user to select a color pixel from the screen. """
self.master.master.withdraw() # Hide window
state_left = win32api.GetKeyState(
0x01
) # Left button down = 0 or 1. Button up = -127 or -128
while True:
action = win32api.GetKeyState(0x01)
if action != state_left: # Button state changed
state_left = action
if action < 0: # Button down
pass
else: # Button up
break
lifxlan.sleep(0.001)
# Button state changed
screen_img = getScreenAsImage()
cursor_pos = mouse.get_position()
# Convert display coords to image coords
cursor_pos = normalizeRects(
getDisplayRects() + [(cursor_pos[0], cursor_pos[1], 0, 0)]
)[-1][:2]
color = screen_img.getpixel(cursor_pos)
self.master.master.deiconify() # Reshow window
self.logger.info("Eyedropper color found RGB %s", color)
return lifxlan.RGBtoHSBK(color, temperature=self.get_color_values_hsbk().kelvin)
def change_preset_dropdown(self, *_, **__):
""" Change device color to selected preset option. """
color = Color(*globals()[self.color_var.get()])
self.preset_dropdown.config(
bg=tuple2hex(HSBKtoRGB(color)), activebackground=tuple2hex(HSBKtoRGB(color))
)
self.set_color(color, False)
def change_user_dropdown(self, *_, **__):
""" Change device color to selected user-defined option. """
color = str2tuple(config["PresetColors"][self.uservar.get()], int)
self.user_dropdown.config(
bg=tuple2hex(HSBKtoRGB(color)), activebackground=tuple2hex(HSBKtoRGB(color))
)
self.set_color(color, rapid=False)
def update_user_dropdown(self):
""" Add newly defined color to the user color dropdown menu. """
# self.uservar.set('')
self.user_dropdown["menu"].delete(0, "end")
for choice in config["PresetColors"]:
self.user_dropdown["menu"].add_command(
label=choice, command=_setit(self.uservar, choice)
)
def get_monitor_bounds(self):
""" Return the 4 rectangle coordinates from the entry boxes in the UI """
return (
f"[{self.screen_region_entries['left'].get()}, {self.screen_region_entries['top'].get()}, "
f"{self.screen_region_entries['width'].get()}, {self.screen_region_entries['height'].get()}]"
)
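# Note (added for clarity): the string produced above, e.g. "[0, 0, 1920, 1080]",
# is the same format that setup_screen_region_select() reads back from config.ini
# via str2list(region, int), so saving and reloading the region round-trips cleanly.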
def save_monitor_bounds(self):
""" Write monitor bounds entered in the UI into the config file. """
config["AverageColor"][self.label] = self.get_monitor_bounds()
# Write to config file
with open("config.ini", "w") as cfg:
config.write(cfg)
class GroupFrame(LightFrame):
def get_light_info(self, target) -> Tuple[int, Color]:
bulb_power: int = 0
init_color: Color = Color(*lifxlan.WARM_WHITE)
try:
devices: List[
Union[lifxlan.Group, lifxlan.Light, lifxlan.MultiZoneLight]
] = target.get_device_list()
if len(devices) == 0:
logging.error("No devices found in group list")
self.label = "<No Group Found>"
self.min_kelvin, self.max_kelvin = 0, 99999 # arbitrary range
return 0, Color(0, 0, 0, 0)
self.label = devices[0].get_group_label()
bulb_power = devices[0].get_power()
# Find an init_color - ensure the device supports color, otherwise fall back to the default
color_devices: List[
Union[lifxlan.Group, lifxlan.Light, lifxlan.MultiZoneLight]
] = list(filter(lambda d: d.supports_color(), devices))
if len(color_devices) > 0 and hasattr(color_devices[0], "get_color"):
init_color = Color(*color_devices[0].get_color())
self.min_kelvin = min(
[
device.product_features.get("min_kelvin") or MIN_KELVIN_DEFAULT
for device in target.get_device_list()
]
)
self.max_kelvin = max(
[
device.product_features.get("max_kelvin") or MAX_KELVIN_DEFAULT
for device in target.get_device_list()
]
)
except lifxlan.WorkflowException as exc:
messagebox.showerror(
"Error building {}".format(self.__class__.__name__),
"Error thrown when trying to get label from bulb:\n{}".format(exc),
)
self.master.on_closing()
# TODO Let this fail safely and try again later
return bulb_power, init_color
def update_status_from_bulb(self, run_once=False):
return
class MultiZoneFrame(LightFrame):
pass
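# Illustrative helpers (not used by the classes above): the scales, labels and swatches
# all operate on raw 16-bit HSBK values; these mirror the conversions used in
# update_label()/update_display() so the mapping can be checked in isolation.
def _hue16_to_degrees(hue: int) -> float:
    """Map a 0..65535 hue to 0..360 degrees, as shown next to the hue scale."""
    return 360 * (hue / 65535)
def _fraction16_to_percent(value: int) -> float:
    """Map a 0..65535 saturation/brightness to a percentage, as shown next to those scales."""
    return 100 * (value / 65535)
# e.g. _hue16_to_degrees(10923) ~= 60.0 and _fraction16_to_percent(32768) ~= 50.0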
|
import inspect
from typing import List
from ormx.constants import *
from ormx.exceptions import *
from ormx.types import *
class Table:
"""
Sub-class of Database representing a single table of the Database.
Has the same methods, but without the table name argument.
Attributes
----------
name : str
Name of the table
columns : list
Generator of columns in the table
columns_names : list
Generator of column names in the table
"""
def __init__(self, **kwargs):
"""
Parameters
----------
**kwargs : column name/value pairs stored on the row
"""
self._data = {
'id': None
}
for key, value in kwargs.items():
self._data[key] = value
def __getattribute__(self, key):
_data = object.__getattribute__(self, '_data')
if key in _data:
return _data[key]
return object.__getattribute__(self, key)
def __setattr__(self, key, value):
super().__setattr__(key, value)
if key in self._data:
self._data[key] = value
@classmethod
def _get_name(cls):
return cls.__tablename__.split()[0] if hasattr(cls, '__tablename__') else cls.__name__.lower()
@classmethod
def _get_create_sql(cls):
fields = [
("id", "INTEGER PRIMARY KEY AUTOINCREMENT")
]
for name, field in inspect.getmembers(cls):
if isinstance(field, Column):
fields.append((name, field.sql_type))
elif isinstance(field, ForeignKey):
fields.append((name + "_id", "INTEGER"))
fields = [" ".join(x) for x in fields]
return CREATE_TABLE_SQL.format(
name=cls._get_name(),
fields=", ".join(fields))
@classmethod
def _get_column_names(cls):
fields = ['id']
for name, field in inspect.getmembers(cls):
if isinstance(field, Column):
fields.append(name)
if isinstance(field, ForeignKey):
fields.append(name + "_id")
return fields
def _get_insert_sql(self):
cls = self.__class__
fields = []
placeholders = []
values = []
for name, field in inspect.getmembers(cls):
if isinstance(field, Column):
fields.append(name)
if field.default and isinstance(getattr(self, name), Column):
if type(field.default) == field.type:
values.append(field.default if type(getattr(self, name)).__name__ == 'Column' and type(
field.default) == field.type else getattr(self, name))
else:
raise TypeError(
f'Expected {field.type.__name__}, but got {type(field.default).__name__} type for default value')
else:
if type(getattr(self, name)) == field.type:
values.append(getattr(self, name))
else:
raise TypeError(
f'Expected {field.type.__name__}, but got {type(getattr(self, name)).__name__} type for value')
placeholders.append('?')
elif isinstance(field, ForeignKey):
fields.append(name + "_id")
values.append(getattr(self, name).id if isinstance(getattr(self, name), Table) else 0)
placeholders.append('?')
sql = INSERT_SQL.format(name=cls._get_name(),
fields=", ".join(fields),
placeholders=", ".join(placeholders))
return sql, values
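# Note (added for clarity): the returned values follow the same alphabetical member
# order as the field list; Column members contribute either their default (when the
# attribute still holds the Column object) or the assigned value, and ForeignKey
# members contribute the related row's id, or 0 when no Table instance was assigned.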
@classmethod
def _get_first_sql(cls):
sql = SELECT_FIRST_SQL.format(name=cls._get_name())
return sql
@classmethod
def _get_count_sql(cls):
sql = SELECT_ALL_SQL.format(fields='COUNT(*)', name=cls._get_name())
return sql
def _get_update_sql(self):
cls = self.__class__
fields = []
values = []
for name, field in inspect.getmembers(cls):
if isinstance(field, Column):
fields.append(name)
values.append(getattr(self, name))
elif isinstance(field, ForeignKey):
fields.append(name + "_id")
values.append(getattr(self, name).id)
values.append(getattr(self, 'id'))
sql = UPDATE_SQL.format(
name=cls._get_name(),
fields=', '.join([f"{field} = ?" for field in fields])
)
return sql, values
@classmethod
def _get_select_all_sql(cls,
order_by: tuple,
limit: list = None,
where: list = None,
fields: list = None
):
params = []
fields = cls._get_column_names() if fields is None else fields
sql = SELECT_ALL_SQL.format(name=cls._get_name(),
fields=", ".join(fields))
if where:
if isinstance(where[0], str):
if where[1] in WHERE_OPTS:
sql += f' WHERE ({where[0]} {where[1]} ?)'
params.append(where[2])
elif all(isinstance(x, tuple) for x in where):
filters = []
sql += ' WHERE '
for i in where:
if i[1] in WHERE_OPTS:
filters.append(f"{i[0]} {i[1]} ?")
params.append(i[2])
else:
raise TypeError(
f"Second parameter in list is wrong, it must be one of: {", ".join(WHERE_OPTS)}")
sql += f" AND ".join(filters)
else:
for i in where[1::2]:
if not i in WHERE_CONDITIONS:
raise TypeError("Item's type must str or tuple")
sql += ' WHERE '
for i in where:
if isinstance(i, tuple):
sql += f"({i[0]} {i[1]} ?)"
params.append(i[2])
elif i in WHERE_CONDITIONS:
sql += f' {i} '
if order_by:
if not isinstance(order_by, tuple):
raise OrderByParamError(order_by)
if not (isinstance(order_by[0], str) and order_by[0] in fields):
raise OrderByColumnError(order_by[0])
if not (order_by[1] in ORDER_BY_PARAMS):
raise SortingTypeError(order_by[1])
sql += f' ORDER BY {order_by[0]} ?'
params.append(order_by[1])
if limit:
if isinstance(limit, list):
for i in limit:
if not isinstance(i, int): raise TypeError(
f"Parameters must be int, not {type(i).__name__}")
if len(limit) == 1:
sql += f' LIMIT ?'
params.append(limit[0])
elif len(limit) == 2:
sql += f' LIMIT ? OFFSET ?'
params.append(limit[0])
params.append(limit[1])
else:
raise LimitTooMuchParamsError(limit)
return sql, fields, tuple(params)
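# Examples of the accepted `where` shapes (illustrative; the valid operators and
# connectors come from WHERE_OPTS and WHERE_CONDITIONS in ormx.constants):
#   where=['age', '>', 18]                          -> "... WHERE (age > ?)"
#   where=[('age', '>', 18), ('name', '=', 'Bob')]  -> "... WHERE age > ? AND name = ?"
#   where=[('age', '>', 18), 'OR', ('vip', '=', 1)] -> "... WHERE (age > ?) OR (vip = ?)"
# order_by expects a (column, direction) tuple and limit a one- or two-element list,
# e.g. order_by=('age', 'DESC'), limit=[10, 20] -> "... ORDER BY age ? LIMIT ? OFFSET ?".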
@classmethod
def _rows(cls):
return inspect.getmembers(cls)
@classmethod
def _get_delete_sql(cls, **kwargs):
filters = []
params = []
for key, value in kwargs.items():
filters.append(key + " = ?")
params.append(value)
sql = DELETE_SQL.format(name=cls._get_name(),
query=" AND ".join(filters))
return sql, tuple(params)
@classmethod
def _get_drop_sql(cls, exp=None):
sql = DROP_SQL.format(name=cls._get_name(), exp=IF_EXISTS if exp else '')
return sql
@classmethod
def _get_select_where_sql(cls, fields: list = None, **kwargs):
fields = fields or cls._get_column_names()
filters = []
params = []
for key, value in kwargs.items():
filters.append(key + " = ?")
params.append(value)
sql = SELECT_WHERE_SQL.format(name=cls._get_name(),
fields=", ".join(fields),
query=" AND ".join(filters))
return sql, fields, tuple(params)
@classmethod
def _name(cls):
attributes = inspect.getmembers(cls, lambda a: not (inspect.isroutine(a)))
atrs = [a for a in attributes if not (a[0].startswith('__') and a[0].endswith('__'))]
return atrs
class ListQuery:
def __init__(self, db, key):
self.db = db
self.key = key
def __new__(cls, key, db, *args, **kwargs) -> list:
if key in db.tables:
rows = db.cur.execute(f"SELECT * FROM {key}").fetchall()
fields = [description[0] for description in db.cur.description]
return [dict(zip(fields, rows[i])) for i in range(len(rows))]
else:
raise TableInfoError
class Column:
def __init__(self, type, default=None):
self.type = type
self.default = default
@property
def sql_type(self):
return SQLITE_TYPE_MAP[self.type]
class ForeignKey:
def __init__(self, table):
self.table = table
class Rel:
def __init__(self, table):
self.table = table
self._data: List[Table] = []
def __str__(self):
return f"<Relation{self._data}>"
def add(self, table: Table) -> None:
if not isinstance(table, self.table):
raise TableTypeInvalid(table)
self._data.append(table)
__all__ = [
'Table',
'ListQuery',
'Column',
'ForeignKey',
'Rel'
]
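# --- Illustrative end-to-end sketch (assumes the SQL templates and SQLITE_TYPE_MAP
# imported from ormx.constants above cover `str`; the example models are made up
# and intentionally not added to __all__) ---
class _ExampleAuthor(Table):
    name = Column(str)
class _ExampleBook(Table):
    title = Column(str)
    author = ForeignKey(_ExampleAuthor)
def _ormx_sql_demo():
    """Return the CREATE statement for _ExampleBook and the (sql, values) pair for one insert."""
    book = _ExampleBook(title="Dune", author=_ExampleAuthor(name="Frank Herbert"))
    return _ExampleBook._get_create_sql(), book._get_insert_sql()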
|
import logging
import os
import random
import sys
from subprocess import Popen, PIPE
import matplotlib.pyplot as plt
import mlflow
import numpy as np
import torch
from PIL import Image
from .utils import tensor2im, mk_clean_dir, mkdirs
if sys.version_info[0] == 2:
VisdomExceptionBase = Exception
else:
VisdomExceptionBase = ConnectionError
def show_img(img, grayscale=True):
cmap = 'gray' if grayscale else "viridis"
plt.imshow(img, cmap=cmap)
plt.show()
def get_next_free_ind(d:dict):
""" given a dictionary of indices, get the next free index """
return max(d.values()) + 1
class ImageSaver:
def __init__(self, results_dir, has_label=False, has_pseudo=False, include_bbox=False,
save_masks_separately=False, mean_and_std=None, confidence=None):
self.results_dir = results_dir
mkdirs(self.results_dir)
self.has_label = has_label
logging.info(f"initialized image saver to dir {results_dir} with has_label = {has_label}")
self.include_bbox = include_bbox
self.has_pseudo = has_pseudo
if self.include_bbox:
raise NotImplementedError("need to include bboxes in ImageSaver")
self.ind = 0
self.save_masks_separately = save_masks_separately
if self.save_masks_separately:
mk_clean_dir(os.path.join(self.results_dir, "labels"))
mk_clean_dir(os.path.join(self.results_dir, "outputs"))
self.mean_and_std = mean_and_std
self.confidence = confidence
def _fix_images_for_mean_and_std(self, data_batch):
for k in data_batch:
if type(data_batch[k]) == torch.Tensor:
data_batch[k] = np.array(data_batch[k])
if self.mean_and_std is not None:
for k in ["images", "pseudos"]:
if k in data_batch:
data_batch[k] = (data_batch[k] * self.mean_and_std[k]["std"]) + self.mean_and_std[k]["mean"]
@staticmethod
def _clean_ax(ax):
ax.axis("off")
ax.set_xticks([])
ax.set_yticks([])
def _plt_imgs(self, axs, images, titles): # pass in list of Nones for titles if no title desired
for ax, img, title in zip(axs, images, titles):
img -= img.min()  # shift so the minimum maps to 0 for display
ax.imshow(img.squeeze(), cmap="gray", vmin=0., vmax=1.)
if title is not None:
self._clean_ax(ax)
ax.set_title(title)
def _plt_seg_overlay(self, axs, images, segs, titles, num_channels=1):
self._plt_imgs(axs, images, titles)
for ax, seg in zip(axs, segs):
vmax = max([1, num_channels - 1]) # not sure if this is necessary but just in case
ax.imshow(seg.squeeze(), cmap="viridis", alpha=.2, vmin=0, vmax=vmax) # overlay
def _plt_seg_difference(self, axs, segs1, segs2, titles, num_channels=1):
for ax, seg1, seg2, title in zip(axs, segs1, segs2, titles):
diff = seg1.astype(np.float32) - seg2.astype(np.float32)
vmax = max([1, num_channels - 1])
ax.imshow(diff.squeeze(), cmap="RdYlGn", vmin=-vmax, vmax=vmax)
self._clean_ax(ax)
ax.set_title(title)
def save_label(self, save_dir, labels):
for i, label in enumerate(labels):
im = Image.fromarray(tensor2im(label))
im.save(os.path.join(self.results_dir, save_dir, f"{self.ind}_{i}.png"))
@staticmethod
def get_titles(data_batch):
return [os.path.splitext(os.path.basename(ip))[0] for ip in data_batch["image_paths"]]
def confidence_titles(self, segs):
titles = []
for seg in segs:
conf = self.confidence(seg, vals=True)
titles += [f"s {conf["simplicity"]:.2f}, c {conf["convexity"]:.2f}"]
# titles += ["Passed" if self.confidence(seg) else "FAILED"]
return titles
def __call__(self, data_batch, outputs):
""" data_batch comes from data_loader, outputs comes from model"""
if self.confidence is not None:
titles = self.confidence_titles(outputs["segs"])
else:
titles = self.get_titles(data_batch)
if outputs["segs"].requires_grad:
outputs["segs"] = outputs["segs"].detach().cpu()
batch_size = outputs["segs"].shape[0]
outputs["segs"] = np.array(outputs["segs"])
num_channels = outputs["segs"].shape[1]
if num_channels > 1:
outputs["segs"] = outputs["segs"].argmax(axis=1)
self._fix_images_for_mean_and_std(data_batch) # make images display okay
if self.has_label:
assert "segs" in data_batch, f"has_label was set, but segs not in data batch {data_batch.keys()}"
ncols = 2 + (2 * self.has_label) + self.has_pseudo
fig, axs = plt.subplots(batch_size, ncols, figsize=(3 * ncols, 3 * batch_size))
if batch_size == 1:
axs = np.expand_dims(axs, 0) # batch size of 1 automatically flattens array
self._plt_imgs(axs[:, 0], data_batch["images"], titles)
self._plt_seg_overlay(axs[:, 1 + self.has_pseudo], data_batch["images"], outputs["segs"],
["outputs"] * batch_size, num_channels=num_channels)
if self.has_pseudo:
self._plt_imgs(axs[:, 1], data_batch["pseudos"], "Pseudo")
if self.has_label:
self._plt_seg_overlay(axs[:, 2 + self.has_pseudo], data_batch["images"], data_batch["segs"],
["labels"] * batch_size, num_channels=num_channels)
self._plt_seg_difference(axs[:, 3 + self.has_pseudo], outputs["segs"], data_batch["segs"],
["out-label"] * batch_size, num_channels=num_channels)
plt.savefig(os.path.join(self.results_dir, f"{self.ind}.png"))
mlflow.log_artifact(os.path.join(self.results_dir, f"{self.ind}.png"))
if self.save_masks_separately:
self.save_label("labels", data_batch["segs"])
self.save_label("outputs", outputs["segs"])
plt.close()
self.ind += 1
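# Expected inputs (summarised from the code above): `data_batch` needs "images"
# (B x C x H x W) and "image_paths" (a list of file paths), plus "segs" when
# has_label is set and "pseudos" when has_pseudo is set; `outputs` needs "segs"
# as a (B x C x H x W) tensor, which is argmax-ed over channels when C > 1.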
def get_visuals(inp_tensors: dict, mean_and_std: dict = None, bboxes: dict = None, save_all: bool = False):
assert len(inp_tensors) > 0, "at least one input must be provided"
im = inp_tensors[next(iter(inp_tensors))] # get first element
batch_size = im.shape[0]
visuals = list()
if batch_size > 1 and not save_all:
batch_index_select = [random.choice(range(batch_size))]
else:
batch_index_select = range(batch_size)
for batch_index in batch_index_select:
plot_ims = dict()
# TODO: centralize renorming of data to one location!!
for k, v in inp_tensors.items():
if mean_and_std is not None and k in mean_and_std and k != "labels":
v = (v * mean_and_std[k]["std"]) + mean_and_std[k]["mean"]
if v is not None:
plot_ims[k] = tensor2im(v[batch_index])
if bboxes is not None:
plot_ims["bboxes"] = add_bboxes([v[batch_index] for v in bboxes.values()])
visuals.append(plot_ims)
return visuals
def add_bboxes(bboxes, im_shape=(256, 256)):
assert len(bboxes) < 3, "bbox uses channels so no more than 2 bboxes should be included"
bbox_image = np.zeros((im_shape))
for i, bbox in enumerate(bboxes):
bbox *= im_shape[0]
min_r, min_c, h, w = bbox.type(torch.int32)
bbox_image[min_r:min_r + h, min_c:min_c + w] += 100.
return bbox_image
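# Minimal usage sketch (illustrative only; guarded so it never runs on import):
# add_bboxes expects boxes normalised to [0, 1] as (min_row, min_col, height, width)
# tensors and paints each scaled region into a single float image.
if __name__ == "__main__":
    _box = torch.tensor([0.25, 0.25, 0.5, 0.5])
    _canvas = add_bboxes([_box], im_shape=(256, 256))
    assert _canvas[128, 128] == 100.0 and _canvas[0, 0] == 0.0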
class Visualizer():
"""This class includes several functions that can display/save images and print/save logging information.
It uses a Python library 'visdom' for display, and a Python library 'dominate' (wrapped in 'HTML') for creating HTML files with images.
"""
def __init__(self, opt):
"""Initialize the Visualizer class
Parameters:
opt -- stores all the experiment flags; needs to be a subclass of BaseOptions
Step 1: Cache the training/test options
Step 2: connect to a visdom server
Step 3: create an HTML object for saving HTML files
Step 4: create a logging file to store training losses
"""
self.opt = opt # cache the option
self.display_id = dict(image_table=1) # image table says which images are which
self.win_size = opt.display_winsize
self.name = opt.name
self.port = opt.display_port
self.saved = False
self.visdom_on = not opt.disable_visdom
if self.visdom_on: # connect to a visdom server given <display_port> and <display_server>
import visdom
self.ncols = opt.display_ncols + opt.include_bbox
self.vis = visdom.Visdom(server=opt.display_server, port=opt.display_port, env=opt.display_env)
if not self.vis.check_connection():
self.create_visdom_connections()
def reset(self):
"""Reset the self.saved status"""
self.saved = False
def create_visdom_connections(self):
"""If the program could not connect to Visdom server, this function will start a new server at port < self.port > """
cmd = sys.executable + ' -m visdom.server -p %d &>/dev/null &' % self.port
print('\n\nCould not connect to Visdom server. \n Trying to start a server....')
print('Command: %s' % cmd)
Popen(cmd, shell=True, stdout=PIPE, stderr=PIPE)
def display_current_results(self, visuals, epoch, save_result, phase):
"""Display current results on visdom; save current results to an HTML file.
Parameters:
visuals (OrderedDict) - - dictionary of images to display or save
epoch (int) - - the current epoch
save_result (bool) - - if save the current results to an HTML file
phase (str) -- phase name
"""
if self.visdom_on: # show images in the browser using visdom
ncols = self.ncols
if ncols > 0: # show all the images in one visdom panel
ncols = min(ncols, len(visuals))
h, w = next(iter(visuals.values())).shape[:2]
# create a table of images.
title = self.name + ': ' + phase
images = []
idx = 0
for label, image in visuals.items():
image_numpy = tensor2im(image)
if len(image_numpy.shape) == 3:
image_numpy = image_numpy.transpose([2, 0, 1])
if len(image_numpy.shape) == 2:
image_numpy = np.expand_dims(image_numpy, 0)
images.append(image_numpy)
idx += 1
white_image = np.ones_like(image_numpy) * 255
while idx % ncols != 0:
images.append(white_image)
idx += 1
if f"{phase}_images" not in self.display_id:
self.display_id[f"{phase}_images"] = get_next_free_ind(self.display_id)
try:
self.vis.images(images, nrow=ncols, win=self.display_id[f"{phase}_images"],
padding=2, opts=dict(title=title + ' images'))
except VisdomExceptionBase:
self.create_visdom_connections()
else: # show each image in a separate visdom panel;
idx = 1
try:
for label, image in visuals.items():
if f"{phase}_images_{idx}" not in self.display_id:
self.display_id[f"{phase}_images_{idx}"] = get_next_free_ind(self.display_id)
image_numpy = tensor2im(image)
self.vis.image(image_numpy, opts=dict(title='_'.join([phase, label])),
win=self.display_id[f"{phase}_images_{idx}"])
idx += 1
except VisdomExceptionBase:
self.create_visdom_connections()
def plot_current_losses(self, epoch, counter_ratio, losses, phase):
"""display the current losses on visdom display: dictionary of error labels and values
Parameters:
epoch (int) -- current epoch
counter_ratio (float) -- progress (percentage) in the current epoch, between 0 to 1
losses (OrderedDict) -- training losses stored in the format of (name, float) pairs
phase (str) -- phase name
"""
if self.visdom_on:
if f"{phase}_metrics" not in self.display_id:
self.display_id[f"{phase}_metrics"] = get_next_free_ind(self.display_id)
if not hasattr(self, 'plot_data'):
self.plot_data = dict()
if phase not in self.plot_data.keys():
self.plot_data[phase] = {'X': [], 'Y': [], 'legend': list(losses.keys())}
self.plot_data[phase]['X'].append(epoch + counter_ratio)
self.plot_data[phase]['Y'].append([l for l in losses.values()])
try:
self.vis.line(
X=np.stack([np.array(self.plot_data[phase]['X'])] * len(losses), 1),
Y=np.array(self.plot_data[phase]['Y']),
opts={
'title': self.name + '_' + phase + ': loss over time',
'legend': self.plot_data[phase]['legend'],
'xlabel': 'epoch',
'ylabel': 'loss'},
win=self.display_id[f"{phase}_metrics"])
except VisdomExceptionBase:
self.create_visdom_connections()
except AssertionError:
print(f"visdom plot failed, X= {self.plot_data[phase]["X"][-1]}, Y={self.plot_data[phase]["Y"][-1]}")
print(losses)
print(self.plot_data)
# losses: same format as |losses| of plot_current_losses
def print_current_losses(self, epoch, iters, losses, phase):
"""print current losses on console; also save the losses to the disk
Parameters:
epoch (int) -- current epoch
iters (int) -- current training iteration during this epoch (reset to 0 at the end of every epoch)
losses (OrderedDict) -- training losses stored in the format of (name, float) pairs
phase (str) -- phase name
"""
message = f'(phase {phase} epoch: {epoch}, iters: {iters}) '
for k, v in losses.items():
message += '%s: %.3f ' % (k, v)
print(message) # print the message
# with open(self.log_name, "a") as log_file:
# log_file.write('%s\n' % message) # save the message
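# Illustrative sketch (not part of the module): the attributes Visualizer reads from
# `opt` in __init__, with made-up values. With disable_visdom=True no visdom
# connection is attempted, so the display_*/include_bbox fields are only consulted
# when visdom is enabled.
#
#   from types import SimpleNamespace
#   opt = SimpleNamespace(display_winsize=256, name="experiment", display_port=8097,
#                         disable_visdom=True, display_ncols=4, include_bbox=0,
#                         display_server="http://localhost", display_env="main")
#   vis = Visualizer(opt)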
|
import uuid
from django.db.models import Sum
from django.template.defaultfilters import date as date_filter
from django.utils.translation import gettext_lazy as _
from .helpers import get_calculation_annotation
from .registry import field_registry
class SlickReportField(object):
"""
Computation field responsible for making the calculation unit
"""
_field_registry = field_registry
name = ''
"""The name to be used in the ReportGenerator"""
calculation_field = 'value'
"""the Field to compute on"""
calculation_method = Sum
"""The computation Method"""
verbose_name = None
"""Verbose name to be used in front end when needed"""
requires = None
"""This can be a list of sibling classes,
they will be asked to compute, and their values will be available to you in the `resolve` method
requires = [BasicCalculationA, BasicCalculationB]
"""
type = 'number'
"""Just a string describing what this computation field return, usually passed to frontend"""
is_summable = True
"""Indicate if this computation can be summed over. Useful to be passed to frontend or whenever needed"""
report_model = None
group_by = None
plus_side_q = None
minus_side_q = None
_require_classes = None
_debit_and_credit = True
@classmethod
def create(cls, method, field, name=None, verbose_name=None, is_summable=True):
"""
Creates a ReportField class on the fly
:param method: The computation Method to be used
:param field: The field on which the computation would occur
:param name: a name to refer to this field else where
:param verbose_name: Verbose name
:param is_summable:
:return:
"""
if not name:
identifier = str(uuid.uuid4()).split('-')[-1]
name = name or f"{method.name.lower()}__{field}"
assert name not in cls._field_registry.get_all_report_fields_names()
verbose_name = verbose_name or f'{method.name} {field}'
report_klass = type(f'ReportField_{name}', (cls,), {
'name': name,
'verbose_name': verbose_name,
'calculation_field': field,
'calculation_method': method,
'is_summable': is_summable,
})
return report_klass
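# Example (illustrative): creating a computation field on the fly with Django's Sum:
#
#   from django.db.models import Sum
#   TotalValue = SlickReportField.create(Sum, 'value', name='value__sum',
#                                        verbose_name='Total value')
#
# The returned class has name/verbose_name/calculation_field/calculation_method set
# as above and can then be used wherever a report field class is expected.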
def __init__(self, plus_side_q=None, minus_side_q=None,
report_model=None,
qs=None,
calculation_field=None, calculation_method=None, date_field='', group_by=None):
super(SlickReportField, self).__init__()
self.date_field = date_field
self.report_model = self.report_model or report_model
self.calculation_field = calculation_field if calculation_field else self.calculation_field
self.calculation_method = calculation_method if calculation_method else self.calculation_method
self.plus_side_q = self.plus_side_q or plus_side_q
self.minus_side_q = self.minus_side_q or minus_side_q
self.requires = self.requires or []
self.group_by = self.group_by or group_by
self._cache = None, None, None
self._require_classes = [field_registry.get_field_by_name(x) for x in self.requires]
if not self.plus_side_q and not self.minus_side_q:
self._debit_and_credit = False
@classmethod
def _get_required_classes(cls):
requires = cls.requires or []
return [field_registry.get_field_by_name(x) for x in requires]
def apply_q_plus_filter(self, qs):
return qs.filter(*self.plus_side_q)
def apply_q_minus_filter(self, qs):
return qs.filter(*self.minus_side_q)
def apply_aggregation(self, queryset, group_by=''):
annotation = self.calculation_method(self.calculation_field)
if group_by:
queryset = queryset.values(group_by).annotate(annotation)
else:
queryset = queryset.aggregate(annotation)
return queryset
def init_preparation(self, q_filters=None, kwargs_filters=None, **kwargs):
"""
        Called by the generator to prepare the calculation of this field and its requirements
:param q_filters:
:param kwargs_filters:
:param kwargs:
:return:
"""
kwargs_filters = kwargs_filters or {}
dep_values = self._prepare_dependencies(q_filters, kwargs_filters.copy())
debit_results, credit_results = self.prepare(q_filters, kwargs_filters, **kwargs)
self._cache = debit_results, credit_results, dep_values
def prepare(self, q_filters=None, kwargs_filters=None, **kwargs):
"""
        This is the first hook where you can customize the calculation away from the Django queryset aggregation method.
        This method gets called with all available parameters, so you can prepare the results for the whole set
        and save them in a local cache (like self._cache).
        The flow will later call the method `resolve`, giving you the id, for you to return its respective calculation.
:param q_filters:
:param kwargs_filters:
:param kwargs:
:return:
"""
queryset = self.get_queryset()
if q_filters:
queryset = queryset.filter(*q_filters)
if kwargs_filters:
queryset = queryset.filter(**kwargs_filters)
if self.plus_side_q:
queryset = self.apply_q_plus_filter(queryset)
debit_results = self.apply_aggregation(queryset, self.group_by)
credit_results = None
if self._debit_and_credit:
queryset = self.get_queryset()
if kwargs_filters:
queryset = queryset.filter(**kwargs_filters)
if q_filters:
queryset = queryset.filter(*q_filters)
if self.minus_side_q:
queryset = self.apply_q_minus_filter(queryset)
credit_results = self.apply_aggregation(queryset, self.group_by)
return debit_results, credit_results
def get_queryset(self):
queryset = self.report_model.objects
return queryset.order_by()
def get_annotation_name(self):
"""
        Get the name of the aggregation annotation as it appears on the queryset results.
        :return: the annotation string, e.g. 'value__sum'
"""
return get_calculation_annotation(self.calculation_field, self.calculation_method)
def _prepare_dependencies(self, q_filters=None, extra_filters=None, ):
values = {}
for dep_class in self._require_classes:
dep = dep_class(self.plus_side_q, self.minus_side_q, self.report_model,
date_field=self.date_field, group_by=self.group_by)
values[dep.name] = {'results': dep.init_preparation(q_filters, extra_filters),
'instance': dep}
return values
def resolve(self, current_obj, current_row=None):
'''
        Responsible for getting the exact data from the prepared value
        (the cached results produced by `prepare` and stored on self._cache).
        :param current_obj: the value of the group_by id
:param current_row: the row in iteration
:return: a solid number or value
'''
cached = self._cache
debit_value, credit_value = self.extract_data(cached, current_obj)
dependencies_value = self._resolve_dependencies(current_obj)
return self.final_calculation(debit_value, credit_value, dependencies_value)
def get_dependency_value(self, current_obj, name=None):
"""
Get the values of the ReportFields specified in `requires`
:param current_obj: the current object which we want the calculation for
:param name: Optional, the name of the specific dependency you want.
:return: a dict containing dependencies names as keys and their calculation as values
or a specific value if name is specified.
"""
values = self._resolve_dependencies(current_obj)
if name:
return values.get(name)
return values
def _resolve_dependencies(self, current_obj):
dep_results = {}
cached_debit, cached_credit, dependencies_value = self._cache
dependencies_value = dependencies_value or {}
for d in dependencies_value.keys():
d_instance = dependencies_value[d]['instance']
dep_results[d] = d_instance.resolve(current_obj)
return dep_results
def extract_data(self, cached, current_obj):
group_by = self.group_by
debit_value = 0
credit_value = 0
annotation = self.get_annotation_name()
cached_debit, cached_credit, dependencies_value = cached
if cached_debit or cached_credit:
debit = None
if cached_debit is not None:
if not group_by:
x = list(cached_debit.keys())[0]
debit_value = cached_debit[x]
else:
for i, x in enumerate(cached_debit):
if str(x[group_by]) == current_obj:
debit = cached_debit[i]
break
if debit:
debit_value = debit[annotation]
if cached_credit is not None:
credit = None
if cached_credit is not None:
if not group_by:
x = list(cached_credit.keys())[0]
credit_value = cached_credit[x]
else:
for i, x in enumerate(cached_credit):
if str(x[group_by]) == current_obj:
credit = cached_credit[i]
break
if credit:
credit_value = credit[annotation]
return debit_value, credit_value
def final_calculation(self, debit, credit, dep_dict):
debit = debit or 0
credit = credit or 0
return debit - credit
@classmethod
def get_full_dependency_list(cls):
"""
        Get the full hierarchy of dependencies, including the dependencies' own dependencies.
        :return: list of dependency classes
"""
def get_dependency(field_class):
dependencies = field_class._get_required_classes()
klasses = []
for klass in dependencies:
klasses.append(klass)
other = get_dependency(klass)
if other:
klasses += other
return klasses
return get_dependency(cls)
@classmethod
def get_crosstab_field_verbose_name(cls, model, id):
"""
Construct a verbose name for the crosstab field
:param model: the model name
:param id: the id of the current crosstab object
:return: a verbose string
"""
if id == '----':
            return _('The remainder')
return f'{cls.verbose_name} {model} {id}'
@classmethod
def get_time_series_field_verbose_name(cls, date_period, index, dates, pattern):
"""
        Get the verbose name of a computation field that is part of a time series.
        It should be a mix of the column's date period and the field's verbose name.
        :param date_period: a tuple of (start_date, end_date)
        :param index: the index of the current field in the whole list of dates to be calculated
        :param dates: a list of tuples representing the start and the end date
        :param pattern: the time series pattern, e.g. 'monthly', 'daily', 'weekly' or 'yearly'
:return: a verbose string
"""
dt_format = '%Y/%m/%d'
if pattern == 'monthly':
month_name = date_filter(date_period[0], 'F Y')
return f'{cls.verbose_name} {month_name}'
elif pattern == 'daily':
return f'{cls.verbose_name} {date_period[0].strftime(dt_format)}'
elif pattern == 'weekly':
return f' {cls.verbose_name} {_('Week')} {index} {date_period[0].strftime(dt_format)}'
elif pattern == 'yearly':
year = date_filter(date_period[0], 'Y')
return f'{cls.verbose_name} {year}'
return f'{cls.verbose_name} {date_period[0].strftime(dt_format)} - {date_period[1].strftime(dt_format)}'
class FirstBalanceField(SlickReportField):
name = '__fb__'
verbose_name = _('first balance')
def prepare(self, q_filters=None, extra_filters=None, **kwargs):
extra_filters = extra_filters or {}
from_date_value = extra_filters.get(f'{self.date_field}__gte')
extra_filters.pop(f'{self.date_field}__gte', None)
extra_filters[f'{self.date_field}__lt'] = from_date_value
return super(FirstBalanceField, self).prepare(q_filters, extra_filters)
field_registry.register(FirstBalanceField)
class TotalReportField(SlickReportField):
name = '__total__'
verbose_name = _('Sum of value')
requires = ['__debit__', '__credit__']
field_registry.register(TotalReportField)
class BalanceReportField(SlickReportField):
name = '__balance__'
verbose_name = _('Cumulative Total')
requires = ['__fb__']
def final_calculation(self, debit, credit, dep_dict):
fb = dep_dict.get('__fb__')
debit = debit or 0
credit = credit or 0
fb = fb or 0
return fb + debit - credit
field_registry.register(BalanceReportField)
class CreditReportField(SlickReportField):
name = '__credit__'
verbose_name = _('Credit')
def final_calculation(self, debit, credit, dep_dict):
return credit
field_registry.register(CreditReportField)
class DebitReportField(SlickReportField):
name = '__debit__'
verbose_name = _('Debit')
def final_calculation(self, debit, credit, dep_dict):
return debit
field_registry.register(DebitReportField)
class TotalQTYReportField(SlickReportField):
name = '__total_quantity__'
verbose_name = _('Total QTY')
calculation_field = 'quantity'
is_summable = False
field_registry.register(TotalQTYReportField)
class FirstBalanceQTYReportField(FirstBalanceField):
name = '__fb_quan__'
verbose_name = _('starting QTY')
calculation_field = 'quantity'
is_summable = False
field_registry.register(FirstBalanceQTYReportField)
class BalanceQTYReportField(SlickReportField):
name = '__balance_quantity__'
verbose_name = _('Cumulative QTY')
calculation_field = 'quantity'
requires = ['__fb_quan__']
def final_calculation(self, debit, credit, dep_dict):
        # Use `get`; the value falls back to 0 below if it's not there
fb = dep_dict.get('__fb_quan__')
fb = fb or 0
return fb + debit - credit
field_registry.register(BalanceQTYReportField)
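# --- Illustrative sketch (not part of the original module) ---
# Rough outline of how a computation field could be built with `create` and
# driven through the prepare/resolve cycle described in the docstrings above.
# `SalesTransaction` and the 'client' group_by column are hypothetical; a real
# project would plug in its own report model.  Kept as comments because it
# needs a configured Django project to actually run.
#
#   TotalValue = SlickReportField.create(Sum, 'value', name='example_value_sum')
#   field = TotalValue(report_model=SalesTransaction, group_by='client')
#   field.init_preparation()          # runs the aggregation(s) once and caches them
#   field.resolve('1')                # debit - credit for the group whose id is '1'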
| import uuid
from django.db.models import Sum
from django.template.defaultfilters import date as date_filter
from django.utils.translation import gettext_lazy as _
from .helpers import get_calculation_annotation
from .registry import field_registry
class SlickReportField(object):
"""
Computation field responsible for making the calculation unit
"""
_field_registry = field_registry
name = ''
"""The name to be used in the ReportGenerator"""
calculation_field = 'value'
"""the Field to compute on"""
calculation_method = Sum
"""The computation Method"""
verbose_name = None
"""Verbose name to be used in front end when needed"""
requires = None
"""This can be a list of sibling classes,
they will be asked to compute and their value would be available to you in the `resolve` method
requires = [BasicCalculationA, BasicCalculationB]
"""
type = 'number'
"""Just a string describing what this computation field return, usually passed to frontend"""
is_summable = True
"""Indicate if this computation can be summed over. Useful to be passed to frontend or whenever needed"""
report_model = None
group_by = None
plus_side_q = None
minus_side_q = None
_require_classes = None
_debit_and_credit = True
@classmethod
def create(cls, method, field, name=None, verbose_name=None, is_summable=True):
"""
Creates a ReportField class on the fly
:param method: The computation Method to be used
:param field: The field on which the computation would occur
        :param name: a name to refer to this field elsewhere
:param verbose_name: Verbose name
        :param is_summable: whether this computation can be summed over
        :return: the newly created ReportField subclass
"""
if not name:
identifier = str(uuid.uuid4()).split('-')[-1]
name = name or f"{method.name.lower()}__{field}"
assert name not in cls._field_registry.get_all_report_fields_names()
verbose_name = verbose_name or f'{method.name} {field}'
report_klass = type(f'ReportField_{name}', (cls,), {
'name': name,
'verbose_name': verbose_name,
'calculation_field': field,
'calculation_method': method,
'is_summable': is_summable,
})
return report_klass
def __init__(self, plus_side_q=None, minus_side_q=None,
report_model=None,
qs=None,
calculation_field=None, calculation_method=None, date_field='', group_by=None):
super(SlickReportField, self).__init__()
self.date_field = date_field
self.report_model = self.report_model or report_model
self.calculation_field = calculation_field if calculation_field else self.calculation_field
self.calculation_method = calculation_method if calculation_method else self.calculation_method
self.plus_side_q = self.plus_side_q or plus_side_q
self.minus_side_q = self.minus_side_q or minus_side_q
self.requires = self.requires or []
self.group_by = self.group_by or group_by
self._cache = None, None, None
self._require_classes = [field_registry.get_field_by_name(x) for x in self.requires]
if not self.plus_side_q and not self.minus_side_q:
self._debit_and_credit = False
@classmethod
def _get_required_classes(cls):
requires = cls.requires or []
return [field_registry.get_field_by_name(x) for x in requires]
def apply_q_plus_filter(self, qs):
return qs.filter(*self.plus_side_q)
def apply_q_minus_filter(self, qs):
return qs.filter(*self.minus_side_q)
def apply_aggregation(self, queryset, group_by=''):
annotation = self.calculation_method(self.calculation_field)
if group_by:
queryset = queryset.values(group_by).annotate(annotation)
else:
queryset = queryset.aggregate(annotation)
return queryset
def init_preparation(self, q_filters=None, kwargs_filters=None, **kwargs):
"""
        Called by the generator to prepare the calculation of this field and its requirements
:param q_filters:
:param kwargs_filters:
:param kwargs:
:return:
"""
kwargs_filters = kwargs_filters or {}
dep_values = self._prepare_dependencies(q_filters, kwargs_filters.copy())
debit_results, credit_results = self.prepare(q_filters, kwargs_filters, **kwargs)
self._cache = debit_results, credit_results, dep_values
def prepare(self, q_filters=None, kwargs_filters=None, **kwargs):
"""
        This is the first hook where you can customize the calculation away from the Django queryset aggregation method.
        This method gets called with all available parameters, so you can prepare the results for the whole set
        and save them in a local cache (like self._cache).
        The flow will later call the method `resolve`, giving you the id, for you to return its respective calculation.
:param q_filters:
:param kwargs_filters:
:param kwargs:
:return:
"""
queryset = self.get_queryset()
if q_filters:
queryset = queryset.filter(*q_filters)
if kwargs_filters:
queryset = queryset.filter(**kwargs_filters)
if self.plus_side_q:
queryset = self.apply_q_plus_filter(queryset)
debit_results = self.apply_aggregation(queryset, self.group_by)
credit_results = None
if self._debit_and_credit:
queryset = self.get_queryset()
if kwargs_filters:
queryset = queryset.filter(**kwargs_filters)
if q_filters:
queryset = queryset.filter(*q_filters)
if self.minus_side_q:
queryset = self.apply_q_minus_filter(queryset)
credit_results = self.apply_aggregation(queryset, self.group_by)
return debit_results, credit_results
def get_queryset(self):
queryset = self.report_model.objects
return queryset.order_by()
def get_annotation_name(self):
"""
        Get the name of the aggregation annotation as it appears on the queryset results.
        :return: the annotation string, e.g. 'value__sum'
"""
return get_calculation_annotation(self.calculation_field, self.calculation_method)
def _prepare_dependencies(self, q_filters=None, extra_filters=None, ):
values = {}
for dep_class in self._require_classes:
dep = dep_class(self.plus_side_q, self.minus_side_q, self.report_model,
date_field=self.date_field, group_by=self.group_by)
values[dep.name] = {'results': dep.init_preparation(q_filters, extra_filters),
'instance': dep}
return values
def resolve(self, current_obj, current_row=None):
'''
        Responsible for getting the exact data from the prepared value
        (the cached results produced by `prepare` and stored on self._cache).
        :param current_obj: the value of the group_by id
:param current_row: the row in iteration
:return: a solid number or value
'''
cached = self._cache
debit_value, credit_value = self.extract_data(cached, current_obj)
dependencies_value = self._resolve_dependencies(current_obj)
return self.final_calculation(debit_value, credit_value, dependencies_value)
def get_dependency_value(self, current_obj, name=None):
"""
Get the values of the ReportFields specified in `requires`
:param current_obj: the current object which we want the calculation for
:param name: Optional, the name of the specific dependency you want.
:return: a dict containing dependencies names as keys and their calculation as values
or a specific value if name is specified.
"""
values = self._resolve_dependencies(current_obj)
if name:
return values.get(name)
return values
def _resolve_dependencies(self, current_obj):
dep_results = {}
cached_debit, cached_credit, dependencies_value = self._cache
dependencies_value = dependencies_value or {}
for d in dependencies_value.keys():
d_instance = dependencies_value[d]['instance']
dep_results[d] = d_instance.resolve(current_obj)
return dep_results
def extract_data(self, cached, current_obj):
group_by = self.group_by
debit_value = 0
credit_value = 0
annotation = self.get_annotation_name()
cached_debit, cached_credit, dependencies_value = cached
if cached_debit or cached_credit:
debit = None
if cached_debit is not None:
if not group_by:
x = list(cached_debit.keys())[0]
debit_value = cached_debit[x]
else:
for i, x in enumerate(cached_debit):
if str(x[group_by]) == current_obj:
debit = cached_debit[i]
break
if debit:
debit_value = debit[annotation]
if cached_credit is not None:
credit = None
if cached_credit is not None:
if not group_by:
x = list(cached_credit.keys())[0]
credit_value = cached_credit[x]
else:
for i, x in enumerate(cached_credit):
if str(x[group_by]) == current_obj:
credit = cached_credit[i]
break
if credit:
credit_value = credit[annotation]
return debit_value, credit_value
def final_calculation(self, debit, credit, dep_dict):
debit = debit or 0
credit = credit or 0
return debit - credit
@classmethod
def get_full_dependency_list(cls):
"""
        Get the full hierarchy of dependencies, including the dependencies' own dependencies.
        :return: list of dependency classes
"""
def get_dependency(field_class):
dependencies = field_class._get_required_classes()
klasses = []
for klass in dependencies:
klasses.append(klass)
other = get_dependency(klass)
if other:
klasses += other
return klasses
return get_dependency(cls)
@classmethod
def get_crosstab_field_verbose_name(cls, model, id):
"""
Construct a verbose name for the crosstab field
:param model: the model name
:param id: the id of the current crosstab object
:return: a verbose string
"""
if id == '----':
            return _('The remainder')
return f'{cls.verbose_name} {model} {id}'
@classmethod
def get_time_series_field_verbose_name(cls, date_period, index, dates, pattern):
"""
        Get the verbose name of a computation field that is part of a time series.
        It should be a mix of the column's date period and the field's verbose name.
        :param date_period: a tuple of (start_date, end_date)
        :param index: the index of the current field in the whole list of dates to be calculated
        :param dates: a list of tuples representing the start and the end date
        :param pattern: the time series pattern, e.g. 'monthly', 'daily', 'weekly' or 'yearly'
:return: a verbose string
"""
dt_format = '%Y/%m/%d'
if pattern == 'monthly':
month_name = date_filter(date_period[0], 'F Y')
return f'{cls.verbose_name} {month_name}'
elif pattern == 'daily':
return f'{cls.verbose_name} {date_period[0].strftime(dt_format)}'
elif pattern == 'weekly':
return f' {cls.verbose_name} {_("Week")} {index} {date_period[0].strftime(dt_format)}'
elif pattern == 'yearly':
year = date_filter(date_period[0], 'Y')
return f'{cls.verbose_name} {year}'
return f'{cls.verbose_name} {date_period[0].strftime(dt_format)} - {date_period[1].strftime(dt_format)}'
class FirstBalanceField(SlickReportField):
name = '__fb__'
verbose_name = _('first balance')
def prepare(self, q_filters=None, extra_filters=None, **kwargs):
extra_filters = extra_filters or {}
from_date_value = extra_filters.get(f'{self.date_field}__gte')
extra_filters.pop(f'{self.date_field}__gte', None)
extra_filters[f'{self.date_field}__lt'] = from_date_value
return super(FirstBalanceField, self).prepare(q_filters, extra_filters)
field_registry.register(FirstBalanceField)
class TotalReportField(SlickReportField):
name = '__total__'
verbose_name = _('Sum of value')
requires = ['__debit__', '__credit__']
field_registry.register(TotalReportField)
class BalanceReportField(SlickReportField):
name = '__balance__'
verbose_name = _('Cumulative Total')
requires = ['__fb__']
def final_calculation(self, debit, credit, dep_dict):
fb = dep_dict.get('__fb__')
debit = debit or 0
credit = credit or 0
fb = fb or 0
return fb + debit - credit
field_registry.register(BalanceReportField)
class CreditReportField(SlickReportField):
name = '__credit__'
verbose_name = _('Credit')
def final_calculation(self, debit, credit, dep_dict):
return credit
field_registry.register(CreditReportField)
class DebitReportField(SlickReportField):
name = '__debit__'
verbose_name = _('Debit')
def final_calculation(self, debit, credit, dep_dict):
return debit
field_registry.register(DebitReportField)
class TotalQTYReportField(SlickReportField):
name = '__total_quantity__'
verbose_name = _('Total QTY')
calculation_field = 'quantity'
is_summable = False
field_registry.register(TotalQTYReportField)
class FirstBalanceQTYReportField(FirstBalanceField):
name = '__fb_quan__'
verbose_name = _('starting QTY')
calculation_field = 'quantity'
is_summable = False
field_registry.register(FirstBalanceQTYReportField)
class BalanceQTYReportField(SlickReportField):
name = '__balance_quantity__'
verbose_name = _('Cumulative QTY')
calculation_field = 'quantity'
requires = ['__fb_quan__']
def final_calculation(self, debit, credit, dep_dict):
        # Use `get`; the value falls back to 0 below if it's not there
fb = dep_dict.get('__fb_quan__')
fb = fb or 0
return fb + debit - credit
field_registry.register(BalanceQTYReportField)
|
import os, re, glob
import logging
from pathlib import Path
from typing import Dict, List, Optional, Tuple, Union
import yaml
import numpy as np
import pandas as pd
import xarray as xr
from astrild.rays.rayramses import RayRamses
from astrild.particles.ecosmog import Ecosmog
dir_src = Path(__file__).parent.absolute()
default_file_config = dir_src / "configs/simulation_collection.yaml"
default_config_file_particle = dir_src / "configs/particle_snapshot_info.h5"
default_config_file_ray = dir_src / "configs/ray_snapshot_info.h5"
class SimulationCollectionWarning(BaseException):
pass
class SimulationCollection:
"""
Class to handle a collection of Ramses based simulations.
Attributes:
config_file:
config_file_df:
Methods:
from_file:
compress_stats:
compress_histograms:
sum_raytracing_snapshots:
"""
def __init__(
self,
config: pd.DataFrame,
sims: Dict[str, Union[Ecosmog, RayRamses]],
):
"""
Args:
config:
Contains info on simulations in the collection and
their directories & files that contain data to analyze.
sims:
Contains the simulations in the collection.
"""
self.config = config
self.sim = sims
self.sim_nrs = np.arange(1, len(list(sims.keys())) + 1)
@classmethod
def from_file(
cls,
config_file: str,
config_file_df: str,
) -> "SimulationCollection":
"""
Initialize SimulationCollection from path to config files.
Args:
config_file:
config_file_df:
"""
with open(config_file) as f:
sims_args = yaml.load(f, Loader=yaml.FullLoader)
sim_type = sims_args[list(sims_args.keys())[0]]["type"]
if not os.path.isfile(config_file_df):
raise SimulationCollectionWarning(
"The file 'ray_snapshot_info.h5' does note exist"
)
elif sim_type == "particles":
config = pd.read_hdf(config_file_df, key="df")
elif sim_type == "rays":
config = pd.read_hdf(config_file_df, key="df")
sims = {}
for idx, (sim_name, sim_args) in enumerate(sims_args.items()):
if sim_args["type"] == "particles":
sims[sim_name] = Ecosmog(config.loc[(idx+1,)], **sim_args["init"])
elif sim_args["type"] == "rays":
sims[sim_name] = RayRamses(config.loc[(idx+1,)], **sim_args["init"])
else:
raise SimulationCollectionWarning(
f"{sim_args["type"]} have not been simulated :-("
)
return SimulationCollection(config, sims)
def _find_common_z(self) -> np.array:
"""
Find the redshifts that the simulations in the collection have in common.
"""
z_nrs = self.config.loc[(self.sim_nrs[0],)]["redshift"].values
for sim_nr in self.sim_nrs:
z_nrs = np.intersect1d(z_nrs, self.config.loc[(sim_nr,)]["redshift"].values)
z_nrs = z_nrs[z_nrs < 2.3]
return z_nrs
def _find_nearest(self, array: np.ndarray, value: float) -> float:
""" Find element in array closest to value """
array = np.asarray(array)
idx = (np.abs(array - value)).argmin()
return array[idx]
def compress_stats(
self,
file_dsc: Dict[str, str],
dir_out: str,
snap_nrs: Optional[np.ndarray] = None,
z_nrs: Optional[List[float]] = None,
a_nrs: Optional[List[float]] = None,
zmatch: bool = False,
labels: Optional[Dict[str, str]] = {"x": "bin", "y": "value"},
) -> None:
"""
Combine halo statistics of individual simulations which are stored in
pandas .h5 format into one xarray dataset file.
Args:
snap_nrs: Snapshot numbers at which to compare stats
z_nrs: Redshifts at which to compare stats
a_nrs: Scale factors at which to compare stats
"""
if zmatch:
z_nrs = self._find_common_z()
elif z_nrs is not None:
z_available = self.config["redshift"].values
z_nrs = [self._find_nearest(z_available, z) for z in z_nrs]
elif a_nrs is not None:
z_available = self.config["redshift"].values
z_nrs = [self._find_nearest(z_available, (1/a - 1)) for a in a_nrs]
# initialize arrays that will contain results
if file_dsc["extension"] == "h5":
_stats_file = (
self.sim[list(self.sim.keys())[0]].dirs["sim"]
+ "%s.h5" % file_dsc["root"]
)
y_val_box = pd.read_hdf(_stats_file, key="df")
y_val = np.zeros((len(self.sim_nrs), len(z_nrs), len(y_val_box.index.values)))
snap_nrs = np.zeros((len(self.sim_nrs), len(z_nrs)))
else:
raise SimulationCollectionWarning("File extension not supported")
# loop over simulations in collection
for sim_idx, sim_name in enumerate(list(self.sim.keys())):
sim_df = self.config.loc[(sim_idx + 1,)]
_stats_file = (
self.sim[sim_name].dirs["sim"]
+ f"{file_dsc["root"]}.{file_dsc["extension"]}"
)
y_val_box = pd.read_hdf(_stats_file, key="df")
for z_idx, z_nr in enumerate(z_nrs):
snap_nr = sim_df.iloc[
(sim_df["redshift"] - z_nr).abs().argsort()[:1]
].index.values[0]
snap_nrs[sim_idx, z_idx] = snap_nr
y_val[sim_idx, z_idx, :] = y_val_box["snap_%d" % snap_nr].values
x_val = y_val_box.index.values
ds = xr.Dataset(
{labels["y"]: (["box", "redshift", labels["x"]], y_val)},
coords={
"redshift": z_nrs,
"box": self.sim_nrs,
labels["x"]: x_val,
"snapshot": (["box", "redshift"], snap_nrs),
},
)
self._stats_to_file(ds, file_dsc, dir_out)
def compress_histograms(
self,
file_dsc: Dict[str, str],
dir_out: str,
) -> None:
"""
Args:
"""
# initialize arrays that will contain results
if file_dsc["extension"] == "h5":
_stats_file = (
self.sim[list(self.sim.keys())[0]].dirs["sim"]
+ "%s.h5" % file_dsc["root"]
)
y_val_box = pd.read_hdf(_stats_file, key="df")
columns = y_val_box.columns.values
y_val = np.zeros((
len(self.sim_nrs),
len(y_val_box.columns.values),
len(y_val_box.index.values),
))
else:
raise SimulationCollectionWarning("File extension not supported")
# loop over simulations in collection
for sim_idx, sim_name in enumerate(list(self.sim.keys())):
sim_df = self.config.loc[(sim_idx + 1,)]
_stats_file = (
self.sim[sim_name].dirs["sim"]
+ f"{file_dsc["root"]}.{file_dsc["extension"]}"
)
y_val_box = pd.read_hdf(_stats_file, key="df")
for c_idx, col in enumerate(columns):
y_val[sim_idx, c_idx, :] = y_val_box[col].values
x_val = y_val_box.index.values
ds = xr.Dataset(
{"count": (["box", "property", "bin"], y_val)},
coords={
"box": self.sim_nrs,
"property": columns,
"bin": x_val,
},
)
self._stats_to_file(ds, file_dsc, dir_out)
def _stats_to_file(
self, ds: xr.Dataset, file_dsc: Dict[str, str], dir_out: str
) -> None:
""" Write xr.Dataset to .nc file """
if not os.path.isdir(dir_out):
Path(dir_out).mkdir(parents=False, exist_ok=True)
file_out = dir_out + "%s.nc" % file_dsc["root"]
print(f"Save in -> {file_out}")
ds.to_netcdf(file_out)
def sum_raytracing_snapshots(
self,
dir_out: str,
columns: list,
columns_z_shift: list,
integration_range: dict,
ray_file_root: str = "Ray_maps_output%05d.h5",
sim_folder_root: str = "box%d",
z_src: Optional[float] = None,
z_src_shift: Optional[float] = None,
rm_ray: Optional[dict] = None,
    ) -> Union[pd.DataFrame, np.ndarray]:
"""
Adds different ray-tracing outputs together. This can give you the
integrated ray-tracing quantities between arbitrary redshifts along
        the light-cone. The ray-tracing outputs must either have the format of
RayRamses outputs in pd.DataFrames or np.ndarray images.
Args:
dir_out:
columns:
columns_z_shift:
integration_range:
ray_file_root:
sim_folder_root:
z_src:
z_src_shift:
"""
# sim_folder_root = self.dirs["lc"] + sim_folder_root
box_ray_nrs = self._get_box_and_ray_nrs_for_integration_range(
integration_range, rm_ray,
)
# loop over simulations in collection
first = True
for sim_idx, sim_name in enumerate(self.sim.keys()):
print(sim_name)
_sim = self.sim[sim_name]
box_nr = self._boxnr_from_simname(sim_name)
if box_nr not in list(box_ray_nrs.keys()):
continue
for ray_nr in box_ray_nrs[box_nr]:
sim_info_df = self.config.loc[(box_nr, ray_nr)]
ray_file = glob.glob(
_sim.dirs["sim"]
+ f"{_sim.file_dsc["root"]}_*{ray_nr}."
+ f"{_sim.file_dsc["extension"]}"
)[0]
if ray_file.split(".")[-1] == "h5":
ray_map_df = pd.read_hdf(ray_file, key="df", mode="r")
elif ray_file.split(".")[-1] == "npy":
ray_map = np.load(ray_file)
else:
                    raise SimulationCollectionWarning("This file type is not supported.")
print(
"Box Nr. %d; %s; Redshift %.3f"
% (box_nr, os.path.basename(ray_file), sim_info_df["redshift"])
)
if z_src_shift is not None and sim_info_df["redshift"] <= z_src_shift:
raise SimulationCollectionWarning(
"Redshift shift has not correct data structure"
)
# what snapshot to use if end of lightcone-box is reached
if (ray_box_info_df.name[1] == ray_nrs.min()) and (box_nr < 4):
z_next = self.config.loc[(box_nr + 1, 1)]["redshift"]
else:
z_next = ray_box_info_df.iloc[ii + 1]["redshift"]
# Shift redshift of light source
# only of kappa but not of iswrs !!!
if ray_file.split(".")[-1] == "h5":
ray_map_df["kappa_2"] = self._translate_redshift(
ray_map_df["kappa_2"].values,
sim_info_df["redshift"],
z_next,
z_src,
z_src_shift,
)
if first is True:
if ray_file.split(".")[-1] == "h5":
ray_df_sum = ray_map_df
else:
ray_sum = ray_map
first = False
else:
if ray_file.split(".")[-1] == "h5":
for column in columns:
ray_df_sum[column] = (
ray_df_sum[column].values + ray_map_df[column].values
)
else:
ray_sum += ray_map
if ray_file.split(".")[-1] == "h5":
return ray_df_sum
else:
return ray_sum
def _get_box_and_ray_nrs_for_integration_range(
self,
integration_range: dict,
rm_ray: Optional[dict] = None,
) -> dict:
"""
Get all box and ray-snapshot numbers for selected integration range.
Args:
integration_range: A dictionary of which the key must be one of
['box', 'ray', 'z'] to define how the range is defined. The
corresponding value is a list containing the integration
boundaries.
rm_ray: If any snapshot within the defined range should be
excluded, it can be identified by a similar dictionary
                to 'integration_range' except that now the value
identifies the snapshot to be removed.
Returns:
"""
if not integration_range["z"]:
if integration_range["box"][0] == 0:
print("Integrate over whole light-cone")
elif integration_range["ray"][0] == 0:
print("Integrate over box", integration_range["box"])
self.config = self.config[
self.config.index.get_level_values(0).isin(integration_range["box"])
]
else:
print("Integrate over redshift-range", integration_range["z"])
# if merging based on redshift
z_range = np.asarray(integration_range["z"])
self.config = self.config[
(z_range.min() < self.config["redshift"])
& (self.config["redshift"] < z_range.max())
]
box_and_ray_nrs = {}
for box_nr, ray_nr in self.config.index.values:
box_and_ray_nrs.setdefault(box_nr, []).append(ray_nr)
if rm_ray:
for box_nr in rm_ray.keys():
for ray_nr in rm_ray[box_nr]:
box_and_ray_nrs[box_nr].remove(ray_nr)
return box_and_ray_nrs
def _translate_redshift(
self,
quantity: np.ndarray,
z_near: float,
z_far: float,
z_src: float,
z_src_shift: float,
) -> float:
"""
Shift ray-tracing quantity in redshift.
Parameters
----------
        quantity : np.ndarray
            ray-ramses output quantity
        z_near : float
            redshift of the snapshot edge closer to the observer
        z_far : float
            redshift of the snapshot edge further from the observer
        z_src : float
            source redshift used in the ray-ramses simulation
        z_src_shift : float
            redshift to which the source is shifted
"""
x_far = self.cosmology.comoving_distance(z_far).to_value("Mpc")
x_near = self.cosmology.comoving_distance(z_near).to_value("Mpc")
x_src = self.cosmology.comoving_distance(z_src).to_value("Mpc")
if z_far > z_src_shift:
# if z of next snapshot larger than new source z, set the new source
# equal to it, so that a distance of 150[Mpc/h] is maintained
x_src_shift = self.cosmology.comoving_distance(z_far).to_value("Mpc")
else:
x_src_shift = self.cosmology.comoving_distance(z_src_shift).to_value("Mpc")
x_mid = 0.5 * (x_far + x_near)
quantity_shift = (
quantity
* self._kernel_function(x_mid, x_src_shift)
/ self._kernel_function(x_mid, x_src)
)
return quantity_shift
def _kernel_function(self, x: float, x_s: float) -> float:
"""
Args:
x np.float:
comoving distance
x_s np.float:
comoving distance to source
Returns:
"""
g = (x_s - x) * x / x_s
return g
def _merged_snapshots_to_file(
self, ray_df_sum: pd.DataFrame, dir_out: str, integration_range: dict
) -> None:
"""
Write merged ray-tracing pd.DataFrame to .h5 file
Args:
Returns:
"""
if not integration_range["z"]:
if integration_range["box"][0] == 0:
fout = dir_out + "Ray_maps_lc.h5"
elif integration_range["ray"][0] == 0:
fout = dir_out + "Ray_maps_box%d.h5" % box_nr
else:
fout = dir_out + "Ray_maps_zrange_%.2f_%.2f.h5" % (
self.config["redshift"].values.min(),
self.config["redshift"].values.max(),
)
if not os.path.isdir(dir_out):
Path(dir_out).mkdir(parents=True, exist_ok=True)
print(f"Save in -> {fout}")
ray_df_sum.to_hdf(fout, key="df", mode="w")
def _boxnr_from_simname(self, simname: Union[str, int]) -> int:
""" Get box number from simulation name """
if isinstance(simname, str):
box_nr = int(re.findall(r"\d+", simname)[0])
elif isinstance(simname, int):
box_nr = simname
return box_nr
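# --- Illustrative sketch (not part of the original module) ---
# Quick numeric check of the kernel used by _translate_redshift above:
# g(x, x_s) = (x_s - x) * x / x_s, which peaks halfway between the observer
# and the source.  Plain floats stand in for comoving distances in Mpc, so no
# cosmology object is needed.
def _example_kernel(x: float, x_s: float) -> float:
    return (x_s - x) * x / x_s

x_src_example = 3000.0                            # assumed source distance [Mpc]
for x in (750.0, 1500.0, 2250.0):
    print(x, _example_kernel(x, x_src_example))   # 562.5, then 750.0, then 562.5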
| import os, re, glob
import logging
from pathlib import Path
from typing import Dict, List, Optional, Tuple, Union
import yaml
import numpy as np
import pandas as pd
import xarray as xr
from astrild.rays.rayramses import RayRamses
from astrild.particles.ecosmog import Ecosmog
dir_src = Path(__file__).parent.absolute()
default_file_config = dir_src / "configs/simulation_collection.yaml"
default_config_file_particle = dir_src / "configs/particle_snapshot_info.h5"
default_config_file_ray = dir_src / "configs/ray_snapshot_info.h5"
class SimulationCollectionWarning(BaseException):
pass
class SimulationCollection:
"""
Class to handle a collection of Ramses based simulations.
Attributes:
config_file:
config_file_df:
Methods:
from_file:
compress_stats:
compress_histograms:
sum_raytracing_snapshots:
"""
def __init__(
self,
config: pd.DataFrame,
sims: Dict[str, Union[Ecosmog, RayRamses]],
):
"""
Args:
config:
Contains info on simulations in the collection and
their directories & files that contain data to analyze.
sims:
Contains the simulations in the collection.
"""
self.config = config
self.sim = sims
self.sim_nrs = np.arange(1, len(list(sims.keys())) + 1)
@classmethod
def from_file(
cls,
config_file: str,
config_file_df: str,
) -> "SimulationCollection":
"""
Initialize SimulationCollection from path to config files.
Args:
config_file:
config_file_df:
"""
with open(config_file) as f:
sims_args = yaml.load(f, Loader=yaml.FullLoader)
sim_type = sims_args[list(sims_args.keys())[0]]["type"]
if not os.path.isfile(config_file_df):
raise SimulationCollectionWarning(
"The file 'ray_snapshot_info.h5' does note exist"
)
elif sim_type == "particles":
config = pd.read_hdf(config_file_df, key="df")
elif sim_type == "rays":
config = pd.read_hdf(config_file_df, key="df")
sims = {}
for idx, (sim_name, sim_args) in enumerate(sims_args.items()):
if sim_args["type"] == "particles":
sims[sim_name] = Ecosmog(config.loc[(idx+1,)], **sim_args["init"])
elif sim_args["type"] == "rays":
sims[sim_name] = RayRamses(config.loc[(idx+1,)], **sim_args["init"])
else:
raise SimulationCollectionWarning(
f"{sim_args['type']} have not been simulated :-("
)
return SimulationCollection(config, sims)
def _find_common_z(self) -> np.array:
"""
Find the redshifts that the simulations in the collection have in common.
"""
z_nrs = self.config.loc[(self.sim_nrs[0],)]["redshift"].values
for sim_nr in self.sim_nrs:
z_nrs = np.intersect1d(z_nrs, self.config.loc[(sim_nr,)]["redshift"].values)
z_nrs = z_nrs[z_nrs < 2.3]
return z_nrs
def _find_nearest(self, array: np.ndarray, value: float) -> float:
""" Find element in array closest to value """
array = np.asarray(array)
idx = (np.abs(array - value)).argmin()
return array[idx]
def compress_stats(
self,
file_dsc: Dict[str, str],
dir_out: str,
snap_nrs: Optional[np.ndarray] = None,
z_nrs: Optional[List[float]] = None,
a_nrs: Optional[List[float]] = None,
zmatch: bool = False,
labels: Optional[Dict[str, str]] = {"x": "bin", "y": "value"},
) -> None:
"""
Combine halo statistics of individual simulations which are stored in
pandas .h5 format into one xarray dataset file.
Args:
snap_nrs: Snapshot numbers at which to compare stats
z_nrs: Redshifts at which to compare stats
a_nrs: Scale factors at which to compare stats
"""
if zmatch:
z_nrs = self._find_common_z()
elif z_nrs is not None:
z_available = self.config["redshift"].values
z_nrs = [self._find_nearest(z_available, z) for z in z_nrs]
elif a_nrs is not None:
z_available = self.config["redshift"].values
z_nrs = [self._find_nearest(z_available, (1/a - 1)) for a in a_nrs]
# initialize arrays that will contain results
if file_dsc["extension"] == "h5":
_stats_file = (
self.sim[list(self.sim.keys())[0]].dirs["sim"]
+ "%s.h5" % file_dsc["root"]
)
y_val_box = pd.read_hdf(_stats_file, key="df")
y_val = np.zeros((len(self.sim_nrs), len(z_nrs), len(y_val_box.index.values)))
snap_nrs = np.zeros((len(self.sim_nrs), len(z_nrs)))
else:
raise SimulationCollectionWarning("File extension not supported")
# loop over simulations in collection
for sim_idx, sim_name in enumerate(list(self.sim.keys())):
sim_df = self.config.loc[(sim_idx + 1,)]
_stats_file = (
self.sim[sim_name].dirs["sim"]
+ f"{file_dsc['root']}.{file_dsc['extension']}"
)
y_val_box = pd.read_hdf(_stats_file, key="df")
for z_idx, z_nr in enumerate(z_nrs):
snap_nr = sim_df.iloc[
(sim_df["redshift"] - z_nr).abs().argsort()[:1]
].index.values[0]
snap_nrs[sim_idx, z_idx] = snap_nr
y_val[sim_idx, z_idx, :] = y_val_box["snap_%d" % snap_nr].values
x_val = y_val_box.index.values
ds = xr.Dataset(
{labels["y"]: (["box", "redshift", labels["x"]], y_val)},
coords={
"redshift": z_nrs,
"box": self.sim_nrs,
labels["x"]: x_val,
"snapshot": (["box", "redshift"], snap_nrs),
},
)
self._stats_to_file(ds, file_dsc, dir_out)
def compress_histograms(
self,
file_dsc: Dict[str, str],
dir_out: str,
) -> None:
"""
Args:
"""
# initialize arrays that will contain results
if file_dsc["extension"] == "h5":
_stats_file = (
self.sim[list(self.sim.keys())[0]].dirs["sim"]
+ "%s.h5" % file_dsc["root"]
)
y_val_box = pd.read_hdf(_stats_file, key="df")
columns = y_val_box.columns.values
y_val = np.zeros((
len(self.sim_nrs),
len(y_val_box.columns.values),
len(y_val_box.index.values),
))
else:
raise SimulationCollectionWarning("File extension not supported")
# loop over simulations in collection
for sim_idx, sim_name in enumerate(list(self.sim.keys())):
sim_df = self.config.loc[(sim_idx + 1,)]
_stats_file = (
self.sim[sim_name].dirs["sim"]
+ f"{file_dsc['root']}.{file_dsc['extension']}"
)
y_val_box = pd.read_hdf(_stats_file, key="df")
for c_idx, col in enumerate(columns):
y_val[sim_idx, c_idx, :] = y_val_box[col].values
x_val = y_val_box.index.values
ds = xr.Dataset(
{"count": (["box", "property", "bin"], y_val)},
coords={
"box": self.sim_nrs,
"property": columns,
"bin": x_val,
},
)
self._stats_to_file(ds, file_dsc, dir_out)
def _stats_to_file(
self, ds: xr.Dataset, file_dsc: Dict[str, str], dir_out: str
) -> None:
""" Write xr.Dataset to .nc file """
if not os.path.isdir(dir_out):
Path(dir_out).mkdir(parents=False, exist_ok=True)
file_out = dir_out + "%s.nc" % file_dsc["root"]
print(f"Save in -> {file_out}")
ds.to_netcdf(file_out)
def sum_raytracing_snapshots(
self,
dir_out: str,
columns: list,
columns_z_shift: list,
integration_range: dict,
ray_file_root: str = "Ray_maps_output%05d.h5",
sim_folder_root: str = "box%d",
z_src: Optional[float] = None,
z_src_shift: Optional[float] = None,
rm_ray: Optional[dict] = None,
    ) -> Union[pd.DataFrame, np.ndarray]:
"""
Adds different ray-tracing outputs together. This can give you the
integrated ray-tracing quantities between arbitrary redshifts along
        the light-cone. The ray-tracing outputs must either have the format of
RayRamses outputs in pd.DataFrames or np.ndarray images.
Args:
dir_out:
columns:
columns_z_shift:
integration_range:
ray_file_root:
sim_folder_root:
z_src:
z_src_shift:
"""
# sim_folder_root = self.dirs["lc"] + sim_folder_root
box_ray_nrs = self._get_box_and_ray_nrs_for_integration_range(
integration_range, rm_ray,
)
# loop over simulations in collection
first = True
for sim_idx, sim_name in enumerate(self.sim.keys()):
print(sim_name)
_sim = self.sim[sim_name]
box_nr = self._boxnr_from_simname(sim_name)
if box_nr not in list(box_ray_nrs.keys()):
continue
for ray_nr in box_ray_nrs[box_nr]:
sim_info_df = self.config.loc[(box_nr, ray_nr)]
ray_file = glob.glob(
_sim.dirs["sim"]
+ f"{_sim.file_dsc['root']}_*{ray_nr}."
+ f"{_sim.file_dsc['extension']}"
)[0]
if ray_file.split(".")[-1] == "h5":
ray_map_df = pd.read_hdf(ray_file, key="df", mode="r")
elif ray_file.split(".")[-1] == "npy":
ray_map = np.load(ray_file)
else:
                    raise SimulationCollectionWarning("This file type is not supported.")
print(
"Box Nr. %d; %s; Redshift %.3f"
% (box_nr, os.path.basename(ray_file), sim_info_df["redshift"])
)
if z_src_shift is not None and sim_info_df["redshift"] <= z_src_shift:
raise SimulationCollectionWarning(
"Redshift shift has not correct data structure"
)
# what snapshot to use if end of lightcone-box is reached
if (ray_box_info_df.name[1] == ray_nrs.min()) and (box_nr < 4):
z_next = self.config.loc[(box_nr + 1, 1)]["redshift"]
else:
z_next = ray_box_info_df.iloc[ii + 1]["redshift"]
# Shift redshift of light source
# only of kappa but not of iswrs !!!
if ray_file.split(".")[-1] == "h5":
ray_map_df["kappa_2"] = self._translate_redshift(
ray_map_df["kappa_2"].values,
sim_info_df["redshift"],
z_next,
z_src,
z_src_shift,
)
if first is True:
if ray_file.split(".")[-1] == "h5":
ray_df_sum = ray_map_df
else:
ray_sum = ray_map
first = False
else:
if ray_file.split(".")[-1] == "h5":
for column in columns:
ray_df_sum[column] = (
ray_df_sum[column].values + ray_map_df[column].values
)
else:
ray_sum += ray_map
if ray_file.split(".")[-1] == "h5":
return ray_df_sum
else:
return ray_sum
def _get_box_and_ray_nrs_for_integration_range(
self,
integration_range: dict,
rm_ray: Optional[dict] = None,
) -> dict:
"""
Get all box and ray-snapshot numbers for selected integration range.
Args:
integration_range: A dictionary of which the key must be one of
['box', 'ray', 'z'] to define how the range is defined. The
corresponding value is a list containing the integration
boundaries.
rm_ray: If any snapshot within the defined range should be
excluded, it can be identified by a similar dictionary
                to 'integration_range' except that now the value
identifies the snapshot to be removed.
Returns:
"""
if not integration_range["z"]:
if integration_range["box"][0] == 0:
print("Integrate over whole light-cone")
elif integration_range["ray"][0] == 0:
print("Integrate over box", integration_range["box"])
self.config = self.config[
self.config.index.get_level_values(0).isin(integration_range["box"])
]
else:
print("Integrate over redshift-range", integration_range["z"])
# if merging based on redshift
z_range = np.asarray(integration_range["z"])
self.config = self.config[
(z_range.min() < self.config["redshift"])
& (self.config["redshift"] < z_range.max())
]
box_and_ray_nrs = {}
for box_nr, ray_nr in self.config.index.values:
box_and_ray_nrs.setdefault(box_nr, []).append(ray_nr)
if rm_ray:
for box_nr in rm_ray.keys():
for ray_nr in rm_ray[box_nr]:
box_and_ray_nrs[box_nr].remove(ray_nr)
return box_and_ray_nrs
def _translate_redshift(
self,
quantity: np.ndarray,
z_near: float,
z_far: float,
z_src: float,
z_src_shift: float,
) -> float:
"""
Shift ray-tracing quantity in redshift.
Parameters
----------
        quantity : np.ndarray
            ray-ramses output quantity
        z_near : float
            redshift of the snapshot edge closer to the observer
        z_far : float
            redshift of the snapshot edge further from the observer
        z_src : float
            source redshift used in the ray-ramses simulation
        z_src_shift : float
            redshift to which the source is shifted
"""
x_far = self.cosmology.comoving_distance(z_far).to_value("Mpc")
x_near = self.cosmology.comoving_distance(z_near).to_value("Mpc")
x_src = self.cosmology.comoving_distance(z_src).to_value("Mpc")
if z_far > z_src_shift:
# if z of next snapshot larger than new source z, set the new source
# equal to it, so that a distance of 150[Mpc/h] is maintained
x_src_shift = self.cosmology.comoving_distance(z_far).to_value("Mpc")
else:
x_src_shift = self.cosmology.comoving_distance(z_src_shift).to_value("Mpc")
x_mid = 0.5 * (x_far + x_near)
quantity_shift = (
quantity
* self._kernel_function(x_mid, x_src_shift)
/ self._kernel_function(x_mid, x_src)
)
return quantity_shift
def _kernel_function(self, x: float, x_s: float) -> float:
"""
Args:
x np.float:
comoving distance
x_s np.float:
comoving distance to source
Returns:
"""
g = (x_s - x) * x / x_s
return g
def _merged_snapshots_to_file(
self, ray_df_sum: pd.DataFrame, dir_out: str, integration_range: dict
) -> None:
"""
Write merged ray-tracing pd.DataFrame to .h5 file
Args:
Returns:
"""
if not integration_range["z"]:
if integration_range["box"][0] == 0:
fout = dir_out + "Ray_maps_lc.h5"
elif integration_range["ray"][0] == 0:
fout = dir_out + "Ray_maps_box%d.h5" % box_nr
else:
fout = dir_out + "Ray_maps_zrange_%.2f_%.2f.h5" % (
self.config["redshift"].values.min(),
self.config["redshift"].values.max(),
)
if not os.path.isdir(dir_out):
Path(dir_out).mkdir(parents=True, exist_ok=True)
print(f"Save in -> {fout}")
ray_df_sum.to_hdf(fout, key="df", mode="w")
def _boxnr_from_simname(self, simname: Union[str, int]) -> int:
""" Get box number from simulation name """
if isinstance(simname, str):
box_nr = int(re.findall(r"\d+", simname)[0])
elif isinstance(simname, int):
box_nr = simname
return box_nr
|
# -*- coding: utf-8 -*-
# Copyright (c) 2021, Lin To and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.website.website_generator import WebsiteGenerator
class SalesInvoice(WebsiteGenerator):
def validate(self):
self.validate_account_types()
self.validate_item_quantities()
def before_save(self):
self.route = self.name.lower().replace(" ", "-")
self.set_item_entry_values() # Value Per Unit
self.set_item_entry_cost()
self.set_invoice_cost()
def on_submit(self):
self.remove_items_from_inventory()
self.add_ledger_entries()
def on_cancel(self):
if self.docstatus == 0:
return
self.add_items_to_inventory()
self.cancel_ledger_entries()
def validate_account_type(self, account, account_types):
account_doc = frappe.get_doc("Account", account)
isnt_valid = account_doc.account_type not in account_types
if isnt_valid:
frappe.throw(f"{account} is not from {", ".join(account_types)}")
def validate_account_types(self):
self.validate_account_type(self.stock_account, ["Stock"])
self.validate_account_type(self.receiving_account, ["Receivable"])
def validate_item_quantities(self):
for item_entry in self.items:
try:
inventory_doc = frappe.get_doc("Inventory", item_entry.item)
except frappe.DoesNotExistError:
frappe.throw(f"{self.item} not available.")
if item_entry.quantity <= 0:
frappe.throw(f"{item_entry.item} quantity should be more than 0.")
elif item_entry.quantity > inventory_doc.quantity:
frappe.throw(f"Insufficient quantity.")
def set_item_entry_values(self):
for item_entry in self.items:
if not item_entry.value:
item_entry.value = frappe.get_doc("Item", item_entry.item).value
def set_item_entry_cost(self):
for item_entry in self.items:
item_entry.cost = item_entry.value * item_entry.quantity
def set_invoice_cost(self):
self.cost = sum([item_entry.cost for item_entry in self.items])
def remove_items_from_inventory(self):
for item_entry in self.items:
inventory_doc = frappe.get_doc("Inventory", item_entry.item)
inventory_doc.quantity = inventory_doc.quantity - item_entry.quantity
inventory_doc.save(ignore_permissions=True)
def add_items_to_inventory(self):
for item_entry in self.items:
inventory_doc = frappe.get_doc("Inventory", item_entry.item)
inventory_doc.quantity = inventory_doc.quantity + item_entry.quantity
inventory_doc.save(ignore_permissions=True)
def get_ledger_entry(
self, account, against_account, credit, debit, is_for_cancel=False
):
return frappe.get_doc(
doctype="GL Entry",
posting_date=self.posting_date,
account=account,
against_account=against_account,
credit=credit,
debit=debit,
voucher_type=f"{"Cancel" if is_for_cancel else ""}Sales Invoice",
company_name=self.company,
voucher_number=self.name,
)
def cancel_ledger_entries(self):
credit_entry = self.get_ledger_entry(
self.stock_account,
self.customer,
credit=0.0,
debit=self.cost,
is_for_cancel=True,
)
debit_entry = self.get_ledger_entry(
self.receiving_account,
self.customer,
credit=self.cost,
debit=0.0,
is_for_cancel=True,
)
self.insert_ledger_entries(credit_entry, debit_entry)
def add_ledger_entries(self):
# Create Ledger Entries
credit_entry = self.get_ledger_entry(
self.stock_account, self.customer, credit=self.cost, debit=0.0
)
debit_entry = self.get_ledger_entry(
self.receiving_account, self.stock_account, credit=0.0, debit=self.cost
)
self.insert_ledger_entries(credit_entry, debit_entry)
def insert_ledger_entries(self, credit_entry, debit_entry):
# Insert Ledger Entries
for gl_entry in [credit_entry, debit_entry]:
gl_entry.docstatus = 1
gl_entry.insert(ignore_permissions=True, ignore_if_duplicate=True)
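# --- Illustrative sketch (not part of the original doctype) ---
# Pure-python illustration of the amounts computed above: each item row gets
# cost = value * quantity, the invoice cost is their sum, and submission books
# that same amount once as a credit on the stock account and once as a debit
# on the receiving account.  The rows below are made up for illustration.
example_items = [
    {"item": "Pen", "value": 2.5, "quantity": 10},
    {"item": "Notebook", "value": 40.0, "quantity": 2},
]
for row in example_items:
    row["cost"] = row["value"] * row["quantity"]
invoice_cost = sum(row["cost"] for row in example_items)    # 25.0 + 80.0 = 105.0
credit_leg = {"account": "stock", "credit": invoice_cost, "debit": 0.0}
debit_leg = {"account": "receivable", "credit": 0.0, "debit": invoice_cost}
print(invoice_cost, credit_leg, debit_leg)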
| # -*- coding: utf-8 -*-
# Copyright (c) 2021, Lin To and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.website.website_generator import WebsiteGenerator
class SalesInvoice(WebsiteGenerator):
def validate(self):
self.validate_account_types()
self.validate_item_quantities()
def before_save(self):
self.route = self.name.lower().replace(" ", "-")
self.set_item_entry_values() # Value Per Unit
self.set_item_entry_cost()
self.set_invoice_cost()
def on_submit(self):
self.remove_items_from_inventory()
self.add_ledger_entries()
def on_cancel(self):
if self.docstatus == 0:
return
self.add_items_to_inventory()
self.cancel_ledger_entries()
def validate_account_type(self, account, account_types):
account_doc = frappe.get_doc("Account", account)
isnt_valid = account_doc.account_type not in account_types
if isnt_valid:
frappe.throw(f"{account} is not from {', '.join(account_types)}")
def validate_account_types(self):
self.validate_account_type(self.stock_account, ["Stock"])
self.validate_account_type(self.receiving_account, ["Receivable"])
def validate_item_quantities(self):
for item_entry in self.items:
try:
inventory_doc = frappe.get_doc("Inventory", item_entry.item)
except frappe.DoesNotExistError:
frappe.throw(f"{self.item} not available.")
if item_entry.quantity <= 0:
frappe.throw(f"{item_entry.item} quantity should be more than 0.")
elif item_entry.quantity > inventory_doc.quantity:
frappe.throw(f"Insufficient quantity.")
def set_item_entry_values(self):
for item_entry in self.items:
if not item_entry.value:
item_entry.value = frappe.get_doc("Item", item_entry.item).value
def set_item_entry_cost(self):
for item_entry in self.items:
item_entry.cost = item_entry.value * item_entry.quantity
def set_invoice_cost(self):
self.cost = sum([item_entry.cost for item_entry in self.items])
def remove_items_from_inventory(self):
for item_entry in self.items:
inventory_doc = frappe.get_doc("Inventory", item_entry.item)
inventory_doc.quantity = inventory_doc.quantity - item_entry.quantity
inventory_doc.save(ignore_permissions=True)
def add_items_to_inventory(self):
for item_entry in self.items:
inventory_doc = frappe.get_doc("Inventory", item_entry.item)
inventory_doc.quantity = inventory_doc.quantity + item_entry.quantity
inventory_doc.save(ignore_permissions=True)
def get_ledger_entry(
self, account, against_account, credit, debit, is_for_cancel=False
):
return frappe.get_doc(
doctype="GL Entry",
posting_date=self.posting_date,
account=account,
against_account=against_account,
credit=credit,
debit=debit,
voucher_type=f"{'Cancel' if is_for_cancel else ''}Sales Invoice",
company_name=self.company,
voucher_number=self.name,
)
def cancel_ledger_entries(self):
credit_entry = self.get_ledger_entry(
self.stock_account,
self.customer,
credit=0.0,
debit=self.cost,
is_for_cancel=True,
)
debit_entry = self.get_ledger_entry(
self.receiving_account,
self.customer,
credit=self.cost,
debit=0.0,
is_for_cancel=True,
)
self.insert_ledger_entries(credit_entry, debit_entry)
def add_ledger_entries(self):
# Create Ledger Entries
credit_entry = self.get_ledger_entry(
self.stock_account, self.customer, credit=self.cost, debit=0.0
)
debit_entry = self.get_ledger_entry(
self.receiving_account, self.stock_account, credit=0.0, debit=self.cost
)
self.insert_ledger_entries(credit_entry, debit_entry)
def insert_ledger_entries(self, credit_entry, debit_entry):
# Insert Ledger Entries
for gl_entry in [credit_entry, debit_entry]:
gl_entry.docstatus = 1
gl_entry.insert(ignore_permissions=True, ignore_if_duplicate=True)
|
# pylint: disable=no-self-use,invalid-name
from unittest import TestCase
from allennlp.models.archival import load_archive
from allennlp.service.predictors import Predictor
from propara.service.predictors.prostruct_prediction import ProStructPredictor
class TestProParaPredictor(TestCase):
def test_uses_named_inputs(self):
inputs = {"para_id": "4",
"sentence_texts": ["Plants die.",
"They are buried in sediment.",
"Bacteria is buried in the sediment.",
"Large amounts of sediment gradually pile on top of the original sediment.",
"Pressure builds up.",
"Heat increases.",
"The chemical structure of the buried sediment and plants changes.",
"The sediment and plants are at least one mile underground.",
"The buried area is extremely hot.",
"More chemical changes happen eand the buried material becomes oil."
],
"participants": ["plants",
"bacteria",
"sediment",
"oil"],
"states": [
["?", "?", "sediment", "sediment", "sediment", "sediment", "sediment", "sediment", "one mile underground", "one mile underground", "-"],
["?", "?", "?", "sediment", "sediment", "sediment", "sediment", "sediment", "sediment", "sediment", "-"],
["?", "?", "?", "?", "?", "?", "?", "?", "underground", "underground", "underground"],
["-", "-", "-", "-", "-", "-", "-", "-", "-", "-", "underground"]]}
archive = load_archive('tests/fixtures/prostruct/prostruct_toy_model.tar.gz')
predictor = Predictor.from_archive(archive, 'prostruct_prediction')
result = predictor.predict_json(inputs)
assert(result['para_id'] == '4')
assert(result["sentence_texts"] == ["Plants die.",
"They are buried in sediment.",
"Bacteria is buried in the sediment.",
"Large amounts of sediment gradually pile on top of the original sediment.",
"Pressure builds up.",
"Heat increases.",
"The chemical structure of the buried sediment and plants changes.",
"The sediment and plants are at least one mile underground.",
"The buried area is extremely hot.",
"More chemical changes happen eand the buried material becomes oil."
])
assert(result['participants'] == ["plants",
"bacteria",
"sediment",
"oil"])
# This changes with a new model (but some label must be predicted).
print(f"result['top1_labels']: {result["top1_labels"]}")
assert(len(result['top1_labels']) > 1)
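# --- Illustrative sketch (not part of the original test) ---
# The "states" grid above is expected to hold one row per participant and one
# entry per sentence boundary (before sentence 1 ... after the last sentence),
# i.e. len(sentence_texts) + 1 entries per row.  A tiny standalone check of
# that invariant, usable on the same `inputs` dict built in the test:
def check_states_shape(inputs):
    n_cols = len(inputs["sentence_texts"]) + 1
    assert len(inputs["states"]) == len(inputs["participants"])
    assert all(len(row) == n_cols for row in inputs["states"])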
| # pylint: disable=no-self-use,invalid-name
from unittest import TestCase
from allennlp.models.archival import load_archive
from allennlp.service.predictors import Predictor
from propara.service.predictors.prostruct_prediction import ProStructPredictor
class TestProParaPredictor(TestCase):
def test_uses_named_inputs(self):
inputs = {"para_id": "4",
"sentence_texts": ["Plants die.",
"They are buried in sediment.",
"Bacteria is buried in the sediment.",
"Large amounts of sediment gradually pile on top of the original sediment.",
"Pressure builds up.",
"Heat increases.",
"The chemical structure of the buried sediment and plants changes.",
"The sediment and plants are at least one mile underground.",
"The buried area is extremely hot.",
"More chemical changes happen eand the buried material becomes oil."
],
"participants": ["plants",
"bacteria",
"sediment",
"oil"],
"states": [
["?", "?", "sediment", "sediment", "sediment", "sediment", "sediment", "sediment", "one mile underground", "one mile underground", "-"],
["?", "?", "?", "sediment", "sediment", "sediment", "sediment", "sediment", "sediment", "sediment", "-"],
["?", "?", "?", "?", "?", "?", "?", "?", "underground", "underground", "underground"],
["-", "-", "-", "-", "-", "-", "-", "-", "-", "-", "underground"]]}
archive = load_archive('tests/fixtures/prostruct/prostruct_toy_model.tar.gz')
predictor = Predictor.from_archive(archive, 'prostruct_prediction')
result = predictor.predict_json(inputs)
assert(result['para_id'] == '4')
assert(result["sentence_texts"] == ["Plants die.",
"They are buried in sediment.",
"Bacteria is buried in the sediment.",
"Large amounts of sediment gradually pile on top of the original sediment.",
"Pressure builds up.",
"Heat increases.",
"The chemical structure of the buried sediment and plants changes.",
"The sediment and plants are at least one mile underground.",
"The buried area is extremely hot.",
"More chemical changes happen eand the buried material becomes oil."
])
assert(result['participants'] == ["plants",
"bacteria",
"sediment",
"oil"])
# This changes with a new model (but some label must be predicted).
print(f"result['top1_labels']: {result['top1_labels']}")
assert(len(result['top1_labels']) > 1)
|
'''
USAGE:
python test.py --img A_test.jpg
'''
import torch
import joblib
import torch.nn as nn
import numpy as np
import cv2
import argparse
import torchvision.transforms as transforms
import torch.nn.functional as F
import time
import cnn_models
from PIL import Image
# construct the argument parser and parse the arguments
parser = argparse.ArgumentParser()
parser.add_argument('-i', '--img', default='../app/src/main/assets/C1.jpg', type=str,
help='path for the image to test on')
args = vars(parser.parse_args())
aug = transforms.Compose([
transforms.Resize((224, 224)),
])
# load label binarizer
lb = joblib.load('lb.pkl')
model = cnn_models.CustomCNN()
model.load_state_dict(torch.load('asl.pth'))
print(model)
print('Model loaded')
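# Preprocess: resize with torchvision, convert the PIL image (HWC) to a CHW
# float32 array, then wrap it as a tensor with a leading batch dimension.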
image = Image.open(f"{args['img']}")
image = aug(image)
image = np.transpose(image, (2, 0, 1)).astype(np.float32)
image = torch.tensor(image, dtype=torch.float)
image = image.unsqueeze(0)
print(image.shape)
start = time.time()
outputs = model(image)
_, preds = torch.max(outputs.data, 1)
print('PREDS', preds)
print(f"Predicted output: {lb.classes_[preds]}")
end = time.time()
print(f"{(end-start):.3f} seconds")
| '''
USAGE:
python test.py --img A_test.jpg
'''
import torch
import joblib
import torch.nn as nn
import numpy as np
import cv2
import argparse
import torchvision.transforms as transforms
import torch.nn.functional as F
import time
import cnn_models
from PIL import Image
# construct the argument parser and parse the arguments
parser = argparse.ArgumentParser()
parser.add_argument('-i', '--img', default='../app/src/main/assets/C1.jpg', type=str,
help='path for the image to test on')
args = vars(parser.parse_args())
aug = transforms.Compose([
transforms.Resize((224, 224)),
])
# load label binarizer
lb = joblib.load('lb.pkl')
model = cnn_models.CustomCNN()
model.load_state_dict(torch.load('asl.pth'))
print(model)
print('Model loaded')
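# Preprocess: resize with torchvision, convert the PIL image (HWC) to a CHW
# float32 array, then wrap it as a tensor with a leading batch dimension.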
image = Image.open(f"{args['img']}")
image = aug(image)
image = np.transpose(image, (2, 0, 1)).astype(np.float32)
image = torch.tensor(image, dtype=torch.float)
image = image.unsqueeze(0)
print(image.shape)
start = time.time()
outputs = model(image)
_, preds = torch.max(outputs.data, 1)
print('PREDS', preds)
print(f"Predicted output: {lb.classes_[preds]}")
end = time.time()
print(f"{(end-start):.3f} seconds")
|
import logging
import os
import sys
from functools import partial, partialmethod
from inspect import stack
import yaml
from bunch import Bunch
from emoji import emojize
from loguru import logger
from rich.console import Console
from yaml import FullLoader as Full_Loader
from about import name as about_name
emoji = partial(emojize, use_aliases=True)
console = Console()
class Formatter:
@staticmethod
def info(name):
for _stack in stack():
if name in _stack.filename.replace('_', '-'):
return _stack
return None
@classmethod
def apply(cls, record):
record['called'] = Bunch(filename=record['file'].name,
function=record['function'],
lineno=record['line'],
icon=emoji(':computer:'))
record['elapsed'] = Bunch(
time=record['elapsed'], icon=emoji(':alarm_clock:'))
record['message'] = emoji(record['message'])
class Log:
default_icon = ':small_orange_diamond:'
@classmethod
def init(cls, config):
"""Set the default and levels and initialize the log manager.
:param cls: Log class.
        :param config: Path to the YAML config file. Each sink entry may set a
            'clear' flag to remove its previous log file before logging starts.
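        Example config (illustrative; the keys are inferred from the parsing
        below, and the specific level name, icon and handler args are made up):
            levels:
              - name: NOTICE
                icon: ':bell:'
            sinks:
              - klass: stdout        # 'stdout', 'stderr' or a filename template
                enabled: true
                clear: true          # delete an existing log file first
                args: {format: '{message}'}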
"""
with open(config) as cfg_file:
cfg = yaml.load(cfg_file, Loader=Full_Loader)
levels = cfg.get('levels', [])
for level in levels:
level['icon'] = emoji(level.get('icon', cls.default_icon))
name = level.get('name', None)
if name:
klass = type(logger)
setattr(klass, name.lower(), partialmethod(klass.log, name))
hdls = []
for sink_data in cfg.get('sinks', []):
if sink_data.get('enabled', True):
klass = sink_data.get('klass', None)
if klass:
if klass.lower() == 'stdout':
sink = sys.stdout
elif klass.lower() == 'stderr':
sink = sys.stderr
else:
sink = klass.format(name=name)
if (sink_data.get('clear', True) and
os.path.exists(sink)):
os.remove(sink)
hdls.append(dict(sink=sink, **sink_data.get('args', {})))
logger.configure(handlers=hdls, levels=cfg.get(
'levels', {}), patcher=Formatter.apply)
for level in levels:
cls.get('log').info(
                f"Found additional log level customization: {level['icon']:<3} {level['name']}") # noqa: E501
@classmethod
def get(cls, name=about_name):
out = logger.bind(context=name).opt(colors=True)
def __exception(msg, exception):
out.error(msg)
out.debug(exception)
out.exception = __exception
return out
@staticmethod
def get_levels():
"""Get list of log level names.
:returns: list of string
"""
return list(logging._levelToName.values())
| import logging
import os
import sys
from functools import partial, partialmethod
from inspect import stack
import yaml
from bunch import Bunch
from emoji import emojize
from loguru import logger
from rich.console import Console
from yaml import FullLoader as Full_Loader
from about import name as about_name
emoji = partial(emojize, use_aliases=True)
console = Console()
class Formatter:
@staticmethod
def info(name):
for _stack in stack():
if name in _stack.filename.replace('_', '-'):
return _stack
return None
@classmethod
def apply(cls, record):
record['called'] = Bunch(filename=record['file'].name,
function=record['function'],
lineno=record['line'],
icon=emoji(':computer:'))
record['elapsed'] = Bunch(
time=record['elapsed'], icon=emoji(':alarm_clock:'))
record['message'] = emoji(record['message'])
class Log:
default_icon = ':small_orange_diamond:'
@classmethod
def init(cls, config):
"""Set the default and levels and initialize the log manager.
:param cls: Log class.
        :param config: Path to the YAML config file. Each sink entry may set a
            'clear' flag to remove its previous log file before logging starts.
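        Example config (illustrative; the keys are inferred from the parsing
        below, and the specific level name, icon and handler args are made up):
            levels:
              - name: NOTICE
                icon: ':bell:'
            sinks:
              - klass: stdout        # 'stdout', 'stderr' or a filename template
                enabled: true
                clear: true          # delete an existing log file first
                args: {format: '{message}'}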
"""
with open(config) as cfg_file:
cfg = yaml.load(cfg_file, Loader=Full_Loader)
levels = cfg.get('levels', [])
for level in levels:
level['icon'] = emoji(level.get('icon', cls.default_icon))
name = level.get('name', None)
if name:
klass = type(logger)
setattr(klass, name.lower(), partialmethod(klass.log, name))
hdls = []
for sink_data in cfg.get('sinks', []):
if sink_data.get('enabled', True):
klass = sink_data.get('klass', None)
if klass:
if klass.lower() == 'stdout':
sink = sys.stdout
elif klass.lower() == 'stderr':
sink = sys.stderr
else:
sink = klass.format(name=name)
if (sink_data.get('clear', True) and
os.path.exists(sink)):
os.remove(sink)
hdls.append(dict(sink=sink, **sink_data.get('args', {})))
logger.configure(handlers=hdls, levels=cfg.get(
'levels', {}), patcher=Formatter.apply)
for level in levels:
cls.get('log').info(
f"Found additional log level customization: {level['icon']:<3} {level['name']}") # noqa: E501
@classmethod
def get(cls, name=about_name):
out = logger.bind(context=name).opt(colors=True)
def __exception(msg, exception):
out.error(msg)
out.debug(exception)
out.exception = __exception
return out
@staticmethod
def get_levels():
"""Get list of log level names.
:returns: list of string
"""
return list(logging._levelToName.values())
|
# -*- coding: utf-8 -*-
"""
Command Line Watcher that automatically compiles Qt .ui files to Python files.
Usage Example:
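    pyuiw path/to/widget.ui -o path/to/widget_ui.py
    pyuiw -w ./ui --exclude "*_test.ui"
The invocations above are illustrative placeholders assuming a `pyuiw` entry
point; see the command line options defined below for the full flag list.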
"""
# Import future modules
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Import built-in modules
import argparse
import copy
import fnmatch
from functools import partial
from io import open
import os
from pathlib import Path
import signal
from string import Template
import subprocess
import sys
# Import third-party modules
import Qt
from Qt import QtCore
from Qt import QtWidgets
import isort
import toml
# Import local modules
from pyuiw.uic import __version__ as PySideUicVersion
from pyuiw.uic.driver import Driver
__author__ = "timmyliang"
__email__ = "820472580@qq.com"
__date__ = "2020-12-04 10:50:02"
FILE = Path(__file__)
DIR = FILE.parent
signal.signal(signal.SIGINT, signal.SIG_DFL)
if sys.hexversion >= 0x03000000:
# Import local modules
from pyuiw.uic.port_v3.invoke import invoke
else:
# Import local modules
from pyuiw.uic.port_v2.invoke import invoke
Version = "Qt User Interface Compiler version %s, running on %s %s." % (
PySideUicVersion,
Qt.__binding__,
Qt.QtCore.qVersion(),
)
class CliBase(object):
def __init__(self):
self.default_exp = "<${ui_dir}/${ui_name}_ui.py>"
self.parser = argparse.ArgumentParser(
prog="pyuiw",
formatter_class=argparse.RawTextHelpFormatter,
description=Version + __doc__,
)
def is_exp(self, exp):
return exp.startswith("<") and exp.endswith(">")
def parse_exp(self, exp, ui_file):
ui_file = Path(ui_file)
is_exp = self.is_exp(exp)
if is_exp:
template = Template(exp[1:-1])
exp = template.substitute(
{"ui_name": ui_file.stem, "ui_dir": ui_file.parent}
)
exp = os.path.abspath(exp)
return exp
def parse_config(self):
config = getattr(self.opts, "config", "./pyproject.toml")
config = Path(config)
if not config.is_file():
return [], []
watch_list = []
exclude_list = []
with open(config, "r") as f:
config = toml.load(f)
tool = config.get("tool", {})
pyuiw = tool.get("pyuiw", {})
watch_list = pyuiw.get("watch", [])
exclude_list = pyuiw.get("exclude", [])
os.environ["pyuiw_isUseQt"] = str(pyuiw.get("useQt", True)).lower()
os.environ["pyuiw_QtModule"] = pyuiw.get("QtModule", "Qt")
opts = {
"output": self.default_exp,
"indent": 4,
"execute": True,
"debug": False,
"preview": False,
"from_imports": False,
"black": True,
"isort": True,
"ts": "",
}
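        # Values from pyproject.toml only take effect when the matching command
        # line option was left at its default, so explicit CLI flags keep
        # precedence over the config file.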
for opt, default in opts.items():
dst = pyuiw.get(opt, default)
src = getattr(self.opts, opt, None)
if src is not None and dst != default == src:
setattr(self.opts, opt, dst)
return watch_list, exclude_list
def parse(self):
self.opts, args = self.parser.parse_known_args()
watch_list, exclude_list = self.parse_config()
watch_list = getattr(self.opts, "watch", watch_list)
exclude_list = getattr(self.opts, "exclude", exclude_list)
# NOTES: add environment variable
if hasattr(self.opts, "useQt"):
os.environ["pyuiw_isUseQt"] = self.opts.useQt
if hasattr(self.opts, "QtModule"):
os.environ["pyuiw_QtModule"] = self.opts.QtModule
if not hasattr(self.opts, "ts"):
self.opts.ts = self.default_ts_exp
ui_file = args[0] if args else ""
if ui_file or not watch_list or not self.is_exp(self.opts.output):
return self.parse_single_ui(ui_file)
self.watch(watch_list, exclude_list)
def watch(self, watch_list, exclude_list):
app = QtWidgets.QApplication(sys.argv)
watcher = QtCore.QFileSystemWatcher()
paths = []
for path in watch_list:
path = Path(path.strip())
if path.is_file():
paths.append(str(path))
elif path.is_dir():
for root, dirs, files in os.walk(path):
root = Path(root)
for f in files:
if f.endswith(".ui"):
paths.append(str(root / f))
# NOTES filter path
for exclude in exclude_list:
for f in fnmatch.filter(paths, exclude):
paths.remove(f)
if not paths:
            sys.stderr.write("Error: could not find any ui file in the watch paths\n")
sys.exit(0)
print("watch ui files:")
print("\n".join(paths))
        print(f"\n{'=' * 40}\n")
for ui_file in paths:
self.parse_single_ui(ui_file)
watcher.addPath(str(ui_file))
watcher.fileChanged.connect(self.on_file_change)
app.exec_()
def on_file_change(self, ui_file):
self.parse_single_ui(ui_file)
def parse_single_ui(self, ui_file):
ui_file = Path(ui_file)
if not ui_file.is_file():
self.parser.print_usage()
sys.stderr.write("Error: one input ui-file must be specified\n")
return
opts = copy.deepcopy(self.opts)
opts.output = self.parse_exp(self.opts.output, ui_file)
ui_file = str(ui_file.absolute())
invoke(Driver(opts, ui_file))
if opts.black:
subprocess.call([sys.executable, "-m", "black", opts.output])
if opts.isort:
subprocess.call([sys.executable, "-m", "isort", opts.output])
# isort.file(opts.output)
ts = self.parse_exp(self.opts.ts, ui_file)
        try:
            if ts:
                # Create or refresh the .ts file, then have pyside2-lupdate update it.
                Path(ts).touch()
                subprocess.call(["pyside2-lupdate", opts.output, "-ts", ts])
except OSError:
print("[pyuiw] error: incorrect `ts` " + ts)
print("[pyuiw] output: ", opts.output)
class PyUIWatcherCli(CliBase):
def __init__(self):
super(PyUIWatcherCli, self).__init__()
self.parser.add_argument(
"-p",
"--preview",
dest="preview",
action="store_false",
default=False,
help="show a preview of the UI instead of generating code",
)
self.parser.add_argument(
"-o",
"--output",
dest="output",
action="store",
type=str,
default=self.default_exp,
metavar="FILE",
help="\n".join(
[
"write generated code to FILE instead of stdout",
                    f"<EXP> to define an output expression (default: {self.default_exp})",
r"${ui_dir} - input python directory path",
r"${ui_name} - input python file name",
]
),
)
self.parser.add_argument(
"-x",
"--execute",
dest="execute",
action="store_true",
default=True,
help="generate extra code to test and display the class",
)
self.parser.add_argument(
"-d",
"--debug",
dest="debug",
action="store_true",
default=False,
help="show debug output",
)
self.parser.add_argument(
"-i",
"--indent",
dest="indent",
action="store",
type=int,
default=4,
metavar="N",
help="set indent width to N spaces, tab if N is 0 (default: 4)",
)
g = self.parser.add_argument_group(title="Code generation options")
g.add_argument(
"--from-imports",
dest="from_imports",
action="store_true",
default=False,
help="generate imports relative to '.'",
)
g.add_argument(
"-nq",
"--no-useQt",
dest="useQt",
action="store_false",
default=argparse.SUPPRESS,
help="ignore Qt.py module for Qt compat",
)
g.add_argument(
"--QtModule",
dest="QtModule",
action="store",
type=str,
default=argparse.SUPPRESS,
metavar="module",
            help="customize the imported Qt module name (default: Qt) | only works when the --no-useQt flag is set",
)
g.add_argument(
"-nb",
"--no-black",
dest="black",
action="store_false",
default=True,
help="ignore black format code",
)
g.add_argument(
"-ni",
"--no-isort",
dest="isort",
action="store_false",
default=True,
help="ignore isort format code",
)
g.add_argument(
"-ts",
"--gen-ts",
dest="ts",
action="store",
type=str,
default="",
help="generate ts file for i18n | support <EXP> like --output",
)
self.parser.add_argument_group(g)
g = self.parser.add_argument_group(title="Watcher options")
g.add_argument(
"-w",
"--watch",
dest="watch",
nargs="+",
type=str,
default=argparse.SUPPRESS,
help="watch files or directories",
)
g.add_argument(
"-e",
"--exclude",
dest="exclude",
nargs="+",
type=str,
default=argparse.SUPPRESS,
help="exclude files glob expression",
)
g.add_argument(
"-c",
"--config",
dest="config",
default=argparse.SUPPRESS,
metavar="FILE",
help="read specific config file",
)
self.parser.add_argument_group(g)
self.parse()
sys.exit(0)
if __name__ == "__main__":
PyUIWatcherCli()
| # -*- coding: utf-8 -*-
"""
Command Line Watcher that automatically compiles Qt .ui files to Python files.
Usage Example:
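    pyuiw path/to/widget.ui -o path/to/widget_ui.py
    pyuiw -w ./ui --exclude "*_test.ui"
The invocations above are illustrative placeholders assuming a `pyuiw` entry
point; see the command line options defined below for the full flag list.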
"""
# Import future modules
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Import built-in modules
import argparse
import copy
import fnmatch
from functools import partial
from io import open
import os
from pathlib import Path
import signal
from string import Template
import subprocess
import sys
# Import third-party modules
import Qt
from Qt import QtCore
from Qt import QtWidgets
import isort
import toml
# Import local modules
from pyuiw.uic import __version__ as PySideUicVersion
from pyuiw.uic.driver import Driver
__author__ = "timmyliang"
__email__ = "820472580@qq.com"
__date__ = "2020-12-04 10:50:02"
FILE = Path(__file__)
DIR = FILE.parent
signal.signal(signal.SIGINT, signal.SIG_DFL)
if sys.hexversion >= 0x03000000:
# Import local modules
from pyuiw.uic.port_v3.invoke import invoke
else:
# Import local modules
from pyuiw.uic.port_v2.invoke import invoke
Version = "Qt User Interface Compiler version %s, running on %s %s." % (
PySideUicVersion,
Qt.__binding__,
Qt.QtCore.qVersion(),
)
class CliBase(object):
def __init__(self):
self.default_exp = "<${ui_dir}/${ui_name}_ui.py>"
self.parser = argparse.ArgumentParser(
prog="pyuiw",
formatter_class=argparse.RawTextHelpFormatter,
description=Version + __doc__,
)
def is_exp(self, exp):
return exp.startswith("<") and exp.endswith(">")
def parse_exp(self, exp, ui_file):
ui_file = Path(ui_file)
is_exp = self.is_exp(exp)
if is_exp:
template = Template(exp[1:-1])
exp = template.substitute(
{"ui_name": ui_file.stem, "ui_dir": ui_file.parent}
)
exp = os.path.abspath(exp)
return exp
def parse_config(self):
config = getattr(self.opts, "config", "./pyproject.toml")
config = Path(config)
if not config.is_file():
return [], []
watch_list = []
exclude_list = []
with open(config, "r") as f:
config = toml.load(f)
tool = config.get("tool", {})
pyuiw = tool.get("pyuiw", {})
watch_list = pyuiw.get("watch", [])
exclude_list = pyuiw.get("exclude", [])
os.environ["pyuiw_isUseQt"] = str(pyuiw.get("useQt", True)).lower()
os.environ["pyuiw_QtModule"] = pyuiw.get("QtModule", "Qt")
opts = {
"output": self.default_exp,
"indent": 4,
"execute": True,
"debug": False,
"preview": False,
"from_imports": False,
"black": True,
"isort": True,
"ts": "",
}
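        # Values from pyproject.toml only take effect when the matching command
        # line option was left at its default, so explicit CLI flags keep
        # precedence over the config file.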
for opt, default in opts.items():
dst = pyuiw.get(opt, default)
src = getattr(self.opts, opt, None)
if src is not None and dst != default == src:
setattr(self.opts, opt, dst)
return watch_list, exclude_list
def parse(self):
self.opts, args = self.parser.parse_known_args()
watch_list, exclude_list = self.parse_config()
watch_list = getattr(self.opts, "watch", watch_list)
exclude_list = getattr(self.opts, "exclude", exclude_list)
# NOTES: add environment variable
if hasattr(self.opts, "useQt"):
os.environ["pyuiw_isUseQt"] = self.opts.useQt
if hasattr(self.opts, "QtModule"):
os.environ["pyuiw_QtModule"] = self.opts.QtModule
if not hasattr(self.opts, "ts"):
self.opts.ts = self.default_ts_exp
ui_file = args[0] if args else ""
if ui_file or not watch_list or not self.is_exp(self.opts.output):
return self.parse_single_ui(ui_file)
self.watch(watch_list, exclude_list)
def watch(self, watch_list, exclude_list):
app = QtWidgets.QApplication(sys.argv)
watcher = QtCore.QFileSystemWatcher()
paths = []
for path in watch_list:
path = Path(path.strip())
if path.is_file():
paths.append(str(path))
elif path.is_dir():
for root, dirs, files in os.walk(path):
root = Path(root)
for f in files:
if f.endswith(".ui"):
paths.append(str(root / f))
# NOTES filter path
for exclude in exclude_list:
for f in fnmatch.filter(paths, exclude):
paths.remove(f)
if not paths:
            sys.stderr.write("Error: could not find any ui file in the watch paths\n")
sys.exit(0)
print("watch ui files:")
print("\n".join(paths))
print(f"\n{'=' * 40}\n")
for ui_file in paths:
self.parse_single_ui(ui_file)
watcher.addPath(str(ui_file))
watcher.fileChanged.connect(self.on_file_change)
app.exec_()
def on_file_change(self, ui_file):
self.parse_single_ui(ui_file)
def parse_single_ui(self, ui_file):
ui_file = Path(ui_file)
if not ui_file.is_file():
self.parser.print_usage()
sys.stderr.write("Error: one input ui-file must be specified\n")
return
opts = copy.deepcopy(self.opts)
opts.output = self.parse_exp(self.opts.output, ui_file)
ui_file = str(ui_file.absolute())
invoke(Driver(opts, ui_file))
if opts.black:
subprocess.call([sys.executable, "-m", "black", opts.output])
if opts.isort:
subprocess.call([sys.executable, "-m", "isort", opts.output])
# isort.file(opts.output)
ts = self.parse_exp(self.opts.ts, ui_file)
        try:
            if ts:
                # Create or refresh the .ts file, then have pyside2-lupdate update it.
                Path(ts).touch()
                subprocess.call(["pyside2-lupdate", opts.output, "-ts", ts])
except OSError:
print("[pyuiw] error: incorrect `ts` " + ts)
print("[pyuiw] output: ", opts.output)
class PyUIWatcherCli(CliBase):
def __init__(self):
super(PyUIWatcherCli, self).__init__()
self.parser.add_argument(
"-p",
"--preview",
dest="preview",
action="store_false",
default=False,
help="show a preview of the UI instead of generating code",
)
self.parser.add_argument(
"-o",
"--output",
dest="output",
action="store",
type=str,
default=self.default_exp,
metavar="FILE",
help="\n".join(
[
"write generated code to FILE instead of stdout",
                    f"<EXP> to define an output expression (default: {self.default_exp})",
r"${ui_dir} - input python directory path",
r"${ui_name} - input python file name",
]
),
)
self.parser.add_argument(
"-x",
"--execute",
dest="execute",
action="store_true",
default=True,
help="generate extra code to test and display the class",
)
self.parser.add_argument(
"-d",
"--debug",
dest="debug",
action="store_true",
default=False,
help="show debug output",
)
self.parser.add_argument(
"-i",
"--indent",
dest="indent",
action="store",
type=int,
default=4,
metavar="N",
help="set indent width to N spaces, tab if N is 0 (default: 4)",
)
g = self.parser.add_argument_group(title="Code generation options")
g.add_argument(
"--from-imports",
dest="from_imports",
action="store_true",
default=False,
help="generate imports relative to '.'",
)
g.add_argument(
"-nq",
"--no-useQt",
dest="useQt",
action="store_false",
default=argparse.SUPPRESS,
help="ignore Qt.py module for Qt compat",
)
g.add_argument(
"--QtModule",
dest="QtModule",
action="store",
type=str,
default=argparse.SUPPRESS,
metavar="module",
            help="customize the imported Qt module name (default: Qt) | only works when the --no-useQt flag is set",
)
g.add_argument(
"-nb",
"--no-black",
dest="black",
action="store_false",
default=True,
help="ignore black format code",
)
g.add_argument(
"-ni",
"--no-isort",
dest="isort",
action="store_false",
default=True,
help="ignore isort format code",
)
g.add_argument(
"-ts",
"--gen-ts",
dest="ts",
action="store",
type=str,
default="",
help="generate ts file for i18n | support <EXP> like --output",
)
self.parser.add_argument_group(g)
g = self.parser.add_argument_group(title="Watcher options")
g.add_argument(
"-w",
"--watch",
dest="watch",
nargs="+",
type=str,
default=argparse.SUPPRESS,
help="watch files or directories",
)
g.add_argument(
"-e",
"--exclude",
dest="exclude",
nargs="+",
type=str,
default=argparse.SUPPRESS,
help="exclude files glob expression",
)
g.add_argument(
"-c",
"--config",
dest="config",
default=argparse.SUPPRESS,
metavar="FILE",
help="read specific config file",
)
self.parser.add_argument_group(g)
self.parse()
sys.exit(0)
if __name__ == "__main__":
PyUIWatcherCli()
|
#-----------------------------------------------------------------------------
# Title : Top Level Navigation Budget Sheet
#-----------------------------------------------------------------------------
# This file is part of the TID ID Smartsheets software platform. It is subject to
# the license terms in the LICENSE.txt file found in the top-level directory
# of this distribution and at:
# https://confluence.slac.stanford.edu/display/ppareg/LICENSE.html.
# No part of the TID ID Smartsheets software platform, including this file, may be
# copied, modified, propagated, or distributed except according to the terms
# contained in the LICENSE.txt file.
#-----------------------------------------------------------------------------
import smartsheet # pip3 install smartsheet-python-sdk
from . import budget_sheet
from . import schedule_sheet
from . import tracking_sheet
TID_WORKSPACE = 4728845933799300
TID_ID_ACTIVE_FOLDER = 1039693589571460
TID_ID_TEMPLATE_FOLDER = 4013014891423620
TID_ID_LIST_SHEET = 2931334483076996
TID_ID_FOLDER_PREFIX = 'TID/ID'
TID_ACTUALS_SHEET = 7403570111768452
TID_ACTUALS_START_ROW = 5693264191481732
TID_ACTUALS_END_ROW = 1256792717715332
TID_RESOURCE_FOLDER = 6665944920549252
OVERHEAD_NOTE = '12.25% Overhead'
LABOR_RATE_NOTE = 'Labor Rate FY22 (Oct - Feb): $273.25; (Mar - Sep) $281.45; Slac Tech Rate FY22: $162.65'
# Standard Project Files & Template IDs
StandardProjectFiles = {'Budget': 6232441649162116,
'PM Scoring': 888744673863556,
'Risk Registry': 607544575059844,
'Schedule': 1728842021791620,
'Tracking': 4586297051375492}
def get_folder_data(*, client, folderId, path=None):
folder = client.Folders.get_folder(folderId)
ret = {'folder': folder}
ret['path'] = path
ret['tracked'] = False
ret['name'] = folder.name
ret['url'] = folder.permalink
ret['sheets'] = {k: None for k in StandardProjectFiles}
for s in folder.sheets:
for k in StandardProjectFiles:
if k == s.name[-len(k):]:
ret['sheets'][k] = s
return ret
def check_project(*, client, folderId, doFixes, path=None):
fdata = get_folder_data(client=client, folderId=folderId)
if path is not None:
print(f"Processing project {path} : {folderId}")
else:
        print(f"Processing project {fdata['folder'].name} : {folderId}")
##########################################################
    # First make sure the folder has all of the necessary files
##########################################################
for k, v in fdata['sheets'].items():
# Copy file if it is missing
if v is None:
print(f" Project is missing '{k}' file.")
if doFixes:
                print(f"    Copying '{k}' file to project.")
client.Sheets.copy_sheet(StandardProjectFiles[k], # Source sheet
smartsheet.models.ContainerDestination({'destination_type': 'folder',
'destination_id': fdata['folder'].id,
'new_name': fdata['folder'].name + ' ' + k}))
# Check for valid naming, rename if need be
elif 'Template Set ' not in fdata['folder'].name and not v.name.startswith(fdata['folder'].name):
print(f" Bad sheet name {v.name}.")
if doFixes:
print(f" Renaming {v.name}.")
client.Sheets.update_sheet(v.id, smartsheet.models.Sheet({'name': fdata['folder'].name + ' ' + k}))
# Refresh folder data, needed if new files were copied over
fdata = get_folder_data(client=client, folderId=folderId)
if fdata['sheets']['Budget'] is None or fdata['sheets']['Schedule'] is None:
print(" Skipping remaining processing")
return
# Re-read sheet data
fdata['sheets']['Budget'] = client.Sheets.get_sheet(fdata['sheets']['Budget'].id, include='format')
fdata['sheets']['Schedule'] = client.Sheets.get_sheet(fdata['sheets']['Schedule'].id, include='format')
fdata['sheets']['Tracking'] = client.Sheets.get_sheet(fdata['sheets']['Tracking'].id, include='format')
# Double check budget for new fix
if doFixes and not budget_sheet.check_structure(sheet=fdata['sheets']['Budget']):
print(" Attempting to update budget sheet")
budget_sheet.fix_structure(client=client, sheet=fdata['sheets']['Budget'])
fdata['sheets']['Budget'] = client.Sheets.get_sheet(fdata['sheets']['Budget'].id, include='format')
# Double check schedule for new fix
if doFixes and not schedule_sheet.check_structure(sheet=fdata['sheets']['Schedule']):
print(" Attempting to update schedule sheet")
schedule_sheet.fix_structure(client=client, sheet=fdata['sheets']['Schedule'])
fdata['sheets']['Schedule'] = client.Sheets.get_sheet(fdata['sheets']['Schedule'].id, include='format')
# Double check tracking for new fix
if doFixes and not tracking_sheet.check_structure(sheet=fdata['sheets']['Tracking']):
print(" Attempting to update tracking sheet")
tracking_sheet.fix_structure(client=client, sheet=fdata['sheets']['Tracking'])
fdata['sheets']['Tracking'] = client.Sheets.get_sheet(fdata['sheets']['Tracking'].id, include='format')
if budget_sheet.check_structure(sheet=fdata['sheets']['Budget']) and schedule_sheet.check_structure(sheet=fdata['sheets']['Schedule']) and tracking_sheet.check_structure(sheet=fdata['sheets']['Tracking']):
# Fix internal budget file references
laborRows = budget_sheet.check(client=client, sheet=fdata['sheets']['Budget'], doFixes=doFixes )
# Check schedule file
schedule_sheet.check(client=client, sheet=fdata['sheets']['Schedule'], laborRows=laborRows, laborSheet=fdata['sheets']['Budget'], doFixes=doFixes )
# Final fix of links in budget file
budget_sheet.check_task_links(client=client, sheet=fdata['sheets']['Budget'], laborRows=laborRows, scheduleSheet=fdata['sheets']['Schedule'], doFixes=doFixes)
# Fix tracking file
tracking_sheet.check(client=client, sheet=fdata['sheets']['Tracking'], budgetSheet=fdata['sheets']['Budget'], doFixes=doFixes)
else:
print(" Skipping remaining processing")
def get_active_list(*, client, path = TID_ID_FOLDER_PREFIX, folderId=TID_ID_ACTIVE_FOLDER):
folder = client.Folders.get_folder(folderId)
ret = {}
path = path + '/' + folder.name
# No sub folders, this might be a project
if len(folder.folders) == 0:
# Skip projects with no sheets
if len(folder.sheets) != 0:
ret[folderId] = get_folder_data(client=client, folderId=folder.id, path=path)
else:
for sub in folder.folders:
ret.update(get_active_list(client=client, path=path, folderId=sub.id))
return ret
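# Example driver (illustrative only; the client construction and token below are
# assumptions, not part of this module):
#   client = smartsheet.Smartsheet('<api access token>')
#   for fid, fdata in get_active_list(client=client).items():
#       check_project(client=client, folderId=fid, doFixes=False, path=fdata['path'])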
| #-----------------------------------------------------------------------------
# Title : Top Level Navigation Budget Sheet
#-----------------------------------------------------------------------------
# This file is part of the TID ID Smartsheets software platform. It is subject to
# the license terms in the LICENSE.txt file found in the top-level directory
# of this distribution and at:
# https://confluence.slac.stanford.edu/display/ppareg/LICENSE.html.
# No part of the TID ID Smartsheets software platform, including this file, may be
# copied, modified, propagated, or distributed except according to the terms
# contained in the LICENSE.txt file.
#-----------------------------------------------------------------------------
import smartsheet # pip3 install smartsheet-python-sdk
from . import budget_sheet
from . import schedule_sheet
from . import tracking_sheet
TID_WORKSPACE = 4728845933799300
TID_ID_ACTIVE_FOLDER = 1039693589571460
TID_ID_TEMPLATE_FOLDER = 4013014891423620
TID_ID_LIST_SHEET = 2931334483076996
TID_ID_FOLDER_PREFIX = 'TID/ID'
TID_ACTUALS_SHEET = 7403570111768452
TID_ACTUALS_START_ROW = 5693264191481732
TID_ACTUALS_END_ROW = 1256792717715332
TID_RESOURCE_FOLDER = 6665944920549252
OVERHEAD_NOTE = '12.25% Overhead'
LABOR_RATE_NOTE = 'Labor Rate FY22 (Oct - Feb): $273.25; (Mar - Sep) $281.45; Slac Tech Rate FY22: $162.65'
# Standard Project Files & Template IDs
StandardProjectFiles = {'Budget': 6232441649162116,
'PM Scoring': 888744673863556,
'Risk Registry': 607544575059844,
'Schedule': 1728842021791620,
'Tracking': 4586297051375492}
def get_folder_data(*, client, folderId, path=None):
folder = client.Folders.get_folder(folderId)
ret = {'folder': folder}
ret['path'] = path
ret['tracked'] = False
ret['name'] = folder.name
ret['url'] = folder.permalink
ret['sheets'] = {k: None for k in StandardProjectFiles}
for s in folder.sheets:
for k in StandardProjectFiles:
if k == s.name[-len(k):]:
ret['sheets'][k] = s
return ret
def check_project(*, client, folderId, doFixes, path=None):
fdata = get_folder_data(client=client, folderId=folderId)
if path is not None:
print(f"Processing project {path} : {folderId}")
else:
print(f"Processing project {fdata['folder'].name} : {folderId}")
##########################################################
    # First make sure the folder has all of the necessary files
##########################################################
for k, v in fdata['sheets'].items():
# Copy file if it is missing
if v is None:
print(f" Project is missing '{k}' file.")
if doFixes:
                print(f"    Copying '{k}' file to project.")
client.Sheets.copy_sheet(StandardProjectFiles[k], # Source sheet
smartsheet.models.ContainerDestination({'destination_type': 'folder',
'destination_id': fdata['folder'].id,
'new_name': fdata['folder'].name + ' ' + k}))
# Check for valid naming, rename if need be
elif 'Template Set ' not in fdata['folder'].name and not v.name.startswith(fdata['folder'].name):
print(f" Bad sheet name {v.name}.")
if doFixes:
print(f" Renaming {v.name}.")
client.Sheets.update_sheet(v.id, smartsheet.models.Sheet({'name': fdata['folder'].name + ' ' + k}))
# Refresh folder data, needed if new files were copied over
fdata = get_folder_data(client=client, folderId=folderId)
if fdata['sheets']['Budget'] is None or fdata['sheets']['Schedule'] is None:
print(" Skipping remaining processing")
return
# Re-read sheet data
fdata['sheets']['Budget'] = client.Sheets.get_sheet(fdata['sheets']['Budget'].id, include='format')
fdata['sheets']['Schedule'] = client.Sheets.get_sheet(fdata['sheets']['Schedule'].id, include='format')
fdata['sheets']['Tracking'] = client.Sheets.get_sheet(fdata['sheets']['Tracking'].id, include='format')
# Double check budget for new fix
if doFixes and not budget_sheet.check_structure(sheet=fdata['sheets']['Budget']):
print(" Attempting to update budget sheet")
budget_sheet.fix_structure(client=client, sheet=fdata['sheets']['Budget'])
fdata['sheets']['Budget'] = client.Sheets.get_sheet(fdata['sheets']['Budget'].id, include='format')
# Double check schedule for new fix
if doFixes and not schedule_sheet.check_structure(sheet=fdata['sheets']['Schedule']):
print(" Attempting to update schedule sheet")
schedule_sheet.fix_structure(client=client, sheet=fdata['sheets']['Schedule'])
fdata['sheets']['Schedule'] = client.Sheets.get_sheet(fdata['sheets']['Schedule'].id, include='format')
# Double check tracking for new fix
if doFixes and not tracking_sheet.check_structure(sheet=fdata['sheets']['Tracking']):
print(" Attempting to update tracking sheet")
tracking_sheet.fix_structure(client=client, sheet=fdata['sheets']['Tracking'])
fdata['sheets']['Tracking'] = client.Sheets.get_sheet(fdata['sheets']['Tracking'].id, include='format')
if budget_sheet.check_structure(sheet=fdata['sheets']['Budget']) and schedule_sheet.check_structure(sheet=fdata['sheets']['Schedule']) and tracking_sheet.check_structure(sheet=fdata['sheets']['Tracking']):
# Fix internal budget file references
laborRows = budget_sheet.check(client=client, sheet=fdata['sheets']['Budget'], doFixes=doFixes )
# Check schedule file
schedule_sheet.check(client=client, sheet=fdata['sheets']['Schedule'], laborRows=laborRows, laborSheet=fdata['sheets']['Budget'], doFixes=doFixes )
# Final fix of links in budget file
budget_sheet.check_task_links(client=client, sheet=fdata['sheets']['Budget'], laborRows=laborRows, scheduleSheet=fdata['sheets']['Schedule'], doFixes=doFixes)
# Fix tracking file
tracking_sheet.check(client=client, sheet=fdata['sheets']['Tracking'], budgetSheet=fdata['sheets']['Budget'], doFixes=doFixes)
else:
print(" Skipping remaining processing")
def get_active_list(*, client, path = TID_ID_FOLDER_PREFIX, folderId=TID_ID_ACTIVE_FOLDER):
folder = client.Folders.get_folder(folderId)
ret = {}
path = path + '/' + folder.name
# No sub folders, this might be a project
if len(folder.folders) == 0:
# Skip projects with no sheets
if len(folder.sheets) != 0:
ret[folderId] = get_folder_data(client=client, folderId=folder.id, path=path)
else:
for sub in folder.folders:
ret.update(get_active_list(client=client, path=path, folderId=sub.id))
return ret
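# Example driver (illustrative only; the client construction and token below are
# assumptions, not part of this module):
#   client = smartsheet.Smartsheet('<api access token>')
#   for fid, fdata in get_active_list(client=client).items():
#       check_project(client=client, folderId=fid, doFixes=False, path=fdata['path'])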
|
#!/usr/bin/env python
# coding: utf-8
import os
from flask import request, abort, jsonify
from app.auxiliary.query_tools import initialProcessing, logFail, logSuccess
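# Each handler below receives `parameters` (supplied, it appears, by the
# initialProcessing decorator); the lookups in this module expect it to carry at
# least 'folder' (the storage directory) plus 'extension' and 'entity' strings
# used in the error messages.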
# Upload a component
@initialProcessing
def storage_upload(parameters, start_time, query_id):
if 'file' not in request.files or not request.files['file'].filename:
code = 400
logFail(query_id, start_time, code)
        abort(code, f"Bad request body. Expected {parameters.get('extension')} file "
f"with key 'file' and correct filename in request body.")
filename = request.files['file'].filename \
if not request.headers.get('filename') \
else request.headers.get('filename')
if not isinstance(filename, str) or len(filename) > 99:
code = 400
logFail(query_id, start_time, code)
abort(code, "Too long or bad filename.")
abs_path_filename = os.path.join(parameters.get('folder'), filename)
if os.path.isfile(abs_path_filename):
code = 409
logFail(query_id, start_time, code)
        abort(code, f"The {parameters.get('entity')} with the same filename already exists.")
request.files['file'].save(abs_path_filename)
logSuccess(query_id, start_time)
return '', 204
# Replace a component
@initialProcessing
def storage_replace(parameters, start_time, query_id):
if 'file' not in request.files or not request.files['file'].filename:
code = 400
logFail(query_id, start_time, code)
        abort(code, f"Bad request body. Expected {parameters.get('extension')} file "
f"with key 'file' and correct filename in request body.")
filename = request.files['file'].filename \
if not request.headers.get('filename') \
else request.headers.get('filename')
if not isinstance(filename, str) or len(filename) > 99:
code = 400
logFail(query_id, start_time, code)
abort(code, "Too long or bad filename.")
abs_path_filename = os.path.join(parameters.get('folder'), filename)
if not os.path.isfile(abs_path_filename):
code = 404
logFail(query_id, start_time, code)
        abort(code, f"No {parameters.get('entity')} with this filename.")
os.remove(abs_path_filename)
request.files['file'].save(abs_path_filename)
logSuccess(query_id, start_time)
return '', 204
# Upload or update a component
@initialProcessing
def storage_insert(parameters, start_time, query_id):
if 'file' not in request.files or not request.files['file'].filename:
code = 400
logFail(query_id, start_time, code)
        abort(code, f"Bad request body. Expected {parameters.get('extension')} file "
f"with key 'file' and correct filename in request body.")
filename = request.files['file'].filename \
if not request.headers.get('filename') \
else request.headers.get('filename')
if not isinstance(filename, str) or len(filename) > 99:
code = 400
logFail(query_id, start_time, code)
abort(code, "Too long or bad filename.")
abs_path_filename = os.path.join(parameters.get('folder'), filename)
if os.path.isfile(abs_path_filename):
os.remove(abs_path_filename)
request.files['file'].save(abs_path_filename)
logSuccess(query_id, start_time)
return '', 204
# List uploaded components
@initialProcessing
def storage_list(parameters, start_time, query_id):
files = next(os.walk(parameters.get('folder')))[2]
logSuccess(query_id, start_time)
if not files:
return jsonify("Empty set"), 200
return jsonify(files), 200
# Delete a component
@initialProcessing
def storage_delete(parameters, filename, start_time, query_id):
if not isinstance(filename, str) or len(filename) > 99:
code = 400
logFail(query_id, start_time, code)
abort(code, "Too long or bad filename.")
abs_path_filename = os.path.join(parameters.get('folder'), filename)
if not os.path.isfile(abs_path_filename):
code = 404
logFail(query_id, start_time, code)
        abort(code, f"No {parameters.get('entity')} with this filename.")
os.remove(abs_path_filename)
logSuccess(query_id, start_time)
return '', 204
| #!/usr/bin/env python
# coding: utf-8
import os
from flask import request, abort, jsonify
from app.auxiliary.query_tools import initialProcessing, logFail, logSuccess
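# Each handler below receives `parameters` (supplied, it appears, by the
# initialProcessing decorator); the lookups in this module expect it to carry at
# least 'folder' (the storage directory) plus 'extension' and 'entity' strings
# used in the error messages.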
# Upload a component
@initialProcessing
def storage_upload(parameters, start_time, query_id):
if 'file' not in request.files or not request.files['file'].filename:
code = 400
logFail(query_id, start_time, code)
abort(code, f"Bad request body. Expected {parameters.get('extension')} file "
f"with key 'file' and correct filename in request body.")
filename = request.files['file'].filename \
if not request.headers.get('filename') \
else request.headers.get('filename')
if not isinstance(filename, str) or len(filename) > 99:
code = 400
logFail(query_id, start_time, code)
abort(code, "Too long or bad filename.")
abs_path_filename = os.path.join(parameters.get('folder'), filename)
if os.path.isfile(abs_path_filename):
code = 409
logFail(query_id, start_time, code)
abort(code, f"The {parameters.get('entity')} with the same filename already exists.")
request.files['file'].save(abs_path_filename)
logSuccess(query_id, start_time)
return '', 204
# Replace a component
@initialProcessing
def storage_replace(parameters, start_time, query_id):
if 'file' not in request.files or not request.files['file'].filename:
code = 400
logFail(query_id, start_time, code)
abort(code, f"Bad request body. Expected {parameters.get('extension')} file "
f"with key 'file' and correct filename in request body.")
filename = request.files['file'].filename \
if not request.headers.get('filename') \
else request.headers.get('filename')
if not isinstance(filename, str) or len(filename) > 99:
code = 400
logFail(query_id, start_time, code)
abort(code, "Too long or bad filename.")
abs_path_filename = os.path.join(parameters.get('folder'), filename)
if not os.path.isfile(abs_path_filename):
code = 404
logFail(query_id, start_time, code)
abort(code, f"No {parameters.get('entity')} with this filename.")
os.remove(abs_path_filename)
request.files['file'].save(abs_path_filename)
logSuccess(query_id, start_time)
return '', 204
# Upload or update a component
@initialProcessing
def storage_insert(parameters, start_time, query_id):
if 'file' not in request.files or not request.files['file'].filename:
code = 400
logFail(query_id, start_time, code)
abort(code, f"Bad request body. Expected {parameters.get('extension')} file "
f"with key 'file' and correct filename in request body.")
filename = request.files['file'].filename \
if not request.headers.get('filename') \
else request.headers.get('filename')
if not isinstance(filename, str) or len(filename) > 99:
code = 400
logFail(query_id, start_time, code)
abort(code, "Too long or bad filename.")
abs_path_filename = os.path.join(parameters.get('folder'), filename)
if os.path.isfile(abs_path_filename):
os.remove(abs_path_filename)
request.files['file'].save(abs_path_filename)
logSuccess(query_id, start_time)
return '', 204
# List uploaded components
@initialProcessing
def storage_list(parameters, start_time, query_id):
files = next(os.walk(parameters.get('folder')))[2]
logSuccess(query_id, start_time)
if not files:
return jsonify("Empty set"), 200
return jsonify(files), 200
# Delete a component
@initialProcessing
def storage_delete(parameters, filename, start_time, query_id):
if not isinstance(filename, str) or len(filename) > 99:
code = 400
logFail(query_id, start_time, code)
abort(code, "Too long or bad filename.")
abs_path_filename = os.path.join(parameters.get('folder'), filename)
if not os.path.isfile(abs_path_filename):
code = 404
logFail(query_id, start_time, code)
abort(code, f"No {parameters.get('entity')} with this filename.")
os.remove(abs_path_filename)
logSuccess(query_id, start_time)
return '', 204
|
"""
VSAN Policy Maker for Cisco Intersight, v2.0
Author: Ugo Emekauwa
Contact: uemekauw@cisco.com, uemekauwa@gmail.com
Summary: The VSAN Policy Maker for Cisco Intersight automates the creation
of VSAN Policies.
GitHub Repository: https://github.com/ugo-emekauwa/cisco-imm-automation-tools
"""
########################
# MODULE REQUIREMENT 1 #
########################
"""
For the following variable below named key_id, please fill in between
the quotes your Intersight API Key ID.
Here is an example:
key_id = "5c89885075646127773ec143/5c82fc477577712d3088eb2f/5c8987b17577712d302eaaff"
"""
key_id = ""
########################
# MODULE REQUIREMENT 2 #
########################
"""
For the following variable below named key, please fill in between
the quotes your system's file path to your Intersight API key "SecretKey.txt"
file.
Here is an example:
key = "C:\\Users\\demouser\\Documents\\SecretKey.txt"
"""
key = ""
########################
# MODULE REQUIREMENT 3 #
########################
"""
Provide the required configuration settings to create the
VSAN Policy on Cisco Intersight. Remove the sample
values and replace them with your own, where applicable.
"""
####### Start Configuration Settings - Provide values for the variables listed below. #######
# General Settings
vsan_policy_name = "VSAN-Policy-1"
vsan_policy_description = "A Cisco Intersight VSAN Policy generated by the VSAN Policy Maker."
vsan_policy_organization = "default"
vsan_policy_tags = {"Org": "IT", "Dept": "DevOps"} # Empty the vsan_policy_tags dictionary if no tags are needed, for example: vsan_policy_tags = {}
# Policy Detail Settings
## NOTE - To not configure any VSANs, leave the corresponding list empty, for example: vsan_list = []
vsan_list = [
{"Vsans": "100", "Name": "ESXi_FC_Storage", "FcoeVlans": "1000"},
{"Vsans": "101-102,150,170", "Name": "Hyper-V_FC_Storage", "FcoeVlans": "1001-1002,1050,1070", "DefaultZoning": "Disabled"},
{"Vsans": "175-177", "Name": "Xen_FC_Storage", "FcoeVlans": "1075-1077", "DefaultZoning": "Enabled"},
]
enable_uplink_trunking = False
# Intersight Base URL Setting (Change only if using the Intersight Virtual Appliance)
intersight_base_url = "https://www.intersight.com/api/v1"
# UCS Domain Profile Attachment Settings
ucs_domain_profile_name = ""
vsan_policy_fabric_interconnect = "A" # Options: "A", "B", "AB"
####### Finish Configuration Settings - The required value entries are complete. #######
#############################################################################################################################
#############################################################################################################################
import sys
import traceback
import json
import copy
import intersight
import re
# Function to get Intersight API client as specified in the Intersight Python SDK documentation for OpenAPI 3.x
## Modified to align with overall formatting and try/except blocks added for additional error handling
def get_api_client(api_key_id,
api_secret_file,
endpoint="https://intersight.com"
):
try:
with open(api_secret_file, 'r') as f:
api_key = f.read()
if re.search('BEGIN RSA PRIVATE KEY', api_key):
# API Key v2 format
signing_algorithm = intersight.signing.ALGORITHM_RSASSA_PKCS1v15
signing_scheme = intersight.signing.SCHEME_RSA_SHA256
hash_algorithm = intersight.signing.HASH_SHA256
elif re.search('BEGIN EC PRIVATE KEY', api_key):
# API Key v3 format
signing_algorithm = intersight.signing.ALGORITHM_ECDSA_MODE_DETERMINISTIC_RFC6979
signing_scheme = intersight.signing.SCHEME_HS2019
hash_algorithm = intersight.signing.HASH_SHA256
configuration = intersight.Configuration(
host=endpoint,
signing_info=intersight.signing.HttpSigningConfiguration(
key_id=api_key_id,
private_key_path=api_secret_file,
signing_scheme=signing_scheme,
signing_algorithm=signing_algorithm,
hash_algorithm=hash_algorithm,
signed_headers=[
intersight.signing.HEADER_REQUEST_TARGET,
intersight.signing.HEADER_HOST,
intersight.signing.HEADER_DATE,
intersight.signing.HEADER_DIGEST,
]
)
)
except Exception:
print("\nA configuration error has occurred!\n")
print("Unable to access the Intersight API Key.")
print("Exiting due to the Intersight API Key being unavailable.\n")
print("Please verify that the correct API Key ID and API Key have "
"been entered, then re-attempt execution.\n")
print("Exception Message: ")
traceback.print_exc()
sys.exit(0)
return intersight.ApiClient(configuration)
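# Example (illustrative): api_client = get_api_client(key_id, key)
# A different endpoint can be passed when targeting the Intersight Virtual Appliance.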
# Establish function to test for the availability of the Intersight API and Intersight account
def test_intersight_api_service(intersight_api_key_id,
intersight_api_key,
intersight_base_url="https://www.intersight.com/api/v1",
preconfigured_api_client=None
):
"""This is a function to test the availability of the Intersight API and
Intersight account. The tested Intersight account contains the user who is
the owner of the provided Intersight API Key and Key ID.
Args:
intersight_api_key_id (str):
The ID of the Intersight API key.
intersight_api_key (str):
The system file path of the Intersight API key.
intersight_base_url (str):
            Optional; The base URL for Intersight API paths. The default value
            is "https://www.intersight.com/api/v1". This value typically only
            needs to be changed if using the Intersight Virtual Appliance.
preconfigured_api_client ("ApiClient"):
Optional; An ApiClient class instance which handles
Intersight client-server communication through the use of API keys.
The default value is None. If a preconfigured_api_client argument
is provided, empty strings ("") or None can be provided for the
intersight_api_key_id, intersight_api_key, and intersight_base_url
arguments.
Returns:
A string of the name for the Intersight account tested, verifying the
Intersight API service is up and the Intersight account is accessible.
Raises:
Exception:
An exception occurred due to an issue with the provided API Key
and/or API Key ID.
"""
# Define Intersight SDK ApiClient variable
if preconfigured_api_client is None:
api_client = get_api_client(api_key_id=intersight_api_key_id,
api_secret_file=intersight_api_key,
endpoint=intersight_base_url
)
else:
api_client = preconfigured_api_client
try:
# Check that Intersight Account is accessible
print("Testing access to the Intersight API by verifying the "
"Intersight account information...")
api_client.call_api(resource_path="/iam/Accounts",
method="GET",
auth_settings=['cookieAuth', 'http_signature', 'oAuth2', 'oAuth2']
)
response = api_client.last_response.data
iam_account = json.loads(response)
if api_client.last_response.status != 200:
print("\nThe Intersight API and Account Availability Test did not "
"pass.")
print("The Intersight account information could not be verified.")
print("Exiting due to the Intersight account being unavailable.\n")
print("Please verify that the correct API Key ID and API Key have "
"been entered, then re-attempt execution.\n")
sys.exit(0)
else:
intersight_account_name = iam_account["Results"][0]["Name"]
print("The Intersight API and Account Availability Test has "
"passed.\n")
print(f"The Intersight account named '{intersight_account_name}' "
"has been found.")
return intersight_account_name
except Exception:
print("\nA configuration error has occurred!\n")
print("Unable to access the Intersight API.")
print("Exiting due to the Intersight API being unavailable.\n")
print("Please verify that the correct API Key ID and API Key have "
"been entered, then re-attempt execution.\n")
print("Exception Message: ")
traceback.print_exc()
sys.exit(0)
# Establish function to retrieve the MOID of a specific Intersight API object by name
def intersight_object_moid_retriever(intersight_api_key_id,
intersight_api_key,
object_name,
intersight_api_path,
object_type="object",
organization="default",
intersight_base_url="https://www.intersight.com/api/v1",
preconfigured_api_client=None
):
"""This is a function to retrieve the MOID of Intersight objects
using the Intersight API.
Args:
intersight_api_key_id (str):
The ID of the Intersight API key.
intersight_api_key (str):
The system file path of the Intersight API key.
object_name (str):
The name of the Intersight object.
intersight_api_path (str):
The Intersight API path of the Intersight object.
object_type (str):
Optional; The type of Intersight object. The default value is
"object".
organization (str):
Optional; The Intersight organization of the Intersight object.
The default value is "default".
intersight_base_url (str):
Optional; The base URL for Intersight API paths. The default value
is "https://www.intersight.com/api/v1". This value typically only
needs to be changed if using the Intersight Virtual Appliance.
preconfigured_api_client ("ApiClient"):
Optional; An ApiClient class instance which handles
Intersight client-server communication through the use of API keys.
The default value is None. If a preconfigured_api_client argument
is provided, empty strings ("") or None can be provided for the
intersight_api_key_id, intersight_api_key, and intersight_base_url
arguments.
Returns:
A string of the MOID for the provided Intersight object.
Raises:
Exception:
An exception occurred due to an issue accessing the Intersight API
path. The status code or error message will be specified.
"""
# Define Intersight SDK ApiClient variable
if preconfigured_api_client is None:
api_client = get_api_client(api_key_id=intersight_api_key_id,
api_secret_file=intersight_api_key,
endpoint=intersight_base_url
)
else:
api_client = preconfigured_api_client
try:
# Retrieve the Intersight Account name
api_client.call_api(resource_path="/iam/Accounts",
method="GET",
auth_settings=['cookieAuth', 'http_signature', 'oAuth2', 'oAuth2']
)
response = api_client.last_response.data
iam_account = json.loads(response)
if api_client.last_response.status != 200:
print("The provided Intersight account information could not be "
"accessed.")
print("Exiting due to the Intersight account being unavailable.\n")
print("Please verify that the correct API Key ID and API Key have "
"been entered, then re-attempt execution.\n")
sys.exit(0)
else:
intersight_account_name = iam_account["Results"][0]["Name"]
except Exception:
print("\nA configuration error has occurred!\n")
print("Unable to access the Intersight API.")
print("Exiting due to the Intersight API being unavailable.\n")
print("Please verify that the correct API Key ID and API Key have "
"been entered, then re-attempt execution.\n")
sys.exit(0)
# Retrieving the provided object from Intersight...
full_intersight_api_path = f"/{intersight_api_path}"
try:
api_client.call_api(resource_path=full_intersight_api_path,
method="GET",
auth_settings=['cookieAuth', 'http_signature', 'oAuth2', 'oAuth2']
)
response = api_client.last_response.data
intersight_objects = json.loads(response)
# The Intersight API resource path has been accessed successfully.
except Exception:
print("\nA configuration error has occurred!\n")
print("There was an issue retrieving the "
f"{object_type} from Intersight.")
print("Unable to access the provided Intersight API resource path "
f"'{intersight_api_path}'.")
print("Please review and resolve any error messages, then re-attempt "
"execution.\n")
print("Exception Message: ")
traceback.print_exc()
sys.exit(0)
if intersight_objects.get("Results"):
for intersight_object in intersight_objects.get("Results"):
if intersight_object.get("Organization"):
provided_organization_moid = intersight_object_moid_retriever(intersight_api_key_id=None,
intersight_api_key=None,
object_name=organization,
intersight_api_path="organization/Organizations",
object_type="Organization",
preconfigured_api_client=api_client
)
if intersight_object.get("Organization", {}).get("Moid") == provided_organization_moid:
if intersight_object.get("Name") == object_name:
intersight_object_moid = intersight_object.get("Moid")
# The provided object and MOID has been identified and retrieved.
return intersight_object_moid
else:
if intersight_object.get("Name") == object_name:
intersight_object_moid = intersight_object.get("Moid")
# The provided object and MOID has been identified and retrieved.
return intersight_object_moid
else:
print("\nA configuration error has occurred!\n")
print(f"The provided {object_type} named '{object_name}' was not "
"found.")
print("Please check the Intersight Account named "
f"{intersight_account_name}.")
print("Verify through the API or GUI that the needed "
f"{object_type} is present.")
print(f"If the needed {object_type} is missing, please create it.")
print("Once the issue has been resolved, re-attempt execution.\n")
sys.exit(0)
else:
print("\nA configuration error has occurred!\n")
print(f"The provided {object_type} named '{object_name}' was not "
"found.")
print(f"No requested {object_type} instance is currently available in "
f"the Intersight account named {intersight_account_name}.")
print("Please check the Intersight Account named "
f"{intersight_account_name}.")
print(f"Verify through the API or GUI that the needed {object_type} "
"is present.")
print(f"If the needed {object_type} is missing, please create it.")
print("Once the issue has been resolved, re-attempt execution.\n")
sys.exit(0)
# Establish function to retrieve all instances of a particular Intersight API object type
def get_intersight_objects(intersight_api_key_id,
intersight_api_key,
intersight_api_path,
object_type="object",
intersight_base_url="https://www.intersight.com/api/v1",
preconfigured_api_client=None
):
"""This is a function to perform an HTTP GET on all objects under an
available Intersight API type.
Args:
intersight_api_key_id (str):
The ID of the Intersight API key.
intersight_api_key (str):
The system file path of the Intersight API key.
intersight_api_path (str):
The path to the targeted Intersight API object type. For example,
to specify the Intersight API type for adapter configuration
policies, enter "adapter/ConfigPolicies". More API types can be
found in the Intersight API reference library at
https://intersight.com/apidocs/introduction/overview/.
object_type (str):
Optional; The type of Intersight object. The default value is
"object".
intersight_base_url (str):
Optional; The base URL for Intersight API paths. The default value
is "https://www.intersight.com/api/v1". This value typically only
needs to be changed if using the Intersight Virtual Appliance.
preconfigured_api_client ("ApiClient"):
Optional; An ApiClient class instance which handles
Intersight client-server communication through the use of API keys.
The default value is None. If a preconfigured_api_client argument
is provided, empty strings ("") or None can be provided for the
intersight_api_key_id, intersight_api_key, and intersight_base_url
arguments.
Returns:
A dictionary containing all objects of the specified API type. If the
API type is inaccessible, an implicit value of None will be returned.
Raises:
Exception:
An exception occurred due to an issue accessing the Intersight API
path. The status code or error message will be specified.
"""
# Define Intersight SDK ApiClient variable
if preconfigured_api_client is None:
api_client = get_api_client(api_key_id=intersight_api_key_id,
api_secret_file=intersight_api_key,
endpoint=intersight_base_url
)
else:
api_client = preconfigured_api_client
# Retrieving the provided object from Intersight...
full_intersight_api_path = f"/{intersight_api_path}"
try:
api_client.call_api(resource_path=full_intersight_api_path,
method="GET",
auth_settings=['cookieAuth', 'http_signature', 'oAuth2', 'oAuth2']
)
response = api_client.last_response.data
intersight_objects = json.loads(response)
# The Intersight API resource path has been accessed successfully.
return intersight_objects
except Exception:
print("\nA configuration error has occurred!\n")
print(f"There was an issue retrieving the requested {object_type} "
"instances from Intersight.")
print("Unable to access the provided Intersight API resource path "
f"'{intersight_api_path}'.")
print("Please review and resolve any error messages, then re-attempt "
"execution.\n")
print("Exception Message: ")
traceback.print_exc()
sys.exit(0)
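# Illustrative usage sketch for get_intersight_objects() (example values only):
# a preconfigured ApiClient instance avoids re-reading the API key file on each
# call. The variable names below are hypothetical.
#   example_api_client = get_api_client(api_key_id=key_id,
#                                       api_secret_file=key,
#                                       endpoint=intersight_base_url
#                                       )
#   all_fc_network_policies = get_intersight_objects(
#       intersight_api_key_id=None,
#       intersight_api_key=None,
#       intersight_api_path="fabric/FcNetworkPolicies",
#       object_type="VSAN Policy",
#       preconfigured_api_client=example_api_client
#       )
#   for returned_policy in all_fc_network_policies.get("Results", []):
#       print(returned_policy.get("Name"), returned_policy.get("Moid"))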
# Establish function to retrieve a particular instance of a particular Intersight API object type
def get_single_intersight_object(intersight_api_key_id,
intersight_api_key,
intersight_api_path,
object_moid,
object_type="object",
intersight_base_url="https://www.intersight.com/api/v1",
preconfigured_api_client=None
):
"""This is a function to perform an HTTP GET on a single object under an
available Intersight API type.
Args:
intersight_api_key_id (str):
The ID of the Intersight API key.
intersight_api_key (str):
The system file path of the Intersight API key.
intersight_api_path (str):
The path to the targeted Intersight API object type. For example,
to specify the Intersight API type for adapter configuration
policies, enter "adapter/ConfigPolicies". More API types can be
found in the Intersight API reference library at
https://intersight.com/apidocs/introduction/overview/.
object_moid (str):
The MOID of the single Intersight object.
object_type (str):
Optional; The type of Intersight object. The default value is
"object".
intersight_base_url (str):
Optional; The base URL for Intersight API paths. The default value
is "https://www.intersight.com/api/v1". This value typically only
needs to be changed if using the Intersight Virtual Appliance.
preconfigured_api_client ("ApiClient"):
Optional; An ApiClient class instance which handles
Intersight client-server communication through the use of API keys.
The default value is None. If a preconfigured_api_client argument
is provided, empty strings ("") or None can be provided for the
intersight_api_key_id, intersight_api_key, and intersight_base_url
arguments.
Returns:
        A dictionary containing the requested object instance of the specified
        API type. If the API type is inaccessible, an implicit value of None
        will be returned.
Raises:
Exception:
An exception occurred due to an issue accessing the Intersight API
path. The status code or error message will be specified.
"""
# Define Intersight SDK ApiClient variable
if preconfigured_api_client is None:
api_client = get_api_client(api_key_id=intersight_api_key_id,
api_secret_file=intersight_api_key,
endpoint=intersight_base_url
)
else:
api_client = preconfigured_api_client
# Retrieving the provided object from Intersight...
full_intersight_api_path = f"/{intersight_api_path}/{object_moid}"
try:
api_client.call_api(resource_path=full_intersight_api_path,
method="GET",
auth_settings=['cookieAuth', 'http_signature', 'oAuth2', 'oAuth2']
)
response = api_client.last_response.data
single_intersight_object = json.loads(response)
# The Intersight API resource path has been accessed successfully.
return single_intersight_object
except Exception:
print("\nA configuration error has occurred!\n")
print(f"There was an issue retrieving the requested {object_type} "
"instance from Intersight.")
print("Unable to access the provided Intersight API resource path "
f"'{intersight_api_path}'.")
print("Please review and resolve any error messages, then re-attempt "
"execution.\n")
print("Exception Message: ")
traceback.print_exc()
sys.exit(0)
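# Illustrative usage sketch for get_single_intersight_object() (the MOID below
# is a hypothetical value, and example_api_client is assumed to be an ApiClient
# instance returned by get_api_client()):
#   example_policy = get_single_intersight_object(
#       intersight_api_key_id=None,
#       intersight_api_key=None,
#       intersight_api_path="fabric/FcNetworkPolicies",
#       object_moid="abcdef0123456789abcdef01",
#       object_type="VSAN Policy",
#       preconfigured_api_client=example_api_client
#       )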
# Establish function to convert a list of numbers in string type format to list type format.
def integer_number_list_maker(string_list,
remove_duplicate_elements_in_list=True,
sort_elements_in_list=True
):
"""This function converts a list of numbers in string type format
to list type format. The provided string should contain commas,
semicolons, or spaces as the separator between numbers. Number
ranges can be configured using hyphens. An example entry would be
"1, 2, 3, 4, 5, 6, 7, 7-14, 25". For each number in the list,
    leading and trailing spaces will be removed. Duplicate numbers in the
list are removed by default.
Args:
string_list (str):
A string containing a number or range of numbers.
remove_duplicate_elements_in_list (bool):
Optional; A setting to determine whether duplicate elements
are removed from the provided string list. The default
value is True.
sort_elements_in_list (bool):
Optional; A setting to determine if the elements in the provided
string list should be sorted in ascending order. The default value
is True.
Returns:
A list of integers.
"""
def string_to_list_separator(string_list,
separator
):
"""This function converts a list of elements in string type
format to list type format using the provided separator. For
        each element in the list, leading and trailing spaces are removed.
Args:
string_list (str):
A string containing an element or range of elements.
separator (str):
The character to identify where elements in the list
should be separated (e.g., a comma, semicolon, hyphen,
etc.).
Returns:
A list of separated elements that have been stripped of any
spaces.
"""
fully_stripped_list = []
# Split string by provided separator and create list of separated elements.
split_list = string_list.split(separator)
for element in split_list:
if element:
# Remove leading spaces from elements in list.
lstripped_element = element.lstrip()
                # Remove trailing spaces from elements in list.
rstripped_element = lstripped_element.rstrip()
# Populate new list with fully stripped elements.
fully_stripped_list.append(rstripped_element)
return fully_stripped_list
def list_to_list_separator(provided_list,
separator
):
"""This function converts a list of elements in list type
format to list type format using the provided separator. For
        each element in the list, leading and trailing spaces are removed.
Args:
provided_list (list):
A list of elements to be separated.
separator (str):
The character to identify where elements in the list
should be separated (e.g., a comma, semicolon, hyphen,
etc.).
Returns:
A list of separated elements that have been stripped of any
spaces.
"""
new_list = []
# Split list by provided separator and create new list of separated elements.
for element in provided_list:
if separator in element:
split_provided_list = string_to_list_separator(element,
separator
)
new_list.extend(split_provided_list)
else:
new_list.append(element)
return new_list
integer_number_list = []
# Split provided list by spaces.
space_split_list = string_to_list_separator(string_list,
" "
)
# Split provided list by commas.
post_comma_split_list = list_to_list_separator(space_split_list,
","
)
# Split provided list by semicolons.
post_semicolon_split_list = list_to_list_separator(post_comma_split_list,
";"
)
# Split provided number ranges in the list by hyphens.
for post_semicolon_split_number_set in post_semicolon_split_list:
if "-" in post_semicolon_split_number_set:
if post_semicolon_split_number_set[0] != "-" and post_semicolon_split_number_set[-1] != "-":
hyphen_split_list = string_to_list_separator(post_semicolon_split_number_set,
"-"
)
# Limit new list to a maximum of two elements.
fixed_hyphen_split_list = hyphen_split_list[:2]
# Enumerate the numbers in the hyphen split list.
integer_starting_number_of_provided_range = int(fixed_hyphen_split_list[0])
integer_ending_number_of_provided_range = int(fixed_hyphen_split_list[1])
enumerated_range_list = range(
integer_starting_number_of_provided_range,
(integer_ending_number_of_provided_range + 1)
)
integer_number_list.extend(enumerated_range_list)
else:
integer_of_current_number_set = int(post_semicolon_split_number_set)
integer_number_list.append(integer_of_current_number_set)
# Remove duplicates from list if enabled.
if remove_duplicate_elements_in_list:
integer_number_list = list(set(integer_number_list))
if sort_elements_in_list:
integer_number_list = sorted(integer_number_list)
return integer_number_list
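# Worked example for integer_number_list_maker(): the string "1, 2, 3; 7-10, 7"
# yields [1, 2, 3, 7, 8, 9, 10] with the default de-duplication and sorting
# enabled, since the hyphenated range "7-10" is expanded and the repeated 7 is
# removed.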
# Establish Maker specific classes and functions
class UcsPolicy:
"""This class is used to configure a UCS Policy in Intersight.
"""
object_type = "UCS Policy"
intersight_api_path = None
subobject_types = None
subobject_attribute_maps = None
object_variable_value_maps = None
def __init__(self,
intersight_api_key_id,
intersight_api_key,
policy_name,
policy_description="",
organization="default",
intersight_base_url="https://www.intersight.com/api/v1",
tags=None,
preconfigured_api_client=None
):
self.intersight_api_key_id = intersight_api_key_id
self.intersight_api_key = intersight_api_key
self.policy_name = policy_name
self.policy_description = policy_description
self.organization = organization
self.intersight_base_url = intersight_base_url
if tags is None:
self.tags = {}
else:
self.tags = tags
if preconfigured_api_client is None:
self.api_client = get_api_client(api_key_id=intersight_api_key_id,
api_secret_file=intersight_api_key,
endpoint=intersight_base_url
)
else:
self.api_client = preconfigured_api_client
self.intersight_api_body = {
"Name": self.policy_name,
"Description": self.policy_description
}
def __repr__(self):
return (
f"{self.__class__.__name__}"
f"('{self.intersight_api_key_id}', "
f"'{self.intersight_api_key}', "
f"'{self.policy_name}', "
f"'{self.policy_description}', "
f"'{self.organization}', "
f"'{self.intersight_base_url}', "
f"{self.tags}, "
f"{self.api_client})"
)
def __str__(self):
return f"{self.__class__.__name__} class object for '{self.policy_name}'"
def _post_intersight_object(self):
"""This is a function to configure an Intersight object by
performing a POST through the Intersight API.
Returns:
A string with a statement indicating whether the POST method
was successful or failed.
Raises:
Exception:
An exception occurred while performing the API call.
The status code or error message will be specified.
"""
full_intersight_api_path = f"/{self.intersight_api_path}"
try:
self.api_client.call_api(resource_path=full_intersight_api_path,
method="POST",
body=self.intersight_api_body,
auth_settings=['cookieAuth', 'http_signature', 'oAuth2', 'oAuth2']
)
print(f"The configuration of the base {self.object_type} "
"has completed.")
return "The POST method was successful."
except intersight.exceptions.ApiException as error:
if error.status == 409:
existing_intersight_object_name = self.intersight_api_body.get("Name", "object")
print(f"The targeted {self.object_type} appears to already "
"exist.")
print("An attempt will be made to update the pre-existing "
f"{existing_intersight_object_name}...")
try:
existing_intersight_object_moid = intersight_object_moid_retriever(intersight_api_key_id=None,
intersight_api_key=None,
object_name=existing_intersight_object_name,
intersight_api_path=self.intersight_api_path,
object_type=self.object_type,
preconfigured_api_client=self.api_client
)
# Update full Intersight API path with the MOID of the existing object
full_intersight_api_path_with_moid = f"/{self.intersight_api_path}/{existing_intersight_object_moid}"
self.api_client.call_api(resource_path=full_intersight_api_path_with_moid,
method="POST",
body=self.intersight_api_body,
auth_settings=['cookieAuth', 'http_signature', 'oAuth2', 'oAuth2']
)
print(f"The update of the {self.object_type} has "
"completed.")
print(f"The pre-existing {existing_intersight_object_name} "
"has been updated.")
return "The POST method was successful."
except Exception:
print("\nA configuration error has occurred!\n")
print(f"Unable to update the {self.object_type} under the "
"Intersight API resource path "
f"'{full_intersight_api_path_with_moid}'.\n")
print(f"The pre-existing {existing_intersight_object_name} "
"could not be updated.")
print("Exception Message: ")
traceback.print_exc()
return "The POST method failed."
else:
print("\nA configuration error has occurred!\n")
print(f"Unable to configure the {self.object_type} under the "
"Intersight API resource path "
f"'{full_intersight_api_path}'.\n")
print("Exception Message: ")
traceback.print_exc()
return "The POST method failed."
except Exception:
print("\nA configuration error has occurred!\n")
print(f"Unable to configure the {self.object_type} under the "
"Intersight API resource path "
f"'{full_intersight_api_path}'.\n")
print("Exception Message: ")
traceback.print_exc()
return "The POST method failed."
def _update_api_body_general_attributes(self):
"""This function updates the Intersight API body with general
attributes for the Intersight object.
"""
# Retrieve the Intersight Organization MOID
policy_organization_moid = intersight_object_moid_retriever(intersight_api_key_id=None,
intersight_api_key=None,
object_name=self.organization,
intersight_api_path="organization/Organizations",
object_type="Organization",
preconfigured_api_client=self.api_client
)
# Update the API body with the Intersight Organization MOID
self.intersight_api_body["Organization"] = {"Moid": policy_organization_moid}
# Create the Intersight Tags dictionary list
tags_dictionary_list = []
if self.tags:
for key in self.tags:
tags_dictionary_list_entry = {
"Key": key,
"Value": self.tags.get(key)
}
tags_dictionary_list.append(tags_dictionary_list_entry)
# Update the API body with the Intersight Tags dictionary list
self.intersight_api_body["Tags"] = tags_dictionary_list
def _update_api_body_subobject_attributes(self):
"""This function updates the Intersight API body with individual
attributes for subobjects of the Intersight object.
Raises:
Exception:
An exception occurred while reformatting a provided value for
an attribute. The issue will likely be due to the provided
value not being in string format. Changing the value to string
format should resolve the exception.
"""
def attribute_map_handler(attribute_map_dictionary):
"""This is a function to handle attributes with a mismatch in the
Front-End Name and Back-End Name.
Args:
attribute_map_dictionary (dict):
A dictionary containing attribute data, including front-end
(GUI) to back-end (API) mapped values.
"""
# Establish default automatic insertion status of current attribute
automatic_insertion_of_attribute_value_performed = False
# Check if current attribute is mandatory
if attribute_map_dictionary.get("Mandatory"):
if not any(
attribute_name_key in
staged_subobject_dictionary for
attribute_name_key in
(attribute_map_dictionary["FrontEndName"],
attribute_map_dictionary["BackEndName"]
)
):
if attribute_map_dictionary.get("AutomaticInsertion"):
staged_subobject_dictionary[attribute_map_dictionary["BackEndName"]] = attribute_map_dictionary.get("AutomaticInsertionValue")
automatic_insertion_of_attribute_value_performed = True
else:
print("\nA configuration error has occurred!\n")
print("During the configuration of the "
f"{subobject_type_dictionary["Description"]} "
f"settings for the {self.object_type} named "
f"{self.policy_name}, there was an issue "
"accessing the value for the "
f"{attribute_map_dictionary["Description"]}.")
print("Please verify the following key exists in the "
"appropriate dictionary of the "
f"{subobject_type_dictionary["Description"]} "
f"list variable for the {self.object_type}, then "
"re-attempt execution:\n")
if attribute_map_dictionary['FrontEndName']:
print(f"'{attribute_map_dictionary["FrontEndName"]}'\n")
else:
print(f"'{attribute_map_dictionary["BackEndName"]}'\n")
sys.exit(0)
# Check for attribute front-end name key in the dictionary
if attribute_map_dictionary["FrontEndName"] in staged_subobject_dictionary:
# If the attribute back-end name key is not present in the dictionary, insert the back-end name key with the front-end name key value
if attribute_map_dictionary["BackEndName"] not in staged_subobject_dictionary:
staged_subobject_dictionary[attribute_map_dictionary["BackEndName"]] = staged_subobject_dictionary.get(attribute_map_dictionary["FrontEndName"])
# Remove the front-end name key from the dictionary
staged_subobject_dictionary.pop(attribute_map_dictionary["FrontEndName"])
# Check for front-end to back-end value mapping
if attribute_map_dictionary.get("FronttoBackEndValueMaps"):
if (
attribute_map_dictionary["BackEndName"] in
staged_subobject_dictionary and
not automatic_insertion_of_attribute_value_performed
):
# Retrieve the provided attribute value
provided_attribute_value = staged_subobject_dictionary.get(attribute_map_dictionary["BackEndName"])
# Reformat the provided attribute value to lowercase and remove spaces to prevent potential format issues
try:
provided_attribute_value_reformatted = "".join(provided_attribute_value.lower().split())
except Exception:
print("\nA configuration error has occurred!\n")
print("During the configuration of the "
f"{subobject_type_dictionary["Description"]} "
f"settings for the {self.object_type} named "
f"{self.policy_name}, there was an issue with "
"the value for the "
f"{attribute_map_dictionary["Description"]}.")
print("The value provided was "
f"{provided_attribute_value}.")
print("Please verify that the value has been provided "
"in an accepted string format.")
print("Please review and resolve any error messages, "
"then re-attempt execution.\n")
sys.exit(0)
# Create list of known and mapped front-end to back-end values
front_to_backend_value_maps_key_list = list(attribute_map_dictionary["FronttoBackEndValueMaps"])
# Replace known and reformatted front-end value with known and mapped back-end value
if provided_attribute_value_reformatted in front_to_backend_value_maps_key_list:
provided_attribute_value_mapped = attribute_map_dictionary["FronttoBackEndValueMaps"][provided_attribute_value_reformatted]
staged_subobject_dictionary[attribute_map_dictionary["BackEndName"]] = provided_attribute_value_mapped
else:
print("\nWARNING: An unknown "
f"{attribute_map_dictionary["Description"]} "
f"value of '{provided_attribute_value}' has been "
"provided for the "
f"{subobject_type_dictionary["Description"]} "
"settings!")
print("An attempt will be made to configure the "
"unknown "
f"{attribute_map_dictionary["Description"]} "
"value.")
print("If there is an error, please use one of the "
"following known values for the "
f"{attribute_map_dictionary["Description"]}, "
"then re-attempt execution:\n")
print(*attribute_map_dictionary["FixedFrontEndValues"],
sep=", "
)
# Check for subobject types that may need configuration
if self.subobject_types:
for subobject_type_dictionary in self.subobject_types:
subobject_list = getattr(self,
subobject_type_dictionary.get("SubobjectList")
)
if subobject_list:
converted_subobject_list = []
for subobject_dictionary in subobject_list:
staged_subobject_dictionary = copy.deepcopy(subobject_dictionary)
returned_subobject_type = subobject_type_dictionary["SubobjectType"]
# Set 'ClassId' attribute
staged_subobject_dictionary["ClassId"] = returned_subobject_type
# Set 'ObjectType' attribute
staged_subobject_dictionary["ObjectType"] = returned_subobject_type
# Handle setting of attributes which have mismatched Front-End and Back-End names
for attribute_map_dictionary in self.subobject_attribute_maps.get(returned_subobject_type):
attribute_map_handler(attribute_map_dictionary)
converted_subobject_dictionary = staged_subobject_dictionary
converted_subobject_list.append(converted_subobject_dictionary)
# Update Intersight API body with the converted sub-objects list
self.intersight_api_body[subobject_type_dictionary["AttributeName"]] = converted_subobject_list
def _update_api_body_mapped_object_attributes(self):
"""This function updates the Intersight API body with individual
attributes that require mapping frontend to backend values for
compatibility with the Intersight API.
Raises:
Exception:
An exception occurred while reformatting a provided value for
an attribute. The issue will likely be due to the provided
value not being in string format. Changing the value to string
format should resolve the exception.
"""
# Check for object variables with value maps that need configuration
if self.object_variable_value_maps:
for object_variable in self.object_variable_value_maps:
                # Create a generator of all known and accepted frontend values
all_known_and_accepted_frontend_values = (object_variable_value["FrontEndValue"]
for
object_variable_value
in
object_variable["Values"]
)
# Retrieve the user provided object variable value
provided_object_variable_value = getattr(self,
object_variable["VariableName"]
)
# Reformat the user provided object variable value to lowercase and remove spaces to prevent potential format issues
try:
reformatted_object_variable_value = "".join(provided_object_variable_value.lower().split())
except Exception:
print("\nA configuration error has occurred!\n")
print(f"During the configuration of the {self.object_type} named "
f"{self.policy_name}, there was an issue with the value "
f"provided for the {object_variable["Description"]} setting.")
print(f"The value provided was {provided_object_variable_value}.")
print("To proceed, the value provided for the "
f"{object_variable["Description"]} setting should be updated to "
"an accepted string format.")
print("The recommended values are the following:\n")
# Print list of all known and accepted frontend values for user
print(*all_known_and_accepted_frontend_values,
sep=", "
)
print("\nPlease update the configuration, then re-attempt "
"execution.\n")
sys.exit(0)
# Cycle through known values and match provided object variable value to backend value
for object_variable_value in object_variable["Values"]:
# Create list of all known and accepted frontend and backend values
current_known_frontend_and_backend_value_options = (object_variable_value.values())
# Retrieve the current known backend value
current_known_backend_value = object_variable_value["BackEndValue"]
if (
reformatted_object_variable_value
in
("".join(current_known_frontend_or_backend_value.lower().split())
for
current_known_frontend_or_backend_value
in
current_known_frontend_and_backend_value_options
)
):
backend_object_variable_value = current_known_backend_value
break
else:
# If no backend match is found with the user provided object variable value, pass on the user provided object variable value to Intersight to decide
print(f"\nWARNING: An unknown {self.object_type} value of "
f"'{provided_object_variable_value}' has been "
f"provided for the {object_variable["Description"]} "
"settings!")
print("An attempt will be made to configure the unknown "
f"{object_variable["Description"]} value.")
print("If there is an error, please use one of the "
"following known values for the "
f"{object_variable["Description"]} settings, then "
"re-attempt execution:\n")
print(*all_known_and_accepted_frontend_values,
sep=", "
)
backend_object_variable_value = provided_object_variable_value
# Update Intersight API body with the converted object variable value
self.intersight_api_body[object_variable["AttributeName"]] = backend_object_variable_value
def object_maker(self):
"""This function makes the targeted policy object.
"""
print(f"\nConfiguring the {self.object_type} named "
f"{self.policy_name}...")
# Update the API body with general attributes
self._update_api_body_general_attributes()
# Update the API body with individual subobject attributes
self._update_api_body_subobject_attributes()
# Update the API body with individual mapped object attributes
self._update_api_body_mapped_object_attributes()
# POST the API body to Intersight
self._post_intersight_object()
class DirectlyAttachedUcsDomainPolicy(UcsPolicy):
"""This class is used to configure a UCS Domain Policy in Intersight that
is logically directly attached to UCS Fabric Interconnects through UCS
Domain Profiles.
"""
object_type = "Directly Attached UCS Domain Policy"
intersight_api_path = None
def __init__(self,
intersight_api_key_id,
intersight_api_key,
policy_name,
policy_description="",
organization="default",
intersight_base_url="https://www.intersight.com/api/v1",
tags=None,
preconfigured_api_client=None,
ucs_domain_profile_name="",
fabric_interconnect="AB"
):
super().__init__(intersight_api_key_id,
intersight_api_key,
policy_name,
policy_description,
organization,
intersight_base_url,
tags,
preconfigured_api_client
)
self.ucs_domain_profile_name = ucs_domain_profile_name
self.fabric_interconnect = fabric_interconnect
def __repr__(self):
return (
f"{self.__class__.__name__}"
f"('{self.intersight_api_key_id}', "
f"'{self.intersight_api_key}', "
f"'{self.policy_name}', "
f"'{self.policy_description}', "
f"'{self.organization}', "
f"'{self.intersight_base_url}', "
f"{self.tags}, "
f"{self.api_client}, "
f"'{self.ucs_domain_profile_name}', "
f"'{self.fabric_interconnect}')"
)
def _attach_ucs_domain_profile(self):
"""This is a function to attach an Intersight UCS Domain Profile to an
Intersight Policy.
Returns:
A dictionary for the API body of the policy object to be posted on
Intersight.
"""
# Attach UCS Domain Profile
if self.ucs_domain_profile_name:
print("Attaching the UCS Domain Profile named "
f"{self.ucs_domain_profile_name}...")
# Get UCS Domain Profile MOID
ucs_domain_profile_moid = intersight_object_moid_retriever(intersight_api_key_id=None,
intersight_api_key=None,
object_name=self.ucs_domain_profile_name,
intersight_api_path="fabric/SwitchClusterProfiles",
object_type="UCS Domain Profile",
organization=self.organization,
preconfigured_api_client=self.api_client
)
# Get UCS Domain Profile object dictionary attributes
ucs_domain_profile_object = get_single_intersight_object(intersight_api_key_id=None,
intersight_api_key=None,
intersight_api_path="fabric/SwitchClusterProfiles",
object_moid=ucs_domain_profile_moid,
object_type="UCS Domain Profile",
preconfigured_api_client=self.api_client
)
# Get Switch Profiles that are attached to the UCS Domain Profile
ucs_domain_profile_list_of_attached_switch_profiles = ucs_domain_profile_object.get("SwitchProfiles")
if len(ucs_domain_profile_list_of_attached_switch_profiles) != 2:
print("\nA configuration error has occurred!\n")
print("The provided UCS Domain Profile named "
f"{self.ucs_domain_profile_name} is not configured with "
"two attached Switch Profiles.")
print("To proceed, two Switch Profiles must be attached to the "
"provided UCS Domain Profile.")
print("Please update the configuration of the provided UCS "
f"Domain Profile, then re-attempt execution.\n")
sys.exit(0)
else:
fabric_interconnect_a_switch_profile_moid = ucs_domain_profile_list_of_attached_switch_profiles[0].get("Moid")
fabric_interconnect_b_switch_profile_moid = ucs_domain_profile_list_of_attached_switch_profiles[1].get("Moid")
# Update the API body with the appropriate Switch Profile MOIDs based on selected Fabric Interconnects
if self.fabric_interconnect not in ("AB", "BA", "A", "B"):
print("\nA configuration error has occurred!\n")
print("The provided UCS Domain Profile Fabric Interconnect "
"value of "
f"'{self.fabric_interconnect}' "
"is not supported.")
print("To proceed, the Fabric Interconnect value for the "
"UCS Domain Profile must be 'AB', 'A', or 'B'.")
print("Please update the configuration of the provided UCS "
"Domain Profile Fabric Interconnect, then re-attempt "
"execution.\n")
sys.exit(0)
else:
if self.fabric_interconnect == "A":
print("The attachment will be made to Fabric Interconnect "
"A.")
self.intersight_api_body["Profiles"] = [
{"Moid": fabric_interconnect_a_switch_profile_moid,
"ObjectType": "fabric.SwitchProfile"}
]
elif self.fabric_interconnect == "B":
print("The attachment will be made to Fabric Interconnect "
"B.")
self.intersight_api_body["Profiles"] = [
{"Moid": fabric_interconnect_b_switch_profile_moid,
"ObjectType": "fabric.SwitchProfile"}
]
else:
print("The attachment will be made to Fabric Interconnects "
"A and B.")
self.intersight_api_body["Profiles"] = [
{"Moid": fabric_interconnect_a_switch_profile_moid,
"ObjectType": "fabric.SwitchProfile"},
{"Moid": fabric_interconnect_b_switch_profile_moid,
"ObjectType": "fabric.SwitchProfile"}
]
def object_maker(self):
"""This function makes the targeted policy object.
"""
print(f"\nConfiguring the {self.object_type} named "
f"{self.policy_name}...")
# Update the API body with general attributes
self._update_api_body_general_attributes()
# Update the API body with individual subobject attributes
self._update_api_body_subobject_attributes()
# Update the API body with individual mapped object attributes
self._update_api_body_mapped_object_attributes()
# Update the API body with a UCS Domain Profile attached, if specified
self._attach_ucs_domain_profile()
# POST the API body to Intersight
self._post_intersight_object()
class VsanPolicy(DirectlyAttachedUcsDomainPolicy):
"""This class is used to configure a VSAN Policy in Intersight.
"""
object_type = "VSAN Policy"
intersight_api_path = "fabric/FcNetworkPolicies"
def __init__(self,
intersight_api_key_id,
intersight_api_key,
policy_name,
policy_description="",
organization="default",
intersight_base_url="https://www.intersight.com/api/v1",
tags=None,
preconfigured_api_client=None,
ucs_domain_profile_name="",
fabric_interconnect="A",
enable_uplink_trunking=False
):
super().__init__(intersight_api_key_id,
intersight_api_key,
policy_name,
policy_description,
organization,
intersight_base_url,
tags,
preconfigured_api_client,
ucs_domain_profile_name,
fabric_interconnect
)
self.enable_uplink_trunking = enable_uplink_trunking
self.intersight_api_body = {
"Name": self.policy_name,
"Description": self.policy_description,
"EnableTrunking": self.enable_uplink_trunking,
}
def __repr__(self):
return (
f"{self.__class__.__name__}"
f"('{self.intersight_api_key_id}', "
f"'{self.intersight_api_key}', "
f"'{self.policy_name}', "
f"'{self.policy_description}', "
f"'{self.organization}', "
f"'{self.intersight_base_url}', "
f"{self.tags}, "
f"{self.api_client}, "
f"'{self.ucs_domain_profile_name}', "
f"'{self.fabric_interconnect}', "
f"{self.enable_uplink_trunking})"
)
class IdConfigurator:
"""This class serves as a base class for configuring the IDs of objects in
Intersight policies. Examples of the objects that would use these IDs are
ports, VLANs, and VSANs.
"""
object_type = "ID Configurator"
attributes_that_require_special_handling = None
def __init__(self,
intersight_api_key_id,
intersight_api_key,
policy_name,
id_list=None,
organization="default",
intersight_base_url="https://www.intersight.com/api/v1",
preconfigured_api_client=None
):
self.intersight_api_key_id = intersight_api_key_id
self.intersight_api_key = intersight_api_key
self.policy_name = policy_name
if id_list is None:
self.id_list = []
else:
self.id_list = id_list
self.organization = organization
self.intersight_base_url = intersight_base_url
if preconfigured_api_client is None:
self.api_client = get_api_client(api_key_id=intersight_api_key_id,
api_secret_file=intersight_api_key,
endpoint=intersight_base_url
)
else:
self.api_client = preconfigured_api_client
def __repr__(self):
return (
f"{self.__class__.__name__}"
f"('{self.intersight_api_key_id}', "
f"'{self.intersight_api_key}', "
f"'{self.policy_name}', "
f"'{self.id_list}', "
f"'{self.organization}', "
f"'{self.intersight_base_url}', "
f"{self.api_client})"
)
def __str__(self):
return f"{self.__class__.__name__} class object for '{self.policy_name}'"
def enumerated_id_range_retriever(self,
id_dictionary,
id_key
):
"""This function captures provided ID ranges for enumeration and
returns the enumerated list.
Args:
id_dictionary (dict):
The dictionary containing the ID or range of IDs to be
enumerated.
id_key (str):
The key name of the ID to be enumerated.
Returns:
A list of enumerated integers for the provided IDs in a format
consumable by the Intersight API.
Raises:
KeyError:
A key error occurred due to an inaccessible dictionary key. In
this instance, the key for the ID type is inaccessible from the
ID dictionary. The key may be missing or misspelled.
"""
# Verify ID range has been configured in provided ID dictionary
try:
provided_id_range = id_dictionary[id_key]
except KeyError:
print("\nA configuration error has occurred!\n")
print(f"During the configuration of the {self.object_type} "
f"settings for the {self.policy_type} named "
f"{self.policy_name}, there was an issue accessing the range "
f"of IDs to be configured.")
print(f"Please verify a key named '{id_key}' exists in the "
f"ID list variable for the {self.object_type} "
"settings.\n")
print("Please review and resolve any error messages, then "
"re-attempt execution.\n")
print("Exception Message: ")
traceback.print_exc()
sys.exit(0)
return integer_number_list_maker(provided_id_range)
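    # Worked example for enumerated_id_range_retriever(): given the dictionary
    # {"Vsans": "101-103,150", "Name": "Example", "FcoeVlans": "1101-1103,1150"}
    # and the id_key "Vsans", the returned list is [101, 102, 103, 150].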
def attribute_handler(self,
staged_intersight_api_body
):
"""
This function is used to retrieve the MOIDs for provided attributes
that require them for configuration through the Intersight API.
Args:
staged_intersight_api_body (dict):
The staged Intersight API body dictionary being prepared for
configuration on Intersight.
Raises:
KeyError:
A key error occurred due to an inaccessible dictionary key. In
this instance, the key for the name of a required attribute is
inaccessible from the ID dictionary. The key may be missing or
misspelled.
"""
if self.attributes_that_require_special_handling:
for id_attribute in self.attributes_that_require_special_handling:
# If provided, add default attribute option to staged Intersight API body
if id_attribute.get("DefaultOption"):
id_attribute_default_option = getattr(self,
id_attribute.get("DefaultOption")
)
if id_attribute_default_option:
if id_attribute["Name"] not in staged_intersight_api_body:
staged_intersight_api_body[id_attribute["Name"]] = id_attribute_default_option
# Verify mandatory attributes are present in the staged Intersight API body
if id_attribute.get("Mandatory"):
try:
verify_id_attribute_presence = staged_intersight_api_body[id_attribute["Name"]]
except KeyError:
print("\nA configuration error has occurred!\n")
print("During the configuration of the "
f"{self.object_type} settings for the "
f"{self.policy_type} named {self.policy_name}, "
f"there was an issue accessing the value for "
f"the {id_attribute["Type"]}.")
print("Please verify the key named "
f"'{id_attribute["Name"]}' exists in the "
f"ID list variable for the {self.object_type} "
"settings.\n")
print("Please review and resolve any error messages, "
f"then re-attempt execution.\n")
print("Exception Message: ")
traceback.print_exc()
sys.exit(0)
# Retrieve MOID if required
if id_attribute.get("MoidRequired"):
id_attribute_moid = intersight_object_moid_retriever(intersight_api_key_id=None,
intersight_api_key=None,
object_name=staged_intersight_api_body[id_attribute["Name"]],
intersight_api_path=id_attribute["IntersightAPIPath"],
object_type=id_attribute["Type"],
organization=self.organization,
preconfigured_api_client=self.api_client
)
staged_intersight_api_body[id_attribute["Name"]] = {"Moid": id_attribute_moid}
else:
# Retrieve MOID if required for optional attributes
if id_attribute.get("MoidRequired"):
if staged_intersight_api_body.get(id_attribute["Name"]):
id_attribute_moid = intersight_object_moid_retriever(intersight_api_key_id=None,
intersight_api_key=None,
object_name=staged_intersight_api_body[id_attribute["Name"]],
intersight_api_path=id_attribute["IntersightAPIPath"],
object_type=id_attribute["Type"],
organization=self.organization,
preconfigured_api_client=self.api_client
)
staged_intersight_api_body[id_attribute["Name"]] = {"Moid": id_attribute_moid}
class Vsan(IdConfigurator):
"""This class serves as a base class for configuring the IDs of VSANs in
Intersight policies.
"""
object_type = "VSAN"
id_type = "Vsan"
intersight_api_path = "fabric/Vsans"
policy_type = "VSAN Policy"
policy_intersight_api_path = "fabric/FcNetworkPolicies"
attributes_that_require_special_handling = [
{"Name": "Name",
"Type": "VSAN Name Prefix",
"IntersightAPIPath": None,
"Mandatory": True,
"MoidRequired": False,
"DefaultOption": None
}
]
def __init__(self,
intersight_api_key_id,
intersight_api_key,
policy_name,
id_list=None,
organization="default",
intersight_base_url="https://www.intersight.com/api/v1",
preconfigured_api_client=None
):
super().__init__(intersight_api_key_id,
intersight_api_key,
policy_name,
id_list,
organization,
intersight_base_url,
preconfigured_api_client
)
def __repr__(self):
return (
f"{self.__class__.__name__}"
f"('{self.intersight_api_key_id}', "
f"'{self.intersight_api_key}', "
f"'{self.policy_name}', "
f"'{self.id_list}', "
f"'{self.organization}', "
f"'{self.intersight_base_url}', "
f"{self.api_client})"
)
def object_maker(self):
"""This function applies the provided id list configuration to the
targeted policy.
"""
def post_intersight_vsan(vsan_name,
body,
moid=None
):
"""This is a function to configure an Intersight object by
performing a POST through the Intersight API.
Args:
vsan_name (str):
The name of the VSAN to be posted on Intersight.
body (dict):
The body of the object to be posted on Intersight.
moid (str):
Optional; The Intersight MOID of the object to be posted
on Intersight. The default value is None.
Returns:
A string with a statement indicating whether the POST method
was successful or failed.
Raises:
Exception:
An exception occurred while performing the API call.
The status code or error message will be specified.
"""
if moid:
full_intersight_api_path = f"/{self.intersight_api_path}/{moid}"
else:
full_intersight_api_path = f"/{self.intersight_api_path}"
try:
self.api_client.call_api(resource_path=full_intersight_api_path,
method="POST",
body=body,
auth_settings=['cookieAuth', 'http_signature', 'oAuth2', 'oAuth2']
)
if moid:
print(f"The configuration of {self.object_type} "
f"{vsan_name} has been updated.")
else:
print(f"The configuration of {self.object_type} "
f"{vsan_name} has completed.")
return "The POST method was successful."
except Exception:
print("\nA configuration error has occurred!\n")
if moid:
print(f"Unable to update {self.object_type} "
f"{vsan_name} under the "
"Intersight API resource path "
f"'{full_intersight_api_path}'.\n")
else:
print(f"Unable to configure {self.object_type} "
f"{vsan_name} under the Intersight "
f"API resource path '{full_intersight_api_path}'.\n")
print("Exception Message: ")
traceback.print_exc()
return "The POST method failed."
if self.id_list:
vsans_id_key = f"{self.id_type}s"
fcoe_vlans_id_key = "FcoeVlans"
for id_dictionary in self.id_list:
# Enumerate provided VSAN range
vsans_enumerated_id_range = self.enumerated_id_range_retriever(id_dictionary,
vsans_id_key
)
# Enumerate provided FCoE VLAN range
fcoe_vlans_enumerated_id_range = self.enumerated_id_range_retriever(id_dictionary,
fcoe_vlans_id_key
)
# Match and pair enumerated VSAN and FCoE VLAN ranges
vsan_id_and_fcoe_vlan_id_matched_pair_list = list(zip(vsans_enumerated_id_range,
fcoe_vlans_enumerated_id_range
))
staged_intersight_api_body = copy.deepcopy(id_dictionary)
staged_intersight_api_body.pop(vsans_id_key)
staged_intersight_api_body.pop(fcoe_vlans_id_key)
# Retrieving the VSAN Policy MOID
policy_moid = intersight_object_moid_retriever(intersight_api_key_id=None,
intersight_api_key=None,
object_name=self.policy_name,
intersight_api_path=self.policy_intersight_api_path,
object_type=self.policy_type,
organization=self.organization,
intersight_base_url=self.intersight_base_url,
preconfigured_api_client=self.api_client
)
staged_intersight_api_body["FcNetworkPolicy"] = {"Moid": policy_moid}
# Retrieve MOIDs for ID attributes that require MOIDs
self.attribute_handler(staged_intersight_api_body)
current_vsan_id_name_prefix = id_dictionary.get("Name")
for current_vsan_id, current_fcoe_vlan_id in vsan_id_and_fcoe_vlan_id_matched_pair_list:
staged_intersight_api_body[f"{self.id_type}Id"] = current_vsan_id
current_vsan_id_full_name = f"{current_vsan_id_name_prefix}_{current_vsan_id}"
staged_intersight_api_body["Name"] = current_vsan_id_full_name
staged_intersight_api_body["FcoeVlan"] = current_fcoe_vlan_id
post_intersight_vsan(
current_vsan_id_full_name,
staged_intersight_api_body
)
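# Worked example of the VSAN to FCoE VLAN pairing performed above: an id_list
# entry of {"Vsans": "100-102", "Name": "Prod", "FcoeVlans": "1100-1102"} is
# enumerated and zipped so that VSANs named Prod_100, Prod_101, and Prod_102
# are posted with FCoE VLANs 1100, 1101, and 1102, respectively.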
def vsan_policy_maker(intersight_api_key_id,
intersight_api_key,
policy_name,
enable_uplink_trunking=False,
vsan_list=None,
policy_description="",
organization="default",
intersight_base_url="https://www.intersight.com/api/v1",
tags=None,
preconfigured_api_client=None,
ucs_domain_profile_name="",
fabric_interconnect="A"
):
"""This is a function used to make a VSAN Policy on Cisco Intersight.
Args:
intersight_api_key_id (str):
The ID of the Intersight API key.
intersight_api_key (str):
The system file path of the Intersight API key.
policy_name (str):
The name of the policy to be created.
enable_uplink_trunking (bool):
Optional; The setting to enable or disable trunking on all
configured FC uplink ports. The default value is False (disabled).
vsan_list (list):
Optional; The list of dictionary entries for VSANs to
be configured. The default value is None. An
example entry is:
[{"Vsans": "500",
"Name": "ESXi_FC_Storage",
"FcoeVlans": "500",
"DefaultZoning": "Disabled"
},
{"Vsans": "501-510,550,570",
"Name": "Hyper-V_FC_Storage",
"FcoeVlans": "501-510,550,570",
"DefaultZoning": "Disabled"
},
{"Vsans": "575-585",
"Name": "Xen_FC_Storage",
"FcoeVlans": "575-585",
"DefaultZoning": "Enabled"},].
A "Vsans" key with a string value must be configured in each
dictionary of the list to specify the VSAN ID or range of VSAN IDs
to be created. A "Name" key with a string value must also be
configured to specify the VSAN name prefix. A "FcoeVlans" key
must also be configured with a string value to specify the
FCoE VLAN ID or range of FCoE VLAN IDs to be created and matched
with the VSANs previously specified in the Vsans key. Optional key
settings such as "DefaultZoning" and "FcZoneSharingMode" can also
be configured in each list dictionary, if the associated system
default values are not desired. Multiple dictionaries can be placed
in the list to configure different settings for different ranges of
VSANs.
policy_description (str):
Optional; The description of the policy to be created. The default
value is an empty string ("").
organization (str):
Optional; The Intersight account organization of the policy.
The default value is "default".
intersight_base_url (str):
Optional; The base URL for Intersight API paths. The default value
is "https://www.intersight.com/api/v1". This value typically only
needs to be changed if using the Intersight Virtual Appliance.
tags (dict):
Optional; The Intersight account tags that will be assigned to the
policy. The default value is None.
preconfigured_api_client ("ApiClient"):
Optional; An ApiClient class instance which handles
Intersight client-server communication through the use of API keys.
The default value is None. If a preconfigured_api_client argument
is provided, empty strings ("") or None can be provided for the
intersight_api_key_id, intersight_api_key, and intersight_base_url
arguments.
ucs_domain_profile_name (str):
Optional; The UCS Domain Profile the policy should be attached to.
The default value is an empty string ("").
fabric_interconnect (str):
Optional; The Fabric Interconnect in the UCS Domain Profile that
the policy should be attached to. The default value is "A".
Accepted values are "A" for only Fabric Interconnect A, "B" for
only Fabric Interconnect B, and "AB" for both Fabric Interconnects A
and B. For VSAN configuration, typically each Fabric Interconnect
would have separate VSAN Policies to support different VSAN numbers
for each storage fabric.
"""
def builder(target_object):
"""This is a function used to build the objects that are components of
an overarching pool or policy on Cisco Intersight.
Args:
target_object (class):
The class representing the object to be built on Intersight.
Raises:
Exception:
An exception occurred due to an issue accessing the Intersight
API path. The status code or error message will be specified.
"""
try:
target_object.object_maker()
except Exception:
print("\nA configuration error has occurred!\n")
print("The builder function failed to configure the "
f"{target_object.object_type} settings.")
print("Please check the provided arguments for the "
f"{target_object.object_type} settings.\n")
print("Exception Message: ")
traceback.print_exc()
# Define and create VSAN Policy object in Intersight
builder(VsanPolicy(intersight_api_key_id=intersight_api_key_id,
intersight_api_key=intersight_api_key,
policy_name=policy_name,
policy_description=policy_description,
organization=organization,
intersight_base_url=intersight_base_url,
tags=tags,
preconfigured_api_client=preconfigured_api_client,
ucs_domain_profile_name=ucs_domain_profile_name,
fabric_interconnect=fabric_interconnect,
enable_uplink_trunking=enable_uplink_trunking
))
# Define and create VSAN objects in Intersight
builder(Vsan(intersight_api_key_id=intersight_api_key_id,
intersight_api_key=intersight_api_key,
policy_name=policy_name,
id_list=vsan_list,
organization=organization,
intersight_base_url=intersight_base_url,
preconfigured_api_client=preconfigured_api_client
))
def main():
# Establish Maker specific variables
maker_type = "Intersight VSAN Policy Maker"
# Establish Intersight SDK for Python API client instance
main_intersight_api_client = get_api_client(api_key_id=key_id,
api_secret_file=key,
endpoint=intersight_base_url
)
# Starting the Policy Maker for Cisco Intersight
print(f"\nStarting the {maker_type} for Cisco Intersight.\n")
# Run the Intersight API and Account Availability Test
print("Running the Intersight API and Account Availability Test.")
test_intersight_api_service(
intersight_api_key_id=None,
intersight_api_key=None,
preconfigured_api_client=main_intersight_api_client
)
# Create the Policy in Intersight
vsan_policy_maker(
intersight_api_key_id=None,
intersight_api_key=None,
policy_name=vsan_policy_name,
enable_uplink_trunking=enable_uplink_trunking,
vsan_list=vsan_list,
policy_description=vsan_policy_description,
organization=vsan_policy_organization,
intersight_base_url=intersight_base_url,
tags=vsan_policy_tags,
preconfigured_api_client=main_intersight_api_client,
ucs_domain_profile_name=ucs_domain_profile_name,
fabric_interconnect=vsan_policy_fabric_interconnect
)
# Policy Maker completion
print(f"\nThe {maker_type} has completed.\n")
if __name__ == "__main__":
main()
# Exiting the Policy Maker for Cisco Intersight
sys.exit(0)
| """
VSAN Policy Maker for Cisco Intersight, v2.0
Author: Ugo Emekauwa
Contact: uemekauw@cisco.com, uemekauwa@gmail.com
Summary: The VSAN Policy Maker for Cisco Intersight automates the creation
of VSAN Policies.
GitHub Repository: https://github.com/ugo-emekauwa/cisco-imm-automation-tools
"""
########################
# MODULE REQUIREMENT 1 #
########################
"""
For the following variable below named key_id, please fill in between
the quotes your Intersight API Key ID.
Here is an example:
key_id = "5c89885075646127773ec143/5c82fc477577712d3088eb2f/5c8987b17577712d302eaaff"
"""
key_id = ""
########################
# MODULE REQUIREMENT 2 #
########################
"""
For the following variable below named key, please fill in between
the quotes your system's file path to your Intersight API key "SecretKey.txt"
file.
Here is an example:
key = "C:\\Users\\demouser\\Documents\\SecretKey.txt"
"""
key = ""
########################
# MODULE REQUIREMENT 3 #
########################
"""
Provide the required configuration settings to create the
VSAN Policy on Cisco Intersight. Remove the sample
values and replace them with your own, where applicable.
"""
####### Start Configuration Settings - Provide values for the variables listed below. #######
# General Settings
vsan_policy_name = "VSAN-Policy-1"
vsan_policy_description = "A Cisco Intersight VSAN Policy generated by the VSAN Policy Maker."
vsan_policy_organization = "default"
vsan_policy_tags = {"Org": "IT", "Dept": "DevOps"} # Empty the vsan_policy_tags dictionary if no tags are needed, for example: vsan_policy_tags = {}
# Policy Detail Settings
## NOTE - To not configure any VSANs, leave the corresponding list empty, for example: vsan_list = []
vsan_list = [
{"Vsans": "100", "Name": "ESXi_FC_Storage", "FcoeVlans": "1000"},
{"Vsans": "101-102,150,170", "Name": "Hyper-V_FC_Storage", "FcoeVlans": "1001-1002,1050,1070", "DefaultZoning": "Disabled"},
{"Vsans": "175-177", "Name": "Xen_FC_Storage", "FcoeVlans": "1075-1077", "DefaultZoning": "Enabled"},
]
enable_uplink_trunking = False
# Intersight Base URL Setting (Change only if using the Intersight Virtual Appliance)
intersight_base_url = "https://www.intersight.com/api/v1"
# UCS Domain Profile Attachment Settings
ucs_domain_profile_name = ""
vsan_policy_fabric_interconnect = "A" # Options: "A", "B", "AB"
####### Finish Configuration Settings - The required value entries are complete. #######
#############################################################################################################################
#############################################################################################################################
import sys
import traceback
import json
import copy
import intersight
import re
# Function to get Intersight API client as specified in the Intersight Python SDK documentation for OpenAPI 3.x
## Modified to align with overall formatting and try/except blocks added for additional error handling
def get_api_client(api_key_id,
api_secret_file,
endpoint="https://intersight.com"
):
try:
with open(api_secret_file, 'r') as f:
api_key = f.read()
if re.search('BEGIN RSA PRIVATE KEY', api_key):
# API Key v2 format
signing_algorithm = intersight.signing.ALGORITHM_RSASSA_PKCS1v15
signing_scheme = intersight.signing.SCHEME_RSA_SHA256
hash_algorithm = intersight.signing.HASH_SHA256
elif re.search('BEGIN EC PRIVATE KEY', api_key):
# API Key v3 format
signing_algorithm = intersight.signing.ALGORITHM_ECDSA_MODE_DETERMINISTIC_RFC6979
signing_scheme = intersight.signing.SCHEME_HS2019
hash_algorithm = intersight.signing.HASH_SHA256
configuration = intersight.Configuration(
host=endpoint,
signing_info=intersight.signing.HttpSigningConfiguration(
key_id=api_key_id,
private_key_path=api_secret_file,
signing_scheme=signing_scheme,
signing_algorithm=signing_algorithm,
hash_algorithm=hash_algorithm,
signed_headers=[
intersight.signing.HEADER_REQUEST_TARGET,
intersight.signing.HEADER_HOST,
intersight.signing.HEADER_DATE,
intersight.signing.HEADER_DIGEST,
]
)
)
except Exception:
print("\nA configuration error has occurred!\n")
print("Unable to access the Intersight API Key.")
print("Exiting due to the Intersight API Key being unavailable.\n")
print("Please verify that the correct API Key ID and API Key have "
"been entered, then re-attempt execution.\n")
print("Exception Message: ")
traceback.print_exc()
sys.exit(0)
return intersight.ApiClient(configuration)
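# Illustrative usage sketch for get_api_client(), reusing the sample key ID and
# key file path shown in the module requirement notes above (example values):
#   example_api_client = get_api_client(
#       api_key_id="5c89885075646127773ec143/5c82fc477577712d3088eb2f/5c8987b17577712d302eaaff",
#       api_secret_file="C:\\Users\\demouser\\Documents\\SecretKey.txt"
#       )
# The returned intersight.ApiClient instance can then be passed to the helper
# functions below through their preconfigured_api_client arguments.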
# Establish function to test for the availability of the Intersight API and Intersight account
def test_intersight_api_service(intersight_api_key_id,
intersight_api_key,
intersight_base_url="https://www.intersight.com/api/v1",
preconfigured_api_client=None
):
"""This is a function to test the availability of the Intersight API and
Intersight account. The tested Intersight account contains the user who is
the owner of the provided Intersight API Key and Key ID.
Args:
intersight_api_key_id (str):
The ID of the Intersight API key.
intersight_api_key (str):
The system file path of the Intersight API key.
intersight_base_url (str):
Optional; The base URL for Intersight API paths. The default value
is "https://www.intersight.com/api/v1". This value typically only
            needs to be changed if using the Intersight Virtual Appliance.
preconfigured_api_client ("ApiClient"):
Optional; An ApiClient class instance which handles
Intersight client-server communication through the use of API keys.
The default value is None. If a preconfigured_api_client argument
is provided, empty strings ("") or None can be provided for the
intersight_api_key_id, intersight_api_key, and intersight_base_url
arguments.
Returns:
A string of the name for the Intersight account tested, verifying the
Intersight API service is up and the Intersight account is accessible.
Raises:
Exception:
An exception occurred due to an issue with the provided API Key
and/or API Key ID.
"""
# Define Intersight SDK ApiClient variable
if preconfigured_api_client is None:
api_client = get_api_client(api_key_id=intersight_api_key_id,
api_secret_file=intersight_api_key,
endpoint=intersight_base_url
)
else:
api_client = preconfigured_api_client
try:
# Check that Intersight Account is accessible
print("Testing access to the Intersight API by verifying the "
"Intersight account information...")
api_client.call_api(resource_path="/iam/Accounts",
method="GET",
auth_settings=['cookieAuth', 'http_signature', 'oAuth2', 'oAuth2']
)
response = api_client.last_response.data
iam_account = json.loads(response)
if api_client.last_response.status != 200:
print("\nThe Intersight API and Account Availability Test did not "
"pass.")
print("The Intersight account information could not be verified.")
print("Exiting due to the Intersight account being unavailable.\n")
print("Please verify that the correct API Key ID and API Key have "
"been entered, then re-attempt execution.\n")
sys.exit(0)
else:
intersight_account_name = iam_account["Results"][0]["Name"]
print("The Intersight API and Account Availability Test has "
"passed.\n")
print(f"The Intersight account named '{intersight_account_name}' "
"has been found.")
return intersight_account_name
except Exception:
print("\nA configuration error has occurred!\n")
print("Unable to access the Intersight API.")
print("Exiting due to the Intersight API being unavailable.\n")
print("Please verify that the correct API Key ID and API Key have "
"been entered, then re-attempt execution.\n")
print("Exception Message: ")
traceback.print_exc()
sys.exit(0)
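# Illustrative usage sketch for test_intersight_api_service(): when a
# preconfigured ApiClient is supplied, the key arguments may be left as None
# (example_api_client is a hypothetical ApiClient from get_api_client()).
#   example_account_name = test_intersight_api_service(
#       intersight_api_key_id=None,
#       intersight_api_key=None,
#       preconfigured_api_client=example_api_client
#       )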
# Establish function to retrieve the MOID of a specific Intersight API object by name
def intersight_object_moid_retriever(intersight_api_key_id,
intersight_api_key,
object_name,
intersight_api_path,
object_type="object",
organization="default",
intersight_base_url="https://www.intersight.com/api/v1",
preconfigured_api_client=None
):
"""This is a function to retrieve the MOID of Intersight objects
using the Intersight API.
Args:
intersight_api_key_id (str):
The ID of the Intersight API key.
intersight_api_key (str):
The system file path of the Intersight API key.
object_name (str):
The name of the Intersight object.
intersight_api_path (str):
The Intersight API path of the Intersight object.
object_type (str):
Optional; The type of Intersight object. The default value is
"object".
organization (str):
Optional; The Intersight organization of the Intersight object.
The default value is "default".
intersight_base_url (str):
Optional; The base URL for Intersight API paths. The default value
is "https://www.intersight.com/api/v1". This value typically only
needs to be changed if using the Intersight Virtual Appliance.
preconfigured_api_client ("ApiClient"):
Optional; An ApiClient class instance which handles
Intersight client-server communication through the use of API keys.
The default value is None. If a preconfigured_api_client argument
is provided, empty strings ("") or None can be provided for the
intersight_api_key_id, intersight_api_key, and intersight_base_url
arguments.
Returns:
A string of the MOID for the provided Intersight object.
Raises:
Exception:
An exception occurred due to an issue accessing the Intersight API
path. The status code or error message will be specified.
"""
# Define Intersight SDK ApiClient variable
if preconfigured_api_client is None:
api_client = get_api_client(api_key_id=intersight_api_key_id,
api_secret_file=intersight_api_key,
endpoint=intersight_base_url
)
else:
api_client = preconfigured_api_client
try:
# Retrieve the Intersight Account name
api_client.call_api(resource_path="/iam/Accounts",
method="GET",
auth_settings=['cookieAuth', 'http_signature', 'oAuth2', 'oAuth2']
)
response = api_client.last_response.data
iam_account = json.loads(response)
if api_client.last_response.status != 200:
print("The provided Intersight account information could not be "
"accessed.")
print("Exiting due to the Intersight account being unavailable.\n")
print("Please verify that the correct API Key ID and API Key have "
"been entered, then re-attempt execution.\n")
sys.exit(0)
else:
intersight_account_name = iam_account["Results"][0]["Name"]
except Exception:
print("\nA configuration error has occurred!\n")
print("Unable to access the Intersight API.")
print("Exiting due to the Intersight API being unavailable.\n")
print("Please verify that the correct API Key ID and API Key have "
"been entered, then re-attempt execution.\n")
sys.exit(0)
# Retrieving the provided object from Intersight...
full_intersight_api_path = f"/{intersight_api_path}"
try:
api_client.call_api(resource_path=full_intersight_api_path,
method="GET",
auth_settings=['cookieAuth', 'http_signature', 'oAuth2', 'oAuth2']
)
response = api_client.last_response.data
intersight_objects = json.loads(response)
# The Intersight API resource path has been accessed successfully.
except Exception:
print("\nA configuration error has occurred!\n")
print("There was an issue retrieving the "
f"{object_type} from Intersight.")
print("Unable to access the provided Intersight API resource path "
f"'{intersight_api_path}'.")
print("Please review and resolve any error messages, then re-attempt "
"execution.\n")
print("Exception Message: ")
traceback.print_exc()
sys.exit(0)
if intersight_objects.get("Results"):
for intersight_object in intersight_objects.get("Results"):
if intersight_object.get("Organization"):
provided_organization_moid = intersight_object_moid_retriever(intersight_api_key_id=None,
intersight_api_key=None,
object_name=organization,
intersight_api_path="organization/Organizations",
object_type="Organization",
preconfigured_api_client=api_client
)
if intersight_object.get("Organization", {}).get("Moid") == provided_organization_moid:
if intersight_object.get("Name") == object_name:
intersight_object_moid = intersight_object.get("Moid")
# The provided object and MOID has been identified and retrieved.
return intersight_object_moid
else:
if intersight_object.get("Name") == object_name:
intersight_object_moid = intersight_object.get("Moid")
# The provided object and MOID has been identified and retrieved.
return intersight_object_moid
else:
print("\nA configuration error has occurred!\n")
print(f"The provided {object_type} named '{object_name}' was not "
"found.")
print("Please check the Intersight Account named "
f"{intersight_account_name}.")
print("Verify through the API or GUI that the needed "
f"{object_type} is present.")
print(f"If the needed {object_type} is missing, please create it.")
print("Once the issue has been resolved, re-attempt execution.\n")
sys.exit(0)
else:
print("\nA configuration error has occurred!\n")
print(f"The provided {object_type} named '{object_name}' was not "
"found.")
print(f"No requested {object_type} instance is currently available in "
f"the Intersight account named {intersight_account_name}.")
print("Please check the Intersight Account named "
f"{intersight_account_name}.")
print(f"Verify through the API or GUI that the needed {object_type} "
"is present.")
print(f"If the needed {object_type} is missing, please create it.")
print("Once the issue has been resolved, re-attempt execution.\n")
sys.exit(0)
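# Illustrative usage sketch for intersight_object_moid_retriever (comment only). The
# call below mirrors the organization lookups made elsewhere in this file; "default"
# and api_client are example values standing in for a real organization name and an
# existing ApiClient instance.
#
#   org_moid = intersight_object_moid_retriever(
#       intersight_api_key_id=None,
#       intersight_api_key=None,
#       object_name="default",
#       intersight_api_path="organization/Organizations",
#       object_type="Organization",
#       preconfigured_api_client=api_client
#       )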
# Establish function to retrieve all instances of a particular Intersight API object type
def get_intersight_objects(intersight_api_key_id,
intersight_api_key,
intersight_api_path,
object_type="object",
intersight_base_url="https://www.intersight.com/api/v1",
preconfigured_api_client=None
):
"""This is a function to perform an HTTP GET on all objects under an
available Intersight API type.
Args:
intersight_api_key_id (str):
The ID of the Intersight API key.
intersight_api_key (str):
The system file path of the Intersight API key.
intersight_api_path (str):
The path to the targeted Intersight API object type. For example,
to specify the Intersight API type for adapter configuration
policies, enter "adapter/ConfigPolicies". More API types can be
found in the Intersight API reference library at
https://intersight.com/apidocs/introduction/overview/.
object_type (str):
Optional; The type of Intersight object. The default value is
"object".
intersight_base_url (str):
Optional; The base URL for Intersight API paths. The default value
is "https://www.intersight.com/api/v1". This value typically only
needs to be changed if using the Intersight Virtual Appliance.
preconfigured_api_client ("ApiClient"):
Optional; An ApiClient class instance which handles
Intersight client-server communication through the use of API keys.
The default value is None. If a preconfigured_api_client argument
is provided, empty strings ("") or None can be provided for the
intersight_api_key_id, intersight_api_key, and intersight_base_url
arguments.
Returns:
A dictionary containing all objects of the specified API type. If the
API type is inaccessible, an implicit value of None will be returned.
Raises:
Exception:
An exception occurred due to an issue accessing the Intersight API
path. The status code or error message will be specified.
"""
# Define Intersight SDK ApiClient variable
if preconfigured_api_client is None:
api_client = get_api_client(api_key_id=intersight_api_key_id,
api_secret_file=intersight_api_key,
endpoint=intersight_base_url
)
else:
api_client = preconfigured_api_client
# Retrieving the provided object from Intersight...
full_intersight_api_path = f"/{intersight_api_path}"
try:
api_client.call_api(resource_path=full_intersight_api_path,
method="GET",
auth_settings=['cookieAuth', 'http_signature', 'oAuth2', 'oAuth2']
)
response = api_client.last_response.data
intersight_objects = json.loads(response)
# The Intersight API resource path has been accessed successfully.
return intersight_objects
except Exception:
print("\nA configuration error has occurred!\n")
print(f"There was an issue retrieving the requested {object_type} "
"instances from Intersight.")
print("Unable to access the provided Intersight API resource path "
f"'{intersight_api_path}'.")
print("Please review and resolve any error messages, then re-attempt "
"execution.\n")
print("Exception Message: ")
traceback.print_exc()
sys.exit(0)
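# Illustrative usage sketch for get_intersight_objects (comment only). The
# "adapter/ConfigPolicies" path is the example given in the docstring above; api_client
# stands in for an existing ApiClient instance.
#
#   adapter_config_policies = get_intersight_objects(
#       intersight_api_key_id=None,
#       intersight_api_key=None,
#       intersight_api_path="adapter/ConfigPolicies",
#       object_type="Adapter Configuration Policy",
#       preconfigured_api_client=api_client
#       )
#   for retrieved_policy in adapter_config_policies.get("Results", []):
#       print(retrieved_policy.get("Name"))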
# Establish function to retrieve a particular instance of a particular Intersight API object type
def get_single_intersight_object(intersight_api_key_id,
intersight_api_key,
intersight_api_path,
object_moid,
object_type="object",
intersight_base_url="https://www.intersight.com/api/v1",
preconfigured_api_client=None
):
"""This is a function to perform an HTTP GET on a single object under an
available Intersight API type.
Args:
intersight_api_key_id (str):
The ID of the Intersight API key.
intersight_api_key (str):
The system file path of the Intersight API key.
intersight_api_path (str):
The path to the targeted Intersight API object type. For example,
to specify the Intersight API type for adapter configuration
policies, enter "adapter/ConfigPolicies". More API types can be
found in the Intersight API reference library at
https://intersight.com/apidocs/introduction/overview/.
object_moid (str):
The MOID of the single Intersight object.
object_type (str):
Optional; The type of Intersight object. The default value is
"object".
intersight_base_url (str):
Optional; The base URL for Intersight API paths. The default value
is "https://www.intersight.com/api/v1". This value typically only
needs to be changed if using the Intersight Virtual Appliance.
preconfigured_api_client ("ApiClient"):
Optional; An ApiClient class instance which handles
Intersight client-server communication through the use of API keys.
The default value is None. If a preconfigured_api_client argument
is provided, empty strings ("") or None can be provided for the
intersight_api_key_id, intersight_api_key, and intersight_base_url
arguments.
Returns:
A dictionary containing all objects of the specified API type. If the
API type is inaccessible, an implicit value of None will be returned.
Raises:
Exception:
An exception occurred due to an issue accessing the Intersight API
path. The status code or error message will be specified.
"""
# Define Intersight SDK ApiClient variable
if preconfigured_api_client is None:
api_client = get_api_client(api_key_id=intersight_api_key_id,
api_secret_file=intersight_api_key,
endpoint=intersight_base_url
)
else:
api_client = preconfigured_api_client
# Retrieving the provided object from Intersight...
full_intersight_api_path = f"/{intersight_api_path}/{object_moid}"
try:
api_client.call_api(resource_path=full_intersight_api_path,
method="GET",
auth_settings=['cookieAuth', 'http_signature', 'oAuth2', 'oAuth2']
)
response = api_client.last_response.data
single_intersight_object = json.loads(response)
# The Intersight API resource path has been accessed successfully.
return single_intersight_object
except Exception:
print("\nA configuration error has occurred!\n")
print(f"There was an issue retrieving the requested {object_type} "
"instance from Intersight.")
print("Unable to access the provided Intersight API resource path "
f"'{intersight_api_path}'.")
print("Please review and resolve any error messages, then re-attempt "
"execution.\n")
print("Exception Message: ")
traceback.print_exc()
sys.exit(0)
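# Illustrative usage sketch for get_single_intersight_object (comment only). The MOID
# shown is a placeholder that would normally come from a prior lookup such as
# intersight_object_moid_retriever(); "fabric/FcNetworkPolicies" is the VSAN Policy
# path used later in this file.
#
#   vsan_policy_object = get_single_intersight_object(
#       intersight_api_key_id=None,
#       intersight_api_key=None,
#       intersight_api_path="fabric/FcNetworkPolicies",
#       object_moid="<vsan_policy_moid>",
#       object_type="VSAN Policy",
#       preconfigured_api_client=api_client
#       )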
# Establish function to convert a list of numbers in string type format to list type format.
def integer_number_list_maker(string_list,
remove_duplicate_elements_in_list=True,
sort_elements_in_list=True
):
"""This function converts a list of numbers in string type format
to list type format. The provided string should contain commas,
semicolons, or spaces as the separator between numbers. Number
ranges can be configured using hyphens. An example entry would be
"1, 2, 3, 4, 5, 6, 7, 7-14, 25". For each number in the list,
    leading and trailing spaces will be removed. Duplicate numbers in the
list are removed by default.
Args:
string_list (str):
A string containing a number or range of numbers.
remove_duplicate_elements_in_list (bool):
Optional; A setting to determine whether duplicate elements
are removed from the provided string list. The default
value is True.
sort_elements_in_list (bool):
Optional; A setting to determine if the elements in the provided
string list should be sorted in ascending order. The default value
is True.
Returns:
A list of integers.
"""
def string_to_list_separator(string_list,
separator
):
"""This function converts a list of elements in string type
format to list type format using the provided separator. For
        each element in the list, leading and trailing spaces are removed.
Args:
string_list (str):
A string containing an element or range of elements.
separator (str):
The character to identify where elements in the list
should be separated (e.g., a comma, semicolon, hyphen,
etc.).
Returns:
A list of separated elements that have been stripped of any
spaces.
"""
fully_stripped_list = []
# Split string by provided separator and create list of separated elements.
split_list = string_list.split(separator)
for element in split_list:
if element:
# Remove leading spaces from elements in list.
lstripped_element = element.lstrip()
                # Remove trailing spaces from elements in list.
rstripped_element = lstripped_element.rstrip()
# Populate new list with fully stripped elements.
fully_stripped_list.append(rstripped_element)
return fully_stripped_list
def list_to_list_separator(provided_list,
separator
):
"""This function converts a list of elements in list type
format to list type format using the provided separator. For
        each element in the list, leading and trailing spaces are removed.
Args:
provided_list (list):
A list of elements to be separated.
separator (str):
The character to identify where elements in the list
should be separated (e.g., a comma, semicolon, hyphen,
etc.).
Returns:
A list of separated elements that have been stripped of any
spaces.
"""
new_list = []
# Split list by provided separator and create new list of separated elements.
for element in provided_list:
if separator in element:
split_provided_list = string_to_list_separator(element,
separator
)
new_list.extend(split_provided_list)
else:
new_list.append(element)
return new_list
integer_number_list = []
# Split provided list by spaces.
space_split_list = string_to_list_separator(string_list,
" "
)
# Split provided list by commas.
post_comma_split_list = list_to_list_separator(space_split_list,
","
)
# Split provided list by semicolons.
post_semicolon_split_list = list_to_list_separator(post_comma_split_list,
";"
)
# Split provided number ranges in the list by hyphens.
for post_semicolon_split_number_set in post_semicolon_split_list:
if "-" in post_semicolon_split_number_set:
if post_semicolon_split_number_set[0] != "-" and post_semicolon_split_number_set[-1] != "-":
hyphen_split_list = string_to_list_separator(post_semicolon_split_number_set,
"-"
)
# Limit new list to a maximum of two elements.
fixed_hyphen_split_list = hyphen_split_list[:2]
# Enumerate the numbers in the hyphen split list.
integer_starting_number_of_provided_range = int(fixed_hyphen_split_list[0])
integer_ending_number_of_provided_range = int(fixed_hyphen_split_list[1])
enumerated_range_list = range(
integer_starting_number_of_provided_range,
(integer_ending_number_of_provided_range + 1)
)
integer_number_list.extend(enumerated_range_list)
else:
integer_of_current_number_set = int(post_semicolon_split_number_set)
integer_number_list.append(integer_of_current_number_set)
# Remove duplicates from list if enabled.
if remove_duplicate_elements_in_list:
integer_number_list = list(set(integer_number_list))
if sort_elements_in_list:
integer_number_list = sorted(integer_number_list)
return integer_number_list
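# Illustrative input/output sketch for integer_number_list_maker:
#
#   integer_number_list_maker("1, 2, 3, 7-10; 10")
#   # -> [1, 2, 3, 7, 8, 9, 10]  (duplicates removed and sorted by default)
#   integer_number_list_maker("5-7", remove_duplicate_elements_in_list=False,
#                             sort_elements_in_list=False)
#   # -> [5, 6, 7]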
# Establish Maker specific classes and functions
class UcsPolicy:
"""This class is used to configure a UCS Policy in Intersight.
"""
object_type = "UCS Policy"
intersight_api_path = None
subobject_types = None
subobject_attribute_maps = None
object_variable_value_maps = None
def __init__(self,
intersight_api_key_id,
intersight_api_key,
policy_name,
policy_description="",
organization="default",
intersight_base_url="https://www.intersight.com/api/v1",
tags=None,
preconfigured_api_client=None
):
self.intersight_api_key_id = intersight_api_key_id
self.intersight_api_key = intersight_api_key
self.policy_name = policy_name
self.policy_description = policy_description
self.organization = organization
self.intersight_base_url = intersight_base_url
if tags is None:
self.tags = {}
else:
self.tags = tags
if preconfigured_api_client is None:
self.api_client = get_api_client(api_key_id=intersight_api_key_id,
api_secret_file=intersight_api_key,
endpoint=intersight_base_url
)
else:
self.api_client = preconfigured_api_client
self.intersight_api_body = {
"Name": self.policy_name,
"Description": self.policy_description
}
def __repr__(self):
return (
f"{self.__class__.__name__}"
f"('{self.intersight_api_key_id}', "
f"'{self.intersight_api_key}', "
f"'{self.policy_name}', "
f"'{self.policy_description}', "
f"'{self.organization}', "
f"'{self.intersight_base_url}', "
f"{self.tags}, "
f"{self.api_client})"
)
def __str__(self):
return f"{self.__class__.__name__} class object for '{self.policy_name}'"
def _post_intersight_object(self):
"""This is a function to configure an Intersight object by
performing a POST through the Intersight API.
Returns:
A string with a statement indicating whether the POST method
was successful or failed.
Raises:
Exception:
An exception occurred while performing the API call.
The status code or error message will be specified.
"""
full_intersight_api_path = f"/{self.intersight_api_path}"
try:
self.api_client.call_api(resource_path=full_intersight_api_path,
method="POST",
body=self.intersight_api_body,
auth_settings=['cookieAuth', 'http_signature', 'oAuth2', 'oAuth2']
)
print(f"The configuration of the base {self.object_type} "
"has completed.")
return "The POST method was successful."
except intersight.exceptions.ApiException as error:
if error.status == 409:
existing_intersight_object_name = self.intersight_api_body.get("Name", "object")
print(f"The targeted {self.object_type} appears to already "
"exist.")
print("An attempt will be made to update the pre-existing "
f"{existing_intersight_object_name}...")
try:
existing_intersight_object_moid = intersight_object_moid_retriever(intersight_api_key_id=None,
intersight_api_key=None,
object_name=existing_intersight_object_name,
intersight_api_path=self.intersight_api_path,
object_type=self.object_type,
preconfigured_api_client=self.api_client
)
# Update full Intersight API path with the MOID of the existing object
full_intersight_api_path_with_moid = f"/{self.intersight_api_path}/{existing_intersight_object_moid}"
self.api_client.call_api(resource_path=full_intersight_api_path_with_moid,
method="POST",
body=self.intersight_api_body,
auth_settings=['cookieAuth', 'http_signature', 'oAuth2', 'oAuth2']
)
print(f"The update of the {self.object_type} has "
"completed.")
print(f"The pre-existing {existing_intersight_object_name} "
"has been updated.")
return "The POST method was successful."
except Exception:
print("\nA configuration error has occurred!\n")
print(f"Unable to update the {self.object_type} under the "
"Intersight API resource path "
f"'{full_intersight_api_path_with_moid}'.\n")
print(f"The pre-existing {existing_intersight_object_name} "
"could not be updated.")
print("Exception Message: ")
traceback.print_exc()
return "The POST method failed."
else:
print("\nA configuration error has occurred!\n")
print(f"Unable to configure the {self.object_type} under the "
"Intersight API resource path "
f"'{full_intersight_api_path}'.\n")
print("Exception Message: ")
traceback.print_exc()
return "The POST method failed."
except Exception:
print("\nA configuration error has occurred!\n")
print(f"Unable to configure the {self.object_type} under the "
"Intersight API resource path "
f"'{full_intersight_api_path}'.\n")
print("Exception Message: ")
traceback.print_exc()
return "The POST method failed."
def _update_api_body_general_attributes(self):
"""This function updates the Intersight API body with general
attributes for the Intersight object.
"""
# Retrieve the Intersight Organization MOID
policy_organization_moid = intersight_object_moid_retriever(intersight_api_key_id=None,
intersight_api_key=None,
object_name=self.organization,
intersight_api_path="organization/Organizations",
object_type="Organization",
preconfigured_api_client=self.api_client
)
# Update the API body with the Intersight Organization MOID
self.intersight_api_body["Organization"] = {"Moid": policy_organization_moid}
# Create the Intersight Tags dictionary list
tags_dictionary_list = []
if self.tags:
for key in self.tags:
tags_dictionary_list_entry = {
"Key": key,
"Value": self.tags.get(key)
}
tags_dictionary_list.append(tags_dictionary_list_entry)
# Update the API body with the Intersight Tags dictionary list
self.intersight_api_body["Tags"] = tags_dictionary_list
def _update_api_body_subobject_attributes(self):
"""This function updates the Intersight API body with individual
attributes for subobjects of the Intersight object.
Raises:
Exception:
An exception occurred while reformatting a provided value for
an attribute. The issue will likely be due to the provided
value not being in string format. Changing the value to string
format should resolve the exception.
"""
def attribute_map_handler(attribute_map_dictionary):
"""This is a function to handle attributes with a mismatch in the
Front-End Name and Back-End Name.
Args:
attribute_map_dictionary (dict):
A dictionary containing attribute data, including front-end
(GUI) to back-end (API) mapped values.
"""
# Establish default automatic insertion status of current attribute
automatic_insertion_of_attribute_value_performed = False
# Check if current attribute is mandatory
if attribute_map_dictionary.get("Mandatory"):
if not any(
attribute_name_key in
staged_subobject_dictionary for
attribute_name_key in
(attribute_map_dictionary["FrontEndName"],
attribute_map_dictionary["BackEndName"]
)
):
if attribute_map_dictionary.get("AutomaticInsertion"):
staged_subobject_dictionary[attribute_map_dictionary["BackEndName"]] = attribute_map_dictionary.get("AutomaticInsertionValue")
automatic_insertion_of_attribute_value_performed = True
else:
print("\nA configuration error has occurred!\n")
print("During the configuration of the "
f"{subobject_type_dictionary['Description']} "
f"settings for the {self.object_type} named "
f"{self.policy_name}, there was an issue "
"accessing the value for the "
f"{attribute_map_dictionary['Description']}.")
print("Please verify the following key exists in the "
"appropriate dictionary of the "
f"{subobject_type_dictionary['Description']} "
f"list variable for the {self.object_type}, then "
"re-attempt execution:\n")
if attribute_map_dictionary['FrontEndName']:
print(f"'{attribute_map_dictionary['FrontEndName']}'\n")
else:
print(f"'{attribute_map_dictionary['BackEndName']}'\n")
sys.exit(0)
# Check for attribute front-end name key in the dictionary
if attribute_map_dictionary["FrontEndName"] in staged_subobject_dictionary:
# If the attribute back-end name key is not present in the dictionary, insert the back-end name key with the front-end name key value
if attribute_map_dictionary["BackEndName"] not in staged_subobject_dictionary:
staged_subobject_dictionary[attribute_map_dictionary["BackEndName"]] = staged_subobject_dictionary.get(attribute_map_dictionary["FrontEndName"])
# Remove the front-end name key from the dictionary
staged_subobject_dictionary.pop(attribute_map_dictionary["FrontEndName"])
# Check for front-end to back-end value mapping
if attribute_map_dictionary.get("FronttoBackEndValueMaps"):
if (
attribute_map_dictionary["BackEndName"] in
staged_subobject_dictionary and
not automatic_insertion_of_attribute_value_performed
):
# Retrieve the provided attribute value
provided_attribute_value = staged_subobject_dictionary.get(attribute_map_dictionary["BackEndName"])
# Reformat the provided attribute value to lowercase and remove spaces to prevent potential format issues
try:
provided_attribute_value_reformatted = "".join(provided_attribute_value.lower().split())
except Exception:
print("\nA configuration error has occurred!\n")
print("During the configuration of the "
f"{subobject_type_dictionary['Description']} "
f"settings for the {self.object_type} named "
f"{self.policy_name}, there was an issue with "
"the value for the "
f"{attribute_map_dictionary['Description']}.")
print("The value provided was "
f"{provided_attribute_value}.")
print("Please verify that the value has been provided "
"in an accepted string format.")
print("Please review and resolve any error messages, "
"then re-attempt execution.\n")
sys.exit(0)
# Create list of known and mapped front-end to back-end values
front_to_backend_value_maps_key_list = list(attribute_map_dictionary["FronttoBackEndValueMaps"])
# Replace known and reformatted front-end value with known and mapped back-end value
if provided_attribute_value_reformatted in front_to_backend_value_maps_key_list:
provided_attribute_value_mapped = attribute_map_dictionary["FronttoBackEndValueMaps"][provided_attribute_value_reformatted]
staged_subobject_dictionary[attribute_map_dictionary["BackEndName"]] = provided_attribute_value_mapped
else:
print("\nWARNING: An unknown "
f"{attribute_map_dictionary['Description']} "
f"value of '{provided_attribute_value}' has been "
"provided for the "
f"{subobject_type_dictionary['Description']} "
"settings!")
print("An attempt will be made to configure the "
"unknown "
f"{attribute_map_dictionary['Description']} "
"value.")
print("If there is an error, please use one of the "
"following known values for the "
f"{attribute_map_dictionary['Description']}, "
"then re-attempt execution:\n")
print(*attribute_map_dictionary["FixedFrontEndValues"],
sep=", "
)
# Check for subobject types that may need configuration
if self.subobject_types:
for subobject_type_dictionary in self.subobject_types:
subobject_list = getattr(self,
subobject_type_dictionary.get("SubobjectList")
)
if subobject_list:
converted_subobject_list = []
for subobject_dictionary in subobject_list:
staged_subobject_dictionary = copy.deepcopy(subobject_dictionary)
returned_subobject_type = subobject_type_dictionary["SubobjectType"]
# Set 'ClassId' attribute
staged_subobject_dictionary["ClassId"] = returned_subobject_type
# Set 'ObjectType' attribute
staged_subobject_dictionary["ObjectType"] = returned_subobject_type
# Handle setting of attributes which have mismatched Front-End and Back-End names
for attribute_map_dictionary in self.subobject_attribute_maps.get(returned_subobject_type):
attribute_map_handler(attribute_map_dictionary)
converted_subobject_dictionary = staged_subobject_dictionary
converted_subobject_list.append(converted_subobject_dictionary)
# Update Intersight API body with the converted sub-objects list
self.intersight_api_body[subobject_type_dictionary["AttributeName"]] = converted_subobject_list
def _update_api_body_mapped_object_attributes(self):
"""This function updates the Intersight API body with individual
attributes that require mapping frontend to backend values for
compatibility with the Intersight API.
Raises:
Exception:
An exception occurred while reformatting a provided value for
an attribute. The issue will likely be due to the provided
value not being in string format. Changing the value to string
format should resolve the exception.
"""
# Check for object variables with value maps that need configuration
if self.object_variable_value_maps:
for object_variable in self.object_variable_value_maps:
# Create list of all known and accepted frontend values
all_known_and_accepted_frontend_values = (object_variable_value["FrontEndValue"]
for
object_variable_value
in
object_variable["Values"]
)
# Retrieve the user provided object variable value
provided_object_variable_value = getattr(self,
object_variable["VariableName"]
)
# Reformat the user provided object variable value to lowercase and remove spaces to prevent potential format issues
try:
reformatted_object_variable_value = "".join(provided_object_variable_value.lower().split())
except Exception:
print("\nA configuration error has occurred!\n")
print(f"During the configuration of the {self.object_type} named "
f"{self.policy_name}, there was an issue with the value "
f"provided for the {object_variable['Description']} setting.")
print(f"The value provided was {provided_object_variable_value}.")
print("To proceed, the value provided for the "
f"{object_variable['Description']} setting should be updated to "
"an accepted string format.")
print("The recommended values are the following:\n")
# Print list of all known and accepted frontend values for user
print(*all_known_and_accepted_frontend_values,
sep=", "
)
print("\nPlease update the configuration, then re-attempt "
"execution.\n")
sys.exit(0)
# Cycle through known values and match provided object variable value to backend value
for object_variable_value in object_variable["Values"]:
# Create list of all known and accepted frontend and backend values
current_known_frontend_and_backend_value_options = (object_variable_value.values())
# Retrieve the current known backend value
current_known_backend_value = object_variable_value["BackEndValue"]
if (
reformatted_object_variable_value
in
("".join(current_known_frontend_or_backend_value.lower().split())
for
current_known_frontend_or_backend_value
in
current_known_frontend_and_backend_value_options
)
):
backend_object_variable_value = current_known_backend_value
break
else:
# If no backend match is found with the user provided object variable value, pass on the user provided object variable value to Intersight to decide
print(f"\nWARNING: An unknown {self.object_type} value of "
f"'{provided_object_variable_value}' has been "
f"provided for the {object_variable['Description']} "
"settings!")
print("An attempt will be made to configure the unknown "
f"{object_variable['Description']} value.")
print("If there is an error, please use one of the "
"following known values for the "
f"{object_variable['Description']} settings, then "
"re-attempt execution:\n")
print(*all_known_and_accepted_frontend_values,
sep=", "
)
backend_object_variable_value = provided_object_variable_value
# Update Intersight API body with the converted object variable value
self.intersight_api_body[object_variable["AttributeName"]] = backend_object_variable_value
def object_maker(self):
"""This function makes the targeted policy object.
"""
print(f"\nConfiguring the {self.object_type} named "
f"{self.policy_name}...")
# Update the API body with general attributes
self._update_api_body_general_attributes()
# Update the API body with individual subobject attributes
self._update_api_body_subobject_attributes()
# Update the API body with individual mapped object attributes
self._update_api_body_mapped_object_attributes()
# POST the API body to Intersight
self._post_intersight_object()
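# Note on subclassing (comment only): UcsPolicy serves as a base class. The concrete
# policy classes below override object_type and intersight_api_path, and extend
# __init__ and object_maker where additional attributes or steps are needed, as
# DirectlyAttachedUcsDomainPolicy and VsanPolicy do.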
class DirectlyAttachedUcsDomainPolicy(UcsPolicy):
"""This class is used to configure a UCS Domain Policy in Intersight that
is logically directly attached to UCS Fabric Interconnects through UCS
Domain Profiles.
"""
object_type = "Directly Attached UCS Domain Policy"
intersight_api_path = None
def __init__(self,
intersight_api_key_id,
intersight_api_key,
policy_name,
policy_description="",
organization="default",
intersight_base_url="https://www.intersight.com/api/v1",
tags=None,
preconfigured_api_client=None,
ucs_domain_profile_name="",
fabric_interconnect="AB"
):
super().__init__(intersight_api_key_id,
intersight_api_key,
policy_name,
policy_description,
organization,
intersight_base_url,
tags,
preconfigured_api_client
)
self.ucs_domain_profile_name = ucs_domain_profile_name
self.fabric_interconnect = fabric_interconnect
def __repr__(self):
return (
f"{self.__class__.__name__}"
f"('{self.intersight_api_key_id}', "
f"'{self.intersight_api_key}', "
f"'{self.policy_name}', "
f"'{self.policy_description}', "
f"'{self.organization}', "
f"'{self.intersight_base_url}', "
f"{self.tags}, "
f"{self.api_client}, "
f"'{self.ucs_domain_profile_name}', "
f"'{self.fabric_interconnect}')"
)
def _attach_ucs_domain_profile(self):
"""This is a function to attach an Intersight UCS Domain Profile to an
Intersight Policy.
Returns:
A dictionary for the API body of the policy object to be posted on
Intersight.
"""
# Attach UCS Domain Profile
if self.ucs_domain_profile_name:
print("Attaching the UCS Domain Profile named "
f"{self.ucs_domain_profile_name}...")
# Get UCS Domain Profile MOID
ucs_domain_profile_moid = intersight_object_moid_retriever(intersight_api_key_id=None,
intersight_api_key=None,
object_name=self.ucs_domain_profile_name,
intersight_api_path="fabric/SwitchClusterProfiles",
object_type="UCS Domain Profile",
organization=self.organization,
preconfigured_api_client=self.api_client
)
# Get UCS Domain Profile object dictionary attributes
ucs_domain_profile_object = get_single_intersight_object(intersight_api_key_id=None,
intersight_api_key=None,
intersight_api_path="fabric/SwitchClusterProfiles",
object_moid=ucs_domain_profile_moid,
object_type="UCS Domain Profile",
preconfigured_api_client=self.api_client
)
# Get Switch Profiles that are attached to the UCS Domain Profile
ucs_domain_profile_list_of_attached_switch_profiles = ucs_domain_profile_object.get("SwitchProfiles")
if len(ucs_domain_profile_list_of_attached_switch_profiles) != 2:
print("\nA configuration error has occurred!\n")
print("The provided UCS Domain Profile named "
f"{self.ucs_domain_profile_name} is not configured with "
"two attached Switch Profiles.")
print("To proceed, two Switch Profiles must be attached to the "
"provided UCS Domain Profile.")
print("Please update the configuration of the provided UCS "
f"Domain Profile, then re-attempt execution.\n")
sys.exit(0)
else:
fabric_interconnect_a_switch_profile_moid = ucs_domain_profile_list_of_attached_switch_profiles[0].get("Moid")
fabric_interconnect_b_switch_profile_moid = ucs_domain_profile_list_of_attached_switch_profiles[1].get("Moid")
# Update the API body with the appropriate Switch Profile MOIDs based on selected Fabric Interconnects
if self.fabric_interconnect not in ("AB", "BA", "A", "B"):
print("\nA configuration error has occurred!\n")
print("The provided UCS Domain Profile Fabric Interconnect "
"value of "
f"'{self.fabric_interconnect}' "
"is not supported.")
print("To proceed, the Fabric Interconnect value for the "
"UCS Domain Profile must be 'AB', 'A', or 'B'.")
print("Please update the configuration of the provided UCS "
"Domain Profile Fabric Interconnect, then re-attempt "
"execution.\n")
sys.exit(0)
else:
if self.fabric_interconnect == "A":
print("The attachment will be made to Fabric Interconnect "
"A.")
self.intersight_api_body["Profiles"] = [
{"Moid": fabric_interconnect_a_switch_profile_moid,
"ObjectType": "fabric.SwitchProfile"}
]
elif self.fabric_interconnect == "B":
print("The attachment will be made to Fabric Interconnect "
"B.")
self.intersight_api_body["Profiles"] = [
{"Moid": fabric_interconnect_b_switch_profile_moid,
"ObjectType": "fabric.SwitchProfile"}
]
else:
print("The attachment will be made to Fabric Interconnects "
"A and B.")
self.intersight_api_body["Profiles"] = [
{"Moid": fabric_interconnect_a_switch_profile_moid,
"ObjectType": "fabric.SwitchProfile"},
{"Moid": fabric_interconnect_b_switch_profile_moid,
"ObjectType": "fabric.SwitchProfile"}
]
def object_maker(self):
"""This function makes the targeted policy object.
"""
print(f"\nConfiguring the {self.object_type} named "
f"{self.policy_name}...")
# Update the API body with general attributes
self._update_api_body_general_attributes()
# Update the API body with individual subobject attributes
self._update_api_body_subobject_attributes()
# Update the API body with individual mapped object attributes
self._update_api_body_mapped_object_attributes()
# Update the API body with a UCS Domain Profile attached, if specified
self._attach_ucs_domain_profile()
# POST the API body to Intersight
self._post_intersight_object()
class VsanPolicy(DirectlyAttachedUcsDomainPolicy):
"""This class is used to configure a VSAN Policy in Intersight.
"""
object_type = "VSAN Policy"
intersight_api_path = "fabric/FcNetworkPolicies"
def __init__(self,
intersight_api_key_id,
intersight_api_key,
policy_name,
policy_description="",
organization="default",
intersight_base_url="https://www.intersight.com/api/v1",
tags=None,
preconfigured_api_client=None,
ucs_domain_profile_name="",
fabric_interconnect="A",
enable_uplink_trunking=False
):
super().__init__(intersight_api_key_id,
intersight_api_key,
policy_name,
policy_description,
organization,
intersight_base_url,
tags,
preconfigured_api_client,
ucs_domain_profile_name,
fabric_interconnect
)
self.enable_uplink_trunking = enable_uplink_trunking
self.intersight_api_body = {
"Name": self.policy_name,
"Description": self.policy_description,
"EnableTrunking": self.enable_uplink_trunking,
}
def __repr__(self):
return (
f"{self.__class__.__name__}"
f"('{self.intersight_api_key_id}', "
f"'{self.intersight_api_key}', "
f"'{self.policy_name}', "
f"'{self.policy_description}', "
f"'{self.organization}', "
f"'{self.intersight_base_url}', "
f"{self.tags}, "
f"{self.api_client}, "
f"'{self.ucs_domain_profile_name}', "
f"'{self.fabric_interconnect}', "
f"{self.enable_uplink_trunking})"
)
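# Illustrative usage sketch for VsanPolicy (comment only). vsan_policy_maker() further
# below is the intended entry point; the policy name and api_client here are example
# values only.
#
#   vsan_policy = VsanPolicy(
#       intersight_api_key_id=None,
#       intersight_api_key=None,
#       policy_name="Storage-Fabric-A",
#       enable_uplink_trunking=False,
#       preconfigured_api_client=api_client
#       )
#   vsan_policy.object_maker()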
class IdConfigurator:
"""This class serves as a base class for configuring the IDs of objects in
Intersight policies. Examples of the objects that would use these IDs are
ports, VLANs, and VSANs.
"""
object_type = "ID Configurator"
attributes_that_require_special_handling = None
def __init__(self,
intersight_api_key_id,
intersight_api_key,
policy_name,
id_list=None,
organization="default",
intersight_base_url="https://www.intersight.com/api/v1",
preconfigured_api_client=None
):
self.intersight_api_key_id = intersight_api_key_id
self.intersight_api_key = intersight_api_key
self.policy_name = policy_name
if id_list is None:
self.id_list = []
else:
self.id_list = id_list
self.organization = organization
self.intersight_base_url = intersight_base_url
if preconfigured_api_client is None:
self.api_client = get_api_client(api_key_id=intersight_api_key_id,
api_secret_file=intersight_api_key,
endpoint=intersight_base_url
)
else:
self.api_client = preconfigured_api_client
def __repr__(self):
return (
f"{self.__class__.__name__}"
f"('{self.intersight_api_key_id}', "
f"'{self.intersight_api_key}', "
f"'{self.policy_name}', "
f"'{self.id_list}', "
f"'{self.organization}', "
f"'{self.intersight_base_url}', "
f"{self.api_client})"
)
def __str__(self):
return f"{self.__class__.__name__} class object for '{self.policy_name}'"
def enumerated_id_range_retriever(self,
id_dictionary,
id_key
):
"""This function captures provided ID ranges for enumeration and
returns the enumerated list.
Args:
id_dictionary (dict):
The dictionary containing the ID or range of IDs to be
enumerated.
id_key (str):
The key name of the ID to be enumerated.
Returns:
A list of enumerated integers for the provided IDs in a format
consumable by the Intersight API.
Raises:
KeyError:
A key error occurred due to an inaccessible dictionary key. In
this instance, the key for the ID type is inaccessible from the
ID dictionary. The key may be missing or misspelled.
"""
# Verify ID range has been configured in provided ID dictionary
try:
provided_id_range = id_dictionary[id_key]
except KeyError:
print("\nA configuration error has occurred!\n")
print(f"During the configuration of the {self.object_type} "
f"settings for the {self.policy_type} named "
f"{self.policy_name}, there was an issue accessing the range "
f"of IDs to be configured.")
print(f"Please verify a key named '{id_key}' exists in the "
f"ID list variable for the {self.object_type} "
"settings.\n")
print("Please review and resolve any error messages, then "
"re-attempt execution.\n")
print("Exception Message: ")
traceback.print_exc()
sys.exit(0)
return integer_number_list_maker(provided_id_range)
def attribute_handler(self,
staged_intersight_api_body
):
"""
This function is used to retrieve the MOIDs for provided attributes
that require them for configuration through the Intersight API.
Args:
staged_intersight_api_body (dict):
The staged Intersight API body dictionary being prepared for
configuration on Intersight.
Raises:
KeyError:
A key error occurred due to an inaccessible dictionary key. In
this instance, the key for the name of a required attribute is
inaccessible from the ID dictionary. The key may be missing or
misspelled.
"""
if self.attributes_that_require_special_handling:
for id_attribute in self.attributes_that_require_special_handling:
# If provided, add default attribute option to staged Intersight API body
if id_attribute.get("DefaultOption"):
id_attribute_default_option = getattr(self,
id_attribute.get("DefaultOption")
)
if id_attribute_default_option:
if id_attribute["Name"] not in staged_intersight_api_body:
staged_intersight_api_body[id_attribute["Name"]] = id_attribute_default_option
# Verify mandatory attributes are present in the staged Intersight API body
if id_attribute.get("Mandatory"):
try:
verify_id_attribute_presence = staged_intersight_api_body[id_attribute["Name"]]
except KeyError:
print("\nA configuration error has occurred!\n")
print("During the configuration of the "
f"{self.object_type} settings for the "
f"{self.policy_type} named {self.policy_name}, "
f"there was an issue accessing the value for "
f"the {id_attribute['Type']}.")
print("Please verify the key named "
f"'{id_attribute['Name']}' exists in the "
f"ID list variable for the {self.object_type} "
"settings.\n")
print("Please review and resolve any error messages, "
f"then re-attempt execution.\n")
print("Exception Message: ")
traceback.print_exc()
sys.exit(0)
# Retrieve MOID if required
if id_attribute.get("MoidRequired"):
id_attribute_moid = intersight_object_moid_retriever(intersight_api_key_id=None,
intersight_api_key=None,
object_name=staged_intersight_api_body[id_attribute["Name"]],
intersight_api_path=id_attribute["IntersightAPIPath"],
object_type=id_attribute["Type"],
organization=self.organization,
preconfigured_api_client=self.api_client
)
staged_intersight_api_body[id_attribute["Name"]] = {"Moid": id_attribute_moid}
else:
# Retrieve MOID if required for optional attributes
if id_attribute.get("MoidRequired"):
if staged_intersight_api_body.get(id_attribute["Name"]):
id_attribute_moid = intersight_object_moid_retriever(intersight_api_key_id=None,
intersight_api_key=None,
object_name=staged_intersight_api_body[id_attribute["Name"]],
intersight_api_path=id_attribute["IntersightAPIPath"],
object_type=id_attribute["Type"],
organization=self.organization,
preconfigured_api_client=self.api_client
)
staged_intersight_api_body[id_attribute["Name"]] = {"Moid": id_attribute_moid}
class Vsan(IdConfigurator):
"""This class serves as a base class for configuring the IDs of VSANs in
Intersight policies.
"""
object_type = "VSAN"
id_type = "Vsan"
intersight_api_path = "fabric/Vsans"
policy_type = "VSAN Policy"
policy_intersight_api_path = "fabric/FcNetworkPolicies"
attributes_that_require_special_handling = [
{"Name": "Name",
"Type": "VSAN Name Prefix",
"IntersightAPIPath": None,
"Mandatory": True,
"MoidRequired": False,
"DefaultOption": None
}
]
def __init__(self,
intersight_api_key_id,
intersight_api_key,
policy_name,
id_list=None,
organization="default",
intersight_base_url="https://www.intersight.com/api/v1",
preconfigured_api_client=None
):
super().__init__(intersight_api_key_id,
intersight_api_key,
policy_name,
id_list,
organization,
intersight_base_url,
preconfigured_api_client
)
def __repr__(self):
return (
f"{self.__class__.__name__}"
f"('{self.intersight_api_key_id}', "
f"'{self.intersight_api_key}', "
f"'{self.policy_name}', "
f"'{self.id_list}', "
f"'{self.organization}', "
f"'{self.intersight_base_url}', "
f"{self.api_client})"
)
def object_maker(self):
"""This function applies the provided id list configuration to the
targeted policy.
"""
def post_intersight_vsan(vsan_name,
body,
moid=None
):
"""This is a function to configure an Intersight object by
performing a POST through the Intersight API.
Args:
vsan_name (str):
The name of the VSAN to be posted on Intersight.
body (dict):
The body of the object to be posted on Intersight.
moid (str):
Optional; The Intersight MOID of the object to be posted
on Intersight. The default value is None.
Returns:
A string with a statement indicating whether the POST method
was successful or failed.
Raises:
Exception:
An exception occurred while performing the API call.
The status code or error message will be specified.
"""
if moid:
full_intersight_api_path = f"/{self.intersight_api_path}/{moid}"
else:
full_intersight_api_path = f"/{self.intersight_api_path}"
try:
self.api_client.call_api(resource_path=full_intersight_api_path,
method="POST",
body=body,
auth_settings=['cookieAuth', 'http_signature', 'oAuth2', 'oAuth2']
)
if moid:
print(f"The configuration of {self.object_type} "
f"{vsan_name} has been updated.")
else:
print(f"The configuration of {self.object_type} "
f"{vsan_name} has completed.")
return "The POST method was successful."
except Exception:
print("\nA configuration error has occurred!\n")
if moid:
print(f"Unable to update {self.object_type} "
f"{vsan_name} under the "
"Intersight API resource path "
f"'{full_intersight_api_path}'.\n")
else:
print(f"Unable to configure {self.object_type} "
f"{vsan_name} under the Intersight "
f"API resource path '{full_intersight_api_path}'.\n")
print("Exception Message: ")
traceback.print_exc()
return "The POST method failed."
if self.id_list:
vsans_id_key = f"{self.id_type}s"
fcoe_vlans_id_key = "FcoeVlans"
for id_dictionary in self.id_list:
# Enumerate provided VSAN range
vsans_enumerated_id_range = self.enumerated_id_range_retriever(id_dictionary,
vsans_id_key
)
# Enumerate provided FCoE VLAN range
fcoe_vlans_enumerated_id_range = self.enumerated_id_range_retriever(id_dictionary,
fcoe_vlans_id_key
)
# Match and pair enumerated VSAN and FCoE VLAN ranges
vsan_id_and_fcoe_vlan_id_matched_pair_list = list(zip(vsans_enumerated_id_range,
fcoe_vlans_enumerated_id_range
))
staged_intersight_api_body = copy.deepcopy(id_dictionary)
staged_intersight_api_body.pop(vsans_id_key)
staged_intersight_api_body.pop(fcoe_vlans_id_key)
# Retrieving the VSAN Policy MOID
policy_moid = intersight_object_moid_retriever(intersight_api_key_id=None,
intersight_api_key=None,
object_name=self.policy_name,
intersight_api_path=self.policy_intersight_api_path,
object_type=self.policy_type,
organization=self.organization,
intersight_base_url=self.intersight_base_url,
preconfigured_api_client=self.api_client
)
staged_intersight_api_body["FcNetworkPolicy"] = {"Moid": policy_moid}
# Retrieve MOIDs for ID attributes that require MOIDs
self.attribute_handler(staged_intersight_api_body)
current_vsan_id_name_prefix = id_dictionary.get("Name")
for current_vsan_id, current_fcoe_vlan_id in vsan_id_and_fcoe_vlan_id_matched_pair_list:
staged_intersight_api_body[f"{self.id_type}Id"] = current_vsan_id
current_vsan_id_full_name = f"{current_vsan_id_name_prefix}_{current_vsan_id}"
staged_intersight_api_body["Name"] = current_vsan_id_full_name
staged_intersight_api_body["FcoeVlan"] = current_fcoe_vlan_id
post_intersight_vsan(
current_vsan_id_full_name,
staged_intersight_api_body
)
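# Illustrative usage sketch for the Vsan class (comment only). It mirrors the builder()
# call made inside vsan_policy_maker() below; the VSAN Policy referenced by policy_name
# must already exist, and the id_list entry reuses values from the vsan_policy_maker
# docstring.
#
#   vsan_ids = Vsan(
#       intersight_api_key_id=None,
#       intersight_api_key=None,
#       policy_name="Storage-Fabric-A",
#       id_list=[{"Vsans": "500", "Name": "ESXi_FC_Storage", "FcoeVlans": "500"}],
#       preconfigured_api_client=api_client
#       )
#   vsan_ids.object_maker()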
def vsan_policy_maker(intersight_api_key_id,
intersight_api_key,
policy_name,
enable_uplink_trunking=False,
vsan_list=None,
policy_description="",
organization="default",
intersight_base_url="https://www.intersight.com/api/v1",
tags=None,
preconfigured_api_client=None,
ucs_domain_profile_name="",
fabric_interconnect="A"
):
"""This is a function used to make a VSAN Policy on Cisco Intersight.
Args:
intersight_api_key_id (str):
The ID of the Intersight API key.
intersight_api_key (str):
The system file path of the Intersight API key.
policy_name (str):
The name of the policy to be created.
enable_uplink_trunking (bool):
Optional; The setting to enable or disable trunking on all
configured FC uplink ports. The default value is False (disabled).
vsan_list (list):
Optional; The list of dictionary entries for VSANs to
be configured. The default value is None. An
example entry is:
[{"Vsans": "500",
"Name": "ESXi_FC_Storage",
"FcoeVlans": "500",
"DefaultZoning": "Disabled"
},
{"Vsans": "501-510,550,570",
"Name": "Hyper-V_FC_Storage",
"FcoeVlans": "501-510,550,570",
"DefaultZoning": "Disabled"
},
{"Vsans": "575-585",
"Name": "Xen_FC_Storage",
"FcoeVlans": "575-585",
"DefaultZoning": "Enabled"},].
A "Vsans" key with a string value must be configured in each
dictionary of the list to specify the VSAN ID or range of VSAN IDs
to be created. A "Name" key with a string value must also be
configured to specify the VSAN name prefix. A "FcoeVlans" key
must also be configured with a string value to specify the
FCoE VLAN ID or range of FCoE VLAN IDs to be created and matched
with the VSANs previously specified in the Vsans key. Optional key
settings such as "DefaultZoning" and "FcZoneSharingMode" can also
be configured in each list dictionary, if the associated system
default values are not desired. Multiple dictionaries can be placed
in the list to configure different settings for different ranges of
VSANs.
policy_description (str):
Optional; The description of the policy to be created. The default
value is an empty string ("").
organization (str):
Optional; The Intersight account organization of the policy.
The default value is "default".
intersight_base_url (str):
Optional; The base URL for Intersight API paths. The default value
is "https://www.intersight.com/api/v1". This value typically only
needs to be changed if using the Intersight Virtual Appliance.
tags (dict):
Optional; The Intersight account tags that will be assigned to the
policy. The default value is None.
preconfigured_api_client ("ApiClient"):
Optional; An ApiClient class instance which handles
Intersight client-server communication through the use of API keys.
The default value is None. If a preconfigured_api_client argument
is provided, empty strings ("") or None can be provided for the
intersight_api_key_id, intersight_api_key, and intersight_base_url
arguments.
ucs_domain_profile_name (str):
Optional; The UCS Domain Profile the policy should be attached to.
The default value is an empty string ("").
fabric_interconnect (str):
Optional; The Fabric Interconnect in the UCS Domain Profile that
the policy should be attached to. The default value is "A".
Accepted values are "A" for only Fabric Interconnect A, "B" for
only Fabric Interconnect B, and "AB" for both Fabric Interconnects A
and B. For VSAN configuration, typically each Fabric Interconnect
would have separate VSAN Policies to support different VSAN numbers
for each storage fabric.
"""
def builder(target_object):
"""This is a function used to build the objects that are components of
an overarching pool or policy on Cisco Intersight.
Args:
target_object (class):
The class representing the object to be built on Intersight.
Raises:
Exception:
An exception occurred due to an issue accessing the Intersight
API path. The status code or error message will be specified.
"""
try:
target_object.object_maker()
except Exception:
print("\nA configuration error has occurred!\n")
print("The builder function failed to configure the "
f"{target_object.object_type} settings.")
print("Please check the provided arguments for the "
f"{target_object.object_type} settings.\n")
print("Exception Message: ")
traceback.print_exc()
# Define and create VSAN Policy object in Intersight
builder(VsanPolicy(intersight_api_key_id=intersight_api_key_id,
intersight_api_key=intersight_api_key,
policy_name=policy_name,
policy_description=policy_description,
organization=organization,
intersight_base_url=intersight_base_url,
tags=tags,
preconfigured_api_client=preconfigured_api_client,
ucs_domain_profile_name=ucs_domain_profile_name,
fabric_interconnect=fabric_interconnect,
enable_uplink_trunking=enable_uplink_trunking
))
# Define and create VSAN objects in Intersight
builder(Vsan(intersight_api_key_id=intersight_api_key_id,
intersight_api_key=intersight_api_key,
policy_name=policy_name,
id_list=vsan_list,
organization=organization,
intersight_base_url=intersight_base_url,
preconfigured_api_client=preconfigured_api_client
))
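# Illustrative call sketch for vsan_policy_maker() (comment only). The values shown are
# assumptions for demonstration; main() below performs the call this script actually
# uses, driven by the user-provided variables referenced there.
#
#   vsan_policy_maker(
#       intersight_api_key_id=None,
#       intersight_api_key=None,
#       policy_name="Storage-Fabric-A",
#       enable_uplink_trunking=False,
#       vsan_list=[{"Vsans": "500",
#                   "Name": "ESXi_FC_Storage",
#                   "FcoeVlans": "500",
#                   "DefaultZoning": "Disabled"}],
#       fabric_interconnect="A",
#       preconfigured_api_client=main_intersight_api_client
#       )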
def main():
# Establish Maker specific variables
maker_type = "Intersight VSAN Policy Maker"
# Establish Intersight SDK for Python API client instance
main_intersight_api_client = get_api_client(api_key_id=key_id,
api_secret_file=key,
endpoint=intersight_base_url
)
# Starting the Policy Maker for Cisco Intersight
print(f"\nStarting the {maker_type} for Cisco Intersight.\n")
# Run the Intersight API and Account Availability Test
print("Running the Intersight API and Account Availability Test.")
test_intersight_api_service(
intersight_api_key_id=None,
intersight_api_key=None,
preconfigured_api_client=main_intersight_api_client
)
# Create the Policy in Intersight
vsan_policy_maker(
intersight_api_key_id=None,
intersight_api_key=None,
policy_name=vsan_policy_name,
enable_uplink_trunking=enable_uplink_trunking,
vsan_list=vsan_list,
policy_description=vsan_policy_description,
organization=vsan_policy_organization,
intersight_base_url=intersight_base_url,
tags=vsan_policy_tags,
preconfigured_api_client=main_intersight_api_client,
ucs_domain_profile_name=ucs_domain_profile_name,
fabric_interconnect=vsan_policy_fabric_interconnect
)
# Policy Maker completion
print(f"\nThe {maker_type} has completed.\n")
if __name__ == "__main__":
main()
# Exiting the Policy Maker for Cisco Intersight
sys.exit(0)
|
import uuid
import traceback
from flask import redirect
from lyrebird import application
from lyrebird.version import VERSION
from flask_restful import Resource, request
from urllib.parse import urlencode
from lyrebird.mock import context
from lyrebird.log import get_logger
logger = get_logger()
class SanpshotImport(Resource):
def get(self):
queries = request.args
url = queries.get('path')
application.snapshot_import_uri = url
application.active_menu = {
'name': 'datamanager',
'title': 'DataManager',
'type': 'router',
'path': '/datamanager'
}
if not url:
return redirect(f"/ui/?v={VERSION}#/datamanager/import")
# is advanced save
if queries.get('isAdvancedSave') == 'true':
return redirect(f"/ui/?v={VERSION}#/datamanager/import")
new_query = {}
# auto import into parent
try:
info = context.application.data_manager.decompress_snapshot()
except Exception:
new_query['errorMsg'] = f'Import snapshot error!'
new_query_str = urlencode(new_query)
logger.error(f'Import snapshot error!\n {traceback.format_exc()}')
return redirect(f"http://localhost:9090/ui/?v={VERSION}#/datamanager?{new_query_str}")
tmp_snapshot_file_list = [
info['snapshot_storage_path'],
f'{info["snapshot_storage_path"]}.lb'
]
context.application.data_manager.remove_tmp_snapshot_file(tmp_snapshot_file_list)
group_name = info['snapshot_detail']['name']
parent_path = queries.get('parent') or '/'
parent_id = context.application.data_manager.add_group_by_path(parent_path)
try:
group_id = context.application.data_manager.import_snapshot(parent_id, group_name)
new_query['groupId'] = group_id
except Exception:
new_query['errorMsg'] = f'Import snapshot error: Snapshot {group_name} is broken!'
new_query_str = urlencode(new_query)
logger.error(f'Import snapshot error!\n {traceback.format_exc()}')
return redirect(f"http://localhost:9090/ui/?v={VERSION}#/datamanager?{new_query_str}")
# auto active
if queries.get('isAutoActive') == 'true':
context.application.data_manager.deactivate()
context.application.data_manager.activate(group_id)
display_info = queries.get('displayKey', '')
if display_info:
new_query['displayKey'] = display_info
new_query_str = urlencode(new_query)
return redirect(f"/ui/?v={VERSION}#/datamanager?{new_query_str}")
def post(self):
if request.json:
parent_id = request.json.get('parentId')
name = request.json.get('snapshotName', '')
if not parent_id:
return application.make_fail_response('parent_id is required!')
context.application.data_manager.import_snapshot(parent_id, name)
elif request.files:
parent_id = request.form.get('parent_id') if request.form else ''
if not parent_id:
return application.make_fail_response('parent_id is required!')
stream = request.files['file']
if not stream:
return application.make_fail_response('file is required!')
filename = stream.filename or str(uuid.uuid4())
path = application._cm.ROOT / 'snapshot' / filename
if path.suffix != '.lb':
return application.make_fail_response(f'Unknown file type `{path.suffix}`, `.lb` is required!')
stream.save(str(path))
application.snapshot_import_uri = f'file://{str(path)}'
name = path.stem
context.application.data_manager.import_snapshot(parent_id, name, path=path)
return application.make_ok_response()
class SnapShotImportDetail(Resource):
def get(self):
info = context.application.data_manager.decompress_snapshot()
tmp_snapshot_file_list = [
info['snapshot_storage_path'],
f'{info["snapshot_storage_path"]}.lb'
]
context.application.data_manager.remove_tmp_snapshot_file(tmp_snapshot_file_list)
detail = info['snapshot_detail']
detail.pop('children')
return application.make_ok_response(data=detail)
|
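# --- Illustrative client-side sketch (not part of the Lyrebird sources above). ---
# It shows how the GET handler of SanpshotImport could be driven with the query
# parameters it reads (path, parent, isAutoActive, displayKey). The route
# '/api/snapshot/import' and the host/port are assumptions for illustration only;
# use whatever URL the resource is actually registered under.
import requests

def import_snapshot_via_api(snapshot_uri, parent_path='/', auto_active=True,
                            base='http://localhost:9090/api/snapshot/import'):
    params = {
        'path': snapshot_uri,                               # URI of the .lb snapshot
        'parent': parent_path,                              # target group path
        'isAutoActive': 'true' if auto_active else 'false',
    }
    # The handler answers with a redirect to the UI; don't follow it here.
    resp = requests.get(base, params=params, allow_redirects=False)
    return resp.status_code, resp.headers.get('Location')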
import discord, csv, json, random, re
from discord.ext import commands
from .. import converters, embeds, services, utils, views
from ..artifacts import Source
class HeraldryMisc(utils.MeldedCog, name = "General", category = "Heraldry"):
MOTTO_PARTS = re.compile("([&|!]\\w\\w\\w)")
RAND_SUB = re.compile("\n|\t")
def __init__(self, bot):
self.bot = bot
@commands.command(
help = "Displays a random historical heraldic artifact.\n"
"This can be narrowed down to an individual source:\n\n"
f"{Source.str_list()}",
aliases = ("ar", "relic")
)
@utils.trigger_typing
async def artifact(self, ctx, source = "all"):
if source == "all":
museum = Source.random()
elif source in Source.register:
museum = Source.register[source]
else:
raise utils.CustomCommandError(
"Invalid artifact source",
"Check your spelling and try again."
)
result = await museum.retrieve(ctx.bot)
title = discord.utils.escape_markdown(result[1])
embed = embeds.SEARCH_RESULT.create(title, result[2], heading = "Random artifact")
embed.url = result[0]
embed.set_footer(text = f"{result[4]} via {museum.desc}" if result[4] else museum.desc)
if result[3]: embed.set_image(url = result[3])
await ctx.send(embed = embed)
@commands.command(
name = "catalog",
help = "Looks up a term in DrawShield's repository of charges.\nCode © Karl Wilcox",
aliases = ("charge", "ca")
)
@utils.trigger_typing
async def ds_catalog(self, ctx, *, charge):
catalog = await services.ds_catalog(self.bot.session, charge)
if catalog is None: raise utils.CustomCommandError(
"Invalid catalog item",
"Check your spelling and try again."
)
embed = embeds.SEARCH_RESULT.create(
f"Catalog entry for \"{charge}\"",
catalog[1] if len(catalog) > 1 else "",
heading = "DrawShield catalog"
)
embed.set_image(url = catalog[0])
embed.set_footer(text=f"Retrieved using DrawShield; © Karl Wilcox. ")
await ctx.send(embed=embed)
@commands.command(
name = "challenge",
help = "Displays a random image using the DrawShield API.\nDesigned to serve as an"
" emblazonment challenge using DrawShield. Code © Karl Wilcox; images © coadb,"
" The Book of Public Arms, Wikimedia Commons contributors (individual sources"
" can be selected via *coadb*, *public*, and *wikimedia* respectively).",
aliases = ("ch", "cl")
)
@utils.trigger_typing
async def ds_challenge(self, ctx, source = "all"):
url = await utils.get_json(self.bot.session, f"https://drawshield.net/api/challenge/{source}")
if isinstance(url, dict) and "error" in url:
raise utils.CustomCommandError(
"Invalid challenge category",
f"Type `{ctx.clean_prefix}help challenge` to see the available categories."
)
embed = embeds.GENERIC.create("", "Try emblazoning this using DrawShield!", heading = "Random image")
embed.url = url
embed.set_footer(text = "Retrieved using DrawShield; © Karl Wilcox. ")
if url.startswith("https://commons.wikimedia.org"):
result = await services.commons(
self.bot.session, self.bot.loop, url.removeprefix("https://commons.wikimedia.org//wiki/")
)
embed.set_image(url = result.find("urls").find("thumbnail").text)
else: embed.set_image(url = url)
await ctx.send(embed = embed)
@commands.command(
help = "Illustrates arms using DrawShield.\nNote that DrawShield does not support"
" all possible blazons. Code © Karl Wilcox",
aliases = ("ds",)
)
@utils.trigger_typing
async def drawshield(self, ctx, *, blazon : str):
embed, file = await services.ds(self.bot.session, blazon, "Shield")
await ctx.send(embed = embed, file = file)
@commands.command(
help = "Generates a coat of arms based on personal details.\n If using in a DM, it is based"
" on your name and birthday; for privacy reasons, it is random otherwise. Based on a"
" chart by Snak and James.",
aliases = ("gen", "g")
)
async def generate(self, ctx):
with open("data/generator.json") as file:
parts = json.load(file)
results = {}
tinctures = ("colour", "metal")
result_tinctures = ("field", "background", "foreground")
if isinstance(ctx.channel, discord.abc.GuildChannel):
for category in parts.keys():
if category in ("colour", "metal", "fur"): continue
results[category] = random.choice(list(parts[category].values()))
if bool(random.getrandbits(1)):
tinctures = tinctures[::-1]
for i, result in enumerate(result_tinctures):
tincture = tinctures[0] if i % 2 else tinctures[1]
if tincture == "colour" and random.randrange(10) == 5: tincture = "fur"
results[result] = random.choice(list(parts[tincture].values()))
else:
def get_letter_val(letter, category):
for letters, value in category.items():
if letter.upper() in letters: return value
raise utils.BadMessageResponse("Invalid value")
added_check = lambda m: m.content in parts["charge"].keys()
message = await views.RespondOrReact(ctx, added_check = added_check).run(
"This command generates a blazon from a few details.\n"
"To start with, give me a short name of a **day**, then a **month**, like 8 Apr."
)
results["charge"] = parts["charge"][message.content]
await ctx.send("Okay. Now tell me the **first letter** of a **first name**.")
message = await utils.hard_check(ctx, lambda m: len(m.content) == 1 and m.content.isalpha())
results["ordinary"] = get_letter_val(message.content, parts["ordinary"])
await ctx.send("Great. Now tell me the **amount** of letters in that person's **last name**.")
message = await utils.hard_check(ctx, lambda m: m.content.isnumeric())
if int(message.content) % 2 == 0:
tinctures = tinctures[::-1]
await ctx.send("Thanks! Now, give me the **first three letters** of that **last name**.")
message = await utils.hard_check(ctx, lambda m: len(m.content) == 3 and m.content.isalpha())
letters = message.content
await ctx.send("And finally, give me the **last two letters** of the **first name**.")
message = await utils.hard_check(ctx, lambda m: len(m.content) == 2 and m.content.isalpha())
letters += message.content
pos = -1
for i, result in enumerate(result_tinctures):
pos = 4 if i == 2 else pos + 1
tincture = tinctures[0] if i % 2 else tinctures[1]
if tincture == "colour":
adjacent = pos - 1 if pos == 4 else pos + 1
if letters[adjacent] == letters[pos]:
tincture = "fur"
pos = adjacent
results[result] = get_letter_val(letters[pos], parts[tincture])
embed = embeds.GENERIC.create("", "", heading = "Generated blazon")
embed.set_footer(text = "Generator based on a chart by Snak and James.")
embed.title = f"*{results["field"].capitalize()}, on {utils.pronounise(results["ordinary"])}"\
f" {results["background"]} {utils.pronounise(results["charge"].lower())}"\
f" {results["foreground"]}*"
await ctx.send(embed = embed)
@commands.command(
help = "Generates a motto randomly.\nThe included functionality has several"
" advancements over previous motto generators.",
aliases = ("mt", "mot")
)
@utils.trigger_typing
async def motto(self, ctx):
with open("data/mottoparts.json") as file:
parts = json.load(file)
percent = random.randrange(1,100)
partlist = parts["templates"]["uni"] #1-20%
if percent > 20 and percent < 51:
partlist = parts["templates"]["nou"]
elif percent > 50 and percent < 71:
partlist = parts["templates"]["adj"]
elif percent > 70:
partlist = parts["templates"]["ver"]
parts["uni_resolve"] = random.choice(["nou","adj"])
parts["last_key"] = ""
def chooseTerm(match):
term_kind = match[0]
if "uni" in term_kind:
term_kind = term_kind[:1] + parts["uni_resolve"]
term_list = parts["terms"][term_kind]
if (parts["last_key"] != "1" and bool(random.getrandbits(1))
and parts["last_key"] in term_list):
#1 in 2 chance of choosing related terms for a non-initial item
result = parts["last_key"]
else:
result = random.choice(list(term_list.keys()))
parts["last_key"] = result
return term_list.pop(result)
template = random.choice(partlist)
motto = re.sub(self.MOTTO_PARTS, chooseTerm, template).capitalize()
await ctx.send(embed = embeds.GENERIC.create(f"{motto}", "", heading = "Motto generator"))
@commands.command(
help = "Randomly selects a motto from a list of over 400.\n"
"These include countries, heads of state, and universities",
aliases = ("rmot", "rm")
)
async def randmotto(self, ctx):
with open("data/mottoes.csv") as file:
row = random.choice(list(csv.reader(file, delimiter=";")))
embed = embeds.SEARCH_RESULT.create(
f"{row[1]}",
f"**{row[0]}**",
heading = "Random motto"
)
if row[2].strip(" ") != "English":
embed.description += f"\n*{row[3].strip(" ")}* ({row[2].strip(" ")})"
await ctx.send(embed=embed)
@commands.command(
name = "random",
help = "Generates random arms using the DrawShield API.\nCode © Karl Wilcox.",
aliases = ("ra",)
)
@utils.trigger_typing
async def ds_random(self, ctx):
blazon = await utils.get_text(self.bot.session, "https://drawshield.net/include/randomblazon.php")
blazon = re.sub(self.RAND_SUB, " ", blazon.removesuffix("created by Drawshield.net/random\n")).strip()
embed, file = await services.ds(self.bot.session, blazon, "Random shield")
await ctx.send(embed = embed, file = file)
def setup(bot):
bot.add_cog(HeraldryMisc(bot)) | import discord, csv, json, random, re
from discord.ext import commands
from .. import converters, embeds, services, utils, views
from ..artifacts import Source
class HeraldryMisc(utils.MeldedCog, name = "General", category = "Heraldry"):
MOTTO_PARTS = re.compile("([&|!]\\w\\w\\w)")
RAND_SUB = re.compile("\n|\t")
def __init__(self, bot):
self.bot = bot
@commands.command(
help = "Displays a random historical heraldic artifact.\n"
"This can be narrowed down to an individual source:\n\n"
f"{Source.str_list()}",
aliases = ("ar", "relic")
)
@utils.trigger_typing
async def artifact(self, ctx, source = "all"):
if source == "all":
museum = Source.random()
elif source in Source.register:
museum = Source.register[source]
else:
raise utils.CustomCommandError(
"Invalid artifact source",
"Check your spelling and try again."
)
result = await museum.retrieve(ctx.bot)
title = discord.utils.escape_markdown(result[1])
embed = embeds.SEARCH_RESULT.create(title, result[2], heading = "Random artifact")
embed.url = result[0]
embed.set_footer(text = f"{result[4]} via {museum.desc}" if result[4] else museum.desc)
if result[3]: embed.set_image(url = result[3])
await ctx.send(embed = embed)
@commands.command(
name = "catalog",
help = "Looks up a term in DrawShield's repository of charges.\nCode © Karl Wilcox",
aliases = ("charge", "ca")
)
@utils.trigger_typing
async def ds_catalog(self, ctx, *, charge):
catalog = await services.ds_catalog(self.bot.session, charge)
if catalog is None: raise utils.CustomCommandError(
"Invalid catalog item",
"Check your spelling and try again."
)
embed = embeds.SEARCH_RESULT.create(
f"Catalog entry for \"{charge}\"",
catalog[1] if len(catalog) > 1 else "",
heading = "DrawShield catalog"
)
embed.set_image(url = catalog[0])
embed.set_footer(text=f"Retrieved using DrawShield; © Karl Wilcox. ")
await ctx.send(embed=embed)
@commands.command(
name = "challenge",
help = "Displays a random image using the DrawShield API.\nDesigned to serve as an"
" emblazonment challenge using DrawShield. Code © Karl Wilcox; images © coadb,"
" The Book of Public Arms, Wikimedia Commons contributors (individual sources"
" can be selected via *coadb*, *public*, and *wikimedia* respectively).",
aliases = ("ch", "cl")
)
@utils.trigger_typing
async def ds_challenge(self, ctx, source = "all"):
url = await utils.get_json(self.bot.session, f"https://drawshield.net/api/challenge/{source}")
if isinstance(url, dict) and "error" in url:
raise utils.CustomCommandError(
"Invalid challenge category",
f"Type `{ctx.clean_prefix}help challenge` to see the available categories."
)
embed = embeds.GENERIC.create("", "Try emblazoning this using DrawShield!", heading = "Random image")
embed.url = url
embed.set_footer(text = "Retrieved using DrawShield; © Karl Wilcox. ")
if url.startswith("https://commons.wikimedia.org"):
result = await services.commons(
self.bot.session, self.bot.loop, url.removeprefix("https://commons.wikimedia.org//wiki/")
)
embed.set_image(url = result.find("urls").find("thumbnail").text)
else: embed.set_image(url = url)
await ctx.send(embed = embed)
@commands.command(
help = "Illustrates arms using DrawShield.\nNote that DrawShield does not support"
" all possible blazons. Code © Karl Wilcox",
aliases = ("ds",)
)
@utils.trigger_typing
async def drawshield(self, ctx, *, blazon : str):
embed, file = await services.ds(self.bot.session, blazon, "Shield")
await ctx.send(embed = embed, file = file)
@commands.command(
help = "Generates a coat of arms based on personal details.\n If using in a DM, it is based"
" on your name and birthday; for privacy reasons, it is random otherwise. Based on a"
" chart by Snak and James.",
aliases = ("gen", "g")
)
async def generate(self, ctx):
with open("data/generator.json") as file:
parts = json.load(file)
results = {}
tinctures = ("colour", "metal")
result_tinctures = ("field", "background", "foreground")
if isinstance(ctx.channel, discord.abc.GuildChannel):
for category in parts.keys():
if category in ("colour", "metal", "fur"): continue
results[category] = random.choice(list(parts[category].values()))
if bool(random.getrandbits(1)):
tinctures = tinctures[::-1]
for i, result in enumerate(result_tinctures):
tincture = tinctures[0] if i % 2 else tinctures[1]
if tincture == "colour" and random.randrange(10) == 5: tincture = "fur"
results[result] = random.choice(list(parts[tincture].values()))
else:
def get_letter_val(letter, category):
for letters, value in category.items():
if letter.upper() in letters: return value
raise utils.BadMessageResponse("Invalid value")
added_check = lambda m: m.content in parts["charge"].keys()
message = await views.RespondOrReact(ctx, added_check = added_check).run(
"This command generates a blazon from a few details.\n"
"To start with, give me a short name of a **day**, then a **month**, like 8 Apr."
)
results["charge"] = parts["charge"][message.content]
await ctx.send("Okay. Now tell me the **first letter** of a **first name**.")
message = await utils.hard_check(ctx, lambda m: len(m.content) == 1 and m.content.isalpha())
results["ordinary"] = get_letter_val(message.content, parts["ordinary"])
await ctx.send("Great. Now tell me the **amount** of letters in that person's **last name**.")
message = await utils.hard_check(ctx, lambda m: m.content.isnumeric())
if int(message.content) % 2 == 0:
tinctures = tinctures[::-1]
await ctx.send("Thanks! Now, give me the **first three letters** of that **last name**.")
message = await utils.hard_check(ctx, lambda m: len(m.content) == 3 and m.content.isalpha())
letters = message.content
await ctx.send("And finally, give me the **last two letters** of the **first name**.")
message = await utils.hard_check(ctx, lambda m: len(m.content) == 2 and m.content.isalpha())
letters += message.content
pos = -1
for i, result in enumerate(result_tinctures):
pos = 4 if i == 2 else pos + 1
tincture = tinctures[0] if i % 2 else tinctures[1]
if tincture == "colour":
adjacent = pos - 1 if pos == 4 else pos + 1
if letters[adjacent] == letters[pos]:
tincture = "fur"
pos = adjacent
results[result] = get_letter_val(letters[pos], parts[tincture])
embed = embeds.GENERIC.create("", "", heading = "Generated blazon")
embed.set_footer(text = "Generator based on a chart by Snak and James.")
embed.title = f"*{results['field'].capitalize()}, on {utils.pronounise(results['ordinary'])}"\
f" {results['background']} {utils.pronounise(results['charge'].lower())}"\
f" {results['foreground']}*"
await ctx.send(embed = embed)
@commands.command(
help = "Generates a motto randomly.\nThe included functionality has several"
" advancements over previous motto generators.",
aliases = ("mt", "mot")
)
@utils.trigger_typing
async def motto(self, ctx):
with open("data/mottoparts.json") as file:
parts = json.load(file)
percent = random.randrange(1,100)
partlist = parts["templates"]["uni"] #1-20%
if percent > 20 and percent < 51:
partlist = parts["templates"]["nou"]
elif percent > 50 and percent < 71:
partlist = parts["templates"]["adj"]
elif percent > 70:
partlist = parts["templates"]["ver"]
parts["uni_resolve"] = random.choice(["nou","adj"])
parts["last_key"] = ""
def chooseTerm(match):
term_kind = match[0]
if "uni" in term_kind:
term_kind = term_kind[:1] + parts["uni_resolve"]
term_list = parts["terms"][term_kind]
if (parts["last_key"] != "1" and bool(random.getrandbits(1))
and parts["last_key"] in term_list):
#1 in 2 chance of choosing related terms for a non-initial item
result = parts["last_key"]
else:
result = random.choice(list(term_list.keys()))
parts["last_key"] = result
return term_list.pop(result)
template = random.choice(partlist)
motto = re.sub(self.MOTTO_PARTS, chooseTerm, template).capitalize()
await ctx.send(embed = embeds.GENERIC.create(f"{motto}", "", heading = "Motto generator"))
@commands.command(
help = "Randomly selects a motto from a list of over 400.\n"
"These include countries, heads of state, and universities",
aliases = ("rmot", "rm")
)
async def randmotto(self, ctx):
with open("data/mottoes.csv") as file:
row = random.choice(list(csv.reader(file, delimiter=";")))
embed = embeds.SEARCH_RESULT.create(
f"{row[1]}",
f"**{row[0]}**",
heading = "Random motto"
)
if row[2].strip(" ") != "English":
embed.description += f"\n*{row[3].strip(' ')}* ({row[2].strip(' ')})"
await ctx.send(embed=embed)
@commands.command(
name = "random",
help = "Generates random arms using the DrawShield API.\nCode © Karl Wilcox.",
aliases = ("ra",)
)
@utils.trigger_typing
async def ds_random(self, ctx):
blazon = await utils.get_text(self.bot.session, "https://drawshield.net/include/randomblazon.php")
blazon = re.sub(self.RAND_SUB, " ", blazon.removesuffix("created by Drawshield.net/random\n")).strip()
embed, file = await services.ds(self.bot.session, blazon, "Random shield")
await ctx.send(embed = embed, file = file)
def setup(bot):
bot.add_cog(HeraldryMisc(bot)) |
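# --- Standalone sketch (not part of the cog above) of the token-substitution idea
# behind the `motto` command: placeholders matched by ([&|!]\w\w\w), e.g. "&nou" or
# "|adj", are swapped for entries from per-kind term pools via re.sub() with a
# callable replacement. The `parts` pools below are invented for illustration; the
# real generator keys its pools on the full token (including the leading symbol),
# avoids immediate repeats, and loads everything from data/mottoparts.json.
import random
import re

TOKEN = re.compile(r"([&|!]\w\w\w)")
parts = {
    "nou": ["honour", "valour", "truth"],
    "adj": ["bold", "steadfast", "bright"],
    "ver": ["endure", "prevail", "serve"],
}

def fill_template(template):
    def choose(match):
        kind = match.group(0)[1:]  # drop the leading &, | or !
        return random.choice(parts[kind])
    return re.sub(TOKEN, choose, template).capitalize()

print(fill_template("&adj and &nou"))  # e.g. "Bold and honour"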
import sys
import shlex
import logging
import threading
import urwid
import logzero
from logzero import logger
from typing import Any, Iterable, List, Optional, Dict, TYPE_CHECKING
from .model import Model
from .view import View
from ..extra.player import PlayerEvent, BasePlayer
from ..extra.utils import load_playlist, sec2ts, ts2sec, shorten_msg
if TYPE_CHECKING:
from ..extra.plugins import Video, Playlist # noqa: F401
HELP_TEXT = r'''
__ __ _ _
\ \ / / _ __| (_) __ _
\ \ / / | | |/ _` | |/ _` |
\ V /| |_| | (_| | | (_| |
\_/ \__, |\__,_|_|\__,_|
|___/
Help Page
=========
Summon the internal commandline by typing `:`.
Press `[TAB]` for autocomplete.
The following commands are supported (in the correct context):
* Playlist View:
* `add <playlist id>`: add given playlist
* `delete`: delete currently selected playlist
* `quit`: quit Vydia (`[q]`)
* Episode View:
* `pause`: toggle pause in running episode (`<space>`)
* `info`: show video-related information (`i`)
* `reload`: reload playlist using plugin
* `reverse`: reverse episode order
* `shuffle`: shuffle episode order
* `next`: play next video (`[>]`)
* `previous`: play previous video (`[<]`)
* `continue`: continue playback from last save (`[c]`)
* `quit`: quit Vydia (`[q]`)
Furthermore, the following shortcuts exist:
* Episode View:
* `w`: (un)mark currently selected video as watched
Also, try executing `vydia --help`.
'''
class Controller:
def __init__(
self,
player_backend: BasePlayer, config: Dict[str, Any]
) -> None:
self.config = config
self.player_backend = player_backend
self.player_backend.set_controller(self)
self.current_playlist = None # type: Optional[str]
self.input_callback = None
self.player = None # type: Optional[PlayerQueue]
self.model = Model()
self.view = View(self)
self.loop = urwid.MainLoop(
self.view, unhandled_input=self._unhandled_input,
palette=[('reversed', 'standout', '')])
self._setup_logging()
def __enter__(self) -> 'Controller':
logger.info(f'Create controller')
return self
def __exit__(
self, exc_type: Any, exc_value: Any, traceback: Any
) -> None:
self.save_state()
if self.player is not None:
self.player_backend.shutdown()
logger.info(f'Destroy controller')
def _setup_logging(self) -> None:
# init logzero
logzero.loglevel(logging.WARNING)
logzero.logfile(
self.model.LOG_FILE,
maxBytes=1e6, backupCount=3)
# enforce logging of unhandled exceptions
def handle_exception(exc_type, exc_value, exc_traceback):
logger.error(
'Uncaught exception',
exc_info=(exc_type, exc_value, exc_traceback))
sys.excepthook = handle_exception
def main(self) -> None:
self.view.show_playlist_overview()
self.loop.run()
def _unhandled_input(self, key: str) -> None:
if key in ('Q', 'q', 'esc'):
raise urwid.ExitMainLoop()
elif key == ':':
self.view.show_cmdline()
return None
elif key == 'h':
self.show_helpscreen()
return None
def handle_cmdline_input(self, msg: str) -> None:
if len(msg) == 0:
return
if msg.lower() in ('q', 'quit'):
raise urwid.ExitMainLoop()
if self.view.widget is not None:
cmd, *args = shlex.split(msg)
logger.info(f'Executing command {cmd} with "{args}"')
self.view.widget.handle_command(cmd, args=args)
def on_playlist_selected(self, playlist_id: str) -> None:
self.current_playlist = playlist_id
logger.info(f'Selected playlist {self.current_playlist}')
self.view.show_episode_overview()
self._init_player()
def on_video_selected(self, video_display_name: str) -> None:
if self.player is None:
raise RuntimeError('Player was not instantiated')
if self.player.playlist is None or self.player.item_list is None:
raise RuntimeError('Player\'s playlist was not instantiated')
video_index = self.player.item_list.index(
video_display_name)
vid = self.player.playlist[video_index]
logger.info(f'Selected video {vid.title}')
self.send_msg(f'Loading video ({vid.title})')
if vid is not None:
self.player.play_video(vid)
else:
raise RuntimeError(f'Could not find video "{vid.title}"')
def get_playlist_list(self) -> Iterable[str]:
return self.model.get_playlist_list()
def get_current_playlist_info(self) -> Dict[str, str]:
if self.current_playlist is None:
raise RuntimeError('Current playlist is not set')
return self.model.get_playlist_info(self.current_playlist)
def continue_playback(self) -> None:
if self.player is None:
raise RuntimeError('Player was not instantiated')
if self.player.playlist is None:
raise RuntimeError('Player\'s playlist was not instantiated')
if self.current_playlist is None:
raise RuntimeError('Current playlist is not set')
logger.info(f'Continue playback')
_cur = self.model.get_current_video(self.current_playlist)
if _cur is None:
self.send_msg('Nothing to resume...')
return
i, vid = self.player.playlist.get_video_by_title(_cur['title'])
self.send_msg(f'Resuming "{_cur['title']}" at {_cur['timestamp']}')
if vid is not None:
self.player.play_video(vid, ts2sec(_cur['timestamp']))
else:
raise RuntimeError(f'Could not find video "{_cur['title']}"')
def mark_watched(self, entry_idx: int) -> None:
assert self.player is not None
assert self.player.playlist is not None
assert self.current_playlist is not None
vid = self.player.playlist[entry_idx]
assert vid is not None
# mark as unwatched if already watched
new_ts = sec2ts(vid.duration)
_state = self.model.get_playlist_info(self.current_playlist)
if vid.title in _state['episodes']:
if _state['episodes'][vid.title]['current_timestamp'] == new_ts:
new_ts = sec2ts(0)
self.model.update_state(
self.current_playlist, {
'episodes': {
vid.title: {
'current_timestamp': new_ts
}
}
})
self.player.setup(reload_playlist=False)
def show_video_info(self, entry_idx: int) -> None:
assert self.player is not None
assert self.player.playlist is not None
vid = self.player.playlist[entry_idx]
assert vid is not None
self.view.show_long_text(vid.get_info(), exit_key='i')
def show_helpscreen(self) -> None:
self.view.show_long_text(HELP_TEXT, exit_key='h')
def _init_player(self) -> None:
self.player = PlayerQueue(self)
self.player.setup(reset_position=True)
def save_state(self) -> None:
if self.player is not None:
logger.info('Explicit state save')
assert self.current_playlist is not None
# update current video
if self.player.current_vid is not None:
assert self.player.ts is not None
self.model.update_state(
self.current_playlist, {
'current': {
'title': self.player.current_vid.title,
'timestamp': sec2ts(self.player.ts)
},
'episodes': {
self.player.current_vid.title: {
'current_timestamp': sec2ts(self.player.ts)
}
}
})
def assemble_info_box(self) -> None:
if self.current_playlist is None:
raise RuntimeError('Current playlist is not set')
logger.info('Assembling info box')
_cur = self.model.get_current_video(self.current_playlist)
if _cur is not None:
txt = f'Resume: "{_cur['title']}" ({_cur['timestamp']})'
else:
txt = 'Nothing to resume'
assert self.view.widget is not None, 'Widget has not been assembled'
self.view.widget.update_info_box(txt)
def send_msg(self, msg: str) -> None:
assert self.view.widget is not None, 'Widget has not been assembled'
self.view.widget.update_info_text(msg)
def update_views(self) -> None:
if not self.loop.screen.started:
return None
self.loop.draw_screen()
class PlayerQueue:
def __init__(self, controller: Controller) -> None:
self.controller = controller
self.controller.player_backend.setup(
self.handle_mpv_pos, self.handle_mpv_event,
disable_video=not self.controller.config['show_video'])
if self.controller.current_playlist is None:
raise RuntimeError('Current playlist is not set')
self.id = self.controller.model.get_playlist_info(
self.controller.current_playlist)['id']
self.playlist = None # type: Optional['Playlist']
self.current_vid = None # type: Optional['Video']
self.ts = None # type: Optional[int]
self.item_list = None # type: Optional[List[str]]
def setup(
self,
reload_playlist: bool = True, reset_position: bool = False
) -> None:
def tmp() -> None:
if reload_playlist:
self.controller.send_msg('Loading...')
plugin_name, self.playlist = load_playlist(self.id)
self.controller.send_msg(
f'Loaded playlist with {plugin_name}')
else:
assert self.playlist is not None, \
'Playlist has not been loaded'
state = self.controller.model._load_state()
playlist_state = state[self.controller.current_playlist]
v = self.controller.view.widget
assert v is not None, 'Widget has not been assembled'
# adjust video title display
total_video_ts = 0
self.item_list = []
cols, _ = self.controller.loop.screen.get_cols_rows()
for vid in self.playlist:
vid_tit = vid.title
vid_len = vid.duration
if vid_tit in playlist_state['episodes']:
vid_info = playlist_state['episodes'][vid_tit]
vid_ts = ts2sec(vid_info['current_timestamp'])
else:
vid_ts = 0
total_video_ts += vid_ts
vid_perc = round((vid_ts / vid_len) * 100) \
if vid_len > 0 else 0
vid_perc = min(vid_perc, 100)
vid_tit = shorten_msg(vid_tit, cols-20)
spaces = ' ' * (cols - len(vid_tit) - 19)
cur = f'{vid_tit}{spaces} {sec2ts(vid_len):<10}{vid_perc:>3}%'
self.item_list.append(cur)
v.set_items(self.item_list)
# set episode-view title
total_video_perc = round(
(total_video_ts / self.playlist.duration) * 100) \
if self.playlist.duration > 0 else 0
total_video_perc = min(total_video_perc, 100)
pl_tit = shorten_msg(self.playlist.title, cols-20)
spaces = ' ' * (cols - len(pl_tit) - 17)
v.set_title(
f'{pl_tit}{spaces} '
f'{sec2ts(self.playlist.duration):<10}'
f'{total_video_perc:>3}%')
self.controller.assemble_info_box()
# set list focus to the video that was played last
if reset_position:
assert self.controller.current_playlist is not None
_cur = self.controller.model.get_current_video(
self.controller.current_playlist)
if _cur is not None:
idx, _ = self.playlist.get_video_by_title(_cur['title'])
v.vid_list.set_focus(idx)
self.controller.update_views()
t = threading.Thread(target=tmp)
t.start()
def handle_mpv_pos(self, pos: float) -> None:
assert self.current_vid is not None
if pos is not None:
self.ts = int(pos)
assert self.ts is not None
self.controller.send_msg(
f'Playing "{self.current_vid.title}" ({sec2ts(self.ts)})')
def handle_mpv_event(self, ev: PlayerEvent) -> None:
if ev is PlayerEvent.VIDEO_OVER:
self.onVideoEnd(play_next=True)
elif ev is PlayerEvent.VIDEO_QUIT:
self.controller.send_msg('Waiting for input')
self.onVideoEnd()
def onVideoEnd(self, play_next: bool = False) -> None:
self.controller.assemble_info_box()
if play_next:
self.play_next_video()
self.setup(reload_playlist=False)
def play_video(self, vid: 'Video', start_pos: int = 0) -> None:
self.controller.save_state()
self.ts = start_pos
self.current_vid = vid
self.controller.player_backend.play_video(
vid.get_file_stream(), vid.title,
start=start_pos)
if self.controller.config['show_titles']:
self.controller.player_backend.display_text(
vid.title, min(3000, vid.duration*1000))
def play_next_video(self) -> None:
if self.current_vid is None:
self.controller.send_msg('No video selected, cannot play next')
return
vid = self._get_video_relative(1)
if vid is None:
self.controller.send_msg('Reached end of playlist')
else:
self.play_video(vid)
def play_previous_video(self) -> None:
if self.current_vid is None:
self.controller.send_msg('No video selected, cannot play previous')
return
vid = self._get_video_relative(-1)
if vid is None:
self.controller.send_msg('Reached end of playlist')
else:
self.play_video(vid)
def _get_video_relative(self, idx_shift: int) -> Optional['Video']:
assert self.playlist is not None
assert self.current_vid is not None
idx, _ = self.playlist.get_video_by_title(self.current_vid.title)
if idx is None:
raise RuntimeError(
f'Could not find video "{self.current_vid.title}"')
next_idx = idx + idx_shift
if next_idx < len(self.playlist):
return self.playlist[next_idx]
else:
return None
| import sys
import shlex
import logging
import threading
import urwid
import logzero
from logzero import logger
from typing import Any, Iterable, List, Optional, Dict, TYPE_CHECKING
from .model import Model
from .view import View
from ..extra.player import PlayerEvent, BasePlayer
from ..extra.utils import load_playlist, sec2ts, ts2sec, shorten_msg
if TYPE_CHECKING:
from ..extra.plugins import Video, Playlist # noqa: F401
HELP_TEXT = r'''
__ __ _ _
\ \ / / _ __| (_) __ _
\ \ / / | | |/ _` | |/ _` |
\ V /| |_| | (_| | | (_| |
\_/ \__, |\__,_|_|\__,_|
|___/
Help Page
=========
Summon the internal commandline by typing `:`.
Press `[TAB]` for autocomplete.
The following commands are supported (in the correct context):
* Playlist View:
* `add <playlist id>`: add given playlist
* `delete`: delete currently selected playlist
* `quit`: quit Vydia (`[q]`)
* Episode View:
* `pause`: toggle pause in running episode (`<space>`)
* `info`: show video-related information (`i`)
* `reload`: reload playlist using plugin
* `reverse`: reverse episode order
* `shuffle`: shuffle episode order
* `next`: play next video (`[>]`)
* `previous`: play previous video (`[<]`)
* `continue`: continue playback from last save (`[c]`)
* `quit`: quit Vydia (`[q]`)
Furthermore, the following shortcuts exist:
* Episode View:
* `w`: (un)mark currently selected video as watched
Also, try executing `vydia --help`.
'''
class Controller:
def __init__(
self,
player_backend: BasePlayer, config: Dict[str, Any]
) -> None:
self.config = config
self.player_backend = player_backend
self.player_backend.set_controller(self)
self.current_playlist = None # type: Optional[str]
self.input_callback = None
self.player = None # type: Optional[PlayerQueue]
self.model = Model()
self.view = View(self)
self.loop = urwid.MainLoop(
self.view, unhandled_input=self._unhandled_input,
palette=[('reversed', 'standout', '')])
self._setup_logging()
def __enter__(self) -> 'Controller':
logger.info(f'Create controller')
return self
def __exit__(
self, exc_type: Any, exc_value: Any, traceback: Any
) -> None:
self.save_state()
if self.player is not None:
self.player_backend.shutdown()
logger.info(f'Destroy controller')
def _setup_logging(self) -> None:
# init logzero
logzero.loglevel(logging.WARNING)
logzero.logfile(
self.model.LOG_FILE,
maxBytes=1e6, backupCount=3)
# enforce logging of unhandled exceptions
def handle_exception(exc_type, exc_value, exc_traceback):
logger.error(
'Uncaught exception',
exc_info=(exc_type, exc_value, exc_traceback))
sys.excepthook = handle_exception
def main(self) -> None:
self.view.show_playlist_overview()
self.loop.run()
def _unhandled_input(self, key: str) -> None:
if key in ('Q', 'q', 'esc'):
raise urwid.ExitMainLoop()
elif key == ':':
self.view.show_cmdline()
return None
elif key == 'h':
self.show_helpscreen()
return None
def handle_cmdline_input(self, msg: str) -> None:
if len(msg) == 0:
return
if msg.lower() in ('q', 'quit'):
raise urwid.ExitMainLoop()
if self.view.widget is not None:
cmd, *args = shlex.split(msg)
logger.info(f'Executing command {cmd} with "{args}"')
self.view.widget.handle_command(cmd, args=args)
def on_playlist_selected(self, playlist_id: str) -> None:
self.current_playlist = playlist_id
logger.info(f'Selected playlist {self.current_playlist}')
self.view.show_episode_overview()
self._init_player()
def on_video_selected(self, video_display_name: str) -> None:
if self.player is None:
raise RuntimeError('Player was not instantiated')
if self.player.playlist is None or self.player.item_list is None:
raise RuntimeError('Player\'s playlist was not instantiated')
video_index = self.player.item_list.index(
video_display_name)
vid = self.player.playlist[video_index]
logger.info(f'Selected video {vid.title}')
self.send_msg(f'Loading video ({vid.title})')
if vid is not None:
self.player.play_video(vid)
else:
raise RuntimeError(f'Could not find video "{vid.title}"')
def get_playlist_list(self) -> Iterable[str]:
return self.model.get_playlist_list()
def get_current_playlist_info(self) -> Dict[str, str]:
if self.current_playlist is None:
raise RuntimeError('Current playlist is not set')
return self.model.get_playlist_info(self.current_playlist)
def continue_playback(self) -> None:
if self.player is None:
raise RuntimeError('Player was not instantiated')
if self.player.playlist is None:
raise RuntimeError('Player\'s playlist was not instantiated')
if self.current_playlist is None:
raise RuntimeError('Current playlist is not set')
logger.info(f'Continue playback')
_cur = self.model.get_current_video(self.current_playlist)
if _cur is None:
self.send_msg('Nothing to resume...')
return
i, vid = self.player.playlist.get_video_by_title(_cur['title'])
self.send_msg(f'Resuming "{_cur["title"]}" at {_cur["timestamp"]}')
if vid is not None:
self.player.play_video(vid, ts2sec(_cur['timestamp']))
else:
raise RuntimeError(f'Could not find video "{_cur["title"]}"')
def mark_watched(self, entry_idx: int) -> None:
assert self.player is not None
assert self.player.playlist is not None
assert self.current_playlist is not None
vid = self.player.playlist[entry_idx]
assert vid is not None
# mark as unwatched if already watched
new_ts = sec2ts(vid.duration)
_state = self.model.get_playlist_info(self.current_playlist)
if vid.title in _state['episodes']:
if _state['episodes'][vid.title]['current_timestamp'] == new_ts:
new_ts = sec2ts(0)
self.model.update_state(
self.current_playlist, {
'episodes': {
vid.title: {
'current_timestamp': new_ts
}
}
})
self.player.setup(reload_playlist=False)
def show_video_info(self, entry_idx: int) -> None:
assert self.player is not None
assert self.player.playlist is not None
vid = self.player.playlist[entry_idx]
assert vid is not None
self.view.show_long_text(vid.get_info(), exit_key='i')
def show_helpscreen(self) -> None:
self.view.show_long_text(HELP_TEXT, exit_key='h')
def _init_player(self) -> None:
self.player = PlayerQueue(self)
self.player.setup(reset_position=True)
def save_state(self) -> None:
if self.player is not None:
logger.info('Explicit state save')
assert self.current_playlist is not None
# update current video
if self.player.current_vid is not None:
assert self.player.ts is not None
self.model.update_state(
self.current_playlist, {
'current': {
'title': self.player.current_vid.title,
'timestamp': sec2ts(self.player.ts)
},
'episodes': {
self.player.current_vid.title: {
'current_timestamp': sec2ts(self.player.ts)
}
}
})
def assemble_info_box(self) -> None:
if self.current_playlist is None:
raise RuntimeError('Current playlist is not set')
logger.info('Assembling info box')
_cur = self.model.get_current_video(self.current_playlist)
if _cur is not None:
txt = f'Resume: "{_cur["title"]}" ({_cur["timestamp"]})'
else:
txt = 'Nothing to resume'
assert self.view.widget is not None, 'Widget has not been assembled'
self.view.widget.update_info_box(txt)
def send_msg(self, msg: str) -> None:
assert self.view.widget is not None, 'Widget has not been assembled'
self.view.widget.update_info_text(msg)
def update_views(self) -> None:
if not self.loop.screen.started:
return None
self.loop.draw_screen()
class PlayerQueue:
def __init__(self, controller: Controller) -> None:
self.controller = controller
self.controller.player_backend.setup(
self.handle_mpv_pos, self.handle_mpv_event,
disable_video=not self.controller.config['show_video'])
if self.controller.current_playlist is None:
raise RuntimeError('Current playlist is not set')
self.id = self.controller.model.get_playlist_info(
self.controller.current_playlist)['id']
self.playlist = None # type: Optional['Playlist']
self.current_vid = None # type: Optional['Video']
self.ts = None # type: Optional[int]
self.item_list = None # type: Optional[List[str]]
def setup(
self,
reload_playlist: bool = True, reset_position: bool = False
) -> None:
def tmp() -> None:
if reload_playlist:
self.controller.send_msg('Loading...')
plugin_name, self.playlist = load_playlist(self.id)
self.controller.send_msg(
f'Loaded playlist with {plugin_name}')
else:
assert self.playlist is not None, \
'Playlist has not been loaded'
state = self.controller.model._load_state()
playlist_state = state[self.controller.current_playlist]
v = self.controller.view.widget
assert v is not None, 'Widget has not been assembled'
# adjust video title display
total_video_ts = 0
self.item_list = []
cols, _ = self.controller.loop.screen.get_cols_rows()
for vid in self.playlist:
vid_tit = vid.title
vid_len = vid.duration
if vid_tit in playlist_state['episodes']:
vid_info = playlist_state['episodes'][vid_tit]
vid_ts = ts2sec(vid_info['current_timestamp'])
else:
vid_ts = 0
total_video_ts += vid_ts
vid_perc = round((vid_ts / vid_len) * 100) \
if vid_len > 0 else 0
vid_perc = min(vid_perc, 100)
vid_tit = shorten_msg(vid_tit, cols-20)
spaces = ' ' * (cols - len(vid_tit) - 19)
cur = f'{vid_tit}{spaces} {sec2ts(vid_len):<10}{vid_perc:>3}%'
self.item_list.append(cur)
v.set_items(self.item_list)
# set episode-view title
total_video_perc = round(
(total_video_ts / self.playlist.duration) * 100) \
if self.playlist.duration > 0 else 0
total_video_perc = min(total_video_perc, 100)
pl_tit = shorten_msg(self.playlist.title, cols-20)
spaces = ' ' * (cols - len(pl_tit) - 17)
v.set_title(
f'{pl_tit}{spaces} '
f'{sec2ts(self.playlist.duration):<10}'
f'{total_video_perc:>3}%')
self.controller.assemble_info_box()
# set list focus to the video that was played last
if reset_position:
assert self.controller.current_playlist is not None
_cur = self.controller.model.get_current_video(
self.controller.current_playlist)
if _cur is not None:
idx, _ = self.playlist.get_video_by_title(_cur['title'])
v.vid_list.set_focus(idx)
self.controller.update_views()
t = threading.Thread(target=tmp)
t.start()
def handle_mpv_pos(self, pos: float) -> None:
assert self.current_vid is not None
if pos is not None:
self.ts = int(pos)
assert self.ts is not None
self.controller.send_msg(
f'Playing "{self.current_vid.title}" ({sec2ts(self.ts)})')
def handle_mpv_event(self, ev: PlayerEvent) -> None:
if ev is PlayerEvent.VIDEO_OVER:
self.onVideoEnd(play_next=True)
elif ev is PlayerEvent.VIDEO_QUIT:
self.controller.send_msg('Waiting for input')
self.onVideoEnd()
def onVideoEnd(self, play_next: bool = False) -> None:
self.controller.assemble_info_box()
if play_next:
self.play_next_video()
self.setup(reload_playlist=False)
def play_video(self, vid: 'Video', start_pos: int = 0) -> None:
self.controller.save_state()
self.ts = start_pos
self.current_vid = vid
self.controller.player_backend.play_video(
vid.get_file_stream(), vid.title,
start=start_pos)
if self.controller.config['show_titles']:
self.controller.player_backend.display_text(
vid.title, min(3000, vid.duration*1000))
def play_next_video(self) -> None:
if self.current_vid is None:
self.controller.send_msg('No video selected, cannot play next')
return
vid = self._get_video_relative(1)
if vid is None:
self.controller.send_msg('Reached end of playlist')
else:
self.play_video(vid)
def play_previous_video(self) -> None:
if self.current_vid is None:
self.controller.send_msg('No video selected, cannot play previous')
return
vid = self._get_video_relative(-1)
if vid is None:
self.controller.send_msg('Reached end of playlist')
else:
self.play_video(vid)
def _get_video_relative(self, idx_shift: int) -> Optional['Video']:
assert self.playlist is not None
assert self.current_vid is not None
idx, _ = self.playlist.get_video_by_title(self.current_vid.title)
if idx is None:
raise RuntimeError(
f'Could not find video "{self.current_vid.title}"')
next_idx = idx + idx_shift
if next_idx < len(self.playlist):
return self.playlist[next_idx]
else:
return None
|
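# --- Self-contained sketch (not part of vydia) of the episode-row layout built in
# PlayerQueue.setup() above: the watched percentage is derived from the saved
# timestamp, capped at 100, and the title is padded so duration and percentage sit
# in fixed right-hand columns. sec2ts/shorten_msg are re-implemented minimally here
# for illustration; vydia imports the real helpers from ..extra.utils.
def sec2ts(sec):
    h, rem = divmod(int(sec), 3600)
    m, s = divmod(rem, 60)
    return f'{h:02d}:{m:02d}:{s:02d}'

def shorten_msg(msg, width):
    return msg if len(msg) <= width else msg[:max(width - 1, 0)] + '…'

def format_row(title, duration, watched_ts, cols=80):
    perc = min(round((watched_ts / duration) * 100) if duration > 0 else 0, 100)
    title = shorten_msg(title, cols - 20)
    spaces = ' ' * (cols - len(title) - 19)
    return f'{title}{spaces} {sec2ts(duration):<10}{perc:>3}%'

print(format_row('Some episode', 1450, 725))  # roughly 50% watched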
# IBM_PROLOG_BEGIN_TAG
#
# Copyright 2021 IBM International Business Machines Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# IBM_PROLOG_END_TAG
import logging
import subprocess
import re
class OcpException(Exception):
pass
class ClusterAccessor:
""" Basic cluster access provider. Currently only supports OCP."""
def __init__(self, standalone=False, clusterUrl=None, user=None, password=None, token=None, project=None):
self.loggedIn = False
self.standalone = standalone
self.clusterUrl = clusterUrl
self.user = user
self.password = password
self.token = token
self.project = project
def __enter__(self):
self.loginToCluster()
return self
def __exit__(self, exception_type, exception_value, exception_traceback):
logging.debug(f"ClusterAccessor.__exit__; etype={exception_type}; evalue={exception_value}")
#self.logoutOfCluster()
def loginToCluster(self):
logging.debug(f"logging into cluster; standalone={self.standalone}, cluster='{self.clusterUrl}'")
if self.standalone:
return
if self.clusterUrl is not None:
cmdArgs = ["oc", "login", "--server", self.clusterUrl]
if self.token is not None:
cmdArgs.extend(["--token", self.token])
else:
cmdArgs.extend(["--username", self.user, "--password", self.password])
logging.info(f"logging into cluster '{self.clusterUrl}'")
process = subprocess.Popen(cmdArgs, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = process.communicate()
if process.returncode != 0:
logging.error(f"Failed to login to cluster {cmdArgs}")
logging.error(f"output = {stderr.decode("utf-8")}")
raise OcpException(f"Failed to login to cluster {self.clusterUrl}.")
self.loggedIn = True
self.setOcpProject()
def setOcpProject(self):
cmdArgs = ["oc", "project", self.project]
logging.debug(f"Setting project to '{self.project}'")
process = subprocess.Popen(cmdArgs, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = process.communicate()
if process.returncode != 0:
logging.error(f"Failed to login to cluster {cmdArgs}")
logging.error(f"output = {stderr.decode("utf-8")}")
raise OcpException(f"Failed to connect to project {self.project}")
def logoutOfCluster(self):
""" Logs out of the OCP cluster if the script performed a login to the cluster."""
if self.loggedIn:
cmdArgs = ["oc", "logout"]
logging.info(f"logging out of cluster '{self.clusterUrl}'")
process = subprocess.run(cmdArgs, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
logging.debug(process.stdout.decode('utf-8'))
def isStandalone(self):
return self.standalone
@staticmethod
def getPods(filterStr):
logging.debug(f"getPods; filterStr='{filterStr}")
cmdArgs = ["oc", "get", "pods"]
pods = []
process = subprocess.Popen(cmdArgs, stdout=subprocess.PIPE)
for line in process.stdout:
string = line.decode('utf-8')
logging.debug(string)
pod = string.split()[0]
if filterStr:
if re.search(filterStr, pod):
logging.debug(f"matched pod '{string}' (pod={pod}")
pods.append(pod)
else:
pods.append(pod)
process.wait()
return pods
| # IBM_PROLOG_BEGIN_TAG
#
# Copyright 2021 IBM International Business Machines Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# IBM_PROLOG_END_TAG
import logging
import subprocess
import re
class OcpException(Exception):
pass
class ClusterAccessor:
""" Basic cluster access provider. Currently only supports OCP."""
def __init__(self, standalone=False, clusterUrl=None, user=None, password=None, token=None, project=None):
self.loggedIn = False
self.standalone = standalone
self.clusterUrl = clusterUrl
self.user = user
self.password = password
self.token = token
self.project = project
def __enter__(self):
self.loginToCluster()
return self
def __exit__(self, exception_type, exception_value, exception_traceback):
logging.debug(f"ClusterAccessor.__exit__; etype={exception_type}; evalue={exception_value}")
#self.logoutOfCluster()
def loginToCluster(self):
logging.debug(f"logging into cluster; standalone={self.standalone}, cluster='{self.clusterUrl}'")
if self.standalone:
return
if self.clusterUrl is not None:
cmdArgs = ["oc", "login", "--server", self.clusterUrl]
if self.token is not None:
cmdArgs.extend(["--token", self.token])
else:
cmdArgs.extend(["--username", self.user, "--password", self.password])
logging.info(f"logging into cluster '{self.clusterUrl}'")
process = subprocess.Popen(cmdArgs, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = process.communicate()
if process.returncode != 0:
logging.error(f"Failed to login to cluster {cmdArgs}")
logging.error(f"output = {stderr.decode('utf-8')}")
raise OcpException(f"Failed to login to cluster {self.clusterUrl}.")
self.loggedIn = True
self.setOcpProject()
def setOcpProject(self):
cmdArgs = ["oc", "project", self.project]
logging.debug(f"Setting project to '{self.project}'")
process = subprocess.Popen(cmdArgs, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = process.communicate()
if process.returncode != 0:
logging.error(f"Failed to login to cluster {cmdArgs}")
logging.error(f"output = {stderr.decode('utf-8')}")
raise OcpException(f"Failed to connect to project {self.project}")
def logoutOfCluster(self):
""" Logs out of the OCP cluster if the script performed a login to the cluster."""
if self.loggedIn:
cmdArgs = ["oc", "logout"]
logging.info(f"logging out of cluster '{self.clusterUrl}'")
process = subprocess.run(cmdArgs, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
logging.debug(process.stdout.decode('utf-8'))
def isStandalone(self):
return self.standalone
@staticmethod
def getPods(filterStr):
logging.debug(f"getPods; filterStr='{filterStr}")
cmdArgs = ["oc", "get", "pods"]
pods = []
process = subprocess.Popen(cmdArgs, stdout=subprocess.PIPE)
for line in process.stdout:
string = line.decode('utf-8')
logging.debug(string)
pod = string.split()[0]
if filterStr:
if re.search(filterStr, pod):
logging.debug(f"matched pod '{string}' (pod={pod}")
pods.append(pod)
else:
pods.append(pod)
process.wait()
return pods
|
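# --- Usage sketch for the ClusterAccessor above (relies on the class and the
# `logging` import defined in that module). The context manager performs the
# `oc login` on entry, and the static getPods() regex-filters `oc get pods`
# output. The cluster URL, token and project names are placeholders.
if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO)
    with ClusterAccessor(clusterUrl="https://api.example-cluster:6443",
                         token="sha256~replace-me",
                         project="my-project") as cluster:
        vision_pods = ClusterAccessor.getPods("vision")
        logging.info(f"matched pods: {vision_pods}")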
# Copyright (C) 2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import numpy as np
import os
import pytest
from openvino import Core, Blob, TensorDesc, StatusCode
def image_path():
path_to_repo = os.environ["DATA_PATH"]
path_to_img = os.path.join(path_to_repo, "validation_set", "224x224", "dog.bmp")
return path_to_img
def model_path(is_myriad=False):
path_to_repo = os.environ["MODELS_PATH"]
if not is_myriad:
test_xml = os.path.join(path_to_repo, "models", "test_model", "test_model_fp32.xml")
test_bin = os.path.join(path_to_repo, "models", "test_model", "test_model_fp32.bin")
else:
test_xml = os.path.join(path_to_repo, "models", "test_model", "test_model_fp16.xml")
test_bin = os.path.join(path_to_repo, "models", "test_model", "test_model_fp16.bin")
return (test_xml, test_bin)
def read_image():
import cv2
n, c, h, w = (1, 3, 32, 32)
image = cv2.imread(path_to_img)
if image is None:
raise FileNotFoundError("Input image not found")
image = cv2.resize(image, (h, w)) / 255
image = image.transpose((2, 0, 1)).astype(np.float32)
image = image.reshape((n, c, h, w))
return image
is_myriad = os.environ.get("TEST_DEVICE") == "MYRIAD"
test_net_xml, test_net_bin = model_path(is_myriad)
path_to_img = image_path()
def test_get_perf_counts(device):
ie_core = Core()
net = ie_core.read_network(test_net_xml, test_net_bin)
ie_core.set_config({"PERF_COUNT": "YES"}, device)
exec_net = ie_core.load_network(net, device)
img = read_image()
request = exec_net.create_infer_request()
td = TensorDesc("FP32", [1, 3, 32, 32], "NCHW")
input_blob = Blob(td, img)
request.set_input({"data": input_blob})
request.infer()
pc = request.get_perf_counts()
assert pc["29"]["status"] == "EXECUTED"
assert pc["29"]["layer_type"] == "FullyConnected"
del exec_net
del ie_core
del net
@pytest.mark.skipif(os.environ.get("TEST_DEVICE", "CPU") != "CPU",
reason=f"Can't run test on device {os.environ.get("TEST_DEVICE", "CPU")}, "
"Dynamic batch fully supported only on CPU")
@pytest.mark.skip(reason="Fix")
def test_set_batch_size(device):
ie_core = Core()
ie_core.set_config({"DYN_BATCH_ENABLED": "YES"}, device)
net = ie_core.read_network(test_net_xml, test_net_bin)
net.batch_size = 10
data = np.ones(shape=net.input_info["data"].input_data.shape)
exec_net = ie_core.load_network(net, device)
data[0] = read_image()[0]
request = exec_net.create_infer_request()
request.set_batch(1)
td = TensorDesc("FP32", [1, 3, 32, 32], "NCHW")
input_blob = Blob(td, data)
request.set_input({"data": input_blob})
request.infer()
assert np.allclose(int(round(request.output_blobs["fc_out"].buffer[0][2])), 1), \
"Incorrect data for 1st batch"
del exec_net
del ie_core
del net
@pytest.mark.skip(reason="Fix")
def test_set_zero_batch_size(device):
ie_core = Core()
net = ie_core.read_network(test_net_xml, test_net_bin)
exec_net = ie_core.load_network(net, device)
request = exec_net.create_infer_request()
with pytest.raises(ValueError) as e:
request.set_batch(0)
assert "Batch size should be positive integer number but 0 specified" in str(e.value)
del exec_net
del ie_core
del net
@pytest.mark.skip(reason="Fix")
def test_set_negative_batch_size(device):
ie_core = Core()
net = ie_core.read_network(test_net_xml, test_net_bin)
exec_net = ie_core.load_network(net, device)
request = exec_net.create_infer_request()
with pytest.raises(ValueError) as e:
request.set_batch(-1)
assert "Batch size should be positive integer number but -1 specified" in str(e.value)
del exec_net
del ie_core
del net
def test_blob_setter(device):
ie_core = Core()
net = ie_core.read_network(test_net_xml, test_net_bin)
exec_net_1 = ie_core.load_network(network=net, device_name=device)
net.input_info["data"].layout = "NHWC"
exec_net_2 = ie_core.load_network(network=net, device_name=device)
img = read_image()
request1 = exec_net_1.create_infer_request()
tensor_desc = TensorDesc("FP32", [1, 3, img.shape[2], img.shape[3]], "NCHW")
img_blob1 = Blob(tensor_desc, img)
request1.set_input({"data": img_blob1})
request1.infer()
res_1 = np.sort(request1.get_blob("fc_out").buffer)
img = np.transpose(img, axes=(0, 2, 3, 1)).astype(np.float32)
tensor_desc = TensorDesc("FP32", [1, 3, 32, 32], "NHWC")
img_blob = Blob(tensor_desc, img)
request = exec_net_2.create_infer_request()
request.set_blob("data", img_blob)
request.infer()
res_2 = np.sort(request.get_blob("fc_out").buffer)
assert np.allclose(res_1, res_2, atol=1e-2, rtol=1e-2)
def test_cancel(device):
ie_core = Core()
net = ie_core.read_network(test_net_xml, test_net_bin)
exec_net = ie_core.load_network(net, device)
img = read_image()
td = TensorDesc("FP32", [1, 3, 32, 32], "NCHW")
input_blob = Blob(td, img)
request = exec_net.create_infer_request()
def callback(req, code, array):
array.append(42)
data = []
request.set_completion_callback(callback, data)
request.set_input({"data": input_blob})
request.async_infer()
request.cancel()
with pytest.raises(RuntimeError) as e:
request.wait()
assert "[ INFER_CANCELLED ]" in str(e.value)
# check if callback has executed
assert data == [42]
request.async_infer()
status = request.wait()
assert status == StatusCode.OK
assert data == [42, 42]
| # Copyright (C) 2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import numpy as np
import os
import pytest
from openvino import Core, Blob, TensorDesc, StatusCode
def image_path():
path_to_repo = os.environ["DATA_PATH"]
path_to_img = os.path.join(path_to_repo, "validation_set", "224x224", "dog.bmp")
return path_to_img
def model_path(is_myriad=False):
path_to_repo = os.environ["MODELS_PATH"]
if not is_myriad:
test_xml = os.path.join(path_to_repo, "models", "test_model", "test_model_fp32.xml")
test_bin = os.path.join(path_to_repo, "models", "test_model", "test_model_fp32.bin")
else:
test_xml = os.path.join(path_to_repo, "models", "test_model", "test_model_fp16.xml")
test_bin = os.path.join(path_to_repo, "models", "test_model", "test_model_fp16.bin")
return (test_xml, test_bin)
def read_image():
import cv2
n, c, h, w = (1, 3, 32, 32)
image = cv2.imread(path_to_img)
if image is None:
raise FileNotFoundError("Input image not found")
image = cv2.resize(image, (h, w)) / 255
image = image.transpose((2, 0, 1)).astype(np.float32)
image = image.reshape((n, c, h, w))
return image
is_myriad = os.environ.get("TEST_DEVICE") == "MYRIAD"
test_net_xml, test_net_bin = model_path(is_myriad)
path_to_img = image_path()
def test_get_perf_counts(device):
ie_core = Core()
net = ie_core.read_network(test_net_xml, test_net_bin)
ie_core.set_config({"PERF_COUNT": "YES"}, device)
exec_net = ie_core.load_network(net, device)
img = read_image()
request = exec_net.create_infer_request()
td = TensorDesc("FP32", [1, 3, 32, 32], "NCHW")
input_blob = Blob(td, img)
request.set_input({"data": input_blob})
request.infer()
pc = request.get_perf_counts()
assert pc["29"]["status"] == "EXECUTED"
assert pc["29"]["layer_type"] == "FullyConnected"
del exec_net
del ie_core
del net
@pytest.mark.skipif(os.environ.get("TEST_DEVICE", "CPU") != "CPU",
reason=f"Can't run test on device {os.environ.get('TEST_DEVICE', 'CPU')}, "
"Dynamic batch fully supported only on CPU")
@pytest.mark.skip(reason="Fix")
def test_set_batch_size(device):
ie_core = Core()
ie_core.set_config({"DYN_BATCH_ENABLED": "YES"}, device)
net = ie_core.read_network(test_net_xml, test_net_bin)
net.batch_size = 10
data = np.ones(shape=net.input_info["data"].input_data.shape)
exec_net = ie_core.load_network(net, device)
data[0] = read_image()[0]
request = exec_net.create_infer_request()
request.set_batch(1)
td = TensorDesc("FP32", [1, 3, 32, 32], "NCHW")
input_blob = Blob(td, data)
request.set_input({"data": input_blob})
request.infer()
assert np.allclose(int(round(request.output_blobs["fc_out"].buffer[0][2])), 1), \
"Incorrect data for 1st batch"
del exec_net
del ie_core
del net
@pytest.mark.skip(reason="Fix")
def test_set_zero_batch_size(device):
ie_core = Core()
net = ie_core.read_network(test_net_xml, test_net_bin)
exec_net = ie_core.load_network(net, device)
request = exec_net.create_infer_request()
with pytest.raises(ValueError) as e:
request.set_batch(0)
assert "Batch size should be positive integer number but 0 specified" in str(e.value)
del exec_net
del ie_core
del net
@pytest.mark.skip(reason="Fix")
def test_set_negative_batch_size(device):
ie_core = Core()
net = ie_core.read_network(test_net_xml, test_net_bin)
exec_net = ie_core.load_network(net, device)
request = exec_net.create_infer_request()
with pytest.raises(ValueError) as e:
request.set_batch(-1)
assert "Batch size should be positive integer number but -1 specified" in str(e.value)
del exec_net
del ie_core
del net
def test_blob_setter(device):
ie_core = Core()
net = ie_core.read_network(test_net_xml, test_net_bin)
exec_net_1 = ie_core.load_network(network=net, device_name=device)
net.input_info["data"].layout = "NHWC"
exec_net_2 = ie_core.load_network(network=net, device_name=device)
img = read_image()
request1 = exec_net_1.create_infer_request()
tensor_desc = TensorDesc("FP32", [1, 3, img.shape[2], img.shape[3]], "NCHW")
img_blob1 = Blob(tensor_desc, img)
request1.set_input({"data": img_blob1})
request1.infer()
res_1 = np.sort(request1.get_blob("fc_out").buffer)
img = np.transpose(img, axes=(0, 2, 3, 1)).astype(np.float32)
tensor_desc = TensorDesc("FP32", [1, 3, 32, 32], "NHWC")
img_blob = Blob(tensor_desc, img)
request = exec_net_2.create_infer_request()
request.set_blob("data", img_blob)
request.infer()
res_2 = np.sort(request.get_blob("fc_out").buffer)
assert np.allclose(res_1, res_2, atol=1e-2, rtol=1e-2)
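    # The two requests above feed the same image to equivalent networks, once as
    # NCHW and once as NHWC (after changing the input layout), so their sorted
    # outputs are expected to match within the given tolerances.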
def test_cancel(device):
ie_core = Core()
net = ie_core.read_network(test_net_xml, test_net_bin)
exec_net = ie_core.load_network(net, device)
img = read_image()
td = TensorDesc("FP32", [1, 3, 32, 32], "NCHW")
input_blob = Blob(td, img)
request = exec_net.create_infer_request()
def callback(req, code, array):
array.append(42)
data = []
request.set_completion_callback(callback, data)
request.set_input({"data": input_blob})
request.async_infer()
request.cancel()
with pytest.raises(RuntimeError) as e:
request.wait()
assert "[ INFER_CANCELLED ]" in str(e.value)
# check if callback has executed
assert data == [42]
request.async_infer()
status = request.wait()
assert status == StatusCode.OK
assert data == [42, 42]
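# Note: the `device` argument of each test is assumed to come from a pytest fixture
# defined elsewhere (e.g. a conftest.py that reads TEST_DEVICE); that file is not
# shown here. A hypothetical invocation of this suite might look like:
#
#   DATA_PATH=/path/to/validation_set MODELS_PATH=/path/to/models \
#   TEST_DEVICE=CPU pytest -v test_infer_request.py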
|
#!/usr/bin/env python
'''
Check glossary YAML file.
Usage: check-glossary.py [-A] [-c LL] yaml-config-file glossary-file
Flags:
- `-A`: report all missing definitions for all languages.
- `-c LL`: report missing definitions for language with code `LL` (e.g., 'fr').
Checks always performed:
- Only languages listed in `_config.yml` appear in glossary.
- Entries have all required keys (`ENTRY_REQUIRED_KEYS`).
- Only known keys are present at the top level of each entry (`ENTRY_KEYS`).
- Entries are ordered by unique slugs.
- Every definition has the required keys (`DEFINITION_REQUIRED_KEYS`).
- Definitions only have allowed keys (`DEFINITION_KEYS`).
- No duplicate definitions.
Checks performed on request (`-A` / `-c LL`):
- Missing definitions for the selected language(s).
'''
import sys
import getopt
import re
import yaml
from collections import Counter
# Keys for entries and definitions.
ENTRY_REQUIRED_KEYS = {'slug'}
ENTRY_OPTIONAL_KEYS = {'ref'}
ENTRY_LANGUAGE_KEYS = {'af', 'am', 'ar', 'bn', 'de', 'en', 'es', 'fr', 'he', 'it', 'ja', 'ko', 'nl', 'pt', 'sw', 'tn', 'xh', 'zu'}
ENTRY_KEYS = ENTRY_REQUIRED_KEYS | \
ENTRY_OPTIONAL_KEYS | \
ENTRY_LANGUAGE_KEYS
DEFINITION_REQUIRED_KEYS = {'term', 'def'}
DEFINITION_OPTIONAL_KEYS = {'acronym'}
DEFINITION_KEYS = DEFINITION_REQUIRED_KEYS | \
DEFINITION_OPTIONAL_KEYS
# Match internal Markdown links.
LINK_PAT = re.compile(r'\[.+?\]\(#(.+?)\)')
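# For example (illustrative only):
#   LINK_PAT.findall('a [node](#node) inside a [graph](#graph)')
# returns ['node', 'graph'], i.e. the slugs of internally linked glossary terms.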
def main():
'''Main driver.'''
checkLang, configFile, glossaryFile = parseArgs()
with open(configFile, 'r') as reader:
config = yaml.load(reader, Loader=yaml.SafeLoader)
with open(glossaryFile, 'r') as reader:
gloss = yaml.load(reader, Loader=yaml.SafeLoader)
checkLanguages(config)
for entry in gloss:
checkEntry(entry)
checkSlugs(gloss)
checkDuplicates(gloss)
checkCrossRef(gloss)
if checkLang == 'ALL':
for lang in sorted(ENTRY_LANGUAGE_KEYS):
checkMissingDefs(lang, gloss)
elif checkLang:
checkMissingDefs(checkLang, gloss)
forward = buildForward(gloss)
backward = buildBackward(forward)
def parseArgs():
'''
Parse command-line arguments, returning language to check,
configuration file path, and glossary file path. The language
to check may be 'ALL' (to check all), None (to check none), or
a known 2-letter language code.
'''
options, filenames = getopt.getopt(sys.argv[1:], 'Ac:')
if (len(filenames) != 2):
print(f'Usage: check [-A] [-c LL] configFile glossFile')
sys.exit(1)
configFile, glossFile = filenames
checkLang = None
    for (opt, arg) in options:
if opt == '-A':
checkLang = 'ALL'
elif opt == '-c':
checkLang = arg
if checkLang not in ENTRY_LANGUAGE_KEYS:
print(f'Unknown language {checkLang}', file=sys.stderr)
sys.exit(1)
else:
print(f'Unknown flag {opt}', file=sys.stderr)
sys.exit(1)
return checkLang, configFile, glossFile
def checkLanguages(config):
'''Compare configuration with this script's settings.'''
actual = set([c['key'] for c in config['languages']])
if actual - ENTRY_LANGUAGE_KEYS:
print(f'unexpected languages in configuration: {actual - ENTRY_LANGUAGE_KEYS}')
if ENTRY_LANGUAGE_KEYS - actual:
print(f'missing languages in configuration: {ENTRY_LANGUAGE_KEYS - actual}')
def checkEntry(entry):
'''
Check structure of individual entries, returning a language-to-set
dictionary of terms references in the body.
'''
keys = set(entry.keys())
missing = [k for k in ENTRY_REQUIRED_KEYS if k not in keys]
if missing:
print(f'Missing required keys for entry {entry}: {missing}')
slug = entry['slug']
unknown_keys = keys - ENTRY_KEYS
if unknown_keys:
print(f'Unknown keys in {slug}: {unknown_keys}')
if (len(ENTRY_LANGUAGE_KEYS - keys) == len(ENTRY_LANGUAGE_KEYS)):
print(f'No language entries for entry {entry}')
result = {}
crossrefs = set(entry['ref']) if ('ref' in entry) else set()
for lang in ENTRY_LANGUAGE_KEYS:
if lang in entry:
label = f'{slug}/{lang}'
result[lang] = checkLanguageDef(label, crossrefs, entry[lang])
return result
def checkLanguageDef(label, crossrefs, definition):
'''
Check language-specific material in definition, returning slugs
of terms referenced in the body of the definition.
'''
keys = set(definition.keys())
missing = [k for k in DEFINITION_REQUIRED_KEYS if k not in keys]
if missing:
print(f'Missing required keys for definition {label}: {missing}')
unknown_keys = keys - DEFINITION_KEYS
if unknown_keys:
print(f'Unknown keys in {label}: {unknown_keys}')
inBody = set(LINK_PAT.findall(definition['def']))
duplicate = crossrefs & inBody
if duplicate:
duplicate = ', '.join(sorted(duplicate))
print(f'Terms in both body and cross-references for {label}: {duplicate}')
return inBody
def checkSlugs(gloss):
'''Check that entries have unique slugs and are ordered by slug.'''
slugs = [entry['slug'] for entry in gloss if 'slug' in entry]
for (i, slug) in enumerate(slugs):
if (i > 0) and (slug < slugs[i-1]):
print(f'slug {slug} out of order')
counts = Counter(slugs)
dups = [s for s in counts.keys() if counts[s] > 1]
if dups:
print(f'duplicate keys: {dups}')
def checkDuplicates(gloss):
'''Check for duplicate definitions in each language.'''
for lang in ENTRY_LANGUAGE_KEYS:
terms = [entry[lang]['term'] for entry in gloss
if ((lang in entry) and 'term' in entry[lang])]
counts = Counter(terms)
dups = [s for s in counts.keys() if counts[s] > 1]
if dups:
print(f'duplicate definitions for {lang}: {dups}')
def checkCrossRef(gloss):
'''Check that all explicit cross-references resolve.'''
known = {entry['slug'] for entry in gloss}
missing = {}
for entry in gloss:
if 'ref' in entry:
if not entry['ref']:
                print(f'{entry["slug"]} has empty "ref" key')
else:
unknown = [slug for slug in entry['ref'] if slug not in known]
if unknown:
                    print(f'{entry["slug"]} has unknown crossref(s) {", ".join(unknown)}')
def checkMissingDefs(lang, gloss):
'''Check for missing definitions in the given language.'''
missing = []
for entry in gloss:
if lang not in entry:
            print(f'{lang}: {entry["slug"]}')
def buildForward(gloss):
'''Build graph of forward references.'''
result = {}
for entry in gloss:
record = set()
for language in ENTRY_LANGUAGE_KEYS:
if language in entry:
record |= set(LINK_PAT.findall(entry[language]['def']))
result[entry['slug']] = record
return result
def buildBackward(forward):
'''Build graph of backward references, checking for missing terms.'''
result = {}
for source in forward:
result[source] = set()
failed = set()
for source in forward:
for dest in forward[source]:
if dest in result:
result[dest].add(source)
else:
failed.add(dest)
if failed:
failed = '\n '.join(sorted(failed))
print('Missing terms:\n ', failed, file=sys.stderr)
sys.exit(1)
return result
if __name__ == '__main__':
main()
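# For reference, a glossary entry that satisfies the checks above might look like
# this (illustrative only; it assumes a 'node' entry also exists in the same file):
#
#   - slug: graph
#     en:
#       term: graph
#       def: >
#         A collection of [nodes](#node) connected by edges.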
|
# -*- coding: utf-8 -*-
# Copyright (c) 2013 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
import snakebite.protobuf.ClientNamenodeProtocol_pb2 as client_proto
import snakebite.glob as glob
from snakebite.errors import RequestError
from snakebite.service import RpcService
from snakebite.errors import FileNotFoundException
from snakebite.errors import DirectoryException
from snakebite.errors import FileException
from snakebite.errors import InvalidInputException
from snakebite.errors import OutOfNNException
from snakebite.channel import DataXceiverChannel
from snakebite.config import HDFSConfig
from snakebite.namenode import Namenode
import Queue
import zlib
import bz2
import logging
import os
import os.path
import pwd
import fnmatch
import inspect
import socket
import errno
import time
log = logging.getLogger(__name__)
class Client(object):
''' A pure python HDFS client.
**Example:**
>>> from snakebite.client import Client
>>> client = Client("localhost", 54310, use_trash=False)
>>> for x in client.ls(['/']):
... print x
.. warning::
    Many methods return generators, which means they need to be consumed to execute! Documentation will explicitly
specify which methods return generators.
.. note::
``paths`` parameters in methods are often passed as lists, since operations can work on multiple
paths.
.. note::
Parameters like ``include_children`` and ``recurse`` are not used
when paths contain globs.
.. note::
Different Hadoop distributions use different protocol versions. Snakebite defaults to 9, but this can be set by passing
in the ``hadoop_version`` parameter to the constructor.
'''
FILETYPES = {
1: "d",
2: "f",
3: "s"
}
def __init__(self, host, port=Namenode.DEFAULT_PORT, hadoop_version=Namenode.DEFAULT_VERSION, use_trash=False, effective_user=None):
'''
:param host: Hostname or IP address of the NameNode
:type host: string
:param port: RPC Port of the NameNode
:type port: int
:param hadoop_version: What hadoop protocol version should be used (default: 9)
:type hadoop_version: int
:param use_trash: Use a trash when removing files.
:type use_trash: boolean
:param effective_user: Effective user for the HDFS operations (default: None - current user)
:type effective_user: string
'''
if hadoop_version < 9:
raise Exception("Only protocol versions >= 9 supported")
self.host = host
self.port = port
self.service_stub_class = client_proto.ClientNamenodeProtocol_Stub
self.service = RpcService(self.service_stub_class, self.port, self.host, hadoop_version, effective_user)
self.use_trash = use_trash
self.trash = self._join_user_path(".Trash")
log.debug("Created client for %s:%s with trash=%s" % (host, port, use_trash))
def ls(self, paths, recurse=False, include_toplevel=False, include_children=True):
''' Issues 'ls' command and returns a list of maps that contain fileinfo
:param paths: Paths to list
:type paths: list
:param recurse: Recursive listing
:type recurse: boolean
:param include_toplevel: Include the given path in the listing. If the path is a file, include_toplevel is always True.
:type include_toplevel: boolean
:param include_children: Include child nodes in the listing.
:type include_children: boolean
:returns: a generator that yields dictionaries
**Examples:**
Directory listing
>>> list(client.ls(["/"]))
[{'group': u'supergroup', 'permission': 420, 'file_type': 'f', 'access_time': 1367317324982L, 'block_replication': 1, 'modification_time': 1367317325346L, 'length': 6783L, 'blocksize': 134217728L, 'owner': u'wouter', 'path': '/Makefile'}, {'group': u'supergroup', 'permission': 493, 'file_type': 'd', 'access_time': 0L, 'block_replication': 0, 'modification_time': 1367317325431L, 'length': 0L, 'blocksize': 0L, 'owner': u'wouter', 'path': '/build'}, {'group': u'supergroup', 'permission': 420, 'file_type': 'f', 'access_time': 1367317326510L, 'block_replication': 1, 'modification_time': 1367317326522L, 'length': 100L, 'blocksize': 134217728L, 'owner': u'wouter', 'path': '/index.asciidoc'}, {'group': u'supergroup', 'permission': 493, 'file_type': 'd', 'access_time': 0L, 'block_replication': 0, 'modification_time': 1367317326628L, 'length': 0L, 'blocksize': 0L, 'owner': u'wouter', 'path': '/source'}]
File listing
>>> list(client.ls(["/Makefile"]))
[{'group': u'supergroup', 'permission': 420, 'file_type': 'f', 'access_time': 1367317324982L, 'block_replication': 1, 'modification_time': 1367317325346L, 'length': 6783L, 'blocksize': 134217728L, 'owner': u'wouter', 'path': '/Makefile'}]
Get directory information
>>> list(client.ls(["/source"], include_toplevel=True, include_children=False))
[{'group': u'supergroup', 'permission': 493, 'file_type': 'd', 'access_time': 0L, 'block_replication': 0, 'modification_time': 1367317326628L, 'length': 0L, 'blocksize': 0L, 'owner': u'wouter', 'path': '/source'}]
'''
if not isinstance(paths, list):
raise InvalidInputException("Paths should be a list")
for item in self._find_items(paths, self._handle_ls,
include_toplevel=include_toplevel,
include_children=include_children,
recurse=recurse):
if item:
yield item
LISTING_ATTRIBUTES = ['length', 'owner', 'group', 'block_replication',
'modification_time', 'access_time', 'blocksize']
def _handle_ls(self, path, node):
''' Handle every node received for an ls request'''
entry = {}
entry["file_type"] = self.FILETYPES[node.fileType]
entry["permission"] = node.permission.perm
entry["path"] = path
for attribute in self.LISTING_ATTRIBUTES:
entry[attribute] = node.__getattribute__(attribute)
return entry
def chmod(self, paths, mode, recurse=False):
        ''' Change the mode for paths. This returns a list of maps containing the result of the operation.
:param paths: List of paths to chmod
:type paths: list
:param mode: Octal mode (e.g. 0755)
:type mode: int
:param recurse: Recursive chmod
:type recurse: boolean
:returns: a generator that yields dictionaries
.. note:: The top level directory is always included when `recurse=True`'''
if not isinstance(paths, list):
raise InvalidInputException("Paths should be a list")
if not paths:
raise InvalidInputException("chmod: no path given")
if not mode:
raise InvalidInputException("chmod: no mode given")
processor = lambda path, node, mode=mode: self._handle_chmod(path, node, mode)
for item in self._find_items(paths, processor, include_toplevel=True,
include_children=False, recurse=recurse):
if item:
yield item
def _handle_chmod(self, path, node, mode):
request = client_proto.SetPermissionRequestProto()
request.src = path
request.permission.perm = mode
self.service.setPermission(request)
return {"result": True, "path": path}
def chown(self, paths, owner, recurse=False):
''' Change the owner for paths. The owner can be specified as `user` or `user:group`
:param paths: List of paths to chmod
:type paths: list
:param owner: New owner
:type owner: string
:param recurse: Recursive chown
:type recurse: boolean
:returns: a generator that yields dictionaries
This always include the toplevel when recursing.'''
if not isinstance(paths, list):
raise InvalidInputException("Paths should be a list")
if not paths:
raise InvalidInputException("chown: no path given")
if not owner:
raise InvalidInputException("chown: no owner given")
processor = lambda path, node, owner=owner: self._handle_chown(path, node, owner)
for item in self._find_items(paths, processor, include_toplevel=True,
include_children=False, recurse=recurse):
if item:
yield item
def _handle_chown(self, path, node, owner):
if ":" in owner:
(owner, group) = owner.split(":")
else:
group = ""
request = client_proto.SetOwnerRequestProto()
request.src = path
if owner:
request.username = owner
if group:
request.groupname = group
self.service.setOwner(request)
return {"result": True, "path": path}
def chgrp(self, paths, group, recurse=False):
''' Change the group of paths.
:param paths: List of paths to chgrp
:type paths: list
:param group: New group
        :type group: string
:param recurse: Recursive chgrp
:type recurse: boolean
:returns: a generator that yields dictionaries
'''
if not isinstance(paths, list):
raise InvalidInputException("Paths should be a list")
if not paths:
raise InvalidInputException("chgrp: no paths given")
if not group:
raise InvalidInputException("chgrp: no group given")
owner = ":%s" % group
processor = lambda path, node, owner=owner: self._handle_chown(path, node, owner)
for item in self._find_items(paths, processor, include_toplevel=True,
include_children=False, recurse=recurse):
if item:
yield item
def count(self, paths):
''' Count files in a path
:param paths: List of paths to count
:type paths: list
:returns: a generator that yields dictionaries
**Examples:**
>>> list(client.count(['/']))
[{'spaceConsumed': 260185L, 'quota': 2147483647L, 'spaceQuota': 18446744073709551615L, 'length': 260185L, 'directoryCount': 9L, 'path': '/', 'fileCount': 34L}]
'''
if not isinstance(paths, list):
raise InvalidInputException("Paths should be a list")
if not paths:
raise InvalidInputException("count: no path given")
for item in self._find_items(paths, self._handle_count, include_toplevel=True,
include_children=False, recurse=False):
if item:
yield item
COUNT_ATTRIBUTES = ['length', 'fileCount', 'directoryCount', 'quota', 'spaceConsumed', 'spaceQuota']
def _handle_count(self, path, node):
request = client_proto.GetContentSummaryRequestProto()
request.path = path
response = self.service.getContentSummary(request)
entry = {"path": path}
for attribute in self.COUNT_ATTRIBUTES:
entry[attribute] = response.summary.__getattribute__(attribute)
return entry
def df(self):
''' Get FS information
:returns: a dictionary
**Examples:**
>>> client.df()
{'used': 491520L, 'capacity': 120137519104L, 'under_replicated': 0L, 'missing_blocks': 0L, 'filesystem': 'hdfs://localhost:54310', 'remaining': 19669295104L, 'corrupt_blocks': 0L}
'''
processor = lambda path, node: self._handle_df(path, node)
return list(self._find_items(['/'], processor, include_toplevel=True, include_children=False, recurse=False))[0]
def _handle_df(self, path, node):
request = client_proto.GetFsStatusRequestProto()
response = self.service.getFsStats(request)
entry = {"filesystem": "hdfs://%s:%d" % (self.host, self.port)}
for i in ['capacity', 'used', 'remaining', 'under_replicated',
'corrupt_blocks', 'missing_blocks']:
entry[i] = response.__getattribute__(i)
return entry
def du(self, paths, include_toplevel=False, include_children=True):
'''Returns size information for paths
:param paths: Paths to du
:type paths: list
:param include_toplevel: Include the given path in the result. If the path is a file, include_toplevel is always True.
:type include_toplevel: boolean
:param include_children: Include child nodes in the result.
:type include_children: boolean
:returns: a generator that yields dictionaries
**Examples:**
Children:
>>> list(client.du(['/']))
[{'path': '/Makefile', 'length': 6783L}, {'path': '/build', 'length': 244778L}, {'path': '/index.asciidoc', 'length': 100L}, {'path': '/source', 'length': 8524L}]
Directory only:
>>> list(client.du(['/'], include_toplevel=True, include_children=False))
[{'path': '/', 'length': 260185L}]
'''
if not isinstance(paths, list):
raise InvalidInputException("Paths should be a list")
if not paths:
raise InvalidInputException("du: no path given")
processor = lambda path, node: self._handle_du(path, node)
for item in self._find_items(paths, processor, include_toplevel=include_toplevel,
include_children=include_children, recurse=False):
if item:
yield item
def _handle_du(self, path, node):
if self._is_dir(node):
request = client_proto.GetContentSummaryRequestProto()
request.path = path
try:
response = self.service.getContentSummary(request)
return {"path": path, "length": response.summary.length}
except RequestError, e:
print e
else:
return {"path": path, "length": node.length}
def rename(self, paths, dst):
''' Rename (move) path(s) to a destination
:param paths: Source paths
:type paths: list
:param dst: destination
:type dst: string
:returns: a generator that yields dictionaries
'''
if not isinstance(paths, list):
raise InvalidInputException("Paths should be a list")
if not paths:
raise InvalidInputException("rename: no path given")
if not dst:
raise InvalidInputException("rename: no destination given")
processor = lambda path, node, dst=dst: self._handle_rename(path, node, dst)
for item in self._find_items(paths, processor, include_toplevel=True):
if item:
yield item
def _handle_rename(self, path, node, dst):
if not dst.startswith("/"):
dst = self._join_user_path(dst)
request = client_proto.RenameRequestProto()
request.src = path
request.dst = dst
response = self.service.rename(request)
return {"path": path, "result": response.result}
def delete(self, paths, recurse=False):
''' Delete paths
:param paths: Paths to delete
:type paths: list
:param recurse: Recursive delete (use with care!)
:type recurse: boolean
:returns: a generator that yields dictionaries
.. note:: Recursive deletion uses the NameNode recursive deletion functionality
        instead of letting the client recurse. Hadoop's client recurses
        by itself and thus shows all files and directories that are
deleted. Snakebite doesn't.
'''
if not isinstance(paths, list):
raise InvalidInputException("Paths should be a list")
if not paths:
raise InvalidInputException("delete: no path given")
processor = lambda path, node, recurse=recurse: self._handle_delete(path, node, recurse)
for item in self._find_items(paths, processor, include_toplevel=True):
if item:
yield item
def _handle_delete(self, path, node, recurse):
if (self._is_dir(node) and not recurse):
raise DirectoryException("rm: `%s': Is a directory" % path)
# None might be passed in for recurse
if not recurse:
recurse = False
if self.__should_move_to_trash(path):
if path.endswith("/"):
suffix_path = path[1:-1]
else:
suffix_path = path[1:]
trash_path = os.path.join(self.trash, "Current", suffix_path)
if trash_path.endswith("/"):
trash_path = trash_path[:-1]
base_trash_path = os.path.join(self.trash, "Current", os.path.dirname(suffix_path))
if base_trash_path.endswith("/"):
base_trash_path = base_trash_path[:-1]
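            # Illustration (hypothetical user and path): deleting '/user/wouter/foo'
            # with use_trash=True gives suffix_path 'user/wouter/foo', so the file is
            # renamed to '/user/wouter/.Trash/Current/user/wouter/foo' instead of
            # being deleted outright.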
# Try twice, in case checkpoint between mkdir() and rename()
for i in range(0, 2):
list(self.mkdir([base_trash_path], create_parent=True, mode=0700))
original_path = trash_path
while self.test(trash_path, exists=True):
unix_timestamp = str(int(time.time() * 1000))
trash_path = "%s%s" % (original_path, unix_timestamp)
result = self._handle_rename(path, node, trash_path)
if result['result']:
result['message'] = ". Moved %s to %s" % (path, trash_path)
return result
raise Exception("Failed to move to trash: %s" % path)
else:
request = client_proto.DeleteRequestProto()
request.src = path
request.recursive = recurse
response = self.service.delete(request)
return {"path": path, "result": response.result}
def __should_move_to_trash(self, path):
if not self.use_trash:
return False
if path.startswith(self.trash):
return False # Path already in trash
if os.path.dirname(self.trash).startswith(path):
raise Exception("Cannot move %s to the trash, as it contains the trash" % path)
return True
def rmdir(self, paths):
''' Delete a directory
:param paths: Paths to delete
:type paths: list
:returns: a generator that yields dictionaries
        .. note:: directories have to be empty.
'''
if not isinstance(paths, list):
raise InvalidInputException("Paths should be a list")
if not paths:
raise InvalidInputException("rmdir: no path given")
processor = lambda path, node: self._handle_rmdir(path, node)
for item in self._find_items(paths, processor, include_toplevel=True):
if item:
yield item
def _handle_rmdir(self, path, node):
if not self._is_dir(node):
raise DirectoryException("rmdir: `%s': Is not a directory" % path)
# Check if the directory is empty
files = self.ls([path])
if len(list(files)) > 0:
raise DirectoryException("rmdir: `%s': Directory is not empty" % path)
return self._handle_delete(path, node, recurse=True)
def touchz(self, paths, replication=None, blocksize=None):
''' Create a zero length file or updates the timestamp on a zero length file
:param paths: Paths
:type paths: list
:param replication: Replication factor
        :type replication: int
:param blocksize: Block size (in bytes) of the newly created file
:type blocksize: int
:returns: a generator that yields dictionaries
'''
if not isinstance(paths, list):
raise InvalidInputException("Paths should be a list")
if not paths:
raise InvalidInputException("touchz: no path given")
# Let's get the blocksize and replication from the server defaults
# provided by the namenode if they are not specified
if not replication or not blocksize:
defaults = self.serverdefaults()
if not replication:
replication = defaults['replication']
if not blocksize:
blocksize = defaults['blockSize']
processor = lambda path, node, replication=replication, blocksize=blocksize: self._handle_touchz(path, node, replication, blocksize)
for item in self._find_items(paths, processor, include_toplevel=True, check_nonexistence=True, include_children=False):
if item:
yield item
def _handle_touchz(self, path, node, replication, blocksize):
# Item already exists
if node:
if node.length != 0:
raise FileException("touchz: `%s': Not a zero-length file" % path)
if self._is_dir(node):
raise DirectoryException("touchz: `%s': Is a directory" % path)
response = self._create_file(path, replication, blocksize, overwrite=True)
else:
# Check if the parent directory exists
parent = self._get_file_info(os.path.dirname(path))
if not parent:
raise DirectoryException("touchz: `%s': No such file or directory" % path)
else:
response = self._create_file(path, replication, blocksize, overwrite=False)
return {"path": path, "result": response.result}
def setrep(self, paths, replication, recurse=False):
''' Set the replication factor for paths
:param paths: Paths
:type paths: list
:param replication: Replication factor
        :type replication: int
:param recurse: Apply replication factor recursive
:type recurse: boolean
:returns: a generator that yields dictionaries
'''
if not isinstance(paths, list):
raise InvalidInputException("Paths should be a list")
if not paths:
raise InvalidInputException("setrep: no path given")
if not replication:
raise InvalidInputException("setrep: no replication given")
processor = lambda path, node, replication=replication: self._handle_setrep(path, node, replication)
for item in self._find_items(paths, processor, include_toplevel=True,
include_children=False, recurse=recurse):
if item:
yield item
def _handle_setrep(self, path, node, replication):
if not self._is_dir(node):
request = client_proto.SetReplicationRequestProto()
request.src = path
request.replication = replication
response = self.service.setReplication(request)
return {"result": response.result, "path": path}
def cat(self, paths, check_crc=False):
''' Fetch all files that match the source file pattern
and display their content on stdout.
:param paths: Paths to display
:type paths: list of strings
:param check_crc: Check for checksum errors
:type check_crc: boolean
:returns: a generator that yields strings
'''
if not isinstance(paths, list):
raise InvalidInputException("Paths should be a list")
if not paths:
raise InvalidInputException("cat: no path given")
processor = lambda path, node, check_crc=check_crc: self._handle_cat(path, node, check_crc)
for item in self._find_items(paths, processor, include_toplevel=True,
include_children=False, recurse=False):
if item:
yield item
def _handle_cat(self, path, node, check_crc):
if self._is_dir(node):
raise DirectoryException("cat: `%s': Is a directory" % path)
for load in self._read_file(path, node, False, check_crc):
if load:
yield load
def copyToLocal(self, paths, dst, check_crc=False):
''' Copy files that match the file source pattern
        to the local name. Source is kept. When copying multiple
files, the destination must be a directory.
:param paths: Paths to copy
:type paths: list of strings
:param dst: Destination path
:type dst: string
:param check_crc: Check for checksum errors
:type check_crc: boolean
:returns: a generator that yields strings
'''
if not isinstance(paths, list):
raise InvalidInputException("Paths should be a list")
if not paths:
raise InvalidInputException("copyToLocal: no path given")
if not dst:
raise InvalidInputException("copyToLocal: no destination given")
self.base_source = None
processor = lambda path, node, dst=dst, check_crc=check_crc: self._handle_copyToLocal(path, node, dst, check_crc)
for item in self._find_items(paths, processor, include_toplevel=True, recurse=True, include_children=True):
if item:
yield item
def _handle_copyToLocal(self, path, node, dst, check_crc):
# Calculate base directory using the first node only
if self.base_source is None:
self.dst = os.path.abspath(dst)
if os.path.isdir(dst): # If input destination is an existing directory, include toplevel
self.base_source = os.path.dirname(path)
else:
self.base_source = path
if self.base_source.endswith("/"):
self.base_source = self.base_source[:-1]
target = dst + (path.replace(self.base_source, "", 1))
error = ""
result = False
# Target is an existing file
if os.path.isfile(target):
error += "file exists"
# Target is an existing directory
elif os.path.isdir(target):
error += "directory exists"
# Source is a directory
elif self._is_dir(node):
os.makedirs(target, mode=node.permission.perm)
result = True
# Source is a file
elif self._is_file(node):
temporary_target = "%s._COPYING_" % target
f = open(temporary_target, 'w')
try:
for load in self._read_file(path, node, tail_only=False, check_crc=check_crc):
f.write(load)
f.close()
os.rename(temporary_target, target)
result = True
except Exception, e:
result = False
error = e
if os.path.isfile(temporary_target):
os.remove(temporary_target)
return {"path": target, "result": result, "error": error, "source_path": path}
def getmerge(self, path, dst, newline=False, check_crc=False):
''' Get all the files in the directories that
match the source file pattern and merge and sort them to only
one file on local fs.
        :param path: Directory containing files that will be merged
        :type path: string
        :param dst: Path of file that will be written
        :type dst: string
        :param newline: Add a newline character at the end of each file.
        :type newline: boolean
:returns: string content of the merged file at dst
'''
if not path:
raise InvalidInputException("getmerge: no path given")
if not dst:
raise InvalidInputException("getmerge: no destination given")
temporary_target = "%s._COPYING_" % dst
f = open(temporary_target, 'w')
processor = lambda path, node, dst=dst, check_crc=check_crc: self._handle_getmerge(path, node, dst, check_crc)
try:
for item in self._find_items([path], processor, include_toplevel=True, recurse=False, include_children=True):
for load in item:
if load['result']:
f.write(load['response'])
                    elif load['error'] != '':
if os.path.isfile(temporary_target):
os.remove(temporary_target)
raise Exception(load['error'])
if newline and load['response']:
f.write("\n")
yield {"path": dst, "response": '', "result": True, "error": load['error'], "source_path": path}
finally:
if os.path.isfile(temporary_target):
f.close()
os.rename(temporary_target, dst)
def _handle_getmerge(self, path, node, dst, check_crc):
log.debug("in handle getmerge")
error = ''
if not self._is_file(node):
# Target is an existing file
if os.path.isfile(dst):
error += "target file exists"
# Target is an existing directory
elif os.path.isdir(dst):
error += "target directory exists"
yield {"path": path, "response": '', "result": False, "error": error, "source_path": path}
# Source is a file
else:
if node.length == 0: # Empty file
yield {"path": path, "response": '', "result": True, "error": error, "source_path": path}
else:
try:
for load in self._read_file(path, node, tail_only=False, check_crc=check_crc):
yield {"path": path, "response": load, "result": True, "error": error, "source_path": path}
except Exception, e:
error = e
yield {"path": path, "response": '', "result": False, "error": error, "source_path": path}
def stat(self, paths):
        ''' Stat a file
:param paths: Path
:type paths: string
:returns: a dictionary
**Example:**
>>> client.stat(['/index.asciidoc'])
{'blocksize': 134217728L, 'owner': u'wouter', 'length': 100L, 'access_time': 1367317326510L, 'group': u'supergroup', 'permission': 420, 'file_type': 'f', 'path': '/index.asciidoc', 'modification_time': 1367317326522L, 'block_replication': 1}
'''
if not isinstance(paths, list):
raise InvalidInputException("Paths should be a list")
if not paths:
raise InvalidInputException("stat: no path given")
processor = lambda path, node: self._handle_stat(path, node)
return list(self._find_items(paths, processor, include_toplevel=True))[0]
def _handle_stat(self, path, node):
return {"path": path,
"file_type": self.FILETYPES[node.fileType],
"length": node.length,
"permission": node.permission.perm,
"owner": node.owner,
"group": node.group,
"modification_time": node.modification_time,
"access_time": node.access_time,
"block_replication": node.block_replication,
"blocksize": node.blocksize}
def tail(self, path, append=False):
# Note: append is currently not implemented.
''' Show the last 1KB of the file.
:param path: Path to read
:type path: string
        :param append: Show appended data as the file grows (currently not implemented).
        :type append: boolean
:returns: a generator that yields strings
'''
if not path:
raise InvalidInputException("tail: no path given")
processor = lambda path, node, tail_only=True, append=append: self._handle_tail(path, node, tail_only, append)
for item in self._find_items([path], processor, include_toplevel=True,
include_children=False, recurse=False):
if item:
yield item
def _handle_tail(self, path, node, tail_only, append):
data = ''
for load in self._read_file(path, node, tail_only=True, check_crc=False):
data += load
# We read only the necessary packets but still
# need to cut off at the packet level.
return data[max(0, len(data)-1024):len(data)]
def test(self, path, exists=False, directory=False, zero_length=False):
        '''Test if a path exists, is a directory or has zero length
:param path: Path to test
:type path: string
:param exists: Check if the path exists
:type exists: boolean
:param directory: Check if the path is a directory
        :type directory: boolean
:param zero_length: Check if the path is zero-length
:type zero_length: boolean
:returns: a boolean
.. note:: directory and zero length are AND'd.
'''
if not isinstance(path, str):
raise InvalidInputException("Path should be a string")
if not path:
raise InvalidInputException("test: no path given")
processor = lambda path, node, exists=exists, directory=directory, zero_length=zero_length: self._handle_test(path, node, exists, directory, zero_length)
try:
items = list(self._find_items([path], processor, include_toplevel=True))
if len(items) == 0:
return False
return all(items)
except FileNotFoundException, e:
if exists:
return False
else:
raise e
def _handle_test(self, path, node, exists, directory, zero_length):
return self._is_directory(directory, node) and self._is_zero_length(zero_length, node)
def text(self, paths, check_crc=False):
''' Takes a source file and outputs the file in text format.
The allowed formats are gzip and bzip2
:param paths: Paths to display
:type paths: list of strings
:param check_crc: Check for checksum errors
:type check_crc: boolean
:returns: a generator that yields strings
'''
if not isinstance(paths, list):
raise InvalidInputException("Paths should be a list")
if not paths:
raise InvalidInputException("text: no path given")
processor = lambda path, node, check_crc=check_crc: self._handle_text(path, node, check_crc)
for item in self._find_items(paths, processor, include_toplevel=True,
include_children=False, recurse=False):
if item:
yield item
def _handle_text(self, path, node, check_crc):
if self._is_dir(node):
raise DirectoryException("text: `%s': Is a directory" % path)
text = ''
for load in self._read_file(path, node, False, check_crc):
text += load
extension = os.path.splitext(path)[1]
if extension == '.gz':
return zlib.decompress(text, 16+zlib.MAX_WBITS)
elif extension == '.bz2':
return bz2.decompress(text)
else:
return text
def mkdir(self, paths, create_parent=False, mode=0755):
        ''' Create a directory
:param paths: Paths to create
:type paths: list of strings
:param create_parent: Also create the parent directories
:type create_parent: boolean
:param mode: Mode the directory should be created with
:type mode: int
:returns: a generator that yields dictionaries
'''
if not isinstance(paths, list):
raise InvalidInputException("Paths should be a list")
if not paths:
raise InvalidInputException("mkdirs: no path given")
for path in paths:
if not path.startswith("/"):
path = self._join_user_path(path)
fileinfo = self._get_file_info(path)
if not fileinfo:
try:
request = client_proto.MkdirsRequestProto()
request.src = path
request.masked.perm = mode
request.createParent = create_parent
response = self.service.mkdirs(request)
yield {"path": path, "result": response.result}
except RequestError, e:
yield {"path": path, "result": False, "error": str(e)}
else:
yield {"path": path, "result": False, "error": "mkdir: `%s': File exists" % path}
def serverdefaults(self):
'''Get server defaults
:returns: dictionary
**Example:**
>>> client.serverdefaults()
[{'writePacketSize': 65536, 'fileBufferSize': 4096, 'replication': 1, 'bytesPerChecksum': 512, 'trashInterval': 0L, 'blockSize': 134217728L, 'encryptDataTransfer': False, 'checksumType': 2}]
'''
request = client_proto.GetServerDefaultsRequestProto()
response = self.service.getServerDefaults(request).serverDefaults
return {'blockSize': response.blockSize, 'bytesPerChecksum': response.bytesPerChecksum,
'writePacketSize': response.writePacketSize, 'replication': response.replication,
'fileBufferSize': response.fileBufferSize, 'encryptDataTransfer': response.encryptDataTransfer,
'trashInterval': response.trashInterval, 'checksumType': response.checksumType}
def _is_directory(self, should_check, node):
if not should_check:
return True
return self._is_dir(node)
def _is_zero_length(self, should_check, node):
if not should_check:
return True
return node.length == 0
def _get_full_path(self, path, node):
if node.path:
return os.path.join(path, node.path)
else:
return path
def _create_file(self, path, replication, blocksize, overwrite):
if overwrite:
createFlag = 0x02
else:
createFlag = 0x01
# Issue a CreateRequestProto
request = client_proto.CreateRequestProto()
request.src = path
request.masked.perm = 0644
request.clientName = "snakebite"
request.createFlag = createFlag
request.createParent = False
request.replication = replication
request.blockSize = blocksize
# The response doesn't contain anything
self.service.create(request)
# Issue a CompleteRequestProto
request = client_proto.CompleteRequestProto()
request.src = path
request.clientName = "snakebite"
return self.service.complete(request)
def _read_file(self, path, node, tail_only, check_crc):
length = node.length
request = client_proto.GetBlockLocationsRequestProto()
request.src = path
request.length = length
if tail_only: # Only read last KB
request.offset = max(0, length - 1024)
else:
request.offset = 0L
response = self.service.getBlockLocations(request)
if response.locations.fileLength == 0: # Can't read empty file
yield ""
lastblock = response.locations.lastBlock
if tail_only:
if lastblock.b.blockId == response.locations.blocks[0].b.blockId:
num_blocks_tail = 1 # Tail is on last block
else:
num_blocks_tail = 2 # Tail is on two blocks
failed_nodes = []
total_bytes_read = 0
for block in response.locations.blocks:
length = block.b.numBytes
pool_id = block.b.poolId
offset_in_block = 0
if tail_only:
if num_blocks_tail == 2 and block.b.blockId != lastblock.b.blockId:
offset_in_block = block.b.numBytes - (1024 - lastblock.b.numBytes)
elif num_blocks_tail == 1:
offset_in_block = max(0, lastblock.b.numBytes - 1024)
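            # Worked example of the tail offsets (hypothetical sizes): with a 1 KB tail,
            # a 300-byte last block and a full previous block, the previous block is read
            # from its last 724 bytes and the last block is read in full, which together
            # yield the final 1024 bytes of the file.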
# Prioritize locations to read from
locations_queue = Queue.PriorityQueue() # Primitive queuing based on a node's past failure
for location in block.locs:
if location.id.storageID in failed_nodes:
locations_queue.put((1, location)) # Priority num, data
else:
locations_queue.put((0, location))
# Read data
successful_read = False
while not locations_queue.empty():
location = locations_queue.get()[1]
host = location.id.ipAddr
port = int(location.id.xferPort)
data_xciever = DataXceiverChannel(host, port)
if data_xciever.connect():
try:
for load in data_xciever.readBlock(length, pool_id, block.b.blockId, block.b.generationStamp, offset_in_block, check_crc):
offset_in_block += len(load)
total_bytes_read += len(load)
successful_read = True
yield load
except Exception, e:
log.error(e)
if not location.id.storageID in failed_nodes:
failed_nodes.append(location.id.storageID)
successful_read = False
else:
raise Exception
if successful_read:
break
if successful_read is False:
raise Exception("Failure to read block %s" % block.b.blockId)
def _find_items(self, paths, processor, include_toplevel=False, include_children=False, recurse=False, check_nonexistence=False):
''' Request file info from the NameNode and call the processor on the node(s) returned
:param paths:
A list of paths that need to be processed
:param processor:
Method that is called on an node. Method signature should be foo(path, node). For additional
(static) params, use a lambda.
:param include_toplevel:
Boolean to enable the inclusion of the first node found.
Example: listing a directory should not include the toplevel, but chmod should
only operate on the path that is input, so it should include the toplevel.
:param include_children:
Include children (when the path is a directory) in processing. Recurse will always
include children.
Example: listing a directory should include children, but chmod shouldn't.
:param recurse:
Recurse into children if they are directories.
'''
if not paths:
paths = [os.path.join("/user", pwd.getpwuid(os.getuid())[0])]
# Expand paths if necessary (/foo/{bar,baz} --> ['/foo/bar', '/foo/baz'])
paths = glob.expand_paths(paths)
for path in paths:
if not path.startswith("/"):
path = self._join_user_path(path)
log.debug("Trying to find path %s" % path)
if glob.has_magic(path):
log.debug("Dealing with globs in %s" % path)
for item in self._glob_find(path, processor, include_toplevel):
yield item
else:
fileinfo = self._get_file_info(path)
if not fileinfo and not check_nonexistence:
raise FileNotFoundException("`%s': No such file or directory" % path)
elif not fileinfo and check_nonexistence:
yield processor(path, None)
return
if (include_toplevel and fileinfo) or not self._is_dir(fileinfo.fs):
# Construct the full path before processing
full_path = self._get_full_path(path, fileinfo.fs)
log.debug("Added %s to to result set" % full_path)
entry = processor(full_path, fileinfo.fs)
yield entry
if self._is_dir(fileinfo.fs) and (include_children or recurse):
for node in self._get_dir_listing(path):
full_path = self._get_full_path(path, node)
entry = processor(full_path, node)
yield entry
# Recurse into directories
if recurse and self._is_dir(node):
# Construct the full path before processing
full_path = os.path.join(path, node.path)
for item in self._find_items([full_path],
processor,
include_toplevel=False,
include_children=False,
recurse=recurse):
yield item
def _get_dir_listing(self, path, start_after=''):
request = client_proto.GetListingRequestProto()
request.src = path
request.startAfter = start_after
request.needLocation = False
listing = self.service.getListing(request)
if not listing:
return
for node in listing.dirList.partialListing:
start_after = node.path
yield node
if listing.dirList.remainingEntries > 0:
for node in self._get_dir_listing(path, start_after):
yield node
def _glob_find(self, path, processor, include_toplevel):
'''Handle globs in paths.
This is done by listing the directory before a glob and checking which
node matches the initial glob. If there are more globs in the path,
we don't add the found children to the result, but traverse into paths
that did have a match.
'''
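        # Worked example (hypothetical path): for '/logs/2013-*/part-*' the first glob
        # sits in element 2, so check_path is '/logs', match_path is '/logs/2013-*' and
        # rest is 'part-*'; children of /logs are matched against match_path and the
        # remaining pattern is resolved for every match.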
# Remove the last / from the path, since hadoop doesn't understand it
if path.endswith("/"):
path = path[:-1]
# Split path elements and check where the first occurence of magic is
path_elements = path.split("/")
for i, element in enumerate(path_elements):
if glob.has_magic(element):
first_magic = i
break
# Create path that we check first to get a listing we match all children
# against. If the 2nd path element is a glob, we need to check "/", and
# we hardcode that, since "/".join(['']) doesn't return "/"
if first_magic == 1:
check_path = "/"
else:
check_path = "/".join(path_elements[:first_magic])
# Path that we need to match against
match_path = "/".join(path_elements[:first_magic + 1])
# Rest of the unmatched path. In case the rest is only one element long
# we prepend it with "/", since "/".join(['x']) doesn't return "/x"
rest_elements = path_elements[first_magic + 1:]
if len(rest_elements) == 1:
rest = rest_elements[0]
else:
rest = "/".join(rest_elements)
# Check if the path exists and that it's a directory (which it should..)
fileinfo = self._get_file_info(check_path)
if fileinfo and self._is_dir(fileinfo.fs):
            # List all child nodes and match them against the glob
for node in self._get_dir_listing(check_path):
full_path = self._get_full_path(check_path, node)
if fnmatch.fnmatch(full_path, match_path):
# If we have a match, but need to go deeper, we recurse
if rest and glob.has_magic(rest):
traverse_path = "/".join([full_path, rest])
for item in self._glob_find(traverse_path, processor, include_toplevel):
yield item
elif rest:
# we have more rest, but it's not magic, which is either a file or a directory
final_path = os.path.join(full_path, rest)
fi = self._get_file_info(final_path)
if fi and self._is_dir(fi.fs):
for n in self._get_dir_listing(final_path):
full_child_path = self._get_full_path(final_path, n)
yield processor(full_child_path, n)
elif fi:
yield processor(final_path, fi.fs)
else:
# If the matching node is a directory, we list the directory
# This is what the hadoop client does at least.
if self._is_dir(node):
if include_toplevel:
yield processor(full_path, node)
fp = self._get_full_path(check_path, node)
dir_list = self._get_dir_listing(fp)
if dir_list: # It might happen that the directory above has been removed
for n in dir_list:
full_child_path = self._get_full_path(fp, n)
yield processor(full_child_path, n)
else:
yield processor(full_path, node)
def _is_dir(self, entry):
return self.FILETYPES.get(entry.fileType) == "d"
def _is_file(self, entry):
return self.FILETYPES.get(entry.fileType) == "f"
def _get_file_info(self, path):
request = client_proto.GetFileInfoRequestProto()
request.src = path
return self.service.getFileInfo(request)
def _join_user_path(self, path):
return os.path.join("/user", pwd.getpwuid(os.getuid())[0], path)
def _remove_user_path(self, path):
dir_to_remove = os.path.join("/user", pwd.getpwuid(os.getuid())[0])
return path.replace(dir_to_remove+'/', "", 1)
class HAClient(Client):
''' Snakebite client with support for High Availability
HAClient is fully backwards compatible with the vanilla Client and can be used for a non HA cluster as well.
**Example:**
>>> from snakebite.client import HAClient
>>> from snakebite.namenode import Namenode
>>> n1 = Namenode("namenode1.mydomain", 54310)
>>> n2 = Namenode("namenode2.mydomain", 54310)
>>> client = HAClient([n1, n2], use_trash=True)
>>> for x in client.ls(['/']):
... print x
.. note::
Different Hadoop distributions use different protocol versions. Snakebite defaults to 9, but this can be set by passing
in the ``version`` parameter to the Namenode class constructor.
'''
@classmethod
def _wrap_methods(cls):
# Add HA support to all public Client methods, but only do this when we haven't done this before
for name, meth in inspect.getmembers(cls, inspect.ismethod):
if not name.startswith("_"): # Only public methods
if inspect.isgeneratorfunction(meth):
setattr(cls, name, cls._ha_gen_method(meth))
else:
setattr(cls, name, cls._ha_return_method(meth))
def __init__(self, namenodes, use_trash=False, effective_user=None):
'''
:param namenodes: Set of namenodes for HA setup
:type namenodes: list
:param use_trash: Use a trash when removing files.
:type use_trash: boolean
:param effective_user: Effective user for the HDFS operations (default: None - current user)
:type effective_user: string
'''
self.use_trash = use_trash
self.effective_user = effective_user
if not namenodes:
raise OutOfNNException("List of namenodes is empty - couldn't create the client")
self.namenode = self._switch_namenode(namenodes)
self.namenode.next()
def _switch_namenode(self, namenodes):
for namenode in namenodes:
log.debug("Switch to namenode: %s:%d" % (namenode.host, namenode.port))
yield super(HAClient, self).__init__(namenode.host,
namenode.port,
namenode.version,
self.use_trash,
self.effective_user)
else:
msg = "Request tried and failed for all %d namenodes: " % len(namenodes)
for namenode in namenodes:
msg += "\n\t* %s:%d" % (namenode.host, namenode.port)
msg += "\nLook into debug messages - add -D flag!"
raise OutOfNNException(msg)
def __handle_request_error(self, exception):
log.debug("Request failed with %s" % exception)
if exception.args[0].startswith("org.apache.hadoop.ipc.StandbyException"):
pass
else:
# There's a valid NN in active state, but there's still request error - raise
raise
self.namenode.next()
def __handle_socket_error(self, exception):
log.debug("Request failed with %s" % exception)
if exception.errno == errno.ECONNREFUSED:
# if NN is down or machine is not available, pass it:
pass
elif isinstance(exception, socket.timeout):
# if there's communication/socket timeout, pass it:
pass
else:
raise
self.namenode.next()
@staticmethod
def _ha_return_method(func):
''' Method decorator for 'return type' methods '''
def wrapped(self, *args, **kw):
while(True): # switch between all namenodes
try:
return func(self, *args, **kw)
except RequestError as e:
self.__handle_request_error(e)
except socket.error as e:
self.__handle_socket_error(e)
return wrapped
@staticmethod
def _ha_gen_method(func):
''' Method decorator for 'generator type' methods '''
def wrapped(self, *args, **kw):
while(True): # switch between all namenodes
try:
results = func(self, *args, **kw)
while(True): # yield all results
yield results.next()
except RequestError as e:
self.__handle_request_error(e)
except socket.error as e:
self.__handle_socket_error(e)
return wrapped
HAClient._wrap_methods()
class AutoConfigClient(HAClient):
''' A pure python HDFS client that support HA and is auto configured through the ``HADOOP_PATH`` environment variable.
HAClient is fully backwards compatible with the vanilla Client and can be used for a non HA cluster as well.
This client tries to read ``${HADOOP_PATH}/conf/hdfs-site.xml`` to get the address of the namenode.
The behaviour is the same as Client.
**Example:**
>>> from snakebite.client import AutoConfigClient
>>> client = AutoConfigClient()
>>> for x in client.ls(['/']):
... print x
.. note::
Different Hadoop distributions use different protocol versions. Snakebite defaults to 9, but this can be set by passing
in the ``hadoop_version`` parameter to the constructor.
'''
def __init__(self, hadoop_version=Namenode.DEFAULT_VERSION, effective_user=None):
'''
:param hadoop_version: What hadoop protocol version should be used (default: 9)
:type hadoop_version: int
:param effective_user: Effective user for the HDFS operations (default: None - current user)
:type effective_user: string
'''
configs = HDFSConfig.get_external_config()
nns = [Namenode(c['namenode'], c['port'], hadoop_version) for c in configs]
if not nns:
raise OutOfNNException("Tried and failed to find namenodes - couldn't created the client!")
super(AutoConfigClient, self).__init__(nns, HDFSConfig.use_trash, effective_user)
|
# -*- coding: utf-8 -*-
# Copyright (c) 2013 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
import snakebite.protobuf.ClientNamenodeProtocol_pb2 as client_proto
import snakebite.glob as glob
from snakebite.errors import RequestError
from snakebite.service import RpcService
from snakebite.errors import FileNotFoundException
from snakebite.errors import DirectoryException
from snakebite.errors import FileException
from snakebite.errors import InvalidInputException
from snakebite.errors import OutOfNNException
from snakebite.channel import DataXceiverChannel
from snakebite.config import HDFSConfig
from snakebite.namenode import Namenode
import Queue
import zlib
import bz2
import logging
import os
import os.path
import pwd
import fnmatch
import inspect
import socket
import errno
import time
log = logging.getLogger(__name__)
class Client(object):
''' A pure python HDFS client.
**Example:**
>>> from snakebite.client import Client
>>> client = Client("localhost", 54310, use_trash=False)
>>> for x in client.ls(['/']):
... print x
.. warning::
Many methods return generators, which means they need to be consumed to execute! Documentation will explicitly
specify which methods return generators.
.. note::
``paths`` parameters in methods are often passed as lists, since operations can work on multiple
paths.
.. note::
Parameters like ``include_children`` and ``recurse`` are not used
when paths contain globs.
.. note::
Different Hadoop distributions use different protocol versions. Snakebite defaults to 9, but this can be set by passing
in the ``hadoop_version`` parameter to the constructor.
'''
FILETYPES = {
1: "d",
2: "f",
3: "s"
}
def __init__(self, host, port=Namenode.DEFAULT_PORT, hadoop_version=Namenode.DEFAULT_VERSION, use_trash=False, effective_user=None):
'''
:param host: Hostname or IP address of the NameNode
:type host: string
:param port: RPC Port of the NameNode
:type port: int
:param hadoop_version: What hadoop protocol version should be used (default: 9)
:type hadoop_version: int
:param use_trash: Use a trash when removing files.
:type use_trash: boolean
:param effective_user: Effective user for the HDFS operations (default: None - current user)
:type effective_user: string
'''
if hadoop_version < 9:
raise Exception("Only protocol versions >= 9 supported")
self.host = host
self.port = port
self.service_stub_class = client_proto.ClientNamenodeProtocol_Stub
self.service = RpcService(self.service_stub_class, self.port, self.host, hadoop_version, effective_user)
self.use_trash = use_trash
self.trash = self._join_user_path(".Trash")
log.debug("Created client for %s:%s with trash=%s" % (host, port, use_trash))
def ls(self, paths, recurse=False, include_toplevel=False, include_children=True):
''' Issues 'ls' command and returns a list of maps that contain fileinfo
:param paths: Paths to list
:type paths: list
:param recurse: Recursive listing
:type recurse: boolean
:param include_toplevel: Include the given path in the listing. If the path is a file, include_toplevel is always True.
:type include_toplevel: boolean
:param include_children: Include child nodes in the listing.
:type include_children: boolean
:returns: a generator that yields dictionaries
**Examples:**
Directory listing
>>> list(client.ls(["/"]))
[{'group': u'supergroup', 'permission': 420, 'file_type': 'f', 'access_time': 1367317324982L, 'block_replication': 1, 'modification_time': 1367317325346L, 'length': 6783L, 'blocksize': 134217728L, 'owner': u'wouter', 'path': '/Makefile'}, {'group': u'supergroup', 'permission': 493, 'file_type': 'd', 'access_time': 0L, 'block_replication': 0, 'modification_time': 1367317325431L, 'length': 0L, 'blocksize': 0L, 'owner': u'wouter', 'path': '/build'}, {'group': u'supergroup', 'permission': 420, 'file_type': 'f', 'access_time': 1367317326510L, 'block_replication': 1, 'modification_time': 1367317326522L, 'length': 100L, 'blocksize': 134217728L, 'owner': u'wouter', 'path': '/index.asciidoc'}, {'group': u'supergroup', 'permission': 493, 'file_type': 'd', 'access_time': 0L, 'block_replication': 0, 'modification_time': 1367317326628L, 'length': 0L, 'blocksize': 0L, 'owner': u'wouter', 'path': '/source'}]
File listing
>>> list(client.ls(["/Makefile"]))
[{'group': u'supergroup', 'permission': 420, 'file_type': 'f', 'access_time': 1367317324982L, 'block_replication': 1, 'modification_time': 1367317325346L, 'length': 6783L, 'blocksize': 134217728L, 'owner': u'wouter', 'path': '/Makefile'}]
Get directory information
>>> list(client.ls(["/source"], include_toplevel=True, include_children=False))
[{'group': u'supergroup', 'permission': 493, 'file_type': 'd', 'access_time': 0L, 'block_replication': 0, 'modification_time': 1367317326628L, 'length': 0L, 'blocksize': 0L, 'owner': u'wouter', 'path': '/source'}]
'''
if not isinstance(paths, list):
raise InvalidInputException("Paths should be a list")
for item in self._find_items(paths, self._handle_ls,
include_toplevel=include_toplevel,
include_children=include_children,
recurse=recurse):
if item:
yield item
LISTING_ATTRIBUTES = ['length', 'owner', 'group', 'block_replication',
'modification_time', 'access_time', 'blocksize']
def _handle_ls(self, path, node):
''' Handle every node received for an ls request'''
entry = {}
entry["file_type"] = self.FILETYPES[node.fileType]
entry["permission"] = node.permission.perm
entry["path"] = path
for attribute in self.LISTING_ATTRIBUTES:
entry[attribute] = node.__getattribute__(attribute)
return entry
def chmod(self, paths, mode, recurse=False):
''' Change the mode for paths. This returns a generator of dictionaries containing the result of the operation.
:param paths: List of paths to chmod
:type paths: list
:param mode: Octal mode (e.g. 0755)
:type mode: int
:param recurse: Recursive chmod
:type recurse: boolean
:returns: a generator that yields dictionaries
.. note:: The top level directory is always included when `recurse=True`'''
if not isinstance(paths, list):
raise InvalidInputException("Paths should be a list")
if not paths:
raise InvalidInputException("chmod: no path given")
if not mode:
raise InvalidInputException("chmod: no mode given")
processor = lambda path, node, mode=mode: self._handle_chmod(path, node, mode)
for item in self._find_items(paths, processor, include_toplevel=True,
include_children=False, recurse=recurse):
if item:
yield item
def _handle_chmod(self, path, node, mode):
request = client_proto.SetPermissionRequestProto()
request.src = path
request.permission.perm = mode
self.service.setPermission(request)
return {"result": True, "path": path}
def chown(self, paths, owner, recurse=False):
''' Change the owner for paths. The owner can be specified as `user` or `user:group`
:param paths: List of paths to chmod
:type paths: list
:param owner: New owner
:type owner: string
:param recurse: Recursive chown
:type recurse: boolean
:returns: a generator that yields dictionaries
This always includes the toplevel when recursing.'''
if not isinstance(paths, list):
raise InvalidInputException("Paths should be a list")
if not paths:
raise InvalidInputException("chown: no path given")
if not owner:
raise InvalidInputException("chown: no owner given")
processor = lambda path, node, owner=owner: self._handle_chown(path, node, owner)
for item in self._find_items(paths, processor, include_toplevel=True,
include_children=False, recurse=recurse):
if item:
yield item
def _handle_chown(self, path, node, owner):
if ":" in owner:
(owner, group) = owner.split(":")
else:
group = ""
request = client_proto.SetOwnerRequestProto()
request.src = path
if owner:
request.username = owner
if group:
request.groupname = group
self.service.setOwner(request)
return {"result": True, "path": path}
def chgrp(self, paths, group, recurse=False):
''' Change the group of paths.
:param paths: List of paths to chgrp
:type paths: list
:param group: New group
:type group: string
:param recurse: Recursive chgrp
:type recurse: boolean
:returns: a generator that yields dictionaries
'''
if not isinstance(paths, list):
raise InvalidInputException("Paths should be a list")
if not paths:
raise InvalidInputException("chgrp: no paths given")
if not group:
raise InvalidInputException("chgrp: no group given")
owner = ":%s" % group
processor = lambda path, node, owner=owner: self._handle_chown(path, node, owner)
for item in self._find_items(paths, processor, include_toplevel=True,
include_children=False, recurse=recurse):
if item:
yield item
def count(self, paths):
''' Count files in a path
:param paths: List of paths to count
:type paths: list
:returns: a generator that yields dictionaries
**Examples:**
>>> list(client.count(['/']))
[{'spaceConsumed': 260185L, 'quota': 2147483647L, 'spaceQuota': 18446744073709551615L, 'length': 260185L, 'directoryCount': 9L, 'path': '/', 'fileCount': 34L}]
'''
if not isinstance(paths, list):
raise InvalidInputException("Paths should be a list")
if not paths:
raise InvalidInputException("count: no path given")
for item in self._find_items(paths, self._handle_count, include_toplevel=True,
include_children=False, recurse=False):
if item:
yield item
COUNT_ATTRIBUTES = ['length', 'fileCount', 'directoryCount', 'quota', 'spaceConsumed', 'spaceQuota']
def _handle_count(self, path, node):
request = client_proto.GetContentSummaryRequestProto()
request.path = path
response = self.service.getContentSummary(request)
entry = {"path": path}
for attribute in self.COUNT_ATTRIBUTES:
entry[attribute] = response.summary.__getattribute__(attribute)
return entry
def df(self):
''' Get FS information
:returns: a dictionary
**Examples:**
>>> client.df()
{'used': 491520L, 'capacity': 120137519104L, 'under_replicated': 0L, 'missing_blocks': 0L, 'filesystem': 'hdfs://localhost:54310', 'remaining': 19669295104L, 'corrupt_blocks': 0L}
'''
processor = lambda path, node: self._handle_df(path, node)
return list(self._find_items(['/'], processor, include_toplevel=True, include_children=False, recurse=False))[0]
def _handle_df(self, path, node):
request = client_proto.GetFsStatusRequestProto()
response = self.service.getFsStats(request)
entry = {"filesystem": "hdfs://%s:%d" % (self.host, self.port)}
for i in ['capacity', 'used', 'remaining', 'under_replicated',
'corrupt_blocks', 'missing_blocks']:
entry[i] = response.__getattribute__(i)
return entry
def du(self, paths, include_toplevel=False, include_children=True):
'''Returns size information for paths
:param paths: Paths to du
:type paths: list
:param include_toplevel: Include the given path in the result. If the path is a file, include_toplevel is always True.
:type include_toplevel: boolean
:param include_children: Include child nodes in the result.
:type include_children: boolean
:returns: a generator that yields dictionaries
**Examples:**
Children:
>>> list(client.du(['/']))
[{'path': '/Makefile', 'length': 6783L}, {'path': '/build', 'length': 244778L}, {'path': '/index.asciidoc', 'length': 100L}, {'path': '/source', 'length': 8524L}]
Directory only:
>>> list(client.du(['/'], include_toplevel=True, include_children=False))
[{'path': '/', 'length': 260185L}]
'''
if not isinstance(paths, list):
raise InvalidInputException("Paths should be a list")
if not paths:
raise InvalidInputException("du: no path given")
processor = lambda path, node: self._handle_du(path, node)
for item in self._find_items(paths, processor, include_toplevel=include_toplevel,
include_children=include_children, recurse=False):
if item:
yield item
def _handle_du(self, path, node):
if self._is_dir(node):
request = client_proto.GetContentSummaryRequestProto()
request.path = path
try:
response = self.service.getContentSummary(request)
return {"path": path, "length": response.summary.length}
except RequestError, e:
print e
else:
return {"path": path, "length": node.length}
def rename(self, paths, dst):
''' Rename (move) path(s) to a destination
:param paths: Source paths
:type paths: list
:param dst: destination
:type dst: string
:returns: a generator that yields dictionaries
'''
if not isinstance(paths, list):
raise InvalidInputException("Paths should be a list")
if not paths:
raise InvalidInputException("rename: no path given")
if not dst:
raise InvalidInputException("rename: no destination given")
processor = lambda path, node, dst=dst: self._handle_rename(path, node, dst)
for item in self._find_items(paths, processor, include_toplevel=True):
if item:
yield item
def _handle_rename(self, path, node, dst):
if not dst.startswith("/"):
dst = self._join_user_path(dst)
request = client_proto.RenameRequestProto()
request.src = path
request.dst = dst
response = self.service.rename(request)
return {"path": path, "result": response.result}
def delete(self, paths, recurse=False):
''' Delete paths
:param paths: Paths to delete
:type paths: list
:param recurse: Recursive delete (use with care!)
:type recurse: boolean
:returns: a generator that yields dictionaries
.. note:: Recursive deletion uses the NameNode recursive deletion functionality
instead of letting the client recurse. Hadoop's client recurses
by itself and therefore shows all files and directories that are
deleted. Snakebite doesn't.
'''
if not isinstance(paths, list):
raise InvalidInputException("Paths should be a list")
if not paths:
raise InvalidInputException("delete: no path given")
processor = lambda path, node, recurse=recurse: self._handle_delete(path, node, recurse)
for item in self._find_items(paths, processor, include_toplevel=True):
if item:
yield item
def _handle_delete(self, path, node, recurse):
if (self._is_dir(node) and not recurse):
raise DirectoryException("rm: `%s': Is a directory" % path)
# None might be passed in for recurse
if not recurse:
recurse = False
if self.__should_move_to_trash(path):
if path.endswith("/"):
suffix_path = path[1:-1]
else:
suffix_path = path[1:]
trash_path = os.path.join(self.trash, "Current", suffix_path)
if trash_path.endswith("/"):
trash_path = trash_path[:-1]
base_trash_path = os.path.join(self.trash, "Current", os.path.dirname(suffix_path))
if base_trash_path.endswith("/"):
base_trash_path = base_trash_path[:-1]
# Try twice, in case checkpoint between mkdir() and rename()
for i in range(0, 2):
list(self.mkdir([base_trash_path], create_parent=True, mode=0700))
original_path = trash_path
while self.test(trash_path, exists=True):
unix_timestamp = str(int(time.time() * 1000))
trash_path = "%s%s" % (original_path, unix_timestamp)
result = self._handle_rename(path, node, trash_path)
if result['result']:
result['message'] = ". Moved %s to %s" % (path, trash_path)
return result
raise Exception("Failed to move to trash: %s" % path)
else:
request = client_proto.DeleteRequestProto()
request.src = path
request.recursive = recurse
response = self.service.delete(request)
return {"path": path, "result": response.result}
def __should_move_to_trash(self, path):
if not self.use_trash:
return False
if path.startswith(self.trash):
return False # Path already in trash
if os.path.dirname(self.trash).startswith(path):
raise Exception("Cannot move %s to the trash, as it contains the trash" % path)
return True
def rmdir(self, paths):
''' Delete a directory
:param paths: Paths to delete
:type paths: list
:returns: a generator that yields dictionaries
.. note:: Directories have to be empty.
'''
if not isinstance(paths, list):
raise InvalidInputException("Paths should be a list")
if not paths:
raise InvalidInputException("rmdir: no path given")
processor = lambda path, node: self._handle_rmdir(path, node)
for item in self._find_items(paths, processor, include_toplevel=True):
if item:
yield item
def _handle_rmdir(self, path, node):
if not self._is_dir(node):
raise DirectoryException("rmdir: `%s': Is not a directory" % path)
# Check if the directory is empty
files = self.ls([path])
if len(list(files)) > 0:
raise DirectoryException("rmdir: `%s': Directory is not empty" % path)
return self._handle_delete(path, node, recurse=True)
def touchz(self, paths, replication=None, blocksize=None):
''' Create a zero-length file or update the timestamp on a zero-length file
:param paths: Paths
:type paths: list
:param replication: Replication factor
:type replication: int
:param blocksize: Block size (in bytes) of the newly created file
:type blocksize: int
:returns: a generator that yields dictionaries
'''
if not isinstance(paths, list):
raise InvalidInputException("Paths should be a list")
if not paths:
raise InvalidInputException("touchz: no path given")
# Let's get the blocksize and replication from the server defaults
# provided by the namenode if they are not specified
if not replication or not blocksize:
defaults = self.serverdefaults()
if not replication:
replication = defaults['replication']
if not blocksize:
blocksize = defaults['blockSize']
processor = lambda path, node, replication=replication, blocksize=blocksize: self._handle_touchz(path, node, replication, blocksize)
for item in self._find_items(paths, processor, include_toplevel=True, check_nonexistence=True, include_children=False):
if item:
yield item
def _handle_touchz(self, path, node, replication, blocksize):
# Item already exists
if node:
if node.length != 0:
raise FileException("touchz: `%s': Not a zero-length file" % path)
if self._is_dir(node):
raise DirectoryException("touchz: `%s': Is a directory" % path)
response = self._create_file(path, replication, blocksize, overwrite=True)
else:
# Check if the parent directory exists
parent = self._get_file_info(os.path.dirname(path))
if not parent:
raise DirectoryException("touchz: `%s': No such file or directory" % path)
else:
response = self._create_file(path, replication, blocksize, overwrite=False)
return {"path": path, "result": response.result}
def setrep(self, paths, replication, recurse=False):
''' Set the replication factor for paths
:param paths: Paths
:type paths: list
:param replication: Replication factor
:type replication: int
:param recurse: Apply replication factor recursively
:type recurse: boolean
:returns: a generator that yields dictionaries
'''
if not isinstance(paths, list):
raise InvalidInputException("Paths should be a list")
if not paths:
raise InvalidInputException("setrep: no path given")
if not replication:
raise InvalidInputException("setrep: no replication given")
processor = lambda path, node, replication=replication: self._handle_setrep(path, node, replication)
for item in self._find_items(paths, processor, include_toplevel=True,
include_children=False, recurse=recurse):
if item:
yield item
def _handle_setrep(self, path, node, replication):
if not self._is_dir(node):
request = client_proto.SetReplicationRequestProto()
request.src = path
request.replication = replication
response = self.service.setReplication(request)
return {"result": response.result, "path": path}
def cat(self, paths, check_crc=False):
''' Fetch all files that match the source file pattern
and display their content on stdout.
:param paths: Paths to display
:type paths: list of strings
:param check_crc: Check for checksum errors
:type check_crc: boolean
:returns: a generator that yields strings
'''
if not isinstance(paths, list):
raise InvalidInputException("Paths should be a list")
if not paths:
raise InvalidInputException("cat: no path given")
processor = lambda path, node, check_crc=check_crc: self._handle_cat(path, node, check_crc)
for item in self._find_items(paths, processor, include_toplevel=True,
include_children=False, recurse=False):
if item:
yield item
def _handle_cat(self, path, node, check_crc):
if self._is_dir(node):
raise DirectoryException("cat: `%s': Is a directory" % path)
for load in self._read_file(path, node, False, check_crc):
if load:
yield load
def copyToLocal(self, paths, dst, check_crc=False):
''' Copy files that match the file source pattern
to the local name. Source is kept. When copying multiple
files, the destination must be a directory.
:param paths: Paths to copy
:type paths: list of strings
:param dst: Destination path
:type dst: string
:param check_crc: Check for checksum errors
:type check_crc: boolean
:returns: a generator that yields strings
'''
if not isinstance(paths, list):
raise InvalidInputException("Paths should be a list")
if not paths:
raise InvalidInputException("copyToLocal: no path given")
if not dst:
raise InvalidInputException("copyToLocal: no destination given")
self.base_source = None
processor = lambda path, node, dst=dst, check_crc=check_crc: self._handle_copyToLocal(path, node, dst, check_crc)
for item in self._find_items(paths, processor, include_toplevel=True, recurse=True, include_children=True):
if item:
yield item
def _handle_copyToLocal(self, path, node, dst, check_crc):
# Calculate base directory using the first node only
if self.base_source is None:
self.dst = os.path.abspath(dst)
if os.path.isdir(dst): # If input destination is an existing directory, include toplevel
self.base_source = os.path.dirname(path)
else:
self.base_source = path
if self.base_source.endswith("/"):
self.base_source = self.base_source[:-1]
target = dst + (path.replace(self.base_source, "", 1))
error = ""
result = False
# Target is an existing file
if os.path.isfile(target):
error += "file exists"
# Target is an existing directory
elif os.path.isdir(target):
error += "directory exists"
# Source is a directory
elif self._is_dir(node):
os.makedirs(target, mode=node.permission.perm)
result = True
# Source is a file
elif self._is_file(node):
temporary_target = "%s._COPYING_" % target
f = open(temporary_target, 'w')
try:
for load in self._read_file(path, node, tail_only=False, check_crc=check_crc):
f.write(load)
f.close()
os.rename(temporary_target, target)
result = True
except Exception, e:
result = False
error = e
if os.path.isfile(temporary_target):
os.remove(temporary_target)
return {"path": target, "result": result, "error": error, "source_path": path}
def getmerge(self, path, dst, newline=False, check_crc=False):
''' Get all the files in the directories that
match the source file pattern and merge and sort them into
a single file on the local fs.
:param path: Directory containing files that will be merged
:type path: string
:param dst: Path of the file that will be written
:type dst: string
:param newline: Add a newline character at the end of each file.
:type newline: boolean
:param check_crc: Check for checksum errors
:type check_crc: boolean
:returns: a generator that yields dictionaries
'''
if not path:
raise InvalidInputException("getmerge: no path given")
if not dst:
raise InvalidInputException("getmerge: no destination given")
temporary_target = "%s._COPYING_" % dst
f = open(temporary_target, 'w')
processor = lambda path, node, dst=dst, check_crc=check_crc: self._handle_getmerge(path, node, dst, check_crc)
try:
for item in self._find_items([path], processor, include_toplevel=True, recurse=False, include_children=True):
for load in item:
if load['result']:
f.write(load['response'])
elif load['error'] != '':
if os.path.isfile(temporary_target):
os.remove(temporary_target)
raise Exception(load['error'])
if newline and load['response']:
f.write("\n")
yield {"path": dst, "response": '', "result": True, "error": load['error'], "source_path": path}
finally:
if os.path.isfile(temporary_target):
f.close()
os.rename(temporary_target, dst)
def _handle_getmerge(self, path, node, dst, check_crc):
log.debug("in handle getmerge")
error = ''
if not self._is_file(node):
# Target is an existing file
if os.path.isfile(dst):
error += "target file exists"
# Target is an existing directory
elif os.path.isdir(dst):
error += "target directory exists"
yield {"path": path, "response": '', "result": False, "error": error, "source_path": path}
# Source is a file
else:
if node.length == 0: # Empty file
yield {"path": path, "response": '', "result": True, "error": error, "source_path": path}
else:
try:
for load in self._read_file(path, node, tail_only=False, check_crc=check_crc):
yield {"path": path, "response": load, "result": True, "error": error, "source_path": path}
except Exception, e:
error = e
yield {"path": path, "response": '', "result": False, "error": error, "source_path": path}
def stat(self, paths):
''' Stat a file
:param paths: Path (as a single-element list)
:type paths: list
:returns: a dictionary
**Example:**
>>> client.stat(['/index.asciidoc'])
{'blocksize': 134217728L, 'owner': u'wouter', 'length': 100L, 'access_time': 1367317326510L, 'group': u'supergroup', 'permission': 420, 'file_type': 'f', 'path': '/index.asciidoc', 'modification_time': 1367317326522L, 'block_replication': 1}
'''
if not isinstance(paths, list):
raise InvalidInputException("Paths should be a list")
if not paths:
raise InvalidInputException("stat: no path given")
processor = lambda path, node: self._handle_stat(path, node)
return list(self._find_items(paths, processor, include_toplevel=True))[0]
def _handle_stat(self, path, node):
return {"path": path,
"file_type": self.FILETYPES[node.fileType],
"length": node.length,
"permission": node.permission.perm,
"owner": node.owner,
"group": node.group,
"modification_time": node.modification_time,
"access_time": node.access_time,
"block_replication": node.block_replication,
"blocksize": node.blocksize}
def tail(self, path, append=False):
# Note: append is currently not implemented.
''' Show the last 1KB of the file.
:param path: Path to read
:type path: string
:param append: Show appended data as the file grows (currently not implemented).
:type append: boolean
:returns: a generator that yields strings
'''
if not path:
raise InvalidInputException("tail: no path given")
processor = lambda path, node, tail_only=True, append=append: self._handle_tail(path, node, tail_only, append)
for item in self._find_items([path], processor, include_toplevel=True,
include_children=False, recurse=False):
if item:
yield item
def _handle_tail(self, path, node, tail_only, append):
data = ''
for load in self._read_file(path, node, tail_only=True, check_crc=False):
data += load
# We read only the necessary packets but still
# need to cut off at the packet level.
return data[max(0, len(data)-1024):len(data)]
def test(self, path, exists=False, directory=False, zero_length=False):
'''Test if a path exists, is a directory, or has zero length
:param path: Path to test
:type path: string
:param exists: Check if the path exists
:type exists: boolean
:param directory: Check if the path is a directory
:type directory: boolean
:param zero_length: Check if the path is zero-length
:type zero_length: boolean
:returns: a boolean
.. note:: directory and zero length are AND'd.
'''
if not isinstance(path, str):
raise InvalidInputException("Path should be a string")
if not path:
raise InvalidInputException("test: no path given")
processor = lambda path, node, exists=exists, directory=directory, zero_length=zero_length: self._handle_test(path, node, exists, directory, zero_length)
try:
items = list(self._find_items([path], processor, include_toplevel=True))
if len(items) == 0:
return False
return all(items)
except FileNotFoundException, e:
if exists:
return False
else:
raise e
def _handle_test(self, path, node, exists, directory, zero_length):
return self._is_directory(directory, node) and self._is_zero_length(zero_length, node)
def text(self, paths, check_crc=False):
''' Takes a source file and outputs the file in text format.
The allowed formats are gzip and bzip2
:param paths: Paths to display
:type paths: list of strings
:param check_crc: Check for checksum errors
:type check_crc: boolean
:returns: a generator that yields strings
'''
if not isinstance(paths, list):
raise InvalidInputException("Paths should be a list")
if not paths:
raise InvalidInputException("text: no path given")
processor = lambda path, node, check_crc=check_crc: self._handle_text(path, node, check_crc)
for item in self._find_items(paths, processor, include_toplevel=True,
include_children=False, recurse=False):
if item:
yield item
def _handle_text(self, path, node, check_crc):
if self._is_dir(node):
raise DirectoryException("text: `%s': Is a directory" % path)
text = ''
for load in self._read_file(path, node, False, check_crc):
text += load
extension = os.path.splitext(path)[1]
if extension == '.gz':
return zlib.decompress(text, 16+zlib.MAX_WBITS)
elif extension == '.bz2':
return bz2.decompress(text)
else:
return text
def mkdir(self, paths, create_parent=False, mode=0755):
''' Create a directory
:param paths: Paths to create
:type paths: list of strings
:param create_parent: Also create the parent directories
:type create_parent: boolean
:param mode: Mode the directory should be created with
:type mode: int
:returns: a generator that yields dictionaries
'''
if not isinstance(paths, list):
raise InvalidInputException("Paths should be a list")
if not paths:
raise InvalidInputException("mkdirs: no path given")
for path in paths:
if not path.startswith("/"):
path = self._join_user_path(path)
fileinfo = self._get_file_info(path)
if not fileinfo:
try:
request = client_proto.MkdirsRequestProto()
request.src = path
request.masked.perm = mode
request.createParent = create_parent
response = self.service.mkdirs(request)
yield {"path": path, "result": response.result}
except RequestError, e:
yield {"path": path, "result": False, "error": str(e)}
else:
yield {"path": path, "result": False, "error": "mkdir: `%s': File exists" % path}
def serverdefaults(self):
'''Get server defaults
:returns: dictionary
**Example:**
>>> client.serverdefaults()
[{'writePacketSize': 65536, 'fileBufferSize': 4096, 'replication': 1, 'bytesPerChecksum': 512, 'trashInterval': 0L, 'blockSize': 134217728L, 'encryptDataTransfer': False, 'checksumType': 2}]
'''
request = client_proto.GetServerDefaultsRequestProto()
response = self.service.getServerDefaults(request).serverDefaults
return {'blockSize': response.blockSize, 'bytesPerChecksum': response.bytesPerChecksum,
'writePacketSize': response.writePacketSize, 'replication': response.replication,
'fileBufferSize': response.fileBufferSize, 'encryptDataTransfer': response.encryptDataTransfer,
'trashInterval': response.trashInterval, 'checksumType': response.checksumType}
def _is_directory(self, should_check, node):
if not should_check:
return True
return self._is_dir(node)
def _is_zero_length(self, should_check, node):
if not should_check:
return True
return node.length == 0
def _get_full_path(self, path, node):
if node.path:
return os.path.join(path, node.path)
else:
return path
def _create_file(self, path, replication, blocksize, overwrite):
if overwrite:
createFlag = 0x02
else:
createFlag = 0x01
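# These flag values correspond to Hadoop's CreateFlagProto (CREATE = 0x01, OVERWRITE = 0x02).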
# Issue a CreateRequestProto
request = client_proto.CreateRequestProto()
request.src = path
request.masked.perm = 0644
request.clientName = "snakebite"
request.createFlag = createFlag
request.createParent = False
request.replication = replication
request.blockSize = blocksize
# The response doesn't contain anything
self.service.create(request)
# Issue a CompleteRequestProto
request = client_proto.CompleteRequestProto()
request.src = path
request.clientName = "snakebite"
return self.service.complete(request)
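# Creating a zero-length file needs only the two RPCs above: create() registers the file
# and complete() closes it; no data blocks are ever written.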
def _read_file(self, path, node, tail_only, check_crc):
length = node.length
request = client_proto.GetBlockLocationsRequestProto()
request.src = path
request.length = length
if tail_only: # Only read last KB
request.offset = max(0, length - 1024)
else:
request.offset = 0L
response = self.service.getBlockLocations(request)
if response.locations.fileLength == 0: # Can't read empty file
yield ""
lastblock = response.locations.lastBlock
if tail_only:
if lastblock.b.blockId == response.locations.blocks[0].b.blockId:
num_blocks_tail = 1 # Tail is on last block
else:
num_blocks_tail = 2 # Tail is on two blocks
failed_nodes = []
total_bytes_read = 0
for block in response.locations.blocks:
length = block.b.numBytes
pool_id = block.b.poolId
offset_in_block = 0
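# When only the tail is requested, the final kilobyte may start partway into this block
# (and can straddle the last two blocks), so adjust the in-block offset accordingly.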
if tail_only:
if num_blocks_tail == 2 and block.b.blockId != lastblock.b.blockId:
offset_in_block = block.b.numBytes - (1024 - lastblock.b.numBytes)
elif num_blocks_tail == 1:
offset_in_block = max(0, lastblock.b.numBytes - 1024)
# Prioritize locations to read from
locations_queue = Queue.PriorityQueue() # Primitive queuing based on a node's past failure
for location in block.locs:
if location.id.storageID in failed_nodes:
locations_queue.put((1, location)) # Priority num, data
else:
locations_queue.put((0, location))
# Read data
successful_read = False
while not locations_queue.empty():
location = locations_queue.get()[1]
host = location.id.ipAddr
port = int(location.id.xferPort)
data_xciever = DataXceiverChannel(host, port)
if data_xciever.connect():
try:
for load in data_xciever.readBlock(length, pool_id, block.b.blockId, block.b.generationStamp, offset_in_block, check_crc):
offset_in_block += len(load)
total_bytes_read += len(load)
successful_read = True
yield load
except Exception, e:
log.error(e)
if not location.id.storageID in failed_nodes:
failed_nodes.append(location.id.storageID)
successful_read = False
else:
raise Exception
if successful_read:
break
if successful_read is False:
raise Exception("Failure to read block %s" % block.b.blockId)
def _find_items(self, paths, processor, include_toplevel=False, include_children=False, recurse=False, check_nonexistence=False):
''' Request file info from the NameNode and call the processor on the node(s) returned
:param paths:
A list of paths that need to be processed
:param processor:
Method that is called on a node. Method signature should be foo(path, node). For additional
(static) params, use a lambda.
:param include_toplevel:
Boolean to enable the inclusion of the first node found.
Example: listing a directory should not include the toplevel, but chmod should
only operate on the path that is input, so it should include the toplevel.
:param include_children:
Include children (when the path is a directory) in processing. Recurse will always
include children.
Example: listing a directory should include children, but chmod shouldn't.
:param recurse:
Recurse into children if they are directories.
'''
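# For example, chmod() builds its processor like this (mirroring the call in chmod above):
#   processor = lambda path, node, mode=mode: self._handle_chmod(path, node, mode)
#   for item in self._find_items(paths, processor, include_toplevel=True):
#       ...  # each yielded item is whatever the processor returned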
if not paths:
paths = [os.path.join("/user", pwd.getpwuid(os.getuid())[0])]
# Expand paths if necessary (/foo/{bar,baz} --> ['/foo/bar', '/foo/baz'])
paths = glob.expand_paths(paths)
for path in paths:
if not path.startswith("/"):
path = self._join_user_path(path)
log.debug("Trying to find path %s" % path)
if glob.has_magic(path):
log.debug("Dealing with globs in %s" % path)
for item in self._glob_find(path, processor, include_toplevel):
yield item
else:
fileinfo = self._get_file_info(path)
if not fileinfo and not check_nonexistence:
raise FileNotFoundException("`%s': No such file or directory" % path)
elif not fileinfo and check_nonexistence:
yield processor(path, None)
return
if (include_toplevel and fileinfo) or not self._is_dir(fileinfo.fs):
# Construct the full path before processing
full_path = self._get_full_path(path, fileinfo.fs)
log.debug("Added %s to to result set" % full_path)
entry = processor(full_path, fileinfo.fs)
yield entry
if self._is_dir(fileinfo.fs) and (include_children or recurse):
for node in self._get_dir_listing(path):
full_path = self._get_full_path(path, node)
entry = processor(full_path, node)
yield entry
# Recurse into directories
if recurse and self._is_dir(node):
# Construct the full path before processing
full_path = os.path.join(path, node.path)
for item in self._find_items([full_path],
processor,
include_toplevel=False,
include_children=False,
recurse=recurse):
yield item
def _get_dir_listing(self, path, start_after=''):
request = client_proto.GetListingRequestProto()
request.src = path
request.startAfter = start_after
request.needLocation = False
listing = self.service.getListing(request)
if not listing:
return
for node in listing.dirList.partialListing:
start_after = node.path
yield node
if listing.dirList.remainingEntries > 0:
for node in self._get_dir_listing(path, start_after):
yield node
def _glob_find(self, path, processor, include_toplevel):
'''Handle globs in paths.
This is done by listing the directory before a glob and checking which
node matches the initial glob. If there are more globs in the path,
we don't add the found children to the result, but traverse into paths
that did have a match.
'''
# Remove the last / from the path, since hadoop doesn't understand it
if path.endswith("/"):
path = path[:-1]
# Split path elements and check where the first occurrence of magic is
path_elements = path.split("/")
for i, element in enumerate(path_elements):
if glob.has_magic(element):
first_magic = i
break
# Create path that we check first to get a listing we match all children
# against. If the 2nd path element is a glob, we need to check "/", and
# we hardcode that, since "/".join(['']) doesn't return "/"
if first_magic == 1:
check_path = "/"
else:
check_path = "/".join(path_elements[:first_magic])
# Path that we need to match against
match_path = "/".join(path_elements[:first_magic + 1])
# Rest of the unmatched path. If only one element remains, use it directly;
# otherwise join the remaining elements back together with "/".
rest_elements = path_elements[first_magic + 1:]
if len(rest_elements) == 1:
rest = rest_elements[0]
else:
rest = "/".join(rest_elements)
# Check if the path exists and that it's a directory (which it should..)
fileinfo = self._get_file_info(check_path)
if fileinfo and self._is_dir(fileinfo.fs):
# List all child nodes and match them against the glob
for node in self._get_dir_listing(check_path):
full_path = self._get_full_path(check_path, node)
if fnmatch.fnmatch(full_path, match_path):
# If we have a match, but need to go deeper, we recurse
if rest and glob.has_magic(rest):
traverse_path = "/".join([full_path, rest])
for item in self._glob_find(traverse_path, processor, include_toplevel):
yield item
elif rest:
# There is a remaining path, but it contains no glob, so it points to a file or a directory
final_path = os.path.join(full_path, rest)
fi = self._get_file_info(final_path)
if fi and self._is_dir(fi.fs):
for n in self._get_dir_listing(final_path):
full_child_path = self._get_full_path(final_path, n)
yield processor(full_child_path, n)
elif fi:
yield processor(final_path, fi.fs)
else:
# If the matching node is a directory, we list the directory
# This is what the hadoop client does at least.
if self._is_dir(node):
if include_toplevel:
yield processor(full_path, node)
fp = self._get_full_path(check_path, node)
dir_list = self._get_dir_listing(fp)
if dir_list: # It might happen that the directory above has been removed
for n in dir_list:
full_child_path = self._get_full_path(fp, n)
yield processor(full_child_path, n)
else:
yield processor(full_path, node)
def _is_dir(self, entry):
return self.FILETYPES.get(entry.fileType) == "d"
def _is_file(self, entry):
return self.FILETYPES.get(entry.fileType) == "f"
def _get_file_info(self, path):
request = client_proto.GetFileInfoRequestProto()
request.src = path
return self.service.getFileInfo(request)
def _join_user_path(self, path):
return os.path.join("/user", pwd.getpwuid(os.getuid())[0], path)
def _remove_user_path(self, path):
dir_to_remove = os.path.join("/user", pwd.getpwuid(os.getuid())[0])
return path.replace(dir_to_remove+'/', "", 1)
class HAClient(Client):
''' Snakebite client with support for High Availability
HAClient is fully backwards compatible with the vanilla Client and can be used for a non HA cluster as well.
**Example:**
>>> from snakebite.client import HAClient
>>> from snakebite.namenode import Namenode
>>> n1 = Namenode("namenode1.mydomain", 54310)
>>> n2 = Namenode("namenode2.mydomain", 54310)
>>> client = HAClient([n1, n2], use_trash=True)
>>> for x in client.ls(['/']):
... print x
.. note::
Different Hadoop distributions use different protocol versions. Snakebite defaults to 9, but this can be set by passing
in the ``version`` parameter to the Namenode class constructor.
'''
@classmethod
def _wrap_methods(cls):
# Add HA support to all public Client methods, but only do this when we haven't done this before
for name, meth in inspect.getmembers(cls, inspect.ismethod):
if not name.startswith("_"): # Only public methods
if inspect.isgeneratorfunction(meth):
setattr(cls, name, cls._ha_gen_method(meth))
else:
setattr(cls, name, cls._ha_return_method(meth))
def __init__(self, namenodes, use_trash=False, effective_user=None):
'''
:param namenodes: Set of namenodes for HA setup
:type namenodes: list
:param use_trash: Use a trash when removing files.
:type use_trash: boolean
:param effective_user: Effective user for the HDFS operations (default: None - current user)
:type effective_user: string
'''
self.use_trash = use_trash
self.effective_user = effective_user
if not namenodes:
raise OutOfNNException("List of namenodes is empty - couldn't create the client")
self.namenode = self._switch_namenode(namenodes)
self.namenode.next()
def _switch_namenode(self, namenodes):
for namenode in namenodes:
log.debug("Switch to namenode: %s:%d" % (namenode.host, namenode.port))
yield super(HAClient, self).__init__(namenode.host,
namenode.port,
namenode.version,
self.use_trash,
self.effective_user)
else:
msg = "Request tried and failed for all %d namenodes: " % len(namenodes)
for namenode in namenodes:
msg += "\n\t* %s:%d" % (namenode.host, namenode.port)
msg += "\nLook into debug messages - add -D flag!"
raise OutOfNNException(msg)
def __handle_request_error(self, exception):
log.debug("Request failed with %s" % exception)
if exception.args[0].startswith("org.apache.hadoop.ipc.StandbyException"):
pass
else:
# There's a valid NN in active state, but there's still request error - raise
raise
self.namenode.next()
def __handle_socket_error(self, exception):
log.debug("Request failed with %s" % exception)
if exception.errno == errno.ECONNREFUSED:
# if NN is down or machine is not available, pass it:
pass
elif isinstance(exception, socket.timeout):
# if there's communication/socket timeout, pass it:
pass
else:
raise
self.namenode.next()
@staticmethod
def _ha_return_method(func):
''' Method decorator for 'return type' methods '''
def wrapped(self, *args, **kw):
while(True): # switch between all namenodes
try:
return func(self, *args, **kw)
except RequestError as e:
self.__handle_request_error(e)
except socket.error as e:
self.__handle_socket_error(e)
return wrapped
@staticmethod
def _ha_gen_method(func):
''' Method decorator for 'generator type' methods '''
def wrapped(self, *args, **kw):
while(True): # switch between all namenodes
try:
results = func(self, *args, **kw)
while(True): # yield all results
yield results.next()
except RequestError as e:
self.__handle_request_error(e)
except socket.error as e:
self.__handle_socket_error(e)
return wrapped
HAClient._wrap_methods()
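# The module-level call above wraps every public Client method once at import time: each
# wrapped call is retried in a loop, and __handle_request_error/__handle_socket_error move
# self.namenode on to the next namenode whenever a standby or connection error is seen.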
class AutoConfigClient(HAClient):
''' A pure python HDFS client that supports HA and is auto-configured through the ``HADOOP_PATH`` environment variable.
HAClient is fully backwards compatible with the vanilla Client and can be used for a non HA cluster as well.
This client tries to read ``${HADOOP_PATH}/conf/hdfs-site.xml`` to get the address of the namenode.
The behaviour is the same as Client.
**Example:**
>>> from snakebite.client import AutoConfigClient
>>> client = AutoConfigClient()
>>> for x in client.ls(['/']):
... print x
.. note::
Different Hadoop distributions use different protocol versions. Snakebite defaults to 9, but this can be set by passing
in the ``hadoop_version`` parameter to the constructor.
'''
def __init__(self, hadoop_version=Namenode.DEFAULT_VERSION, effective_user=None):
'''
:param hadoop_version: What hadoop protocol version should be used (default: 9)
:type hadoop_version: int
:param effective_user: Effective user for the HDFS operations (default: None - current user)
:type effective_user: string
'''
configs = HDFSConfig.get_external_config()
nns = [Namenode(c['namenode'], c['port'], hadoop_version) for c in configs]
if not nns:
raise OutOfNNException("Tried and failed to find namenodes - couldn't created the client!")
super(AutoConfigClient, self).__init__(nns, HDFSConfig.use_trash, effective_user)
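# Minimal usage sketch (illustrative only; the host names below are placeholders). The three
# clients expose the same API and only differ in how the namenode(s) are located:
#
#   from snakebite.client import Client, HAClient, AutoConfigClient
#   from snakebite.namenode import Namenode
#
#   client = Client("namenode.example.com", 54310)            # single namenode
#   ha = HAClient([Namenode("nn1.example.com", 54310),
#                  Namenode("nn2.example.com", 54310)])        # explicit HA pair
#   auto = AutoConfigClient()                                  # reads ${HADOOP_PATH}/conf/hdfs-site.xml
#   for entry in client.ls(["/"]):
#       print entry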
|
from datetime import datetime
import requests
import time
import polyline
from os.path import exists
from os import environ
import json
import sys
import webbrowser
from builtins import input as askForInput
from activity_db_client import ActivityDBClient
from dotenv import load_dotenv
load_dotenv()
CLIENT_ID = environ.get("CLIENT_ID")
CLIENT_SECRET = environ.get("CLIENT_SECRET")
class StravaClient:
def __init__(self, page_size = 30):
self.page_size = page_size
self.activity_db = ActivityDBClient()
self.n_requests = 0
self.start_time = time.time()
if exists('./.credentials.json'):
print("Using existing credentials", flush=True)
with open('./.credentials.json', 'r') as input:
self.credentials = json.load(input)
if datetime.now().timestamp() > self.credentials["EXPIRES"]:
self._refresh_access_token()
else:
print("No credentials present. A web browser will open and ask for permission to read data from your Strava account. After authorizing the application, you will be directed to a nonexistant page with a 'code' in the URL")
webbrowser.open(f"https://www.strava.com/oauth/authorize?client_id={CLIENT_ID}&redirect_uri=http://localhost&response_type=code&scope=read,activity:read_all")
CODE = askForInput("Enter code from redirect URL:")
self._get_access_token(CODE)
def _refresh_access_token(self):
print("Refreshing expired access token...", flush=True)
self._authenticate({
"client_id": CLIENT_ID,
"client_secret": CLIENT_SECRET,
"refresh_token": self.credentials["REFRESH_TOKEN"],
"grant_type": "refresh_token"
})
def _get_access_token(self, code):
self._authenticate({
"client_id": CLIENT_ID,
"client_secret": CLIENT_SECRET,
"code": code,
"grant_type": "authorization_code"
})
def _authenticate(self, payload):
response = requests.post("https://www.strava.com/oauth/token", data=payload)
if response.status_code != 200:
print(response.json())
sys.exit(1)
data = response.json()
credentials = {
key: data[key] for key in ["access_token", "refresh_token", "expires_at"]
}
self.credentials = {
"ACCESS_TOKEN": credentials["access_token"],
"REFRESH_TOKEN": credentials["refresh_token"],
"EXPIRES": credentials["expires_at"]
}
# Save for future use
with open('./.credentials.json', 'w') as output:
json.dump(self.credentials, output)
def get_strava(self, url):
headers = {
"Authorization": f"Bearer {self.credentials["ACCESS_TOKEN"]}"
}
response = requests.get(f"https://www.strava.com/api/v3/{url}", headers=headers)
self.n_requests += 1
if response.status_code != 200:
print(response.status_code)
print(response.json())
raise Exception("Invalid response")
# Throttle to 600 req/15 minutes per API limits
if self.n_requests == 600 and time.time() - self.start_time <= (60*15):
print("Sleeping for 15 minutes...")
time.sleep(60*15)
self.n_requests = 0
self.start_time = time.time()
return response.json()
def get_activity_details(self, activity_id):
# We should also get description and device_name from here
activity_details = self.get_strava(f"activities/{str(activity_id)}")
# Not all activities have a geometry
geom = activity_details.get("map", {}).get("polyline", []) or []
return {
"description": activity_details.get("description"),
"device_name": activity_details.get("device_name"),
"geometry": polyline.decode(geom, geojson=True)
}
def get_gear(self, gear_id):
return self.get_strava(f"gear/{gear_id}")
def sync_gear(self):
orphan_gear = self.activity_db.get_orphan_gear()
if len(orphan_gear) > 0:
for gear_id in orphan_gear:
gear_details = self.get_gear(gear_id)
self.activity_db.insert_gear(gear_details)
print(f"Added {str(len(orphan_gear))} pieces of gear")
else:
print("No new pieces of gear")
def sync(self, direction = "back"):
if direction == "back":
before = self.activity_db.get_oldest_activity()
time_query = f"before={before}"
else:
after = self.activity_db.get_latest_activity()
time_query = f"after={after}"
load_more_activities = True
page = 1
while load_more_activities:
activities = self.get_strava(f"athlete/activities?page={page}&per_page={self.page_size}&{time_query}")
if len(activities) < self.page_size:
load_more_activities = False
page += 1
# Remove activities that are already in the db
new_activities = [activity for activity in activities if not self.activity_db.activity_exists(activity["id"])]
for activity in new_activities:
details = self.get_activity_details(activity["id"])
self.activity_db.insert_activity({**activity, **details})
# When done get unique gear_ids and diff with content of gear table
self.sync_gear()
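# Minimal usage sketch (assumes CLIENT_ID and CLIENT_SECRET are set in the environment
# and that an activity database is reachable through ActivityDBClient):
#
#   client = StravaClient(page_size=50)
#   client.sync(direction="back")       # backfill older activities
#   client.sync(direction="forward")    # any value other than "back" syncs newer activities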
|
"""Legacy device tracker classes."""
import asyncio
from datetime import timedelta
import hashlib
from typing import Any, List, Sequence
import voluptuous as vol
from homeassistant import util
from homeassistant.components import zone
from homeassistant.config import async_log_exception, load_yaml_config_file
from homeassistant.const import (
ATTR_ENTITY_ID,
ATTR_GPS_ACCURACY,
ATTR_ICON,
ATTR_LATITUDE,
ATTR_LONGITUDE,
ATTR_NAME,
CONF_ICON,
CONF_MAC,
CONF_NAME,
DEVICE_DEFAULT_NAME,
STATE_HOME,
STATE_NOT_HOME,
)
from homeassistant.core import callback
from homeassistant.exceptions import HomeAssistantError
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity_registry import async_get_registry
from homeassistant.helpers.restore_state import RestoreEntity
from homeassistant.helpers.typing import GPSType, HomeAssistantType
import homeassistant.util.dt as dt_util
from homeassistant.util.yaml import dump
from .const import (
ATTR_BATTERY,
ATTR_HOST_NAME,
ATTR_MAC,
ATTR_SOURCE_TYPE,
CONF_CONSIDER_HOME,
CONF_NEW_DEVICE_DEFAULTS,
CONF_TRACK_NEW,
DEFAULT_CONSIDER_HOME,
DEFAULT_TRACK_NEW,
DOMAIN,
LOGGER,
SOURCE_TYPE_GPS,
)
YAML_DEVICES = "known_devices.yaml"
EVENT_NEW_DEVICE = "device_tracker_new_device"
async def get_tracker(hass, config):
"""Create a tracker."""
yaml_path = hass.config.path(YAML_DEVICES)
conf = config.get(DOMAIN, [])
conf = conf[0] if conf else {}
consider_home = conf.get(CONF_CONSIDER_HOME, DEFAULT_CONSIDER_HOME)
defaults = conf.get(CONF_NEW_DEVICE_DEFAULTS, {})
track_new = conf.get(CONF_TRACK_NEW)
if track_new is None:
track_new = defaults.get(CONF_TRACK_NEW, DEFAULT_TRACK_NEW)
devices = await async_load_config(yaml_path, hass, consider_home)
tracker = DeviceTracker(hass, consider_home, track_new, defaults, devices)
return tracker
class DeviceTracker:
"""Representation of a device tracker."""
def __init__(
self,
hass: HomeAssistantType,
consider_home: timedelta,
track_new: bool,
defaults: dict,
devices: Sequence,
) -> None:
"""Initialize a device tracker."""
self.hass = hass
self.devices = {dev.dev_id: dev for dev in devices}
self.mac_to_dev = {dev.mac: dev for dev in devices if dev.mac}
self.consider_home = consider_home
self.track_new = (
track_new
if track_new is not None
else defaults.get(CONF_TRACK_NEW, DEFAULT_TRACK_NEW)
)
self.defaults = defaults
self._is_updating = asyncio.Lock()
for dev in devices:
if self.devices[dev.dev_id] is not dev:
LOGGER.warning("Duplicate device IDs detected %s", dev.dev_id)
if dev.mac and self.mac_to_dev[dev.mac] is not dev:
LOGGER.warning("Duplicate device MAC addresses detected %s", dev.mac)
def see(
self,
mac: str = None,
dev_id: str = None,
host_name: str = None,
location_name: str = None,
gps: GPSType = None,
gps_accuracy: int = None,
battery: int = None,
attributes: dict = None,
source_type: str = SOURCE_TYPE_GPS,
picture: str = None,
icon: str = None,
consider_home: timedelta = None,
):
"""Notify the device tracker that you see a device."""
self.hass.add_job(
self.async_see(
mac,
dev_id,
host_name,
location_name,
gps,
gps_accuracy,
battery,
attributes,
source_type,
picture,
icon,
consider_home,
)
)
async def async_see(
self,
mac: str = None,
dev_id: str = None,
host_name: str = None,
location_name: str = None,
gps: GPSType = None,
gps_accuracy: int = None,
battery: int = None,
attributes: dict = None,
source_type: str = SOURCE_TYPE_GPS,
picture: str = None,
icon: str = None,
consider_home: timedelta = None,
):
"""Notify the device tracker that you see a device.
This method is a coroutine.
"""
registry = await async_get_registry(self.hass)
if mac is None and dev_id is None:
raise HomeAssistantError("Neither mac or device id passed in")
if mac is not None:
mac = str(mac).upper()
device = self.mac_to_dev.get(mac)
if not device:
dev_id = util.slugify(host_name or "") or util.slugify(mac)
else:
dev_id = cv.slug(str(dev_id).lower())
device = self.devices.get(dev_id)
if device:
await device.async_seen(
host_name,
location_name,
gps,
gps_accuracy,
battery,
attributes,
source_type,
consider_home,
)
if device.track:
device.async_write_ha_state()
return
# Guard from calling see on entity registry entities.
entity_id = f"{DOMAIN}.{dev_id}"
if registry.async_is_registered(entity_id):
LOGGER.error(
"The see service is not supported for this entity %s", entity_id
)
return
# If no device can be found, create it
dev_id = util.ensure_unique_string(dev_id, self.devices.keys())
device = Device(
self.hass,
consider_home or self.consider_home,
self.track_new,
dev_id,
mac,
picture=picture,
icon=icon,
)
self.devices[dev_id] = device
if mac is not None:
self.mac_to_dev[mac] = device
await device.async_seen(
host_name,
location_name,
gps,
gps_accuracy,
battery,
attributes,
source_type,
)
if device.track:
device.async_write_ha_state()
self.hass.bus.async_fire(
EVENT_NEW_DEVICE,
{
ATTR_ENTITY_ID: device.entity_id,
ATTR_HOST_NAME: device.host_name,
ATTR_MAC: device.mac,
},
)
# update known_devices.yaml
self.hass.async_create_task(
self.async_update_config(
self.hass.config.path(YAML_DEVICES), dev_id, device
)
)
async def async_update_config(self, path, dev_id, device):
"""Add device to YAML configuration file.
This method is a coroutine.
"""
async with self._is_updating:
await self.hass.async_add_executor_job(
update_config, self.hass.config.path(YAML_DEVICES), dev_id, device
)
@callback
def async_update_stale(self, now: dt_util.dt.datetime):
"""Update stale devices.
This method must be run in the event loop.
"""
for device in self.devices.values():
if (device.track and device.last_update_home) and device.stale(now):
self.hass.async_create_task(device.async_update_ha_state(True))
async def async_setup_tracked_device(self):
"""Set up all not exists tracked devices.
This method is a coroutine.
"""
async def async_init_single_device(dev):
"""Init a single device_tracker entity."""
await dev.async_added_to_hass()
dev.async_write_ha_state()
tasks = []
for device in self.devices.values():
if device.track and not device.last_seen:
tasks.append(
self.hass.async_create_task(async_init_single_device(device))
)
if tasks:
await asyncio.wait(tasks)
class Device(RestoreEntity):
"""Represent a tracked device."""
host_name: str = None
location_name: str = None
gps: GPSType = None
gps_accuracy: int = 0
last_seen: dt_util.dt.datetime = None
consider_home: dt_util.dt.timedelta = None
battery: int = None
attributes: dict = None
icon: str = None
# Track if the last update of this device was HOME.
last_update_home = False
_state = STATE_NOT_HOME
def __init__(
self,
hass: HomeAssistantType,
consider_home: timedelta,
track: bool,
dev_id: str,
mac: str,
name: str = None,
picture: str = None,
gravatar: str = None,
icon: str = None,
) -> None:
"""Initialize a device."""
self.hass = hass
self.entity_id = f"{DOMAIN}.{dev_id}"
# Timedelta object how long we consider a device home if it is not
# detected anymore.
self.consider_home = consider_home
# Device ID
self.dev_id = dev_id
self.mac = mac
# If we should track this device
self.track = track
# Configured name
self.config_name = name
# Configured picture
if gravatar is not None:
self.config_picture = get_gravatar_for_email(gravatar)
else:
self.config_picture = picture
self.icon = icon
self.source_type = None
self._attributes = {}
@property
def name(self):
"""Return the name of the entity."""
return self.config_name or self.host_name or self.dev_id or DEVICE_DEFAULT_NAME
@property
def state(self):
"""Return the state of the device."""
return self._state
@property
def entity_picture(self):
"""Return the picture of the device."""
return self.config_picture
@property
def state_attributes(self):
"""Return the device state attributes."""
attr = {ATTR_SOURCE_TYPE: self.source_type}
if self.gps:
attr[ATTR_LATITUDE] = self.gps[0]
attr[ATTR_LONGITUDE] = self.gps[1]
attr[ATTR_GPS_ACCURACY] = self.gps_accuracy
if self.battery:
attr[ATTR_BATTERY] = self.battery
return attr
@property
def device_state_attributes(self):
"""Return device state attributes."""
return self._attributes
async def async_seen(
self,
host_name: str = None,
location_name: str = None,
gps: GPSType = None,
gps_accuracy=0,
battery: int = None,
attributes: dict = None,
source_type: str = SOURCE_TYPE_GPS,
consider_home: timedelta = None,
):
"""Mark the device as seen."""
self.source_type = source_type
self.last_seen = dt_util.utcnow()
self.host_name = host_name or self.host_name
self.location_name = location_name
self.consider_home = consider_home or self.consider_home
if battery:
self.battery = battery
if attributes:
self._attributes.update(attributes)
self.gps = None
if gps is not None:
try:
self.gps = float(gps[0]), float(gps[1])
self.gps_accuracy = gps_accuracy or 0
except (ValueError, TypeError, IndexError):
self.gps = None
self.gps_accuracy = 0
LOGGER.warning("Could not parse gps value for %s: %s", self.dev_id, gps)
await self.async_update()
def stale(self, now: dt_util.dt.datetime = None):
"""Return if device state is stale.
Async friendly.
"""
return (
self.last_seen is None
or (now or dt_util.utcnow()) - self.last_seen > self.consider_home
)
def mark_stale(self):
"""Mark the device state as stale."""
self._state = STATE_NOT_HOME
self.gps = None
self.last_update_home = False
async def async_update(self):
"""Update state of entity.
This method is a coroutine.
"""
if not self.last_seen:
return
if self.location_name:
self._state = self.location_name
elif self.gps is not None and self.source_type == SOURCE_TYPE_GPS:
zone_state = zone.async_active_zone(
self.hass, self.gps[0], self.gps[1], self.gps_accuracy
)
if zone_state is None:
self._state = STATE_NOT_HOME
elif zone_state.entity_id == zone.ENTITY_ID_HOME:
self._state = STATE_HOME
else:
self._state = zone_state.name
elif self.stale():
self.mark_stale()
else:
self._state = STATE_HOME
self.last_update_home = True
async def async_added_to_hass(self):
"""Add an entity."""
await super().async_added_to_hass()
state = await self.async_get_last_state()
if not state:
return
self._state = state.state
self.last_update_home = state.state == STATE_HOME
self.last_seen = dt_util.utcnow()
for attr, var in (
(ATTR_SOURCE_TYPE, "source_type"),
(ATTR_GPS_ACCURACY, "gps_accuracy"),
(ATTR_BATTERY, "battery"),
):
if attr in state.attributes:
setattr(self, var, state.attributes[attr])
if ATTR_LONGITUDE in state.attributes:
self.gps = (
state.attributes[ATTR_LATITUDE],
state.attributes[ATTR_LONGITUDE],
)
class DeviceScanner:
"""Device scanner object."""
hass: HomeAssistantType = None
def scan_devices(self) -> List[str]:
"""Scan for devices."""
raise NotImplementedError()
async def async_scan_devices(self) -> Any:
"""Scan for devices."""
return await self.hass.async_add_executor_job(self.scan_devices)
def get_device_name(self, device: str) -> str:
"""Get the name of a device."""
raise NotImplementedError()
async def async_get_device_name(self, device: str) -> Any:
"""Get the name of a device."""
return await self.hass.async_add_executor_job(self.get_device_name, device)
def get_extra_attributes(self, device: str) -> dict:
"""Get the extra attributes of a device."""
raise NotImplementedError()
async def async_get_extra_attributes(self, device: str) -> Any:
"""Get the extra attributes of a device."""
return await self.hass.async_add_executor_job(self.get_extra_attributes, device)
async def async_load_config(
path: str, hass: HomeAssistantType, consider_home: timedelta
):
"""Load devices from YAML configuration file.
This method is a coroutine.
"""
dev_schema = vol.Schema(
{
vol.Required(CONF_NAME): cv.string,
vol.Optional(CONF_ICON, default=None): vol.Any(None, cv.icon),
vol.Optional("track", default=False): cv.boolean,
vol.Optional(CONF_MAC, default=None): vol.Any(
None, vol.All(cv.string, vol.Upper)
),
vol.Optional("gravatar", default=None): vol.Any(None, cv.string),
vol.Optional("picture", default=None): vol.Any(None, cv.string),
vol.Optional(CONF_CONSIDER_HOME, default=consider_home): vol.All(
cv.time_period, cv.positive_timedelta
),
}
)
result = []
try:
devices = await hass.async_add_executor_job(load_yaml_config_file, path)
except HomeAssistantError as err:
LOGGER.error("Unable to load %s: %s", path, str(err))
return []
except FileNotFoundError:
return []
for dev_id, device in devices.items():
# Deprecated option. We just ignore it to avoid breaking change
device.pop("vendor", None)
device.pop("hide_if_away", None)
try:
device = dev_schema(device)
device["dev_id"] = cv.slugify(dev_id)
except vol.Invalid as exp:
async_log_exception(exp, dev_id, devices, hass)
else:
result.append(Device(hass, **device))
return result
def update_config(path: str, dev_id: str, device: Device):
"""Add device to YAML configuration file."""
with open(path, "a") as out:
device = {
device.dev_id: {
ATTR_NAME: device.name,
ATTR_MAC: device.mac,
ATTR_ICON: device.icon,
"picture": device.config_picture,
"track": device.track,
}
}
out.write("\n")
out.write(dump(device))
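# For reference, each call to update_config appends one entry of this shape to
# known_devices.yaml (the device id and values below are illustrative only):
#
#     my_phone:
#       name: My Phone
#       mac: AA:BB:CC:DD:EE:FF
#       icon: mdi:cellphone
#       picture:
#       track: true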
def get_gravatar_for_email(email: str):
"""Return an 80px Gravatar for the given email address.
Async friendly.
"""
return (
f"https://www.gravatar.com/avatar/"
f"{hashlib.md5(email.encode("utf-8").lower()).hexdigest()}.jpg?s=80&d=wavatar"
)
| """Legacy device tracker classes."""
import asyncio
from datetime import timedelta
import hashlib
from typing import Any, List, Sequence
import voluptuous as vol
from homeassistant import util
from homeassistant.components import zone
from homeassistant.config import async_log_exception, load_yaml_config_file
from homeassistant.const import (
ATTR_ENTITY_ID,
ATTR_GPS_ACCURACY,
ATTR_ICON,
ATTR_LATITUDE,
ATTR_LONGITUDE,
ATTR_NAME,
CONF_ICON,
CONF_MAC,
CONF_NAME,
DEVICE_DEFAULT_NAME,
STATE_HOME,
STATE_NOT_HOME,
)
from homeassistant.core import callback
from homeassistant.exceptions import HomeAssistantError
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity_registry import async_get_registry
from homeassistant.helpers.restore_state import RestoreEntity
from homeassistant.helpers.typing import GPSType, HomeAssistantType
import homeassistant.util.dt as dt_util
from homeassistant.util.yaml import dump
from .const import (
ATTR_BATTERY,
ATTR_HOST_NAME,
ATTR_MAC,
ATTR_SOURCE_TYPE,
CONF_CONSIDER_HOME,
CONF_NEW_DEVICE_DEFAULTS,
CONF_TRACK_NEW,
DEFAULT_CONSIDER_HOME,
DEFAULT_TRACK_NEW,
DOMAIN,
LOGGER,
SOURCE_TYPE_GPS,
)
YAML_DEVICES = "known_devices.yaml"
EVENT_NEW_DEVICE = "device_tracker_new_device"
async def get_tracker(hass, config):
"""Create a tracker."""
yaml_path = hass.config.path(YAML_DEVICES)
conf = config.get(DOMAIN, [])
conf = conf[0] if conf else {}
consider_home = conf.get(CONF_CONSIDER_HOME, DEFAULT_CONSIDER_HOME)
defaults = conf.get(CONF_NEW_DEVICE_DEFAULTS, {})
track_new = conf.get(CONF_TRACK_NEW)
if track_new is None:
track_new = defaults.get(CONF_TRACK_NEW, DEFAULT_TRACK_NEW)
devices = await async_load_config(yaml_path, hass, consider_home)
tracker = DeviceTracker(hass, consider_home, track_new, defaults, devices)
return tracker
class DeviceTracker:
"""Representation of a device tracker."""
def __init__(
self,
hass: HomeAssistantType,
consider_home: timedelta,
track_new: bool,
defaults: dict,
devices: Sequence,
) -> None:
"""Initialize a device tracker."""
self.hass = hass
self.devices = {dev.dev_id: dev for dev in devices}
self.mac_to_dev = {dev.mac: dev for dev in devices if dev.mac}
self.consider_home = consider_home
self.track_new = (
track_new
if track_new is not None
else defaults.get(CONF_TRACK_NEW, DEFAULT_TRACK_NEW)
)
self.defaults = defaults
self._is_updating = asyncio.Lock()
for dev in devices:
if self.devices[dev.dev_id] is not dev:
LOGGER.warning("Duplicate device IDs detected %s", dev.dev_id)
if dev.mac and self.mac_to_dev[dev.mac] is not dev:
LOGGER.warning("Duplicate device MAC addresses detected %s", dev.mac)
def see(
self,
mac: str = None,
dev_id: str = None,
host_name: str = None,
location_name: str = None,
gps: GPSType = None,
gps_accuracy: int = None,
battery: int = None,
attributes: dict = None,
source_type: str = SOURCE_TYPE_GPS,
picture: str = None,
icon: str = None,
consider_home: timedelta = None,
):
"""Notify the device tracker that you see a device."""
self.hass.add_job(
self.async_see(
mac,
dev_id,
host_name,
location_name,
gps,
gps_accuracy,
battery,
attributes,
source_type,
picture,
icon,
consider_home,
)
)
async def async_see(
self,
mac: str = None,
dev_id: str = None,
host_name: str = None,
location_name: str = None,
gps: GPSType = None,
gps_accuracy: int = None,
battery: int = None,
attributes: dict = None,
source_type: str = SOURCE_TYPE_GPS,
picture: str = None,
icon: str = None,
consider_home: timedelta = None,
):
"""Notify the device tracker that you see a device.
This method is a coroutine.
"""
registry = await async_get_registry(self.hass)
if mac is None and dev_id is None:
raise HomeAssistantError("Neither mac or device id passed in")
if mac is not None:
mac = str(mac).upper()
device = self.mac_to_dev.get(mac)
if not device:
dev_id = util.slugify(host_name or "") or util.slugify(mac)
else:
dev_id = cv.slug(str(dev_id).lower())
device = self.devices.get(dev_id)
if device:
await device.async_seen(
host_name,
location_name,
gps,
gps_accuracy,
battery,
attributes,
source_type,
consider_home,
)
if device.track:
device.async_write_ha_state()
return
# Guard from calling see on entity registry entities.
entity_id = f"{DOMAIN}.{dev_id}"
if registry.async_is_registered(entity_id):
LOGGER.error(
"The see service is not supported for this entity %s", entity_id
)
return
# If no device can be found, create it
dev_id = util.ensure_unique_string(dev_id, self.devices.keys())
device = Device(
self.hass,
consider_home or self.consider_home,
self.track_new,
dev_id,
mac,
picture=picture,
icon=icon,
)
self.devices[dev_id] = device
if mac is not None:
self.mac_to_dev[mac] = device
await device.async_seen(
host_name,
location_name,
gps,
gps_accuracy,
battery,
attributes,
source_type,
)
if device.track:
device.async_write_ha_state()
self.hass.bus.async_fire(
EVENT_NEW_DEVICE,
{
ATTR_ENTITY_ID: device.entity_id,
ATTR_HOST_NAME: device.host_name,
ATTR_MAC: device.mac,
},
)
# update known_devices.yaml
self.hass.async_create_task(
self.async_update_config(
self.hass.config.path(YAML_DEVICES), dev_id, device
)
)
async def async_update_config(self, path, dev_id, device):
"""Add device to YAML configuration file.
This method is a coroutine.
"""
async with self._is_updating:
await self.hass.async_add_executor_job(
update_config, self.hass.config.path(YAML_DEVICES), dev_id, device
)
@callback
def async_update_stale(self, now: dt_util.dt.datetime):
"""Update stale devices.
This method must be run in the event loop.
"""
for device in self.devices.values():
if (device.track and device.last_update_home) and device.stale(now):
self.hass.async_create_task(device.async_update_ha_state(True))
async def async_setup_tracked_device(self):
"""Set up all not exists tracked devices.
This method is a coroutine.
"""
async def async_init_single_device(dev):
"""Init a single device_tracker entity."""
await dev.async_added_to_hass()
dev.async_write_ha_state()
tasks = []
for device in self.devices.values():
if device.track and not device.last_seen:
tasks.append(
self.hass.async_create_task(async_init_single_device(device))
)
if tasks:
await asyncio.wait(tasks)
class Device(RestoreEntity):
"""Represent a tracked device."""
host_name: str = None
location_name: str = None
gps: GPSType = None
gps_accuracy: int = 0
last_seen: dt_util.dt.datetime = None
consider_home: dt_util.dt.timedelta = None
battery: int = None
attributes: dict = None
icon: str = None
# Track if the last update of this device was HOME.
last_update_home = False
_state = STATE_NOT_HOME
def __init__(
self,
hass: HomeAssistantType,
consider_home: timedelta,
track: bool,
dev_id: str,
mac: str,
name: str = None,
picture: str = None,
gravatar: str = None,
icon: str = None,
) -> None:
"""Initialize a device."""
self.hass = hass
self.entity_id = f"{DOMAIN}.{dev_id}"
# Timedelta object how long we consider a device home if it is not
# detected anymore.
self.consider_home = consider_home
# Device ID
self.dev_id = dev_id
self.mac = mac
# If we should track this device
self.track = track
# Configured name
self.config_name = name
# Configured picture
if gravatar is not None:
self.config_picture = get_gravatar_for_email(gravatar)
else:
self.config_picture = picture
self.icon = icon
self.source_type = None
self._attributes = {}
@property
def name(self):
"""Return the name of the entity."""
return self.config_name or self.host_name or self.dev_id or DEVICE_DEFAULT_NAME
@property
def state(self):
"""Return the state of the device."""
return self._state
@property
def entity_picture(self):
"""Return the picture of the device."""
return self.config_picture
@property
def state_attributes(self):
"""Return the device state attributes."""
attr = {ATTR_SOURCE_TYPE: self.source_type}
if self.gps:
attr[ATTR_LATITUDE] = self.gps[0]
attr[ATTR_LONGITUDE] = self.gps[1]
attr[ATTR_GPS_ACCURACY] = self.gps_accuracy
if self.battery:
attr[ATTR_BATTERY] = self.battery
return attr
@property
def device_state_attributes(self):
"""Return device state attributes."""
return self._attributes
async def async_seen(
self,
host_name: str = None,
location_name: str = None,
gps: GPSType = None,
gps_accuracy=0,
battery: int = None,
attributes: dict = None,
source_type: str = SOURCE_TYPE_GPS,
consider_home: timedelta = None,
):
"""Mark the device as seen."""
self.source_type = source_type
self.last_seen = dt_util.utcnow()
self.host_name = host_name or self.host_name
self.location_name = location_name
self.consider_home = consider_home or self.consider_home
if battery:
self.battery = battery
if attributes:
self._attributes.update(attributes)
self.gps = None
if gps is not None:
try:
self.gps = float(gps[0]), float(gps[1])
self.gps_accuracy = gps_accuracy or 0
except (ValueError, TypeError, IndexError):
self.gps = None
self.gps_accuracy = 0
LOGGER.warning("Could not parse gps value for %s: %s", self.dev_id, gps)
await self.async_update()
def stale(self, now: dt_util.dt.datetime = None):
"""Return if device state is stale.
Async friendly.
"""
return (
self.last_seen is None
or (now or dt_util.utcnow()) - self.last_seen > self.consider_home
)
def mark_stale(self):
"""Mark the device state as stale."""
self._state = STATE_NOT_HOME
self.gps = None
self.last_update_home = False
async def async_update(self):
"""Update state of entity.
This method is a coroutine.
"""
if not self.last_seen:
return
if self.location_name:
self._state = self.location_name
elif self.gps is not None and self.source_type == SOURCE_TYPE_GPS:
zone_state = zone.async_active_zone(
self.hass, self.gps[0], self.gps[1], self.gps_accuracy
)
if zone_state is None:
self._state = STATE_NOT_HOME
elif zone_state.entity_id == zone.ENTITY_ID_HOME:
self._state = STATE_HOME
else:
self._state = zone_state.name
elif self.stale():
self.mark_stale()
else:
self._state = STATE_HOME
self.last_update_home = True
async def async_added_to_hass(self):
"""Add an entity."""
await super().async_added_to_hass()
state = await self.async_get_last_state()
if not state:
return
self._state = state.state
self.last_update_home = state.state == STATE_HOME
self.last_seen = dt_util.utcnow()
for attr, var in (
(ATTR_SOURCE_TYPE, "source_type"),
(ATTR_GPS_ACCURACY, "gps_accuracy"),
(ATTR_BATTERY, "battery"),
):
if attr in state.attributes:
setattr(self, var, state.attributes[attr])
if ATTR_LONGITUDE in state.attributes:
self.gps = (
state.attributes[ATTR_LATITUDE],
state.attributes[ATTR_LONGITUDE],
)
class DeviceScanner:
"""Device scanner object."""
hass: HomeAssistantType = None
def scan_devices(self) -> List[str]:
"""Scan for devices."""
raise NotImplementedError()
async def async_scan_devices(self) -> Any:
"""Scan for devices."""
return await self.hass.async_add_executor_job(self.scan_devices)
def get_device_name(self, device: str) -> str:
"""Get the name of a device."""
raise NotImplementedError()
async def async_get_device_name(self, device: str) -> Any:
"""Get the name of a device."""
return await self.hass.async_add_executor_job(self.get_device_name, device)
def get_extra_attributes(self, device: str) -> dict:
"""Get the extra attributes of a device."""
raise NotImplementedError()
async def async_get_extra_attributes(self, device: str) -> Any:
"""Get the extra attributes of a device."""
return await self.hass.async_add_executor_job(self.get_extra_attributes, device)
async def async_load_config(
path: str, hass: HomeAssistantType, consider_home: timedelta
):
"""Load devices from YAML configuration file.
This method is a coroutine.
"""
dev_schema = vol.Schema(
{
vol.Required(CONF_NAME): cv.string,
vol.Optional(CONF_ICON, default=None): vol.Any(None, cv.icon),
vol.Optional("track", default=False): cv.boolean,
vol.Optional(CONF_MAC, default=None): vol.Any(
None, vol.All(cv.string, vol.Upper)
),
vol.Optional("gravatar", default=None): vol.Any(None, cv.string),
vol.Optional("picture", default=None): vol.Any(None, cv.string),
vol.Optional(CONF_CONSIDER_HOME, default=consider_home): vol.All(
cv.time_period, cv.positive_timedelta
),
}
)
result = []
try:
devices = await hass.async_add_executor_job(load_yaml_config_file, path)
except HomeAssistantError as err:
LOGGER.error("Unable to load %s: %s", path, str(err))
return []
except FileNotFoundError:
return []
for dev_id, device in devices.items():
# Deprecated option. We just ignore it to avoid breaking change
device.pop("vendor", None)
device.pop("hide_if_away", None)
try:
device = dev_schema(device)
device["dev_id"] = cv.slugify(dev_id)
except vol.Invalid as exp:
async_log_exception(exp, dev_id, devices, hass)
else:
result.append(Device(hass, **device))
return result
def update_config(path: str, dev_id: str, device: Device):
"""Add device to YAML configuration file."""
with open(path, "a") as out:
device = {
device.dev_id: {
ATTR_NAME: device.name,
ATTR_MAC: device.mac,
ATTR_ICON: device.icon,
"picture": device.config_picture,
"track": device.track,
}
}
out.write("\n")
out.write(dump(device))
def get_gravatar_for_email(email: str):
"""Return an 80px Gravatar for the given email address.
Async friendly.
"""
return (
f"https://www.gravatar.com/avatar/"
f"{hashlib.md5(email.encode('utf-8').lower()).hexdigest()}.jpg?s=80&d=wavatar"
)
|
#!/usr/bin/env python3
"""Linting script for rsp-environments.yaml."""
from __future__ import annotations
import argparse
import sys
from dataclasses import dataclass
from pathlib import Path
from typing import Any, Dict, List, Sequence
import requests
import yaml
def main() -> None:
args = parse_args()
found_issues = False
for p in args.file_paths:
path = Path(p)
if not path.is_file():
sys.exit(f"Could not find a file at {path}.")
issues = lint_file(path=path)
report_issues(path=path, issues=issues)
if len(issues) > 0:
found_issues = True
if found_issues:
sys.exit(1)
else:
sys.exit(0)
def parse_args() -> argparse.Namespace:
parser = argparse.ArgumentParser(
description="Lint data in the rsp-environments.yaml file"
)
parser.add_argument(
"file_paths", nargs="+", help="Path of the YAML file to check"
)
return parser.parse_args()
def lint_file(*, path: Path) -> List[Issue]:
"""Lint an RSP environemnts file (rsp-envrionments.yaml)."""
rsp_envs = yaml.safe_load(path.read_text())
issues: List[Issue] = []
for env_key, env_data in rsp_envs.items():
issues.extend(
lint_science_platform_values(
path=path, env_key=env_key, env_data=env_data
)
)
issues.extend(
lint_gafaelfawr_values(
path=path, env_key=env_key, env_data=env_data
)
)
return issues
def lint_science_platform_values(
*, path: Path, env_key: str, env_data: Dict[str, Any]
) -> List[Issue]:
"""Lint data that can be found in the science-platform/values-<env>.yaml
Helm values file.
"""
issues: List[Issue] = []
values_url = (
"https://raw.githubusercontent.com/lsst-sqre/phalanx/master/"
f"science-platform/values-{env_data["phalanx"]}.yaml"
)
r = requests.get(values_url)
if r.status_code != 200:
issues.append(
GeneralIssue(
path=path,
keys=[env_key, "phalanx"],
message=f"{env_data["phalanx"]} is not in the phalanx "
"repository.",
)
)
return issues
values_data = yaml.safe_load(r.text)
fqdn = values_data["fqdn"]
issues.extend(
lint_url(
path=path,
env_key=env_key,
env_data=env_data,
url_key="squareone",
expected_url=f"https://{fqdn}/",
)
)
issues.extend(
lint_url(
path=path,
env_key=env_key,
env_data=env_data,
url_key="nb",
expected_url=f"https://{fqdn}/nb",
)
)
if values_data.get("tap", {}).get("enabled", False):
issues.extend(
lint_url(
path=path,
env_key=env_key,
env_data=env_data,
url_key="tap",
expected_url=f"https://{fqdn}/api/tap",
)
)
else:
if "urls" in env_data and "tap" in env_data["urls"]:
issues.append(
GeneralIssue(
path=path,
keys=[env_key, "urls", "tap"],
message="TAP is not deployed in this environment",
)
)
return issues
def lint_url(
*,
path: Path,
env_key: str,
env_data: Dict[str, Any],
url_key: str,
expected_url: str,
) -> List[Issue]:
issues: List[Issue] = []
try:
url = env_data["urls"][url_key]
except KeyError:
issues.append(
GeneralIssue(
path=path,
keys=[env_key, "urls", url_key],
message="Key is missing. " f"Value should be {expected_url}",
)
)
return issues
if url != expected_url:
issues.append(
KeyValueIssue(
path=path,
keys=[env_key, "urls", url_key],
existing=url,
correct=expected_url,
)
)
return issues
def lint_gafaelfawr_values(
*, path: Path, env_key: str, env_data: Dict[str, Any]
) -> List[Issue]:
"""Lint data that can be found in the services/gafaelfawr/values-<env>.yaml
Helm values file.
"""
issues: List[Issue] = []
if "github_teams" not in env_data.keys():
# Nothing to check against
return issues
values_url = (
"https://raw.githubusercontent.com/lsst-sqre/phalanx/master/"
f"services/gafaelfawr/values-{env_data["phalanx"]}.yaml"
)
r = requests.get(values_url)
if r.status_code != 200:
issues.append(
GeneralIssue(
path=path,
keys=[env_key, "github_teams"],
message=f"A gafaelfwar values file is not available for "
f"{env_data["phalanx"]} in phalanx ",
)
)
return issues
values_data = yaml.safe_load(r.text)
try:
notebook_exec_configs = values_data["gafaelfawr"]["config"][
"groupMapping"
]["exec:notebook"]
except KeyError:
try:
notebook_exec_configs = values_data["gafaelfawr"]["config"][
"group_mapping"
]["exec:notebook"]
except KeyError:
issues.append(
GeneralIssue(
path=path,
keys=[env_key, "github_teams"],
message=f"A gafaelfwar exec:notebook group mapping is not "
f"available for {env_data["phalanx"]} in phalanx. "
f"Check {values_url}",
)
)
return issues
if len(notebook_exec_configs) != len(env_data["github_teams"]):
issues.append(
GeneralIssue(
path=path,
keys=[env_key, "github_teams"],
message=f"Number of teams does not match that in {values_url}",
)
)
for team in env_data["github_teams"]:
gafaelfawr_team_name = team.replace("/", "-")
if gafaelfawr_team_name not in notebook_exec_configs:
issues.append(
GeneralIssue(
path=path,
keys=[env_key, "github_teams"],
message=f"Team {team} does not match one of "
f"{notebook_exec_configs}",
)
)
return issues
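# For orientation, a hypothetical rsp-environments.yaml entry with the keys this
# script checks (the environment name, fqdn, and team below are made up):
#
#     idf-prod:
#       phalanx: idfprod
#       urls:
#         squareone: https://data.example.org/
#         nb: https://data.example.org/nb
#         tap: https://data.example.org/api/tap
#       github_teams:
#         - my-org/my-team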
def report_issues(*, path: Path, issues: Sequence[Issue]) -> None:
if len(issues) == 0:
print(f"✨ {path.name} looks right ✨")
return
print(f"\n🚨 Found {len(issues)} issues in {path.name}.")
for issue in issues:
print(issue)
@dataclass
class Issue:
path: Path
keys: List[str]
@property
def key_path(self) -> str:
return ".".join(self.keys)
@dataclass
class GeneralIssue(Issue):
message: str
def __str__(self) -> str:
return f"{self.path}: {self.key_path} - {self.message}"
@dataclass
class KeyValueIssue(Issue):
existing: Any
correct: Any
def __str__(self) -> str:
return (
f"{self.path}: {self.key_path} is '{self.existing}', but should "
f"be '{self.correct}'"
)
if __name__ == "__main__":
main()
| #!/usr/bin/env python3
"""Linting script for rsp-environments.yaml."""
from __future__ import annotations
import argparse
import sys
from dataclasses import dataclass
from pathlib import Path
from typing import Any, Dict, List, Sequence
import requests
import yaml
def main() -> None:
args = parse_args()
found_issues = False
for p in args.file_paths:
path = Path(p)
if not path.is_file():
sys.exit(f"Could not find a file at {path}.")
issues = lint_file(path=path)
report_issues(path=path, issues=issues)
if len(issues) > 0:
found_issues = True
if found_issues:
sys.exit(1)
else:
sys.exit(0)
def parse_args() -> argparse.Namespace:
parser = argparse.ArgumentParser(
description="Lint data in the rsp-environments.yaml file"
)
parser.add_argument(
"file_paths", nargs="+", help="Path of the YAML file to check"
)
return parser.parse_args()
def lint_file(*, path: Path) -> List[Issue]:
"""Lint an RSP environemnts file (rsp-envrionments.yaml)."""
rsp_envs = yaml.safe_load(path.read_text())
issues: List[Issue] = []
for env_key, env_data in rsp_envs.items():
issues.extend(
lint_science_platform_values(
path=path, env_key=env_key, env_data=env_data
)
)
issues.extend(
lint_gafaelfawr_values(
path=path, env_key=env_key, env_data=env_data
)
)
return issues
def lint_science_platform_values(
*, path: Path, env_key: str, env_data: Dict[str, Any]
) -> List[Issue]:
"""Lint data that can be found in the science-platform/values-<env>.yaml
Helm values file.
"""
issues: List[Issue] = []
values_url = (
"https://raw.githubusercontent.com/lsst-sqre/phalanx/master/"
f"science-platform/values-{env_data['phalanx']}.yaml"
)
r = requests.get(values_url)
if r.status_code != 200:
issues.append(
GeneralIssue(
path=path,
keys=[env_key, "phalanx"],
message=f"{env_data['phalanx']} is not in the phalanx "
"repository.",
)
)
return issues
values_data = yaml.safe_load(r.text)
fqdn = values_data["fqdn"]
issues.extend(
lint_url(
path=path,
env_key=env_key,
env_data=env_data,
url_key="squareone",
expected_url=f"https://{fqdn}/",
)
)
issues.extend(
lint_url(
path=path,
env_key=env_key,
env_data=env_data,
url_key="nb",
expected_url=f"https://{fqdn}/nb",
)
)
if values_data.get("tap", {}).get("enabled", False):
issues.extend(
lint_url(
path=path,
env_key=env_key,
env_data=env_data,
url_key="tap",
expected_url=f"https://{fqdn}/api/tap",
)
)
else:
if "urls" in env_data and "tap" in env_data["urls"]:
issues.append(
GeneralIssue(
path=path,
keys=[env_key, "urls", "tap"],
message="TAP is not deployed in this environment",
)
)
return issues
def lint_url(
*,
path: Path,
env_key: str,
env_data: Dict[str, Any],
url_key: str,
expected_url: str,
) -> List[Issue]:
issues: List[Issue] = []
try:
url = env_data["urls"][url_key]
except KeyError:
issues.append(
GeneralIssue(
path=path,
keys=[env_key, "urls", url_key],
message="Key is missing. " f"Value should be {expected_url}",
)
)
return issues
if url != expected_url:
issues.append(
KeyValueIssue(
path=path,
keys=[env_key, "urls", url_key],
existing=url,
correct=expected_url,
)
)
return issues
def lint_gafaelfawr_values(
*, path: Path, env_key: str, env_data: Dict[str, Any]
) -> List[Issue]:
"""Lint data that can be found in the services/gafaelfawr/values-<env>.yaml
Helm values file.
"""
issues: List[Issue] = []
if "github_teams" not in env_data.keys():
# Nothing to check against
return issues
values_url = (
"https://raw.githubusercontent.com/lsst-sqre/phalanx/master/"
f"services/gafaelfawr/values-{env_data['phalanx']}.yaml"
)
r = requests.get(values_url)
if r.status_code != 200:
issues.append(
GeneralIssue(
path=path,
keys=[env_key, "github_teams"],
message=f"A gafaelfwar values file is not available for "
f"{env_data['phalanx']} in phalanx ",
)
)
return issues
values_data = yaml.safe_load(r.text)
try:
notebook_exec_configs = values_data["gafaelfawr"]["config"][
"groupMapping"
]["exec:notebook"]
except KeyError:
try:
notebook_exec_configs = values_data["gafaelfawr"]["config"][
"group_mapping"
]["exec:notebook"]
except KeyError:
issues.append(
GeneralIssue(
path=path,
keys=[env_key, "github_teams"],
message=f"A gafaelfwar exec:notebook group mapping is not "
f"available for {env_data['phalanx']} in phalanx. "
f"Check {values_url}",
)
)
return issues
if len(notebook_exec_configs) != len(env_data["github_teams"]):
issues.append(
GeneralIssue(
path=path,
keys=[env_key, "github_teams"],
message=f"Number of teams does not match that in {values_url}",
)
)
for team in env_data["github_teams"]:
gafaelfawr_team_name = team.replace("/", "-")
if gafaelfawr_team_name not in notebook_exec_configs:
issues.append(
GeneralIssue(
path=path,
keys=[env_key, "github_teams"],
message=f"Team {team} does not match one of "
f"{notebook_exec_configs}",
)
)
return issues
def report_issues(*, path: Path, issues: Sequence[Issue]) -> None:
if len(issues) == 0:
print(f"✨ {path.name} looks right ✨")
return
print(f"\n🚨 Found {len(issues)} issues in {path.name}.")
for issue in issues:
print(issue)
@dataclass
class Issue:
path: Path
keys: List[str]
@property
def key_path(self) -> str:
return ".".join(self.keys)
@dataclass
class GeneralIssue(Issue):
message: str
def __str__(self) -> str:
return f"{self.path}: {self.key_path} - {self.message}"
@dataclass
class KeyValueIssue(Issue):
existing: Any
correct: Any
def __str__(self) -> str:
return (
f"{self.path}: {self.key_path} is '{self.existing}', but should "
f"be '{self.correct}'"
)
if __name__ == "__main__":
main()
|
import json
from json.decoder import JSONDecodeError
import logging
import re
import requests
from .entity import AtlasClassification, AtlasEntity
from .typedef import BaseTypeDef
from .util import AtlasException, PurviewLimitation, PurviewOnly
class AtlasClient():
"""
Provides communication between your application and the Apache Atlas
server with your entities and type definitions.
:param str endpoint_url:
The http url for communicating with your Apache Atlas server.
It will most likely end in /api/atlas/v2.
:param authentication:
The method of authentication.
:type authentication:
:class:`~pyapacheatlas.auth.base.AtlasAuthBase`
"""
def __init__(self, endpoint_url, authentication=None):
super().__init__()
self.authentication = authentication
self.endpoint_url = endpoint_url
self.is_purview = False
self._purview_url_pattern = r"https:\/\/[a-z0-9-]*?\.(catalog\.purview.azure.com)"
if re.match(self._purview_url_pattern, self.endpoint_url):
self.is_purview = True
def _handle_response(self, resp):
"""
Safely handle an Atlas Response and return the results if valid.
:param Response resp: The response from the request method.
:return: A dict containing the results.
:rtype: dict
"""
try:
results = json.loads(resp.text)
resp.raise_for_status()
except JSONDecodeError:
raise ValueError("Error in parsing: {}".format(resp.text))
except requests.RequestException as e:
if "errorCode" in results:
raise AtlasException(resp.text)
else:
raise requests.RequestException(resp.text)
return results
def delete_entity(self, guid):
"""
Delete one or many guids from your Apache Atlas server.
:param guid: The guid or guids you want to remove.
:type guid: Union(str,list(str))
:return:
An EntityMutationResponse containing guidAssignments,
mutatedEntities, and partialUpdatedEntities (list).
:rtype: dict(str, Union(dict,list))
"""
results = None
if isinstance(guid, list):
guid_str = '&guid='.join(guid)
else:
guid_str = guid
atlas_endpoint = self.endpoint_url + \
"/entity/bulk?guid={}".format(guid_str)
deleteEntity = requests.delete(
atlas_endpoint,
headers=self.authentication.get_authentication_headers())
results = self._handle_response(deleteEntity)
return results
def delete_type(self, name):
"""
Delete a type based on the given name.
:param str name: The name of the type you want to remove.
:return:
No content, should receive a 204 status code.
:rtype: None
"""
results = None
atlas_endpoint = self.endpoint_url + \
f"/types/typedef/name/{name}"
deleteType = requests.delete(
atlas_endpoint,
headers=self.authentication.get_authentication_headers())
try:
deleteType.raise_for_status()
except requests.RequestException:
raise Exception(deleteType.text)
results = {"message": f"successfully delete {name}"}
return results
def get_entity(self, guid=None, qualifiedName=None, typeName=None):
"""
Retrieve one or many guids from your Atlas backed Data Catalog.
Returns a dictionary with keys "referredEntities" and "entities". You'll
want to grab the entities values which is a list of entities.
You can provide a single guid or a list of guids. You can provide a
single typeName and multiple qualified names in a list.
:param guid:
The guid or guids you want to retrieve. Not used if using typeName
and qualifiedName.
:type guid: Union(str, list(str))
:param qualifiedName:
The qualified name of the entity you want to find. Must provide
typeName if using qualifiedName. You may search for multiple
qualified names under the same type. Ignored if using guid
parameter.
:type qualifiedName: Union(str, list(str))
:param str typeName:
The type name of the entity you want to find. Must provide
qualifiedName if using typeName. Ignored if using guid parameter.
:return:
An AtlasEntitiesWithExtInfo object which includes a list of
entities and accessible with the "entities" key.
:rtype: dict(str, Union(list(dict),dict))
"""
results = None
parameters = {}
if isinstance(guid, list):
guid_str = '&guid='.join(guid)
else:
guid_str = guid
qualifiedName_params = dict()
if isinstance(qualifiedName, list):
qualifiedName_params = {
f"attr_{idx}:qualifiedName": qname
for idx, qname in enumerate(qualifiedName)
}
else:
qualifiedName_params = {"attr_0:qualifiedName": qualifiedName}
if qualifiedName and typeName:
atlas_endpoint = self.endpoint_url + \
f"/entity/bulk/uniqueAttribute/type/{typeName}"
parameters.update(qualifiedName_params)
else:
atlas_endpoint = self.endpoint_url + \
"/entity/bulk?guid={}".format(guid_str)
getEntity = requests.get(
atlas_endpoint,
params=parameters,
headers=self.authentication.get_authentication_headers()
)
results = self._handle_response(getEntity)
return results
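# Illustrative calls (the guids, type name, and qualified name are placeholders):
# get_entity accepts either one or many guids, or a typeName plus one or many
# qualified names.
#
#     client.get_entity(guid="123-abc")
#     client.get_entity(guid=["123-abc", "456-def"])
#     client.get_entity(typeName="hive_table", qualifiedName="mydb.mytable@mycluster")
#     # entities = response["entities"]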
def get_entity_classification(self, guid, classificationName):
"""
Retrieve a specific entity from the given entity's guid.
:param str guid: The guid of the entity that you want to query.
:param str classificationName: The typeName of the classification you
want to query.
:return: An AtlasClassification object that contains entityGuid,
entityStatus, typeName, attributes, and propagate fields.
:rtype: dict(str, object)
"""
atlas_endpoint = self.endpoint_url + \
f"/entity/guid/{guid}/classification/{classificationName}"
getClassification = requests.get(
atlas_endpoint,
headers=self.authentication.get_authentication_headers()
)
results = self._handle_response(getClassification)
return results
def get_entity_classifications(self, guid):
"""
Retrieve all classifications from the given entity's guid.
:param str guid: The entity's guid.
:return: An AtlasClassifications object that contains keys 'list' (which
is the list of classifications on the entity), pageSize, sortBy,
startIndex, and totalCount.
:rtype: dict(str, object)
"""
atlas_endpoint = self.endpoint_url + \
f"/entity/guid/{guid}/classifications"
getClassification = requests.get(
atlas_endpoint,
headers=self.authentication.get_authentication_headers()
)
results = self._handle_response(getClassification)
return results
def get_entity_header(self, guid=None):
"""
Retrieve one or many entity headers from your Atlas backed Data Catalog.
:param guid:
The guid or guids you want to retrieve. Not used if using typeName
and qualifiedName.
:type guid: Union(str, list(str))
:return:
An AtlasEntityHeader dict which includes the keys: guid, attributes
(which is a dict that contains qualifiedName and name keys), an
array of classifications, and an array of glossary term headers.
:rtype: dict
"""
results = None
parameters = {}
atlas_endpoint = self.endpoint_url + \
"/entity/guid/{}/header".format(guid)
getEntity = requests.get(
atlas_endpoint,
params=parameters,
headers=self.authentication.get_authentication_headers()
)
results = self._handle_response(getEntity)
return results
def get_relationship(self, guid):
"""
Retrieve the relationship attribute for the given guid.
:param str guid: The unique guid for the relationship.
:return: A dict representing AtlasRelationshipWithExtInfo with the
relationship (what you probably care about) and referredEntities
attributes.
:rtype: dict(str, dict)
"""
results = None
atlas_endpoint = self.endpoint_url + f"/relationship/guid/{guid}"
getResponse = requests.get(
atlas_endpoint,
headers=self.authentication.get_authentication_headers()
)
results = self._handle_response(getResponse)
return results
def get_all_typedefs(self):
"""
Retrieve all of the type defs available on the Apache Atlas server.
:return:
A dict representing an AtlasTypesDef, containing lists of
type defs wrapped in their corresponding definition types
{"entityDefs", "relationshipDefs"}.
:rtype: dict(str, list(dict))
"""
results = None
atlas_endpoint = self.endpoint_url + "/types/typedefs"
getTypeDefs = requests.get(
atlas_endpoint,
headers=self.authentication.get_authentication_headers()
)
results = self._handle_response(getTypeDefs)
return results
def get_typedef(self, type_category=None, guid=None, name=None):
"""
Retrieve a single type def based on its guid, name, or type category and
(guid or name). Rule of thumb: Use guid if you have it, use name if you
want to essentially use duck typing and are testing what keys you're
reading from the response, or use type_category when you want to
guarantee the type being returned.
:param type_category:
The type category your type def belongs to. You most likely want
TypeCategory.ENTITY. Optional if name or guid is specified.
:type type_category:
:class:`~pyapacheatlas.core.typedef.TypeCategory`
:param str,optional guid: A valid guid. Optional if name is specified.
:param str,optional name: A valid name. Optional if guid is specified.
:return: A dictionary representing an Atlas{TypeCategory}Def.
:rtype: dict
"""
results = None
atlas_endpoint = self.endpoint_url + "/types/"
# If we are using type category
if type_category:
atlas_endpoint = atlas_endpoint + \
"{}def".format(type_category.value)
elif guid or name:
atlas_endpoint = atlas_endpoint + "typedef"
else:
raise ValueError(
"Either guid or name must be defined or type_category and one of guid or name must be defined.")
if guid:
atlas_endpoint = atlas_endpoint + '/guid/{}'.format(guid)
elif name:
atlas_endpoint = atlas_endpoint + '/name/{}'.format(name)
else:
raise ValueError("One of guid or name must be defined.")
getTypeDef = requests.get(
atlas_endpoint,
headers=self.authentication.get_authentication_headers()
)
results = self._handle_response(getTypeDef)
return results
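# Example lookups (guid and name values are placeholders), following the rule of
# thumb in the docstring above; TypeCategory comes from pyapacheatlas.core.typedef.
#
#     client.get_typedef(guid="typedef-guid")
#     client.get_typedef(name="hive_table")
#     client.get_typedef(type_category=TypeCategory.ENTITY, name="hive_table")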
def get_glossary(self, name="Glossary", guid=None, detailed=False):
"""
Retrieve the specified glossary by name or guid along with the term
headers (AtlasRelatedTermHeader: including displayText and termGuid).
Providing the glossary name only will result in a lookup of all
glossaries and returns the term headers (accessible via "terms" key)
for all glossaries.
Use detailed = True to return the full detail of terms
(AtlasGlossaryTerm) accessible via "termInfo" key.
:param str name:
The name of the glossary to use, defaults to "Glossary". Not
required if using the guid parameter.
:param str guid:
The unique guid of your glossary. Not required if using the
name parameter.
:param bool detailed:
Set to true if you want to pull back all terms and
not just headers.
:return:
The requested glossary with the term headers (AtlasGlossary) or
with detailed terms (AtlasGlossaryExtInfo).
:rtype: list(dict)
"""
results = None
if guid:
logging.debug(f"Retreiving a Glossary based on guid: {guid}")
atlas_endpoint = self.endpoint_url + "/glossary/{}".format(guid)
if detailed:
atlas_endpoint = atlas_endpoint + "/detailed"
getResult = requests.get(
atlas_endpoint,
headers=self.authentication.get_authentication_headers()
)
results = self._handle_response(getResult)
else:
logging.debug(f"Retreiving a Glossary based on name: {name}")
all_glossaries = self._get_glossaries()
logging.debug(f"Iterating over {len(all_glossaries)} glossaries")
for glossary in all_glossaries:
if glossary["name"] == name:
logging.debug(f"Found a glossary named '{name}'")
if detailed:
logging.debug(
f"Recursively calling get_glossary with guid: {glossary["guid"]}")
results = self.get_glossary(
guid=glossary["guid"], detailed=detailed)
else:
results = glossary
if results is None:
raise ValueError(
f"Glossary with a name of {name} was not found.")
return results
def _get_glossaries(self, limit=-1, offset=0, sort_order="ASC"):
"""
Retrieve all glossaries and the term headers.
:param int limit:
The maximum number of glossaries to pull back. Does not affect the
number of term headers included in the results.
:param int offset: The number of glossaries to skip.
:param str sort_order: ASC or DESC sort for glossary name.
:return: The requested glossaries with the term headers.
:rtype: list(dict)
"""
results = None
atlas_endpoint = self.endpoint_url + "/glossary"
logging.debug("Retreiving all glossaries from catalog")
# TODO: Implement paging with offset and limit
getResult = requests.get(
atlas_endpoint,
params={"limit": limit, "offset": offset, "sort": sort_order},
headers=self.authentication.get_authentication_headers()
)
results = self._handle_response(getResult)
return results
def get_glossary_term(self, guid=None, name=None, glossary_name="Glossary", glossary_guid=None):
"""
Retrieve a single glossary term based on its guid. Providing only the
glossary_name will result in a lookup for the glossary guid. If you
plan on looking up many terms, consider using the get_glossary method
with the detailed argument set to True. That method will provide all
glossary terms in a dictionary for faster lookup.
:param str guid:
The guid of your term. Not required if name is specified.
:param str name:
The name of your term's display text. Overruled if guid is
provided.
:param str glossary_name:
The name of the glossary to use, defaults to "Glossary". Not
required if using the glossary_guid parameter.
:param str glossary_guid:
The unique guid of your glossary. Not required if using the
glossary_name parameter.
:return: The requested glossary term as a dict.
:rtype: dict
"""
results = None
if guid is None and name is None:
raise ValueError("Either guid or name and glossary must be set.")
if guid:
atlas_endpoint = self.endpoint_url + \
"/glossary/term/{}".format(guid)
getTerms = requests.get(
atlas_endpoint,
headers=self.authentication.get_authentication_headers()
)
results = self._handle_response(getTerms)
else:
terms_in_glossary = self.get_glossary(
name=glossary_name, guid=glossary_guid)
for term in terms_in_glossary["terms"]:
if term["displayText"] == name:
_guid = term["termGuid"]
results = self.get_glossary_term(guid=_guid)
return results
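# Illustrative lookups (term names and guids are placeholders):
#
#     client.get_glossary_term(guid="term-guid")
#     client.get_glossary_term(name="PII", glossary_name="Glossary")
#     # For many lookups, prefer get_glossary(detailed=True) and read "termInfo"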
def _get_typedefs_header(self):
"""
Get the array of AtlasTypeDefHeader that contains category, guid,
name, and serviceType. Massage it into a dict based on the available
categories.
:return: A dictionary of categories and the names of defined types.
:rtype: dict(str, list(str))
"""
atlas_endpoint = self.endpoint_url + "/types/typedefs/headers"
getHeaders = requests.get(
atlas_endpoint,
headers=self.authentication.get_authentication_headers()
)
results = self._handle_response(getHeaders)
output = dict()
for typedef in results:
active_category = typedef["category"].lower()+"Defs"
if active_category not in output:
output[active_category] = []
output[active_category].append(typedef["name"])
return output
@PurviewLimitation
def classify_bulk_entities(self, entityGuids, classification):
"""
Given a single classification, you want to apply it to many entities
and you know their guid. This call will fail if any one of the guids
already has the provided classification on that entity.
:param Union(str,list) entityGuids:
The guid or guids you want to classify.
:param classification:
The AtlasClassification object you want to apply to the entities.
:type classification:
Union(dict, :class:`~pyapacheatlas.core.entity.AtlasClassification`)
:return: A message indicating success. The only key is 'message',
containing a brief string.
:rtype: dict(str,Union(list(str),str))
"""
results = None
atlas_endpoint = self.endpoint_url + "/entity/bulk/classification"
if isinstance(classification, AtlasClassification):
classification = classification.to_json()
classification_name = classification["typeName"]
if isinstance(entityGuids, str):
entityGuids = [entityGuids]
elif isinstance(entityGuids, list):
pass
else:
raise TypeError(
"guid should be str or list, not {}".format(type(entityGuids)))
payload = {
# TODO: Accept AtlasClassification class
"classification": classification,
"entityGuids": entityGuids
}
postBulkClassifications = requests.post(
atlas_endpoint,
json=payload,
headers=self.authentication.get_authentication_headers()
)
try:
postBulkClassifications.raise_for_status()
except requests.RequestException:
raise AtlasException(postBulkClassifications.text)
results = {"message": f"Successfully assigned {classification_name}",
"entityGuids": entityGuids
}
return results
def _classify_entity_adds(self, guid, classifications):
"""
Update a given entity guid with the provided classifications.
:param str guid: The guid you want to classify.
:param list(dict) classifications:
The list of AtlasClassification object you want to apply to the
entity.
:return: The names of the classifications provided are returned.
:rtype: list(str)
"""
results = None
atlas_endpoint = self.endpoint_url + \
f"/entity/guid/{guid}/classifications"
postAddMultiClassifications = requests.post(
atlas_endpoint,
json=classifications,
headers=self.authentication.get_authentication_headers()
)
try:
postAddMultiClassifications.raise_for_status()
except requests.RequestException:
raise Exception(postAddMultiClassifications.text)
results = [c["typeName"] for c in classifications]
return results
def _classify_entity_updates(self, guid, classifications):
"""
Update a given entity guid with the provided classifications.
:param str guid: The guid you want to classify.
:param list(dict) classifications:
The list of AtlasClassification object you want to update to the
entity.
:return: The names of the classifications provided are returned.
:rtype: list(str)
"""
results = None
atlas_endpoint = self.endpoint_url + \
f"/entity/guid/{guid}/classifications"
putUpdateMultiClassifications = requests.put(
atlas_endpoint,
json=classifications,
headers=self.authentication.get_authentication_headers()
)
try:
putUpdateMultiClassifications.raise_for_status()
except requests.RequestException:
raise Exception(putUpdateMultiClassifications.text)
results = [c["typeName"] for c in classifications]
return results
@PurviewLimitation
def classify_entity(self, guid, classifications, force_update=False):
"""
Given a single entity, you want to apply many classifications.
This call will fail if any one of the classifications exists on the
entity already, unless you choose force_update=True.
force_update will query the existing entity and sort the classifications
into NEW (post) and UPDATE (put) and do two requests to add and update.
force_update is not transactional; it performs adds first. If the add
succeeds, it moves on to the updates. If the update fails, the adds
will continue to exist on the Atlas server and will not be rolled back.
An error can occur if, for example, the classification has some required
attribute that you do not provide.
:param str guid: The guid you want to classify.
:param classifications:
The list of AtlasClassification object you want to apply to the
entities.
:type classification:
Union(dict, :class:`~pyapacheatlas.core.entity.AtlasClassification`)
:param bool force_update: Mark as True if any of your classifications
may already exist on the given entity.
:return: A message indicating success and which classifications were
'updates' vs 'adds' for the given guid.
:rtype: dict(str, str)
"""
results = None
adds = []
updates = []
if isinstance(classifications, dict):
classifications = [classifications]
elif isinstance(classifications, AtlasClassification):
classifications = [classifications.to_json()]
elif isinstance(classifications, list):
classifications = [
c.to_json()
if isinstance(c, AtlasClassification)
else c for c in classifications]
else:
raise TypeError("classifications should be dict or list, not {}".format(
type(classifications)))
if force_update:
# Get the existing entity's classifications
existing_classifications = set([
c["typeName"] for c in
self.get_entity_classifications(guid=guid)["list"]
])
# Sort the list into adds and updates (if exists)
temp_adds = []
temp_updates = []
for classification in classifications:
if classification["typeName"] in existing_classifications:
temp_updates.append(classification)
else:
temp_adds.append(classification)
# execute adds
if len(temp_adds) > 0:
adds = self._classify_entity_adds(guid, temp_adds)
# execute updates
if len(temp_updates) > 0:
updates = self._classify_entity_updates(guid, temp_updates)
else:
# Assuming this is all new adds
# execute adds
adds = self._classify_entity_adds(guid, classifications)
results = {
"message": "Successfully assigned classifications",
"guid": guid,
"adds": adds,
"updates": updates
}
return results
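# A usage sketch (the guid and classification name are placeholders; the
# AtlasClassification(typeName) construction is an assumption, since its
# constructor is not shown in this excerpt):
#
#     client.classify_entity(
#         guid="123-abc",
#         classifications=[AtlasClassification("MY_CLASSIFICATION")],
#         force_update=True,  # split into adds (POST) and updates (PUT)
#     )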
def declassify_entity(self, guid, classificationName):
"""
Given an entity guid and a classification name, remove the
classification from the given entity.
:param str guid: The guid for the entity that needs to be updated.
:param str classificationName:
The name of the classification to be deleted.
:return: A success message repeating what was deleted. The only key
is 'message', containing the classification name and guid.
:rtype: dict(str, str)
"""
results = None
atlas_endpoint = self.endpoint_url + \
f"/entity/guid/{guid}/classification/{classificationName}"
deleteEntityClassification = requests.delete(
atlas_endpoint,
headers=self.authentication.get_authentication_headers()
)
try:
deleteEntityClassification.raise_for_status()
except requests.RequestException:
raise Exception(deleteEntityClassification.text)
results = {"message":
f"Successfully removed classification: {classificationName} from {guid}.",
"guid": guid,
}
return results
@staticmethod
def _prepare_type_upload(typedefs=None, **kwargs):
"""
Massage the type upload. See rules in upload_typedefs.
"""
payload = {}
required_keys = ["classificationDefs", "entityDefs",
"enumDefs", "relationshipDefs", "structDefs"]
# If typedefs is defined as a dict and it contains at least one of the
# required keys for the TypeREST definition.
if isinstance(typedefs, dict) and len(set(typedefs.keys()).intersection(required_keys)) > 0:
payload = typedefs
# It isn't in the standard form but is it defined?
elif typedefs is not None:
# Assuming this is a single typedef
key = None
if isinstance(typedefs, BaseTypeDef):
key = typedefs.category.lower() + "Defs"
val = [typedefs.to_json()]
elif isinstance(typedefs, dict):
key = typedefs["category"].lower() + "Defs"
val = [typedefs]
else:
raise NotImplementedError(
"Uploading an object of type '{}' is not supported."
.format(type(typedefs))
)
payload = {key: val}
# Did we set any of the xDefs as arguments?
elif len(set(kwargs.keys()).intersection(required_keys)) > 0:
for typeRestKey in required_keys:
# Did we specify this key?
if typeRestKey in kwargs.keys():
payload[typeRestKey] = [
t.to_json() if isinstance(t, BaseTypeDef) else t
for t in kwargs[typeRestKey]
]
else:
raise RuntimeError(
f"Failed to upload typedefs for arguments: {kwargs}"
)
return payload
def upload_typedefs(self, typedefs=None, force_update=False, **kwargs):
"""
Provides a way to upload a single or multiple type definitions.
If you provide one type def, it will format the required wrapper
for you based on the type category.
If you want to upload multiple type defs or typedefs of different
category, you can pass the in kwargs `entityDefs`, `classificationDefs`,
`enumDefs`, `relationshipDefs`, `structDefs` which take in a list of
dicts or appropriate TypeDef objects.
Otherwise, you can pass in the wrapper yourself (e.g. {"entityDefs":[],
"relationshipDefs":[]}) by providing that dict to the typedefs
parameter. If the dict you pass in contains at least one of these Def
fields it will be considered valid and an upload will be attempted.
typedefs also takes in a BaseTypeDef object or a valid AtlasTypeDef
json / dict. If you provide a value in typedefs, it will ignore the
kwargs parameters.
When using force_update, it will look up all existing types and see
if any of your provided types exist. If they do exist, they will be
updated. If they do not exist, they will be issued as new. New types
are uploaded first. Existing types are updated second. There are no
transactional updates. New types can succeed and be inserted while
a batch of existing types can fail and not be updated.
:param typedefs: The set of type definitions you want to upload.
:type typedefs: Union(dict, :class:`~pyapacheatlas.core.typedef.BaseTypeDef`)
:param bool force_update:
            Set to True if your typedefs contain any types that already exist.
:return: The results of your upload attempt from the Atlas server.
:rtype: dict
Kwargs:
:param entityDefs: EntityDefs to upload.
:type entityDefs: list( Union(:class:`~pyapacheatlas.core.typedef.BaseTypeDef`, dict))
:param classificationDefs: classificationDefs to upload.
:type classificationDefs: list( Union(:class:`~pyapacheatlas.core.typedef.BaseTypeDef`, dict))
:param enumDefs: enumDefs to upload.
:type enumDefs: list( Union(:class:`~pyapacheatlas.core.typedef.BaseTypeDef`, dict))
:param relationshipDefs: relationshipDefs to upload.
:type relationshipDefs: list( Union(:class:`~pyapacheatlas.core.typedef.BaseTypeDef`, dict))
:param structDefs: structDefs to upload.
:type structDefs: list( Union(:class:`~pyapacheatlas.core.typedef.BaseTypeDef`, dict))
"""
# Should this take a list of type defs and figure out the formatting
# by itself?
# Should you pass in a AtlasTypesDef object and be forced to build
# it yourself?
results = None
atlas_endpoint = self.endpoint_url + "/types/typedefs"
payload = AtlasClient._prepare_type_upload(typedefs, **kwargs)
if not force_update:
# This is just a plain push of new entities
upload_typedefs_results = requests.post(
atlas_endpoint, json=payload,
headers=self.authentication.get_authentication_headers()
)
results = self._handle_response(upload_typedefs_results)
else:
# Look up all entities by their header
types_from_client = self._get_typedefs_header()
existing_types = dict()
new_types = dict()
# Loop over the intended upload and see if they exist already
# If they do not exist, shuffle them to the new_types upload.
# if they do exist, shuffle to the existing types upload.
for cat, typelist in payload.items():
existing_types[cat] = []
new_types[cat] = []
for t in typelist:
if t["name"] in types_from_client[cat]:
existing_types[cat].append(t)
else:
new_types[cat].append(t)
upload_new = requests.post(
atlas_endpoint, json=new_types,
headers=self.authentication.get_authentication_headers()
)
results_new = self._handle_response(upload_new)
upload_exist = requests.put(
atlas_endpoint, json=existing_types,
headers=self.authentication.get_authentication_headers()
)
results_exist = self._handle_response(upload_exist)
# Merge the results
results = results_new
for cat, updatedtypelist in results_exist.items():
if cat not in results:
results[cat] = []
results[cat].extend(updatedtypelist)
return results
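    # Illustrative usage sketch (not part of the library): a single typedef may be
    # passed as a plain dict carrying "category" and "name"; the wrapper key
    # (e.g. entityDefs) is derived from the category. All values below are
    # hypothetical.
    #
    #   client.upload_typedefs(
    #       typedefs={"category": "ENTITY", "name": "custom_dataset",
    #                 "superTypes": ["DataSet"], "attributeDefs": []},
    #       force_update=True
    #   )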
@staticmethod
def _prepare_entity_upload(batch):
"""
Massages the batch to be in the right format and coerces to json/dict.
Supports list of dicts, dict of single entity, dict of AtlasEntitiesWithExtInfo.
:param batch: The batch of entities you want to upload.
:type batch: Union(list(dict), dict))
:return: Provides a dict formatted in the Atlas entity bulk upload.
:rtype: dict(str, list(dict))
"""
payload = batch
required_keys = ["entities"]
if isinstance(batch, list):
# It's a list, so we're assuming it's a list of entities
# Handles any type of AtlasEntity and mixed batches of dicts
# and AtlasEntities
dict_batch = [e.to_json() if isinstance(
e, AtlasEntity) else e for e in batch]
payload = {"entities": dict_batch}
elif isinstance(batch, dict):
current_keys = list(batch.keys())
# Does the dict entity conform to the required pattern?
if not any([req in current_keys for req in required_keys]):
# Assuming this is a single entity
# DESIGN DECISION: I'm assuming, if you're passing in
# json, you know the schema and I will not support
# AtlasEntity here.
payload = {"entities": [batch]}
elif isinstance(batch, AtlasEntity):
payload = {"entities": [batch.to_json()]}
else:
raise NotImplementedError(
f"Uploading type: {type(batch)} is not supported.")
return payload
def upload_entities(self, batch):
"""
Upload entities to your Atlas backed Data Catalog.
:param batch:
The batch of entities you want to upload. Supports a single dict,
AtlasEntity, list of dicts, list of atlas entities.
:type batch:
Union(dict, :class:`~pyapacheatlas.core.entity.AtlasEntity`,
list(dict), list(:class:`~pyapacheatlas.core.entity.AtlasEntity`) )
:return: The results of your bulk entity upload.
:rtype: dict
"""
# TODO Include a Do Not Overwrite call
results = None
atlas_endpoint = self.endpoint_url + "/entity/bulk"
payload = AtlasClient._prepare_entity_upload(batch)
postBulkEntities = requests.post(
atlas_endpoint,
json=payload,
headers=self.authentication.get_authentication_headers()
)
results = self._handle_response(postBulkEntities)
return results
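    # Illustrative usage sketch (not part of the library): a single entity dict is
    # wrapped into {"entities": [...]} automatically; the typeName and
    # qualifiedName below are hypothetical.
    #
    #   client.upload_entities({
    #       "typeName": "DataSet", "guid": "-1",
    #       "attributes": {"name": "example", "qualifiedName": "example://ds1"}
    #   })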
def upload_relationship(self, relationship):
"""
        Upload an AtlasRelationship json. Should take the form of the following::
{
"typeName": "hive_table_columns",
"attributes": {},
"guid": -100,
"end1": {
"guid": assignments["-1"]
},
"end2": {
"guid": assignments["-5"]
}
}
:param dict relationship: The relationship you want to upload.
:return: The results of your relationship upload.
:rtype: dict
"""
# TODO Include a Do Not Overwrite call
results = None
atlas_endpoint = self.endpoint_url + "/relationship"
# TODO: Handling Updates instead of just creates
relationshipResp = requests.post(
atlas_endpoint,
json=relationship,
headers=self.authentication.get_authentication_headers()
)
results = self._handle_response(relationshipResp)
return results
def upload_terms(self, batch, force_update=False):
"""
Upload terms to your Atlas backed Data Catalog. Supports Purview Term
Templates by passing in an attributes field with the term template's
name as a field within attributes and an object of the required and
optional fields.
:param batch: A list of AtlasGlossaryTerm objects to be uploaded.
:type batch: list(dict)
:return:
A list of AtlasGlossaryTerm objects that are the results from
your upload.
:rtype: list(dict)
"""
# TODO Include a Do Not Overwrite call
results = None
atlas_endpoint = self.endpoint_url + "/glossary/terms"
postResp = requests.post(
atlas_endpoint,
json=batch,
headers=self.authentication.get_authentication_headers()
)
results = self._handle_response(postResp)
return results
def _search_generator(self, search_params, starting_offset=0):
"""
Generator to page through the search query results.
"""
atlas_endpoint = self.endpoint_url + "/search/advanced"
offset = starting_offset
while True:
postSearchResults = requests.post(
atlas_endpoint,
json=search_params,
headers=self.authentication.get_authentication_headers()
)
results = self._handle_response(postSearchResults)
return_values = results["value"]
return_count = len(return_values)
if return_count == 0:
return
offset = offset + return_count
search_params["offset"] = offset
for sub_result in return_values:
try:
yield sub_result
except StopIteration:
return
@PurviewOnly
def search_entities(self, query, limit=50, search_filter=None, starting_offset=0):
"""
        Search entities based on a query and automatically handles limits and
offsets to page through results.
The limit provides how many records are returned in each batch with a
maximum of 1,000 entries per page.
:param str query: The search query to be executed.
:param int limit:
A non-zero integer representing how many entities to
return for each page of the search results.
:param dict search_filter: A search filter to reduce your results.
:return: The results of your search as a generator.
:rtype: Iterator(dict)
"""
if limit > 1000 or limit < 1:
raise ValueError(
"The limit parameter must be non-zero and less than 1,000."
)
search_params = {
"keywords": query,
"limit": limit,
"offset": 0
}
# TODO: Make this smarter, make it easier to create filters
# without having to know how to make a filter object.
if search_filter:
# Example search filter might look like:
# {"filter": {"typeName": "DataSet", "includeSubTypes": True} }
search_params.update({"filter": search_filter})
search_generator = self._search_generator(search_params, starting_offset=starting_offset)
return search_generator
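    # Illustrative usage sketch (not part of the library): the return value is a
    # generator, so additional pages are fetched lazily as you iterate.
    #
    #   for result in client.search_entities("hive", limit=100):
    #       print(result)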
def get_entity_lineage(self, guid, depth=3, width=10, direction="BOTH", includeParent=False, getDerivedLineage=False):
"""
Gets lineage info about the specified entity by guid.
:param str guid: The guid of the entity for which you want to
retrieve lineage.
:param int depth: The number of hops for lineage
:param int width: The number of max expanding width in lineage
:param str direction: The direction of the lineage, which could
be INPUT, OUTPUT or BOTH.
:param bool includeParent: True to include the parent chain in
the response
:param bool getDerivedLineage: True to include derived lineage in
the response
:return: A dict representing AtlasLineageInfo with an array
of parentRelations and an array of relations
:rtype: dict(str, dict)
"""
direction = direction.strip().upper()
assert direction in ("BOTH", "INPUT", "OUTPUT"), "Invalid direction '{}'. Valid options are: BOTH, INPUT, OUTPUT".format(direction)
atlas_endpoint = self.endpoint_url + \
f"/lineage/{guid}"
getLineageRequest = requests.get(
atlas_endpoint,
params={"depth": depth, "width": width, "direction": direction, "includeParent": includeParent, "getDerivedLineage": getDerivedLineage},
headers=self.authentication.get_authentication_headers()
)
results = self._handle_response(getLineageRequest)
return results
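    # Illustrative usage sketch (not part of the library): the guid below is
    # hypothetical.
    #
    #   lineage = client.get_entity_lineage("1234-abcd", depth=1, direction="INPUT")
    #   relations = lineage["relations"]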
class PurviewClient(AtlasClient):
"""
Provides communication between your application and the Azure Purview
service. Simplifies the requirements for knowing the endpoint url and
requires only the Purview account name.
:param str account_name:
Your Purview account name.
:param authentication:
The method of authentication.
:type authentication:
:class:`~pyapacheatlas.auth.base.AtlasAuthBase`
"""
def __init__(self, account_name, authentication=None):
endpoint_url = f"https://{account_name.lower()}.catalog.purview.azure.com/api/atlas/v2"
super().__init__(endpoint_url, authentication)
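    # Illustrative construction sketch (not part of the library): assumes a service
    # principal credential; in recent pyapacheatlas releases this is typically
    # ServicePrincipalAuthentication from pyapacheatlas.auth (verify against your
    # installed version).
    #
    #   from pyapacheatlas.auth import ServicePrincipalAuthentication
    #   auth = ServicePrincipalAuthentication(
    #       tenant_id="...", client_id="...", client_secret="...")
    #   client = PurviewClient(account_name="my-purview-account", authentication=auth)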
@PurviewOnly
def get_entity_next_lineage(self, guid, direction, getDerivedLineage=False, offset=0, limit=-1):
"""
Returns immediate next level lineage info about entity with pagination
:param str guid: The guid of the entity for which you want to
retrieve lineage.
:param str direction: The direction of the lineage, which could
be INPUT or OUTPUT.
:param bool getDerivedLineage: True to include derived lineage in
the response
:param int offset: The offset for pagination purpose.
:param int limit: The page size - by default there is no paging.
:return: A dict representing AtlasLineageInfo with an array
of parentRelations and an array of relations
:rtype: dict(str, dict)
"""
direction = direction.strip().upper()
assert direction in ("INPUT", "OUTPUT"), "Invalid direction '{}'. Valid options are: INPUT, OUTPUT".format(direction)
atlas_endpoint = self.endpoint_url + \
f"/lineage/{guid}/next"
# TODO: Implement paging with offset and limit
getLineageRequest = requests.get(
atlas_endpoint,
params={"direction": direction, "getDerivedLineage": getDerivedLineage, "offset": offset, "limit": limit},
headers=self.authentication.get_authentication_headers()
)
results = self._handle_response(getLineageRequest)
return results
def import_terms(self, csv_path, glossary_name="Glossary", glossary_guid=None):
"""
Bulk import terms from an existing csv file. If you are using the system
default, you must include the following headers:
Name,Definition,Status,Related Terms,Synonyms,Acronym,Experts,Stewards
For custom term templates, additional attributes must include
[Attribute][termTemplateName]attributeName as the header.
:param str csv_path: Path to CSV that will be imported.
:param str glossary_name:
Name of the glossary. Defaults to 'Glossary'. Not used if
glossary_guid is provided.
:param str glossary_guid:
Guid of the glossary, optional if glossary_name is provided.
Otherwise, this parameter takes priority over glossary_name.
:return:
A dict that contains an `id` that you can use in
`import_terms_status` to get the status of the import operation.
:rtype: dict
"""
results = None
if glossary_guid:
atlas_endpoint = self.endpoint_url + \
f"/glossary/{glossary_guid}/terms/import"
elif glossary_name:
atlas_endpoint = self.endpoint_url + \
f"/glossary/name/{glossary_name}/terms/import"
else:
raise ValueError(
"Either glossary_name or glossary_guid must be defined.")
headers = self.authentication.get_authentication_headers()
# Pop the default of application/json so that request can fill in the
# multipart/form-data; boundary=xxxx that is automatically generated
# when using the files argument.
headers.pop("Content-Type")
postResp = requests.post(
atlas_endpoint,
files={'file': ("file", open(csv_path, 'rb'))},
headers=headers
)
results = self._handle_response(postResp)
return results
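    # Illustrative usage sketch (not part of the library): the response carries an
    # `id` that can be polled with import_terms_status; the path below is
    # hypothetical.
    #
    #   resp = client.import_terms(csv_path="terms.csv", glossary_name="Glossary")
    #   status = client.import_terms_status(operation_guid=resp["id"])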
@PurviewOnly
def import_terms_status(self, operation_guid):
"""
Get the operation status of a glossary term import activity. You get
the operation_guid after executing the `import_terms` method and find
the `id` field in the response dict/json.
:param str operation_guid: The id of the import operation.
:return: The status of the import operation as a dict. The dict includes
a field called `status` that will report back RUNNING, SUCCESS, or
FAILED. Other fields include the number of terms detected and
number of errors.
:rtype: dict
"""
results = None
atlas_endpoint = self.endpoint_url + \
f"/glossary/terms/import/{operation_guid}"
postResp = requests.get(
atlas_endpoint,
headers=self.authentication.get_authentication_headers()
)
results = self._handle_response(postResp)
return results
@PurviewOnly
def export_terms(self, guids, csv_path, glossary_name="Glossary", glossary_guid=None):
"""
:param list(str) guids: List of guids that should be exported as csv.
:param str csv_path: Path to CSV that will be imported.
:param str glossary_name:
Name of the glossary. Defaults to 'Glossary'. Not used if
glossary_guid is provided.
:param str glossary_guid:
Guid of the glossary, optional if glossary_name is provided.
Otherwise, this parameter takes priority over glossary_name.
Providing glossary_guid is also faster as you avoid a lookup based
on glossary_name.
:return: A csv file is written to the csv_path.
:rtype: None
"""
if glossary_guid:
# Glossary guid is defined so we don't need to look up the guid
pass
elif glossary_name:
glossary = self.get_glossary(glossary_name)
glossary_guid = glossary["guid"]
else:
raise ValueError(
"Either glossary_name or glossary_guid must be defined.")
results = None
atlas_endpoint = self.endpoint_url + \
f"/glossary/{glossary_guid}/terms/export"
postResp = requests.post(
atlas_endpoint,
json=guids,
headers=self.authentication.get_authentication_headers()
)
# Can't use handle response since it expects json
try:
postResp.raise_for_status()
except requests.RequestException as e:
if "errorCode" in postResp:
raise AtlasException(postResp.text)
else:
raise requests.RequestException(postResp.text)
with open(csv_path, 'wb') as fp:
fp.write(postResp.content)
return None
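    # Illustrative usage sketch (not part of the library): the term guids below are
    # hypothetical; the CSV returned by the service is written to csv_path.
    #
    #   client.export_terms(
    #       guids=["term-guid-1", "term-guid-2"],
    #       csv_path="exported_terms.csv",
    #       glossary_name="Glossary"
    #   )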
| import json
from json.decoder import JSONDecodeError
import logging
import re
import requests
from .entity import AtlasClassification, AtlasEntity
from .typedef import BaseTypeDef
from .util import AtlasException, PurviewLimitation, PurviewOnly
class AtlasClient():
"""
Provides communication between your application and the Apache Atlas
server with your entities and type definitions.
:param str endpoint_url:
The http url for communicating with your Apache Atlas server.
It will most likely end in /api/atlas/v2.
:param authentication:
The method of authentication.
:type authentication:
:class:`~pyapacheatlas.auth.base.AtlasAuthBase`
"""
def __init__(self, endpoint_url, authentication=None):
super().__init__()
self.authentication = authentication
self.endpoint_url = endpoint_url
self.is_purview = False
self._purview_url_pattern = r"https:\/\/[a-z0-9-]*?\.(catalog\.purview.azure.com)"
if re.match(self._purview_url_pattern, self.endpoint_url):
self.is_purview = True
def _handle_response(self, resp):
"""
Safely handle an Atlas Response and return the results if valid.
:param Response resp: The response from the request method.
:return: A dict containing the results.
:rtype: dict
"""
try:
results = json.loads(resp.text)
resp.raise_for_status()
except JSONDecodeError:
raise ValueError("Error in parsing: {}".format(resp.text))
except requests.RequestException as e:
if "errorCode" in results:
raise AtlasException(resp.text)
else:
raise requests.RequestException(resp.text)
return results
def delete_entity(self, guid):
"""
Delete one or many guids from your Apache Atlas server.
:param guid: The guid or guids you want to remove.
:type guid: Union(str,list(str))
:return:
An EntityMutationResponse containing guidAssignments,
mutatedEntities, and partialUpdatedEntities (list).
:rtype: dict(str, Union(dict,list))
"""
results = None
if isinstance(guid, list):
guid_str = '&guid='.join(guid)
else:
guid_str = guid
atlas_endpoint = self.endpoint_url + \
"/entity/bulk?guid={}".format(guid_str)
deleteEntity = requests.delete(
atlas_endpoint,
headers=self.authentication.get_authentication_headers())
results = self._handle_response(deleteEntity)
return results
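    # Illustrative usage sketch (not part of the library): the guids below are
    # hypothetical; a list of guids is folded into a single bulk delete call.
    #
    #   client.delete_entity(guid=["1234-abcd", "5678-efgh"])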
def delete_type(self, name):
"""
Delete a type based on the given name.
:param str name: The name of the type you want to remove.
:return:
No content, should receive a 204 status code.
:rtype: None
"""
results = None
atlas_endpoint = self.endpoint_url + \
f"/types/typedef/name/{name}"
deleteType = requests.delete(
atlas_endpoint,
headers=self.authentication.get_authentication_headers())
try:
deleteType.raise_for_status()
except requests.RequestException:
raise Exception(deleteType.text)
results = {"message": f"successfully delete {name}"}
return results
def get_entity(self, guid=None, qualifiedName=None, typeName=None):
"""
Retrieve one or many guids from your Atlas backed Data Catalog.
        Returns a dictionary with keys "referredEntities" and "entities". You'll
        want to grab the "entities" value, which is a list of entities.
You can provide a single guid or a list of guids. You can provide a
single typeName and multiple qualified names in a list.
:param guid:
The guid or guids you want to retrieve. Not used if using typeName
and qualifiedName.
:type guid: Union(str, list(str))
:param qualifiedName:
The qualified name of the entity you want to find. Must provide
typeName if using qualifiedName. You may search for multiple
qualified names under the same type. Ignored if using guid
parameter.
:type qualifiedName: Union(str, list(str))
:param str typeName:
The type name of the entity you want to find. Must provide
qualifiedName if using typeName. Ignored if using guid parameter.
:return:
An AtlasEntitiesWithExtInfo object which includes a list of
entities and accessible with the "entities" key.
:rtype: dict(str, Union(list(dict),dict))
"""
results = None
parameters = {}
if isinstance(guid, list):
guid_str = '&guid='.join(guid)
else:
guid_str = guid
qualifiedName_params = dict()
if isinstance(qualifiedName, list):
qualifiedName_params = {
f"attr_{idx}:qualifiedName": qname
for idx, qname in enumerate(qualifiedName)
}
else:
qualifiedName_params = {"attr_0:qualifiedName": qualifiedName}
if qualifiedName and typeName:
atlas_endpoint = self.endpoint_url + \
f"/entity/bulk/uniqueAttribute/type/{typeName}"
parameters.update(qualifiedName_params)
else:
atlas_endpoint = self.endpoint_url + \
"/entity/bulk?guid={}".format(guid_str)
getEntity = requests.get(
atlas_endpoint,
params=parameters,
headers=self.authentication.get_authentication_headers()
)
results = self._handle_response(getEntity)
return results
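    # Illustrative usage sketch (not part of the library): values are hypothetical.
    # Lookup works either by guid(s) or by typeName plus qualifiedName(s).
    #
    #   resp = client.get_entity(qualifiedName="example://ds1", typeName="DataSet")
    #   entities = resp["entities"]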
def get_entity_classification(self, guid, classificationName):
"""
Retrieve a specific entity from the given entity's guid.
:param str guid: The guid of the entity that you want to query.
:param str classificationName: The typeName of the classification you
want to query.
:return: An AtlasClassification object that contains entityGuid,
entityStatus, typeName, attributes, and propagate fields.
:rtype: dict(str, object)
"""
atlas_endpoint = self.endpoint_url + \
f"/entity/guid/{guid}/classification/{classificationName}"
getClassification = requests.get(
atlas_endpoint,
headers=self.authentication.get_authentication_headers()
)
results = self._handle_response(getClassification)
return results
def get_entity_classifications(self, guid):
"""
Retrieve all classifications from the given entity's guid.
:param str guid: The entity's guid.
:return: An AtlasClassifications object that contains keys 'list' (which
is the list of classifications on the entity), pageSize, sortBy,
startIndex, and totalCount.
:rtype: dict(str, object)
"""
atlas_endpoint = self.endpoint_url + \
f"/entity/guid/{guid}/classifications"
getClassification = requests.get(
atlas_endpoint,
headers=self.authentication.get_authentication_headers()
)
results = self._handle_response(getClassification)
return results
def get_entity_header(self, guid=None):
"""
Retrieve one or many entity headers from your Atlas backed Data Catalog.
:param guid:
The guid or guids you want to retrieve. Not used if using typeName
and qualifiedName.
:type guid: Union(str, list(str))
:return:
An AtlasEntityHeader dict which includes the keys: guid, attributes
(which is a dict that contains qualifiedName and name keys), an
array of classifications, and an array of glossary term headers.
:rtype: dict
"""
results = None
parameters = {}
atlas_endpoint = self.endpoint_url + \
"/entity/guid/{}/header".format(guid)
getEntity = requests.get(
atlas_endpoint,
params=parameters,
headers=self.authentication.get_authentication_headers()
)
results = self._handle_response(getEntity)
return results
def get_relationship(self, guid):
"""
Retrieve the relationship attribute for the given guid.
:param str guid: The unique guid for the relationship.
:return: A dict representing AtlasRelationshipWithExtInfo with the
relationship (what you probably care about) and referredEntities
attributes.
:rtype: dict(str, dict)
"""
results = None
atlas_endpoint = self.endpoint_url + f"/relationship/guid/{guid}"
getResponse = requests.get(
atlas_endpoint,
headers=self.authentication.get_authentication_headers()
)
results = self._handle_response(getResponse)
return results
def get_all_typedefs(self):
"""
Retrieve all of the type defs available on the Apache Atlas server.
:return:
A dict representing an AtlasTypesDef, containing lists of
type defs wrapped in their corresponding definition types
{"entityDefs", "relationshipDefs"}.
:rtype: dict(str, list(dict))
"""
results = None
atlas_endpoint = self.endpoint_url + "/types/typedefs"
getTypeDefs = requests.get(
atlas_endpoint,
headers=self.authentication.get_authentication_headers()
)
results = self._handle_response(getTypeDefs)
return results
def get_typedef(self, type_category=None, guid=None, name=None):
"""
Retrieve a single type def based on its guid, name, or type category and
(guid or name). Rule of thumb: Use guid if you have it, use name if you
want to essentially use duck typing and are testing what keys you're
reading from the response, or use type_category when you want to
guarantee the type being returned.
:param type_category:
The type category your type def belongs to. You most likely want
TypeCategory.ENTITY. Optional if name or guid is specified.
:type type_category:
:class:`~pyapacheatlas.core.typedef.TypeCategory`
:param str,optional guid: A valid guid. Optional if name is specified.
:param str,optional name: A valid name. Optional if guid is specified.
:return: A dictionary representing an Atlas{TypeCategory}Def.
:rtype: dict
"""
results = None
atlas_endpoint = self.endpoint_url + "/types/"
# If we are using type category
if type_category:
atlas_endpoint = atlas_endpoint + \
"{}def".format(type_category.value)
elif guid or name:
atlas_endpoint = atlas_endpoint + "typedef"
else:
raise ValueError(
"Either guid or name must be defined or type_category and one of guid or name must be defined.")
if guid:
atlas_endpoint = atlas_endpoint + '/guid/{}'.format(guid)
elif name:
atlas_endpoint = atlas_endpoint + '/name/{}'.format(name)
else:
raise ValueError("One of guid or name must be defined.")
getTypeDef = requests.get(
atlas_endpoint,
headers=self.authentication.get_authentication_headers()
)
results = self._handle_response(getTypeDef)
return results
def get_glossary(self, name="Glossary", guid=None, detailed=False):
"""
Retrieve the specified glossary by name or guid along with the term
headers (AtlasRelatedTermHeader: including displayText and termGuid).
Providing the glossary name only will result in a lookup of all
glossaries and returns the term headers (accessible via "terms" key)
for all glossaries.
Use detailed = True to return the full detail of terms
(AtlasGlossaryTerm) accessible via "termInfo" key.
:param str name:
The name of the glossary to use, defaults to "Glossary". Not
required if using the guid parameter.
:param str guid:
The unique guid of your glossary. Not required if using the
name parameter.
:param bool detailed:
Set to true if you want to pull back all terms and
not just headers.
:return:
The requested glossary with the term headers (AtlasGlossary) or
with detailed terms (AtlasGlossaryExtInfo).
:rtype: list(dict)
"""
results = None
if guid:
logging.debug(f"Retreiving a Glossary based on guid: {guid}")
atlas_endpoint = self.endpoint_url + "/glossary/{}".format(guid)
if detailed:
atlas_endpoint = atlas_endpoint + "/detailed"
getResult = requests.get(
atlas_endpoint,
headers=self.authentication.get_authentication_headers()
)
results = self._handle_response(getResult)
else:
logging.debug(f"Retreiving a Glossary based on name: {name}")
all_glossaries = self._get_glossaries()
logging.debug(f"Iterating over {len(all_glossaries)} glossaries")
for glossary in all_glossaries:
if glossary["name"] == name:
logging.debug(f"Found a glossary named '{name}'")
if detailed:
logging.debug(
f"Recursively calling get_glossary with guid: {glossary['guid']}")
results = self.get_glossary(
guid=glossary["guid"], detailed=detailed)
else:
results = glossary
if results is None:
raise ValueError(
f"Glossary with a name of {name} was not found.")
return results
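    # Illustrative usage sketch (not part of the library): pulls the default
    # glossary with full term details; "termInfo" is only present when
    # detailed=True.
    #
    #   glossary = client.get_glossary(name="Glossary", detailed=True)
    #   term_details = glossary.get("termInfo", {})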
def _get_glossaries(self, limit=-1, offset=0, sort_order="ASC"):
"""
Retrieve all glossaries and the term headers.
:param int limit:
The maximum number of glossaries to pull back. Does not affect the
number of term headers included in the results.
:param int offset: The number of glossaries to skip.
        :param str sort_order: ASC or DESC sort order for the glossary name.
:return: The requested glossaries with the term headers.
:rtype: list(dict)
"""
results = None
atlas_endpoint = self.endpoint_url + "/glossary"
logging.debug("Retreiving all glossaries from catalog")
# TODO: Implement paging with offset and limit
getResult = requests.get(
atlas_endpoint,
params={"limit": limit, "offset": offset, "sort": sort_order},
headers=self.authentication.get_authentication_headers()
)
results = self._handle_response(getResult)
return results
def get_glossary_term(self, guid=None, name=None, glossary_name="Glossary", glossary_guid=None):
"""
Retrieve a single glossary term based on its guid. Providing only the
glossary_name will result in a lookup for the glossary guid. If you
plan on looking up many terms, consider using the get_glossary method
with the detailed argument set to True. That method will provide all
glossary terms in a dictionary for faster lookup.
:param str guid:
The guid of your term. Not required if name is specified.
:param str name:
The name of your term's display text. Overruled if guid is
provided.
:param str glossary_name:
The name of the glossary to use, defaults to "Glossary". Not
required if using the glossary_guid parameter.
:param str glossary_guid:
The unique guid of your glossary. Not required if using the
glossary_name parameter.
:return: The requested glossary term as a dict.
:rtype: dict
"""
results = None
if guid is None and name is None:
raise ValueError("Either guid or name and glossary must be set.")
if guid:
atlas_endpoint = self.endpoint_url + \
"/glossary/term/{}".format(guid)
getTerms = requests.get(
atlas_endpoint,
headers=self.authentication.get_authentication_headers()
)
results = self._handle_response(getTerms)
else:
terms_in_glossary = self.get_glossary(
name=glossary_name, guid=glossary_guid)
for term in terms_in_glossary["terms"]:
if term["displayText"] == name:
_guid = term["termGuid"]
results = self.get_glossary_term(guid=_guid)
return results
def _get_typedefs_header(self):
"""
Get the array of AtlasTypeDefHeader that contains category, guid,
name, and serviceType. Massage it into a dict based on the available
categories.
:return: A dictionary of categories and the names of defined types.
:rtype: dict(str, list(str))
"""
atlas_endpoint = self.endpoint_url + "/types/typedefs/headers"
getHeaders = requests.get(
atlas_endpoint,
headers=self.authentication.get_authentication_headers()
)
results = self._handle_response(getHeaders)
output = dict()
for typedef in results:
active_category = typedef["category"].lower()+"Defs"
if active_category not in output:
output[active_category] = []
output[active_category].append(typedef["name"])
return output
@PurviewLimitation
def classify_bulk_entities(self, entityGuids, classification):
"""
Given a single classification, you want to apply it to many entities
and you know their guid. This call will fail if any one of the guids
        already has the provided classification on that entity.
:param Union(str,list) entityGuids:
The guid or guids you want to classify.
:param classification:
The AtlasClassification object you want to apply to the entities.
:type classification:
Union(dict, :class:`~pyapacheatlas.core.entity.AtlasClassification`)
:return: A message indicating success. The only key is 'message',
containing a brief string.
:rtype: dict(str,Union(list(str),str))
"""
results = None
atlas_endpoint = self.endpoint_url + "/entity/bulk/classification"
if isinstance(classification, AtlasClassification):
classification = classification.to_json()
classification_name = classification["typeName"]
if isinstance(entityGuids, str):
entityGuids = [entityGuids]
elif isinstance(entityGuids, list):
pass
else:
raise TypeError(
"guid should be str or list, not {}".format(type(entityGuids)))
payload = {
# TODO: Accept AtlasClassification class
"classification": classification,
"entityGuids": entityGuids
}
postBulkClassifications = requests.post(
atlas_endpoint,
json=payload,
headers=self.authentication.get_authentication_headers()
)
try:
postBulkClassifications.raise_for_status()
except requests.RequestException:
raise AtlasException(postBulkClassifications.text)
results = {"message": f"Successfully assigned {classification_name}",
"entityGuids": entityGuids
}
return results
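    # Illustrative usage sketch (not part of the library): the guids and the
    # classification type name below are hypothetical.
    #
    #   client.classify_bulk_entities(
    #       entityGuids=["1234-abcd", "5678-efgh"],
    #       classification={"typeName": "MICROSOFT.PERSONAL.EMAIL"}
    #   )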
def _classify_entity_adds(self, guid, classifications):
"""
Update a given entity guid with the provided classifications.
:param str guid: The guid you want to classify.
:param list(dict) classifications:
            The list of AtlasClassification objects you want to apply to the
            entity.
        :return: The names of the classifications provided are returned.
:rtype: list(str)
"""
results = None
atlas_endpoint = self.endpoint_url + \
f"/entity/guid/{guid}/classifications"
postAddMultiClassifications = requests.post(
atlas_endpoint,
json=classifications,
headers=self.authentication.get_authentication_headers()
)
try:
postAddMultiClassifications.raise_for_status()
except requests.RequestException:
raise Exception(postAddMultiClassifications.text)
results = [c["typeName"] for c in classifications]
return results
def _classify_entity_updates(self, guid, classifications):
"""
Update a given entity guid with the provided classifications.
:param str guid: The guid you want to classify.
:param list(dict) classifications:
            The list of AtlasClassification objects you want to update on
            the entity.
        :return: The names of the classifications provided are returned.
:rtype: list(str)
"""
results = None
atlas_endpoint = self.endpoint_url + \
f"/entity/guid/{guid}/classifications"
putUpdateMultiClassifications = requests.put(
atlas_endpoint,
json=classifications,
headers=self.authentication.get_authentication_headers()
)
try:
putUpdateMultiClassifications.raise_for_status()
except requests.RequestException:
raise Exception(putUpdateMultiClassifications.text)
results = [c["typeName"] for c in classifications]
return results
@PurviewLimitation
def classify_entity(self, guid, classifications, force_update=False):
"""
Given a single entity, you want to apply many classifications.
        This call will fail if any one of the classifications already exists on
        the entity, unless you choose force_update=True.
        force_update will query the existing entity and sort the classifications
        into NEW (post) and UPDATE (put) and do two requests to add and update.
        force_update is not transactional; it performs adds first. If the adds
        succeed, it moves on to the updates. If the updates fail, the adds
        will continue to exist on the Atlas server and will not be rolled back.
An error can occur if, for example, the classification has some required
attribute that you do not provide.
:param str guid: The guid you want to classify.
        :param classifications:
            The list of AtlasClassification objects you want to apply to the
            entity.
        :type classifications:
            Union(dict, :class:`~pyapacheatlas.core.entity.AtlasClassification`)
:param bool force_update: Mark as True if any of your classifications
may already exist on the given entity.
:return: A message indicating success and which classifications were
'updates' vs 'adds' for the given guid.
:rtype: dict(str, str)
"""
results = None
adds = []
updates = []
if isinstance(classifications, dict):
classifications = [classifications]
elif isinstance(classifications, AtlasClassification):
classifications = [classifications.to_json()]
elif isinstance(classifications, list):
classifications = [
c.to_json()
if isinstance(c, AtlasClassification)
else c for c in classifications]
else:
raise TypeError("classifications should be dict or list, not {}".format(
type(classifications)))
if force_update:
# Get the existing entity's classifications
existing_classifications = set([
c["typeName"] for c in
self.get_entity_classifications(guid=guid)["list"]
])
# Sort the list into adds and updates (if exists)
temp_adds = []
temp_updates = []
for classification in classifications:
if classification["typeName"] in existing_classifications:
temp_updates.append(classification)
else:
temp_adds.append(classification)
# execute adds
if len(temp_adds) > 0:
adds = self._classify_entity_adds(guid, temp_adds)
# execute updates
if len(temp_updates) > 0:
updates = self._classify_entity_updates(guid, temp_updates)
else:
# Assuming this is all new adds
# execute adds
adds = self._classify_entity_adds(guid, classifications)
results = {
"message": "Successfully assigned classifications",
"guid": guid,
"adds": adds,
"updates": updates
}
return results
def declassify_entity(self, guid, classificationName):
"""
Given an entity guid and a classification name, remove the
classification from the given entity.
:param str guid: The guid for the entity that needs to be updated.
:param str classificationName:
The name of the classification to be deleted.
:return: A success message repeating what was deleted. The only key
is 'message', containing the classification name and guid.
:rtype: dict(str, str)
"""
results = None
atlas_endpoint = self.endpoint_url + \
f"/entity/guid/{guid}/classification/{classificationName}"
deleteEntityClassification = requests.delete(
atlas_endpoint,
headers=self.authentication.get_authentication_headers()
)
try:
deleteEntityClassification.raise_for_status()
except requests.RequestException:
raise Exception(deleteEntityClassification.text)
results = {"message":
f"Successfully removed classification: {classificationName} from {guid}.",
"guid": guid,
}
return results
@staticmethod
def _prepare_type_upload(typedefs=None, **kwargs):
"""
Massage the type upload. See rules in upload_typedefs.
"""
payload = {}
required_keys = ["classificationDefs", "entityDefs",
"enumDefs", "relationshipDefs", "structDefs"]
# If typedefs is defined as a dict and it contains at least one of the
# required keys for the TypeREST definition.
if isinstance(typedefs, dict) and len(set(typedefs.keys()).intersection(required_keys)) > 0:
payload = typedefs
# It isn't in the standard form but is it defined?
elif typedefs is not None:
# Assuming this is a single typedef
key = None
if isinstance(typedefs, BaseTypeDef):
key = typedefs.category.lower() + "Defs"
val = [typedefs.to_json()]
elif isinstance(typedefs, dict):
key = typedefs["category"].lower() + "Defs"
val = [typedefs]
else:
raise NotImplementedError(
"Uploading an object of type '{}' is not supported."
.format(type(typedefs))
)
payload = {key: val}
# Did we set any of the xDefs as arguments?
elif len(set(kwargs.keys()).intersection(required_keys)) > 0:
for typeRestKey in required_keys:
# Did we specify this key?
if typeRestKey in kwargs.keys():
payload[typeRestKey] = [
t.to_json() if isinstance(t, BaseTypeDef) else t
for t in kwargs[typeRestKey]
]
else:
raise RuntimeError(
f"Failed to upload typedefs for arguments: {kwargs}"
)
return payload
def upload_typedefs(self, typedefs=None, force_update=False, **kwargs):
"""
Provides a way to upload a single or multiple type definitions.
If you provide one type def, it will format the required wrapper
for you based on the type category.
If you want to upload multiple type defs or typedefs of different
category, you can pass the in kwargs `entityDefs`, `classificationDefs`,
`enumDefs`, `relationshipDefs`, `structDefs` which take in a list of
dicts or appropriate TypeDef objects.
Otherwise, you can pass in the wrapper yourself (e.g. {"entityDefs":[],
"relationshipDefs":[]}) by providing that dict to the typedefs
parameter. If the dict you pass in contains at least one of these Def
fields it will be considered valid and an upload will be attempted.
typedefs also takes in a BaseTypeDef object or a valid AtlasTypeDef
json / dict. If you provide a value in typedefs, it will ignore the
kwargs parameters.
When using force_update, it will look up all existing types and see
if any of your provided types exist. If they do exist, they will be
updated. If they do not exist, they will be issued as new. New types
are uploaded first. Existing types are updated second. There are no
transactional updates. New types can succeed and be inserted while
a batch of existing types can fail and not be updated.
:param typedefs: The set of type definitions you want to upload.
:type typedefs: Union(dict, :class:`~pyapacheatlas.core.typedef.BaseTypeDef`)
:param bool force_update:
            Set to True if your typedefs contain any types that already exist.
:return: The results of your upload attempt from the Atlas server.
:rtype: dict
Kwargs:
:param entityDefs: EntityDefs to upload.
:type entityDefs: list( Union(:class:`~pyapacheatlas.core.typedef.BaseTypeDef`, dict))
:param classificationDefs: classificationDefs to upload.
:type classificationDefs: list( Union(:class:`~pyapacheatlas.core.typedef.BaseTypeDef`, dict))
:param enumDefs: enumDefs to upload.
:type enumDefs: list( Union(:class:`~pyapacheatlas.core.typedef.BaseTypeDef`, dict))
:param relationshipDefs: relationshipDefs to upload.
:type relationshipDefs: list( Union(:class:`~pyapacheatlas.core.typedef.BaseTypeDef`, dict))
:param structDefs: structDefs to upload.
:type structDefs: list( Union(:class:`~pyapacheatlas.core.typedef.BaseTypeDef`, dict))
"""
# Should this take a list of type defs and figure out the formatting
# by itself?
# Should you pass in a AtlasTypesDef object and be forced to build
# it yourself?
results = None
atlas_endpoint = self.endpoint_url + "/types/typedefs"
payload = AtlasClient._prepare_type_upload(typedefs, **kwargs)
if not force_update:
# This is just a plain push of new entities
upload_typedefs_results = requests.post(
atlas_endpoint, json=payload,
headers=self.authentication.get_authentication_headers()
)
results = self._handle_response(upload_typedefs_results)
else:
# Look up all entities by their header
types_from_client = self._get_typedefs_header()
existing_types = dict()
new_types = dict()
# Loop over the intended upload and see if they exist already
# If they do not exist, shuffle them to the new_types upload.
# if they do exist, shuffle to the existing types upload.
for cat, typelist in payload.items():
existing_types[cat] = []
new_types[cat] = []
for t in typelist:
if t["name"] in types_from_client[cat]:
existing_types[cat].append(t)
else:
new_types[cat].append(t)
upload_new = requests.post(
atlas_endpoint, json=new_types,
headers=self.authentication.get_authentication_headers()
)
results_new = self._handle_response(upload_new)
upload_exist = requests.put(
atlas_endpoint, json=existing_types,
headers=self.authentication.get_authentication_headers()
)
results_exist = self._handle_response(upload_exist)
# Merge the results
results = results_new
for cat, updatedtypelist in results_exist.items():
if cat not in results:
results[cat] = []
results[cat].extend(updatedtypelist)
return results
@staticmethod
def _prepare_entity_upload(batch):
"""
Massages the batch to be in the right format and coerces to json/dict.
Supports list of dicts, dict of single entity, dict of AtlasEntitiesWithExtInfo.
:param batch: The batch of entities you want to upload.
:type batch: Union(list(dict), dict))
:return: Provides a dict formatted in the Atlas entity bulk upload.
:rtype: dict(str, list(dict))
"""
payload = batch
required_keys = ["entities"]
if isinstance(batch, list):
# It's a list, so we're assuming it's a list of entities
# Handles any type of AtlasEntity and mixed batches of dicts
# and AtlasEntities
dict_batch = [e.to_json() if isinstance(
e, AtlasEntity) else e for e in batch]
payload = {"entities": dict_batch}
elif isinstance(batch, dict):
current_keys = list(batch.keys())
# Does the dict entity conform to the required pattern?
if not any([req in current_keys for req in required_keys]):
# Assuming this is a single entity
# DESIGN DECISION: I'm assuming, if you're passing in
# json, you know the schema and I will not support
# AtlasEntity here.
payload = {"entities": [batch]}
elif isinstance(batch, AtlasEntity):
payload = {"entities": [batch.to_json()]}
else:
raise NotImplementedError(
f"Uploading type: {type(batch)} is not supported.")
return payload
def upload_entities(self, batch):
"""
Upload entities to your Atlas backed Data Catalog.
:param batch:
The batch of entities you want to upload. Supports a single dict,
AtlasEntity, list of dicts, list of atlas entities.
:type batch:
Union(dict, :class:`~pyapacheatlas.core.entity.AtlasEntity`,
list(dict), list(:class:`~pyapacheatlas.core.entity.AtlasEntity`) )
:return: The results of your bulk entity upload.
:rtype: dict
"""
# TODO Include a Do Not Overwrite call
results = None
atlas_endpoint = self.endpoint_url + "/entity/bulk"
payload = AtlasClient._prepare_entity_upload(batch)
postBulkEntities = requests.post(
atlas_endpoint,
json=payload,
headers=self.authentication.get_authentication_headers()
)
results = self._handle_response(postBulkEntities)
return results
def upload_relationship(self, relationship):
"""
        Upload an AtlasRelationship json. Should take the form of the following::
{
"typeName": "hive_table_columns",
"attributes": {},
"guid": -100,
"end1": {
"guid": assignments["-1"]
},
"end2": {
"guid": assignments["-5"]
}
}
:param dict relationship: The relationship you want to upload.
:return: The results of your relationship upload.
:rtype: dict
"""
# TODO Include a Do Not Overwrite call
results = None
atlas_endpoint = self.endpoint_url + "/relationship"
# TODO: Handling Updates instead of just creates
relationshipResp = requests.post(
atlas_endpoint,
json=relationship,
headers=self.authentication.get_authentication_headers()
)
results = self._handle_response(relationshipResp)
return results
def upload_terms(self, batch, force_update=False):
"""
Upload terms to your Atlas backed Data Catalog. Supports Purview Term
Templates by passing in an attributes field with the term template's
name as a field within attributes and an object of the required and
optional fields.
:param batch: A list of AtlasGlossaryTerm objects to be uploaded.
:type batch: list(dict)
:return:
A list of AtlasGlossaryTerm objects that are the results from
your upload.
:rtype: list(dict)
"""
# TODO Include a Do Not Overwrite call
results = None
atlas_endpoint = self.endpoint_url + "/glossary/terms"
postResp = requests.post(
atlas_endpoint,
json=batch,
headers=self.authentication.get_authentication_headers()
)
results = self._handle_response(postResp)
return results
def _search_generator(self, search_params, starting_offset=0):
"""
Generator to page through the search query results.
"""
atlas_endpoint = self.endpoint_url + "/search/advanced"
offset = starting_offset
while True:
postSearchResults = requests.post(
atlas_endpoint,
json=search_params,
headers=self.authentication.get_authentication_headers()
)
results = self._handle_response(postSearchResults)
return_values = results["value"]
return_count = len(return_values)
if return_count == 0:
return
offset = offset + return_count
search_params["offset"] = offset
for sub_result in return_values:
try:
yield sub_result
except StopIteration:
return
@PurviewOnly
def search_entities(self, query, limit=50, search_filter=None, starting_offset=0):
"""
        Search entities based on a query and automatically handles limits and
offsets to page through results.
The limit provides how many records are returned in each batch with a
maximum of 1,000 entries per page.
:param str query: The search query to be executed.
:param int limit:
A non-zero integer representing how many entities to
return for each page of the search results.
:param dict search_filter: A search filter to reduce your results.
:return: The results of your search as a generator.
:rtype: Iterator(dict)
"""
if limit > 1000 or limit < 1:
raise ValueError(
"The limit parameter must be non-zero and less than 1,000."
)
search_params = {
"keywords": query,
"limit": limit,
"offset": 0
}
# TODO: Make this smarter, make it easier to create filters
# without having to know how to make a filter object.
if search_filter:
# Example search filter might look like:
# {"filter": {"typeName": "DataSet", "includeSubTypes": True} }
search_params.update({"filter": search_filter})
search_generator = self._search_generator(search_params, starting_offset=starting_offset)
return search_generator
def get_entity_lineage(self, guid, depth=3, width=10, direction="BOTH", includeParent=False, getDerivedLineage=False):
"""
Gets lineage info about the specified entity by guid.
:param str guid: The guid of the entity for which you want to
retrieve lineage.
:param int depth: The number of hops for lineage
:param int width: The number of max expanding width in lineage
:param str direction: The direction of the lineage, which could
be INPUT, OUTPUT or BOTH.
:param bool includeParent: True to include the parent chain in
the response
:param bool getDerivedLineage: True to include derived lineage in
the response
:return: A dict representing AtlasLineageInfo with an array
of parentRelations and an array of relations
:rtype: dict(str, dict)
"""
direction = direction.strip().upper()
assert direction in ("BOTH", "INPUT", "OUTPUT"), "Invalid direction '{}'. Valid options are: BOTH, INPUT, OUTPUT".format(direction)
atlas_endpoint = self.endpoint_url + \
f"/lineage/{guid}"
getLineageRequest = requests.get(
atlas_endpoint,
params={"depth": depth, "width": width, "direction": direction, "includeParent": includeParent, "getDerivedLineage": getDerivedLineage},
headers=self.authentication.get_authentication_headers()
)
results = self._handle_response(getLineageRequest)
return results
class PurviewClient(AtlasClient):
"""
Provides communication between your application and the Azure Purview
service. Simplifies the requirements for knowing the endpoint url and
requires only the Purview account name.
:param str account_name:
Your Purview account name.
:param authentication:
The method of authentication.
:type authentication:
:class:`~pyapacheatlas.auth.base.AtlasAuthBase`
"""
def __init__(self, account_name, authentication=None):
endpoint_url = f"https://{account_name.lower()}.catalog.purview.azure.com/api/atlas/v2"
super().__init__(endpoint_url, authentication)
@PurviewOnly
def get_entity_next_lineage(self, guid, direction, getDerivedLineage=False, offset=0, limit=-1):
"""
Returns immediate next level lineage info about entity with pagination
:param str guid: The guid of the entity for which you want to
retrieve lineage.
:param str direction: The direction of the lineage, which could
be INPUT or OUTPUT.
:param bool getDerivedLineage: True to include derived lineage in
the response
:param int offset: The offset for pagination purpose.
:param int limit: The page size - by default there is no paging.
:return: A dict representing AtlasLineageInfo with an array
of parentRelations and an array of relations
:rtype: dict(str, dict)
"""
direction = direction.strip().upper()
assert direction in ("INPUT", "OUTPUT"), "Invalid direction '{}'. Valid options are: INPUT, OUTPUT".format(direction)
atlas_endpoint = self.endpoint_url + \
f"/lineage/{guid}/next"
# TODO: Implement paging with offset and limit
getLineageRequest = requests.get(
atlas_endpoint,
params={"direction": direction, "getDerivedLineage": getDerivedLineage, "offset": offset, "limit": limit},
headers=self.authentication.get_authentication_headers()
)
results = self._handle_response(getLineageRequest)
return results
def import_terms(self, csv_path, glossary_name="Glossary", glossary_guid=None):
"""
Bulk import terms from an existing csv file. If you are using the system
default, you must include the following headers:
Name,Definition,Status,Related Terms,Synonyms,Acronym,Experts,Stewards
For custom term templates, additional attributes must include
[Attribute][termTemplateName]attributeName as the header.
:param str csv_path: Path to CSV that will be imported.
:param str glossary_name:
Name of the glossary. Defaults to 'Glossary'. Not used if
glossary_guid is provided.
:param str glossary_guid:
Guid of the glossary, optional if glossary_name is provided.
Otherwise, this parameter takes priority over glossary_name.
:return:
A dict that contains an `id` that you can use in
`import_terms_status` to get the status of the import operation.
:rtype: dict
"""
results = None
if glossary_guid:
atlas_endpoint = self.endpoint_url + \
f"/glossary/{glossary_guid}/terms/import"
elif glossary_name:
atlas_endpoint = self.endpoint_url + \
f"/glossary/name/{glossary_name}/terms/import"
else:
raise ValueError(
"Either glossary_name or glossary_guid must be defined.")
headers = self.authentication.get_authentication_headers()
# Pop the default of application/json so that request can fill in the
# multipart/form-data; boundary=xxxx that is automatically generated
# when using the files argument.
headers.pop("Content-Type")
postResp = requests.post(
atlas_endpoint,
files={'file': ("file", open(csv_path, 'rb'))},
headers=headers
)
results = self._handle_response(postResp)
return results
@PurviewOnly
def import_terms_status(self, operation_guid):
"""
Get the operation status of a glossary term import activity. You get
the operation_guid after executing the `import_terms` method and find
the `id` field in the response dict/json.
:param str operation_guid: The id of the import operation.
:return: The status of the import operation as a dict. The dict includes
a field called `status` that will report back RUNNING, SUCCESS, or
FAILED. Other fields include the number of terms detected and
number of errors.
:rtype: dict
"""
results = None
atlas_endpoint = self.endpoint_url + \
f"/glossary/terms/import/{operation_guid}"
postResp = requests.get(
atlas_endpoint,
headers=self.authentication.get_authentication_headers()
)
results = self._handle_response(postResp)
return results
@PurviewOnly
def export_terms(self, guids, csv_path, glossary_name="Glossary", glossary_guid=None):
"""
:param list(str) guids: List of guids that should be exported as csv.
:param str csv_path: Path to CSV that will be imported.
:param str glossary_name:
Name of the glossary. Defaults to 'Glossary'. Not used if
glossary_guid is provided.
:param str glossary_guid:
Guid of the glossary, optional if glossary_name is provided.
Otherwise, this parameter takes priority over glossary_name.
Providing glossary_guid is also faster as you avoid a lookup based
on glossary_name.
:return: A csv file is written to the csv_path.
:rtype: None
"""
if glossary_guid:
# Glossary guid is defined so we don't need to look up the guid
pass
elif glossary_name:
glossary = self.get_glossary(glossary_name)
glossary_guid = glossary["guid"]
else:
raise ValueError(
"Either glossary_name or glossary_guid must be defined.")
results = None
atlas_endpoint = self.endpoint_url + \
f"/glossary/{glossary_guid}/terms/export"
postResp = requests.post(
atlas_endpoint,
json=guids,
headers=self.authentication.get_authentication_headers()
)
# Can't use handle response since it expects json
try:
postResp.raise_for_status()
except requests.RequestException as e:
if "errorCode" in postResp:
raise AtlasException(postResp.text)
else:
raise requests.RequestException(postResp.text)
with open(csv_path, 'wb') as fp:
fp.write(postResp.content)
return None
|
#===============================================================================
# splitbam.py
#===============================================================================
"""Split a BAM file into two subsamples"""
import os.path
import pysam
import subprocess
import tempfile
from argparse import ArgumentParser
# Functions ====================================================================
def parse_arguments():
parser = ArgumentParser(description='Split a BAM file into two subsamples')
parser.add_argument(
'input',
metavar='<path/to/input.bam>',
help='path to input BAM file'
)
parser.add_argument(
'out0',
metavar='<path/to/output0.bam>',
help='path to first output BAM file'
)
parser.add_argument(
'out1',
metavar='<path/to/output1.bam>',
help='path to second output BAM file'
)
parser.add_argument(
'--processes',
metavar='<int>',
type=int,
default=1,
help='number of processes'
)
parser.add_argument(
'--memory',
metavar='<float>',
type=float,
default=768/1024,
help='memory limit in GB'
)
parser.add_argument(
'--tmp-dir',
metavar='<path/to/tmp/dir>',
help='directory for temporary files'
)
return parser.parse_args()
def main():
args = parse_arguments()
with tempfile.TemporaryDirectory(dir=args.tmp_dir) as temp_dir:
temp_in = os.path.join(temp_dir, 'namesort_in.bam')
temp_out0 = os.path.join(temp_dir, 'namesort_out1.sam')
temp_out1 = os.path.join(temp_dir, 'namesort_out2.sam')
pysam.sort(
'-@', str(args.processes - 1),
'-m', f'{int(args.memory / args.processes * 1024)}M',
'-n',
'-T', args.tmp_dir or tempfile.gettempdir(),
'-o', temp_in,
args.input
)
for out in temp_out0, temp_out1:
pysam.view(
'-@', str(args.processes - 1),
'-H',
'-o', out,
temp_in,
catch_stdout=False
)
with open(temp_in, 'r') as f:
with subprocess.Popen(
('samtools', 'view'), stdin=f, stdout=subprocess.PIPE
) as view:
subprocess.run(
('awk', f'{{if(NR%4<2) {{print >> "{temp_out0}"}} else {{print >> "{temp_out1}"}}}}'),
stdin=view.stdout
)
for temp_out, out in (temp_out0, args.out0), (temp_out1, args.out1):
pysam.sort(
'-@', str(args.processes - 1),
'-m', f'{int(args.memory / args.processes * 1024)}M',
'-T', args.tmp_dir or tempfile.gettempdir(),
'-o', out,
temp_out
)
pysam.index(out)
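# Illustrative helper, not part of splitbam.py: a pure-Python rendering of the awk
# rule used above. awk's NR is 1-based, so records 1, 4, 5, 8, ... go to the first
# output and records 2, 3, 6, 7, ... to the second, i.e. alternating blocks of two
# records after the first one. Note that main() is defined but never called in this
# file; presumably it is wired up as a console-script entry point elsewhere.
def _awk_split_rule(num_records):
    """Return the 1-based record numbers that NR%4<2 routes to each output."""
    first = [nr for nr in range(1, num_records + 1) if nr % 4 < 2]
    second = [nr for nr in range(1, num_records + 1) if nr % 4 >= 2]
    return first, second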
| #===============================================================================
# splitbam.py
#===============================================================================
"""Split a BAM file into two subsamples"""
import os.path
import pysam
import subprocess
import tempfile
from argparse import ArgumentParser
# Functions ====================================================================
def parse_arguments():
parser = ArgumentParser(description='Split a BAM file into two subsamples')
parser.add_argument(
'input',
metavar='<path/to/input.bam>',
help='path to input BAM file'
)
parser.add_argument(
'out0',
metavar='<path/to/output0.bam>',
help='path to first output BAM file'
)
parser.add_argument(
'out1',
metavar='<path/to/output1.bam>',
help='path to second output BAM file'
)
parser.add_argument(
'--processes',
metavar='<int>',
type=int,
default=1,
help='number of processes'
)
parser.add_argument(
'--memory',
metavar='<float>',
type=float,
default=768/1024,
help='memory limit in GB'
)
parser.add_argument(
'--tmp-dir',
metavar='<path/to/tmp/dir>',
help='directory for temporary files'
)
return parser.parse_args()
def main():
args = parse_arguments()
with tempfile.TemporaryDirectory(dir=args.tmp_dir) as temp_dir:
temp_in = os.path.join(temp_dir, 'namesort_in.bam')
temp_out0 = os.path.join(temp_dir, 'namesort_out1.sam')
temp_out1 = os.path.join(temp_dir, 'namesort_out2.sam')
pysam.sort(
'-@', str(args.processes - 1),
'-m', f'{int(args.memory / args.processes * 1024)}M',
'-n',
'-T', args.tmp_dir or tempfile.gettempdir(),
'-o', temp_in,
args.input
)
for out in temp_out0, temp_out1:
pysam.view(
'-@', str(args.processes - 1),
'-H',
'-o', out,
temp_in,
catch_stdout=False
)
with open(temp_in, 'r') as f:
with subprocess.Popen(
('samtools', 'view'), stdin=f, stdout=subprocess.PIPE
) as view:
subprocess.run(
('awk', f'{{if(NR%4<2) {{print >> "{temp_out0}"}} else {{print >> "{temp_out1}"}}}}'),
stdin=view.stdout
)
for temp_out, out in (temp_out0, args.out0), (temp_out1, args.out1):
pysam.sort(
'-@', str(args.processes - 1),
'-m', f'{int(args.memory / args.processes * 1024)}M',
'-T', args.tmp_dir or tempfile.gettempdir(),
'-o', out,
temp_out
)
pysam.index(out)
|
import demistomock as demisto
from CommonServerPython import *
from CommonServerUserPython import *
""" IMPORTS """
import urllib3
import traceback
from typing import List, Dict, Optional, Tuple, Generator
# disable insecure warnings
urllib3.disable_warnings()
INTEGRATION_NAME = "Cofense Feed"
_RESULTS_PER_PAGE = 50 # Max for Cofense is 100
class Client(BaseClient):
"""Implements class for miners of Cofense feed over http/https."""
available_fields = ["all", "malware"]
cofense_to_indicator = {
"IPv4 Address": FeedIndicatorType.IP,
"Domain Name": FeedIndicatorType.Domain,
"URL": FeedIndicatorType.URL,
"Email": FeedIndicatorType.Email,
}
def __init__(
self,
url: str,
auth: Tuple[str, str],
threat_type: Optional[str] = None,
verify: bool = False,
proxy: bool = False,
read_time_out: Optional[float] = 120.0,
):
"""Constructor of Client and BaseClient
Arguments:
url {str} -- url for Cofense feed
auth {Tuple[str, str]} -- (username, password)
Keyword Arguments:
threat_type {Optional[str]} -- One of available_fields (default: {None})
verify {bool} -- Should verify certificate. (default: {False})
proxy {bool} -- Should use proxy. (default: {False})
read_time_out {int} -- Read time out in seconds. (default: {30})
"""
self.read_time_out = read_time_out
self.threat_type = (
threat_type if threat_type in self.available_fields else "all"
)
# Request related attributes
self.suffix = "/apiv1/threat/search/"
super().__init__(url, verify=verify, proxy=proxy, auth=auth)
def _http_request(self, *args, **kwargs) -> dict:
if "timeout" not in kwargs:
kwargs["timeout"] = (5.0, self.read_time_out)
return super()._http_request(*args, **kwargs)
def build_iterator(
self, begin_time: Optional[int] = None, end_time: Optional[int] = None
) -> Generator:
"""Builds an iterator from given data filtered by start and end times.
Keyword Arguments:
begin_time {Optional[str, int]} --
Where to start fetching.
Timestamp represented in unix format. (default: {None})
end_time {Optional[int]} --
Time to stop fetching (if not supplied, will be time now).
Timestamp represented in unix format. (default: {None}).
Yields:
Dict -- Threat from Cofense
"""
# if not getting now
if not end_time:
end_time = get_now()
payload = {
"beginTimestamp": str(begin_time),
"endTimestamp": str(end_time),
"threatType": self.threat_type,
"resultsPerPage": _RESULTS_PER_PAGE,
}
# For first fetch, there is only start time.
if not begin_time:
payload["beginTimestamp"] = str(end_time)
del payload["endTimestamp"]
demisto.debug(f"{INTEGRATION_NAME} - pulling {begin_time}/{end_time}")
cur_page = 0
total_pages = 1
while cur_page < total_pages:
payload["page"] = cur_page
raw_response = self._http_request("post", self.suffix, params=payload)
data = raw_response.get("data", {})
if data:
if total_pages <= 1:
# Call to get all pages.
total_pages = data.get("page", {}).get("totalPages")
if total_pages is None:
return_error('No "totalPages" in response')
demisto.debug(f"total_pages set to {total_pages}")
threats = data.get("threats", [])
for t in threats:
yield t
demisto.debug(f"{INTEGRATION_NAME} - pulling {cur_page+1}/{total_pages}. page size: {_RESULTS_PER_PAGE}")
cur_page += 1
else:
return_error(f'{INTEGRATION_NAME} - no "data" in response')
@classmethod
def _convert_block(cls, block: dict) -> Tuple[str, str]:
"""Gets a Cofense block from blockSet and enriches it.
Args:
block:
Returns:
indicator type, value
"""
indicator_type = block.get("blockType", "")
indicator_type = str(cls.cofense_to_indicator.get(indicator_type))
# Only URL indicator has inside information in data_1
if indicator_type == FeedIndicatorType.URL:
value = block.get("data_1", {}).get("url")
else:
value = block.get("data_1")
# If a domain has '*' in the value it is of type domainGlob
if indicator_type == FeedIndicatorType.Domain and '*' in value:
indicator_type = FeedIndicatorType.DomainGlob
return indicator_type, value
@classmethod
def process_item(cls, threat: dict) -> List[dict]:
"""Gets a threat and processes them.
Arguments:
threat {dict} -- A threat from Cofense ("threats" key)
Returns:
list -- List of dicts representing indicators.
Examples:
>>> Client.process_item({"id": 123, "blockSet": [{"data_1": "ip", "blockType": "IPv4 Address"}]})
[{'value': 'ip', 'type': 'IP', 'rawJSON': \
{'data_1': 'ip', 'blockType': 'IPv4 Address', 'value': 'ip', 'type': 'IP', 'threat_id': 123}}]
"""
results = list()
block_set: List[dict] = threat.get("blockSet", [])
threat_id = threat.get("id")
for block in block_set:
indicator, value = cls._convert_block(block)
block["value"] = value
block["type"] = indicator
block["threat_id"] = threat_id
malware_family: dict = block.get("malwarefamily", {})
ip_detail: dict = block.get("ipDetail", {})
if indicator:
results.append({
"value": value,
"type": indicator,
"rawJSON": block,
"fields": {
"name": threat_id,
"malwarefamily": malware_family.get("familyName"),
"description": malware_family.get("description"),
"sourceoriginalseverity": block.get("impact"),
"threattypes": {
"threatcategoryconfidence": block.get("confidence"),
"threatcategory": block.get("role")
},
"geocountry": ip_detail.get("countryIsoCode"),
"geolocation": f'{ip_detail.get('latitude', '')},{ip_detail.get('longitude', '')}' if ip_detail
else ""
}
})
return results
def test_module(client: Client) -> Tuple[str, dict, dict]:
"""A simple test module
Arguments:
client {Client} -- Client derives from BaseClient
Returns:
str -- "ok" if succeeded, else raises a error.
"""
for _ in client.build_iterator():
return "ok", {}, {}
return "ok", {}, {}
def fetch_indicators_command(
client: Client,
begin_time: Optional[int] = None,
end_time: Optional[int] = None,
limit: Optional[int] = None,
) -> List[Dict]:
"""Fetches the indicators from client.
Arguments:
client {Client} -- Client derives from BaseClient
Keyword Arguments:
begin_time {Optional[int]} -- Time to start fetch from (default: {None})
end_time {Optional[int]} -- Time to stop fetch to (default: {None})
limit {Optional[int]} -- Maximum amount of indicators to fetch. (default: {None})
Returns:
List[Dict] -- List of indicators from threat
"""
indicators = list()
for threat in client.build_iterator(begin_time=begin_time, end_time=end_time):
# get maximum of limit
new_indicators = client.process_item(threat)
indicators.extend(new_indicators)
if limit and limit < len(indicators):
indicators = indicators[:limit]
break
return indicators
def build_fetch_times(fetch_time: str, last_fetch: Optional[dict] = None) -> Tuple[int, int]:
"""Build the start and end time of the fetch session.
Args:
fetch_time: fetch time (for example: "3 days")
last_fetch: Last fetch object
Returns:
begin_time, end_time
"""
if isinstance(last_fetch, dict) and last_fetch.get("timestamp"):
begin_time = last_fetch.get("timestamp", 0) # type: int
end_time = get_now()
else: # First fetch
begin_time, end_time = parse_date_range_no_milliseconds(fetch_time)
return begin_time, end_time
def parse_date_range_no_milliseconds(from_time: str) -> Tuple[int, int]:
"""Gets a range back and return time before the string and to now.
Without milliseconds.
Args:
from_time:The date range to be parsed (required)
Returns:
start time, now
Examples:
>>> parse_date_range_no_milliseconds("3 days")
(1578729151, 1578988351)
"""
begin_time, end_time = parse_date_range(from_time, to_timestamp=True)
return int(begin_time / 1000), int(end_time / 1000)
def get_indicators_command(client: Client, args: dict) -> Tuple[str, list]:
"""Getting indicators into Demisto's incident.
Arguments:
client {Client} -- A client object
args {dict} -- Usually demisto.args()
Returns:
Tuple[str, list] -- human_readable, raw_response
"""
limit = int(args.get("limit", 10))
from_time = args.get("from_time", "3 days")
begin_time, end_time = build_fetch_times(from_time)
indicators = fetch_indicators_command(
client, begin_time=begin_time, end_time=end_time, limit=limit)
human_readable = tableToMarkdown(
f"Results from {INTEGRATION_NAME}:",
[indicator.get("rawJSON") for indicator in indicators],
["threat_id", "type", "value", "impact", "confidence", "roleDescription"],
)
return human_readable, indicators
def get_now() -> int:
"""Returns time now without milliseconds
Returns:
int -- time now without milliseconds.
"""
return int(datetime.now().timestamp() / 1000)
def main():
"""Main function
"""
params = demisto.params()
# handle params
url = "https://www.threathq.com"
credentials = params.get("credentials", {})
if not credentials:
raise DemistoException("Credentials are empty. "
"Fill up the username/password fields in the integration configuration.")
auth = (credentials.get("identifier"), credentials.get("password"))
verify = not params.get("insecure")
proxy = params.get("proxy")
threat_type = params.get("threat_type")
client = Client(url, auth=auth, verify=verify, proxy=proxy, threat_type=threat_type)
demisto.info(f"Command being called is {demisto.command()}")
try:
if demisto.command() == "test-module":
return_outputs(*test_module(client))
elif demisto.command() == "fetch-indicators":
begin_time, end_time = build_fetch_times(params.get("fetch_time", "3 days"))
indicators = fetch_indicators_command(client, begin_time, end_time)
# Send indicators to demisto
for b in batch(indicators, batch_size=2000):
demisto.createIndicators(b)
elif demisto.command() == "cofense-get-indicators":
# dummy command for testing
readable_outputs, raw_response = get_indicators_command(client, demisto.args())
return_outputs(readable_outputs, {}, raw_response=raw_response)
except Exception as err:
return_error(f"Error in {INTEGRATION_NAME} integration:\n{str(err)}\n\nTrace:{traceback.format_exc()}")
if __name__ in ["__main__", "builtin", "builtins"]:
main()
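# Standalone sketch of the paging pattern in Client.build_iterator above. The helper
# name and the fetch_page callable are assumptions for illustration; they are not
# part of the integration.
def _paged_threats(fetch_page):
    """fetch_page(page_number) must return a dict shaped like the Cofense 'data' payload."""
    cur_page, total_pages = 0, 1
    while cur_page < total_pages:
        data = fetch_page(cur_page)
        # The first page reports how many pages exist in total.
        total_pages = data.get("page", {}).get("totalPages", total_pages)
        for threat in data.get("threats", []):
            yield threat
        cur_page += 1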
| import demistomock as demisto
from CommonServerPython import *
from CommonServerUserPython import *
""" IMPORTS """
import urllib3
import traceback
from typing import List, Dict, Optional, Tuple, Generator
# disable insecure warnings
urllib3.disable_warnings()
INTEGRATION_NAME = "Cofense Feed"
_RESULTS_PER_PAGE = 50 # Max for Cofense is 100
class Client(BaseClient):
"""Implements class for miners of Cofense feed over http/https."""
available_fields = ["all", "malware"]
cofense_to_indicator = {
"IPv4 Address": FeedIndicatorType.IP,
"Domain Name": FeedIndicatorType.Domain,
"URL": FeedIndicatorType.URL,
"Email": FeedIndicatorType.Email,
}
def __init__(
self,
url: str,
auth: Tuple[str, str],
threat_type: Optional[str] = None,
verify: bool = False,
proxy: bool = False,
read_time_out: Optional[float] = 120.0,
):
"""Constructor of Client and BaseClient
Arguments:
url {str} -- url for Cofense feed
auth {Tuple[str, str]} -- (username, password)
Keyword Arguments:
threat_type {Optional[str]} -- One of available_fields (default: {None})
verify {bool} -- Should verify certificate. (default: {False})
proxy {bool} -- Should use proxy. (default: {False})
read_time_out {int} -- Read time out in seconds. (default: {30})
"""
self.read_time_out = read_time_out
self.threat_type = (
threat_type if threat_type in self.available_fields else "all"
)
# Request related attributes
self.suffix = "/apiv1/threat/search/"
super().__init__(url, verify=verify, proxy=proxy, auth=auth)
def _http_request(self, *args, **kwargs) -> dict:
if "timeout" not in kwargs:
kwargs["timeout"] = (5.0, self.read_time_out)
return super()._http_request(*args, **kwargs)
def build_iterator(
self, begin_time: Optional[int] = None, end_time: Optional[int] = None
) -> Generator:
"""Builds an iterator from given data filtered by start and end times.
Keyword Arguments:
begin_time {Optional[str, int]} --
Where to start fetching.
Timestamp represented in unix format. (default: {None})
end_time {Optional[int]} --
Time to stop fetching (if not supplied, will be time now).
Timestamp represented in unix format. (default: {None}).
Yields:
Dict -- Threat from Cofense
"""
# if not getting now
if not end_time:
end_time = get_now()
payload = {
"beginTimestamp": str(begin_time),
"endTimestamp": str(end_time),
"threatType": self.threat_type,
"resultsPerPage": _RESULTS_PER_PAGE,
}
# For first fetch, there is only start time.
if not begin_time:
payload["beginTimestamp"] = str(end_time)
del payload["endTimestamp"]
demisto.debug(f"{INTEGRATION_NAME} - pulling {begin_time}/{end_time}")
cur_page = 0
total_pages = 1
while cur_page < total_pages:
payload["page"] = cur_page
raw_response = self._http_request("post", self.suffix, params=payload)
data = raw_response.get("data", {})
if data:
if total_pages <= 1:
# Call to get all pages.
total_pages = data.get("page", {}).get("totalPages")
if total_pages is None:
return_error('No "totalPages" in response')
demisto.debug(f"total_pages set to {total_pages}")
threats = data.get("threats", [])
for t in threats:
yield t
demisto.debug(f"{INTEGRATION_NAME} - pulling {cur_page+1}/{total_pages}. page size: {_RESULTS_PER_PAGE}")
cur_page += 1
else:
return_error(f'{INTEGRATION_NAME} - no "data" in response')
@classmethod
def _convert_block(cls, block: dict) -> Tuple[str, str]:
"""Gets a Cofense block from blockSet and enriches it.
Args:
block:
Returns:
indicator type, value
"""
indicator_type = block.get("blockType", "")
indicator_type = str(cls.cofense_to_indicator.get(indicator_type))
# Only URL indicator has inside information in data_1
if indicator_type == FeedIndicatorType.URL:
value = block.get("data_1", {}).get("url")
else:
value = block.get("data_1")
# If a domain has '*' in the value it is of type domainGlob
if indicator_type == FeedIndicatorType.Domain and '*' in value:
indicator_type = FeedIndicatorType.DomainGlob
return indicator_type, value
@classmethod
def process_item(cls, threat: dict) -> List[dict]:
"""Gets a threat and processes them.
Arguments:
threat {dict} -- A threat from Cofense ("threats" key)
Returns:
list -- List of dicts representing indicators.
Examples:
>>> Client.process_item({"id": 123, "blockSet": [{"data_1": "ip", "blockType": "IPv4 Address"}]})
[{'value': 'ip', 'type': 'IP', 'rawJSON': \
{'data_1': 'ip', 'blockType': 'IPv4 Address', 'value': 'ip', 'type': 'IP', 'threat_id': 123}}]
"""
results = list()
block_set: List[dict] = threat.get("blockSet", [])
threat_id = threat.get("id")
for block in block_set:
indicator, value = cls._convert_block(block)
block["value"] = value
block["type"] = indicator
block["threat_id"] = threat_id
malware_family: dict = block.get("malwarefamily", {})
ip_detail: dict = block.get("ipDetail", {})
if indicator:
results.append({
"value": value,
"type": indicator,
"rawJSON": block,
"fields": {
"name": threat_id,
"malwarefamily": malware_family.get("familyName"),
"description": malware_family.get("description"),
"sourceoriginalseverity": block.get("impact"),
"threattypes": {
"threatcategoryconfidence": block.get("confidence"),
"threatcategory": block.get("role")
},
"geocountry": ip_detail.get("countryIsoCode"),
"geolocation": f'{ip_detail.get("latitude", "")},{ip_detail.get("longitude", "")}' if ip_detail
else ""
}
})
return results
def test_module(client: Client) -> Tuple[str, dict, dict]:
"""A simple test module
Arguments:
client {Client} -- Client derives from BaseClient
Returns:
str -- "ok" if succeeded, else raises a error.
"""
for _ in client.build_iterator():
return "ok", {}, {}
return "ok", {}, {}
def fetch_indicators_command(
client: Client,
begin_time: Optional[int] = None,
end_time: Optional[int] = None,
limit: Optional[int] = None,
) -> List[Dict]:
"""Fetches the indicators from client.
Arguments:
client {Client} -- Client derives from BaseClient
Keyword Arguments:
begin_time {Optional[int]} -- Time to start fetch from (default: {None})
end_time {Optional[int]} -- Time to stop fetch to (default: {None})
limit {Optional[int]} -- Maximum amount of indicators to fetch. (default: {None})
Returns:
List[Dict] -- List of indicators from threat
"""
indicators = list()
for threat in client.build_iterator(begin_time=begin_time, end_time=end_time):
# get maximum of limit
new_indicators = client.process_item(threat)
indicators.extend(new_indicators)
if limit and limit < len(indicators):
indicators = indicators[:limit]
break
return indicators
def build_fetch_times(fetch_time: str, last_fetch: Optional[dict] = None) -> Tuple[int, int]:
"""Build the start and end time of the fetch session.
Args:
fetch_time: fetch time (for example: "3 days")
last_fetch: Last fetch object
Returns:
begin_time, end_time
"""
if isinstance(last_fetch, dict) and last_fetch.get("timestamp"):
begin_time = last_fetch.get("timestamp", 0) # type: int
end_time = get_now()
else: # First fetch
begin_time, end_time = parse_date_range_no_milliseconds(fetch_time)
return begin_time, end_time
def parse_date_range_no_milliseconds(from_time: str) -> Tuple[int, int]:
"""Gets a range back and return time before the string and to now.
Without milliseconds.
Args:
from_time:The date range to be parsed (required)
Returns:
start time, now
Examples:
>>> parse_date_range_no_milliseconds("3 days")
(1578729151, 1578988351)
"""
begin_time, end_time = parse_date_range(from_time, to_timestamp=True)
return int(begin_time / 1000), int(end_time / 1000)
def get_indicators_command(client: Client, args: dict) -> Tuple[str, list]:
"""Getting indicators into Demisto's incident.
Arguments:
client {Client} -- A client object
args {dict} -- Usually demisto.args()
Returns:
Tuple[str, list] -- human_readable, raw_response
"""
limit = int(args.get("limit", 10))
from_time = args.get("from_time", "3 days")
begin_time, end_time = build_fetch_times(from_time)
indicators = fetch_indicators_command(
client, begin_time=begin_time, end_time=end_time, limit=limit)
human_readable = tableToMarkdown(
f"Results from {INTEGRATION_NAME}:",
[indicator.get("rawJSON") for indicator in indicators],
["threat_id", "type", "value", "impact", "confidence", "roleDescription"],
)
return human_readable, indicators
def get_now() -> int:
"""Returns time now without milliseconds
Returns:
int -- time now without milliseconds.
"""
return int(datetime.now().timestamp() / 1000)
def main():
"""Main function
"""
params = demisto.params()
# handle params
url = "https://www.threathq.com"
credentials = params.get("credentials", {})
if not credentials:
raise DemistoException("Credentials are empty. "
"Fill up the username/password fields in the integration configuration.")
auth = (credentials.get("identifier"), credentials.get("password"))
verify = not params.get("insecure")
proxy = params.get("proxy")
threat_type = params.get("threat_type")
client = Client(url, auth=auth, verify=verify, proxy=proxy, threat_type=threat_type)
demisto.info(f"Command being called is {demisto.command()}")
try:
if demisto.command() == "test-module":
return_outputs(*test_module(client))
elif demisto.command() == "fetch-indicators":
begin_time, end_time = build_fetch_times(params.get("fetch_time", "3 days"))
indicators = fetch_indicators_command(client, begin_time, end_time)
# Send indicators to demisto
for b in batch(indicators, batch_size=2000):
demisto.createIndicators(b)
elif demisto.command() == "cofense-get-indicators":
# dummy command for testing
readable_outputs, raw_response = get_indicators_command(client, demisto.args())
return_outputs(readable_outputs, {}, raw_response=raw_response)
except Exception as err:
return_error(f"Error in {INTEGRATION_NAME} integration:\n{str(err)}\n\nTrace:{traceback.format_exc()}")
if __name__ in ["__main__", "builtin", "builtins"]:
main()
|
from typing import Dict
import os
import ujson
import shutil
from multiprocessing import Pool
from ncc import tasks
from collections import Counter
from ncc.data import (
Dictionary,
indexed_dataset,
)
from ncc.tokenizers import tokenization
from ncc.data.tools.binarizer import Binarizer
from ncc.utils.file_ops.yaml_io import load_yaml
from ncc.utils.file_ops import (json_io)
from ncc.utils.path_manager import PathManager
from ncc import LOGGER
def binarize(args: Dict, filename: str, dict: Dictionary, in_file: str,
offset: int, end: int, append_eos: bool = False):
"""binarize function for multi-processing"""
ds_file = '{}.mmap'.format(in_file)
ds = indexed_dataset.make_builder(ds_file, impl=args['preprocess']['dataset_impl'], vocab_size=len(dict))
def consumer(tensor):
ds.add_item(tensor)
res = Binarizer.binarize(filename, dict, consumer, tokenize=tokenization.json_tokenizer,
append_eos=append_eos, offset=offset, end=end)
ds.finalize('{}.idx'.format(in_file))
return res
def main(args):
task = tasks.get_task(args['preprocess']['task'])
LOGGER.info('mkdir {} for {} task'.format(args['preprocess']['destdir'], args['preprocess']['task']))
PathManager.mkdir(args['preprocess']['destdir'])
def train_path(lang):
return "{}{}".format(args['preprocess']['trainpref'], ("." + lang) if lang else "")
def valid_path(lang):
return "{}{}".format(args['preprocess']['validpref'], ("." + lang) if lang else "")
def file_name(prefix, lang):
fname = prefix
if lang is not None:
fname += ".{lang}".format(lang=lang)
return fname
def dest_path(prefix, lang):
return os.path.join(args['preprocess']['destdir'], file_name(prefix, lang))
def dict_path(lang):
return dest_path(lang, "dict") + ".jsonl"
target = not args['preprocess']['only_source']
from dataset.codexglue.code_to_text import BPE_DIR
source_dict_file = os.path.join(BPE_DIR, 'csn/csn.spm.vocab')
target_dict_file = os.path.join(os.path.dirname(args['preprocess']['destdir']), 'dict.jsonl')
with open(source_dict_file, 'r') as reader, open(target_dict_file, 'w') as writer:
for line in reader:
print(json_io.json_dumps([line.split('\t')[0], 100]), file=writer)
src_dict = tgt_dict = task.load_dictionary(target_dict_file)
# shared dicts for all languages
src_dict.save(
os.path.join(os.path.dirname(args['preprocess']['destdir']), f"{args['preprocess']['source_lang']}.jsonl")
)
tgt_dict.save(
os.path.join(os.path.dirname(args['preprocess']['destdir']), f"{args['preprocess']['target_lang']}.jsonl")
)
src_dict.save(dict_path(args['preprocess']['source_lang'])) # save spm dict to ncc.dictionary
if target and tgt_dict is not None:
tgt_dict.save(dict_path(args['preprocess']['target_lang']))
# 2. ***************build dataset********************
def make_binary_dataset(vocab: Dictionary, input_file, output_file, num_workers: int):
"""make binary dataset"""
# LOGGER.info("[{}] Dictionary: {} types".format(attr, len(vocab) - 1))
n_seq_tok = [0, 0]
replaced = Counter() # save un-recorded tokens
def merge_result(worker_result):
replaced.update(worker_result["replaced"])
n_seq_tok[0] += worker_result["nseq"]
n_seq_tok[1] += worker_result["ntok"]
# split a file into different parts
# if use multi-processing, we first process 2nd to last file
# 1.txt -> 10 processor, 0(p0)(0-99), 100(p1)(100-199), ...
offsets = Binarizer.find_offsets(input_file, num_workers)
pool = None
if num_workers > 1:
# p1-pN -> (1 bin-txt, 1 idx), (N bin-txt, N idx)
pool = Pool(processes=num_workers - 1)
for worker_id in range(1, num_workers):
prefix = "{}{}".format(output_file, worker_id)
pool.apply_async(
binarize,
(
args,
input_file,
vocab,
prefix,
offsets[worker_id],
offsets[worker_id + 1]
),
callback=merge_result
)
pool.close()
# process 1th file, if multi-processing available. If not, process all file
# p0 -> 0,end
ds_file = '{}.mmap'.format(output_file)
ds = indexed_dataset.make_builder(ds_file, impl=args['preprocess']['dataset_impl'], vocab_size=len(vocab))
merge_result(
Binarizer.binarize(
input_file, vocab, lambda t: ds.add_item(t),
tokenize=tokenization.json_tokenizer, offset=0, end=offsets[1], append_eos=False,
)
)
if num_workers > 1:
# p1-pN
pool.join()
# merge sub-processors' index and data files into final files and delete them.
for worker_id in range(1, num_workers):
temp_file_path = "{}{}".format(output_file, worker_id)
ds.merge_file_(temp_file_path)
# idx, txt
os.remove(indexed_dataset.data_file_path(temp_file_path))
os.remove(indexed_dataset.index_file_path(temp_file_path))
ds.finalize('{}.idx'.format(output_file))
LOGGER.info(
"{}: {} sents, {} tokens, {:.3}% replaced by {}".format(
# attr,
input_file,
n_seq_tok[0],
n_seq_tok[1],
100 * sum(replaced.values()) / n_seq_tok[1],
vocab.unk_word,
)
)
def make_dataset(vocab, input_prefix, output_prefix, lang, num_workers=1):
if args['preprocess']['dataset_impl'] == "raw":
in_file = file_name(input_prefix, lang)
out_dir = args['preprocess']['destdir']
PathManager.mkdir(out_dir)
LOGGER.info('Copying {} into {}'.format(in_file, out_dir))
shutil.copy(src=in_file, dst=args['preprocess']['destdir'])
else:
in_file = file_name(input_prefix, lang)
out_file = dest_path(output_prefix, lang)
PathManager.mkdir(os.path.dirname(out_file))
make_binary_dataset(vocab, in_file, out_file, num_workers)
def make_all(lang, vocab):
if args['preprocess']['trainpref']:
make_dataset(vocab, args['preprocess']['trainpref'], "train", lang,
num_workers=args['preprocess']['workers'])
if args['preprocess']['validpref']:
for k, validpref in enumerate(args['preprocess']['validpref'].split(",")):
outprefix = "valid{}".format(k) if k > 0 else "valid"
make_dataset(vocab, validpref, outprefix, lang, num_workers=args['preprocess']['workers'])
if args['preprocess']['testpref']:
for k, testpref in enumerate(args['preprocess']['testpref'].split(",")):
outprefix = "test{}".format(k) if k > 0 else "test"
make_dataset(vocab, testpref, outprefix, lang, num_workers=args['preprocess']['workers'])
make_all(args['preprocess']['source_lang'], src_dict)
if target:
make_all(args['preprocess']['target_lang'], tgt_dict)
def cli_main():
import argparse
parser = argparse.ArgumentParser(
description="Downloading/Decompressing CodeSearchNet dataset(s) or Tree-Sitter Library(ies)")
parser.add_argument(
"--yaml_file", "-f", type=str, help="load {yaml_file}.yml for train",
default='config/ruby'
)
args = parser.parse_args()
LOGGER.info(args)
yaml_file = os.path.join(os.path.dirname(__file__), '{}.yml'.format(args.yaml_file))
LOGGER.info('Load arguments in {}'.format(yaml_file))
args = load_yaml(yaml_file)
LOGGER.info(args)
main(args)
if __name__ == "__main__":
cli_main()
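# Illustrative sketch, not part of the preprocessing script above: the chunk layout
# used by make_binary_dataset. Judging by how the offsets are indexed there,
# Binarizer.find_offsets returns num_workers + 1 byte boundaries; worker 0 handles
# the first chunk in-process while workers 1..N-1 write to "<output_file><worker_id>"
# and are merged back (then deleted) afterwards.
def _worker_chunks(offsets, output_file):
    chunks = [{"worker": 0, "start": 0, "end": offsets[1], "prefix": output_file}]
    for worker_id in range(1, len(offsets) - 1):
        chunks.append({
            "worker": worker_id,
            "start": offsets[worker_id],
            "end": offsets[worker_id + 1],
            "prefix": "{}{}".format(output_file, worker_id),
        })
    return chunks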
| from typing import Dict
import os
import ujson
import shutil
from multiprocessing import Pool
from ncc import tasks
from collections import Counter
from ncc.data import (
Dictionary,
indexed_dataset,
)
from ncc.tokenizers import tokenization
from ncc.data.tools.binarizer import Binarizer
from ncc.utils.file_ops.yaml_io import load_yaml
from ncc.utils.file_ops import (json_io)
from ncc.utils.path_manager import PathManager
from ncc import LOGGER
def binarize(args: Dict, filename: str, dict: Dictionary, in_file: str,
offset: int, end: int, append_eos: bool = False):
"""binarize function for multi-processing"""
ds_file = '{}.mmap'.format(in_file)
ds = indexed_dataset.make_builder(ds_file, impl=args['preprocess']['dataset_impl'], vocab_size=len(dict))
def consumer(tensor):
ds.add_item(tensor)
res = Binarizer.binarize(filename, dict, consumer, tokenize=tokenization.json_tokenizer,
append_eos=append_eos, offset=offset, end=end)
ds.finalize('{}.idx'.format(in_file))
return res
def main(args):
task = tasks.get_task(args['preprocess']['task'])
LOGGER.info('mkdir {} for {} task'.format(args['preprocess']['destdir'], args['preprocess']['task']))
PathManager.mkdir(args['preprocess']['destdir'])
def train_path(lang):
return "{}{}".format(args['preprocess']['trainpref'], ("." + lang) if lang else "")
def valid_path(lang):
return "{}{}".format(args['preprocess']['validpref'], ("." + lang) if lang else "")
def file_name(prefix, lang):
fname = prefix
if lang is not None:
fname += ".{lang}".format(lang=lang)
return fname
def dest_path(prefix, lang):
return os.path.join(args['preprocess']['destdir'], file_name(prefix, lang))
def dict_path(lang):
return dest_path(lang, "dict") + ".jsonl"
target = not args['preprocess']['only_source']
from dataset.codexglue.code_to_text import BPE_DIR
source_dict_file = os.path.join(BPE_DIR, 'csn/csn.spm.vocab')
target_dict_file = os.path.join(os.path.dirname(args['preprocess']['destdir']), 'dict.jsonl')
with open(source_dict_file, 'r') as reader, open(target_dict_file, 'w') as writer:
for line in reader:
print(json_io.json_dumps([line.split('\t')[0], 100]), file=writer)
src_dict = tgt_dict = task.load_dictionary(target_dict_file)
# shared dicts for all languages
src_dict.save(
os.path.join(os.path.dirname(args['preprocess']['destdir']), f"{args['preprocess']['source_lang']}.jsonl")
)
tgt_dict.save(
os.path.join(os.path.dirname(args['preprocess']['destdir']), f"{args['preprocess']['target_lang']}.jsonl")
)
src_dict.save(dict_path(args['preprocess']['source_lang'])) # save spm dict to ncc.dictionary
if target and tgt_dict is not None:
tgt_dict.save(dict_path(args['preprocess']['target_lang']))
# 2. ***************build dataset********************
def make_binary_dataset(vocab: Dictionary, input_file, output_file, num_workers: int):
"""make binary dataset"""
# LOGGER.info("[{}] Dictionary: {} types".format(attr, len(vocab) - 1))
n_seq_tok = [0, 0]
replaced = Counter() # save un-recorded tokens
def merge_result(worker_result):
replaced.update(worker_result["replaced"])
n_seq_tok[0] += worker_result["nseq"]
n_seq_tok[1] += worker_result["ntok"]
# split a file into different parts
# if use multi-processing, we first process 2nd to last file
# 1.txt -> 10 processor, 0(p0)(0-99), 100(p1)(100-199), ...
offsets = Binarizer.find_offsets(input_file, num_workers)
pool = None
if num_workers > 1:
# p1-pN -> (1 bin-txt, 1 idx), (N bin-txt, N idx)
pool = Pool(processes=num_workers - 1)
for worker_id in range(1, num_workers):
prefix = "{}{}".format(output_file, worker_id)
pool.apply_async(
binarize,
(
args,
input_file,
vocab,
prefix,
offsets[worker_id],
offsets[worker_id + 1]
),
callback=merge_result
)
pool.close()
# process 1th file, if multi-processing available. If not, process all file
# p0 -> 0,end
ds_file = '{}.mmap'.format(output_file)
ds = indexed_dataset.make_builder(ds_file, impl=args['preprocess']['dataset_impl'], vocab_size=len(vocab))
merge_result(
Binarizer.binarize(
input_file, vocab, lambda t: ds.add_item(t),
tokenize=tokenization.json_tokenizer, offset=0, end=offsets[1], append_eos=False,
)
)
if num_workers > 1:
# p1-pN
pool.join()
# merge sub-processors' index and data files into final files and delete them.
for worker_id in range(1, num_workers):
temp_file_path = "{}{}".format(output_file, worker_id)
ds.merge_file_(temp_file_path)
# idx, txt
os.remove(indexed_dataset.data_file_path(temp_file_path))
os.remove(indexed_dataset.index_file_path(temp_file_path))
ds.finalize('{}.idx'.format(output_file))
LOGGER.info(
"{}: {} sents, {} tokens, {:.3}% replaced by {}".format(
# attr,
input_file,
n_seq_tok[0],
n_seq_tok[1],
100 * sum(replaced.values()) / n_seq_tok[1],
vocab.unk_word,
)
)
def make_dataset(vocab, input_prefix, output_prefix, lang, num_workers=1):
if args['preprocess']['dataset_impl'] == "raw":
in_file = file_name(input_prefix, lang)
out_dir = args['preprocess']['destdir']
PathManager.mkdir(out_dir)
LOGGER.info('Copying {} into {}'.format(in_file, out_dir))
shutil.copy(src=in_file, dst=args['preprocess']['destdir'])
else:
in_file = file_name(input_prefix, lang)
out_file = dest_path(output_prefix, lang)
PathManager.mkdir(os.path.dirname(out_file))
make_binary_dataset(vocab, in_file, out_file, num_workers)
def make_all(lang, vocab):
if args['preprocess']['trainpref']:
make_dataset(vocab, args['preprocess']['trainpref'], "train", lang,
num_workers=args['preprocess']['workers'])
if args['preprocess']['validpref']:
for k, validpref in enumerate(args['preprocess']['validpref'].split(",")):
outprefix = "valid{}".format(k) if k > 0 else "valid"
make_dataset(vocab, validpref, outprefix, lang, num_workers=args['preprocess']['workers'])
if args['preprocess']['testpref']:
for k, testpref in enumerate(args['preprocess']['testpref'].split(",")):
outprefix = "test{}".format(k) if k > 0 else "test"
make_dataset(vocab, testpref, outprefix, lang, num_workers=args['preprocess']['workers'])
make_all(args['preprocess']['source_lang'], src_dict)
if target:
make_all(args['preprocess']['target_lang'], tgt_dict)
def cli_main():
import argparse
parser = argparse.ArgumentParser(
description="Downloading/Decompressing CodeSearchNet dataset(s) or Tree-Sitter Library(ies)")
parser.add_argument(
"--yaml_file", "-f", type=str, help="load {yaml_file}.yml for train",
default='config/ruby'
)
args = parser.parse_args()
LOGGER.info(args)
yaml_file = os.path.join(os.path.dirname(__file__), '{}.yml'.format(args.yaml_file))
LOGGER.info('Load arguments in {}'.format(yaml_file))
args = load_yaml(yaml_file)
LOGGER.info(args)
main(args)
if __name__ == "__main__":
cli_main()
|
import os
import json
from airflow.decorators import dag, task
from airflow.utils.dates import days_ago
from airflow.operators.dummy_operator import DummyOperator
import ray
from ray_provider.decorators.ray_decorators import ray_task
import numpy as np
import xgboost_ray as xgbr
import xgboost as xgb
from ray import tune
from ray.tune.schedulers import ASHAScheduler
from ray_provider.xcom.ray_backend import RayBackend
from xgboost_ray.tune import TuneReportCheckpointCallback
from datetime import datetime
# These args will get passed on to each operator
# You can override them on a per-task basis during operator initialization
default_args = {
"owner": "airflow",
"on_success_callback": RayBackend.on_success_callback,
"on_failure_callback": RayBackend.on_failure_callback,
}
task_args = {"ray_conn_id": "ray_cluster_connection"}
# Change to True to load simple sklearn dataset
SIMPLE = False
# Change actors and cpus per actor here as per resources allow
XGB_RAY_PARAMS = xgbr.RayParams(max_actor_restarts=1, num_actors=1, cpus_per_actor=1)
ROOT_DIR = "."
LOCAL_DIR = f"{ROOT_DIR}/ray_results"
@dag(
default_args=default_args,
schedule_interval=None,
start_date=datetime(2021, 1, 1, 0, 0, 0),
tags=["xgboost-pandas-tune"],
)
def xgboost_pandas_tune_breast_cancer():
@ray_task(**task_args)
def load_dataframe() -> "ray.ObjectRef":
"""
build dataframe from breast cancer dataset
"""
print("Loading CSV")
if SIMPLE:
print("Loading simple from sklearn.datasets")
from sklearn import datasets
data = datasets.load_breast_cancer(return_X_y=True)
else:
import pandas as pd
url = (
"https://archive.ics.uci.edu/ml/machine-learning-databases/"
"00280/HIGGS.csv.gz"
)
colnames = ["label"] + ["feature-%02d" % i for i in range(1, 29)]
data = pd.read_csv(url, compression="gzip", names=colnames)
print("loaded higgs")
print("Loaded CSV.")
return data
@ray_task(**task_args)
def split_train_test(data):
print("Splitting Data to Train and Test Sets")
print(f"Creating data matrix: {data, SIMPLE}")
if SIMPLE:
from sklearn.model_selection import train_test_split
print("Splitting data")
data, labels = data
train_x, test_x, train_y, test_y = train_test_split(
data, labels, test_size=0.25
)
train_set = xgbr.RayDMatrix(train_x, train_y)
test_set = xgbr.RayDMatrix(test_x, test_y)
else:
df_train = data[(data["feature-01"] < 0.4)]
colnames = ["label"] + ["feature-%02d" % i for i in range(1, 29)]
train_set = xgbr.RayDMatrix(df_train, label="label", columns=colnames)
df_validation = data[
(data["feature-01"] >= 0.4) & (data["feature-01"] < 0.8)
]
test_set = xgbr.RayDMatrix(df_validation, label="label")
print("finished data matrix")
return train_set, test_set
# This could be in a library of trainables
def train_model(config, checkpoint_dir=None, data_dir=None, data=()):
dtrain, dvalidation = data
evallist = [(dvalidation, "eval")]
# evals_result = {}
config = {
"tree_method": "hist",
"eval_metric": ["logloss", "error"],
}
print("Start training with TuneReportCheckpointCallback")
bst = xgbr.train(
params=config,
dtrain=dtrain,
ray_params=XGB_RAY_PARAMS,
num_boost_round=100,
evals=evallist,
callbacks=[TuneReportCheckpointCallback(filename=f"model.xgb")],
)
@ray_task(**task_args)
def tune_model(data):
search_space = {
# You can mix constants with search space objects.
"objective": "binary:logistic",
"eval_metric": ["logloss", "error"],
"max_depth": tune.randint(1, 9),
"min_child_weight": tune.choice([1, 2, 3]),
"subsample": tune.uniform(0.5, 1.0),
"eta": tune.loguniform(1e-4, 1e-1),
}
print("enabling aggressive early stopping of bad trials")
# This will enable aggressive early stopping of bad trials.
scheduler = ASHAScheduler(
max_t=4, grace_period=1, reduction_factor=2 # 4 training iterations
)
print("Tuning")
analysis = tune.run(
tune.with_parameters(train_model, data=data),
metric="eval-logloss",
mode="min",
local_dir=LOCAL_DIR,
# You can add "gpu": 0.1 to allocate GPUs
resources_per_trial=XGB_RAY_PARAMS.get_tune_resources(),
config=search_space,
num_samples=4,
scheduler=scheduler,
)
print("Done Tuning")
return analysis
@ray_task(**task_args)
def load_best_model_checkpoint(analysis):
print("Checking Analysis")
best_bst = xgb.Booster()
print(
f"Analysis Best Result on eval-error is: {analysis.best_result["eval-error"]}"
)
print("Loading Model with Best Params")
best_bst.load_model(os.path.join(analysis.best_checkpoint, "model.xgb"))
accuracy = 1.0 - analysis.best_result["eval-error"]
print(f"Best model parameters: {analysis.best_config}")
print(f"Best model total accuracy: {accuracy:.4f}")
# We could now do further predictions with
# best_bst.predict(...)
return best_bst
build_raw_df = load_dataframe()
data = split_train_test(build_raw_df)
analysis = tune_model(data)
best_checkpoint = load_best_model_checkpoint(analysis)
kickoff_dag = DummyOperator(task_id="kickoff_dag")
complete_dag = DummyOperator(task_id="complete_dag")
kickoff_dag >> build_raw_df
best_checkpoint >> complete_dag
xgboost_pandas_tune_breast_cancer = xgboost_pandas_tune_breast_cancer()
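# Hedged side note, not part of the DAG above: train_model replaces the config dict
# that Tune samples from search_space with a fixed dict, so the tuned hyperparameters
# never reach xgbr.train. A hypothetical merge such as this would keep both.
def _merged_params(sampled_config):
    base = {"tree_method": "hist", "eval_metric": ["logloss", "error"]}
    return {**base, **sampled_config}  # sampled values extend/override the base params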
| import os
import json
from airflow.decorators import dag, task
from airflow.utils.dates import days_ago
from airflow.operators.dummy_operator import DummyOperator
import ray
from ray_provider.decorators.ray_decorators import ray_task
import numpy as np
import xgboost_ray as xgbr
import xgboost as xgb
from ray import tune
from ray.tune.schedulers import ASHAScheduler
from ray_provider.xcom.ray_backend import RayBackend
from xgboost_ray.tune import TuneReportCheckpointCallback
from datetime import datetime
# These args will get passed on to each operator
# You can override them on a per-task basis during operator initialization
default_args = {
"owner": "airflow",
"on_success_callback": RayBackend.on_success_callback,
"on_failure_callback": RayBackend.on_failure_callback,
}
task_args = {"ray_conn_id": "ray_cluster_connection"}
# Change to True to load simple sklearn dataset
SIMPLE = False
# Change actors and cpus per actor here as per resources allow
XGB_RAY_PARAMS = xgbr.RayParams(max_actor_restarts=1, num_actors=1, cpus_per_actor=1)
ROOT_DIR = "."
LOCAL_DIR = f"{ROOT_DIR}/ray_results"
@dag(
default_args=default_args,
schedule_interval=None,
start_date=datetime(2021, 1, 1, 0, 0, 0),
tags=["xgboost-pandas-tune"],
)
def xgboost_pandas_tune_breast_cancer():
@ray_task(**task_args)
def load_dataframe() -> "ray.ObjectRef":
"""
build dataframe from breast cancer dataset
"""
print("Loading CSV")
if SIMPLE:
print("Loading simple from sklearn.datasets")
from sklearn import datasets
data = datasets.load_breast_cancer(return_X_y=True)
else:
import pandas as pd
url = (
"https://archive.ics.uci.edu/ml/machine-learning-databases/"
"00280/HIGGS.csv.gz"
)
colnames = ["label"] + ["feature-%02d" % i for i in range(1, 29)]
data = pd.read_csv(url, compression="gzip", names=colnames)
print("loaded higgs")
print("Loaded CSV.")
return data
@ray_task(**task_args)
def split_train_test(data):
print("Splitting Data to Train and Test Sets")
print(f"Creating data matrix: {data, SIMPLE}")
if SIMPLE:
from sklearn.model_selection import train_test_split
print("Splitting data")
data, labels = data
train_x, test_x, train_y, test_y = train_test_split(
data, labels, test_size=0.25
)
train_set = xgbr.RayDMatrix(train_x, train_y)
test_set = xgbr.RayDMatrix(test_x, test_y)
else:
df_train = data[(data["feature-01"] < 0.4)]
colnames = ["label"] + ["feature-%02d" % i for i in range(1, 29)]
train_set = xgbr.RayDMatrix(df_train, label="label", columns=colnames)
df_validation = data[
(data["feature-01"] >= 0.4) & (data["feature-01"] < 0.8)
]
test_set = xgbr.RayDMatrix(df_validation, label="label")
print("finished data matrix")
return train_set, test_set
# This could be in a library of trainables
def train_model(config, checkpoint_dir=None, data_dir=None, data=()):
dtrain, dvalidation = data
evallist = [(dvalidation, "eval")]
# evals_result = {}
config = {
"tree_method": "hist",
"eval_metric": ["logloss", "error"],
}
print("Start training with TuneReportCheckpointCallback")
bst = xgbr.train(
params=config,
dtrain=dtrain,
ray_params=XGB_RAY_PARAMS,
num_boost_round=100,
evals=evallist,
callbacks=[TuneReportCheckpointCallback(filename=f"model.xgb")],
)
@ray_task(**task_args)
def tune_model(data):
search_space = {
# You can mix constants with search space objects.
"objective": "binary:logistic",
"eval_metric": ["logloss", "error"],
"max_depth": tune.randint(1, 9),
"min_child_weight": tune.choice([1, 2, 3]),
"subsample": tune.uniform(0.5, 1.0),
"eta": tune.loguniform(1e-4, 1e-1),
}
print("enabling aggressive early stopping of bad trials")
# This will enable aggressive early stopping of bad trials.
scheduler = ASHAScheduler(
max_t=4, grace_period=1, reduction_factor=2 # 4 training iterations
)
print("Tuning")
analysis = tune.run(
tune.with_parameters(train_model, data=data),
metric="eval-logloss",
mode="min",
local_dir=LOCAL_DIR,
# You can add "gpu": 0.1 to allocate GPUs
resources_per_trial=XGB_RAY_PARAMS.get_tune_resources(),
config=search_space,
num_samples=4,
scheduler=scheduler,
)
print("Done Tuning")
return analysis
@ray_task(**task_args)
def load_best_model_checkpoint(analysis):
print("Checking Analysis")
best_bst = xgb.Booster()
print(
f"Analysis Best Result on eval-error is: {analysis.best_result['eval-error']}"
)
print("Loading Model with Best Params")
best_bst.load_model(os.path.join(analysis.best_checkpoint, "model.xgb"))
accuracy = 1.0 - analysis.best_result["eval-error"]
print(f"Best model parameters: {analysis.best_config}")
print(f"Best model total accuracy: {accuracy:.4f}")
# We could now do further predictions with
# best_bst.predict(...)
return best_bst
build_raw_df = load_dataframe()
data = split_train_test(build_raw_df)
analysis = tune_model(data)
best_checkpoint = load_best_model_checkpoint(analysis)
kickoff_dag = DummyOperator(task_id="kickoff_dag")
complete_dag = DummyOperator(task_id="complete_dag")
kickoff_dag >> build_raw_df
best_checkpoint >> complete_dag
xgboost_pandas_tune_breast_cancer = xgboost_pandas_tune_breast_cancer()
|
import argparse
from datasets import PhototourismDataset
import numpy as np
import os
import pickle
def get_opts():
parser = argparse.ArgumentParser()
parser.add_argument('--root_dir', type=str, required=True,
help='root directory of dataset')
parser.add_argument('--img_downscale', type=int, default=1,
help='how much to downscale the images for phototourism dataset')
return parser.parse_args()
if __name__ == '__main__':
args = get_opts()
os.makedirs(os.path.join(args.root_dir, 'cache'), exist_ok=True)
print(f'Preparing cache for scale {args.img_downscale}...')
dataset = PhototourismDataset(args.root_dir, 'train', args.img_downscale)
# save img ids
with open(os.path.join(args.root_dir, f'cache/img_ids.pkl'), 'wb') as f:
pickle.dump(dataset.img_ids, f, pickle.HIGHEST_PROTOCOL)
# save img paths
with open(os.path.join(args.root_dir, f'cache/image_paths.pkl'), 'wb') as f:
pickle.dump(dataset.image_paths, f, pickle.HIGHEST_PROTOCOL)
# save Ks
with open(os.path.join(args.root_dir, f'cache/Ks{args.img_downscale}.pkl'), 'wb') as f:
pickle.dump(dataset.Ks, f, pickle.HIGHEST_PROTOCOL)
# save scene points
np.save(os.path.join(args.root_dir, 'cache/xyz_world.npy'),
dataset.xyz_world)
# save poses
np.save(os.path.join(args.root_dir, 'cache/poses.npy'),
dataset.poses)
# save near and far bounds
with open(os.path.join(args.root_dir, f'cache/nears.pkl'), 'wb') as f:
pickle.dump(dataset.nears, f, pickle.HIGHEST_PROTOCOL)
with open(os.path.join(args.root_dir, f'cache/fars.pkl'), 'wb') as f:
pickle.dump(dataset.fars, f, pickle.HIGHEST_PROTOCOL)
# save rays and rgbs
np.save(os.path.join(args.root_dir, f'cache/rays{args.img_downscale}.npy'),
dataset.all_rays.numpy())
np.save(os.path.join(args.root_dir, f'cache/rgbs{args.img_downscale}.npy'),
dataset.all_rgbs.numpy())
print(f"Data cache saved to {os.path.join(args.root_dir, "cache")} !") | import argparse
from datasets import PhototourismDataset
import numpy as np
import os
import pickle
def get_opts():
parser = argparse.ArgumentParser()
parser.add_argument('--root_dir', type=str, required=True,
help='root directory of dataset')
parser.add_argument('--img_downscale', type=int, default=1,
help='how much to downscale the images for phototourism dataset')
return parser.parse_args()
if __name__ == '__main__':
args = get_opts()
os.makedirs(os.path.join(args.root_dir, 'cache'), exist_ok=True)
print(f'Preparing cache for scale {args.img_downscale}...')
dataset = PhototourismDataset(args.root_dir, 'train', args.img_downscale)
# save img ids
with open(os.path.join(args.root_dir, f'cache/img_ids.pkl'), 'wb') as f:
pickle.dump(dataset.img_ids, f, pickle.HIGHEST_PROTOCOL)
# save img paths
with open(os.path.join(args.root_dir, f'cache/image_paths.pkl'), 'wb') as f:
pickle.dump(dataset.image_paths, f, pickle.HIGHEST_PROTOCOL)
# save Ks
with open(os.path.join(args.root_dir, f'cache/Ks{args.img_downscale}.pkl'), 'wb') as f:
pickle.dump(dataset.Ks, f, pickle.HIGHEST_PROTOCOL)
# save scene points
np.save(os.path.join(args.root_dir, 'cache/xyz_world.npy'),
dataset.xyz_world)
# save poses
np.save(os.path.join(args.root_dir, 'cache/poses.npy'),
dataset.poses)
# save near and far bounds
with open(os.path.join(args.root_dir, f'cache/nears.pkl'), 'wb') as f:
pickle.dump(dataset.nears, f, pickle.HIGHEST_PROTOCOL)
with open(os.path.join(args.root_dir, f'cache/fars.pkl'), 'wb') as f:
pickle.dump(dataset.fars, f, pickle.HIGHEST_PROTOCOL)
# save rays and rgbs
np.save(os.path.join(args.root_dir, f'cache/rays{args.img_downscale}.npy'),
dataset.all_rays.numpy())
np.save(os.path.join(args.root_dir, f'cache/rgbs{args.img_downscale}.npy'),
dataset.all_rgbs.numpy())
print(f"Data cache saved to {os.path.join(args.root_dir, 'cache')} !") |
#!/usr/bin/env python3
"""Create trendbargraphs for various periods of electricity use and production."""
import argparse
from datetime import datetime as dt
import matplotlib.pyplot as plt
import numpy as np
import constants
# noinspection PyUnresolvedReferences
import libkamstrup as kl
DATABASE = constants.TREND['database']
OPTION = ""
def fetch_last_day(hours_to_fetch):
"""...
"""
global DATABASE
config = kl.add_time_line({"grouping": "%m-%d %Hh",
"period": hours_to_fetch,
"timeframe": "hour",
"database": DATABASE,
"table": "production",
}
)
opwekking, prod_lbls = kl.get_historic_data(config, telwerk="energy")
config["table"] = "kamstrup"
import_lo, data_lbls = kl.get_historic_data(config, telwerk="T1in")
import_hi, data_lbls = kl.get_historic_data(config, telwerk="T2in")
export_lo, data_lbls = kl.get_historic_data(config, telwerk="T1out")
export_hi, data_lbls = kl.get_historic_data(config, telwerk="T2out")
# production data may not yet have caught up to the current hour
if not (prod_lbls[-1] == data_lbls[-1]):
opwekking = opwekking[:-1]
opwekking = np.append(opwekking, 0.0)
return data_lbls, import_lo, import_hi, opwekking, export_lo, export_hi
def fetch_last_month(days_to_fetch):
"""...
"""
global DATABASE
config = kl.add_time_line({"grouping": "%m-%d",
"period": days_to_fetch,
"timeframe": "day",
"database": DATABASE,
"table": "production",
}
)
opwekking, prod_lbls = kl.get_historic_data(config, telwerk="energy")
config["table"] = "kamstrup"
import_lo, data_lbls = kl.get_historic_data(config, telwerk="T1in")
import_hi, data_lbls = kl.get_historic_data(config, telwerk="T2in")
export_lo, data_lbls = kl.get_historic_data(config, telwerk="T1out")
export_hi, data_lbls = kl.get_historic_data(config, telwerk="T2out")
# production data may not yet have caught up to the current hour
if not (prod_lbls[-1] == data_lbls[-1]):
opwekking = opwekking[:-1]
opwekking = np.append(opwekking, 0.0)
return data_lbls, import_lo, import_hi, opwekking, export_lo, export_hi
def fetch_last_year(months_to_fetch):
"""...
"""
global DATABASE
config = kl.add_time_line({"grouping": "%Y-%m",
"period": months_to_fetch,
"timeframe": "month",
"database": DATABASE,
"table": "production",
}
)
opwekking, prod_lbls = kl.get_historic_data(config,
telwerk="energy",
from_start_of_year=True
)
config["table"] = "kamstrup"
import_lo, data_lbls = kl.get_historic_data(config,
telwerk="T1in",
from_start_of_year=True
)
import_hi, data_lbls = kl.get_historic_data(config,
telwerk="T2in",
from_start_of_year=True
)
export_lo, data_lbls = kl.get_historic_data(config,
telwerk="T1out",
from_start_of_year=True
)
export_hi, data_lbls = kl.get_historic_data(config,
telwerk="T2out",
from_start_of_year=True
)
# production data may not yet have caught up to the current hour
if not (prod_lbls[-1] == data_lbls[-1]):
opwekking = opwekking[:-1]
opwekking = np.append(opwekking, 0.0)
return data_lbls, import_lo, import_hi, opwekking, export_lo, export_hi
def fetch_last_years(years_to_fetch):
"""...
"""
global DATABASE
config = kl.add_time_line({"grouping": "%Y",
"period": years_to_fetch,
"timeframe": "year",
"database": DATABASE,
"table": "production",
}
)
opwekking, prod_lbls = kl.get_historic_data(config,
telwerk="energy",
from_start_of_year=True
)
config["table"] = "kamstrup"
import_lo, data_lbls = kl.get_historic_data(config,
telwerk="T1in",
from_start_of_year=True
)
import_hi, data_lbls = kl.get_historic_data(config,
telwerk="T2in",
from_start_of_year=True
)
export_lo, data_lbls = kl.get_historic_data(config,
telwerk="T1out",
from_start_of_year=True
)
export_hi, data_lbls = kl.get_historic_data(config,
telwerk="T2out",
from_start_of_year=True
)
# production data may not yet have caught up to the current hour
if not (prod_lbls[-1] == data_lbls[-1]):
opwekking = opwekking[:-1]
opwekking = np.append(opwekking, 0.0)
return data_lbls, import_lo, import_hi, opwekking, export_lo, export_hi
def plot_graph(output_file, data_tuple, plot_title, show_data=0):
"""...
"""
data_lbls = data_tuple[0]
import_lo = data_tuple[1]
import_hi = data_tuple[2]
opwekking = data_tuple[3]
export_lo = data_tuple[4]
export_hi = data_tuple[5]
imprt = kl.contract(import_lo, import_hi)
exprt = kl.contract(export_lo, export_hi)
own_usage = kl.distract(opwekking, exprt)
usage = kl.contract(own_usage, imprt)
btm_hi = kl.contract(import_lo, own_usage)
"""
--- Start debugging:
np.set_printoptions(precision=3)
print("data_lbls: ", np.size(data_lbls), data_lbls[-5:])
print(" ")
print("opwekking: ", np.size(opwekking), opwekking[-5:])
print(" ")
print("export_hi: ", np.size(export_hi), export_hi[-5:])
print("export_lo: ", np.size(export_lo), export_lo[-5:])
print("exprt : ", np.size(exprt), exprt[-5:])
print(" ")
print("import_hi: ", np.size(import_hi), import_hi[-5:])
print("import_lo: ", np.size(import_lo), import_lo[-5:])
print("imprt : ", np.size(imprt), imprt[-5:])
print(" ")
print("own_usage: ", np.size(own_usage), own_usage[-5:])
print("usage : ", np.size(usage), usage[-5:])
print(" ")
print("btm_hi : ", np.size(btm_hi), btm_hi[-5:])
--- End debugging.
"""
# Set the bar width
bar_width = 0.75
# Set the color alpha
ahpla = 0.7
# positions of the left bar-boundaries
tick_pos = list(range(1, len(data_lbls) + 1))
# Create the general plot and the bar
plt.rc("font", size=6.5)
dummy, ax1 = plt.subplots(1, figsize=(10, 3.5))
col_import = "red"
col_export = "blue"
col_usage = "green"
# Create a bar plot of import_lo
ax1.bar(tick_pos,
import_hi,
width=bar_width,
label="Inkoop (normaal)",
alpha=ahpla,
color=col_import,
align="center",
bottom=btm_hi, # [sum(i) for i in zip(import_lo, own_usage)]
)
# Create a bar plot of import_hi
ax1.bar(tick_pos,
import_lo,
width=bar_width,
label="Inkoop (dal)",
alpha=ahpla * 0.5,
color=col_import,
align="center",
bottom=own_usage,
)
# Create a bar plot of own_usage
ax1.bar(tick_pos,
own_usage,
width=bar_width,
label="Eigen gebruik",
alpha=ahpla,
color=col_usage,
align="center",
)
if show_data == 1:
for i, v in enumerate(own_usage):
ax1.text(tick_pos[i],
10,
"{:7.3f}".format(v),
{"ha": "center", "va": "bottom"},
rotation=-90,
)
if show_data == 2:
for i, v in enumerate(usage):
ax1.text(tick_pos[i],
500,
"{:4.0f}".format(v),
{"ha": "center", "va": "bottom"},
fontsize=12,
)
# Exports hang below the y-axis
# Create a bar plot of export_lo
ax1.bar(tick_pos,
[-1 * i for i in export_lo],
width=bar_width,
label="Verkoop (dal)",
alpha=ahpla * 0.5,
color=col_export,
align="center",
)
# Create a bar plot of export_hi
ax1.bar(tick_pos,
[-1 * i for i in export_hi],
width=bar_width,
label="Verkoop (normaal)",
alpha=ahpla,
color=col_export,
align="center",
bottom=[-1 * i for i in export_lo],
)
if show_data == 1:
for i, v in enumerate(exprt):
ax1.text(tick_pos[i],
-10,
"{:7.3f}".format(v),
{"ha": "center", "va": "top"},
rotation=-90,
)
if show_data == 2:
for i, v in enumerate(exprt):
ax1.text(tick_pos[i],
-500,
"{:4.0f}".format(v),
{"ha": "center", "va": "top"},
fontsize=12,
)
# Set Axes stuff
ax1.set_ylabel("[kWh]")
if show_data == 0:
y_lo = -1 * (max(exprt) + 1)
y_hi = max(usage) + 1
if y_lo > -1.5:
y_lo = -1.5
if y_hi < 1.5:
y_hi = 1.5
ax1.set_ylim([y_lo, y_hi])
ax1.set_xlabel("Datetime")
ax1.grid(which="major",
axis="y",
color="k",
linestyle="--",
linewidth=0.5
)
ax1.axhline(y=0, color="k")
ax1.axvline(x=0, color="k")
# Set plot stuff
plt.xticks(tick_pos, data_lbls, rotation=-60)
plt.title(f"{plot_title}")
plt.legend(loc="upper left", ncol=5, framealpha=0.2)
# Fit everything in nicely
plt.xlim([min(tick_pos) - bar_width, max(tick_pos) + bar_width])
plt.tight_layout()
plt.savefig(fname=f"{output_file}", format="png")
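# Example call (illustrative only; the output path is a placeholder):
# plot_graph("/tmp/trend_day.png", fetch_last_day(24), "Energietrend per uur (voorbeeld)")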
def main():
"""
This is the main loop
"""
global OPTION
if OPTION.hours:
plot_graph(constants.TREND['day_graph'],
fetch_last_day(OPTION.hours),
f"Energietrend per uur afgelopen dagen ({dt.now().strftime("%d-%m-%Y %H:%M:%S")})",
)
if OPTION.days:
plot_graph(constants.TREND['month_graph'],
fetch_last_month(OPTION.days),
f"Energietrend per dag afgelopen maand ({dt.now().strftime("%d-%m-%Y %H:%M:%S")})",
)
if OPTION.months:
plot_graph(constants.TREND['year_graph'],
fetch_last_year(OPTION.months),
f"Energietrend per maand afgelopen jaren ({dt.now().strftime("%d-%m-%Y %H:%M:%S")})",
show_data=1,
)
if OPTION.years:
plot_graph(constants.TREND['vsyear_graph'],
fetch_last_years(OPTION.years),
f"Energietrend per jaar afgelopen jaren ({dt.now().strftime("%d-%m-%Y %H:%M:%S")})",
show_data=2,
)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Create a trendgraph")
parser.add_argument("-hr",
"--hours",
type=int,
help="create hour-trend for last <HOURS> hours",
)
parser.add_argument("-d",
"--days",
type=int,
help="create day-trend for last <DAYS> days"
)
parser.add_argument("-m",
"--months",
type=int,
help="number of months of data to use for the graph",
)
parser.add_argument("-y",
"--years",
type=int,
help="number of months of data to use for the graph",
)
OPTION = parser.parse_args()
if OPTION.hours == 0:
OPTION.hours = 50
if OPTION.days == 0:
OPTION.days = 50
if OPTION.months == 0:
OPTION.months = 38
if OPTION.years == 0:
OPTION.years = 6
main()
| #!/usr/bin/env python3
"""Create trendbargraphs for various periods of electricity use and production."""
import argparse
from datetime import datetime as dt
import matplotlib.pyplot as plt
import numpy as np
import constants
# noinspection PyUnresolvedReferences
import libkamstrup as kl
DATABASE = constants.TREND['database']
OPTION = ""
def fetch_last_day(hours_to_fetch):
"""...
"""
global DATABASE
config = kl.add_time_line({"grouping": "%m-%d %Hh",
"period": hours_to_fetch,
"timeframe": "hour",
"database": DATABASE,
"table": "production",
}
)
opwekking, prod_lbls = kl.get_historic_data(config, telwerk="energy")
config["table"] = "kamstrup"
import_lo, data_lbls = kl.get_historic_data(config, telwerk="T1in")
import_hi, data_lbls = kl.get_historic_data(config, telwerk="T2in")
export_lo, data_lbls = kl.get_historic_data(config, telwerk="T1out")
export_hi, data_lbls = kl.get_historic_data(config, telwerk="T2out")
# production data may not yet have caught up to the current hour
if not (prod_lbls[-1] == data_lbls[-1]):
opwekking = opwekking[:-1]
opwekking = np.append(opwekking, 0.0)
return data_lbls, import_lo, import_hi, opwekking, export_lo, export_hi
def fetch_last_month(days_to_fetch):
"""...
"""
global DATABASE
config = kl.add_time_line({"grouping": "%m-%d",
"period": days_to_fetch,
"timeframe": "day",
"database": DATABASE,
"table": "production",
}
)
opwekking, prod_lbls = kl.get_historic_data(config, telwerk="energy")
config["table"] = "kamstrup"
import_lo, data_lbls = kl.get_historic_data(config, telwerk="T1in")
import_hi, data_lbls = kl.get_historic_data(config, telwerk="T2in")
export_lo, data_lbls = kl.get_historic_data(config, telwerk="T1out")
export_hi, data_lbls = kl.get_historic_data(config, telwerk="T2out")
# production data may not yet have caught up to the current hour
if not (prod_lbls[-1] == data_lbls[-1]):
opwekking = opwekking[:-1]
opwekking = np.append(opwekking, 0.0)
return data_lbls, import_lo, import_hi, opwekking, export_lo, export_hi
def fetch_last_year(months_to_fetch):
"""...
"""
global DATABASE
config = kl.add_time_line({"grouping": "%Y-%m",
"period": months_to_fetch,
"timeframe": "month",
"database": DATABASE,
"table": "production",
}
)
opwekking, prod_lbls = kl.get_historic_data(config,
telwerk="energy",
from_start_of_year=True
)
config["table"] = "kamstrup"
import_lo, data_lbls = kl.get_historic_data(config,
telwerk="T1in",
from_start_of_year=True
)
import_hi, data_lbls = kl.get_historic_data(config,
telwerk="T2in",
from_start_of_year=True
)
export_lo, data_lbls = kl.get_historic_data(config,
telwerk="T1out",
from_start_of_year=True
)
export_hi, data_lbls = kl.get_historic_data(config,
telwerk="T2out",
from_start_of_year=True
)
# production data may not yet have caught up to the current hour
if not (prod_lbls[-1] == data_lbls[-1]):
opwekking = opwekking[:-1]
opwekking = np.append(opwekking, 0.0)
return data_lbls, import_lo, import_hi, opwekking, export_lo, export_hi
def fetch_last_years(years_to_fetch):
"""...
"""
global DATABASE
config = kl.add_time_line({"grouping": "%Y",
"period": years_to_fetch,
"timeframe": "year",
"database": DATABASE,
"table": "production",
}
)
opwekking, prod_lbls = kl.get_historic_data(config,
telwerk="energy",
from_start_of_year=True
)
config["table"] = "kamstrup"
import_lo, data_lbls = kl.get_historic_data(config,
telwerk="T1in",
from_start_of_year=True
)
import_hi, data_lbls = kl.get_historic_data(config,
telwerk="T2in",
from_start_of_year=True
)
export_lo, data_lbls = kl.get_historic_data(config,
telwerk="T1out",
from_start_of_year=True
)
export_hi, data_lbls = kl.get_historic_data(config,
telwerk="T2out",
from_start_of_year=True
)
# production data may not yet have caught up to the current hour
if not (prod_lbls[-1] == data_lbls[-1]):
opwekking = opwekking[:-1]
opwekking = np.append(opwekking, 0.0)
return data_lbls, import_lo, import_hi, opwekking, export_lo, export_hi
def plot_graph(output_file, data_tuple, plot_title, show_data=0):
"""...
"""
data_lbls = data_tuple[0]
import_lo = data_tuple[1]
import_hi = data_tuple[2]
opwekking = data_tuple[3]
export_lo = data_tuple[4]
export_hi = data_tuple[5]
imprt = kl.contract(import_lo, import_hi)
exprt = kl.contract(export_lo, export_hi)
own_usage = kl.distract(opwekking, exprt)
usage = kl.contract(own_usage, imprt)
btm_hi = kl.contract(import_lo, own_usage)
"""
--- Start debugging:
np.set_printoptions(precision=3)
print("data_lbls: ", np.size(data_lbls), data_lbls[-5:])
print(" ")
print("opwekking: ", np.size(opwekking), opwekking[-5:])
print(" ")
print("export_hi: ", np.size(export_hi), export_hi[-5:])
print("export_lo: ", np.size(export_lo), export_lo[-5:])
print("exprt : ", np.size(exprt), exprt[-5:])
print(" ")
print("import_hi: ", np.size(import_hi), import_hi[-5:])
print("import_lo: ", np.size(import_lo), import_lo[-5:])
print("imprt : ", np.size(imprt), imprt[-5:])
print(" ")
print("own_usage: ", np.size(own_usage), own_usage[-5:])
print("usage : ", np.size(usage), usage[-5:])
print(" ")
print("btm_hi : ", np.size(btm_hi), btm_hi[-5:])
--- End debugging.
"""
# Set the bar width
bar_width = 0.75
# Set the color alpha
ahpla = 0.7
# positions of the left bar-boundaries
tick_pos = list(range(1, len(data_lbls) + 1))
# Create the general plot and the bar
plt.rc("font", size=6.5)
dummy, ax1 = plt.subplots(1, figsize=(10, 3.5))
col_import = "red"
col_export = "blue"
col_usage = "green"
# Create a bar plot of import_hi (normal tariff), stacked on top of btm_hi
ax1.bar(tick_pos,
import_hi,
width=bar_width,
label="Inkoop (normaal)",
alpha=ahpla,
color=col_import,
align="center",
bottom=btm_hi, # [sum(i) for i in zip(import_lo, own_usage)]
)
# Create a bar plot of import_lo (low tariff), stacked on top of own_usage
ax1.bar(tick_pos,
import_lo,
width=bar_width,
label="Inkoop (dal)",
alpha=ahpla * 0.5,
color=col_import,
align="center",
bottom=own_usage,
)
# Create a bar plot of own_usage
ax1.bar(tick_pos,
own_usage,
width=bar_width,
label="Eigen gebruik",
alpha=ahpla,
color=col_usage,
align="center",
)
if show_data == 1:
for i, v in enumerate(own_usage):
ax1.text(tick_pos[i],
10,
"{:7.3f}".format(v),
{"ha": "center", "va": "bottom"},
rotation=-90,
)
if show_data == 2:
for i, v in enumerate(usage):
ax1.text(tick_pos[i],
500,
"{:4.0f}".format(v),
{"ha": "center", "va": "bottom"},
fontsize=12,
)
# Exports hang below the y-axis
# Create a bar plot of export_lo
ax1.bar(tick_pos,
[-1 * i for i in export_lo],
width=bar_width,
label="Verkoop (dal)",
alpha=ahpla * 0.5,
color=col_export,
align="center",
)
# Create a bar plot of export_hi
ax1.bar(tick_pos,
[-1 * i for i in export_hi],
width=bar_width,
label="Verkoop (normaal)",
alpha=ahpla,
color=col_export,
align="center",
bottom=[-1 * i for i in export_lo],
)
if show_data == 1:
for i, v in enumerate(exprt):
ax1.text(tick_pos[i],
-10,
"{:7.3f}".format(v),
{"ha": "center", "va": "top"},
rotation=-90,
)
if show_data == 2:
for i, v in enumerate(exprt):
ax1.text(tick_pos[i],
-500,
"{:4.0f}".format(v),
{"ha": "center", "va": "top"},
fontsize=12,
)
# Set Axes stuff
ax1.set_ylabel("[kWh]")
if show_data == 0:
y_lo = -1 * (max(exprt) + 1)
y_hi = max(usage) + 1
if y_lo > -1.5:
y_lo = -1.5
if y_hi < 1.5:
y_hi = 1.5
ax1.set_ylim([y_lo, y_hi])
ax1.set_xlabel("Datetime")
ax1.grid(which="major",
axis="y",
color="k",
linestyle="--",
linewidth=0.5
)
ax1.axhline(y=0, color="k")
ax1.axvline(x=0, color="k")
# Set plot stuff
plt.xticks(tick_pos, data_lbls, rotation=-60)
plt.title(f"{plot_title}")
plt.legend(loc="upper left", ncol=5, framealpha=0.2)
# Fit every nicely
plt.xlim([min(tick_pos) - bar_width, max(tick_pos) + bar_width])
plt.tight_layout()
plt.savefig(fname=f"{output_file}", format="png")
def main():
"""
This is the main loop
"""
global OPTION
if OPTION.hours:
plot_graph(constants.TREND['day_graph'],
fetch_last_day(OPTION.hours),
f"Energietrend per uur afgelopen dagen ({dt.now().strftime('%d-%m-%Y %H:%M:%S')})",
)
if OPTION.days:
plot_graph(constants.TREND['month_graph'],
fetch_last_month(OPTION.days),
f"Energietrend per dag afgelopen maand ({dt.now().strftime('%d-%m-%Y %H:%M:%S')})",
)
if OPTION.months:
plot_graph(constants.TREND['year_graph'],
fetch_last_year(OPTION.months),
f"Energietrend per maand afgelopen jaren ({dt.now().strftime('%d-%m-%Y %H:%M:%S')})",
show_data=1,
)
if OPTION.years:
plot_graph(constants.TREND['vsyear_graph'],
fetch_last_years(OPTION.years),
f"Energietrend per jaar afgelopen jaren ({dt.now().strftime('%d-%m-%Y %H:%M:%S')})",
show_data=2,
)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Create a trendgraph")
parser.add_argument("-hr",
"--hours",
type=int,
help="create hour-trend for last <HOURS> hours",
)
parser.add_argument("-d",
"--days",
type=int,
help="create day-trend for last <DAYS> days"
)
parser.add_argument("-m",
"--months",
type=int,
help="number of months of data to use for the graph",
)
parser.add_argument("-y",
"--years",
type=int,
help="number of months of data to use for the graph",
)
OPTION = parser.parse_args()
if OPTION.hours == 0:
OPTION.hours = 50
if OPTION.days == 0:
OPTION.days = 50
if OPTION.months == 0:
OPTION.months = 38
if OPTION.years == 0:
OPTION.years = 6
main()
|
# -*- coding: utf-8 -*-
# Copyright 2018 IBM Corp. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
from datetime import datetime, timedelta
from pathlib import Path
from dotenv import load_dotenv
from flask import Flask, Response
from flask import jsonify
from flask import request, redirect
from flask_socketio import SocketIO
from flask_cors import CORS
from ibm_watson import AssistantV2
from ibm_cloud_sdk_core.authenticators import IAMAuthenticator
from ibm_watson import SpeechToTextV1
from ibm_watson import TextToSpeechV1
# get from environment, default or exception
def checkenv(checkfor, default=None):
required = os.environ.get(checkfor) or None
if required != None:
return required
elif default != None:
return default
raise ValueError(f'{checkfor} not found in: environment, .env or .flaskenv - Correct config')
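# Usage sketch (illustrative; the key names below are examples, not app settings):
# os.environ["ASSISTANT_TIMEOUT"] = "300"
# checkenv("ASSISTANT_TIMEOUT", 255)  -> "300"        (environment wins)
# checkenv("MISSING_KEY", "fallback") -> "fallback"   (default wins)
# checkenv("MISSING_KEY")             -> raises ValueError (no value, no default)
# Environment values come back as strings, so numeric settings are cast by the caller.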
'''
WatsonConnector is a collection of API objects and a chat recorder utility.
The API objects are shared by all users you may have. No session data is kept here.
'''
class WatsonConnector:
def __init__(self):
pass #do it all after Flask loaded, .flaskenv pulled in
def before_first_request(self):
#check for mandatory configuration.
#Note: some of the API details supplied by IBM append the version and key details.
# The IBM implementation passes the key in as a parameter and builds up the
# complete URL. If you pass in what is supplied, the version and key are duplicated,
# which causes errors. To avoid duplication and an invalid URL, truncate the version and everything after it.
#Watson Assistant
self.wa_apikey = checkenv('ASSISTANT_APIKEY')
self.wa_url = checkenv('ASSISTANT_URL').split('/v')[0] #truncate version
self.assistant_id = checkenv('ASSISTANT_ID')
self.assistant_version = checkenv("ASSISTANT_VERSION")
authenticator = IAMAuthenticator(self.wa_apikey)
record = checkenv("ASSISTANT_RECORD", "NO").lower()
if record[0] in ['n', 'y', 'u']: # record: n - none, y - yes to all, u - yes to unknown (Watson does not recognize)
self.record_questions = record[0]
self.chatlog = checkenv("ASSISTANT_RECORD_FILE", 'chatlog.csv')
Path(self.chatlog).touch() # if there are create/access problems, the corresponding OSError is raised here
else:
self.record_questions = 'n'
self.assistant_api = AssistantV2(authenticator=authenticator, version=self.assistant_version)
self.assistant_api.set_service_url(self.wa_url)
#Speech to Text
self.s2t_apikey = checkenv('SPEECH_TO_TEXT_APIKEY')
self.s2t_url = checkenv('SPEECH_TO_TEXT_URL').split('/v')[0] #truncate version
authenticator = IAMAuthenticator(self.s2t_apikey)
self.speech_to_text = SpeechToTextV1(authenticator)
self.speech_to_text.set_service_url(self.s2t_url)
# Text to Speech
self.t2s_apikey = checkenv('TEXT_TO_SPEECH_APIKEY')
self.t2s_url = checkenv('TEXT_TO_SPEECH_URL').split('/v')[0] #truncate version
authenticator = IAMAuthenticator(self.t2s_apikey)
self.text_to_speech = TextToSpeechV1(authenticator)
self.text_to_speech.set_service_url(self.t2s_url)
print('Config:')
print(f' Watson key: {self.wa_apikey} version: {self.assistant_version} ')
print(f' Watson url: {self.wa_url}')
print(f' speech_to_text key: {self.s2t_apikey}')
print(f' speech_to_text url: {self.s2t_url}')
print(f' text_to_speech key: {self.t2s_apikey}')
print(f' text_to_speech url: {self.t2s_url}')
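# Hypothetical .env sketch showing the keys read above (values are placeholders,
# not real credentials; the exact URLs depend on your IBM Cloud region):
# ASSISTANT_APIKEY=xxxx
# ASSISTANT_URL=https://api.us-south.assistant.watson.cloud.ibm.com
# ASSISTANT_ID=xxxx
# ASSISTANT_VERSION=2021-06-14
# SPEECH_TO_TEXT_APIKEY=xxxx
# SPEECH_TO_TEXT_URL=https://api.us-south.speech-to-text.watson.cloud.ibm.com
# TEXT_TO_SPEECH_APIKEY=xxxx
# TEXT_TO_SPEECH_URL=https://api.us-south.text-to-speech.watson.cloud.ibm.com
# ASSISTANT_RECORD=no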
def record_chat(self, conv_text, response_txt, entities):
if self.record_questions == 'n':
return
elif self.record_questions == 'u' and len(entities) <= 0:
return
with open(self.chatlog,'a') as fd:
# Watson identified, we want to know what and confidence
if len(entities) > 0:
ln = f'{conv_text},{response_txt}'
news = [f'{entity["entity"]}:{entity["value"]}:{entity["confidence"]}' for entity in entities]
ln += ',' + ','.join(news)
print(ln)
# request was unidentified, don't need to know response text
else:
ln = conv_text
# str.replace returns a new string, so assign the result back
ln = ln.replace('\n', ' ')
ln = ln.replace(',', '|')
fd.write(f'{ln}\n')
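# Example of what record_chat appends (illustrative only): with ASSISTANT_RECORD=yes
# and one recognized entity, a chatlog.csv line could look like
# what are your opening hours,We are open 9-5,hours:opening:0.92
# while an unrecognized question is logged as just the question text, with newlines
# flattened and commas replaced by '|'.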
'''
WatsonSession is a long-running session between a specific user and the Watson Assistant.
The sessions have context and timeout logic. The Watson Assistant has sessions, the
speech APIs do not.
'''
class WatsonSession:
def __init__(self, watson_connector):
self.wc = watson_connector
self.session_id = None
pass #do rest after Flask loaded, .flaskenv pulled in
def before_first_request(self):
self.timeout = int(checkenv("ASSISTANT_TIMEOUT", 255))
self.last_access = datetime.now() - timedelta(seconds=self.timeout + 10)
self.voice = checkenv("TEXT_TO_SPEECH_VOICE", 'en-US_AllisonVoice')
self.model = checkenv("SPEECH_TO_TEXT_MODEL", 'en-US_BroadbandModel')
print(f' model: {self.model} voice: {self.voice} ')
# create a new session if not there, otherwise return active
def get_session(self):
now = datetime.now()
elapsed = now - self.last_access
if elapsed.total_seconds() > self.timeout:
self.session_id = None #no need to delete, its gone already
self.last_access = now
if self.session_id != None:
return self.session_id
response = wconn.assistant_api.create_session(assistant_id=wconn.assistant_id).get_result()
self.session_id = response['session_id']
print(f'Session created! {self.session_id}')
return self.session_id
def delete_session(self):
if self.session_id == None:
return
try:
wconn.assistant_api.delete_session(
assistant_id=wconn.assistant_id,
session_id=self.session_id).get_result()
print(f'Session {self.session_id} deleted. Bye...')
except:
pass
self.session_id = None
app = Flask(__name__)
socketio = SocketIO(app)
CORS(app)
wconn = WatsonConnector()
wsess = WatsonSession(wconn)
# Redirect http to https on CloudFoundry
@app.before_request
def before_request():
fwd = request.headers.get('x-forwarded-proto')
# Not on Cloud Foundry
if fwd is None:
return None
# On Cloud Foundry and is https
elif fwd == "https":
return None
# On Cloud Foundry and is http, then redirect
elif fwd == "http":
url = request.url.replace('http://', 'https://', 1)
code = 301
return redirect(url, code=code)
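# Illustrative behaviour of the redirect above (hypothetical host name):
# a request forwarded as http://myapp.example.com/ arrives with x-forwarded-proto: http
# and is answered with a 301 to https://myapp.example.com/; requests without the header
# (local development) or already on https pass straight through.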
@app.route('/')
def Welcome():
return app.send_static_file('index.html')
@app.route('/api/conversation', methods=['POST', 'GET'])
def getConvResponse():
#global session_id, assistant_id, assistant_api
conv_text = request.form.get('convText') or 'hello'
# converse with the Watson Assistant bot
input = {
'text': conv_text,
'options': {'alternate_intents': True, 'return_context': True, 'debug': True }
}
try:
response = wconn.assistant_api.message(
assistant_id=wconn.assistant_id,
session_id=wsess.get_session(),
input=input).get_result()
except:
wsess.delete_session()
return jsonify(results={
'responseText': 'session failed, retry',
'context': ''
})
print(json.dumps(response, indent=2))
response_txt = []
for item in response["output"]["generic"]:
response_txt.append(item["text"])
if isinstance(response_txt, list):
response_txt = '... '.join(response_txt)
response_details = {
'responseText': response_txt,
'context': response["context"]
}
wconn.record_chat(conv_text, response_txt, response["output"]["entities"])
#delete session if explicit from user
if (conv_text == "bye"):
wsess.delete_session()
return jsonify(results=response_details)
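# Example request (illustrative, assuming the app runs on localhost:5000):
# curl -X POST -d "convText=hello" http://localhost:5000/api/conversation
# -> {"results": {"responseText": "...", "context": {...}}}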
@app.route('/api/text-to-speech', methods=['POST'])
def get_speech_from_text():
input_text = request.form.get('text')
my_voice = request.form.get('voice', wsess.voice)
print(f'get_speech_from_text - input: {input_text} len {len(input_text)} voice: {my_voice}')
def generate():
if input_text:
audio_out = wconn.text_to_speech.synthesize(
text=input_text,
accept='audio/wav',
voice=my_voice).get_result()
data = audio_out.content
else:
print("Empty response")
data = "I have no response to that."
yield data
return Response(response=generate(), mimetype="audio/x-wav")
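# Example request (illustrative): synthesize a short phrase to a wav file.
# curl -X POST -d "text=Hello there" -d "voice=en-US_AllisonVoice" \
#      http://localhost:5000/api/text-to-speech --output reply.wav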
@app.route('/api/speech-to-text', methods=['POST'])
def getTextFromSpeech():
audio = request.get_data(cache=False)
print(f'audio size is {len(audio)}')
response = wconn.speech_to_text.recognize(
audio=audio,
content_type='audio/wav',
timestamps=True,
word_confidence=True,
smart_formatting=True).get_result()
# Ask user to repeat if STT can't transcribe the speech
if len(response['results']) < 1:
return Response(mimetype='text/plain',
response="Sorry, didn't get that. Please try again!")
text_output = response['results'][0]['alternatives'][0]['transcript']
text_output = text_output.strip()
return Response(response=text_output, mimetype='text/plain')
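# Example request (illustrative; sample.wav is a placeholder file):
# curl -X POST --data-binary @sample.wav -H "Content-Type: audio/wav" \
#      http://localhost:5000/api/speech-to-text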
@app.before_first_request
def before_first_request():
#delayed so Flask env loaded
wconn.before_first_request()
wsess.before_first_request()
if __name__ == "__main__":
print('hello from __main__')
port = os.environ.get("PORT") or os.environ.get("VCAP_APP_PORT") or 5000
socketio.run(app, host='0.0.0.0', port=int(port))
app.run(debug=True)
| # -*- coding: utf-8 -*-
# Copyright 2018 IBM Corp. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the “License”)
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an “AS IS” BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
from datetime import datetime, timedelta
from pathlib import Path
from dotenv import load_dotenv
from flask import Flask, Response
from flask import jsonify
from flask import request, redirect
from flask_socketio import SocketIO
from flask_cors import CORS
from ibm_watson import AssistantV2
from ibm_cloud_sdk_core.authenticators import IAMAuthenticator
from ibm_watson import SpeechToTextV1
from ibm_watson import TextToSpeechV1
# get from environment, default or exception
def checkenv(checkfor, default=None):
required = os.environ.get(checkfor) or None
if required != None:
return required
elif default != None:
return default
raise ValueError(f'{checkfor} not found in: environment, .env or .flaskenv - Correct config')
'''
WatsonConnector ia a collection of API objects and a chat recorder utility.
The API objects are shared by all users you may have. No session data
'''
class WatsonConnector:
def __init__(self):
pass #do it all after Flask loaded, .flaskenv pulled in
def before_first_request(self):
#check for mandatory configuration.
#Note: some of the API Details supplied by IBM append the version and key details.
# The IBM implemnation passes the key in as a parameter and builds up the
# complete URL. If you pass in what is supplied, version and key are duplicated and causes
# errors. To avoid duplication and an invlaid URL, truncate the version and everyting after it.
#Watson Assitant
self.wa_apikey = checkenv('ASSISTANT_APIKEY')
self.wa_url = checkenv('ASSISTANT_URL').split('/v')[0] #truncate version
self.assistant_id = checkenv('ASSISTANT_ID')
self.assistant_version = checkenv("ASSISTANT_VERSION")
authenticator = IAMAuthenticator(self.wa_apikey)
record = checkenv("ASSISTANT_RECORD", "NO").lower()
if record[0] in ['n', 'y', 'u']: # record n-none, y-yes to all, u- yes to unkown(Watson does not recognize)
self.record_questions = record[0]
self.chatlog = checkenv("ASSISTANT_RECORD_FILE", 'chatlog.csv')
Path(self.chatlog).touch() #if create/access problems File???Error gets raised now
else:
self.record_questions = 'n'
self.assistant_api = AssistantV2(authenticator=authenticator, version=self.assistant_version)
self.assistant_api.set_service_url(self.wa_url)
#Speech to Text
self.s2t_apikey =checkenv('SPEECH_TO_TEXT_APIKEY')
self.s2t_url = checkenv('SPEECH_TO_TEXT_URL').split('/v')[0] #truncate version
authenticator = IAMAuthenticator(self.s2t_apikey)
self.speech_to_text = SpeechToTextV1(authenticator)
self.speech_to_text.set_service_url(self.s2t_url)
# Text to Speech
self.t2s_apikey = checkenv('TEXT_TO_SPEECH_APIKEY')
self.t2s_url = checkenv('TEXT_TO_SPEECH_URL').split('/v')[0] #truncate version
authenticator = IAMAuthenticator(self.t2s_apikey)
self.text_to_speech = TextToSpeechV1(authenticator)
self.text_to_speech.set_service_url(self.t2s_url)
print('Config:')
print(f' Watson key: {self.wa_apikey} version: {self.assistant_version} ')
print(f' Watson url: {self.wa_url}')
print(f' speech_to_text key: {self.s2t_apikey}')
print(f' speech_to_text url: {self.s2t_url}')
print(f' text_to_speech key: {self.t2s_apikey}')
print(f' text_to_speech url: {self.t2s_url}')
def record_chat(self, conv_text, response_txt, entities):
if self.record_questions == 'n':
return
elif self.record_questions == 'u' and len(entities) <= 0:
return
with open(self.chatlog,'a') as fd:
# Watson identified, we want to know what and confidence
if len(entities) > 0:
ln = f'{conv_text},{response_txt}'
news = [f'{entity["entity"]}:{entity["value"]}:{entity["confidence"]}' for entity in entities]
ln += ',' + ','.join(news)
print(ln)
# request was unidentified, don't need to know response text
else:
ln = conv_text
ln = ln.replace('\n', ' ')
ln = ln.replace(',', '|')
fd.write(f'{ln}\n')
'''
WatsonSession is a long running session between a specific user and the Watson Assistant.
The sessions have context and timpout logic. The Watson Assitant has sessions, the
speech APIs do not.
'''
class WatsonSession:
def __init__(self, watson_connector):
self.wc = watson_connector
self.session_id = None
pass #do rest after Flask loaded, .flaskenv pulled in
def before_first_request(self):
self.timeout = int(checkenv("ASSISTANT_TIMEOUT", 255))
self.last_access = datetime.now() - timedelta(seconds=self.timeout + 10)
self.voice = checkenv("TEXT_TO_SPEECH_VOICE", 'en-US_AllisonVoice')
self.model = checkenv("SPEECH_TO_TEXT_MODEL", 'en-US_BroadbandModel')
print(f' model: {self.model} voice: {self.voice} ')
# create a new session if not there, otherwise return active
def get_session(self):
now = datetime.now()
elapsed = now - self.last_access
if elapsed.total_seconds() > self.timeout:
self.session_id = None #no need to delete, its gone already
self.last_access = now
if self.session_id != None:
return self.session_id
response = wconn.assistant_api.create_session(assistant_id=wconn.assistant_id).get_result()
self.session_id = response['session_id']
print(f'Session created! {self.session_id}')
return self.session_id
def delete_session(self):
if self.session_id == None:
return
try:
wconn.assistant_api.delete_session(
assistant_id=wconn.assistant_id,
session_id=self.session_id).get_result()
print(f'Session {self.session_id}deleted. Bye...')
except:
pass
self.session_id = None
app = Flask(__name__)
socketio = SocketIO(app)
CORS(app)
wconn = WatsonConnector()
wsess = WatsonSession(wconn)
# Redirect http to https on CloudFoundry
@app.before_request
def before_request():
fwd = request.headers.get('x-forwarded-proto')
# Not on Cloud Foundry
if fwd is None:
return None
# On Cloud Foundry and is https
elif fwd == "https":
return None
# On Cloud Foundry and is http, then redirect
elif fwd == "http":
url = request.url.replace('http://', 'https://', 1)
code = 301
return redirect(url, code=code)
@app.route('/')
def Welcome():
return app.send_static_file('index.html')
@app.route('/api/conversation', methods=['POST', 'GET'])
def getConvResponse():
#global session_id, assistant_id, assistant_api
conv_text = request.form.get('convText') or 'hello'
# coverse with WA Bot
input = {
'text': conv_text,
'options': {'alternate_intents': True, 'return_context': True, 'debug': True }
}
try:
response = wconn.assistant_api.message(
assistant_id=wconn.assistant_id,
session_id=wsess.get_session(),
input=input).get_result()
except:
wsess.delete_session()
return jsonify(results={
'responseText': 'session failed, retry',
'context': ''
})
print(json.dumps(response, indent=2))
response_txt = []
for item in response["output"]["generic"]:
response_txt.append(item["text"])
if isinstance(response_txt, list):
response_txt = '... '.join(response_txt)
response_details = {
'responseText': response_txt,
'context': response["context"]
}
wconn.record_chat(conv_text, response_txt, response["output"]["entities"])
#delete session if explicit from user
if (conv_text == "bye"):
wsess.delete_session()
return jsonify(results=response_details)
@app.route('/api/text-to-speech', methods=['POST'])
def get_speech_from_text():
input_text = request.form.get('text')
my_voice = request.form.get('voice', wsess.voice)
print(f'get_speech_from_text - input: {input_text} len {len(input_text)} voice: {my_voice}')
def generate():
if input_text:
audio_out = wconn.text_to_speech.synthesize(
text=input_text,
accept='audio/wav',
voice=my_voice).get_result()
data = audio_out.content
else:
print("Empty response")
data = "I have no response to that."
yield data
return Response(response=generate(), mimetype="audio/x-wav")
@app.route('/api/speech-to-text', methods=['POST'])
def getTextFromSpeech():
audio = request.get_data(cache=False)
print(f'audio size is {len(audio)}')
response = wconn.speech_to_text.recognize(
audio=audio,
content_type='audio/wav',
timestamps=True,
word_confidence=True,
smart_formatting=True).get_result()
# Ask user to repeat if STT can't transcribe the speech
if len(response['results']) < 1:
return Response(mimetype='text/plain',
response="Sorry, didn't get that. Please try again!")
text_output = response['results'][0]['alternatives'][0]['transcript']
text_output = text_output.strip()
return Response(response=text_output, mimetype='text/plain')
@app.before_first_request
def before_first_request():
#delayed so Flask env loaded
wconn.before_first_request()
wsess.before_first_request()
if __name__ == "__main__":
print('hello from __main__')
port = os.environ.get("PORT") or os.environ.get("VCAP_APP_PORT") or 5000
socketio.run(app, host='0.0.0.0', port=int(port))
app.run(debug=True)
|
import numpy as np
import os
import torch
import time
import matplotlib.pyplot as plt
from isaacgym import gymutil, gymtorch, gymapi
from isaacgym.torch_utils import *
from isaacgym.gymtorch import *
from isaacgymenvs.utils.torch_jit_utils import *
from tasks.base.vec_task import VecTask
class TenseBot(VecTask):
def __init__(self, cfg, sim_device, graphics_device_id, headless):
self.cfg = cfg
self.dt = self.cfg["sim"]["dt"]
self.max_episode_length = self.cfg["env"]["maxEpisodeLength"]
# self.randomization_params = self.cfg["task"]["randomization_params"]
# self.randomize = self.cfg["task"]["randomize"]
self.dof_vel_scale = self.cfg["env"]["dofVelocityScale"]
# self.contact_force_scale = self.cfg["env"]["contactForceScale"]
# self.power_scale = self.cfg["env"]["powerScale"]
self.heading_weight = self.cfg["env"]["headingWeight"]
self.up_weight = self.cfg["env"]["upWeight"]
# self.actions_cost_scale = self.cfg["env"]["actionsCost"]
# self.energy_cost_scale = self.cfg["env"]["energyCost"]
# self.joints_at_limit_cost_scale = self.cfg["env"]["jointsAtLimitCost"]
self.death_cost = self.cfg["env"]["deathCost"]
self.termination_height = self.cfg["env"]["terminationHeight"]
# self.debug_viz = self.cfg["env"]["enableDebugVis"]
self.plane_static_friction = self.cfg["env"]["plane"]["staticFriction"]
self.plane_dynamic_friction = self.cfg["env"]["plane"]["dynamicFriction"]
self.plane_restitution = self.cfg["env"]["plane"]["restitution"]
self.drive_mode = self.cfg["env"]["actuatorParams"]["driveMode"]
self.stiffness = self.cfg["env"]["actuatorParams"]["stiffness"] * self.drive_mode
self.damping = self.cfg["env"]["actuatorParams"]["damping"] * self.drive_mode
self.maxPosition = self.cfg["env"]["actuatorParams"]["maxPosition"]
self.maxSpeed = self.cfg["env"]["actuatorParams"]["maxSpeed"]
self.maxTorque = self.cfg["env"]["actuatorParams"]["maxTorque"]
self.friction = self.cfg["env"]["actuatorParams"]["friction"]
self.torqueDecay = self.cfg["env"]["actuatorParams"]["torqueDecay"]
self.angularDamping = self.cfg["env"]["assetParams"]["angularDamping"]
self.angularVelocity = self.cfg["env"]["assetParams"]["angularVelocity"]
self.goal_dist = self.cfg["env"]["goalDist"]
self.goal_threshold = self.cfg["env"]["goalThreshold"]
# obs_buf shape: (54)
# obs_buf[0:39] = Rod State x 3 : Pos(3), Ori(4), LinVel(3), AngVel(3)
# obs_buf[39:42] = Goal Pos : Pos(3)
# obs_buf[42:45] = vector to goal (3)
# obs_buf[45:54] = actions : Spring Length Multipliers (9)
self.cfg["env"]["numObservations"] = 54
# Spring Length Multipliers (9)
self.cfg["env"]["numActions"] = 9
super().__init__(config=self.cfg, sim_device=sim_device, graphics_device_id=graphics_device_id, headless=headless)
# set init state
pos = self.cfg["env"]["baseInitState"]["pos"]
rot = self.cfg["env"]["baseInitState"]["rot"]
v_lin = self.cfg["env"]["baseInitState"]["vLinear"]
v_ang = self.cfg["env"]["baseInitState"]["vAngular"]
state = pos + rot + v_lin + v_ang
self.base_init_state = torch.tensor(state, device=self.device)
self.start_rotation = torch.tensor(rot, device=self.device)
# get gym GPU root state tensor
actor_root_state = self.gym.acquire_actor_root_state_tensor(self.sim)
self.root_states = gymtorch.wrap_tensor(actor_root_state)
print('root_state')
print(self.root_states.cpu().detach().numpy())
print(self.root_states.shape)
print('num_envs {}, num_actors {}'.format(self.num_envs, self.num_actors))
self.tensebot_pos = self.root_states.view(self.num_envs, self.num_actors, 13)[..., 0:3, 0:3] #num_envs, num_actors, 13 (pos,ori,Lvel,Avel)
self.tensebot_ori = self.root_states.view(self.num_envs, self.num_actors, 13)[..., 0:3, 3:7] #num_envs, num_actors, 13 (pos,ori,Lvel,Avel)
self.tensebot_linvel = self.root_states.view(self.num_envs, self.num_actors, 13)[..., 0:3, 7:10] #num_envs, num_actors, 13 (pos,ori,Lvel,Avel)
self.tensebot_angvel = self.root_states.view(self.num_envs, self.num_actors, 13)[..., 0:3, 10:13] #num_envs, num_actors, 13 (pos,ori,Lvel,Avel)
self.goal_pos = self.root_states.view(self.num_envs, self.num_actors, 13)[..., 3, 0:3] #num_envs, num_actors, 13 (pos,ori,Lvel,Avel)
self.gym.refresh_actor_root_state_tensor(self.sim)
self.tensebot_root_state = self.root_states.view(self.num_envs, self.num_actors, 13)[..., 0:3, :]
self.tensebot_initial_root_states = self.tensebot_root_state.clone()
# self.tensebot_initial_root_states[:] = to_torch(self.base_init_state, device=self.device, requires_grad=False)
rb_state_tensor = self.gym.acquire_rigid_body_state_tensor(self.sim)
self.rb_state = gymtorch.wrap_tensor(rb_state_tensor)
print('rigid_body_state')
print(self.rb_state.cpu().detach().numpy())
print(self.rb_state.shape)
print('num_envs {}, num_bodies {}'.format(self.num_envs, self.num_bodies))
self.rb_pos = self.rb_state.view(self.num_envs, self.num_bodies, 13)[:, :, 0:3] #num_envs, num_rigid_bodies, 13 (pos,ori,Lvel,Avel)
self.rb_ori = self.rb_state.view(self.num_envs, self.num_bodies, 13)[:, :, 3:7] #num_envs, num_rigid_bodies, 13 (pos,ori,Lvel,Avel)
self.rb_linvel = self.rb_state.view(self.num_envs, self.num_bodies, 13)[:, :, 7:10] #num_envs, num_rigid_bodies, 13 (pos,ori,Lvel,Avel)
self.rb_angvel = self.rb_state.view(self.num_envs, self.num_bodies, 13)[:, :, 10:13] #num_envs, num_rigid_bodies, 13 (pos,ori,Lvel,Avel)
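# Layout note (descriptive): both root_states and rb_state hold one 13-float row per
# actor / rigid body: [0:3] position, [3:7] quaternion (x, y, z, w), [7:10] linear
# velocity, [10:13] angular velocity. The views above slice that row once so later code
# can index positions and velocities directly, e.g. (illustrative read, not used below):
# first_rod_height = self.rb_pos[0, 0, 2]  # z of rigid body 0 in env 0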
# Used for rewarding moving towards a target
# tensebot_avg_pos = torch.mean(self.tensebot_pos, dim=1)
tensebot_avg_pos = self.tensebot_pos[:,0,:]
to_target = self.goal_pos - tensebot_avg_pos
to_target[:, 2] = 0.0
self.potentials = -torch.norm(to_target, p=2, dim=-1) / self.dt
self.prev_potentials = self.potentials.clone()
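# Illustrative numbers for the potential-based shaping used in the reward (dt = 0.05 is
# only an example value): with the bot 2.0 m from the goal, potentials = -2.0/0.05 = -40.0;
# if the next step closes the gap to 1.9 m, potentials becomes -38.0 and the progress
# reward is potentials - prev_potentials = +2.0. Moving away from the goal is penalized.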
self.goal_reset = torch.ones(self.num_envs, device=self.device, dtype=torch.long)
goal_ids = self.goal_reset.nonzero(as_tuple=False).squeeze(-1)
if len(goal_ids) > 0:
self.reset_goal(goal_ids)
# Measurements for rewards
self.up_vec = to_torch(get_axis_params(1., self.up_axis_idx), device=self.device).repeat((self.num_envs, 1))
self.heading_vec = to_torch([1, 0, 0], device=self.device).repeat((self.num_envs, 1))
self.inv_start_rot = quat_conjugate(self.start_rotation).repeat((self.num_envs, 1))
self.basis_vec0 = self.heading_vec.clone()
self.basis_vec1 = self.up_vec.clone()
self.frame_count = 0
self.plot_buffer = []
self.accumulated_reward = torch.zeros_like(self.rew_buf)
camOffset = gymapi.Vec3(0, -1.5, 0.25)
camTarget = gymapi.Vec3(self.tensebot_pos[0, 0, 0],self.tensebot_pos[0, 0, 1],self.tensebot_pos[0, 0, 2])
self.gym.viewer_camera_look_at(self.viewer, None, camOffset+camTarget, camTarget)
def create_sim(self):
# set the up axis to be z-up given that assets are y-up by default
self.up_axis_idx = self.set_sim_params_up_axis(self.sim_params, 'z')
# self.sim_params.gravity = gymapi.Vec3(0.0, 0.0, 0.0)
self.sim = super().create_sim(self.device_id, self.graphics_device_id, self.physics_engine, self.sim_params)
self._create_ground_plane()
print(f"num envs {self.num_envs} env spacing {self.cfg['env']['envSpacing']}")
self._create_envs(self.num_envs, self.cfg["env"]['envSpacing'], int(np.sqrt(self.num_envs)))
def _create_ground_plane(self):
plane_params = gymapi.PlaneParams()
# set the normal force to be z dimension
plane_params.normal = gymapi.Vec3(0.0, 0.0, 1.0)
plane_params.static_friction = self.plane_static_friction
plane_params.dynamic_friction = self.plane_dynamic_friction
self.gym.add_ground(self.sim, plane_params)
def _create_envs(self, num_envs, spacing, num_per_row):
# define plane on which environments are initialized
lower = gymapi.Vec3(0.5 * -spacing, -spacing, 0.0)
upper = gymapi.Vec3(0.5 * spacing, spacing, spacing)
asset_root = os.path.join(os.path.dirname(os.path.abspath(__file__)), "../../assets")
asset_file = "urdf/RodAssembly/urdf/RodAssembly.urdf"
if "asset" in self.cfg["env"]:
asset_root = os.path.join(os.path.dirname(os.path.abspath(__file__)), self.cfg["env"]["asset"].get("assetRoot", asset_root))
asset_file = self.cfg["env"]["asset"].get("assetFileName", asset_file)
asset_path = os.path.join(asset_root, asset_file)
asset_root = os.path.dirname(asset_path)
asset_file = os.path.basename(asset_path)
asset_options = gymapi.AssetOptions()
asset_options.fix_base_link = False
asset_options.angular_damping = self.angularDamping
asset_options.max_angular_velocity = self.angularVelocity
rod_assembly_asset = self.gym.load_asset(self.sim, asset_root, asset_file, asset_options)
self.num_dof = self.gym.get_asset_dof_count(rod_assembly_asset)
goal_asset = self.gym.create_sphere(self.sim, 0.025)
self.num_bodies = self.gym.get_asset_rigid_body_count(rod_assembly_asset)*3 + self.gym.get_asset_rigid_body_count(goal_asset) #3 rod assemblies per tensebot
# self.num_actor = get_sim_actor_count
pose = gymapi.Transform()
self.rod_handles = []
self.tensebot_handles = []
self.goal_handles = []
self.envs = []
self.dof_limits_lower = []
self.dof_limits_upper = []
for i in range(self.num_envs):
# create env instance
tensebot_handle = []
env_ptr = self.gym.create_env(
self.sim, lower, upper, num_per_row
)
radius = 0.05
thetas = [0, 3.1415*2/3, 3.1415*4/3] #0 deg, 120 deg, 240 deg
for j, t in enumerate(thetas):
pose.p = gymapi.Vec3(radius*torch.cos(torch.tensor(t)), radius*torch.sin(torch.tensor(t)), 0.1)
pose.r = gymapi.Quat.from_euler_zyx(-3.1415/4, 0, t)
rod_handle = self.gym.create_actor(env_ptr, rod_assembly_asset, pose, "rodassembly{}".format(j), i, 0, 0)
rand_color = torch.rand((3), device=self.device)
# colour only this rod's bodies; self.num_bodies counts every body in the env (3 rods + goal),
# and reusing j here would shadow the outer loop variable
for body_idx in range(self.gym.get_actor_rigid_body_count(env_ptr, rod_handle)):
# self.gym.set_rigid_body_color(
# env_ptr, tensebot_handle, j, gymapi.MESH_VISUAL, gymapi.Vec3(0.27, 0.1, 0.66))
self.gym.set_rigid_body_color(
env_ptr, rod_handle, body_idx, gymapi.MESH_VISUAL, gymapi.Vec3(rand_color[0], rand_color[1], rand_color[2]))
self.rod_handles.append(rod_handle)
tensebot_handle.append(rod_handle)
self.tensebot_handles.append(tensebot_handle)
self.envs.append(env_ptr)
# Set Up the Goal Actor
goal_pose = gymapi.Transform()
goal_pose.p.y = self.goal_dist
goal_handle = self.gym.create_actor(env_ptr, goal_asset, goal_pose, "goal", i, 1, 1)
self.gym.set_rigid_body_color(env_ptr, goal_handle, 0, gymapi.MESH_VISUAL, gymapi.Vec3(0.2, 0.8, 0.2))
self.goal_handles.append(goal_handle)
self.num_actors = self.gym.get_actor_count(self.envs[0])
self.body_dict = self.gym.get_actor_rigid_body_dict(env_ptr, tensebot_handle[0])
self.joint_dict = self.gym.get_actor_joint_dict(env_ptr, tensebot_handle[0])
print('body_dict:')
print(self.body_dict)
for b in self.body_dict:
print(b)
print('joint_dict:')
for j in self.joint_dict:
print(j)
def compute_reward(self):
self.rew_buf[:], self.reset_buf[:], self.goal_reset = compute_tensebot_reward(
self.tensebot_pos,
self.goal_pos,
self.reset_buf,
self.progress_buf,
self.potentials,
self.prev_potentials,
self.max_episode_length,
self.goal_threshold)
def compute_observations(self, env_ids=None):
if env_ids is None:
env_ids = np.arange(self.num_envs)
self.gym.refresh_dof_state_tensor(self.sim)
self.gym.refresh_actor_root_state_tensor(self.sim)
self.gym.refresh_rigid_body_state_tensor(self.sim)
# print('self.root_state')
# print(self.root_states[0,:])
# print(self.root_states.shape)
# time.sleep(1)
self.obs_buf[:], self.potentials[:], self.prev_potentials[:] = compute_tensebot_observations(
self.tensebot_pos,
self.tensebot_ori,
self.tensebot_linvel,
self.tensebot_angvel,
self.goal_pos,
self.potentials,
self.actions,
self.dt)
return self.obs_buf
def reset_idx(self, env_ids):
print('Resetting IDX! Env_IDs = {}'.format(env_ids))
env_ids_int32 = env_ids.to(dtype=torch.int32)*self.num_actors
env_ids_int32 = torch.cat((env_ids_int32, env_ids_int32+1, env_ids_int32+2))
self.tensebot_root_state[env_ids, :, :] = self.tensebot_initial_root_states[env_ids, :, :]
self.gym.set_actor_root_state_tensor_indexed(self.sim,
gymtorch.unwrap_tensor(self.root_states),
gymtorch.unwrap_tensor(env_ids_int32), len(env_ids_int32))
self.reset_buf[env_ids] = 0
self.progress_buf[env_ids] = 0
self.goal_reset[env_ids] = 1
# actor_root_state = self.gym.acquire_actor_root_state_tensor(self.sim)
# self.root_states = gymtorch.wrap_tensor(actor_root_state)
# self.initial_root_states = self.root_states.clone()
# self.initial_root_states[:] = to_torch(self.base_init_state, device=self.device, requires_grad=False)
# plt.plot([0,0,0])
# plt.show()
# if(self.plot_buffer):
# plot_data = np.array(self.plot_buffer)
# print(plot_data.shape)
# plt.plot(plot_data[:,0,0] + plot_data[:,1,0] + plot_data[:,2,0], label="Total Reward")
# plt.plot(plot_data[:,0,0], label="Progress Reward")
# plt.plot(plot_data[:,1,0], label="Height Reward")
# plt.plot(plot_data[:,2,0], label="Heading Reward")
# plt.ylabel('Reward')
# plt.xlabel('Steps')
# plt.grid()
# plt.legend(loc="lower right")
# plt.xlim([0, 500])
# plt.ylim([-0.1, 2.1])
# plt.show()
# self.plot_buffer = []
def reset_goal(self, env_ids):
print('reset_goal')
self.gym.refresh_actor_root_state_tensor(self.sim)
# print('Resetting Goals! Env_IDs = {}'.format(env_ids))
# print('Old Goal Position = {}'.format(self.goal_pos))
env_ids_int32 = env_ids.to(dtype=torch.int32)*self.num_actors
goal_pos_update = torch_rand_float(-self.goal_dist, self.goal_dist, (len(env_ids), 3), device=self.device)
# goal_pos_update[:,0] = 1000.0
# goal_pos_update[:,1] = 0.0
goal_pos_update[:,2] = 0.1
self.goal_pos[env_ids, :] = goal_pos_update
self.gym.set_actor_root_state_tensor_indexed(self.sim,
gymtorch.unwrap_tensor(self.root_states),
gymtorch.unwrap_tensor(env_ids_int32+3), len(env_ids_int32))
# self.gym.refresh_actor_root_state_tensor(self.sim)
# tensebot_avg_pos = torch.mean(self.tensebot_pos, dim=1)
tensebot_avg_pos = self.tensebot_pos[:,0,:]
to_target = self.goal_pos[env_ids, :] - tensebot_avg_pos[env_ids, :]
to_target[:, 2] = 0.0
self.prev_potentials[env_ids] = -torch.norm(to_target, p=2, dim=-1) / self.dt
self.potentials[env_ids] = self.prev_potentials[env_ids].clone()
self.goal_reset[env_ids] = 0
# print('New Goal Position = {}'.format(self.goal_pos))
def pre_physics_step(self, actions):
# print('actions')
# print(actions)
# print(actions.shape)
# print(actions.to(self.device).squeeze().shape())
self.actions = actions.clone().detach().to(self.device)
self.calculate_tensegrity_forces(self.actions)
def calculate_tensegrity_forces(self, actions):
# # print('actions : {}'.format(actions))
connection_list = []
# (1,2),(4,5),(7,8) end point indices (bottom, top)
# 0, 3, 6 are the body indices
# 9 is the goal index
# This might need a low pass filter
spring_length_multiplier = actions/4 + 1 # maps actions in [-1, 1] to multipliers in [0.75, 1.25]
# spring_length_multiplier = torch.rand((self.num_envs, 9), device=self.device)/4 + 1
# spring_length_multiplier = torch.ones((self.num_envs, 9), device=self.device)*0.1
# Connect All Bottoms
connection_list.append((1, 4, 0.1))
connection_list.append((1, 7, 0.1))
connection_list.append((4, 7, 0.1))
#Connect All Tops
connection_list.append((2, 5, 0.1))
connection_list.append((2, 8, 0.1))
connection_list.append((5, 8, 0.1))
#Top1 to Bottom2
connection_list.append((2, 4, 0.1)) #Body0 top is connected to Body1 bottom
#Top2 to Bottom3
connection_list.append((5, 7, 0.1)) #Body0 top is connected to Body1 bottom
#Top3 to Bottom1
connection_list.append((8, 1, 0.1)) #Body0 top is connected to Body1 bottom
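# Topology sketch (a reading of the list above, for orientation only): each rod assembly
# contributes a bottom endpoint (1, 4, 7) and a top endpoint (2, 5, 8). Three springs tie
# the bottoms into a triangle, three tie the tops into a triangle, and three diagonal
# springs (top of rod i to bottom of rod i+1) close the tensegrity loop, giving the nine
# actuated spring-length multipliers expected by numActions.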
#Connect All The Things...
forces = torch.zeros_like(self.rb_pos, device=self.device, dtype=torch.float)
force_positions = self.rb_pos.clone()
lin_vel_mat = torch.zeros((self.num_envs, 2, 3), device=self.device, dtype=torch.float) # Used in calculating damping force
diff_matrix= torch.tensor([[-1, 1]], device=self.device, dtype=torch.float)
num_lines = len(connection_list)
line_vertices = torch.zeros((num_lines*2,3), device=self.device, dtype=torch.float)
line_colors = torch.zeros((num_lines,3), device=self.device, dtype=torch.float)
for connection, i in zip(connection_list, range(len(connection_list))):
# print(connection)
# Spring Force
P1 = self.rb_pos[:, connection[0], :]
P2 = self.rb_pos[:, connection[1], :]
endpoint_vector = P1-P2
# print('endpoint_vector.shape')
# print(endpoint_vector.shape)
spring_constant = 25
damping_coff = 0.99
spring_length = connection[2] * spring_length_multiplier[:, i]
# print('spring_length.shape')
# print(spring_length.shape)
# print('P1.shape')
# print(P1.shape)
# print('P2.shape')
# print(P2.shape)
endpoint_distance = torch.norm(endpoint_vector, dim=1)
# print('endpoint_distance.shape')
# print(endpoint_distance.shape)
endpoint_vector_normalized = torch.div(endpoint_vector, torch.unsqueeze(endpoint_distance,1).repeat(1,3))
# print('endpoint_vector_normalized.shape')
# print(endpoint_vector_normalized.shape)
spring_force = spring_constant*(endpoint_distance-spring_length)
# print('spring_force.shape')
# print(spring_force.shape)
# Set springs to only work for tension and not compression
spring_force = torch.clamp(spring_force, min=0.0) # tension only; avoids re-wrapping an existing tensor with torch.tensor()
applied_force = torch.mul(endpoint_vector_normalized, torch.unsqueeze(spring_force,1).repeat(1,3))
applied_force = torch.nan_to_num(applied_force, nan=0.0)
# print('applied force')
# print(appled_force.shape)
# print('Spring {} Tension = {}'.format(i, spring_force))
# print('forces.shape')
# print(forces.shape)
# print(connection[0])
# print(connection[1])
forces[:, connection[0], :] -= applied_force
forces[:, connection[1], :] += applied_force
# print('forces[0,:,:]')
# print(forces[0,:,:])
# print('applied_force[0,:]')
# print(applied_force[0,:])
# print('endpoint_vector_normalized')
# print(endpoint_vector_normalized)
# print(endpoint_distance)
# Damping
lin_vel_mat[:, 0, :] = self.rb_linvel[:, connection[0], :]
lin_vel_mat[:, 1, :] = self.rb_linvel[:, connection[1], :]
EVN_mat = torch.unsqueeze(endpoint_vector_normalized, 2)
# print(lin_vel_mat.shape)
# print(EVN_mat.shape)
damping_force = torch.matmul(diff_matrix, torch.matmul(lin_vel_mat, EVN_mat))*damping_coff
# print('damping_force.shape')
# print(torch.squeeze(damping_force, dim=2).shape)
# print('endpoint_vector_normalized.shape')
# print(endpoint_vector_normalized.shape)
damping_force_vector = endpoint_vector_normalized *torch.squeeze(damping_force, dim=2)
# print('damping_force_vector.shape')
# print(damping_force_vector.shape)
damping_force_vector = torch.nan_to_num(damping_force_vector, nan=0.0)
forces[:, connection[0], :] += damping_force_vector
forces[:, connection[1], :] -= damping_force_vector
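# Damping math above, in brief (descriptive comment, not new behaviour): the endpoint
# velocities are each projected onto the normalized spring axis, diff_matrix takes their
# difference (relative velocity along the axis), the result is scaled by damping_coff and
# applied along the axis with opposite signs at the two ends, so each spring only damps
# motion along its own length.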
# Draw Spring Connections?
line_vertices[i*2,:] = self.rb_pos[0, connection[0], :]
line_vertices[i*2+1,:] = self.rb_pos[0, connection[1], :]
line_colors[i,:] = torch.tensor([1.0, 1.0, 1.0])
self.gym.apply_rigid_body_force_at_pos_tensors(self.sim, gymtorch.unwrap_tensor(forces), gymtorch.unwrap_tensor(force_positions), gymapi.ENV_SPACE)
self.gym.clear_lines(self.viewer)
self.gym.add_lines(self.viewer, self.envs[0], num_lines, line_vertices.cpu().detach(), line_colors.cpu().detach())
def post_physics_step(self):
self.progress_buf += 1
env_ids = self.reset_buf.nonzero(as_tuple=False).squeeze(-1)
if len(env_ids) > 0:
self.reset_idx(env_ids)
goal_ids = self.goal_reset.nonzero(as_tuple=False).squeeze(-1)
if len(goal_ids) > 0:
self.reset_goal(goal_ids)
self.compute_observations()
self.compute_reward()
# Look at the first actor
env_idx = 0
camOffset = gymapi.Vec3(0, -1.5, 0.25)
camTarget = gymapi.Vec3(self.tensebot_pos[env_idx, 0, 0],self.tensebot_pos[env_idx, 0, 1],self.tensebot_pos[env_idx, 0, 2])
camEnvOffset = gymapi.Vec3(0, 0, 0)
# print(camOffset)
# print(camTarget)
# self.gym.viewer_camera_look_at(self.viewer, None, camOffset+camTarget+camEnvOffset, camTarget+camEnvOffset)
# time.sleep(0.1)
# self.debug_printout(env_ids)
def debug_printout(self, env_ids):
self.accumulated_reward += self.rew_buf
# print('potentials and previous potentials')
# print(self.potentials)
# print(self.prev_potentials)
print('reward buf')
print(self.rew_buf)
if len(env_ids) > 0:
self.accumulated_reward[env_ids] = 0
print('self.accumulated_reward')
print(self.accumulated_reward)
# # print('DEBUG PRINTOUTS')
# # body_height = self.obs_buf[:,2]
# # up_projection = self.obs_buf[:,29]
# # heading_projection = self.obs_buf[:, 30]
# # heading_reward = self.heading_weight * heading_projection
# # # aligning up axis and environment
# # up_reward = torch.zeros_like(heading_reward)
# # up_reward = torch.where(up_projection > 0.93, up_reward + self.up_weight, up_reward)
# # # reward for duration of staying alive
# # progress_reward = self.potentials - self.prev_potentials
# # total_reward = progress_reward + up_reward + heading_reward]
# xtream_rewards = torch.abs(self.rew_buf) > 5
# # print('ProgressReward[3] : {} = {} - {}'.format(progress_reward[3], self.potentials[3], self.prev_potentials[3]))
# # print('EnvReset[3], GoalReset[3] : {}, {}'.format(self.reset_buf[3], self.goal_reset[3]))
# # print('Bot Pos, Goal Pos = {}, {}'.format(self.tensebot_pos[3,:], self.goal_pos[3,:]))
# if(torch.any(xtream_rewards)):
# print('XTREAM REWARD DETECTED')
# xtream_idx = xtream_rewards.nonzero().cpu().detach().numpy()
# print("xtream index = {}".format(xtream_idx))
# print(self.rew_buf[xtream_idx])
# print('Progress Reward : {} = {} - {}'.format(progress_reward[xtream_idx], self.potentials[xtream_idx], self.prev_potentials[xtream_idx]))
# print('EnvReset, GoalReset : {},{}'.format(self.reset_buf[xtream_idx], self.goal_reset[xtream_idx]))
# time.sleep(10)
# print()
# # print('{:.2f} = {:.2f} + {:.2f} + {:.2f}'.format(total_reward[0], heading_reward[0], up_reward[0], progress_reward[0]))
# # print(' self.reset_buf')
# # print( self.reset_buf)
# # tmp_progress_reward = self.potentials - self.prev_potentials
# # if( np.abs(tmp_progress_reward[0].cpu().detach().numpy()) > 1):
# # print('{} : {} : {}'.format(tmp_progress_reward[0], self.potentials[0], self.prev_potentials[0]))
# # time.sleep(1)
# # tmp_height_reward = self.obs_buf[:,0]
# # tmp_heading_reward = self.rew_buf - tmp_progress_reward
# # self.plot_buffer.append((tmp_progress_reward.cpu().detach().numpy(),
# # tmp_height_reward.cpu().detach().numpy(),
# # tmp_heading_reward.cpu().detach().numpy()))
#####################################################################
###=========================jit functions=========================###
#####################################################################
@torch.jit.script
def compute_tensebot_reward(
tensebot_pos,
goal_pos,
reset_buf,
progress_buf,
potentials,
prev_potentials,
max_episode_length,
goal_threshold):
# type: (Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, float, float) -> Tuple[Tensor, Tensor, Tensor]
# reward for duration of staying alive
progress_reward = potentials - prev_potentials
total_reward = progress_reward
# reset agents
reset = torch.where(progress_buf >= max_episode_length - 1, torch.ones_like(reset_buf), reset_buf)
# tensebot_avg_pos = torch.mean(tensebot_pos, dim=1)
tensebot_avg_pos = tensebot_pos[:,0,:]
distance_to_goal = torch.norm(tensebot_avg_pos - goal_pos, dim=-1)
goal_reached = torch.where(distance_to_goal < goal_threshold, 1, 0)
goal_reset = torch.where(goal_reached==1, 1, 0)
return total_reward, reset, goal_reset
@torch.jit.script
def compute_tensebot_observations(tensebot_pos, #Tensor
tensebot_ori, #Tensor
tensebot_linvel, #Tensor
tensebot_angvel, #Tensor
goal_pos, #Tensor
potentials, #Tensor
actions, #Tensor
dt #float
):
# type: (Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, float) -> Tuple[Tensor, Tensor, Tensor]
# tensebot_avg_pos = torch.mean(tensebot_pos, dim=1)
tensebot_avg_pos = tensebot_pos[:,0,:]
to_target = goal_pos - tensebot_avg_pos
to_target[:, 2] = 0.0
to_target_norm = torch.div(to_target, torch.unsqueeze(torch.norm(to_target, p=2, dim=-1),1).repeat(1,3))
prev_potentials_new = potentials.clone()
potentials = -torch.norm(to_target, p=2, dim=-1) / dt
# torso_quat, up_proj, heading_proj, up_vec, heading_vec = compute_heading_and_up(
# tensebot_ori, inv_start_rot, to_target, basis_vec0, basis_vec1, 2)
# vel_loc, angvel_loc, roll, pitch, yaw, angle_to_target = compute_rot(
# torso_quat, tensebot_linvel, tensebot_angvel, goal_pos, tensebot_pos)
# obs_buf shape: (54)
# obs_buf[0:39] = Rod State x 3 : Pos(3), Ori(4), LinVel(3), AngVel(3)
# obs_buf[39:42] = Goal Pos : Pos(3)
# obs_buf[42:45] = vector to goal (3)
# obs_buf[45:54] = actions : Spring Length Multipliers (9)
obs = torch.cat((tensebot_pos[:,0,:], tensebot_ori[:,0,:], tensebot_linvel[:,0,:], tensebot_angvel[:,0,:],
tensebot_pos[:,1,:], tensebot_ori[:,1,:], tensebot_linvel[:,1,:], tensebot_angvel[:,1,:],
tensebot_pos[:,2,:], tensebot_ori[:,2,:], tensebot_linvel[:,2,:], tensebot_angvel[:,2,:],
goal_pos, to_target_norm, actions), dim=-1)
return obs, potentials, prev_potentials_new | import numpy as np
import os
import torch
import time
import matplotlib.pyplot as plt
from isaacgym import gymutil, gymtorch, gymapi
from isaacgym.torch_utils import *
from isaacgym.gymtorch import *
from isaacgymenvs.utils.torch_jit_utils import *
from tasks.base.vec_task import VecTask
class TenseBot(VecTask):
def __init__(self, cfg, sim_device, graphics_device_id, headless):
self.cfg = cfg
self.dt = self.cfg["sim"]["dt"]
self.max_episode_length = self.cfg["env"]["maxEpisodeLength"]
# self.randomization_params = self.cfg["task"]["randomization_params"]
# self.randomize = self.cfg["task"]["randomize"]
self.dof_vel_scale = self.cfg["env"]["dofVelocityScale"]
# self.contact_force_scale = self.cfg["env"]["contactForceScale"]
# self.power_scale = self.cfg["env"]["powerScale"]
self.heading_weight = self.cfg["env"]["headingWeight"]
self.up_weight = self.cfg["env"]["upWeight"]
# self.actions_cost_scale = self.cfg["env"]["actionsCost"]
# self.energy_cost_scale = self.cfg["env"]["energyCost"]
# self.joints_at_limit_cost_scale = self.cfg["env"]["jointsAtLimitCost"]
self.death_cost = self.cfg["env"]["deathCost"]
self.termination_height = self.cfg["env"]["terminationHeight"]
# self.debug_viz = self.cfg["env"]["enableDebugVis"]
self.plane_static_friction = self.cfg["env"]["plane"]["staticFriction"]
self.plane_dynamic_friction = self.cfg["env"]["plane"]["dynamicFriction"]
self.plane_restitution = self.cfg["env"]["plane"]["restitution"]
self.drive_mode = self.cfg["env"]["actuatorParams"]["driveMode"]
self.stiffness = self.cfg["env"]["actuatorParams"]["stiffness"] * self.drive_mode
self.damping = self.cfg["env"]["actuatorParams"]["damping"] * self.drive_mode
self.maxPosition = self.cfg["env"]["actuatorParams"]["maxPosition"]
self.maxSpeed = self.cfg["env"]["actuatorParams"]["maxSpeed"]
self.maxTorque = self.cfg["env"]["actuatorParams"]["maxTorque"]
self.friction = self.cfg["env"]["actuatorParams"]["friction"]
self.torqueDecay = self.cfg["env"]["actuatorParams"]["torqueDecay"]
self.angularDamping = self.cfg["env"]["assetParams"]["angularDamping"]
self.angularVelocity = self.cfg["env"]["assetParams"]["angularVelocity"]
self.goal_dist = self.cfg["env"]["goalDist"]
self.goal_threshold = self.cfg["env"]["goalThreshold"]
# obs_buf shape: (54)
# obs_buf[0:39] = Rod State x 3 : Pos(3), Ori(4), LinVel(3), AngVel(3)
# obs_buf[39:42] = Goal Pos : Pos(3)
# obs_buf[42:45] = vector to goal (3)
# obs_buf[45:54] = actions : Spring Length Multipliers (9)
self.cfg["env"]["numObservations"] = 54
# Spring Length Multipliers (9)
self.cfg["env"]["numActions"] = 9
super().__init__(config=self.cfg, sim_device=sim_device, graphics_device_id=graphics_device_id, headless=headless)
# set init state
pos = self.cfg["env"]["baseInitState"]["pos"]
rot = self.cfg["env"]["baseInitState"]["rot"]
v_lin = self.cfg["env"]["baseInitState"]["vLinear"]
v_ang = self.cfg["env"]["baseInitState"]["vAngular"]
state = pos + rot + v_lin + v_ang
self.base_init_state = torch.tensor(state, device=self.device)
self.start_rotation = torch.tensor(rot, device=self.device)
# get gym GPU root state tensor
actor_root_state = self.gym.acquire_actor_root_state_tensor(self.sim)
self.root_states = gymtorch.wrap_tensor(actor_root_state)
print('root_state')
print(self.root_states.cpu().detach().numpy())
print(self.root_states.shape)
print('num_envs {}, num_actors {}'.format(self.num_envs, self.num_actors))
self.tensebot_pos = self.root_states.view(self.num_envs, self.num_actors, 13)[..., 0:3, 0:3] #num_envs, num_actors, 13 (pos,ori,Lvel,Avel)
self.tensebot_ori = self.root_states.view(self.num_envs, self.num_actors, 13)[..., 0:3, 3:7] #num_envs, num_actors, 13 (pos,ori,Lvel,Avel)
self.tensebot_linvel = self.root_states.view(self.num_envs, self.num_actors, 13)[..., 0:3, 7:10] #num_envs, num_actors, 13 (pos,ori,Lvel,Avel)
self.tensebot_angvel = self.root_states.view(self.num_envs, self.num_actors, 13)[..., 0:3, 10:13] #num_envs, num_actors, 13 (pos,ori,Lvel,Avel)
self.goal_pos = self.root_states.view(self.num_envs, self.num_actors, 13)[..., 3, 0:3] #num_envs, num_actors, 13 (pos,ori,Lvel,Avel)
self.gym.refresh_actor_root_state_tensor(self.sim)
self.tensebot_root_state = self.root_states.view(self.num_envs, self.num_actors, 13)[..., 0:3, :]
self.tensebot_initial_root_states = self.tensebot_root_state.clone()
# self.tensebot_initial_root_states[:] = to_torch(self.base_init_state, device=self.device, requires_grad=False)
rb_state_tensor = self.gym.acquire_rigid_body_state_tensor(self.sim)
self.rb_state = gymtorch.wrap_tensor(rb_state_tensor)
print('rigid_body_state')
print(self.rb_state.cpu().detach().numpy())
print(self.rb_state.shape)
print('num_envs {}, num_bodies {}'.format(self.num_envs, self.num_bodies))
self.rb_pos = self.rb_state.view(self.num_envs, self.num_bodies, 13)[:, :, 0:3] #num_envs, num_rigid_bodies, 13 (pos,ori,Lvel,Avel)
self.rb_ori = self.rb_state.view(self.num_envs, self.num_bodies, 13)[:, :, 3:7] #num_envs, num_rigid_bodies, 13 (pos,ori,Lvel,Avel)
self.rb_linvel = self.rb_state.view(self.num_envs, self.num_bodies, 13)[:, :, 7:10] #num_envs, num_rigid_bodies, 13 (pos,ori,Lvel,Avel)
self.rb_angvel = self.rb_state.view(self.num_envs, self.num_bodies, 13)[:, :, 10:13] #num_envs, num_rigid_bodies, 13 (pos,ori,Lvel,Avel)
# Used for rewarding moving towards a target
# tensebot_avg_pos = torch.mean(self.tensebot_pos, dim=1)
tensebot_avg_pos = self.tensebot_pos[:,0,:]
to_target = self.goal_pos - tensebot_avg_pos
to_target[:, 2] = 0.0
self.potentials = -torch.norm(to_target, p=2, dim=-1) / self.dt
self.prev_potentials = self.potentials.clone()
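# Added note: potentials are defined as -distance_to_goal / dt, so the progress
# term used later in the reward, potentials - prev_potentials, is roughly
# (previous_distance - current_distance) / dt -- positive while the bot closes
# the gap to the goal and negative when it drifts away.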
self.goal_reset = torch.ones(self.num_envs, device=self.device, dtype=torch.long)
goal_ids = self.goal_reset.nonzero(as_tuple=False).squeeze(-1)
if len(goal_ids) > 0:
self.reset_goal(goal_ids)
# Measurements for rewards
self.up_vec = to_torch(get_axis_params(1., self.up_axis_idx), device=self.device).repeat((self.num_envs, 1))
self.heading_vec = to_torch([1, 0, 0], device=self.device).repeat((self.num_envs, 1))
self.inv_start_rot = quat_conjugate(self.start_rotation).repeat((self.num_envs, 1))
self.basis_vec0 = self.heading_vec.clone()
self.basis_vec1 = self.up_vec.clone()
self.frame_count = 0
self.plot_buffer = []
self.accumulated_reward = torch.zeros_like(self.rew_buf)
camOffset = gymapi.Vec3(0, -1.5, 0.25)
camTarget = gymapi.Vec3(self.tensebot_pos[0, 0, 0],self.tensebot_pos[0, 0, 1],self.tensebot_pos[0, 0, 2])
self.gym.viewer_camera_look_at(self.viewer, None, camOffset+camTarget, camTarget)
def create_sim(self):
# set the up axis to be z-up given that assets are y-up by default
self.up_axis_idx = self.set_sim_params_up_axis(self.sim_params, 'z')
# self.sim_params.gravity = gymapi.Vec3(0.0, 0.0, 0.0)
self.sim = super().create_sim(self.device_id, self.graphics_device_id, self.physics_engine, self.sim_params)
self._create_ground_plane()
print(f'num envs {self.num_envs} env spacing {self.cfg["env"]["envSpacing"]}')
self._create_envs(self.num_envs, self.cfg["env"]['envSpacing'], int(np.sqrt(self.num_envs)))
def _create_ground_plane(self):
plane_params = gymapi.PlaneParams()
# set the plane normal to the z direction
plane_params.normal = gymapi.Vec3(0.0, 0.0, 1.0)
plane_params.static_friction = self.plane_static_friction
plane_params.dynamic_friction = self.plane_dynamic_friction
self.gym.add_ground(self.sim, plane_params)
def _create_envs(self, num_envs, spacing, num_per_row):
# define plane on which environments are initialized
lower = gymapi.Vec3(0.5 * -spacing, -spacing, 0.0)
upper = gymapi.Vec3(0.5 * spacing, spacing, spacing)
asset_root = os.path.join(os.path.dirname(os.path.abspath(__file__)), "../../assets")
asset_file = "urdf/RodAssembly/urdf/RodAssembly.urdf"
if "asset" in self.cfg["env"]:
asset_root = os.path.join(os.path.dirname(os.path.abspath(__file__)), self.cfg["env"]["asset"].get("assetRoot", asset_root))
asset_file = self.cfg["env"]["asset"].get("assetFileName", asset_file)
asset_path = os.path.join(asset_root, asset_file)
asset_root = os.path.dirname(asset_path)
asset_file = os.path.basename(asset_path)
asset_options = gymapi.AssetOptions()
asset_options.fix_base_link = False
asset_options.angular_damping = self.angularDamping
asset_options.max_angular_velocity = self.angularVelocity
rod_assembly_asset = self.gym.load_asset(self.sim, asset_root, asset_file, asset_options)
self.num_dof = self.gym.get_asset_dof_count(rod_assembly_asset)
goal_asset = self.gym.create_sphere(self.sim, 0.025)
self.num_bodies = self.gym.get_asset_rigid_body_count(rod_assembly_asset)*3 + self.gym.get_asset_rigid_body_count(goal_asset) #3 rod assemblies per tensebot
# self.num_actor = get_sim_actor_count
pose = gymapi.Transform()
self.rod_handles = []
self.tensebot_handles = []
self.goal_handles = []
self.envs = []
self.dof_limits_lower = []
self.dof_limits_upper = []
for i in range(self.num_envs):
# create env instance
tensebot_handle = []
env_ptr = self.gym.create_env(
self.sim, lower, upper, num_per_row
)
radius = 0.05
thetas = [0, 3.1415*2/3, 3.1415*4/3] #0 deg, 120 deg, 240 deg
for t, j in zip(thetas, range(len(thetas))):
pose.p = gymapi.Vec3(radius*torch.cos(torch.tensor(t)), radius*torch.sin(torch.tensor(t)), 0.1)
pose.r = gymapi.Quat.from_euler_zyx(-3.1415/4, 0, t)
rod_handle = self.gym.create_actor(env_ptr, rod_assembly_asset, pose, "rodassembly{}".format(j), i, 0, 0)
rand_color = torch.rand((3), device=self.device)
for body_idx in range(self.num_bodies): # renamed from `j` to avoid shadowing the enclosing loop index
# self.gym.set_rigid_body_color(
# env_ptr, tensebot_handle, body_idx, gymapi.MESH_VISUAL, gymapi.Vec3(0.27, 0.1, 0.66))
self.gym.set_rigid_body_color(
env_ptr, rod_handle, body_idx, gymapi.MESH_VISUAL, gymapi.Vec3(rand_color[0],rand_color[1],rand_color[2]))
self.rod_handles.append(rod_handle)
tensebot_handle.append(rod_handle)
self.tensebot_handles.append(tensebot_handle)
self.envs.append(env_ptr)
# Set Up the Goal Actor
goal_pose = gymapi.Transform()
goal_pose.p.y = self.goal_dist
goal_handle = self.gym.create_actor(env_ptr, goal_asset, goal_pose, "goal", i, 1, 1)
self.gym.set_rigid_body_color(env_ptr, goal_handle, 0, gymapi.MESH_VISUAL, gymapi.Vec3(0.2, 0.8, 0.2))
self.goal_handles.append(goal_handle)
self.num_actors = self.gym.get_actor_count(self.envs[0])
self.body_dict = self.gym.get_actor_rigid_body_dict(env_ptr, tensebot_handle[0])
self.joint_dict = self.gym.get_actor_joint_dict(env_ptr, tensebot_handle[0])
print('body_dict:')
print(self.body_dict)
for b in self.body_dict:
print(b)
print('joint_dict:')
for j in self.joint_dict:
print(j)
def compute_reward(self):
self.rew_buf[:], self.reset_buf[:], self.goal_reset = compute_tensebot_reward(
self.tensebot_pos,
self.goal_pos,
self.reset_buf,
self.progress_buf,
self.potentials,
self.prev_potentials,
self.max_episode_length,
self.goal_threshold)
def compute_observations(self, env_ids=None):
if env_ids is None:
env_ids = np.arange(self.num_envs)
self.gym.refresh_dof_state_tensor(self.sim)
self.gym.refresh_actor_root_state_tensor(self.sim)
self.gym.refresh_rigid_body_state_tensor(self.sim)
# print('self.root_state')
# print(self.root_states[0,:])
# print(self.root_states.shape)
# time.sleep(1)
self.obs_buf[:], self.potentials[:], self.prev_potentials[:] = compute_tensebot_observations(
self.tensebot_pos,
self.tensebot_ori,
self.tensebot_linvel,
self.tensebot_angvel,
self.goal_pos,
self.potentials,
self.actions,
self.dt)
return self.obs_buf
def reset_idx(self, env_ids):
print('Resetting IDX! Env_IDs = {}'.format(env_ids))
env_ids_int32 = env_ids.to(dtype=torch.int32)*self.num_actors
env_ids_int32 = torch.cat((env_ids_int32, env_ids_int32+1, env_ids_int32+2))
self.tensebot_root_state[env_ids, :, :] = self.tensebot_initial_root_states[env_ids, :, :]
self.gym.set_actor_root_state_tensor_indexed(self.sim,
gymtorch.unwrap_tensor(self.root_states),
gymtorch.unwrap_tensor(env_ids_int32), len(env_ids_int32))
self.reset_buf[env_ids] = 0
self.progress_buf[env_ids] = 0
self.goal_reset[env_ids] = 1
# actor_root_state = self.gym.acquire_actor_root_state_tensor(self.sim)
# self.root_states = gymtorch.wrap_tensor(actor_root_state)
# self.initial_root_states = self.root_states.clone()
# self.initial_root_states[:] = to_torch(self.base_init_state, device=self.device, requires_grad=False)
# plt.plot([0,0,0])
# plt.show()
# if(self.plot_buffer):
# plot_data = np.array(self.plot_buffer)
# print(plot_data.shape)
# plt.plot(plot_data[:,0,0] + plot_data[:,1,0] + plot_data[:,2,0], label="Total Reward")
# plt.plot(plot_data[:,0,0], label="Progress Reward")
# plt.plot(plot_data[:,1,0], label="Height Reward")
# plt.plot(plot_data[:,2,0], label="Heading Reward")
# plt.ylabel('Reward')
# plt.xlabel('Steps')
# plt.grid()
# plt.legend(loc="lower right")
# plt.xlim([0, 500])
# plt.ylim([-0.1, 2.1])
# plt.show()
# self.plot_buffer = []
def reset_goal(self, env_ids):
print('reset_goal')
self.gym.refresh_actor_root_state_tensor(self.sim)
# print('Resetting Goals! Env_IDs = {}'.format(env_ids))
# print('Old Goal Position = {}'.format(self.goal_pos))
env_ids_int32 = env_ids.to(dtype=torch.int32)*self.num_actors
goal_pos_update = torch_rand_float(-self.goal_dist, self.goal_dist, (len(env_ids), 3), device=self.device)
# goal_pos_update[:,0] = 1000.0
# goal_pos_update[:,1] = 0.0
goal_pos_update[:,2] = 0.1
self.goal_pos[env_ids, :] = goal_pos_update
self.gym.set_actor_root_state_tensor_indexed(self.sim,
gymtorch.unwrap_tensor(self.root_states),
gymtorch.unwrap_tensor(env_ids_int32+3), len(env_ids_int32))
# self.gym.refresh_actor_root_state_tensor(self.sim)
# tensebot_avg_pos = torch.mean(self.tensebot_pos, dim=1)
tensebot_avg_pos = self.tensebot_pos[:,0,:]
to_target = self.goal_pos[env_ids, :] - tensebot_avg_pos[env_ids, :]
to_target[:, 2] = 0.0
self.prev_potentials[env_ids] = -torch.norm(to_target, p=2, dim=-1) / self.dt
self.potentials[env_ids] = self.prev_potentials[env_ids].clone()
self.goal_reset[env_ids] = 0
# print('New Goal Position = {}'.format(self.goal_pos))
def pre_physics_step(self, actions):
# print('actions')
# print(actions)
# print(actions.shape)
# print(actions.to(self.device).squeeze().shape())
self.actions = actions.clone().detach().to(self.device)
self.calculate_tensegrity_forces(self.actions)
def calculate_tensegrity_forces(self, actions):
# # print('actions : {}'.format(actions))
connection_list = []
# (1,2),(4,5),(7,8) end point indices (bottom, top)
# 0, 3, 6 are the body indices
# 9 is the goal index
# This might need a low pass filter
spring_length_multiplier = actions/4 + 1 # Multiplier range from 0.75 to 1.25 for actions in [-1, 1]
# spring_length_multiplier = torch.rand((self.num_envs, 9), device=self.device)/4 + 1
# spring_length_multiplier = torch.ones((self.num_envs, 9), device=self.device)*0.1
# Connect All Bottoms
connection_list.append((1, 4, 0.1))
connection_list.append((1, 7, 0.1))
connection_list.append((4, 7, 0.1))
#Connect All Tops
connection_list.append((2, 5, 0.1))
connection_list.append((2, 8, 0.1))
connection_list.append((5, 8, 0.1))
#Top1 to Bottom2
connection_list.append((2, 4, 0.1)) #Body0 top is connected to Body1 bottom
#Top2 to Bottom3
connection_list.append((5, 7, 0.1)) #Body1 top is connected to Body2 bottom
#Top3 to Bottom1
connection_list.append((8, 1, 0.1)) #Body2 top is connected to Body0 bottom
#Connect All The Things...
forces = torch.zeros_like(self.rb_pos, device=self.device, dtype=torch.float)
force_positions = self.rb_pos.clone()
lin_vel_mat = torch.zeros((self.num_envs, 2, 3), device=self.device, dtype=torch.float) # Used in calculating damping force
diff_matrix= torch.tensor([[-1, 1]], device=self.device, dtype=torch.float)
num_lines = len(connection_list)
line_vertices = torch.zeros((num_lines*2,3), device=self.device, dtype=torch.float)
line_colors = torch.zeros((num_lines,3), device=self.device, dtype=torch.float)
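# Added summary of the per-connection force model evaluated in the loop below
# (the constants mirror spring_constant and damping_coff set inside the loop):
#   u        = (P1 - P2) / |P1 - P2|                       unit vector between endpoints
#   F_spring = k * max(|P1 - P2| - rest_length * mult, 0)  tension-only spring, k = 25
#   F_damp   = c * (relative endpoint velocity projected onto u), c = 0.99
# Equal-and-opposite contributions are accumulated on the two endpoint bodies and
# applied via apply_rigid_body_force_at_pos_tensors after the loop.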
for connection, i in zip(connection_list, range(len(connection_list))):
# print(connection)
# Spring Force
P1 = self.rb_pos[:, connection[0], :]
P2 = self.rb_pos[:, connection[1], :]
endpoint_vector = P1-P2
# print('endpoint_vector.shape')
# print(endpoint_vector.shape)
spring_constant = 25
damping_coff = 0.99
spring_length = connection[2] * spring_length_multiplier[:, i]
# print('spring_length.shape')
# print(spring_length.shape)
# print('P1.shape')
# print(P1.shape)
# print('P2.shape')
# print(P2.shape)
endpoint_distance = torch.norm(endpoint_vector, dim=1)
# print('endpoint_distance.shape')
# print(endpoint_distance.shape)
endpoint_vector_normalized = torch.div(endpoint_vector, torch.unsqueeze(endpoint_distance,1).repeat(1,3))
# print('endpoint_vector_normalized.shape')
# print(endpoint_vector_normalized.shape)
spring_force = spring_constant*(endpoint_distance-spring_length)
# print('spring_force.shape')
# print(spring_force.shape)
# Set springs to only work for tension and not compression
spring_force = torch.max(spring_force, torch.zeros_like(spring_force))
applied_force = torch.mul(endpoint_vector_normalized, torch.unsqueeze(spring_force,1).repeat(1,3))
applied_force = torch.nan_to_num(applied_force, nan=0.0)
# print('applied force')
# print(applied_force.shape)
# print('Spring {} Tension = {}'.format(i, spring_force))
# print('forces.shape')
# print(forces.shape)
# print(connection[0])
# print(connection[1])
forces[:, connection[0], :] -= applied_force
forces[:, connection[1], :] += applied_force
# print('forces[0,:,:]')
# print(forces[0,:,:])
# print('applied_force[0,:]')
# print(applied_force[0,:])
# print('endpoint_vector_normalized')
# print(endpoint_vector_normalized)
# print(endpoint_distance)
# Damping
lin_vel_mat[:, 0, :] = self.rb_linvel[:, connection[0], :]
lin_vel_mat[:, 1, :] = self.rb_linvel[:, connection[1], :]
EVN_mat = torch.unsqueeze(endpoint_vector_normalized, 2)
# print(lin_vel_mat.shape)
# print(EVN_mat.shape)
damping_force = torch.matmul(diff_matrix, torch.matmul(lin_vel_mat, EVN_mat))*damping_coff
# print('damping_force.shape')
# print(torch.squeeze(damping_force, dim=2).shape)
# print('endpoint_vector_normalized.shape')
# print(endpoint_vector_normalized.shape)
damping_force_vector = endpoint_vector_normalized *torch.squeeze(damping_force, dim=2)
# print('damping_force_vector.shape')
# print(damping_force_vector.shape)
damping_force_vector = torch.nan_to_num(damping_force_vector, nan=0.0)
forces[:, connection[0], :] += damping_force_vector
forces[:, connection[1], :] -= damping_force_vector
# Draw Spring Connections?
line_vertices[i*2,:] = self.rb_pos[0, connection[0], :]
line_vertices[i*2+1,:] = self.rb_pos[0, connection[1], :]
line_colors[i,:] = torch.tensor([1.0, 1.0, 1.0])
self.gym.apply_rigid_body_force_at_pos_tensors(self.sim, gymtorch.unwrap_tensor(forces), gymtorch.unwrap_tensor(force_positions), gymapi.ENV_SPACE)
self.gym.clear_lines(self.viewer)
self.gym.add_lines(self.viewer, self.envs[0], num_lines, line_vertices.cpu().detach(), line_colors.cpu().detach())
def post_physics_step(self):
self.progress_buf += 1
env_ids = self.reset_buf.nonzero(as_tuple=False).squeeze(-1)
if len(env_ids) > 0:
self.reset_idx(env_ids)
goal_ids = self.goal_reset.nonzero(as_tuple=False).squeeze(-1)
if len(goal_ids) > 0:
self.reset_goal(goal_ids)
self.compute_observations()
self.compute_reward()
# Look at the first actor
env_idx = 0
camOffset = gymapi.Vec3(0, -1.5, 0.25)
camTarget = gymapi.Vec3(self.tensebot_pos[env_idx, 0, 0],self.tensebot_pos[env_idx, 0, 1],self.tensebot_pos[env_idx, 0, 2])
camEnvOffset = gymapi.Vec3(0, 0, 0)
# print(camOffset)
# print(camTarget)
# self.gym.viewer_camera_look_at(self.viewer, None, camOffset+camTarget+camEnvOffset, camTarget+camEnvOffset)
# time.sleep(0.1)
# self.debug_printout(env_ids)
def debug_printout(self, env_ids):
self.accumulated_reward += self.rew_buf
# print('potentials and previous potentials')
# print(self.potentials)
# print(self.prev_potentials)
print('reward buf')
print(self.rew_buf)
if len(env_ids) > 0:
self.accumulated_reward[env_ids] = 0
print('self.accumulated_reward')
print(self.accumulated_reward)
# # print('DEBUG PRINTOUTS')
# # body_height = self.obs_buf[:,2]
# # up_projection = self.obs_buf[:,29]
# # heading_projection = self.obs_buf[:, 30]
# # heading_reward = self.heading_weight * heading_projection
# # # aligning up axis and environment
# # up_reward = torch.zeros_like(heading_reward)
# # up_reward = torch.where(up_projection > 0.93, up_reward + self.up_weight, up_reward)
# # # reward for duration of staying alive
# # progress_reward = self.potentials - self.prev_potentials
# # total_reward = progress_reward + up_reward + heading_reward]
# xtream_rewards = torch.abs(self.rew_buf) > 5
# # print('ProgressReward[3] : {} = {} - {}'.format(progress_reward[3], self.potentials[3], self.prev_potentials[3]))
# # print('EnvReset[3], GoalReset[3] : {}, {}'.format(self.reset_buf[3], self.goal_reset[3]))
# # print('Bot Pos, Goal Pos = {}, {}'.format(self.tensebot_pos[3,:], self.goal_pos[3,:]))
# if(torch.any(xtream_rewards)):
# print('XTREAM REWARD DETECTED')
# xtream_idx = xtream_rewards.nonzero().cpu().detach().numpy()
# print("xtream index = {}".format(xtream_idx))
# print(self.rew_buf[xtream_idx])
# print('Progress Reward : {} = {} - {}'.format(progress_reward[xtream_idx], self.potentials[xtream_idx], self.prev_potentials[xtream_idx]))
# print('EnvReset, GoalReset : {},{}'.format(self.reset_buf[xtream_idx], self.goal_reset[xtream_idx]))
# time.sleep(10)
# print()
# # print('{:.2f} = {:.2f} + {:.2f} + {:.2f}'.format(total_reward[0], heading_reward[0], up_reward[0], progress_reward[0]))
# # print(' self.reset_buf')
# # print( self.reset_buf)
# # tmp_progress_reward = self.potentials - self.prev_potentials
# # if( np.abs(tmp_progress_reward[0].cpu().detach().numpy()) > 1):
# # print('{} : {} : {}'.format(tmp_progress_reward[0], self.potentials[0], self.prev_potentials[0]))
# # time.sleep(1)
# # tmp_height_reward = self.obs_buf[:,0]
# # tmp_heading_reward = self.rew_buf - tmp_progress_reward
# # self.plot_buffer.append((tmp_progress_reward.cpu().detach().numpy(),
# # tmp_height_reward.cpu().detach().numpy(),
# # tmp_heading_reward.cpu().detach().numpy()))
#####################################################################
###=========================jit functions=========================###
#####################################################################
@torch.jit.script
def compute_tensebot_reward(
tensebot_pos,
goal_pos,
reset_buf,
progress_buf,
potentials,
prev_potentials,
max_episode_length,
goal_threshold):
# type: (Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, float, float) -> Tuple[Tensor, Tensor, Tensor]
# reward for duration of staying alive
progress_reward = potentials - prev_potentials
total_reward = progress_reward
# reset agents
reset = torch.where(progress_buf >= max_episode_length - 1, torch.ones_like(reset_buf), reset_buf)
# tensebot_avg_pos = torch.mean(tensebot_pos, dim=1)
tensebot_avg_pos = tensebot_pos[:,0,:]
distance_to_goal = torch.norm(tensebot_avg_pos - goal_pos, dim=-1)
goal_reached = torch.where(distance_to_goal < goal_threshold, 1, 0)
goal_reset = torch.where(goal_reached==1, 1, 0)
return total_reward, reset, goal_reset
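# Added note: the reward is purely the potential-based progress term; `reset`
# only fires on episode timeout, while `goal_reset` flags environments whose
# leading rod is within goal_threshold of the goal (it mirrors `goal_reached`
# and triggers a goal re-spawn rather than an episode reset).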
@torch.jit.script
def compute_tensebot_observations(tensebot_pos, #Tensor
tensebot_ori, #Tensor
tensebot_linvel, #Tensor
tensebot_angvel, #Tensor
goal_pos, #Tensor
potentials, #Tensor
actions, #Tensor
dt #float
):
# type: (Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, float) -> Tuple[Tensor, Tensor, Tensor]
# tensebot_avg_pos = torch.mean(tensebot_pos, dim=1)
tensebot_avg_pos = tensebot_pos[:,0,:]
to_target = goal_pos - tensebot_avg_pos
to_target[:, 2] = 0.0
to_target_norm = torch.div(to_target, torch.unsqueeze(torch.norm(to_target, p=2, dim=-1),1).repeat(1,3))
prev_potentials_new = potentials.clone()
potentials = -torch.norm(to_target, p=2, dim=-1) / dt
# torso_quat, up_proj, heading_proj, up_vec, heading_vec = compute_heading_and_up(
# tensebot_ori, inv_start_rot, to_target, basis_vec0, basis_vec1, 2)
# vel_loc, angvel_loc, roll, pitch, yaw, angle_to_target = compute_rot(
# torso_quat, tensebot_linvel, tensebot_angvel, goal_pos, tensebot_pos)
# obs_buf shapes: (54)
# obs_buf[0:39] = Rod State x 3 : Pos(3), Ori(4), LinVel(3), AngVel(3)
# obs_buf[39:42] = Goal Pos : Pos(3)
# obs_buf[42:45] = vector to goal (3)
# obs_buf[45:54] = actions : Spring Length Multipliers (9)
obs = torch.cat((tensebot_pos[:,0,:], tensebot_ori[:,0,:], tensebot_linvel[:,0,:], tensebot_angvel[:,0,:],
tensebot_pos[:,1,:], tensebot_ori[:,1,:], tensebot_linvel[:,1,:], tensebot_angvel[:,1,:],
tensebot_pos[:,2,:], tensebot_ori[:,2,:], tensebot_linvel[:,2,:], tensebot_angvel[:,2,:],
goal_pos, to_target_norm, actions), dim=-1)
return obs, potentials, prev_potentials_new |
import logging
import os
from random import shuffle
import nltk
from utils import remove_ngram, tokenise
DIR_PATH = os.path.dirname(os.path.realpath(__file__))
def random_ngrams(ngrams, number_of_ngrams):
relevant_ngrams = ngrams[:number_of_ngrams]
remaining_ngrams = ngrams[number_of_ngrams:]
relevant_tagged = nltk.pos_tag(relevant_ngrams)
remaining_tagged = nltk.pos_tag([i for i in remaining_ngrams if i])
shuffle(remaining_tagged)
random_ngrams_list = []
for _, tag in relevant_tagged:
try:
random_pos_tag = next(pos_tag for pos_tag in remaining_tagged
if pos_tag[1] == tag)
remaining_tagged.remove(random_pos_tag)
random_ngrams_list.append(random_pos_tag[0])
except StopIteration:
continue
return random_ngrams_list
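# Added illustrative sketch (hypothetical values, assuming NLTK's default POS
# tagger is installed): random_ngrams(["good", "day", "run", ...], 2) tags the
# top-2 n-grams, then draws one random n-gram with a matching POS tag from the
# remainder for each of them -- preserving the part-of-speech profile of the
# top-ranked n-grams while randomizing their identity.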
def ngram_removal(filename, ngrams, n):
training_directory = f"{DIR_PATH}/../datasets/train/"
output_directory = f"{DIR_PATH}/../output/"
dataset_filename = "SemEval2018-T3-train-taskA_emoji.txt"
logging.info(f"Removing top {n} n-grams from {dataset_filename} using "
f"{filename} (CONTROL)")
out_filename = f"CONTROL-RANDOM-{dataset_filename[:-4]}_{filename[:-4]}_{n}.txt"
fout = open(f"{training_directory}{out_filename}", "w+")
fout.write("Tweet index Label Tweet text\n")
with open(f"{training_directory}{dataset_filename}") as f:
for line in f.readlines():
if line.lower().startswith("tweet index"):
continue
# Tokenise the tweet in the same way as when calculating n-gram
# frequencies, and replace certain n-grams in it
tweet = " ".join(tokenise(line.split("\t")[2]))
for ngram in ngrams:
tweet = remove_ngram(tweet, ngram, len(ngram.split()))
# Tweet has been completely removed, so don't include it
if not tweet:
continue
# Write tokenised tweet with n-grams replaced back to a file
split_line = line.split("\t")
split_line[2] = tweet
fout.write("\t".join(split_line) + "\n")
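# Added note: fout is opened without a context manager and never explicitly
# closed in ngram_removal; the output is only flushed when the file object is
# garbage collected or the interpreter exits. Wrapping the writes in
# `with open(...) as fout:` would be the safer pattern, but behaviour is left as-is.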
def create_control(filename, ngrams, number_of_ngrams):
ngrams = random_ngrams(ngrams, number_of_ngrams)
with open(f"{DIR_PATH}/control_random.csv", "a") as f:
f.write(f"{filename},{','.join(ngrams)}\n")
for n in range(number_of_ngrams):
ngram_removal(filename, ngrams[:n + 1], n + 1)
def control_handler(n, number_of_ngrams):
output_directory = f"{DIR_PATH}/../output/"
for filename in os.listdir(output_directory):
if not filename.startswith(f"{n}-gram"):
continue
ngrams = []
with open(os.path.join(output_directory, filename)) as f:
for line in f.read().splitlines():
if line.lower().startswith("position\t"):
continue
_, ngram = line.split("\t")[1:]
ngrams.append(ngram)
create_control(filename, ngrams, number_of_ngrams)
if __name__ == "__main__":
control_handler(1, 20)
| import logging
import os
from random import shuffle
import nltk
from utils import remove_ngram, tokenise
DIR_PATH = os.path.dirname(os.path.realpath(__file__))
def random_ngrams(ngrams, number_of_ngrams):
relevant_ngrams = ngrams[:number_of_ngrams]
remaining_ngrams = ngrams[number_of_ngrams:]
relevant_tagged = nltk.pos_tag(relevant_ngrams)
remaining_tagged = nltk.pos_tag([i for i in remaining_ngrams if i])
shuffle(remaining_tagged)
random_ngrams_list = []
for _, tag in relevant_tagged:
try:
random_pos_tag = next(pos_tag for pos_tag in remaining_tagged
if pos_tag[1] == tag)
remaining_tagged.remove(random_pos_tag)
random_ngrams_list.append(random_pos_tag[0])
except StopIteration:
continue
return random_ngrams_list
def ngram_removal(filename, ngrams, n):
training_directory = f"{DIR_PATH}/../datasets/train/"
output_directory = f"{DIR_PATH}/../output/"
dataset_filename = "SemEval2018-T3-train-taskA_emoji.txt"
logging.info(f"Removing top {n} n-grams from {dataset_filename} using "
f"{filename} (CONTROL)")
out_filename = f"CONTROL-RANDOM-{dataset_filename[:-4]}_{filename[:-4]}_{n}.txt"
fout = open(f"{training_directory}{out_filename}", "w+")
fout.write("Tweet index Label Tweet text\n")
with open(f"{training_directory}{dataset_filename}") as f:
for line in f.readlines():
if line.lower().startswith("tweet index"):
continue
# Tokenise the tweet in the same way as when calculating n-gram
# frequencies, and replace certain n-grams in it
tweet = " ".join(tokenise(line.split("\t")[2]))
for ngram in ngrams:
tweet = remove_ngram(tweet, ngram, len(ngram.split()))
# Tweet has been completely removed, so don't include it
if not tweet:
continue
# Write tokenised tweet with n-grams replaced back to a file
split_line = line.split("\t")
split_line[2] = tweet
fout.write("\t".join(split_line) + "\n")
def create_control(filename, ngrams, number_of_ngrams):
ngrams = random_ngrams(ngrams, number_of_ngrams)
with open(f"{DIR_PATH}/control_random.csv", "a") as f:
f.write(f"{filename},{','.join(ngrams)}\n")
for n in range(number_of_ngrams):
ngram_removal(filename, ngrams[:n + 1], n + 1)
def control_handler(n, number_of_ngrams):
output_directory = f"{DIR_PATH}/../output/"
for filename in os.listdir(output_directory):
if not filename.startswith(f"{n}-gram"):
continue
ngrams = []
with open(os.path.join(output_directory, filename)) as f:
for line in f.read().splitlines():
if line.lower().startswith("position\t"):
continue
_, ngram = line.split("\t")[1:]
ngrams.append(ngram)
create_control(filename, ngrams, number_of_ngrams)
if __name__ == "__main__":
control_handler(1, 20)
|
"""Accessor class for columns containing single-level key/value mappings
The FlatSampleReader container is used to store data (in any backend) in a column
containing a single level key/value mapping from names/ids to data.
All backends are supported.
"""
from contextlib import ExitStack
from pathlib import Path
from operator import attrgetter as op_attrgetter
from typing import Tuple, Union, Iterable, Optional, Any
from .common import open_file_handles
from ..records import (
data_record_db_val_from_digest,
data_record_digest_val_from_db_val,
flat_data_db_key_from_names,
hash_data_db_key_from_raw_key,
schema_db_key_from_column,
schema_hash_db_key_from_digest,
schema_hash_record_db_val_from_spec,
schema_record_db_val_from_digest
)
from ..records.parsing import generate_sample_name
from ..backends import backend_decoder
from ..op_state import reader_checkout_only
from ..utils import is_suitable_user_key
from ..optimized_utils import valfilter, valfilterfalse
KeyType = Union[str, int]
class FlatSampleReader:
"""Class implementing get access to data in a column.
This class exposes the standard API to access data stored in a single level
key / value mapping column. Usage is modeled after the python :class:`dict`
style syntax -- with a few additional utility and inspection methods and
properties added. Methods named after those of a python :class:`dict` have
syntactically identical arguments and behavior to that of the standard
library.
If not opened in a ``write-enabled`` checkout, then attempts to add or
delete data or container properties will raise an exception (in the form of
a :class:`PermissionError`). No changes will be propagated unless a
``write-enabled`` checkout is used.
This object can be serialized -- pickled -- for parallel processing /
reading if opened in a ``read-only`` checkout. Parallel operations are both
thread and process safe, though performance may significantly differ
between multithreaded vs multiprocessed code (depending on the backend data
is stored in). Attempts to serialize objects opened in ``write-enabled``
checkouts are not supported and will raise a :class:`PermissionError` if
attempted. This behavior is enforced in order to ensure data and record
integrity while writing to the repository.
"""
__slots__ = ('_mode', '_column_name', '_samples', '_be_fs',
'_path', '_stack', '_enter_count', '_schema')
_attrs = __slots__
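# Added read-side usage sketch (names are hypothetical; the column object is
# normally obtained from a read-only checkout's column accessor):
#
#   col = checkout.columns['some_column']   # a FlatSampleReader instance
#   value = col['sample_0']                 # dict-style access, raises KeyError if missing
#   value = col.get('maybe_missing')        # returns None instead of raising
#   for key, value in col.items():          # iterate (name, data) pairs
#       ...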
def __init__(self,
columnname: str,
samples,
backend_handles,
schema,
repo_path: Path,
mode: str,
*args, **kwargs):
self._stack: Optional[ExitStack] = None
self._mode = mode
self._column_name = columnname
self._samples = samples
self._be_fs = backend_handles
self._path = repo_path
self._schema = schema
self._enter_count = 0
@property
def _debug_(self): # pragma: no cover
return {
'__class__': self.__class__,
'_mode': self._mode,
'_column_name': self._column_name,
'_be_fs': self._be_fs,
'_path': self._path,
'_contains_subsamples': self.contains_subsamples,
'_stack': self._stack._exit_callbacks if self._stack else self._stack,
'_enter_count': self._enter_count,
}
def __repr__(self):
res = (
f'{self.__class__.__qualname__}('
f'repo_pth={self._path}, '
f'aset_name={self._column_name}, '
f"{[f'{key}={val}, ' for key, val in self._schema.schema.items()]}, "
f'mode={self._mode})')
return res
def _repr_pretty_(self, p, cycle):
res = f'Hangar {self.__class__.__qualname__} \
\n Column Name : {self._column_name}\
\n Writeable : {self.iswriteable}\
\n Column Type : {self.column_type}\
\n Column Layout : {self.column_layout}\
\n Schema Type : {self.schema_type}\
\n DType : {self.dtype}\
\n Shape : {self.shape}\
\n Number of Samples : {self.__len__()}\
\n Partial Remote Data Refs : {bool(self.contains_remote_references)}\n'
p.text(res)
def _ipython_key_completions_(self): # pragma: no cover
"""Let ipython know that any key based access can use the column keys
Since we don't want to inherit from dict, nor mess with `__dir__` for
the sanity of developers, this is the best way to ensure users can
autocomplete keys.
Returns
-------
list
list of strings, each being one of the column keys for access.
"""
return list(self.keys())
@reader_checkout_only
def __getstate__(self) -> dict:
"""ensure multiprocess operations can pickle relevant data.
"""
return {slot: getattr(self, slot) for slot in self.__slots__}
def __setstate__(self, state: dict) -> None:
"""ensure multiprocess operations can pickle relevant data.
Technically should be decorated with @reader_checkout_only, but since
at instance creation that is not an attribute, the decorator won't
know. Since only readers can be pickled, this isn't much of an issue.
"""
for slot, value in state.items():
setattr(self, slot, value)
def __enter__(self):
return self
def __exit__(self, *exc):
return
def _destruct(self):
if isinstance(self._stack, ExitStack):
self._stack.close()
self._close()
for attr in self._attrs:
delattr(self, attr)
def __getattr__(self, name):
"""Raise permission error after checkout is closed.
Only runs after a call to :meth:`_destruct`, which is responsible for
deleting all attributes from the object instance.
"""
try:
self.__getattribute__('_mode') # once checkout is closed, this won't exist.
except AttributeError:
err = (f'Unable to operate on past checkout objects which have been '
f'closed. No operation occurred. Please use a new checkout.')
raise PermissionError(err) from None
return self.__getattribute__(name)
@property
def _is_conman(self) -> bool:
return bool(self._enter_count)
def __iter__(self) -> Iterable[KeyType]:
"""Create an iterator yielding the column sample keys.
Yields
-------
Iterable[KeyType]
Sample key contained in the column.
"""
yield from self.keys()
def __len__(self) -> int:
"""Check how many samples are present in a given column.
"""
return len(self._samples)
def __contains__(self, key: KeyType) -> bool:
"""Determine if a key is a valid sample name in the column.
"""
return key in self._samples
def _open(self):
for val in self._be_fs.values():
val.open(mode=self._mode)
def _close(self):
for val in self._be_fs.values():
val.close()
def __getitem__(self, key: KeyType):
"""Retrieve data for some sample key via dict style access conventions.
.. seealso:: :meth:`get`
Parameters
----------
key : KeyType
Sample key to retrieve from the column.
Returns
-------
value
Data corresponding to the provided sample key.
Raises
------
KeyError
if no sample with the requested key exists.
"""
spec = self._samples[key]
return self._be_fs[spec.backend].read_data(spec)
def get(self, key: KeyType, default=None):
"""Retrieve the data associated with some sample key
Parameters
----------
key : KeyType
The name of the subsample(s) to retrieve. Passing a single
subsample key will return the stored data value.
default : Any
if a `key` parameter is not found, then return this value instead.
By default, None.
Returns
-------
value
data stored under the sample key if the key exists, else the
default value if not found.
"""
try:
return self[key]
except KeyError:
return default
@property
def column(self) -> str:
"""Name of the column.
"""
return self._column_name
@property
def column_type(self):
"""Data container type of the column ('ndarray', 'str', etc).
"""
return self._schema.column_type
@property
def column_layout(self):
"""Column layout type ('nested', 'flat', etc).
"""
return self._schema.column_layout
@property
def schema_type(self):
"""Schema type of the contained data ('variable_shape', 'fixed_shape', etc).
"""
return self._schema.schema_type
@property
def dtype(self):
"""Dtype of the columns data (np.float, str, etc).
"""
return self._schema.dtype
@property
def shape(self):
"""(Max) shape of data that can (is) written in the column.
"""
try:
return self._schema.shape
except AttributeError:
return None
@property
def backend(self) -> str:
"""Code indicating which backing store is used when writing data.
"""
return self._schema.backend
@property
def backend_options(self):
"""Filter / Compression options applied to backend when writing data.
"""
return self._schema.backend_options
@property
def iswriteable(self) -> bool:
"""Bool indicating if this column object is write-enabled.
"""
return False if self._mode == 'r' else True
@property
def contains_subsamples(self) -> bool:
"""Bool indicating if sub-samples are contained in this column container.
"""
return False
@property
def contains_remote_references(self) -> bool:
"""Bool indicating if all samples in column exist on local disk.
The data associated with samples referencing some remote server will
need to be downloaded (``fetched`` in the hangar vocabulary) before
they can be read into memory.
Returns
-------
bool
False if at least one sample in the column references data stored
on some remote server. True if all sample data is available on the
machine's local disk.
"""
_islocal_func = op_attrgetter('islocal')
return not all(map(_islocal_func, self._samples.values()))
@property
def remote_reference_keys(self) -> Tuple[KeyType]:
"""Compute sample names whose data is stored in a remote server reference.
Returns
-------
Tuple[KeyType]
list of sample keys in the column whose data references indicate
they are stored on a remote server.
"""
_islocal_func = op_attrgetter('islocal')
return tuple(valfilterfalse(_islocal_func, self._samples).keys())
def _mode_local_aware_key_looper(self, local: bool) -> Iterable[KeyType]:
"""Generate keys for iteration with dict update safety ensured.
Parameters
----------
local : bool
True if keys should be returned which only exist on the local machine.
False if remote sample keys should be included as well.
Returns
-------
Iterable[KeyType]
Sample keys conforming to the `local` argument spec.
"""
_islocal_func = op_attrgetter('islocal')
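# Added note: in write mode the keys are materialized into a tuple before being
# yielded so samples can be added or deleted while iterating without raising
# "dictionary changed size during iteration"; in read mode the mapping cannot
# change underneath the iterator, so the live dict view is yielded directly.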
if local:
if self._mode == 'r':
yield from valfilter(_islocal_func, self._samples).keys()
else:
yield from tuple(valfilter(_islocal_func, self._samples).keys())
else:
if self._mode == 'r':
yield from self._samples.keys()
else:
yield from tuple(self._samples.keys())
def keys(self, local: bool = False) -> Iterable[KeyType]:
"""Generator yielding the name (key) of every subsample.
Parameters
----------
local : bool, optional
If True, returned keys will only correspond to data which is
available for reading on the local disk, by default False.
Yields
------
Iterable[KeyType]
Keys of one subsample at a time inside the sample.
"""
yield from self._mode_local_aware_key_looper(local)
def values(self, local: bool = False) -> Iterable[Any]:
"""Generator yielding the data for every subsample.
Parameters
----------
local : bool, optional
If True, returned values will only correspond to data which is
available for reading on the local disk. No attempt will be made to
read data existing on a remote server, by default False.
Yields
------
Iterable[Any]
Values of one subsample at a time inside the sample.
"""
for key in self._mode_local_aware_key_looper(local):
yield self[key]
def items(self, local: bool = False) -> Iterable[Tuple[KeyType, Any]]:
"""Generator yielding (name, data) tuple for every subsample.
Parameters
----------
local : bool, optional
If True, returned keys/values will only correspond to data which is
available for reading on the local disk. No attempt will be made to
read data existing on a remote server, by default False.
Yields
------
Iterable[Tuple[KeyType, Any]]
Name and stored value for every subsample inside the sample.
"""
for key in self._mode_local_aware_key_looper(local):
yield (key, self[key])
# ---------------- writer methods only after this point -------------------
class FlatSampleWriter(FlatSampleReader):
__slots__ = ('_txnctx',)
_attrs = __slots__ + FlatSampleReader.__slots__
def __init__(self, aset_ctx, *args, **kwargs):
super().__init__(*args, **kwargs)
self._txnctx = aset_ctx
def __enter__(self):
with ExitStack() as stack:
self._txnctx.open_write()
stack.callback(self._txnctx.close_write)
if self._enter_count == 0:
for k in self._be_fs.keys():
stack.enter_context(self._be_fs[k])
self._enter_count += 1
self._stack = stack.pop_all()
return self
def __exit__(self, *exc):
self._stack.close()
self._enter_count -= 1
def _set_arg_validate(self, key, value):
"""Verify if key / value pair is valid to be written in this column
Parameters
----------
key
name to associate with this data piece
value
piece of data to store in the column
Raises
------
ValueError
If key is not valid type/contents or if value is not correct object
type / if it does not conform to column schema
"""
if not is_suitable_user_key(key):
raise ValueError(f'Sample name `{key}` is not suitable.')
isCompat = self._schema.verify_data_compatible(value)
if not isCompat.compatible:
raise ValueError(isCompat.reason)
def _perform_set(self, key, value):
"""Internal write method. Assumes all arguments validated and context is open
Parameters
----------
key
sample key to store
value
data to store
"""
full_hash = self._schema.data_hash_digest(value)
hashKey = hash_data_db_key_from_raw_key(full_hash)
# check if data record already exists with given key
dataRecKey = flat_data_db_key_from_names(self._column_name, key)
existingDataRecVal = self._txnctx.dataTxn.get(dataRecKey, default=False)
if existingDataRecVal:
# check if a data record already exists with the same key & hash value
existingDataRec = data_record_digest_val_from_db_val(existingDataRecVal)
if full_hash == existingDataRec.digest:
return
# write new data if data hash does not exist
existingHashVal = self._txnctx.hashTxn.get(hashKey, default=False)
if existingHashVal is False:
hashVal = self._be_fs[self._schema.backend].write_data(value)
self._txnctx.hashTxn.put(hashKey, hashVal)
self._txnctx.stageHashTxn.put(hashKey, hashVal)
hash_spec = backend_decoder(hashVal)
else:
hash_spec = backend_decoder(existingHashVal)
if hash_spec.backend not in self._be_fs:
# when adding data which is already stored in the repository, the
# backing store for the existing data location spec may not be the
# same as the backend which the data piece would have been saved in here.
#
# As only the backends actually referenced by a columns samples are
# initialized (accessible by the column), there is no guarantee that
# an accessor exists for such a sample. In order to prevent internal
# errors from occurring due to an uninitialized backend if a previously
# existing data piece is "saved" here and subsequently read back from
# the same writer checkout, we perform an existence check and backend
# initialization, if appropriate.
fh = open_file_handles(backends=(hash_spec.backend,),
path=self._path,
mode='a',
schema=self._schema)
self._be_fs[hash_spec.backend] = fh[hash_spec.backend]
# add the record to the db
dataRecVal = data_record_db_val_from_digest(full_hash)
self._txnctx.dataTxn.put(dataRecKey, dataRecVal)
self._samples[key] = hash_spec
def __setitem__(self, key, value):
"""Store a piece of data in a column.
.. seealso::
:meth:`update` for an implementation analogous to python's built in
:meth:`dict.update` method which accepts a dict or iterable of
key/value pairs to add in the same operation.
Parameters
----------
key
name to assign to the sample (assuming the column accepts named
samples), If str, can only contain alpha-numeric ascii characters
(in addition to '-', '.', '_'). Integer key must be >= 0. by
default, None
value
data to store as a sample in the column.
"""
with ExitStack() as stack:
if not self._is_conman:
stack.enter_context(self)
self._set_arg_validate(key, value)
self._perform_set(key, value)
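# Added write-side usage sketch (column and data are hypothetical and must
# conform to the column schema):
#
#   with col:                          # optional context manager batches txn/backend setup
#       col['sample_0'] = data         # __setitem__
#       key = col.append(data)         # store under an auto-generated key
#       col.update({'a': d1, 'b': d2}) # bulk write, dict.update style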
def append(self, value) -> KeyType:
"""Store some data in a sample with an automatically generated key.
This method should only be used if the context some piece of data is
used in is independent from its value (i.e. when reading data back,
there is no useful information which needs to be conveyed between the
data source's name/id and the value of that piece of information.)
Think carefully before going this route, as this posit does not apply
to many common use cases.
To store the data with a user defined key, use :meth:`update` or
:meth:`__setitem__`
Parameters
----------
value
Piece of data to store in the column.
Returns
-------
KeyType
Name of the generated key this data is stored with.
"""
with ExitStack() as stack:
if not self._is_conman:
stack.enter_context(self)
key = generate_sample_name()
while key in self._samples:
key = generate_sample_name()
self._set_arg_validate(key, value)
self._perform_set(key, value)
return key
def update(self, other=None, **kwargs):
"""Store some data with the key/value pairs from other, overwriting existing keys.
:meth:`update` implements functionality similar to python's builtin
:meth:`dict.update` method, accepting either a dictionary or other
iterable (of length two) listing key / value pairs.
Parameters
----------
other
Accepts either another dictionary object or an iterable of
key/value pairs (as tuples or other iterables of length two)
mapping sample names to data value instances. If a sample name is
of string type, it can only contain alpha-numeric ascii
characters (in addition to '-', '.', '_'). Int key must be >= 0. By
default, None.
**kwargs
keyword arguments provided will be saved with keywords as sample keys
(string type only) and values as np.array instances.
"""
with ExitStack() as stack:
if not self._is_conman:
stack.enter_context(self)
if other:
if not isinstance(other, dict):
other = dict(other)
else:
other = other.copy()
elif other is None:
other = {}
if kwargs:
# we have to merge kwargs dict with `other` before operating on
# either so all validation and writing occur atomically
other.update(kwargs)
for key, val in other.items():
self._set_arg_validate(key, val)
for key, val in other.items():
self._perform_set(key, val)
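# Added note: update() validates every key/value pair (including kwargs, merged
# into `other` above) before performing any write, so one bad entry rejects the
# whole batch instead of leaving the column partially updated.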
def __delitem__(self, key: KeyType) -> None:
"""Remove a sample from the column. Convenience method to :meth:`delete`.
.. seealso::
:meth:`pop` to return a value and then delete it in the same operation
Parameters
----------
key : KeyType
Name of the sample to remove from the column.
"""
with ExitStack() as stack:
if not self._is_conman:
stack.enter_context(self)
if key not in self._samples:
raise KeyError(key)
dataKey = flat_data_db_key_from_names(self._column_name, key)
isRecordDeleted = self._txnctx.dataTxn.delete(dataKey)
if isRecordDeleted is False:
raise RuntimeError(
f'Internal error. Not able to delete key {key} from staging '
f'db even though existence passed in-memory verification. '
f'Please report this message in full to the hangar development team.',
f'Specified key: <{type(key)} {key}>', f'Calculated dataKey: <{dataKey}>',
f'isRecordDeleted: <{isRecordDeleted}>', f'DEBUG STRING: {self._debug_}')
del self._samples[key]
def pop(self, key: KeyType):
"""Retrieve some value for some key(s) and delete it in the same operation.
Parameters
----------
key : KeysType
Sample key to remove
Returns
-------
value
Upon success, the value of the removed key.
Raises
------
KeyError
If there is no sample with some key in the column.
"""
value = self[key]
del self[key]
return value
def change_backend(self, backend: str, backend_options: Optional[dict] = None):
"""Change the default backend and filters applied to future data writes.
.. warning::
This method is meant for advanced users only. Please refer to the
hangar backend codebase for information on accepted parameters and
options.
Parameters
----------
backend : str
Backend format code to switch to.
backend_options : Optional[dict]
Backend option specification to use (if specified). If left to
default value of None, then default options for backend are
automatically used.
Raises
------
RuntimeError
If this method was called while this column is invoked in a
context manager
ValueError
If the backend format code is not valid.
"""
if self._is_conman:
raise RuntimeError('Cannot call method inside column context manager.')
self._schema.change_backend(backend, backend_options=backend_options)
new_schema_digest = self._schema.schema_hash_digest()
columnSchemaKey = schema_db_key_from_column(self._column_name, layout=self.column_layout)
columnSchemaVal = schema_record_db_val_from_digest(new_schema_digest)
hashSchemaKey = schema_hash_db_key_from_digest(new_schema_digest)
hashSchemaVal = schema_hash_record_db_val_from_spec(self._schema.schema)
# -------- set vals in lmdb only after schema is sure to exist --------
with self._txnctx.write() as ctx:
ctx.dataTxn.put(columnSchemaKey, columnSchemaVal)
ctx.hashTxn.put(hashSchemaKey, hashSchemaVal, overwrite=False)
new_backend = self._schema.backend
if new_backend not in self._be_fs:
fhands = open_file_handles(
backends=[new_backend],
path=self._path,
mode='a',
schema=self._schema)
self._be_fs[new_backend] = fhands[new_backend]
else:
self._be_fs[new_backend].close()
self._be_fs[new_backend].open(mode='a')
self._be_fs[new_backend].backend_opts = self._schema.backend_options
return
| """Accessor class for columns containing single-level key/value mappings
The FlatSampleReader container is used to store data (in any backend) in a column
containing a single level key/value mapping from names/ids to data.
All backends are supported.
"""
from contextlib import ExitStack
from pathlib import Path
from operator import attrgetter as op_attrgetter
from typing import Tuple, Union, Iterable, Optional, Any
from .common import open_file_handles
from ..records import (
data_record_db_val_from_digest,
data_record_digest_val_from_db_val,
flat_data_db_key_from_names,
hash_data_db_key_from_raw_key,
schema_db_key_from_column,
schema_hash_db_key_from_digest,
schema_hash_record_db_val_from_spec,
schema_record_db_val_from_digest
)
from ..records.parsing import generate_sample_name
from ..backends import backend_decoder
from ..op_state import reader_checkout_only
from ..utils import is_suitable_user_key
from ..optimized_utils import valfilter, valfilterfalse
KeyType = Union[str, int]
class FlatSampleReader:
"""Class implementing get access to data in a column.
This class exposes the standard API to access data stored in a single level
key / value mapping column. Usage is modeled after the python :class:`dict`
style syntax -- with a few additional utility and inspection methods and
properties added. Methods named after those of a python :class:`dict` have
syntactically identical arguments and behavior to that of the standard
library.
If not opened in a ``write-enabled`` checkout, then attempts to add or
delete data or container properties will raise an exception (in the form of
a :class:`PermissionError`). No changes will be propagated unless a
``write-enabled`` checkout is used.
This object can be serialized -- pickled -- for parallel processing /
reading if opened in a ``read-only`` checkout. Parallel operations are both
thread and process safe, though performance may significantly differ
between multithreaded vs multiprocessed code (depending on the backend data
is stored in). Attempts to serialize objects opened in ``write-enabled``
checkouts are not supported and will raise a :class:`PermissionError` if
attempted. This behavior is enforced in order to ensure data and record
integrity while writing to the repository.
"""
__slots__ = ('_mode', '_column_name', '_samples', '_be_fs',
'_path', '_stack', '_enter_count', '_schema')
_attrs = __slots__
def __init__(self,
columnname: str,
samples,
backend_handles,
schema,
repo_path: Path,
mode: str,
*args, **kwargs):
self._stack: Optional[ExitStack] = None
self._mode = mode
self._column_name = columnname
self._samples = samples
self._be_fs = backend_handles
self._path = repo_path
self._schema = schema
self._enter_count = 0
@property
def _debug_(self): # pragma: no cover
return {
'__class__': self.__class__,
'_mode': self._mode,
'_column_name': self._column_name,
'_be_fs': self._be_fs,
'_path': self._path,
'_contains_subsamples': self.contains_subsamples,
'_stack': self._stack._exit_callbacks if self._stack else self._stack,
'_enter_count': self._enter_count,
}
def __repr__(self):
res = (
f'{self.__class__.__qualname__}('
f'repo_pth={self._path}, '
f'aset_name={self._column_name}, '
f"{[f'{key}={val}, ' for key, val in self._schema.schema.items()]}, "
f'mode={self._mode})')
return res
def _repr_pretty_(self, p, cycle):
res = f'Hangar {self.__class__.__qualname__} \
\n Column Name : {self._column_name}\
\n Writeable : {self.iswriteable}\
\n Column Type : {self.column_type}\
\n Column Layout : {self.column_layout}\
\n Schema Type : {self.schema_type}\
\n DType : {self.dtype}\
\n Shape : {self.shape}\
\n Number of Samples : {self.__len__()}\
\n Partial Remote Data Refs : {bool(self.contains_remote_references)}\n'
p.text(res)
def _ipython_key_completions_(self): # pragma: no cover
"""Let ipython know that any key based access can use the column keys
Since we don't want to inherit from dict, nor mess with `__dir__` for
the sanity of developers, this is the best way to ensure users can
autocomplete keys.
Returns
-------
list
list of strings, each being one of the column keys for access.
"""
return list(self.keys())
@reader_checkout_only
def __getstate__(self) -> dict:
"""ensure multiprocess operations can pickle relevant data.
"""
return {slot: getattr(self, slot) for slot in self.__slots__}
def __setstate__(self, state: dict) -> None:
"""ensure multiprocess operations can pickle relevant data.
Technically should be decorated with @reader_checkout_only, but since
at instance creation that is not an attribute, the decorator won't
know. Since only readers can be pickled, this isn't much of an issue.
"""
for slot, value in state.items():
setattr(self, slot, value)
def __enter__(self):
return self
def __exit__(self, *exc):
return
def _destruct(self):
if isinstance(self._stack, ExitStack):
self._stack.close()
self._close()
for attr in self._attrs:
delattr(self, attr)
def __getattr__(self, name):
"""Raise permission error after checkout is closed.
Only runs after a call to :meth:`_destruct`, which is responsible for
deleting all attributes from the object instance.
"""
try:
self.__getattribute__('_mode') # once checkout is closed, this won't exist.
except AttributeError:
err = (f'Unable to operate on past checkout objects which have been '
f'closed. No operation occurred. Please use a new checkout.')
raise PermissionError(err) from None
return self.__getattribute__(name)
@property
def _is_conman(self) -> bool:
return bool(self._enter_count)
def __iter__(self) -> Iterable[KeyType]:
"""Create an iterator yielding the column sample keys.
Yields
-------
Iterable[KeyType]
Sample key contained in the column.
"""
yield from self.keys()
def __len__(self) -> int:
"""Check how many samples are present in a given column.
"""
return len(self._samples)
def __contains__(self, key: KeyType) -> bool:
"""Determine if a key is a valid sample name in the column.
"""
return key in self._samples
def _open(self):
for val in self._be_fs.values():
val.open(mode=self._mode)
def _close(self):
for val in self._be_fs.values():
val.close()
def __getitem__(self, key: KeyType):
"""Retrieve data for some sample key via dict style access conventions.
.. seealso:: :meth:`get`
Parameters
----------
key : KeyType
Sample key to retrieve from the column.
Returns
-------
value
Data corresponding to the provided sample key.
Raises
------
KeyError
if no sample with the requested key exists.
"""
spec = self._samples[key]
return self._be_fs[spec.backend].read_data(spec)
def get(self, key: KeyType, default=None):
"""Retrieve the data associated with some sample key
Parameters
----------
key : KeyType
The name of the subsample(s) to retrieve. Passing a single
subsample key will return the stored data value.
default : Any
if a `key` parameter is not found, then return this value instead.
By default, None.
Returns
-------
value
data stored under the sample key if the key exists, else the
default value if not found.
"""
try:
return self[key]
except KeyError:
return default
@property
def column(self) -> str:
"""Name of the column.
"""
return self._column_name
@property
def column_type(self):
"""Data container type of the column ('ndarray', 'str', etc).
"""
return self._schema.column_type
@property
def column_layout(self):
"""Column layout type ('nested', 'flat', etc).
"""
return self._schema.column_layout
@property
def schema_type(self):
"""Schema type of the contained data ('variable_shape', 'fixed_shape', etc).
"""
return self._schema.schema_type
@property
def dtype(self):
"""Dtype of the columns data (np.float, str, etc).
"""
return self._schema.dtype
@property
def shape(self):
"""(Max) shape of data that can (is) written in the column.
"""
try:
return self._schema.shape
except AttributeError:
return None
@property
def backend(self) -> str:
"""Code indicating which backing store is used when writing data.
"""
return self._schema.backend
@property
def backend_options(self):
"""Filter / Compression options applied to backend when writing data.
"""
return self._schema.backend_options
@property
def iswriteable(self) -> bool:
"""Bool indicating if this column object is write-enabled.
"""
return False if self._mode == 'r' else True
@property
def contains_subsamples(self) -> bool:
"""Bool indicating if sub-samples are contained in this column container.
"""
return False
@property
def contains_remote_references(self) -> bool:
"""Bool indicating if all samples in column exist on local disk.
The data associated with samples referencing some remote server will
need to be downloaded (``fetched`` in the hangar vocabulary) before
they can be read into memory.
Returns
-------
bool
False if at least one sample in the column references data stored
on some remote server. True if all sample data is available on the
machine's local disk.
"""
_islocal_func = op_attrgetter('islocal')
return not all(map(_islocal_func, self._samples.values()))
@property
def remote_reference_keys(self) -> Tuple[KeyType]:
"""Compute sample names whose data is stored in a remote server reference.
Returns
-------
Tuple[KeyType]
list of sample keys in the column whose data references indicate
they are stored on a remote server.
"""
_islocal_func = op_attrgetter('islocal')
return tuple(valfilterfalse(_islocal_func, self._samples).keys())
def _mode_local_aware_key_looper(self, local: bool) -> Iterable[KeyType]:
"""Generate keys for iteration with dict update safety ensured.
Parameters
----------
local : bool
True if keys should be returned which only exist on the local machine.
False if remote sample keys should be included as well.
Returns
-------
Iterable[KeyType]
Sample keys conforming to the `local` argument spec.
"""
_islocal_func = op_attrgetter('islocal')
if local:
if self._mode == 'r':
yield from valfilter(_islocal_func, self._samples).keys()
else:
yield from tuple(valfilter(_islocal_func, self._samples).keys())
else:
if self._mode == 'r':
yield from self._samples.keys()
else:
yield from tuple(self._samples.keys())
def keys(self, local: bool = False) -> Iterable[KeyType]:
"""Generator yielding the name (key) of every sample in the column.
Parameters
----------
local : bool, optional
If True, returned keys will only correspond to data which is
available for reading on the local disk, by default False.
Yields
------
Iterable[KeyType]
Keys of the column's samples, yielded one at a time.
"""
yield from self._mode_local_aware_key_looper(local)
def values(self, local: bool = False) -> Iterable[Any]:
"""Generator yielding the data for every sample in the column.
Parameters
----------
local : bool, optional
If True, returned values will only correspond to data which is
available for reading on the local disk. No attempt will be made to
read data existing on a remote server, by default False.
Yields
------
Iterable[Any]
Data values of the column's samples, yielded one at a time.
"""
for key in self._mode_local_aware_key_looper(local):
yield self[key]
def items(self, local: bool = False) -> Iterable[Tuple[KeyType, Any]]:
"""Generator yielding a (name, data) tuple for every sample in the column.
Parameters
----------
local : bool, optional
If True, returned keys/values will only correspond to data which is
available for reading on the local disk. No attempt will be made to
read data existing on a remote server. By default, False.
Yields
------
Iterable[Tuple[KeyType, Any]]
Name and stored value of every sample in the column.
"""
for key in self._mode_local_aware_key_looper(local):
yield (key, self[key])
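# --- Added usage sketch (illustration only, not part of the original source). ---
# `column` is assumed to be an already-opened read-enabled column object that
# exposes the dict-style access methods defined above.
def _example_read_column(column, sample_key):
    value = column[sample_key]                    # raises KeyError if missing
    maybe = column.get('missing-key')             # returns None instead of raising
    local_keys = list(column.keys(local=True))    # only keys whose data is on local disk
    for key, data in column.items():
        print(key, type(data))
    return value, maybe, local_keys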
# ---------------- writer methods only after this point -------------------
class FlatSampleWriter(FlatSampleReader):
__slots__ = ('_txnctx',)
_attrs = __slots__ + FlatSampleReader.__slots__
def __init__(self, aset_ctx, *args, **kwargs):
super().__init__(*args, **kwargs)
self._txnctx = aset_ctx
def __enter__(self):
with ExitStack() as stack:
self._txnctx.open_write()
stack.callback(self._txnctx.close_write)
if self._enter_count == 0:
for k in self._be_fs.keys():
stack.enter_context(self._be_fs[k])
self._enter_count += 1
self._stack = stack.pop_all()
return self
def __exit__(self, *exc):
self._stack.close()
self._enter_count -= 1
def _set_arg_validate(self, key, value):
"""Verify if key / value pair is valid to be written in this column
Parameters
----------
key
name to associate with this data piece
value
piece of data to store in the column
Raises
------
ValueError
If the key is not a valid type/contents, or if the value is not the
correct object type or does not conform to the column schema.
"""
if not is_suitable_user_key(key):
raise ValueError(f'Sample name `{key}` is not suitable.')
isCompat = self._schema.verify_data_compatible(value)
if not isCompat.compatible:
raise ValueError(isCompat.reason)
def _perform_set(self, key, value):
"""Internal write method. Assumes all arguments validated and context is open
Parameters
----------
key
sample key to store
value
data to store
"""
full_hash = self._schema.data_hash_digest(value)
hashKey = hash_data_db_key_from_raw_key(full_hash)
# check if data record already exists with given key
dataRecKey = flat_data_db_key_from_names(self._column_name, key)
existingDataRecVal = self._txnctx.dataTxn.get(dataRecKey, default=False)
if existingDataRecVal:
# check if a data record already exists with the same key & hash value
existingDataRec = data_record_digest_val_from_db_val(existingDataRecVal)
if full_hash == existingDataRec.digest:
return
# write new data if data hash does not exist
existingHashVal = self._txnctx.hashTxn.get(hashKey, default=False)
if existingHashVal is False:
hashVal = self._be_fs[self._schema.backend].write_data(value)
self._txnctx.hashTxn.put(hashKey, hashVal)
self._txnctx.stageHashTxn.put(hashKey, hashVal)
hash_spec = backend_decoder(hashVal)
else:
hash_spec = backend_decoder(existingHashVal)
if hash_spec.backend not in self._be_fs:
# when adding data which is already stored in the repository, the
# backing store for the existing data location spec may not be the
# same as the backend which the data piece would have been saved in here.
#
# As only the backends actually referenced by a columns samples are
# initialized (accessible by the column), there is no guarantee that
# an accessor exists for such a sample. In order to prevent internal
# errors from occurring due to an uninitialized backend if a previously
# existing data piece is "saved" here and subsequently read back from
# the same writer checkout, we perform an existence check and backend
# initialization, if appropriate.
fh = open_file_handles(backends=(hash_spec.backend,),
path=self._path,
mode='a',
schema=self._schema)
self._be_fs[hash_spec.backend] = fh[hash_spec.backend]
# add the record to the db
dataRecVal = data_record_db_val_from_digest(full_hash)
self._txnctx.dataTxn.put(dataRecKey, dataRecVal)
self._samples[key] = hash_spec
def __setitem__(self, key, value):
"""Store a piece of data in a column.
.. seealso::
:meth:`update` for an implementation analogous to python's built in
:meth:`dict.update` method which accepts a dict or iterable of
key/value pairs to add in the same operation.
Parameters
----------
key
name to assign to the sample. If str, it can only contain
alpha-numeric ascii characters (in addition to '-', '.', '_').
An integer key must be >= 0.
value
data to store as a sample in the column.
"""
with ExitStack() as stack:
if not self._is_conman:
stack.enter_context(self)
self._set_arg_validate(key, value)
self._perform_set(key, value)
def append(self, value) -> KeyType:
"""Store some data in a sample with an automatically generated key.
This method should only be used when the key under which a piece of
data is stored carries no meaning of its own (i.e. when reading the
data back, nothing useful needs to be conveyed by the sample's
name/id). Think carefully before going this route, as this assumption
does not hold for many common use cases.
To store the data with a user defined key, use :meth:`update` or
:meth:`__setitem__`
Parameters
----------
value
Piece of data to store in the column.
Returns
-------
KeyType
Name of the generated key this data is stored with.
"""
with ExitStack() as stack:
if not self._is_conman:
stack.enter_context(self)
key = generate_sample_name()
while key in self._samples:
key = generate_sample_name()
self._set_arg_validate(key, value)
self._perform_set(key, value)
return key
def update(self, other=None, **kwargs):
"""Store some data with the key/value pairs from other, overwriting existing keys.
:meth:`update` implements functionality similar to python's builtin
:meth:`dict.update` method, accepting either a dictionary or other
iterable (of length two) listing key / value pairs.
Parameters
----------
other
Accepts either another dictionary object or an iterable of
key/value pairs (as tuples or other iterables of length two).
mapping sample names to data value instances. If a sample
name is string type, it can only contain alpha-numeric ascii
characters (in addition to '-', '.', '_'). An int key must be >= 0.
By default, None.
**kwargs
keyword arguments provided will be saved with keywords as sample keys
(string type only) and values as np.array instances.
"""
with ExitStack() as stack:
if not self._is_conman:
stack.enter_context(self)
if other:
if not isinstance(other, dict):
other = dict(other)
else:
other = other.copy()
elif other is None:
other = {}
if kwargs:
# we have to merge kwargs dict with `other` before operating on
# either so all validation and writing occur atomically
other.update(kwargs)
for key, val in other.items():
self._set_arg_validate(key, val)
for key, val in other.items():
self._perform_set(key, val)
def __delitem__(self, key: KeyType) -> None:
"""Remove a sample from the column. Convenience method to :meth:`delete`.
.. seealso::
:meth:`pop` to return a value and then delete it in the same operation
Parameters
----------
key : KeyType
Name of the sample to remove from the column.
"""
with ExitStack() as stack:
if not self._is_conman:
stack.enter_context(self)
if key not in self._samples:
raise KeyError(key)
dataKey = flat_data_db_key_from_names(self._column_name, key)
isRecordDeleted = self._txnctx.dataTxn.delete(dataKey)
if isRecordDeleted is False:
raise RuntimeError(
f'Internal error. Not able to delete key {key} from staging '
f'db even though the in-memory existence check passed. '
f'Please report this message in full to the hangar development team.',
f'Specified key: <{type(key)} {key}>', f'Calculated dataKey: <{dataKey}>',
f'isRecordDeleted: <{isRecordDeleted}>', f'DEBUG STRING: {self._debug_}')
del self._samples[key]
def pop(self, key: KeyType):
"""Retrieve the value stored under a key and delete it in the same operation.
Parameters
----------
key : KeyType
Sample key to remove
Returns
-------
value
Upon success, the value of the removed key.
Raises
------
KeyError
If there is no sample with some key in the column.
"""
value = self[key]
del self[key]
return value
def change_backend(self, backend: str, backend_options: Optional[dict] = None):
"""Change the default backend and filters applied to future data writes.
.. warning::
This method is meant for advanced users only. Please refer to the
hangar backend codebase for information on accepted parameters and
options.
Parameters
----------
backend : str
Backend format code to switch to.
backend_options : Optional[dict]
Backend option specification to use (if specified). If left to
default value of None, then default options for backend are
automatically used.
Raises
------
RuntimeError
If this method was called while this column is invoked in a
context manager
ValueError
If the backend format code is not valid.
"""
if self._is_conman:
raise RuntimeError('Cannot call method inside column context manager.')
self._schema.change_backend(backend, backend_options=backend_options)
new_schema_digest = self._schema.schema_hash_digest()
columnSchemaKey = schema_db_key_from_column(self._column_name, layout=self.column_layout)
columnSchemaVal = schema_record_db_val_from_digest(new_schema_digest)
hashSchemaKey = schema_hash_db_key_from_digest(new_schema_digest)
hashSchemaVal = schema_hash_record_db_val_from_spec(self._schema.schema)
# -------- set vals in lmdb only after schema is sure to exist --------
with self._txnctx.write() as ctx:
ctx.dataTxn.put(columnSchemaKey, columnSchemaVal)
ctx.hashTxn.put(hashSchemaKey, hashSchemaVal, overwrite=False)
new_backend = self._schema.backend
if new_backend not in self._be_fs:
fhands = open_file_handles(
backends=[new_backend],
path=self._path,
mode='a',
schema=self._schema)
self._be_fs[new_backend] = fhands[new_backend]
else:
self._be_fs[new_backend].close()
self._be_fs[new_backend].open(mode='a')
self._be_fs[new_backend].backend_opts = self._schema.backend_options
return
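# --- Added usage sketch (illustration only, not part of the original source). ---
# `column` is assumed to be a write-enabled column handle obtained from a write
# checkout. Entering the column as a context manager keeps the backend file
# handles and lmdb transactions open across several writes, as implemented in
# FlatSampleWriter.__enter__ / __exit__ above.
def _example_write_column(column, first, second, third):
    with column:
        column['sample-0'] = first              # explicit key
        auto_key = column.append(second)        # automatically generated key
        column.update({'sample-2': third})      # bulk write, dict-style
        removed = column.pop('sample-0')        # read a value and delete it
    return auto_key, removed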
|
"""
It compares human and machine attention weights.
"""
import json
import logging
import os
import re
from tqdm import tqdm
import abc
from abc import ABCMeta
import pandas as pd
import numpy as np
import random
from copy import copy, deepcopy
import matplotlib.pyplot as plt
import seaborn as sns
import matplotlib.image as mpimg
from PIL import Image, ImageDraw, ImageFont
import matplotlib.patches as patches
from matplotlib.pyplot import imshow
import yaml
import pymongo
from pprint import pprint
from datetime import datetime
import argparse
logging.basicConfig() # required
logger = logging.getLogger('attention-comparer')
logger.setLevel(logging.INFO)
class VisualToken(object):
def __init__(self, index_id, text, x, y, width, height, clicked):
self.text = text
if (index_id == ""):
index_id = -1
self.index_id = int(index_id)
self.x = int(x)
self.y = int(y)
self.width = int(width)
self.height = int(height)
self.attention = 0
self.clicked = clicked
def draw_PIL(self, drw, global_attention=0,
guessed_right=False,
human=True,
almost_correct=False):
"""Draw the patch on the plot."""
alpha = 0.1
if global_attention != 0:
alpha = int((float(self.attention) / global_attention) * 255)
if self.attention == 0:
alpha = 0
if human:
# human
if (almost_correct):
color = (255, 127, 80, alpha)  # orange
else:
if (guessed_right):
color = (26, 255, 26, alpha) # green
else:
color = (255, 102, 102, alpha) # red
else:
# Machine
color = (0, 191, 255, alpha) # blue
border = None
if self.clicked:
border = 'red'
rect = \
drw.rectangle([
self.x,
self.y,
self.x + self.width,
self.y + self.height],
outline=border,
width=2,
fill=color)
def add_attention(self, attention):
self.attention = attention
def __repr__(self):
return 'x:' + str(self.x).zfill(3) \
+ ' - y:' + str(self.y).zfill(3) \
+ ' - width:' + str(self.width).zfill(4) \
+ ' - height:' + str(self.height).zfill(4) \
+ ' - |' + self.text + '|'
class Visualizer(object):
def __init__(self, df_human):
self.df_human = deepcopy(df_human)
self.df_human['is_warmup'] = \
(self.df_human['id'].astype('int') < 3).astype('int')
self.df_human = self.df_human[self.df_human['is_warmup'] == 0]
self.df_human.sort_values(by='time', ascending=True, inplace=True)
self.df_human.drop_duplicates(
subset=['randomcode', 'uuid'], keep='first', inplace=True
)
def plot_token_heatmap(self,
survey_code_col,
correct_col, almost_correct_col,
user_selection__col,
formatted_col, attention_col,
tokens_col, clicked_col,
id_col,
sortby,
only_users=None,
limit=None):
"""Plot a token-level heatmap of human attention for each survey answer."""
df = deepcopy(self.df_human)
df.sort_values(by=sortby, inplace=True)
if only_users is not None:
df = df[df[survey_code_col].isin(only_users)]
counter = 0
for row in df.iterrows():
counter += 1
if limit is not None and counter > limit:
break
idx = row[0]
record = row[1]
correct_answered = \
record[correct_col] == record[user_selection__col]
almost_correct = \
record[user_selection__col].lower() in [
x.lower().replace('_', '')
for x in record[almost_correct_col]]
idx = record[id_col]
user_code = record[survey_code_col]
print('*' * 50)
print(f"Ground Truth: {record[correct_col]} - Provenance: {record['nickname']} - {user_code}")
print(f'Similar options: {record[almost_correct_col]}')
fig, ax = self.process_single(
tokens=record[tokens_col],
human=True,
attention=record[attention_col],
formattedcode=record[formatted_col],
correct_answered=correct_answered,
almost_correct=almost_correct,
final_clicked_tokens=record[clicked_col])
ax.set_title(
f'Ground Truth: {record[correct_col]} '
+ f'- User Selection: {record[user_selection__col]}')
plt.show()
def process_single(self, tokens, attention, human,
formattedcode,
correct_answered, almost_correct,
final_clicked_tokens=None):
"""Display attention of the given function."""
# PREPARE IMAGE
path_font_file = '../public/FreeMono.ttf'
surce_code_content = formattedcode
#img_name = folder + data['id'] + data['rawdictionarykey'][1:] + '.png'
ratio = (8.4/14)
char_height = 20
char_width = char_height * ratio
# compute max width required
lines = surce_code_content.splitlines()
lines_len = [len(line) for line in lines]
max_width = int(max(lines_len) * char_width)
max_height = int(char_height * len(lines))
img = Image.new('RGB', (max_width, max_height), color=(255, 255, 255))
fnt = ImageFont.truetype(path_font_file, char_height)
drw = ImageDraw.Draw(img, 'RGBA')
drw.text((0, 0), surce_code_content, font=fnt, fill=(0, 0, 0))
# CAN BE DELAYED AT AFTER TOKEN DRAWING img.save(img_name)
# check clicked tokens to draw squares around them
if final_clicked_tokens is not None:
clicked_tokens = np.array(final_clicked_tokens)
clicked_tokens_indices = np.where(clicked_tokens == 1)[0].tolist()
else:
clicked_tokens_indices = []
# INSTANTIATE TOKENS
# get the position from the metadata of the tokens
viz_tokens = []
# DEBUG print(tokens)
# DEBUG print(formattedcode)
for i, t in enumerate(tokens):
# print(t)
new_token = \
VisualToken(
index_id=t['index_id'],
text=t['text'],
x=char_width * int(t['start_char']),
y=char_height * int(t['line']),
width=char_width * len(t['text']),
height=char_height,
clicked=(i in clicked_tokens_indices))
viz_tokens.append(new_token)
# COMPUTE ATTENTION
global_attention = 1
# compute attention
for att, viz_token in zip(attention, viz_tokens):
viz_token.add_attention(att)
# COMPUTE REFERENCE ATTENTION TO RESCALE
# use the maximum attention received by any token (scaled by 1.33) as the reference
global_attention = 0
attentions = []
for viz_token in viz_tokens:
attentions.append(viz_token.attention)
global_attention = max(attentions) * 1.33
# check user was right to decide the color of the tokens (red vs green)
# correct_answered decides the color
for viz_token in viz_tokens:
# print(token)
viz_token.draw_PIL(drw, global_attention, correct_answered, human, almost_correct)
#img.save(img_name)
#return img_name
imshow(np.asarray(img))
fig = plt.gcf()
fig.set_size_inches(18.5, 10.5)
if human:
plt.title('Human')
else:
plt.title('Machine')
ax = plt.gca()
return fig, ax
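# --- Added usage sketch (illustration only, not part of the original source). ---
# The column names below are assumptions about the survey dataframe layout and
# must be adapted to the actual export; rendering also requires the
# ../public/FreeMono.ttf font referenced in process_single.
def _example_plot_human_heatmaps(df_human):
    viz = Visualizer(df_human)
    viz.plot_token_heatmap(
        survey_code_col='randomcode',
        correct_col='function_name',
        almost_correct_col='similar_function_names',   # hypothetical column name
        user_selection__col='user_selection',          # hypothetical column name
        formatted_col='formattedcode',
        attention_col='att_vector_w_click',
        tokens_col='tokens_in_code',
        clicked_col='finalclickedtokens',
        id_col='uuid',
        sortby='time',
        limit=3)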
def plot_statistics(df, column_name, ax=None, color='blue'):
df = deepcopy(df)
mean = df[column_name].mean()
median = df[column_name].median()
if ax is None:
f, (ax_box, ax_hist) = plt.subplots(2, sharex=True, gridspec_kw= {"height_ratios": (0.2, 1)})
sns.boxplot(x=df[column_name], ax=ax_box, color=color)
ax_box.axvline(mean, color='r', linestyle='--')
ax_box.axvline(median, color='g', linestyle='-')
ax_box.set(xlabel='')
ax_box.yaxis.label.set_size(14)
ax_box.xaxis.label.set_size(14)
else:
ax_hist = ax
sns.histplot(x=df[column_name], ax=ax_hist, color=color)
ax_hist.axvline(mean, color='r', linestyle='--')
ax_hist.axvline(median, color='g', linestyle='-')
ax_hist.legend({f'Mean {mean:.2f}':mean, f'Median {median:.2f}':median})
ax_hist.yaxis.label.set_size(14)
ax_hist.xaxis.label.set_size(14)
if ax is None:
plt.show()
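# --- Added, runnable illustration of plot_statistics on synthetic data
# (not part of the original source). ---
def _example_plot_statistics():
    demo = pd.DataFrame({'agreement_score': np.random.rand(200)})
    plot_statistics(demo, 'agreement_score', color='green')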
def inspect(df, column_name, comparer,
machine_col='att_vector_avg',
human_col='att_vector_w_click',
n_records_per_side=5,
center=False,
center_position=0.5,
columns_to_observe=None):
df = df.sort_values(by=column_name, ascending=True)
df = df.drop_duplicates(subset='uuid')
if center:
center_position = int(len(df) * center_position)
uuid_center = \
df.iloc[center_position - n_records_per_side:center_position + n_records_per_side]['uuid']
randomcode_center = \
df.iloc[center_position - n_records_per_side:center_position + n_records_per_side]['randomcode']
for uuid, randomcode in zip(uuid_center, randomcode_center):
comparer.plot_token_heatmap_side_by_side(
machine_col=machine_col,
human_col=human_col,
only_uuids=[uuid],
only_users=[randomcode],
columns_to_observe=columns_to_observe
)
else:
# head
print(f'Low value of {column_name}')
uuid_head = df.head(n_records_per_side)['uuid']
randomcode_head = df.head(n_records_per_side)['randomcode']
for uuid, randomcode in zip(uuid_head, randomcode_head):
comparer.plot_token_heatmap_side_by_side(
machine_col=machine_col,
human_col=human_col,
only_uuids=[uuid],
only_users=[randomcode],
columns_to_observe=columns_to_observe
)
#print(uuid_head)
# tail
print(f'High value of {column_name}')
uuid_tail = df.tail(n_records_per_side)['uuid']
randomcode_tail = df.tail(n_records_per_side)['randomcode']
#print(uuid_tail)
for uuid, randomcode in zip(uuid_tail, randomcode_tail):
comparer.plot_token_heatmap_side_by_side(
machine_col=machine_col,
human_col=human_col,
only_uuids=[uuid],
only_users=[randomcode],
columns_to_observe=columns_to_observe
)
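# --- Added usage sketch (illustration only, not part of the original source). ---
# `comparer` is assumed to expose the plot_token_heatmap_side_by_side method used
# above, and `df_agreement` to contain the inspected column plus 'uuid' and
# 'randomcode'; the column name 'spearman_corr' is a hypothetical example.
def _example_inspect_extremes(df_agreement, comparer):
    inspect(df_agreement, 'spearman_corr', comparer,
            n_records_per_side=2,
            columns_to_observe=['function_name'])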
# ----------------------------------------------------------------
from matplotlib import colors
class FlexibleVisualToken(object):
def __init__(self, index_id, text, x, y, width, height, clicked):
self.text = text
if (index_id == ""):
index_id = -1
self.index_id = int(index_id)
self.x = int(x)
self.y = int(y)
self.width = int(width)
self.height = int(height)
self.attention = 0
self.clicked = clicked
def draw_PIL(self, drw, global_attention=0,
named_color='lime'):
"""Draw the patch on the plot."""
alpha = 0.1
if global_attention != 0:
alpha = int((float(self.attention) / global_attention) * 255)
if self.attention == 0:
alpha = 0
color_rgb = list(colors.to_rgb(named_color))
color_rgb = [int(c * 255) for c in color_rgb]
color_rgba = color_rgb + [alpha]
color_rgba = tuple(color_rgba)
border = None
if self.clicked:
border = 'red'
rect = \
drw.rectangle([
self.x,
self.y,
self.x + self.width,
self.y + self.height],
outline=border,
width=2,
fill=color_rgba)
def add_attention(self, attention):
self.attention = attention
def __repr__(self):
return 'x:' + str(self.x).zfill(3) \
+ ' - y:' + str(self.y).zfill(3) \
+ ' - width:' + str(self.width).zfill(4) \
+ ' - height:' + str(self.height).zfill(4) \
+ ' - |' + self.text + '|'
def plot_maps(df,
weight_cols=[],
label_cols=None,
colors_for_cols=None,
predictor_entity_name='Entity Predictor',
prediction_col=None,
max_records=None,
output_in_folder=None,
add_participant_id=True,
add_square_for_clicks=False,
limit_visualization_to=3):
"""Print the attention weights on the method body."""
assert len(weight_cols) > 0
assert len(df) > 0
counter_visualized_maps = 0
for i, row in enumerate(df.iterrows()):
#print(i)
if max_records is not None and i > max_records:
break
content = row[1]
for j, attention_type in enumerate(weight_cols):
named_color = colors_for_cols[j] \
if colors_for_cols is not None else 'red'
final_clicked_tokens = content['finalclickedtokens'] \
if add_square_for_clicks else []
fig, ax = plot_single_map(
tokens_in_code=content['tokens_in_code'],
attention_weights=content[attention_type],
formattedcode=content['formattedcode'],
final_clicked_tokens=final_clicked_tokens,
named_color=named_color
)
if output_in_folder is not None:
attention_name = label_cols[j] \
if label_cols is not None else attention_type
filename = f"{i}-{predictor_entity_name}-{attention_name}-mtd:{content['uuid']}"
if add_participant_id:
filename += f"-ptc:{content['randomcode']}"
filename = "".join([c for c in filename if c != ' ']) + '.png'
filepath = os.path.join(output_in_folder, filename)
print(filepath)
prediction = content[prediction_col] \
if prediction_col is not None else 'undefined'
if isinstance(prediction, list):
prediction = [p for p in prediction if p != '%END%']
prediction = [p for p in prediction if p != '%UNK%']
title = f"{predictor_entity_name}: {prediction} - Original: {content['function_name']}"
title += f' (what you see: {attention_name} weights)'
plt.title(title)
fig.savefig(filepath, format='png')
if counter_visualized_maps < limit_visualization_to:
plt.show()
counter_visualized_maps += 1
def plot_single_map(tokens_in_code,
attention_weights,
named_color,
formattedcode,
final_clicked_tokens=None):
"""Display attention of the given function."""
# PREPARE IMAGE
path_font_file = '../public/FreeMono.ttf'
surce_code_content = formattedcode
#img_name = folder + data['id'] + data['rawdictionarykey'][1:] + '.png'
ratio = (8.4/14)
char_height = 20
char_width = char_height * ratio
# compute max width required
lines = surce_code_content.splitlines()
lines_len = [len(line) for line in lines]
max_width = int(max(lines_len) * char_width)
max_height = int(char_height * len(lines))
img = Image.new('RGB', (max_width, max_height), color=(255, 255, 255))
fnt = ImageFont.truetype(path_font_file, char_height)
drw = ImageDraw.Draw(img, 'RGBA')
drw.text((0, 0), surce_code_content, font=fnt, fill=(0, 0, 0))
# CAN BE DELAYED AT AFTER TOKEN DRAWING img.save(img_name)
# check clicked tokens to draw squares around them
if final_clicked_tokens is not None:
clicked_tokens = np.array(final_clicked_tokens)
clicked_tokens_indices = np.where(clicked_tokens == 1)[0].tolist()
else:
clicked_tokens_indices = []
# INSTANTIATE TOKENS
# get the position from the metadata of the tokens
viz_tokens = []
# DEBUG print(tokens)
# DEBUG print(formattedcode)
for i, t in enumerate(tokens_in_code):
# print(t)
new_token = \
FlexibleVisualToken(
index_id=t['index_id'],
text=t['text'],
x=char_width * int(t['start_char']),
y=char_height * int(t['line']),
width=char_width * len(t['text']),
height=char_height,
clicked=(i in clicked_tokens_indices))
viz_tokens.append(new_token)
# COMPUTE ATTENTION
global_attention = 1
# compute attention
for att, viz_token in zip(attention_weights, viz_tokens):
viz_token.add_attention(att)
# COMPUTE REFERENCE ATTENTION TO RESCALE
# use the maximum attention received by any token (scaled by 1.33) as the reference
global_attention = 0
attentions = []
for viz_token in viz_tokens:
attentions.append(viz_token.attention)
global_attention = max(attentions) * 1.33
# check user was right to decide the color of the tokens (red vs green)
# correct_answered decides the color
for viz_token in viz_tokens:
# print(token)
viz_token.draw_PIL(drw, global_attention, named_color)
#img.save(img_name)
#return img_name
imshow(np.asarray(img))
fig = plt.gcf()
#print(f'max_width: {max_width}')
#print(f'max_width: {max_height}')
FACTOR = 60
fig.set_size_inches(max_width / FACTOR, max_height / FACTOR)
plt.title('undefined')
ax = plt.gca()
return fig, ax
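# --- Added usage sketch (illustration only, not part of the original source). ---
# Column names follow the ones already referenced inside plot_maps
# ('tokens_in_code', 'formattedcode', 'finalclickedtokens', ...); the weight
# column names reuse the defaults seen in inspect() and are assumptions about
# the merged human/machine dataframe.
def _example_plot_maps(df_merged, out_dir=None):
    plot_maps(df_merged,
              weight_cols=['att_vector_w_click', 'att_vector_avg'],
              label_cols=['human', 'machine'],
              colors_for_cols=['red', 'blue'],
              predictor_entity_name='Copy Attention Model',   # hypothetical label
              prediction_col=None,
              max_records=5,
              output_in_folder=out_dir,
              add_square_for_clicks=True)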
| """
It compares human and machine attention weights.
"""
import json
import logging
import os
import re
from tqdm import tqdm
import abc
from abc import ABCMeta
import pandas as pd
import numpy as np
import random
from copy import deepcopy
import matplotlib.pyplot as plt
import seaborn as sns
import matplotlib.image as mpimg
import os
import json
import pandas as pd
import numpy as np
from PIL import Image, ImageDraw, ImageFont
import matplotlib.patches as patches
from matplotlib.pyplot import imshow
from copy import copy
import numpy as np
import yaml
import pymongo
from pprint import pprint
from datetime import datetime
import argparse
logging.basicConfig() # required
logger = logging.getLogger('attention-comparer')
logger.setLevel(logging.INFO)
class VisualToken(object):
def __init__(self, index_id, text, x, y, width, height, clicked):
self.text = text
if (index_id == ""):
index_id = -1
self.index_id = int(index_id)
self.x = int(x)
self.y = int(y)
self.width = int(width)
self.height = int(height)
self.attention = 0
self.clicked = clicked
def draw_PIL(self, drw, global_attention=0,
guessed_right=False,
human=True,
almost_correct=False):
"""Draw the patch on the plot."""
alpha = 0.1
if global_attention != 0:
alpha = int((float(self.attention) / global_attention) * 255)
if self.attention == 0:
alpha = 0
if human:
# human
if (almost_correct):
color = (255, 127, 80, alpha) # orange)
else:
if (guessed_right):
color = (26, 255, 26, alpha) # green
else:
color = (255, 102, 102, alpha) # red
else:
# Machine
color = (0, 191, 255, alpha) # blue
border = None
if self.clicked:
border = 'red'
rect = \
drw.rectangle([
self.x,
self.y,
self.x + self.width,
self.y + self.height],
outline=border,
width=2,
fill=color)
def add_attention(self, attention):
self.attention = attention
def __repr__(self):
return 'x:' + str(self.x).zfill(3) \
+ ' - y:' + str(self.y).zfill(3) \
+ ' - width:' + str(self.width).zfill(4) \
+ ' - height:' + str(self.height).zfill(4) \
+ ' - |' + self.text + '|'
class Visualizer(object):
def __init__(self, df_human):
self.df_human = deepcopy(df_human)
self.df_human['is_warmup'] = \
(self.df_human['id'].astype('int') < 3).astype('int')
self.df_human = self.df_human[self.df_human['is_warmup'] == 0]
self.df_human.sort_values(by='time', ascending=True, inplace=True)
self.df_human.drop_duplicates(
subset=['randomcode', 'uuid'], keep='first', inplace=True
)
def plot_token_heatmap(self,
survey_code_col,
correct_col, almost_correct_col,
user_selection__col,
formatted_col, attention_col,
tokens_col, clicked_col,
id_col,
sortby,
only_users=None,
limit=None):
"""Plot Human and Machine heatmaps on token side by side."""
df = deepcopy(self.df_human)
df.sort_values(by=sortby, inplace=True)
if only_users is not None:
df = df[df[survey_code_col].isin(only_users)]
counter = 0
for row in df.iterrows():
counter += 1
if limit is not None and counter > limit:
break
idx = row[0]
record = row[1]
correct_answered = \
record[correct_col] == record[user_selection__col]
almost_correct = \
record[user_selection__col].lower() in [
x.lower().replace('_', '')
for x in record[almost_correct_col]]
idx = record[id_col]
user_code = record[survey_code_col]
print('*' * 50)
print(f"Ground Truth: {record[correct_col]} - Provenance: {record['nickname']} - {user_code}")
print(f'Similar options: {record[almost_correct_col]}')
fig, ax = self.process_single(
tokens=record[tokens_col],
human=True,
attention=record[attention_col],
formattedcode=record[formatted_col],
correct_answered=correct_answered,
almost_correct=almost_correct,
final_clicked_tokens=record[clicked_col])
ax.set_title(
f'Ground Truth: {record[correct_col]} '
+ f'- User Selection: {record[user_selection__col]}')
plt.show()
def process_single(self, tokens, attention, human,
formattedcode,
correct_answered, almost_correct,
final_clicked_tokens=None):
"""Display attention of the given function."""
# PREPARE IMAGE
path_font_file = '../public/FreeMono.ttf'
surce_code_content = formattedcode
#img_name = folder + data['id'] + data['rawdictionarykey'][1:] + '.png'
ratio = (8.4/14)
char_height = 20
char_width = char_height * ratio
# compute max width required
lines = surce_code_content.splitlines()
lines_len = [len(line) for line in lines]
max_width = int(max(lines_len) * char_width)
max_height = int(char_height * len(lines))
img = Image.new('RGB', (max_width, max_height), color=(255, 255, 255))
fnt = ImageFont.truetype(path_font_file, char_height)
drw = ImageDraw.Draw(img, 'RGBA')
drw.text((0, 0), surce_code_content, font=fnt, fill=(0, 0, 0))
# CAN BE DELAYED AT AFTER TOKEN DRAWING img.save(img_name)
# check clicked tokens to draw squares around them
if final_clicked_tokens is not None:
clicked_tokens = np.array(final_clicked_tokens)
clicked_tokens_indices = np.where(clicked_tokens == 1)[0].tolist()
else:
clicked_tokens_indices = []
# INSTANTIATE TOKENS
# get the positon form the metadata of tokens
viz_tokens = []
# DEBUG print(tokens)
# DEBUG print(formattedcode)
for i, t in enumerate(tokens):
# print(t)
new_token = \
VisualToken(
index_id=t['index_id'],
text=t['text'],
x=char_width * int(t['start_char']),
y=char_height * int(t['line']),
width=char_width * len(t['text']),
height=char_height,
clicked=(i in clicked_tokens_indices))
viz_tokens.append(new_token)
# COMPUTE ATTENTION
global_attention = 1
# compute attention
for att, viz_token in zip(attention, viz_tokens):
viz_token.add_attention(att)
# COMPUTE REFERENCE ATTENTION TO RESCALE
# sum all the attention received by the tokens
global_attention = 0
attentions = []
for viz_token in viz_tokens:
attentions.append(viz_token.attention)
global_attention = max(attentions) * 1.33
# check user was right to decide the color of the tokens (red vs green)
# correct_answered decides the color
for viz_token in viz_tokens:
# print(token)
viz_token.draw_PIL(drw, global_attention, correct_answered, human, almost_correct)
#img.save(img_name)
#return img_name
imshow(np.asarray(img))
fig = plt.gcf()
fig.set_size_inches(18.5, 10.5)
if human:
plt.title('Human')
else:
plt.title('Machine')
ax = plt.gca()
return fig, ax
def plot_statistics(df, column_name, ax=None, color='blue'):
df = deepcopy(df)
mean = df[column_name].mean()
median = df[column_name].median()
if ax is None:
f, (ax_box, ax_hist) = plt.subplots(2, sharex=True, gridspec_kw= {"height_ratios": (0.2, 1)})
sns.boxplot(x=df[column_name], ax=ax_box, color=color)
ax_box.axvline(mean, color='r', linestyle='--')
ax_box.axvline(median, color='g', linestyle='-')
ax_box.set(xlabel='')
ax_box.yaxis.label.set_size(14)
ax_box.xaxis.label.set_size(14)
else:
ax_hist = ax
sns.histplot(x=df[column_name], ax=ax_hist, color=color)
ax_hist.axvline(mean, color='r', linestyle='--')
ax_hist.axvline(median, color='g', linestyle='-')
ax_hist.legend({f'Mean {mean:.2f}':mean, f'Median {median:.2f}':median})
ax_hist.yaxis.label.set_size(14)
ax_hist.xaxis.label.set_size(14)
if ax is None:
plt.show()
def inspect(df, column_name, comparer,
machine_col='att_vector_avg',
human_col='att_vector_w_click',
n_records_per_side=5,
center=False,
center_position=0.5,
columns_to_observe=None):
df = df.sort_values(by=column_name, ascending=True)
df = df.drop_duplicates(subset='uuid')
if center:
center_position = int(len(df) * center_position)
uuid_center = \
df.iloc[center_position - n_records_per_side:center_position + n_records_per_side]['uuid']
randomcode_center = \
df.iloc[center_position - n_records_per_side:center_position + n_records_per_side]['randomcode']
for uuid, randomcode in zip(uuid_center, randomcode_center):
comparer.plot_token_heatmap_side_by_side(
machine_col=machine_col,
human_col=human_col,
only_uuids=[uuid],
only_users=[randomcode],
columns_to_observe=columns_to_observe
)
else:
# head
print(f'Low value of {column_name}')
uuid_head = df.head(n_records_per_side)['uuid']
randomcode_head = df.head(n_records_per_side)['randomcode']
for uuid, randomcode in zip(uuid_head, randomcode_head):
comparer.plot_token_heatmap_side_by_side(
machine_col=machine_col,
human_col=human_col,
only_uuids=[uuid],
only_users=[randomcode],
columns_to_observe=columns_to_observe
)
#print(uuid_head)
# tail
print(f'High value of {column_name}')
uuid_tail = df.tail(n_records_per_side)['uuid']
randomcode_tail = df.tail(n_records_per_side)['randomcode']
#print(uuid_tail)
for uuid, randomcode in zip(uuid_tail, randomcode_tail):
comparer.plot_token_heatmap_side_by_side(
machine_col=machine_col,
human_col=human_col,
only_uuids=[uuid],
only_users=[randomcode],
columns_to_observe=columns_to_observe
)
# ----------------------------------------------------------------
from matplotlib import colors
class FlexibleVisualToken(object):
def __init__(self, index_id, text, x, y, width, height, clicked):
self.text = text
if (index_id == ""):
index_id = -1
self.index_id = int(index_id)
self.x = int(x)
self.y = int(y)
self.width = int(width)
self.height = int(height)
self.attention = 0
self.clicked = clicked
def draw_PIL(self, drw, global_attention=0,
named_color='lime'):
"""Draw the patch on the plot."""
alpha = 0.1
if global_attention != 0:
alpha = int((float(self.attention) / global_attention) * 255)
if self.attention == 0:
alpha = 0
color_rgb = list(colors.to_rgb(named_color))
color_rgb = [int(c * 255) for c in color_rgb]
color_rgba = color_rgb + [alpha]
color_rgba = tuple(color_rgba)
border = None
if self.clicked:
border = 'red'
rect = \
drw.rectangle([
self.x,
self.y,
self.x + self.width,
self.y + self.height],
outline=border,
width=2,
fill=color_rgba)
def add_attention(self, attention):
self.attention = attention
def __repr__(self):
return 'x:' + str(self.x).zfill(3) \
+ ' - y:' + str(self.y).zfill(3) \
+ ' - width:' + str(self.width).zfill(4) \
+ ' - height:' + str(self.height).zfill(4) \
+ ' - |' + self.text + '|'
def plot_maps(df,
weight_cols=[],
label_cols=None,
colors_for_cols=None,
predictor_entity_name='Entity Predictor',
prediction_col=None,
max_records=None,
output_in_folder=None,
add_participant_id=True,
add_square_for_clicks=False,
limit_visualization_to=3):
"""Print the attention weights on the method body."""
assert len(weight_cols) > 0
assert len(df) > 0
counter_visualized_maps = 0
for i, row in enumerate(df.iterrows()):
#print(i)
if max_records is not None and i > max_records:
break
content = row[1]
for j, attention_type in enumerate(weight_cols):
named_color = colors_for_cols[j] \
if colors_for_cols is not None else 'red'
final_clicked_tokens = content['finalclickedtokens'] \
if add_square_for_clicks else []
fig, ax = plot_single_map(
tokens_in_code=content['tokens_in_code'],
attention_weights=content[attention_type],
formattedcode=content['formattedcode'],
final_clicked_tokens=final_clicked_tokens,
named_color=named_color
)
if output_in_folder is not None:
attention_name = label_cols[j] \
if label_cols is not None else attention_type
filename = f'{i}-{predictor_entity_name}-{attention_name}-mtd:{content["uuid"]}'
if add_participant_id:
filename += f'-ptc:{content["randomcode"]}'
filename = "".join([c for c in filename if c != ' ']) + '.png'
filepath = os.path.join(output_in_folder, filename)
print(filepath)
prediction = content[prediction_col] \
if prediction_col is not None else 'undefined'
if isinstance(prediction, list):
prediction = [p for p in prediction if p != '%END%']
prediction = [p for p in prediction if p != '%UNK%']
title = f'{predictor_entity_name}: {prediction} - Original: {content["function_name"]}'
title += f' (what you see: {attention_name} weights)'
plt.title(title)
fig.savefig(filepath, format='png')
if counter_visualized_maps < limit_visualization_to:
plt.show()
counter_visualized_maps += 1
def plot_single_map(tokens_in_code,
attention_weights,
named_color,
formattedcode,
final_clicked_tokens=None):
"""Display attention of the given function."""
# PREPARE IMAGE
path_font_file = '../public/FreeMono.ttf'
surce_code_content = formattedcode
#img_name = folder + data['id'] + data['rawdictionarykey'][1:] + '.png'
ratio = (8.4/14)
char_height = 20
char_width = char_height * ratio
# compute max width required
lines = surce_code_content.splitlines()
lines_len = [len(line) for line in lines]
max_width = int(max(lines_len) * char_width)
max_height = int(char_height * len(lines))
img = Image.new('RGB', (max_width, max_height), color=(255, 255, 255))
fnt = ImageFont.truetype(path_font_file, char_height)
drw = ImageDraw.Draw(img, 'RGBA')
drw.text((0, 0), surce_code_content, font=fnt, fill=(0, 0, 0))
# CAN BE DELAYED AT AFTER TOKEN DRAWING img.save(img_name)
# check clicked tokens to draw squares around them
if final_clicked_tokens is not None:
clicked_tokens = np.array(final_clicked_tokens)
clicked_tokens_indices = np.where(clicked_tokens == 1)[0].tolist()
else:
clicked_tokens_indices = []
# INSTANTIATE TOKENS
# get the positon form the metadata of tokens
viz_tokens = []
# DEBUG print(tokens)
# DEBUG print(formattedcode)
for i, t in enumerate(tokens_in_code):
# print(t)
new_token = \
FlexibleVisualToken(
index_id=t['index_id'],
text=t['text'],
x=char_width * int(t['start_char']),
y=char_height * int(t['line']),
width=char_width * len(t['text']),
height=char_height,
clicked=(i in clicked_tokens_indices))
viz_tokens.append(new_token)
# COMPUTE ATTENTION
global_attention = 1
# compute attention
for att, viz_token in zip(attention_weights, viz_tokens):
viz_token.add_attention(att)
# COMPUTE REFERENCE ATTENTION TO RESCALE
# sum all the attention received by the tokens
global_attention = 0
attentions = []
for viz_token in viz_tokens:
attentions.append(viz_token.attention)
global_attention = max(attentions) * 1.33
# check user was right to decide the color of the tokens (red vs green)
# correct_answered decides the color
for viz_token in viz_tokens:
# print(token)
viz_token.draw_PIL(drw, global_attention, named_color)
#img.save(img_name)
#return img_name
imshow(np.asarray(img))
fig = plt.gcf()
#print(f'max_width: {max_width}')
#print(f'max_width: {max_height}')
FACTOR = 60
fig.set_size_inches(max_width / FACTOR, max_height / FACTOR)
plt.title('undefined')
ax = plt.gca()
return fig, ax
|
import json
from pathlib import Path
import subprocess
from sherlock.config import Config
from sherlock.core import Sherlock
def run_sherlock(robot_output, source, report=None, resource=None):
config = Config(from_cli=False)
config.output = robot_output
config.path = source
if report is not None:
config.report = report
if resource is not None:
config.resource = resource
sherlock = Sherlock(config=config)
sherlock.run() # TODO create special report readable by tests?
return sherlock
def get_output(output):
with open(output) as f:
data = json.load(f)
Path(output).unlink()
return data
def match_tree(expected, actual):
if expected["name"] != actual["name"]:
print(f"Expected name '{expected['name']}' does not match actual name '{actual['name']}'")
return False
if "keywords" in expected:
if len(expected["keywords"]) != len(actual["keywords"]):
print(
f"Expected number of keywords in {expected['name']}: {len(expected['keywords'])} "
f"does not match actual: {len(actual['keywords'])}"
)
return False
for exp_keyword, act_keyword in zip(expected["keywords"], actual["keywords"]):
if "used" not in exp_keyword:
act_keyword.pop("used", None)
if "complexity" not in exp_keyword:
act_keyword.pop("complexity", None)
if "status" not in exp_keyword:
act_keyword.pop("status", None)
if exp_keyword != act_keyword:
return False
if "children" in expected:
if len(expected["children"]) != len(actual["children"]):
print(
f"Expected length of children tree: {len(expected['children'])} "
f"does not match actual: {len(actual['children'])}"
)
return False
if not all(
match_tree(exp_child, act_child) for exp_child, act_child in zip(expected["children"], actual["children"])
):
return False
if "type" in expected:
if expected["type"] != actual["type"]:
print(f"Resource type does not match: {expected['type']} != {actual['type']}")
return False
return True
class Tree:
def __init__(self, name, keywords=None, children=None, res_type=None):
self.name = name
self.keywords = keywords
self.children = children
self.res_type = res_type
def to_json(self):
ret = {"name": self.name}
if self.keywords is not None:
ret["keywords"] = [kw.to_json() for kw in self.keywords]
if self.children is not None:
ret["children"] = [child.to_json() for child in self.children]
if self.res_type is not None:
ret["type"] = self.res_type
return ret
class Keyword:
def __init__(self, name, used=None, complexity=None):
self.name = name
self.used = used
self.complexity = complexity
def to_json(self):
ret = {"name": self.name}
if self.used is not None:
ret["used"] = self.used
if self.complexity is not None:
ret["complexity"] = self.complexity
return ret
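# --- Added, runnable illustration (not part of the original test helpers). ---
# Build a small expected tree and check that it matches its own JSON form.
def _example_tree_round_trip():
    kw = Keyword("Log Message", used=2, complexity=1)
    tree = Tree("test.robot", keywords=[kw], res_type="Resource")
    expected = tree.to_json()
    # A tree always matches itself; with real data, the second argument would
    # be the tree loaded from the sherlock JSON report instead.
    assert match_tree(expected, expected)
    return expected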
class AcceptanceTest:
ROOT = Path()
TEST_PATH = "test.robot"
def run_robot(self):
source = self.ROOT / self.TEST_PATH
cmd = f"robot --outputdir {self.ROOT} {source}"
subprocess.run(cmd, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
def remove_robot_files(self):
for path in (self.ROOT / "log.html", self.ROOT / "output.xml", self.ROOT / "report.html"):
path.unlink(missing_ok=True)
def setup_method(self):
self.run_robot()
def teardown_method(self):
self.remove_robot_files()
def run_sherlock(self, source=None, resource=None):
robot_output = self.ROOT / "output.xml"
source = source or self.ROOT
run_sherlock(robot_output=robot_output, source=source, report=["json"], resource=resource)
data = get_output(f"sherlock_{source.name}.json")
return data
@staticmethod
def should_match_tree(expected_tree, actual):
expected = expected_tree.to_json()
assert match_tree(expected, actual)
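# --- Added usage sketch (illustration only, not part of the original helpers). ---
# A pytest-style acceptance test is assumed to subclass AcceptanceTest, point
# ROOT at a directory containing a `test.robot` suite, and compare the sherlock
# JSON report against an expected Tree. The path and expected structure below
# are hypothetical; running it requires Robot Framework and sherlock installed.
class ExampleProjectTest(AcceptanceTest):
    ROOT = Path("tests") / "data" / "example_project"   # hypothetical location
    def test_report_structure(self):
        data = self.run_sherlock()
        expected = Tree(
            name=self.ROOT.name,
            children=[Tree(name="test.robot", res_type="Resource")],
        )
        self.should_match_tree(expected, data)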
| import json
from pathlib import Path
import subprocess
from sherlock.config import Config
from sherlock.core import Sherlock
def run_sherlock(robot_output, source, report=None, resource=None):
config = Config(from_cli=False)
config.output = robot_output
config.path = source
if report is not None:
config.report = report
if resource is not None:
config.resource = resource
sherlock = Sherlock(config=config)
sherlock.run() # TODO create special report readable by tests?
return sherlock
def get_output(output):
with open(output) as f:
data = json.load(f)
Path(output).unlink()
return data
def match_tree(expected, actual):
if expected["name"] != actual["name"]:
print(f"Expected name '{expected['name']}' does not match actual name '{actual['name']}'")
return False
if "keywords" in expected:
if len(expected["keywords"]) != len(actual["keywords"]):
print(
f"Expected number of keywords in {expected['name']}: {len(expected['keywords'])} "
f"does not match actual: {len(actual['keywords'])}"
)
return False
for exp_keyword, act_keyword in zip(expected["keywords"], actual["keywords"]):
if "used" not in exp_keyword:
act_keyword.pop("used", None)
if "complexity" not in exp_keyword:
act_keyword.pop("complexity", None)
if "status" not in exp_keyword:
act_keyword.pop("status", None)
if exp_keyword != act_keyword:
return False
if "children" in expected:
if len(expected["children"]) != len(actual["children"]):
print(
f"Expected length of children tree: {len(expected['children'])} "
f"does not match actual: {len(actual['children'])}"
)
return False
if not all(
match_tree(exp_child, act_child) for exp_child, act_child in zip(expected["children"], actual["children"])
):
return False
if "type" in expected:
if expected["type"] != actual["type"]:
print(f"Resource type does not match: {expected['type']} != {actual['type']}")
return False
return True
class Tree:
def __init__(self, name, keywords=None, children=None, res_type=None):
self.name = name
self.keywords = keywords
self.children = children
self.res_type = res_type
def to_json(self):
ret = {"name": self.name}
if self.keywords is not None:
ret["keywords"] = [kw.to_json() for kw in self.keywords]
if self.children is not None:
ret["children"] = [child.to_json() for child in self.children]
if self.res_type is not None:
ret["type"] = self.res_type
return ret
class Keyword:
def __init__(self, name, used=None, complexity=None):
self.name = name
self.used = used
self.complexity = complexity
def to_json(self):
ret = {"name": self.name}
if self.used is not None:
ret["used"] = self.used
if self.complexity is not None:
ret["complexity"] = self.complexity
return ret
class AcceptanceTest:
ROOT = Path()
TEST_PATH = "test.robot"
def run_robot(self):
source = self.ROOT / self.TEST_PATH
cmd = f"robot --outputdir {self.ROOT} {source}"
subprocess.run(cmd, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
def remove_robot_files(self):
for path in (self.ROOT / "log.html", self.ROOT / "output.xml", self.ROOT / "report.html"):
path.unlink(missing_ok=True)
def setup_method(self):
self.run_robot()
def teardown_method(self):
self.remove_robot_files()
def run_sherlock(self, source=None, resource=None):
robot_output = self.ROOT / "output.xml"
source = source or self.ROOT
run_sherlock(robot_output=robot_output, source=source, report=["json"], resource=resource)
data = get_output(f"sherlock_{source.name}.json")
return data
@staticmethod
def should_match_tree(expected_tree, actual):
expected = expected_tree.to_json()
assert match_tree(expected, actual)
|
from datetime import datetime, timedelta
from os import listdir, mkdir
from os.path import exists, join
import sys
from typing import List, Tuple, TypeVar
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
from math import floor
from multiprocessing import Process
import gc
import importlib
# Settings for input/output, basic plot stuff
GENERAL_SETTINGS = {
'id': 'general',
'overall_plot_folder': 'plot_overall',
'section_plot_folder': 'plot_section',
'figure_size': (17, 7),
'num_ticks': 50
}
WIDE_SETTINGS = {
'id': 'wide',
'overall_plot_folder': 'plot_overall_wide',
'section_plot_folder': 'plot_section_wide',
'figure_size': (50, 10),
'num_ticks': 200
}
FSP_SETTINGS = {
'id': 'fsp',
'overall_plot_folder': 'plot_overall_fsp',
'section_plot_folder': 'plot_section_fsp',
'figure_size': (17, 7),
'num_ticks': 50
}
OVERALL_FOLDER = 'overall'
SECTION_FOLDER = 'section'
# For the plotconfig.py file
MARKERS = "markers"
MARKER_DATES = 'd'
MARKER_TIME = 't'
LINE_STYLE = 'l'
LINE_COLOR = 'c'
NAME_OF_MARKER = 'n'
CONFIG_SETTINGS = 'settings'
SHADE = 's'
# Multiprocessing options
CHUNK_SIZE = 20
WIDE_CHUNK_SIZE = 10
PROCESS_COUNT = 10
T = TypeVar('T')
def subsets_with_limits(arr: List[T], num_subsets: int, max_per_elem: int) -> List[List[T]]:
arr.reverse()
subsets = []
len_to_use = max(0, len(arr) - max_per_elem * num_subsets)
idx = 0
while len(arr) > len_to_use:
if idx < len(subsets):
subsets[idx].append(arr.pop())
idx = (idx + 1) % num_subsets
continue
subsets.append([arr.pop()])
idx = (idx + 1) % num_subsets
arr.reverse()
return subsets
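# --- Added, runnable illustration (not part of the original script). ---
# The first num_subsets * max_per_elem items are dealt round-robin into the
# returned subsets; any leftover items remain in the (mutated) input list so
# they can be scheduled in a later round.
def _example_subsets_with_limits():
    files = ['a', 'b', 'c', 'd', 'e', 'f', 'g']
    chunks = subsets_with_limits(files, num_subsets=3, max_per_elem=2)
    assert chunks == [['a', 'd'], ['b', 'e'], ['c', 'f']]
    assert files == ['g']   # leftover work for the next outer-loop round
    return chunks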
def process_overall(num: int, files: List[str], from_folder: str, out_folder: str, settings, config):
"""
Processes the folder containing overall data.
:param num: The process label number (just for identification).
:param files: List of files to process
:param from_folder: Folder to read from
:param out_folder: Folder to write to
:param settings: Settings to use
:param config: The configuration object, from the plotconfig.py file.
"""
# Uncomment if you want to skip the images that were already generated
# temp_files = [f for f in listdir(out_folder) if exists(join(out_folder, f))]
completed = 0
for file in files:
print(f"\t[{num}] Processing {file}.")
#if file.replace('csv', 'png') in temp_files:
# completed += 1
# print(f"\t\t[{num}] Skipped {file} (Completed {completed}/{len(files)}).")
# continue
# Read in our CSV file
df = pd.read_csv(join(from_folder, file))
if settings['id'] == 'fsp':
if len(config[MARKERS]) == 0 or "end" not in config[MARKERS][-1][NAME_OF_MARKER].lower():
completed += 1
print(f'\t\t[{num}] Skipped {file} due to no end marker despite fsp (Completed {completed}/{len(files)}).')
continue
end_date_str = config[MARKERS][-1][MARKER_DATES][-1]
# Parse this date
end_date = datetime.strptime(end_date_str, '%Y-%m-%d') + timedelta(days=1)
# Filter all rows in df so that the date is earlier than the end date, noting that
# the date in df['time'] needs to be converted first
df = df[df['time'].apply(lambda x: datetime.strptime(x, "%Y-%m-%dT%H:%M:%S") < end_date)]
if len(df.index) == 0:
completed += 1
print(f"\t\t[{num}] Skipped {file} (Completed {completed}/{len(files)}).")
continue
# Adjust the figure so it's big enough for display
plt.figure(figsize=settings['figure_size'])
max_y = 0
# Plot the number of available & waitlisted seats
if config[CONFIG_SETTINGS]['showTotal']:
sns.lineplot(data=df, x='time', y='total', color='purple', label='Total Seats', linestyle='--', linewidth=4)
max_y = df['total'].max()
sns.lineplot(data=df, x='time', y='waitlisted', color='blue', label='Waitlisted', linewidth=1)
max_y = max(max_y, df['waitlisted'].max())
if config[CONFIG_SETTINGS]['useEnrolledTtl']:
sns.lineplot(data=df, x='time', y='enrolled', color='red', label='Enrolled', linewidth=2)
max_y = max(df['enrolled'].max(), max_y)
else:
sns.lineplot(data=df, x='time', y='available', color='red', label='Available', linewidth=2)
max_y = max(df['available'].max(), max_y)
plot = plt.gca()
# Modify plot properties to make it more readable
title = file.replace('.csv', '')
if '_' in title:
course, section = title.split('_')
title = f'{course}, Section {section}'
plot.set_title(title + f" ({config[CONFIG_SETTINGS]['termName']})")
plot.set_xlabel('Time')
plot.set_ylabel('Seats')
plot.grid(True)
plot.margins(0)
# Set bottom-left corner to (0, 0)
plt.xlim(xmin=0)
plt.ylim(ymin=0, ymax=max(1.05*max_y, 1))
# To make the x-axis more readable, purposely hide some dates and then
# adjust the labels appropriately
plt.setp(plot.xaxis.get_majorticklabels(), rotation=45, ha="right")
# We want NUM_TICKS ticks on the x-axis
plot.xaxis.set_major_locator(ticker.MultipleLocator(max(floor(len(df) / settings['num_ticks']), 1)))
plot.yaxis.set_major_locator(ticker.MaxNLocator(integer=True))
if config[CONFIG_SETTINGS]['useMarkers']:
p_max = 2 if config[CONFIG_SETTINGS]['isNormal'] else 1
all_dates = df['time'].tolist()
# map all dates in all_dates to a tuple of string date and datetime object
all_dates: Tuple[str, datetime] = list(map(lambda x: (x, datetime.strptime(x, "%Y-%m-%dT%H:%M:%S")), all_dates))
spans = []
spans2 = []
seen_grades = set()
for marker in config[MARKERS]:
# index [0, 1] -> 0 = first pass, 1 = second pass
for p in range(0, p_max):
hr = marker[MARKER_TIME]
date = marker[MARKER_DATES][p]
# find the first date in all_dates whose date is equal to date
# and has the closest hour to hr
axis_date = list(filter(lambda x: x[1].strftime("%Y-%m-%d") == date and (x[1].hour == hr or\
x[1].hour == hr + 1 or x[1].hour == hr + 2 or x[1].hour == hr + 3), all_dates))
if len(axis_date) == 0:
continue
if marker[SHADE]:
(spans if p == 0 else spans2).append({
'start': axis_date[0][0],
'color': marker[LINE_COLOR],
'legend': marker[NAME_OF_MARKER],
})
plt.axvline(x=axis_date[0][0], \
color=marker[LINE_COLOR], \
linestyle=marker[LINE_STYLE], \
label=None if marker[NAME_OF_MARKER] in seen_grades else marker[NAME_OF_MARKER])
seen_grades.add(marker[NAME_OF_MARKER])
# The two span lists are kept separate so that the "End" marker from the
# first pass is not added to the graph.
seen_shades = set()
# For first-pass stuff
for i in range(0, len(spans) - 1):
# fill plot between combined_spans[i] and combined_spans[i+1]
plt.axvspan(spans[i]['start'], \
spans[i+1]['start'], \
color=spans[i]['color'], \
alpha=0.2, \
label=None if spans[i]['legend'] in seen_shades else spans[i]['legend'])
seen_shades.add(spans[i]['legend'])
# For second-pass stuff
for i in range(0, len(spans2) - 1):
# fill plot between combined_spans[i] and combined_spans[i+1]
plt.axvspan(spans2[i]['start'], \
spans2[i+1]['start'], \
color=spans2[i]['color'], \
alpha=0.2, \
label=None if spans2[i]['legend'] in seen_shades else spans2[i]['legend'])
seen_shades.add(spans2[i]['legend'])
# https://matplotlib.org/2.0.2/users/legend_guide.html
plt.legend(bbox_to_anchor=(1.02, 1), loc=2, borderaxespad=0.)
# Adjusts the padding
plt.tight_layout()
# Then, saves the figure and closes it to save memory
fig = plot.get_figure()
fig.savefig(join(out_folder, file.replace('.csv', '')))
# Clear the plot, close it, and clear the memory
plot.cla()
plt.clf()
plt.cla()
plt.close('all')
del plot
del fig
del df
gc.collect()
completed += 1
print(f"\t\t[{num}] Finished {file} (Completed {completed}/{len(files)}).")
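# --- Added sketch of the plotconfig.py CONFIG structure consumed above. ---
# This is reconstructed from the keys the script reads and is only an
# assumption about the real file; actual marker names, dates, colors, and
# settings will differ per term.
EXAMPLE_CONFIG = {
    CONFIG_SETTINGS: {
        'termName': 'FA22',        # hypothetical term label
        'showTotal': True,
        'useEnrolledTtl': False,
        'useMarkers': True,
        'isNormal': True,
    },
    MARKERS: [
        {
            NAME_OF_MARKER: 'First/Second Pass End',
            MARKER_DATES: ['2022-09-16', '2022-09-23'],   # [first pass, second pass]
            MARKER_TIME: 8,                               # hour of day to anchor the line
            LINE_STYLE: '--',
            LINE_COLOR: 'gray',
            SHADE: False,
        },
    ],
}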
if __name__ == '__main__':
if len(sys.argv) != 3:
print("Usage: plot.py <base folder> <'s', 'o', 'sw', 'ow', 'sfsp', 'ofsp'>")
sys.exit(1)
# Get the cleaned folder
base_folder = sys.argv[-2]
if not exists(base_folder):
print(f"Folder '{base_folder}' does not exist")
sys.exit(1)
# Get the type of data to process
dt = sys.argv[-1]
if dt not in ['s', 'o', 'sw', 'ow', 'sfsp', 'ofsp']:
print(f"Invalid data type '{dt}' - must be one of:")
print("\t's' (section)")
print("\t'o' (overall)")
print("\t'sw' (section, wide display)")
print("\t'ow' (overall, wide display)")
print("\t'sfsp' (section, first/second-pass only)")
print("\t'ofsp' (overall, first/second-pass only)")
sys.exit(1)
# Get the relevant configuration object
try:
config = importlib.import_module(f'{base_folder}.plotconfig').CONFIG
except ModuleNotFoundError:
print(f'{base_folder} does not contain a plotconfig.py file. Please set one up and then try again.')
sys.exit(1)
chunk_size = CHUNK_SIZE
if dt in ['s', 'o']:
settings_obj = GENERAL_SETTINGS
elif dt in ['sw', 'ow']:
settings_obj = WIDE_SETTINGS
chunk_size = WIDE_CHUNK_SIZE
elif dt in ['sfsp', 'ofsp']:
settings_obj = FSP_SETTINGS
plot_folder = join(base_folder, settings_obj['overall_plot_folder'] if dt in ['o', 'ow', 'ofsp'] else settings_obj['section_plot_folder'])
if not exists(plot_folder):
mkdir(plot_folder)
in_folder = join(base_folder, OVERALL_FOLDER if dt in ['o', 'ow', 'ofsp'] else SECTION_FOLDER)
all_files = listdir(in_folder)
# If we're working with sections, we only want the files that appear more than once
# Categorize each file by the class that they represent.
if dt == 's':
# The key is the course (e.g. CSE 100.csv) and the value is a list
# of all sections (e.g. CSE 100_A.csv)
file_secs = {}
for file in all_files:
f_name = file.split('_')[0]
if f_name not in file_secs:
file_secs[f_name] = [file]
else:
file_secs[f_name].append(file)
all_files = []
for f_name in file_secs:
if len(file_secs[f_name]) > 1:
all_files += file_secs[f_name]
# Begin running
print(f'Processing {len(all_files)} files into chunks of {chunk_size} files each.')
print(f"\tWide? {dt == 'sw' or dt == 'ow'}")
print(f'\tInput Folder: {in_folder}')
print(f'\tPlot Folder: {plot_folder}')
print(f'\tProcesses: {PROCESS_COUNT}')
len_of_files = len(all_files)
completed = 0
while len(all_files) > 0:
files_to_process = subsets_with_limits(all_files, PROCESS_COUNT, chunk_size)
processes = []
# Limit ourselves to PROCESS_COUNT processes, or else we might
# end up crashing the host device with too many processes.
for (i, chunk) in enumerate(files_to_process):
print(f'Starting process {i} (with count {len(chunk)}).')
# Create a process to process the chunk
p = Process(target=process_overall, args=(i, \
chunk, \
in_folder, \
plot_folder, \
settings_obj, \
config))
p.start()
processes.append(p)
# Wait for all processes to finish
for p in processes:
p.join()
completed += sum(len(x) for x in files_to_process)
print(f'\t\tCompleted {completed}/{len_of_files} files ({len(all_files)} left).') | from datetime import datetime, timedelta
from os import listdir, mkdir
from os.path import exists, join
import sys
from typing import List, Tuple, TypeVar
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
from math import floor
from multiprocessing import Process
import gc
import importlib
# Settings for input/output, basic plot stuff
GENERAL_SETTINGS = {
'id': 'general',
'overall_plot_folder': 'plot_overall',
'section_plot_folder': 'plot_section',
'figure_size': (17, 7),
'num_ticks': 50
}
WIDE_SETTINGS = {
'id': 'wide',
'overall_plot_folder': 'plot_overall_wide',
'section_plot_folder': 'plot_section_wide',
'figure_size': (50, 10),
'num_ticks': 200
}
FSP_SETTINGS = {
'id': 'fsp',
'overall_plot_folder': 'plot_overall_fsp',
'section_plot_folder': 'plot_section_fsp',
'figure_size': (17, 7),
'num_ticks': 50
}
OVERALL_FOLDER = 'overall'
SECTION_FOLDER = 'section'
# For the plotconfig.py file
MARKERS = "markers"
MARKER_DATES = 'd'
MARKER_TIME = 't'
LINE_STYLE = 'l'
LINE_COLOR = 'c'
NAME_OF_MARKER = 'n'
CONFIG_SETTINGS = 'settings'
SHADE = 's'
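# Illustrative sketch (not from an actual plotconfig.py) of the CONFIG shape that
# process_overall below expects, inferred from how the keys above are read:
#
# CONFIG = {
#     'settings': {'termName': 'FA22', 'showTotal': True, 'useEnrolledTtl': False,
#                  'useMarkers': True, 'isNormal': True},
#     'markers': [
#         {'n': 'First Pass', 'd': ['2022-05-23', '2022-06-06'], 't': 8,
#          'l': '--', 'c': 'green', 's': True},
#         {'n': 'End of Enrollment', 'd': ['2022-06-03', '2022-06-17'], 't': 16,
#          'l': '-', 'c': 'black', 's': False},
#     ],
# }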
# Multiprocessing options
CHUNK_SIZE = 20
WIDE_CHUNK_SIZE = 10
PROCESS_COUNT = 10
T = TypeVar('T')
def subsets_with_limits(arr: List[T], num_subsets: int, max_per_elem: int) -> List[List[T]]:
arr.reverse()
subsets = []
len_to_use = max(0, len(arr) - max_per_elem * num_subsets)
idx = 0
while len(arr) > len_to_use:
if idx < len(subsets):
subsets[idx].append(arr.pop())
idx = (idx + 1) % num_subsets
continue
subsets.append([arr.pop()])
idx = (idx + 1) % num_subsets
arr.reverse()
return subsets
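# subsets_with_limits takes up to num_subsets * max_per_elem items from the front of `arr`
# (mutating it in place) and deals them round-robin into at most num_subsets buckets.
# For example, subsets_with_limits(list(range(7)), 2, 3) returns [[0, 2, 4], [1, 3, 5]]
# and leaves [6] in the list that was passed in.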
def process_overall(num: int, files: List[str], from_folder: str, out_folder: str, settings, config):
"""
Processes the folder containing overall data.
:param num: The process label number (just for identification).
:param files: List of files to process
:param from_folder: Folder to read from
:param out_folder: Folder to write to
:param settings: Settings to use
:param config: The configuration object, from the plotconfig.py file.
"""
# Uncomment if you want to skip the images that were already generated
# temp_files = [f for f in listdir(out_folder) if exists(join(out_folder, f))]
completed = 0
for file in files:
print(f"\t[{num}] Processing {file}.")
#if file.replace('csv', 'png') in temp_files:
# completed += 1
# print(f"\t\t[{num}] Skipped {file} (Completed {completed}/{len(files)}).")
# continue
# Read in our CSV file
df = pd.read_csv(join(from_folder, file))
if settings['id'] == 'fsp':
if len(config[MARKERS]) == 0 or "end" not in config[MARKERS][-1][NAME_OF_MARKER].lower():
completed += 1
print(f'\t\t[{num}] Skipped {file} due to no end marker despite fsp (Completed {completed}/{len(files)}).')
continue
end_date_str = config[MARKERS][-1][MARKER_DATES][-1]
# Parse this date
end_date = datetime.strptime(end_date_str, '%Y-%m-%d') + timedelta(days=1)
# Filter all rows in df so that the date is earlier than the end date, noting that
# the date in df['time'] needs to be converted first
df = df[df['time'].apply(lambda x: datetime.strptime(x, "%Y-%m-%dT%H:%M:%S") < end_date)]
if len(df.index) == 0:
completed += 1
print(f"\t\t[{num}] Skipped {file} (Completed {completed}/{len(files)}).")
continue
# Adjust the figure so it's big enough for display
plt.figure(figsize=settings['figure_size'])
max_y = 0
# Plot the number of available & waitlisted seats
if config[CONFIG_SETTINGS]['showTotal']:
sns.lineplot(data=df, x='time', y='total', color='purple', label='Total Seats', linestyle='--', linewidth=4)
max_y = df['total'].max()
sns.lineplot(data=df, x='time', y='waitlisted', color='blue', label='Waitlisted', linewidth=1)
max_y = max(max_y, df['waitlisted'].max())
if config[CONFIG_SETTINGS]['useEnrolledTtl']:
sns.lineplot(data=df, x='time', y='enrolled', color='red', label='Enrolled', linewidth=2)
max_y = max(df['enrolled'].max(), max_y)
else:
sns.lineplot(data=df, x='time', y='available', color='red', label='Available', linewidth=2)
max_y = max(df['available'].max(), max_y)
plot = plt.gca()
# Modify plot properties to make it more readable
title = file.replace('.csv', '')
if '_' in title:
course, section = title.split('_')
title = f'{course}, Section {section}'
plot.set_title(title + f' ({config[CONFIG_SETTINGS]["termName"]})')
plot.set_xlabel('Time')
plot.set_ylabel('Seats')
plot.grid(True)
plot.margins(0)
# Set bottom-left corner to (0, 0)
plt.xlim(xmin=0)
plt.ylim(ymin=0, ymax=max(1.05*max_y, 1))
# To make the x-axis more readable, purposely hide some dates and then
# adjust the labels appropriately
plt.setp(plot.xaxis.get_majorticklabels(), rotation=45, ha="right")
        # Aim for roughly settings['num_ticks'] ticks on the x-axis
plot.xaxis.set_major_locator(ticker.MultipleLocator(max(floor(len(df) / settings['num_ticks']), 1)))
plot.yaxis.set_major_locator(ticker.MaxNLocator(integer=True))
if config[CONFIG_SETTINGS]['useMarkers']:
p_max = 2 if config[CONFIG_SETTINGS]['isNormal'] else 1
all_dates = df['time'].tolist()
            # map each entry in all_dates to a (string date, datetime object) pair
            all_dates: List[Tuple[str, datetime]] = list(map(lambda x: (x, datetime.strptime(x, "%Y-%m-%dT%H:%M:%S")), all_dates))
spans = []
spans2 = []
seen_grades = set()
for marker in config[MARKERS]:
# index [0, 1] -> 0 = first pass, 1 = second pass
for p in range(0, p_max):
hr = marker[MARKER_TIME]
date = marker[MARKER_DATES][p]
                    # find the first timestamp in all_dates that falls on this date
                    # and whose hour is within three hours after hr
axis_date = list(filter(lambda x: x[1].strftime("%Y-%m-%d") == date and (x[1].hour == hr or\
x[1].hour == hr + 1 or x[1].hour == hr + 2 or x[1].hour == hr + 3), all_dates))
if len(axis_date) == 0:
continue
if marker[SHADE]:
(spans if p == 0 else spans2).append({
'start': axis_date[0][0],
'color': marker[LINE_COLOR],
'legend': marker[NAME_OF_MARKER],
})
plt.axvline(x=axis_date[0][0], \
color=marker[LINE_COLOR], \
linestyle=marker[LINE_STYLE], \
label=None if marker[NAME_OF_MARKER] in seen_grades else marker[NAME_OF_MARKER])
seen_grades.add(marker[NAME_OF_MARKER])
            # Note: the first- and second-pass span lists are kept separate so that the
            # first-pass "End" marker does not start a shaded region on the graph.
seen_shades = set()
# For first-pass stuff
for i in range(0, len(spans) - 1):
                # shade the plot between spans[i] and spans[i+1]
plt.axvspan(spans[i]['start'], \
spans[i+1]['start'], \
color=spans[i]['color'], \
alpha=0.2, \
label=None if spans[i]['legend'] in seen_shades else spans[i]['legend'])
seen_shades.add(spans[i]['legend'])
# For second-pass stuff
for i in range(0, len(spans2) - 1):
                # shade the plot between spans2[i] and spans2[i+1]
plt.axvspan(spans2[i]['start'], \
spans2[i+1]['start'], \
color=spans2[i]['color'], \
alpha=0.2, \
label=None if spans2[i]['legend'] in seen_shades else spans2[i]['legend'])
seen_shades.add(spans2[i]['legend'])
# https://matplotlib.org/2.0.2/users/legend_guide.html
plt.legend(bbox_to_anchor=(1.02, 1), loc=2, borderaxespad=0.)
# Adjusts the padding
plt.tight_layout()
# Then, saves the figure and closes it to save memory
fig = plot.get_figure()
fig.savefig(join(out_folder, file.replace('.csv', '')))
# Clear the plot, close it, and clear the memory
plot.cla()
plt.clf()
plt.cla()
plt.close('all')
del plot
del fig
del df
gc.collect()
completed += 1
print(f"\t\t[{num}] Finished {file} (Completed {completed}/{len(files)}).")
if __name__ == '__main__':
if len(sys.argv) != 3:
print("Usage: plot.py <base folder> <'s', 'o', 'sw', 'ow', 'sfsp', 'ofsp'>")
sys.exit(1)
# Get the cleaned folder
base_folder = sys.argv[-2]
if not exists(base_folder):
print(f"Folder '{base_folder}' does not exist")
sys.exit(1)
# Get the type of data to process
dt = sys.argv[-1]
if dt not in ['s', 'o', 'sw', 'ow', 'sfsp', 'ofsp']:
print(f"Invalid data type '{dt}' - must be one of:")
print("\t's' (section)")
print("\t'o' (overall)")
print("\t'sw' (section, wide display)")
print("\t'ow' (overall, wide display)")
print("\t'sfsp' (section, first/second-pass only)")
print("\t'ofsp' (overall, first/second-pass only)")
sys.exit(1)
# Get the relevant configuration object
try:
config = importlib.import_module(f'{base_folder}.plotconfig').CONFIG
except ModuleNotFoundError:
print(f'{base_folder} does not contain a plotconfig.py file. Please set one up and then try again.')
exit(1)
chunk_size = CHUNK_SIZE
if dt in ['s', 'o']:
settings_obj = GENERAL_SETTINGS
elif dt in ['sw', 'ow']:
settings_obj = WIDE_SETTINGS
chunk_size = WIDE_CHUNK_SIZE
elif dt in ['sfsp', 'ofsp']:
settings_obj = FSP_SETTINGS
plot_folder = join(base_folder, settings_obj['overall_plot_folder'] if dt in ['o', 'ow', 'ofsp'] else settings_obj['section_plot_folder'])
if not exists(plot_folder):
mkdir(plot_folder)
in_folder = join(base_folder, OVERALL_FOLDER if dt in ['o', 'ow', 'ofsp'] else SECTION_FOLDER)
all_files = listdir(in_folder)
# If we're working with sections, we only want the files that appear more than once
# Categorize each file by the class that they represent.
if dt == 's':
# The key is the course (e.g. CSE 100.csv) and the value is a list
# of all sections (e.g. CSE 100_A.csv)
file_secs = {}
for file in all_files:
f_name = file.split('_')[0]
if f_name not in file_secs:
file_secs[f_name] = [file]
else:
file_secs[f_name].append(file)
all_files = []
for f_name in file_secs:
if len(file_secs[f_name]) > 1:
all_files += file_secs[f_name]
# Begin running
print(f'Processing {len(all_files)} files into chunks of {chunk_size} files each.')
print(f'\tWide? {dt == "sw" or dt == "ow"}')
print(f'\tInput Folder: {in_folder}')
print(f'\tPlot Folder: {plot_folder}')
print(f'\tProcesses: {PROCESS_COUNT}')
len_of_files = len(all_files)
completed = 0
while len(all_files) > 0:
files_to_process = subsets_with_limits(all_files, PROCESS_COUNT, chunk_size)
processes = []
# Limit ourselves to PROCESS_COUNT processes, or else we might
# end up crashing the host device with too many processes.
for (i, chunk) in enumerate(files_to_process):
print(f'Starting process {i} (with count {len(chunk)}).')
# Create a process to process the chunk
p = Process(target=process_overall, args=(i, \
chunk, \
in_folder, \
plot_folder, \
settings_obj, \
config))
p.start()
processes.append(p)
# Wait for all processes to finish
for p in processes:
p.join()
completed += sum(len(x) for x in files_to_process)
print(f'\t\tCompleted {completed}/{len_of_files} files ({len(all_files)} left).') |
from random import randint
import random
# You've built an inflight entertainment system with on-demand movie streaming.
# Users on longer flights like to start a second movie right when their first one ends, but they complain that the plane usually lands before they can see the ending.
# So you're building a feature for choosing two movies whose total runtimes will equal the exact flight length.
# Write a function that takes an integer flight_length (in minutes) and a list of integers movie_lengths (in minutes) and returns a boolean indicating whether there are two numbers in movie_lengths whose sum equals flight_length.
# Try to solve with Dictionary!
# When building your function:
# Assume your users will watch exactly two movies
# Don't make your users watch the same movie twice
# Optimize for runtime over memory
print("\n-----===== Start =====-----\n")
def get_movies(flight_length, movie_lengths):
possibility = False
movies = set()
for i in movie_lengths:
if i < flight_length:
if i in movies:
possibility = True
else:
movies.add(flight_length - i)
return possibility
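# An alternative sketch (not part of the original exercise code) following the
# "Try to solve with Dictionary!" hint above: map each movie length to the index
# where it was first seen and return as soon as a complement shows up.
def get_movies_dict(flight_length, movie_lengths):
    seen = {}  # movie length -> index of the first movie with that length
    for idx, length in enumerate(movie_lengths):
        if flight_length - length in seen:
            # an earlier, different movie pairs with this one exactly
            return True
        seen[length] = idx
    return False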
flight_length = randint(120,240)
movie_lengths = []
for i in range(randint(2,20)):
movie_lengths.append(randint(60,120))
print(f"Flight length: {flight_length} minutes")
print(f"Movie lengths: {movie_lengths}")
print(f"Can you watch two movies? {"Yes" if get_movies(flight_length, movie_lengths) else "No"}")
print("\n-----===== End =====-----")
|
from random import randint
import random
# You've built an inflight entertainment system with on-demand movie streaming.
# Users on longer flights like to start a second movie right when their first one ends, but they complain that the plane usually lands before they can see the ending.
# So you're building a feature for choosing two movies whose total runtimes will equal the exact flight length.
# Write a function that takes an integer flight_length (in minutes) and a list of integers movie_lengths (in minutes) and returns a boolean indicating whether there are two numbers in movie_lengths whose sum equals flight_length.
# Try to solve with Dictionary!
# When building your function:
# Assume your users will watch exactly two movies
# Don't make your users watch the same movie twice
# Optimize for runtime over memory
print("\n-----===== Start =====-----\n")
def get_movies(flight_length, movie_lengths):
possibility = False
movies = set()
for i in movie_lengths:
if i < flight_length:
if i in movies:
possibility = True
else:
movies.add(flight_length - i)
return possibility
flight_length = randint(120,240)
movie_lengths = []
for i in range(randint(2,20)):
movie_lengths.append(randint(60,120))
print(f"Flight length: {flight_length} minutes")
print(f"Movie lengths: {movie_lengths}")
print(f"Can you watch two movies? {'Yes' if get_movies(flight_length, movie_lengths) else 'No'}")
print("\n-----===== End =====-----")
|
from collections import defaultdict
#T composition table
T=defaultdict(dict)
U={'<','>','d','di','o','oi','m','mi','s','si','f','fi','='}
dur={'d','s','f'}
con={'di','si','fi'}
O={'d','s','f','di','si','fi','o','oi','='} #big overlap
T['<']={'<':{'<'},'>':U,'d':{'<','o','m','d','s'},
'di':{'<'},'o':{'<'},'oi':{'<','o','m','d','s'},
'm':{'<'},'mi':{'<','o','m','d','s'},'s':{'<'},
'si':{'<'},'f':{'<','o','m','d','s'},'fi':{'<'},'=':{'<'}}
T['>']={'<':U,'>':{'>'},'d':{'>','oi','mi','d','f'},
'di':{'>'},'o':{'>','oi','mi','d','f'},'oi':{'>'},
'm':{'>','oi','mi','d','f'},'mi':{'>'},
's':{'>','oi','mi','d','f'},'si':{'>'},'f':{'>'},
'fi':{'>'},'=':{'>'}}
T['d']={'<':{'<'},'>':{'>'},'d':{'d'},'di':U,
'o':{'<','o','m','d','s'},'oi':{'>','oi','mi','d','f'},
'm':{'<'},'mi':{'>'},'s':{'d'},'si':{'>','oi','mi','d','f'},
'f':{'d'},'fi':{'<','o','m','d','s'},'=':{'d'}}
T['di']={'<':{'<','o','m','di','fi'},'>':{'>','oi','di','mi','si'},
'd':O,'di':{'di'},'o':{'o','di','fi'},'oi':{'oi','di','si'},
'm':{'o','di','fi'},'mi':{'oi','di','si'},
's':{'di','fi','o'},'si':{'di'},'f':{'di','si','oi'},
'fi':{'di'},'=':{'di'}}
T['o']={'<':{'<'},'>':{'>','oi','di','mi','si'},'d':{'o','d','s'},
'di':{'<','o','m','di','fi'},'o':{'<','o','m'},'oi':O,
'm':{'<'},'mi':{'oi','di','si'},'s':{'o'},
'si':{'di','fi','o'},'f':{'d','s','o'},'fi':{'<','o','m'},
'=':{'o'}}
T['oi']={'<':{'<','o','m','di','fi'},'>':{'>'},'d':{'oi','d','f'},
'di':{'>','oi','di','mi','si'},'o':O,'oi':{'>','oi','mi'},
'm':{'di','fi','o'},'mi':{'>'},'s':{'oi','d','f'},
'si':{'oi','>','mi'},'f':{'oi'},'fi':{'oi','di','si'},
'=':{'oi'}}
T['m']={'<':{'<'},'>':{'>','oi','di','mi','si'},'d':{'o','d','s'},
'di':{'<'},'o':{'<'},'oi':{'o','d','s'},'m':{'<'},
'mi':{'f','fi','='},'s':{'m'},'si':{'m'},'f':{'o','d','s'},
'fi':{'<'},'=':{'m'}}
T['mi']={'<':{'<','o','m','di','fi'},'>':{'>'},'d':{'oi','d','f'},
'di':{'>'},'o':{'oi','d','f'},'oi':{'>'},'m':{'s','si','='},
'mi':{'>'},'s':{'oi','d','f'},'si':{'>'},'f':{'mi'},
'fi':{'mi'},'=':{'mi'}}
T['s']={'<':{'<'},'>':{'>'},'d':{'d'},'di':{'<','o','m','di','fi'},
'o':{'<','o','m'},'oi':{'oi','d','f'},'m':{'<'},'mi':{'mi'},
's':{'s'},'si':{'s','si','='},'f':{'d'},'fi':{'<','o','m'},
'=':{'s'}}
T['si']={'<':{'<','o','m','di','fi'},'>':{'>'},'d':{'oi','d','f'},
'di':{'di'},'o':{'o','di','fi'},'oi':{'oi'},
'm':{'o','di','fi'},'mi':{'mi'},'s':{'s','si','='},
'si':{'si'},'f':{'oi'},'fi':{'di'},'=':{'si'}}
T['f']={'<':{'<'},'>':{'>'},'d':{'d'},'di':{'>','oi','mi','di','si'},
'o':{'o','d','s'},'oi':{'>','oi','mi'},'m':{'m'},'mi':{'>'},
's':{'d'},'si':{'>','oi','mi'},'f':{'f'},'fi':{'f','fi','='},
'=':{'f'}}
T['fi']={'<':{'<'},'>':{'>','oi','di','mi','si'},'d':{'o','d','s'},
'di':{'di'},'o':{'o'},'oi':{'oi','di','si'},'m':{'m'},
'mi':{'si','oi','di'},'s':{'o'},'si':{'di'},
'f':{'f','fi','='},'fi':{'fi'},'=':{'fi'}}
T['=']={'<':{'<'},'>':{'>'},'d':{'d'},'di':{'di'},'o':{'o'},
'oi':{'oi'},'m':{'m'},'mi':{'mi'},'s':{'s'},'si':{'si'},
'f':{'f'},'fi':{'fi'},'=':{'='}}
L1=['<','d','o','m','s','f','=']
L2=['>','di','oi','mi','si','fi','=']
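# Illustrative helper (not part of the original table definition): compose two sets of
# Allen relations by taking the union of T[a][b] over every pair, which is how the
# composition table is typically used in path-consistency propagation.
def compose(rels_ab, rels_bc):
    result = set()
    for a in rels_ab:
        for b in rels_bc:
            result |= T[a][b]
    return result
# e.g. compose({'m'}, {'d'}) == {'o', 'd', 's'}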
| from collections import defaultdict
#T composition table
T=defaultdict(dict)
U={'<','>','d','di','o','oi','m','mi','s','si','f','fi','='}
dur={'d','s','f'}
con={'di','si','fi'}
O={'d','s','f','di','si','fi','o','oi','='} #big overlap
T['<']={'<':{'<'},'>':U,'d':{'<','o','m','d','s'},
'di':{'<'},'o':{'<'},'oi':{'<','o','m','d','s'},
'm':{'<'},'mi':{'<','o','m','d','s'},'s':{'<'},
'si':{'<'},'f':{'<','o','m','d','s'},'fi':{'<'},'=':{'<'}}
T['>']={'<':U,'>':{'>'},'d':{'>','oi','mi','d','f'},
'di':{'>'},'o':{'>','oi','mi','d','f'},'oi':{'>'},
'm':{'>','oi','mi','d','f'},'mi':{'>'},
's':{'>','oi','mi','d','f'},'si':{'>'},'f':{'>'},
'fi':{'>'},'=':{'>'}}
T['d']={'<':{'<'},'>':{'>'},'d':{'d'},'di':U,
'o':{'<','o','m','d','s'},'oi':{'>','oi','mi','d','f'},
'm':{'<'},'mi':{'>'},'s':{'d'},'si':{'>','oi','mi','d','f'},
'f':{'d'},'fi':{'<','o','m','d','s'},'=':{'d'}}
T['di']={'<':{'<','o','m','di','fi'},'>':{'>','oi','di','mi','si'},
'd':O,'di':{'di'},'o':{'o','di','fi'},'oi':{'oi','di','si'},
'm':{'o','di','fi'},'mi':{'oi','di','si'},
's':{'di','fi','o'},'si':{'di'},'f':{'di','si','oi'},
'fi':{'di'},'=':{'di'}}
T['o']={'<':{'<'},'>':{'>','oi','di','mi','si'},'d':{'o','d','s'},
'di':{'<','o','m','di','fi'},'o':{'<','o','m'},'oi':O,
'm':{'<'},'mi':{'oi','di','si'},'s':{'o'},
'si':{'di','fi','o'},'f':{'d','s','o'},'fi':{'<','o','m'},
'=':{'o'}}
T['oi']={'<':{'<','o','m','di','fi'},'>':{'>'},'d':{'oi','d','f'},
'di':{'>','oi','di','mi','si'},'o':O,'oi':{'>','oi','mi'},
'm':{'di','fi','o'},'mi':{'>'},'s':{'oi','d','f'},
'si':{'oi','>','mi'},'f':{'oi'},'fi':{'oi','di','si'},
'=':{'oi'}}
T['m']={'<':{'<'},'>':{'>','oi','di','mi','si'},'d':{'o','d','s'},
'di':{'<'},'o':{'<'},'oi':{'o','d','s'},'m':{'<'},
'mi':{'f','fi','='},'s':{'m'},'si':{'m'},'f':{'o','d','s'},
'fi':{'<'},'=':{'m'}}
T['mi']={'<':{'<','o','m','di','fi'},'>':{'>'},'d':{'oi','d','f'},
'di':{'>'},'o':{'oi','d','f'},'oi':{'>'},'m':{'s','si','='},
'mi':{'>'},'s':{'oi','d','f'},'si':{'>'},'f':{'mi'},
'fi':{'mi'},'=':{'mi'}}
T['s']={'<':{'<'},'>':{'>'},'d':{'d'},'di':{'<','o','m','di','fi'},
'o':{'<','o','m'},'oi':{'oi','d','f'},'m':{'<'},'mi':{'mi'},
's':{'s'},'si':{'s','si','='},'f':{'d'},'fi':{'<','o','m'},
'=':{'s'}}
T['si']={'<':{'<','o','m','di','fi'},'>':{'>'},'d':{'oi','d','f'},
'di':{'di'},'o':{'o','di','fi'},'oi':{'oi'},
'm':{'o','di','fi'},'mi':{'mi'},'s':{'s','si','='},
'si':{'si'},'f':{'oi'},'fi':{'di'},'=':{'si'}}
T['f']={'<':{'<'},'>':{'>'},'d':{'d'},'di':{'>','oi','mi','di','si'},
'o':{'o','d','s'},'oi':{'>','oi','mi'},'m':{'m'},'mi':{'>'},
's':{'d'},'si':{'>','oi','mi'},'f':{'f'},'fi':{'f','fi','='},
'=':{'f'}}
T['fi']={'<':{'<'},'>':{'>','oi','di','mi','si'},'d':{'o','d','s'},
'di':{'di'},'o':{'o'},'oi':{'oi','di','si'},'m':{'m'},
'mi':{'si','oi','di'},'s':{'o'},'si':{'di'},
'f':{'f','fi','='},'fi':{'fi'},'=':{'fi'}}
T['=']={'<':{'<'},'>':{'>'},'d':{'d'},'di':{'di'},'o':{'o'},
'oi':{'oi'},'m':{'m'},'mi':{'mi'},'s':{'s'},'si':{'si'},
'f':{'f'},'fi':{'fi'},'=':{'='}}
L1=['<','d','o','m','s','f','=']
L2=['>','di','oi','mi','si','fi','=']
|
# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from datetime import timedelta
from typing import Dict, List, Optional, Union
from pytorch_lightning.callbacks import (
Callback,
GradientAccumulationScheduler,
ModelCheckpoint,
ModelSummary,
ProgressBarBase,
RichProgressBar,
TQDMProgressBar,
)
from pytorch_lightning.callbacks.rich_model_summary import RichModelSummary
from pytorch_lightning.callbacks.timer import Timer
from pytorch_lightning.utilities import ModelSummaryMode, rank_zero_info
from pytorch_lightning.utilities.exceptions import MisconfigurationException
from pytorch_lightning.utilities.warnings import rank_zero_deprecation
class CallbackConnector:
def __init__(self, trainer):
self.trainer = trainer
def on_trainer_init(
self,
callbacks: Optional[Union[List[Callback], Callback]],
checkpoint_callback: Optional[bool],
enable_checkpointing: bool,
enable_progress_bar: bool,
progress_bar_refresh_rate: Optional[int],
process_position: int,
default_root_dir: Optional[str],
weights_save_path: Optional[str],
enable_model_summary: bool,
weights_summary: Optional[str],
stochastic_weight_avg: bool,
max_time: Optional[Union[str, timedelta, Dict[str, int]]] = None,
accumulate_grad_batches: Optional[Union[int, Dict[int, int]]] = None,
):
# init folder paths for checkpoint + weights save callbacks
self.trainer._default_root_dir = default_root_dir or os.getcwd()
self.trainer._weights_save_path = weights_save_path or self.trainer._default_root_dir
if stochastic_weight_avg:
rank_zero_deprecation(
"Setting `Trainer(stochastic_weight_avg=True)` is deprecated in v1.5 and will be removed in v1.7."
" Please pass `pytorch_lightning.callbacks.stochastic_weight_avg.StochasticWeightAveraging`"
" directly to the Trainer's `callbacks` argument instead."
)
self.trainer._stochastic_weight_avg = stochastic_weight_avg
# init callbacks
if isinstance(callbacks, Callback):
callbacks = [callbacks]
self.trainer.callbacks = callbacks or []
# configure checkpoint callback
# pass through the required args to figure out defaults
self._configure_checkpoint_callbacks(checkpoint_callback, enable_checkpointing)
# configure swa callback
self._configure_swa_callbacks()
# configure the timer callback.
# responsible to stop the training when max_time is reached.
self._configure_timer_callback(max_time)
# init progress bar
if process_position != 0:
rank_zero_deprecation(
f"Setting `Trainer(process_position={process_position})` is deprecated in v1.5 and will be removed"
" in v1.7. Please pass `pytorch_lightning.callbacks.progress.TQDMProgressBar` with"
" `process_position` directly to the Trainer's `callbacks` argument instead."
)
if progress_bar_refresh_rate is not None:
rank_zero_deprecation(
f"Setting `Trainer(progress_bar_refresh_rate={progress_bar_refresh_rate})` is deprecated in v1.5 and"
" will be removed in v1.7. Please pass `pytorch_lightning.callbacks.progress.TQDMProgressBar` with"
" `refresh_rate` directly to the Trainer's `callbacks` argument instead. Or, to disable the progress"
" bar pass `enable_progress_bar = False` to the Trainer."
)
self.configure_progress_bar(progress_bar_refresh_rate, process_position, enable_progress_bar)
# configure the ModelSummary callback
self._configure_model_summary_callback(enable_model_summary, weights_summary)
# accumulated grads
self._configure_accumulated_gradients(accumulate_grad_batches)
# push all checkpoint callbacks to the end
# it is important that these are the last callbacks to run
self.trainer.callbacks = self._reorder_callbacks(self.trainer.callbacks)
def _configure_accumulated_gradients(
self, accumulate_grad_batches: Optional[Union[int, Dict[int, int]]] = None
) -> None:
grad_accum_callback = [cb for cb in self.trainer.callbacks if isinstance(cb, GradientAccumulationScheduler)]
if grad_accum_callback:
if accumulate_grad_batches is not None:
raise MisconfigurationException(
"You have set both `accumulate_grad_batches` and passed an instance of "
"`GradientAccumulationScheduler` inside callbacks. Either remove `accumulate_grad_batches` "
"from trainer or remove `GradientAccumulationScheduler` from callbacks list."
)
grad_accum_callback = grad_accum_callback[0]
else:
if accumulate_grad_batches is None:
accumulate_grad_batches = 1
if isinstance(accumulate_grad_batches, dict):
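                # The scheduling dict maps an epoch index to the number of batches to
                # accumulate from that epoch onward, e.g. {0: 8, 4: 2} accumulates 8 batches
                # per optimizer step until epoch 4 and 2 afterwards; an int N is treated as
                # {0: N} in the branch below.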
grad_accum_callback = GradientAccumulationScheduler(accumulate_grad_batches)
elif isinstance(accumulate_grad_batches, int):
grad_accum_callback = GradientAccumulationScheduler({0: accumulate_grad_batches})
else:
raise MisconfigurationException(
f"`accumulate_grad_batches` should be an int or a dict. Got {accumulate_grad_batches}."
)
self.trainer.callbacks.append(grad_accum_callback)
self.trainer.accumulate_grad_batches = grad_accum_callback.get_accumulate_grad_batches(0)
self.trainer.accumulation_scheduler = grad_accum_callback
def _configure_checkpoint_callbacks(self, checkpoint_callback: Optional[bool], enable_checkpointing: bool) -> None:
if checkpoint_callback is not None:
rank_zero_deprecation(
f"Setting `Trainer(checkpoint_callback={checkpoint_callback})` is deprecated in v1.5 and will "
f"be removed in v1.7. Please consider using `Trainer(enable_checkpointing={checkpoint_callback})`."
)
# if both are set then checkpoint only if both are True
enable_checkpointing = checkpoint_callback and enable_checkpointing
if self._trainer_has_checkpoint_callbacks() and enable_checkpointing is False:
raise MisconfigurationException(
"Trainer was configured with `enable_checkpointing=False`"
" but found `ModelCheckpoint` in callbacks list."
)
if not self._trainer_has_checkpoint_callbacks() and enable_checkpointing is True:
self.trainer.callbacks.append(ModelCheckpoint())
def _configure_model_summary_callback(
self, enable_model_summary: bool, weights_summary: Optional[str] = None
) -> None:
if weights_summary is None:
rank_zero_deprecation(
"Setting `Trainer(weights_summary=None)` is deprecated in v1.5 and will be removed"
" in v1.7. Please set `Trainer(enable_model_summary=False)` instead."
)
return
if not enable_model_summary:
return
model_summary_cbs = [type(cb) for cb in self.trainer.callbacks if isinstance(cb, ModelSummary)]
if model_summary_cbs:
rank_zero_info(
f"Trainer already configured with model summary callbacks: {model_summary_cbs}."
" Skipping setting a default `ModelSummary` callback."
)
return
if weights_summary == "top":
# special case the default value for weights_summary to preserve backward compatibility
max_depth = 1
else:
rank_zero_deprecation(
f"Setting `Trainer(weights_summary={weights_summary})` is deprecated in v1.5 and will be removed"
" in v1.7. Please pass `pytorch_lightning.callbacks.model_summary.ModelSummary` with"
" `max_depth` directly to the Trainer's `callbacks` argument instead."
)
if weights_summary not in ModelSummaryMode.supported_types():
raise MisconfigurationException(
f"`weights_summary` can be None, {", ".join(ModelSummaryMode.supported_types())}",
f" but got {weights_summary}",
)
max_depth = ModelSummaryMode.get_max_depth(weights_summary)
progress_bar_callback = self.trainer.progress_bar_callback
is_progress_bar_rich = isinstance(progress_bar_callback, RichProgressBar)
if progress_bar_callback is not None and is_progress_bar_rich:
model_summary = RichModelSummary(max_depth=max_depth)
else:
model_summary = ModelSummary(max_depth=max_depth)
self.trainer.callbacks.append(model_summary)
self.trainer._weights_summary = weights_summary
def _configure_swa_callbacks(self):
if not self.trainer._stochastic_weight_avg:
return
from pytorch_lightning.callbacks.stochastic_weight_avg import StochasticWeightAveraging
existing_swa = [cb for cb in self.trainer.callbacks if isinstance(cb, StochasticWeightAveraging)]
if not existing_swa:
self.trainer.callbacks = [StochasticWeightAveraging()] + self.trainer.callbacks
def configure_progress_bar(
self, refresh_rate: Optional[int] = None, process_position: int = 0, enable_progress_bar: bool = True
) -> None:
progress_bars = [c for c in self.trainer.callbacks if isinstance(c, ProgressBarBase)]
if len(progress_bars) > 1:
raise MisconfigurationException(
"You added multiple progress bar callbacks to the Trainer, but currently only one"
" progress bar is supported."
)
if len(progress_bars) == 1:
# the user specified the progress bar in the callbacks list
# so the trainer doesn't need to provide a default one
if enable_progress_bar:
return
# otherwise the user specified a progress bar callback but also
# elected to disable the progress bar with the trainer flag
progress_bar_callback = progress_bars[0]
raise MisconfigurationException(
"Trainer was configured with `enable_progress_bar=False`"
f" but found `{progress_bar_callback.__class__.__name__}` in callbacks list."
)
# Return early if the user intends to disable the progress bar callback
if refresh_rate == 0 or not enable_progress_bar:
return
if refresh_rate is None:
# smaller refresh rate on colab causes crashes, choose a higher value
refresh_rate = 20 if os.getenv("COLAB_GPU") else 1
progress_bar_callback = TQDMProgressBar(refresh_rate=refresh_rate, process_position=process_position)
self.trainer.callbacks.append(progress_bar_callback)
def _configure_timer_callback(self, max_time: Optional[Union[str, timedelta, Dict[str, int]]] = None) -> None:
if max_time is None:
return
if any(isinstance(cb, Timer) for cb in self.trainer.callbacks):
rank_zero_info("Ignoring `Trainer(max_time=...)`, callbacks list already contains a Timer.")
return
timer = Timer(duration=max_time, interval="step")
self.trainer.callbacks.append(timer)
def _trainer_has_checkpoint_callbacks(self):
return len(self.trainer.checkpoint_callbacks) > 0
def attach_model_logging_functions(self, model):
for callback in self.trainer.callbacks:
callback.log = model.log
callback.log_dict = model.log_dict
def _attach_model_callbacks(self) -> None:
"""Attaches the callbacks defined in the model.
If a callback returned by the model's configure_callback method has the same type as one or several
callbacks already present in the trainer callbacks list, it will replace them.
In addition, all :class:`~pytorch_lightning.callbacks.model_checkpoint.ModelCheckpoint` callbacks
will be pushed to the end of the list, ensuring they run last.
"""
model_callbacks = self.trainer.call_hook("configure_callbacks")
if not model_callbacks:
return
model_callback_types = {type(c) for c in model_callbacks}
trainer_callback_types = {type(c) for c in self.trainer.callbacks}
override_types = model_callback_types.intersection(trainer_callback_types)
if override_types:
rank_zero_info(
"The following callbacks returned in `LightningModule.configure_callbacks` will override"
" existing callbacks passed to Trainer:"
f" {", ".join(sorted(t.__name__ for t in override_types))}"
)
# remove all callbacks with a type that occurs in model callbacks
all_callbacks = [c for c in self.trainer.callbacks if type(c) not in override_types]
all_callbacks.extend(model_callbacks)
all_callbacks = CallbackConnector._reorder_callbacks(all_callbacks)
# TODO: connectors refactor: move callbacks list to connector and do not write Trainer state
self.trainer.callbacks = all_callbacks
@staticmethod
def _reorder_callbacks(callbacks: List[Callback]) -> List[Callback]:
"""Moves all ModelCheckpoint callbacks to the end of the list. The sequential order within the group of
checkpoint callbacks is preserved, as well as the order of all other callbacks.
Args:
callbacks: A list of callbacks.
Return:
A new list in which the last elements are ModelCheckpoints if there were any present in the
input.
"""
checkpoints = [c for c in callbacks if isinstance(c, ModelCheckpoint)]
not_checkpoints = [c for c in callbacks if not isinstance(c, ModelCheckpoint)]
return not_checkpoints + checkpoints
| # Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from datetime import timedelta
from typing import Dict, List, Optional, Union
from pytorch_lightning.callbacks import (
Callback,
GradientAccumulationScheduler,
ModelCheckpoint,
ModelSummary,
ProgressBarBase,
RichProgressBar,
TQDMProgressBar,
)
from pytorch_lightning.callbacks.rich_model_summary import RichModelSummary
from pytorch_lightning.callbacks.timer import Timer
from pytorch_lightning.utilities import ModelSummaryMode, rank_zero_info
from pytorch_lightning.utilities.exceptions import MisconfigurationException
from pytorch_lightning.utilities.warnings import rank_zero_deprecation
class CallbackConnector:
def __init__(self, trainer):
self.trainer = trainer
def on_trainer_init(
self,
callbacks: Optional[Union[List[Callback], Callback]],
checkpoint_callback: Optional[bool],
enable_checkpointing: bool,
enable_progress_bar: bool,
progress_bar_refresh_rate: Optional[int],
process_position: int,
default_root_dir: Optional[str],
weights_save_path: Optional[str],
enable_model_summary: bool,
weights_summary: Optional[str],
stochastic_weight_avg: bool,
max_time: Optional[Union[str, timedelta, Dict[str, int]]] = None,
accumulate_grad_batches: Optional[Union[int, Dict[int, int]]] = None,
):
# init folder paths for checkpoint + weights save callbacks
self.trainer._default_root_dir = default_root_dir or os.getcwd()
self.trainer._weights_save_path = weights_save_path or self.trainer._default_root_dir
if stochastic_weight_avg:
rank_zero_deprecation(
"Setting `Trainer(stochastic_weight_avg=True)` is deprecated in v1.5 and will be removed in v1.7."
" Please pass `pytorch_lightning.callbacks.stochastic_weight_avg.StochasticWeightAveraging`"
" directly to the Trainer's `callbacks` argument instead."
)
self.trainer._stochastic_weight_avg = stochastic_weight_avg
# init callbacks
if isinstance(callbacks, Callback):
callbacks = [callbacks]
self.trainer.callbacks = callbacks or []
# configure checkpoint callback
# pass through the required args to figure out defaults
self._configure_checkpoint_callbacks(checkpoint_callback, enable_checkpointing)
# configure swa callback
self._configure_swa_callbacks()
# configure the timer callback.
# responsible to stop the training when max_time is reached.
self._configure_timer_callback(max_time)
# init progress bar
if process_position != 0:
rank_zero_deprecation(
f"Setting `Trainer(process_position={process_position})` is deprecated in v1.5 and will be removed"
" in v1.7. Please pass `pytorch_lightning.callbacks.progress.TQDMProgressBar` with"
" `process_position` directly to the Trainer's `callbacks` argument instead."
)
if progress_bar_refresh_rate is not None:
rank_zero_deprecation(
f"Setting `Trainer(progress_bar_refresh_rate={progress_bar_refresh_rate})` is deprecated in v1.5 and"
" will be removed in v1.7. Please pass `pytorch_lightning.callbacks.progress.TQDMProgressBar` with"
" `refresh_rate` directly to the Trainer's `callbacks` argument instead. Or, to disable the progress"
" bar pass `enable_progress_bar = False` to the Trainer."
)
self.configure_progress_bar(progress_bar_refresh_rate, process_position, enable_progress_bar)
# configure the ModelSummary callback
self._configure_model_summary_callback(enable_model_summary, weights_summary)
# accumulated grads
self._configure_accumulated_gradients(accumulate_grad_batches)
# push all checkpoint callbacks to the end
# it is important that these are the last callbacks to run
self.trainer.callbacks = self._reorder_callbacks(self.trainer.callbacks)
def _configure_accumulated_gradients(
self, accumulate_grad_batches: Optional[Union[int, Dict[int, int]]] = None
) -> None:
grad_accum_callback = [cb for cb in self.trainer.callbacks if isinstance(cb, GradientAccumulationScheduler)]
if grad_accum_callback:
if accumulate_grad_batches is not None:
raise MisconfigurationException(
"You have set both `accumulate_grad_batches` and passed an instance of "
"`GradientAccumulationScheduler` inside callbacks. Either remove `accumulate_grad_batches` "
"from trainer or remove `GradientAccumulationScheduler` from callbacks list."
)
grad_accum_callback = grad_accum_callback[0]
else:
if accumulate_grad_batches is None:
accumulate_grad_batches = 1
if isinstance(accumulate_grad_batches, dict):
grad_accum_callback = GradientAccumulationScheduler(accumulate_grad_batches)
elif isinstance(accumulate_grad_batches, int):
grad_accum_callback = GradientAccumulationScheduler({0: accumulate_grad_batches})
else:
raise MisconfigurationException(
f"`accumulate_grad_batches` should be an int or a dict. Got {accumulate_grad_batches}."
)
self.trainer.callbacks.append(grad_accum_callback)
self.trainer.accumulate_grad_batches = grad_accum_callback.get_accumulate_grad_batches(0)
self.trainer.accumulation_scheduler = grad_accum_callback
def _configure_checkpoint_callbacks(self, checkpoint_callback: Optional[bool], enable_checkpointing: bool) -> None:
if checkpoint_callback is not None:
rank_zero_deprecation(
f"Setting `Trainer(checkpoint_callback={checkpoint_callback})` is deprecated in v1.5 and will "
f"be removed in v1.7. Please consider using `Trainer(enable_checkpointing={checkpoint_callback})`."
)
# if both are set then checkpoint only if both are True
enable_checkpointing = checkpoint_callback and enable_checkpointing
if self._trainer_has_checkpoint_callbacks() and enable_checkpointing is False:
raise MisconfigurationException(
"Trainer was configured with `enable_checkpointing=False`"
" but found `ModelCheckpoint` in callbacks list."
)
if not self._trainer_has_checkpoint_callbacks() and enable_checkpointing is True:
self.trainer.callbacks.append(ModelCheckpoint())
def _configure_model_summary_callback(
self, enable_model_summary: bool, weights_summary: Optional[str] = None
) -> None:
if weights_summary is None:
rank_zero_deprecation(
"Setting `Trainer(weights_summary=None)` is deprecated in v1.5 and will be removed"
" in v1.7. Please set `Trainer(enable_model_summary=False)` instead."
)
return
if not enable_model_summary:
return
model_summary_cbs = [type(cb) for cb in self.trainer.callbacks if isinstance(cb, ModelSummary)]
if model_summary_cbs:
rank_zero_info(
f"Trainer already configured with model summary callbacks: {model_summary_cbs}."
" Skipping setting a default `ModelSummary` callback."
)
return
if weights_summary == "top":
# special case the default value for weights_summary to preserve backward compatibility
max_depth = 1
else:
rank_zero_deprecation(
f"Setting `Trainer(weights_summary={weights_summary})` is deprecated in v1.5 and will be removed"
" in v1.7. Please pass `pytorch_lightning.callbacks.model_summary.ModelSummary` with"
" `max_depth` directly to the Trainer's `callbacks` argument instead."
)
if weights_summary not in ModelSummaryMode.supported_types():
raise MisconfigurationException(
f"`weights_summary` can be None, {', '.join(ModelSummaryMode.supported_types())}",
f" but got {weights_summary}",
)
max_depth = ModelSummaryMode.get_max_depth(weights_summary)
progress_bar_callback = self.trainer.progress_bar_callback
is_progress_bar_rich = isinstance(progress_bar_callback, RichProgressBar)
if progress_bar_callback is not None and is_progress_bar_rich:
model_summary = RichModelSummary(max_depth=max_depth)
else:
model_summary = ModelSummary(max_depth=max_depth)
self.trainer.callbacks.append(model_summary)
self.trainer._weights_summary = weights_summary
def _configure_swa_callbacks(self):
if not self.trainer._stochastic_weight_avg:
return
from pytorch_lightning.callbacks.stochastic_weight_avg import StochasticWeightAveraging
existing_swa = [cb for cb in self.trainer.callbacks if isinstance(cb, StochasticWeightAveraging)]
if not existing_swa:
self.trainer.callbacks = [StochasticWeightAveraging()] + self.trainer.callbacks
def configure_progress_bar(
self, refresh_rate: Optional[int] = None, process_position: int = 0, enable_progress_bar: bool = True
) -> None:
progress_bars = [c for c in self.trainer.callbacks if isinstance(c, ProgressBarBase)]
if len(progress_bars) > 1:
raise MisconfigurationException(
"You added multiple progress bar callbacks to the Trainer, but currently only one"
" progress bar is supported."
)
if len(progress_bars) == 1:
# the user specified the progress bar in the callbacks list
# so the trainer doesn't need to provide a default one
if enable_progress_bar:
return
# otherwise the user specified a progress bar callback but also
# elected to disable the progress bar with the trainer flag
progress_bar_callback = progress_bars[0]
raise MisconfigurationException(
"Trainer was configured with `enable_progress_bar=False`"
f" but found `{progress_bar_callback.__class__.__name__}` in callbacks list."
)
# Return early if the user intends to disable the progress bar callback
if refresh_rate == 0 or not enable_progress_bar:
return
if refresh_rate is None:
# smaller refresh rate on colab causes crashes, choose a higher value
refresh_rate = 20 if os.getenv("COLAB_GPU") else 1
progress_bar_callback = TQDMProgressBar(refresh_rate=refresh_rate, process_position=process_position)
self.trainer.callbacks.append(progress_bar_callback)
def _configure_timer_callback(self, max_time: Optional[Union[str, timedelta, Dict[str, int]]] = None) -> None:
if max_time is None:
return
if any(isinstance(cb, Timer) for cb in self.trainer.callbacks):
rank_zero_info("Ignoring `Trainer(max_time=...)`, callbacks list already contains a Timer.")
return
timer = Timer(duration=max_time, interval="step")
self.trainer.callbacks.append(timer)
def _trainer_has_checkpoint_callbacks(self):
return len(self.trainer.checkpoint_callbacks) > 0
def attach_model_logging_functions(self, model):
for callback in self.trainer.callbacks:
callback.log = model.log
callback.log_dict = model.log_dict
def _attach_model_callbacks(self) -> None:
"""Attaches the callbacks defined in the model.
If a callback returned by the model's configure_callback method has the same type as one or several
callbacks already present in the trainer callbacks list, it will replace them.
In addition, all :class:`~pytorch_lightning.callbacks.model_checkpoint.ModelCheckpoint` callbacks
will be pushed to the end of the list, ensuring they run last.
"""
model_callbacks = self.trainer.call_hook("configure_callbacks")
if not model_callbacks:
return
model_callback_types = {type(c) for c in model_callbacks}
trainer_callback_types = {type(c) for c in self.trainer.callbacks}
override_types = model_callback_types.intersection(trainer_callback_types)
if override_types:
rank_zero_info(
"The following callbacks returned in `LightningModule.configure_callbacks` will override"
" existing callbacks passed to Trainer:"
f" {', '.join(sorted(t.__name__ for t in override_types))}"
)
# remove all callbacks with a type that occurs in model callbacks
all_callbacks = [c for c in self.trainer.callbacks if type(c) not in override_types]
all_callbacks.extend(model_callbacks)
all_callbacks = CallbackConnector._reorder_callbacks(all_callbacks)
# TODO: connectors refactor: move callbacks list to connector and do not write Trainer state
self.trainer.callbacks = all_callbacks
@staticmethod
def _reorder_callbacks(callbacks: List[Callback]) -> List[Callback]:
"""Moves all ModelCheckpoint callbacks to the end of the list. The sequential order within the group of
checkpoint callbacks is preserved, as well as the order of all other callbacks.
Args:
callbacks: A list of callbacks.
Return:
A new list in which the last elements are ModelCheckpoints if there were any present in the
input.
"""
checkpoints = [c for c in callbacks if isinstance(c, ModelCheckpoint)]
not_checkpoints = [c for c in callbacks if not isinstance(c, ModelCheckpoint)]
return not_checkpoints + checkpoints
|
import random
import json
import logging
import asyncio
import secrets
import sortedcontainers
from hailtop.utils import (
Notice,
run_if_changed,
WaitableSharedPool,
time_msecs,
retry_long_running,
secret_alnum_string,
AsyncWorkerPool,
periodically_call,
)
from ..batch_format_version import BatchFormatVersion
from ..batch_configuration import WORKER_MAX_IDLE_TIME_MSECS
from ..inst_coll_config import machine_type_to_dict, JobPrivateInstanceManagerConfig
from .create_instance import create_instance
from .instance_collection import InstanceCollection
from .instance import Instance
from .job import mark_job_creating, schedule_job
from ..utils import worker_memory_per_core_bytes, Box, ExceededSharesCounter
log = logging.getLogger('job_private_inst_coll')
class JobPrivateInstanceManager(InstanceCollection):
def __init__(self, app, machine_name_prefix: str, config: JobPrivateInstanceManagerConfig):
super().__init__(app, config.name, machine_name_prefix, is_pool=False)
global_scheduler_state_changed: Notice = app['scheduler_state_changed']
self.create_instances_state_changed = global_scheduler_state_changed.subscribe()
self.scheduler_state_changed = asyncio.Event()
self.async_worker_pool: AsyncWorkerPool = app['async_worker_pool']
self.exceeded_shares_counter = ExceededSharesCounter()
self.boot_disk_size_gb = config.boot_disk_size_gb
self.max_instances = config.max_instances
self.max_live_instances = config.max_live_instances
async def async_init(self):
log.info(f'initializing {self}')
await super().async_init()
async for record in self.db.select_and_fetchall(
'SELECT * FROM instances WHERE removed = 0 AND inst_coll = %s;', (self.name,)
):
instance = Instance.from_record(self.app, self, record)
self.add_instance(instance)
self.task_manager.ensure_future(
retry_long_running(
'create_instances_loop',
run_if_changed,
self.create_instances_state_changed,
self.create_instances_loop_body,
)
)
self.task_manager.ensure_future(
retry_long_running(
'schedule_jobs_loop', run_if_changed, self.scheduler_state_changed, self.schedule_jobs_loop_body
)
)
self.task_manager.ensure_future(periodically_call(15, self.bump_scheduler))
def config(self):
return {
'name': self.name,
'worker_disk_size_gb': self.boot_disk_size_gb,
'max_instances': self.max_instances,
'max_live_instances': self.max_live_instances,
}
async def configure(self, boot_disk_size_gb, max_instances, max_live_instances):
await self.db.just_execute(
'''
UPDATE inst_colls
SET boot_disk_size_gb = %s, max_instances = %s, max_live_instances = %s
WHERE name = %s;
''',
(boot_disk_size_gb, max_instances, max_live_instances, self.name),
)
self.boot_disk_size_gb = boot_disk_size_gb
self.max_instances = max_instances
self.max_live_instances = max_live_instances
async def bump_scheduler(self):
self.scheduler_state_changed.set()
async def schedule_jobs_loop_body(self):
log.info(f'starting scheduling jobs for {self}')
waitable_pool = WaitableSharedPool(self.async_worker_pool)
should_wait = True
n_scheduled = 0
async for record in self.db.select_and_fetchall(
'''
SELECT jobs.*, batches.format_version, batches.userdata, batches.user, attempts.instance_name
FROM batches
INNER JOIN jobs ON batches.id = jobs.batch_id
LEFT JOIN attempts ON jobs.batch_id = attempts.batch_id AND jobs.job_id = attempts.job_id
LEFT JOIN instances ON attempts.instance_name = instances.name
WHERE batches.state = 'running'
AND jobs.state = 'Creating'
AND (jobs.always_run OR NOT jobs.cancelled)
AND jobs.inst_coll = %s
AND instances.`state` = 'active'
ORDER BY instances.time_activated ASC
LIMIT 300;
''',
(self.name,),
timer_description=f'in schedule_jobs for {self}: get ready jobs with active instances',
):
batch_id = record['batch_id']
job_id = record['job_id']
instance_name = record['instance_name']
id = (batch_id, job_id)
log.info(f'scheduling job {id}')
instance = self.name_instance[instance_name]
n_scheduled += 1
should_wait = False
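            # The wrapper below logs and swallows any scheduling failure so that a single
            # bad job/instance pair does not abort the whole scheduling pass.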
async def schedule_with_error_handling(app, record, id, instance):
try:
await schedule_job(app, record, instance)
except Exception:
log.info(f'scheduling job {id} on {instance} for {self}', exc_info=True)
await waitable_pool.call(schedule_with_error_handling, self.app, record, id, instance)
await waitable_pool.wait()
log.info(f'scheduled {n_scheduled} jobs for {self}')
return should_wait
def max_instances_to_create(self):
n_live_instances = self.n_instances_by_state['pending'] + self.n_instances_by_state['active']
return min(
self.max_live_instances - n_live_instances,
self.max_instances - self.n_instances,
# 20 queries/s; our GCE long-run quota
300,
)
async def compute_fair_share(self):
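        # Rough shape of the allocation below: load each user's live (creating + running)
        # and total (ready + creating + running) job counts, then raise a shared waterline
        # `mark`. Users move from pending_users_by_live_jobs to allocating_users_by_total_jobs
        # once the waterline reaches their live-job count, and each allocating user ends up
        # with (mark - live_jobs) newly allocated jobs, capped by their total jobs and by the
        # overall max_instances_to_create() budget.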
n_jobs_to_allocate = self.max_instances_to_create()
user_live_jobs = {}
user_total_jobs = {}
result = {}
pending_users_by_live_jobs = sortedcontainers.SortedSet(key=lambda user: user_live_jobs[user])
allocating_users_by_total_jobs = sortedcontainers.SortedSet(key=lambda user: user_total_jobs[user])
records = self.db.execute_and_fetchall(
'''
SELECT user,
CAST(COALESCE(SUM(n_ready_jobs), 0) AS SIGNED) AS n_ready_jobs,
CAST(COALESCE(SUM(n_creating_jobs), 0) AS SIGNED) AS n_creating_jobs,
CAST(COALESCE(SUM(n_running_jobs), 0) AS SIGNED) AS n_running_jobs
FROM user_inst_coll_resources
WHERE inst_coll = %s
GROUP BY user
HAVING n_ready_jobs + n_creating_jobs + n_running_jobs > 0;
''',
(self.name,),
timer_description=f'in compute_fair_share for {self}: aggregate user_inst_coll_resources',
)
async for record in records:
user = record['user']
user_live_jobs[user] = record['n_creating_jobs'] + record['n_running_jobs']
user_total_jobs[user] = record['n_ready_jobs'] + record['n_creating_jobs'] + record['n_running_jobs']
pending_users_by_live_jobs.add(user)
record['n_allocated_jobs'] = 0
result[user] = record
def allocate_jobs(user, mark):
result[user]['n_allocated_jobs'] = mark - user_live_jobs[user]
mark = 0
while n_jobs_to_allocate > 0 and (pending_users_by_live_jobs or allocating_users_by_total_jobs):
lowest_running = None
lowest_total = None
if pending_users_by_live_jobs:
lowest_running_user = pending_users_by_live_jobs[0]
lowest_running = user_live_jobs[lowest_running_user]
if lowest_running == mark:
pending_users_by_live_jobs.remove(lowest_running_user)
allocating_users_by_total_jobs.add(lowest_running_user)
continue
if allocating_users_by_total_jobs:
lowest_total_user = allocating_users_by_total_jobs[0]
lowest_total = user_total_jobs[lowest_total_user]
if lowest_total == mark:
allocating_users_by_total_jobs.remove(lowest_total_user)
allocate_jobs(lowest_total_user, mark)
continue
allocation = min([c for c in [lowest_running, lowest_total] if c is not None])
n_allocating_users = len(allocating_users_by_total_jobs)
jobs_to_allocate = n_allocating_users * (allocation - mark)
if jobs_to_allocate > n_jobs_to_allocate:
mark += int(n_jobs_to_allocate / n_allocating_users + 0.5)
n_jobs_to_allocate = 0
break
mark = allocation
n_jobs_to_allocate -= jobs_to_allocate
for user in allocating_users_by_total_jobs:
allocate_jobs(user, mark)
return result
async def create_instance(self, batch_id, job_id, machine_spec):
assert machine_spec is not None
machine_name = self.generate_machine_name()
machine_type = machine_spec['machine_type']
preemptible = machine_spec['preemptible']
storage_gb = machine_spec['storage_gib']
machine_type_dict = machine_type_to_dict(machine_type)
cores = int(machine_type_dict['cores'])
cores_mcpu = cores * 1000
worker_type = machine_type_dict['machine_type']
zone = self.zone_monitor.get_zone(cores, False, storage_gb)
if zone is None:
return
activation_token = secrets.token_urlsafe(32)
instance = await Instance.create(
self.app, self, machine_name, activation_token, cores_mcpu, zone, machine_type, preemptible
)
self.add_instance(instance)
log.info(f'created {instance} for {(batch_id, job_id)}')
worker_config = await create_instance(
app=self.app,
zone=zone,
machine_name=machine_name,
machine_type=machine_type,
activation_token=activation_token,
max_idle_time_msecs=WORKER_MAX_IDLE_TIME_MSECS,
worker_local_ssd_data_disk=False,
worker_pd_ssd_data_disk_size_gb=storage_gb,
boot_disk_size_gb=self.boot_disk_size_gb,
preemptible=preemptible,
job_private=True,
)
memory_in_bytes = worker_memory_per_core_bytes(worker_type)
resources = worker_config.resources(
cpu_in_mcpu=cores_mcpu, memory_in_bytes=memory_in_bytes, storage_in_gib=0
        ) # storage is 0 because there is no additional disk beyond the data disk
return (instance, resources)
async def create_instances_loop_body(self):
log.info(f'create_instances for {self}: starting')
start = time_msecs()
n_instances_created = 0
user_resources = await self.compute_fair_share()
total = sum(resources['n_allocated_jobs'] for resources in user_resources.values())
if not total:
log.info(f'create_instances {self}: no allocated jobs')
should_wait = True
return should_wait
user_share = {
user: max(int(300 * resources['n_allocated_jobs'] / total + 0.5), 20)
for user, resources in user_resources.items()
}
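        # Each user's share is a proportional slice of a 300-job budget with a floor of 20;
        # it is used below as the per-user `remaining` limit when pulling runnable jobs.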
async def user_runnable_jobs(user, remaining):
async for batch in self.db.select_and_fetchall(
'''
SELECT id, cancelled, userdata, user, format_version
FROM batches
WHERE user = %s AND `state` = 'running';
''',
(user,),
timer_description=f'in create_instances {self}: get {user} running batches',
):
async for record in self.db.select_and_fetchall(
'''
SELECT jobs.job_id, jobs.spec, jobs.cores_mcpu, COALESCE(SUM(instances.state IS NOT NULL AND
(instances.state = 'pending' OR instances.state = 'active')), 0) as live_attempts
FROM jobs FORCE INDEX(jobs_batch_id_state_always_run_inst_coll_cancelled)
LEFT JOIN attempts ON jobs.batch_id = attempts.batch_id AND jobs.job_id = attempts.job_id
LEFT JOIN instances ON attempts.instance_name = instances.name
WHERE jobs.batch_id = %s AND jobs.state = 'Ready' AND always_run = 1 AND jobs.inst_coll = %s
GROUP BY jobs.job_id, jobs.spec, jobs.cores_mcpu
HAVING live_attempts = 0
LIMIT %s;
''',
(batch['id'], self.name, remaining.value),
timer_description=f'in create_instances {self}: get {user} batch {batch['id']} runnable jobs (1)',
):
record['batch_id'] = batch['id']
record['userdata'] = batch['userdata']
record['user'] = batch['user']
record['format_version'] = batch['format_version']
yield record
if not batch['cancelled']:
async for record in self.db.select_and_fetchall(
'''
SELECT jobs.job_id, jobs.spec, jobs.cores_mcpu, COALESCE(SUM(instances.state IS NOT NULL AND
(instances.state = 'pending' OR instances.state = 'active')), 0) as live_attempts
FROM jobs FORCE INDEX(jobs_batch_id_state_always_run_cancelled)
LEFT JOIN attempts ON jobs.batch_id = attempts.batch_id AND jobs.job_id = attempts.job_id
LEFT JOIN instances ON attempts.instance_name = instances.name
WHERE jobs.batch_id = %s AND jobs.state = 'Ready' AND always_run = 0 AND jobs.inst_coll = %s AND cancelled = 0
GROUP BY jobs.job_id, jobs.spec, jobs.cores_mcpu
HAVING live_attempts = 0
LIMIT %s;
''',
(batch['id'], self.name, remaining.value),
timer_description=f'in create_instances {self}: get {user} batch {batch['id']} runnable jobs (2)',
):
record['batch_id'] = batch['id']
record['userdata'] = batch['userdata']
record['user'] = batch['user']
record['format_version'] = batch['format_version']
yield record
waitable_pool = WaitableSharedPool(self.async_worker_pool)
should_wait = True
for user, resources in user_resources.items():
n_allocated_instances = resources['n_allocated_jobs']
if n_allocated_instances == 0:
continue
n_user_instances_created = 0
share = user_share[user]
log.info(f'create_instances {self}: user-share: {user}: {share}')
remaining = Box(share)
async for record in user_runnable_jobs(user, remaining):
batch_id = record['batch_id']
job_id = record['job_id']
id = (batch_id, job_id)
attempt_id = secret_alnum_string(6)
record['attempt_id'] = attempt_id
if n_user_instances_created >= n_allocated_instances:
if random.random() > self.exceeded_shares_counter.rate():
self.exceeded_shares_counter.push(True)
self.scheduler_state_changed.set()
break
self.exceeded_shares_counter.push(False)
n_instances_created += 1
n_user_instances_created += 1
should_wait = False
log.info(f'creating job private instance for job {id}')
async def create_instance_with_error_handling(batch_id, job_id, attempt_id, record, id):
try:
batch_format_version = BatchFormatVersion(record['format_version'])
spec = json.loads(record['spec'])
machine_spec = batch_format_version.get_spec_machine_spec(spec)
instance, resources = await self.create_instance(batch_id, job_id, machine_spec)
await mark_job_creating(
self.app, batch_id, job_id, attempt_id, instance, time_msecs(), resources
)
except Exception:
log.info(f'creating job private instance for job {id}', exc_info=True)
await waitable_pool.call(create_instance_with_error_handling, batch_id, job_id, attempt_id, record, id)
remaining.value -= 1
if remaining.value <= 0:
break
await waitable_pool.wait()
end = time_msecs()
log.info(f'create_instances: created instances for {n_instances_created} jobs in {end - start}ms for {self}')
await asyncio.sleep(15) # ensure we don't create more instances than GCE limit
return should_wait
def __str__(self):
return f'jpim {self.name}'
| import random
import json
import logging
import asyncio
import secrets
import sortedcontainers
from hailtop.utils import (
Notice,
run_if_changed,
WaitableSharedPool,
time_msecs,
retry_long_running,
secret_alnum_string,
AsyncWorkerPool,
periodically_call,
)
from ..batch_format_version import BatchFormatVersion
from ..batch_configuration import WORKER_MAX_IDLE_TIME_MSECS
from ..inst_coll_config import machine_type_to_dict, JobPrivateInstanceManagerConfig
from .create_instance import create_instance
from .instance_collection import InstanceCollection
from .instance import Instance
from .job import mark_job_creating, schedule_job
from ..utils import worker_memory_per_core_bytes, Box, ExceededSharesCounter
log = logging.getLogger('job_private_inst_coll')
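# JobPrivateInstanceManager provisions one dedicated (job-private) VM per job,
# as opposed to the pooled instance collections (note is_pool=False in __init__).
# It runs two background loops: create_instances_loop_body, which turns runnable
# jobs into new instances, and schedule_jobs_loop_body, which schedules each
# 'Creating' job onto its instance once that instance becomes active.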
class JobPrivateInstanceManager(InstanceCollection):
def __init__(self, app, machine_name_prefix: str, config: JobPrivateInstanceManagerConfig):
super().__init__(app, config.name, machine_name_prefix, is_pool=False)
global_scheduler_state_changed: Notice = app['scheduler_state_changed']
self.create_instances_state_changed = global_scheduler_state_changed.subscribe()
self.scheduler_state_changed = asyncio.Event()
self.async_worker_pool: AsyncWorkerPool = app['async_worker_pool']
self.exceeded_shares_counter = ExceededSharesCounter()
self.boot_disk_size_gb = config.boot_disk_size_gb
self.max_instances = config.max_instances
self.max_live_instances = config.max_live_instances
async def async_init(self):
log.info(f'initializing {self}')
await super().async_init()
async for record in self.db.select_and_fetchall(
'SELECT * FROM instances WHERE removed = 0 AND inst_coll = %s;', (self.name,)
):
instance = Instance.from_record(self.app, self, record)
self.add_instance(instance)
self.task_manager.ensure_future(
retry_long_running(
'create_instances_loop',
run_if_changed,
self.create_instances_state_changed,
self.create_instances_loop_body,
)
)
self.task_manager.ensure_future(
retry_long_running(
'schedule_jobs_loop', run_if_changed, self.scheduler_state_changed, self.schedule_jobs_loop_body
)
)
self.task_manager.ensure_future(periodically_call(15, self.bump_scheduler))
def config(self):
return {
'name': self.name,
'worker_disk_size_gb': self.boot_disk_size_gb,
'max_instances': self.max_instances,
'max_live_instances': self.max_live_instances,
}
async def configure(self, boot_disk_size_gb, max_instances, max_live_instances):
await self.db.just_execute(
'''
UPDATE inst_colls
SET boot_disk_size_gb = %s, max_instances = %s, max_live_instances = %s
WHERE name = %s;
''',
(boot_disk_size_gb, max_instances, max_live_instances, self.name),
)
self.boot_disk_size_gb = boot_disk_size_gb
self.max_instances = max_instances
self.max_live_instances = max_live_instances
async def bump_scheduler(self):
self.scheduler_state_changed.set()
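# Scheduling loop: pick up to 300 'Creating' jobs in running batches whose
# dedicated instance has become active (oldest activations first) and hand each
# one to schedule_job via the shared worker pool. should_wait is returned False
# whenever anything was scheduled, signalling that another pass may find more work.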
async def schedule_jobs_loop_body(self):
log.info(f'starting scheduling jobs for {self}')
waitable_pool = WaitableSharedPool(self.async_worker_pool)
should_wait = True
n_scheduled = 0
async for record in self.db.select_and_fetchall(
'''
SELECT jobs.*, batches.format_version, batches.userdata, batches.user, attempts.instance_name
FROM batches
INNER JOIN jobs ON batches.id = jobs.batch_id
LEFT JOIN attempts ON jobs.batch_id = attempts.batch_id AND jobs.job_id = attempts.job_id
LEFT JOIN instances ON attempts.instance_name = instances.name
WHERE batches.state = 'running'
AND jobs.state = 'Creating'
AND (jobs.always_run OR NOT jobs.cancelled)
AND jobs.inst_coll = %s
AND instances.`state` = 'active'
ORDER BY instances.time_activated ASC
LIMIT 300;
''',
(self.name,),
timer_description=f'in schedule_jobs for {self}: get ready jobs with active instances',
):
batch_id = record['batch_id']
job_id = record['job_id']
instance_name = record['instance_name']
id = (batch_id, job_id)
log.info(f'scheduling job {id}')
instance = self.name_instance[instance_name]
n_scheduled += 1
should_wait = False
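# Wrap the actual call to schedule_job so that one failing job is only logged
# and does not abort the scheduling pass or poison the shared worker pool.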
async def schedule_with_error_handling(app, record, id, instance):
try:
await schedule_job(app, record, instance)
except Exception:
log.info(f'scheduling job {id} on {instance} for {self}', exc_info=True)
await waitable_pool.call(schedule_with_error_handling, self.app, record, id, instance)
await waitable_pool.wait()
log.info(f'scheduled {n_scheduled} jobs for {self}')
return should_wait
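# Per-pass creation budget: bounded by live-instance headroom, total-instance
# headroom, and a flat 300 (see the GCE quota note below).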
def max_instances_to_create(self):
n_live_instances = self.n_instances_by_state['pending'] + self.n_instances_by_state['active']
return min(
self.max_live_instances - n_live_instances,
self.max_instances - self.n_instances,
# 20 queries/s; our GCE long-run quota
300,
)
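# Fair-share ("water-filling") allocation: every user with work in this
# inst_coll starts at their current live (creating + running) job count; a
# common water mark is raised until the n_jobs_to_allocate budget is spent,
# and each user is allocated (mark - live jobs), never more than brings them
# up to their total (ready + creating + running) job count.
#
# Illustrative example (not from the source): with a budget of 4, user A
# (live 0, total 5) and user B (live 2, total 3), the mark settles at 3, so
# A is allocated 3 jobs and B is allocated 1; both end up at 3 live jobs.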
async def compute_fair_share(self):
n_jobs_to_allocate = self.max_instances_to_create()
user_live_jobs = {}
user_total_jobs = {}
result = {}
pending_users_by_live_jobs = sortedcontainers.SortedSet(key=lambda user: user_live_jobs[user])
allocating_users_by_total_jobs = sortedcontainers.SortedSet(key=lambda user: user_total_jobs[user])
records = self.db.execute_and_fetchall(
'''
SELECT user,
CAST(COALESCE(SUM(n_ready_jobs), 0) AS SIGNED) AS n_ready_jobs,
CAST(COALESCE(SUM(n_creating_jobs), 0) AS SIGNED) AS n_creating_jobs,
CAST(COALESCE(SUM(n_running_jobs), 0) AS SIGNED) AS n_running_jobs
FROM user_inst_coll_resources
WHERE inst_coll = %s
GROUP BY user
HAVING n_ready_jobs + n_creating_jobs + n_running_jobs > 0;
''',
(self.name,),
timer_description=f'in compute_fair_share for {self}: aggregate user_inst_coll_resources',
)
async for record in records:
user = record['user']
user_live_jobs[user] = record['n_creating_jobs'] + record['n_running_jobs']
user_total_jobs[user] = record['n_ready_jobs'] + record['n_creating_jobs'] + record['n_running_jobs']
pending_users_by_live_jobs.add(user)
record['n_allocated_jobs'] = 0
result[user] = record
def allocate_jobs(user, mark):
result[user]['n_allocated_jobs'] = mark - user_live_jobs[user]
mark = 0
while n_jobs_to_allocate > 0 and (pending_users_by_live_jobs or allocating_users_by_total_jobs):
lowest_running = None
lowest_total = None
if pending_users_by_live_jobs:
lowest_running_user = pending_users_by_live_jobs[0]
lowest_running = user_live_jobs[lowest_running_user]
if lowest_running == mark:
pending_users_by_live_jobs.remove(lowest_running_user)
allocating_users_by_total_jobs.add(lowest_running_user)
continue
if allocating_users_by_total_jobs:
lowest_total_user = allocating_users_by_total_jobs[0]
lowest_total = user_total_jobs[lowest_total_user]
if lowest_total == mark:
allocating_users_by_total_jobs.remove(lowest_total_user)
allocate_jobs(lowest_total_user, mark)
continue
allocation = min([c for c in [lowest_running, lowest_total] if c is not None])
n_allocating_users = len(allocating_users_by_total_jobs)
jobs_to_allocate = n_allocating_users * (allocation - mark)
if jobs_to_allocate > n_jobs_to_allocate:
mark += int(n_jobs_to_allocate / n_allocating_users + 0.5)
n_jobs_to_allocate = 0
break
mark = allocation
n_jobs_to_allocate -= jobs_to_allocate
for user in allocating_users_by_total_jobs:
allocate_jobs(user, mark)
return result
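# Provision a dedicated VM sized to the job's machine_spec and return
# (instance, resources); returns None when no zone can currently satisfy
# the cores/storage request.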
async def create_instance(self, batch_id, job_id, machine_spec):
assert machine_spec is not None
machine_name = self.generate_machine_name()
machine_type = machine_spec['machine_type']
preemptible = machine_spec['preemptible']
storage_gb = machine_spec['storage_gib']
machine_type_dict = machine_type_to_dict(machine_type)
cores = int(machine_type_dict['cores'])
cores_mcpu = cores * 1000
worker_type = machine_type_dict['machine_type']
zone = self.zone_monitor.get_zone(cores, False, storage_gb)
if zone is None:
return
activation_token = secrets.token_urlsafe(32)
instance = await Instance.create(
self.app, self, machine_name, activation_token, cores_mcpu, zone, machine_type, preemptible
)
self.add_instance(instance)
log.info(f'created {instance} for {(batch_id, job_id)}')
worker_config = await create_instance(
app=self.app,
zone=zone,
machine_name=machine_name,
machine_type=machine_type,
activation_token=activation_token,
max_idle_time_msecs=WORKER_MAX_IDLE_TIME_MSECS,
worker_local_ssd_data_disk=False,
worker_pd_ssd_data_disk_size_gb=storage_gb,
boot_disk_size_gb=self.boot_disk_size_gb,
preemptible=preemptible,
job_private=True,
)
memory_in_bytes = worker_memory_per_core_bytes(worker_type)
resources = worker_config.resources(
cpu_in_mcpu=cores_mcpu, memory_in_bytes=memory_in_bytes, storage_in_gib=0
)  # storage is 0 because there's no additional disk beyond the data disk
return (instance, resources)
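# Creation loop: compute each user's fair share of the creation budget, convert
# it into a per-pass share of the 300-instance cap (minimum 20), then stream
# that user's runnable Ready jobs and create one job-private instance per job
# through the shared worker pool. The 15s sleep at the end keeps instance
# creation under the GCE rate limit.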
async def create_instances_loop_body(self):
log.info(f'create_instances for {self}: starting')
start = time_msecs()
n_instances_created = 0
user_resources = await self.compute_fair_share()
total = sum(resources['n_allocated_jobs'] for resources in user_resources.values())
if not total:
log.info(f'create_instances {self}: no allocated jobs')
should_wait = True
return should_wait
user_share = {
user: max(int(300 * resources['n_allocated_jobs'] / total + 0.5), 20)
for user, resources in user_resources.items()
}
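# Stream a user's runnable jobs per running batch: first always_run jobs, then
# (only if the batch is not cancelled) ordinary non-cancelled jobs; in both
# queries, jobs that already have a live (pending/active) attempt are skipped
# by the HAVING live_attempts = 0 filter, and each query is bounded by the
# caller's remaining share.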
async def user_runnable_jobs(user, remaining):
async for batch in self.db.select_and_fetchall(
'''
SELECT id, cancelled, userdata, user, format_version
FROM batches
WHERE user = %s AND `state` = 'running';
''',
(user,),
timer_description=f'in create_instances {self}: get {user} running batches',
):
async for record in self.db.select_and_fetchall(
'''
SELECT jobs.job_id, jobs.spec, jobs.cores_mcpu, COALESCE(SUM(instances.state IS NOT NULL AND
(instances.state = 'pending' OR instances.state = 'active')), 0) as live_attempts
FROM jobs FORCE INDEX(jobs_batch_id_state_always_run_inst_coll_cancelled)
LEFT JOIN attempts ON jobs.batch_id = attempts.batch_id AND jobs.job_id = attempts.job_id
LEFT JOIN instances ON attempts.instance_name = instances.name
WHERE jobs.batch_id = %s AND jobs.state = 'Ready' AND always_run = 1 AND jobs.inst_coll = %s
GROUP BY jobs.job_id, jobs.spec, jobs.cores_mcpu
HAVING live_attempts = 0
LIMIT %s;
''',
(batch['id'], self.name, remaining.value),
timer_description=f'in create_instances {self}: get {user} batch {batch["id"]} runnable jobs (1)',
):
record['batch_id'] = batch['id']
record['userdata'] = batch['userdata']
record['user'] = batch['user']
record['format_version'] = batch['format_version']
yield record
if not batch['cancelled']:
async for record in self.db.select_and_fetchall(
'''
SELECT jobs.job_id, jobs.spec, jobs.cores_mcpu, COALESCE(SUM(instances.state IS NOT NULL AND
(instances.state = 'pending' OR instances.state = 'active')), 0) as live_attempts
FROM jobs FORCE INDEX(jobs_batch_id_state_always_run_cancelled)
LEFT JOIN attempts ON jobs.batch_id = attempts.batch_id AND jobs.job_id = attempts.job_id
LEFT JOIN instances ON attempts.instance_name = instances.name
WHERE jobs.batch_id = %s AND jobs.state = 'Ready' AND always_run = 0 AND jobs.inst_coll = %s AND cancelled = 0
GROUP BY jobs.job_id, jobs.spec, jobs.cores_mcpu
HAVING live_attempts = 0
LIMIT %s;
''',
(batch['id'], self.name, remaining.value),
timer_description=f'in create_instances {self}: get {user} batch {batch["id"]} runnable jobs (2)',
):
record['batch_id'] = batch['id']
record['userdata'] = batch['userdata']
record['user'] = batch['user']
record['format_version'] = batch['format_version']
yield record
waitable_pool = WaitableSharedPool(self.async_worker_pool)
should_wait = True
for user, resources in user_resources.items():
n_allocated_instances = resources['n_allocated_jobs']
if n_allocated_instances == 0:
continue
n_user_instances_created = 0
share = user_share[user]
log.info(f'create_instances {self}: user-share: {user}: {share}')
remaining = Box(share)
async for record in user_runnable_jobs(user, remaining):
batch_id = record['batch_id']
job_id = record['job_id']
id = (batch_id, job_id)
attempt_id = secret_alnum_string(6)
record['attempt_id'] = attempt_id
if n_user_instances_created >= n_allocated_instances:
if random.random() > self.exceeded_shares_counter.rate():
self.exceeded_shares_counter.push(True)
self.scheduler_state_changed.set()
break
self.exceeded_shares_counter.push(False)
n_instances_created += 1
n_user_instances_created += 1
should_wait = False
log.info(f'creating job private instance for job {id}')
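# As with scheduling above, wrap instance creation so a single failure is
# logged and does not take down the whole creation pass.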
async def create_instance_with_error_handling(batch_id, job_id, attempt_id, record, id):
try:
batch_format_version = BatchFormatVersion(record['format_version'])
spec = json.loads(record['spec'])
machine_spec = batch_format_version.get_spec_machine_spec(spec)
instance, resources = await self.create_instance(batch_id, job_id, machine_spec)
await mark_job_creating(
self.app, batch_id, job_id, attempt_id, instance, time_msecs(), resources
)
except Exception:
log.info(f'creating job private instance for job {id}', exc_info=True)
await waitable_pool.call(create_instance_with_error_handling, batch_id, job_id, attempt_id, record, id)
remaining.value -= 1
if remaining.value <= 0:
break
await waitable_pool.wait()
end = time_msecs()
log.info(f'create_instances: created instances for {n_instances_created} jobs in {end - start}ms for {self}')
await asyncio.sleep(15) # ensure we don't create more instances than GCE limit
return should_wait
def __str__(self):
return f'jpim {self.name}'
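# Illustrative wiring sketch -- an assumption about the surrounding batch
# driver, not part of this module: `app`, `machine_name_prefix` and `config`
# are supplied by the driver, and app['scheduler_state_changed'] (a Notice)
# and app['async_worker_pool'] (an AsyncWorkerPool) must already be populated,
# as read in __init__ above.
#
#   jpim = JobPrivateInstanceManager(app, machine_name_prefix, config)
#   await jpim.async_init()  # starts the create_instances and schedule_jobs loops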