code (string, lengths 22-1.05M) | apis (sequence, lengths 1-3.31k) | extract_api (string, lengths 75-3.25M)
---|---|---|
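Each row below pairs a source file (the code column) with the fully qualified callables it uses (apis) and positional extraction records (extract_api). As a hedged illustration of the kind of analysis behind the apis column, the sketch below collects dotted call names from a source string with the standard-library ast module; the exact record layout of the extract_api column is not reproduced here.
import ast

def extract_call_names(source: str) -> list:
    """Collect dotted names of the calls in `source` (illustrative only)."""
    names = []
    for node in ast.walk(ast.parse(source)):
        if isinstance(node, ast.Call):
            parts = []
            target = node.func
            while isinstance(target, ast.Attribute):
                parts.append(target.attr)
                target = target.value
            if isinstance(target, ast.Name):
                parts.append(target.id)
                names.append(".".join(reversed(parts)))
    return names

# extract_call_names("import os\nos.path.abspath(__file__)") -> ['os.path.abspath']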
import os
from setuptools import setup
VERSION = "0.2"
def get_long_description():
with open(
os.path.join(os.path.dirname(os.path.abspath(__file__)), "README.md"),
encoding="utf8",
) as fp:
return fp.read()
setup(
name="instapaper-to-sqlite",
description="Save data from Instapaper to a SQLite database",
long_description=get_long_description(),
long_description_content_type="text/markdown",
author="<NAME>",
author_email="<EMAIL>",
url="https://github.com/bcongdon/instapaper-to-sqlite",
project_urls={
"Source": "https://github.com/bcongdon/instapaper-to-sqlite",
"Issues": "https://github.com/bcongdon/instapaper-to-sqlite/issues",
},
classifiers=[
"Development Status :: 5 - Production/Stable",
"Environment :: Console",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Topic :: Database",
],
keywords="instapaper sqlite export dogsheep",
version=VERSION,
packages=["instapaper_to_sqlite"],
entry_points="""
[console_scripts]
instapaper-to-sqlite=instapaper_to_sqlite.cli:cli
""",
install_requires=[
"click",
"requests",
"sqlite-utils~=3.17",
"pyinstapaper @ git+https://github.com/bcongdon/pyinstapaper#egg=pyinstapaper",
],
extras_require={"test": ["pytest"]},
tests_require=["instapaper-to-sqlite[test]"],
)
| [
"os.path.abspath"
] | [((139, 164), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (154, 164), False, 'import os\n')] |
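The setup() call above wires the command-line tool through a console_scripts entry point. As a minimal sketch (assuming the package is installed and Python 3.10+ for the group keyword), the same mapping can be resolved at runtime with importlib.metadata:
# Sketch: resolve the console_scripts entry point declared in setup() above.
from importlib.metadata import entry_points

for ep in entry_points(group="console_scripts"):
    if ep.name == "instapaper-to-sqlite":
        print(ep.value)   # 'instapaper_to_sqlite.cli:cli'
        cli = ep.load()   # same callable the installed command invokes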
from typing import List
from pybm import PybmConfig
from pybm.command import CLICommand
from pybm.config import get_reporter_class
from pybm.exceptions import PybmError
from pybm.reporters import BaseReporter
from pybm.status_codes import ERROR, SUCCESS
from pybm.util.path import get_subdirs
class CompareCommand(CLICommand):
"""
Report benchmark results from specified sources.
"""
usage = "pybm compare <run> <anchor-ref> <compare-refs> [<options>]\n"
def __init__(self):
super(CompareCommand, self).__init__(name="compare")
self.config = PybmConfig.load()
def add_arguments(self):
self.parser.add_argument(
"run",
type=str,
metavar="<run>",
help="Benchmark run to report results for. "
"To report the preceding run, use the "
'"latest" keyword. To report results '
"of the n-th preceding run "
"(i.e., n runs ago), "
'use the "latest^{n}" syntax.',
)
self.parser.add_argument(
"refs",
nargs="+",
metavar="<refs>",
help="Benchmarked refs to compare. The first "
"given ref will be treated as the "
"anchor ref, relative to which all "
"differences are reported. An error is "
"raised if any of the given "
"refs are not present in the run.",
)
reporter: BaseReporter = get_reporter_class(config=self.config)
reporter_args = reporter.additional_arguments()
if reporter_args:
reporter_name = self.config.get_value("reporter.name")
reporter_group_desc = (
f"Additional options from configured reporter class {reporter_name!r}"
)
reporter_group = self.parser.add_argument_group(reporter_group_desc)
# add builder-specific options into the group
for arg in reporter_args:
reporter_group.add_argument(arg.pop("flags"), **arg)
def run(self, args: List[str]) -> int:
if not args:
self.parser.print_help()
return ERROR
self.add_arguments()
options = self.parser.parse_args(args)
reporter: BaseReporter = get_reporter_class(config=self.config)
# TODO: Parse run to fit schema
run = options.run
refs: List[str] = options.refs
result_dir = reporter.result_dir
# TODO: Make this dynamic to support other run identifiers
result = sorted(get_subdirs(result_dir))[-1]
result_path = result_dir / result
if result_path.exists():
reporter.compare(
*refs,
result=result,
target_filter=options.target_filter,
benchmark_filter=options.benchmark_filter,
context_filter=options.context_filter,
)
else:
raise PybmError(
f"No benchmark results found for the requested run {run!r}."
)
return SUCCESS
| [
"pybm.PybmConfig.load",
"pybm.util.path.get_subdirs",
"pybm.config.get_reporter_class",
"pybm.exceptions.PybmError"
] | [((583, 600), 'pybm.PybmConfig.load', 'PybmConfig.load', ([], {}), '()\n', (598, 600), False, 'from pybm import PybmConfig\n'), ((1475, 1513), 'pybm.config.get_reporter_class', 'get_reporter_class', ([], {'config': 'self.config'}), '(config=self.config)\n', (1493, 1513), False, 'from pybm.config import get_reporter_class\n'), ((2286, 2324), 'pybm.config.get_reporter_class', 'get_reporter_class', ([], {'config': 'self.config'}), '(config=self.config)\n', (2304, 2324), False, 'from pybm.config import get_reporter_class\n'), ((2966, 3037), 'pybm.exceptions.PybmError', 'PybmError', (['f"""No benchmark results found for the requested run {run!r}."""'], {}), "(f'No benchmark results found for the requested run {run!r}.')\n", (2975, 3037), False, 'from pybm.exceptions import PybmError\n'), ((2564, 2587), 'pybm.util.path.get_subdirs', 'get_subdirs', (['result_dir'], {}), '(result_dir)\n', (2575, 2587), False, 'from pybm.util.path import get_subdirs\n')] |
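Parsing the run identifier is still marked as a TODO inside run() above. A hypothetical helper (not part of pybm) for the "latest" / "latest^{n}" syntax described in the <run> argument help could look like this:
# Hypothetical helper showing one way to interpret the "latest" / "latest^{n}" syntax.
import re

def runs_back(run: str) -> int:
    """Return how many runs to go back: 'latest' -> 0, 'latest^{3}' -> 3."""
    match = re.fullmatch(r"latest(?:\^\{(\d+)\})?", run)
    if match is None:
        raise ValueError(f"unrecognised run identifier: {run!r}")
    return int(match.group(1) or 0)

# runs_back("latest") == 0, runs_back("latest^{2}") == 2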
"""
For a given detector, get a WIMP rate (not taking into account any detector effects).
"""
import numericalunits as nu
import wimprates as wr
import dddm
export, __all__ = dddm.exporter()
@export
class SHM:
"""
class used to pass a halo model to the rate computation
must contain:
:param v_esc -- escape velocity (multiplied by units)
:param rho_dm -- density in mass/volume of dark matter at the Earth (multiplied by units)
The standard halo model also allows variation of v_0
:param v_0 -- v0 of the velocity distribution (multiplied by units)
:function velocity_dist -- function taking v, t and giving the normalised
velocity distribution in the Earth rest frame.
"""
def __init__(self, v_0=None, v_esc=None, rho_dm=None):
self.v_0 = 230 * nu.km / nu.s if v_0 is None else v_0
self.v_esc = 544 * nu.km / nu.s if v_esc is None else v_esc
self.rho_dm = (0.3 * nu.GeV / nu.c0 ** 2 / nu.cm ** 3
if rho_dm is None else rho_dm)
def __str__(self):
# Standard Halo Model (shm)
return 'shm'
def velocity_dist(self, v, t):
"""
Get the velocity distribution in units of 1/velocity.
:param v: v is in units of velocity
:return: observed velocity distribution at earth
"""
return wr.observed_speed_dist(v, t, self.v_0, self.v_esc)
def parameter_dict(self):
"""Return a dict of readable parameters of the current settings"""
return dict(
v_0=self.v_0 / (nu.km / nu.s),
v_esc=self.v_esc / (nu.km / nu.s),
rho_dm=self.rho_dm / (nu.GeV / nu.c0 ** 2 / nu.cm ** 3),
)
| [
"dddm.exporter",
"wimprates.observed_speed_dist"
] | [((194, 209), 'dddm.exporter', 'dddm.exporter', ([], {}), '()\n', (207, 209), False, 'import dddm\n'), ((1381, 1431), 'wimprates.observed_speed_dist', 'wr.observed_speed_dist', (['v', 't', 'self.v_0', 'self.v_esc'], {}), '(v, t, self.v_0, self.v_esc)\n', (1403, 1431), True, 'import wimprates as wr\n')] |
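A small usage sketch of the class above (assuming dddm, wimprates and numericalunits are importable): instantiate the default Standard Halo Model and read back its parameters in readable units.
# Usage sketch for SHM; the printed numbers assume the default parameters.
halo_model = SHM()
print(halo_model)                   # shm
print(halo_model.parameter_dict())  # {'v_0': 230.0, 'v_esc': 544.0, 'rho_dm': 0.3}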
# imports
import os
import json
import subprocess
abs_join = lambda p1, p2 : os.path.abspath(os.path.join(p1, p2))
# constants
SCRIPT_DIR = os.path.abspath(os.path.dirname(__file__))
SEED_RELPATH = "./strprose/example_files/_seeds.json"
SEED_FULLPATH = abs_join(SCRIPT_DIR, SEED_RELPATH)
SEED_INFO = None
with open(SEED_FULLPATH, 'r') as f:
SEED_INFO = json.load(f)
TOOL_RELPATH = "../StrPROSE-synthesizer/StrPROSE/bin/Debug/netcoreapp3.1/StrPROSE.dll"
TOOL_FULLPATH = abs_join(SCRIPT_DIR, TOOL_RELPATH)
TARGET_RELDIR = "./strprose/targets"
TARGET_FULLDIR = abs_join(SCRIPT_DIR, TARGET_RELDIR)
MAX_SAMPLE_SIZE = 2000
EXAMPLE_RELDIR = "./strprose/example_files"
EXAMPLE_FULLDIR = abs_join(SCRIPT_DIR, EXAMPLE_RELDIR)
TIME_OUT = 120
# methods
def generate_examples(bench_id, seed):
command_line_args = [
"dotnet",
TOOL_FULLPATH,
"--samplegen",
TARGET_FULLDIR,
str(bench_id),
str(seed),
str(MAX_SAMPLE_SIZE),
EXAMPLE_FULLDIR
]
try:
print(f"# -------- Start Process ({bench_id}, {seed}) --------")
done_result = subprocess.run(command_line_args, timeout=TIME_OUT)
print(f"# ^^^^^^^^ Done: {done_result.returncode} ({bench_id}, {seed}) ^^^^^^^^")
except subprocess.TimeoutExpired:
print('# Error: subprocess TIMEOUT !!!')
if __name__ == "__main__":
for bench_id in SEED_INFO["bench_seeds"]:
for seed in SEED_INFO["bench_seeds"][bench_id]:
generate_examples(bench_id, seed)
| [
"json.load",
"os.path.dirname",
"subprocess.run",
"os.path.join"
] | [((158, 183), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (173, 183), False, 'import os\n'), ((359, 371), 'json.load', 'json.load', (['f'], {}), '(f)\n', (368, 371), False, 'import json\n'), ((93, 113), 'os.path.join', 'os.path.join', (['p1', 'p2'], {}), '(p1, p2)\n', (105, 113), False, 'import os\n'), ((1107, 1158), 'subprocess.run', 'subprocess.run', (['command_line_args'], {'timeout': 'TIME_OUT'}), '(command_line_args, timeout=TIME_OUT)\n', (1121, 1158), False, 'import subprocess\n')] |
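The script above only touches _seeds.json through SEED_INFO["bench_seeds"][bench_id], so a minimal compatible file has the shape sketched below; the bench ids and seed values are illustrative, not taken from the real seed file.
# Plausible shape of _seeds.json, inferred from how SEED_INFO is indexed above.
# The concrete ids and seeds here are made up.
EXAMPLE_SEED_INFO = {
    "bench_seeds": {
        "1": [101, 102],
        "2": [201],
    }
}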
import sys
from django.urls import resolve
def global_vars(request):
return {
'GLOBAL_TWITTER_ACCOUNT': '@open_apprentice',
'ORGANIZATION_NAME': 'Open Apprentice Foundation',
'ORGANIZATION_WEBSITE': 'https://openapprentice.org',
'ORGANIZATION_LOGO': '/static/img/ellie/open-apprentice-logo-full.png', # relative URL with pre /,
'SITE_LOGO_URL': '/static/img/ellie/ellie-platform-logo.png', # relative URL with pre /
'APPNAME': sys.modules[resolve(request.path_info).func.__module__].__package__,
}
| [
"django.urls.resolve"
] | [((495, 521), 'django.urls.resolve', 'resolve', (['request.path_info'], {}), '(request.path_info)\n', (502, 521), False, 'from django.urls import resolve\n')] |
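For global_vars above to be applied, Django needs it listed as a template context processor. A minimal settings sketch follows; the dotted path myapp.context_processors is an assumption about where the module lives, not something stated in the code.
# settings.py sketch -- 'myapp.context_processors' is an assumed module path.
TEMPLATES = [
    {
        "BACKEND": "django.template.backends.django.DjangoTemplates",
        "APP_DIRS": True,
        "OPTIONS": {
            "context_processors": [
                "django.template.context_processors.request",
                "myapp.context_processors.global_vars",
            ],
        },
    },
]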
""" Training script for steps_with_decay policy"""
import argparse
import os
import sys
import pickle
import resource
import traceback
import logging
from collections import defaultdict
import numpy as np
import yaml
import torch
from torch.autograd import Variable
import torch.nn as nn
import cv2
cv2.setNumThreads(0) # pytorch issue 1355: possible deadlock in dataloader
import _init_paths # pylint: disable=unused-import
import nn as mynn
import utils.net as net_utils
import utils.misc as misc_utils
from core.config import cfg, cfg_from_file, cfg_from_list, assert_and_infer_cfg
from datasets.roidb import combined_roidb_for_training
from roi_data.loader import RoiDataLoader, MinibatchSampler, BatchSampler, collate_minibatch
from modeling.model_builder import Generalized_RCNN
from utils.detectron_weight_helper import load_detectron_weight
from utils.logging import setup_logging
from utils.timer import Timer
from utils.training_stats import TrainingStats
# Set up logging and load config options
logger = setup_logging(__name__)
logging.getLogger('roi_data.loader').setLevel(logging.INFO)
# RuntimeError: received 0 items of ancdata. Issue: pytorch/pytorch#973
rlimit = resource.getrlimit(resource.RLIMIT_NOFILE)
resource.setrlimit(resource.RLIMIT_NOFILE, (4096, rlimit[1]))
def parse_args():
"""Parse input arguments"""
parser = argparse.ArgumentParser(description='Train a X-RCNN network')
parser.add_argument(
'--dataset', dest='dataset', required=True,
help='Dataset to use')
parser.add_argument(
'--num_classes', dest='num_classes',
help='Number of classes in your custom dataset',
default=None, type=int)
parser.add_argument(
'--cfg', dest='cfg_file', required=True,
help='Config file for training (and optionally testing)')
parser.add_argument(
'--set', dest='set_cfgs',
help='Set config keys. Key value sequence separated by whitespace.'
'e.g. [key] [value] [key] [value]',
default=[], nargs='+')
parser.add_argument(
'--disp_interval',
help='Display training info every N iterations',
default=20, type=int)
parser.add_argument(
'--no_cuda', dest='cuda', help='Do not use CUDA device', action='store_false')
# Optimization
# These options have the highest priority and can overwrite the values in the config file
# or values set by set_cfgs. `None` means do not overwrite.
parser.add_argument(
'--bs', dest='batch_size',
help='Explicitly specify to overwrite the value coming from cfg_file.',
type=int)
parser.add_argument(
'--nw', dest='num_workers',
help='Explicitly specify to overwrite number of workers to load data. Defaults to 4',
type=int)
parser.add_argument(
'--iter_size',
help='Update once every iter_size steps, as in Caffe.',
default=1, type=int)
parser.add_argument(
'--o', dest='optimizer', help='Training optimizer.',
default=None)
parser.add_argument(
'--lr', help='Base learning rate.',
default=None, type=float)
parser.add_argument(
'--lr_decay_gamma',
help='Learning rate decay rate.',
default=None, type=float)
# Epoch
parser.add_argument(
'--start_step',
help='Starting step count for training epoch. 0-indexed.',
default=0, type=int)
# Resume training: requires same iterations per epoch
parser.add_argument(
'--resume',
help='resume to training on a checkpoint',
action='store_true')
parser.add_argument(
'--no_save', help='do not save anything', action='store_true')
parser.add_argument(
'--load_ckpt', help='checkpoint path to load')
parser.add_argument(
'--load_detectron', help='path to the detectron weight pickle file')
parser.add_argument(
'--use_tfboard', help='Use tensorflow tensorboard to log training info',
action='store_true')
return parser.parse_args()
def save_ckpt(output_dir, args, step, train_size, model, optimizer):
"""Save checkpoint"""
if args.no_save:
return
ckpt_dir = os.path.join(output_dir, 'ckpt')
if not os.path.exists(ckpt_dir):
os.makedirs(ckpt_dir)
save_name = os.path.join(ckpt_dir, 'model_step{}.pth'.format(step))
if isinstance(model, mynn.DataParallel):
model = model.module
model_state_dict = model.state_dict()
torch.save({
'step': step,
'train_size': train_size,
'batch_size': args.batch_size,
'model': model.state_dict(),
'optimizer': optimizer.state_dict()}, save_name)
logger.info('save model: %s', save_name)
def main():
"""Main function"""
args = parse_args()
print('Called with args:')
print(args)
if not torch.cuda.is_available():
sys.exit("Need a CUDA device to run the code.")
if args.cuda or cfg.NUM_GPUS > 0:
cfg.CUDA = True
else:
raise ValueError("Need Cuda device to run !")
if args.dataset == "custom_dataset" and args.num_classes is None:
raise ValueError("Need number of classes in your custom dataset to run!")
if args.dataset == "coco2017":
cfg.TRAIN.DATASETS = ('coco_2014_train',)
cfg.MODEL.NUM_CLASSES = 4
elif args.dataset == "keypoints_coco2017":
cfg.TRAIN.DATASETS = ('keypoints_coco_2017_train',)
cfg.MODEL.NUM_CLASSES = 2
elif args.dataset == "voc2007":
cfg.TRAIN.DATASETS = ('voc_2007_train',)
cfg.MODEL.NUM_CLASSES = 21
elif args.dataset == "voc2012":
cfg.TRAIN.DATASETS = ('voc_2012_train',)
cfg.MODEL.NUM_CLASSES = 21
elif args.dataset == "custom_dataset":
cfg.TRAIN.DATASETS = ('custom_data_train',)
cfg.MODEL.NUM_CLASSES = args.num_classes
else:
raise ValueError("Unexpected args.dataset: {}".format(args.dataset))
cfg_from_file(args.cfg_file)
if args.set_cfgs is not None:
cfg_from_list(args.set_cfgs)
### Adaptively adjust some configs ###
original_batch_size = cfg.NUM_GPUS * cfg.TRAIN.IMS_PER_BATCH
original_ims_per_batch = cfg.TRAIN.IMS_PER_BATCH
original_num_gpus = cfg.NUM_GPUS
if args.batch_size is None:
args.batch_size = original_batch_size
cfg.NUM_GPUS = torch.cuda.device_count()
assert (args.batch_size % cfg.NUM_GPUS) == 0, \
'batch_size: %d, NUM_GPUS: %d' % (args.batch_size, cfg.NUM_GPUS)
cfg.TRAIN.IMS_PER_BATCH = args.batch_size // cfg.NUM_GPUS
effective_batch_size = args.iter_size * args.batch_size
print('effective_batch_size = batch_size * iter_size = %d * %d' % (args.batch_size, args.iter_size))
print('Adaptive config changes:')
print(' effective_batch_size: %d --> %d' % (original_batch_size, effective_batch_size))
print(' NUM_GPUS: %d --> %d' % (original_num_gpus, cfg.NUM_GPUS))
print(' IMS_PER_BATCH: %d --> %d' % (original_ims_per_batch, cfg.TRAIN.IMS_PER_BATCH))
### Adjust learning based on batch size change linearly
# For iter_size > 1, gradients are `accumulated`, so lr is scaled based
# on batch_size instead of effective_batch_size
old_base_lr = cfg.SOLVER.BASE_LR
cfg.SOLVER.BASE_LR *= args.batch_size / original_batch_size
print('Adjust BASE_LR linearly according to batch_size change:\n'
' BASE_LR: {} --> {}'.format(old_base_lr, cfg.SOLVER.BASE_LR))
### Adjust solver steps
step_scale = original_batch_size / effective_batch_size
old_solver_steps = cfg.SOLVER.STEPS
old_max_iter = cfg.SOLVER.MAX_ITER
cfg.SOLVER.STEPS = list(map(lambda x: int(x * step_scale + 0.5), cfg.SOLVER.STEPS))
cfg.SOLVER.MAX_ITER = int(cfg.SOLVER.MAX_ITER * step_scale + 0.5)
print('Adjust SOLVER.STEPS and SOLVER.MAX_ITER linearly based on effective_batch_size change:\n'
' SOLVER.STEPS: {} --> {}\n'
' SOLVER.MAX_ITER: {} --> {}'.format(old_solver_steps, cfg.SOLVER.STEPS,
old_max_iter, cfg.SOLVER.MAX_ITER))
# Scale FPN rpn_proposals collect size (post_nms_topN) in `collect` function
# of `collect_and_distribute_fpn_rpn_proposals.py`
#
# post_nms_topN = int(cfg[cfg_key].RPN_POST_NMS_TOP_N * cfg.FPN.RPN_COLLECT_SCALE + 0.5)
if cfg.FPN.FPN_ON and cfg.MODEL.FASTER_RCNN:
cfg.FPN.RPN_COLLECT_SCALE = cfg.TRAIN.IMS_PER_BATCH / original_ims_per_batch
print('Scale FPN rpn_proposals collect size directly proportional to the change of IMS_PER_BATCH:\n'
' cfg.FPN.RPN_COLLECT_SCALE: {}'.format(cfg.FPN.RPN_COLLECT_SCALE))
if args.num_workers is not None:
cfg.DATA_LOADER.NUM_THREADS = args.num_workers
print('Number of data loading threads: %d' % cfg.DATA_LOADER.NUM_THREADS)
### Overwrite some solver settings from command line arguments
if args.optimizer is not None:
cfg.SOLVER.TYPE = args.optimizer
if args.lr is not None:
cfg.SOLVER.BASE_LR = args.lr
if args.lr_decay_gamma is not None:
cfg.SOLVER.GAMMA = args.lr_decay_gamma
assert_and_infer_cfg()
timers = defaultdict(Timer)
### Dataset ###
timers['roidb'].tic()
roidb, ratio_list, ratio_index = combined_roidb_for_training(
cfg.TRAIN.DATASETS, cfg.TRAIN.PROPOSAL_FILES)
timers['roidb'].toc()
roidb_size = len(roidb)
logger.info('{:d} roidb entries'.format(roidb_size))
logger.info('Takes %.2f sec(s) to construct roidb', timers['roidb'].average_time)
# Effective training sample size for one epoch
train_size = roidb_size // args.batch_size * args.batch_size
batchSampler = BatchSampler(
sampler=MinibatchSampler(ratio_list, ratio_index),
batch_size=args.batch_size,
drop_last=True
)
dataset = RoiDataLoader(
roidb,
cfg.MODEL.NUM_CLASSES,
training=True)
dataloader = torch.utils.data.DataLoader(
dataset,
batch_sampler=batchSampler,
num_workers=cfg.DATA_LOADER.NUM_THREADS,
collate_fn=collate_minibatch)
dataiterator = iter(dataloader)
### Model ###
maskRCNN = Generalized_RCNN()
if cfg.CUDA:
maskRCNN.cuda()
### Optimizer ###
gn_param_nameset = set()
for name, module in maskRCNN.named_modules():
if isinstance(module, nn.GroupNorm):
gn_param_nameset.add(name+'.weight')
gn_param_nameset.add(name+'.bias')
gn_params = []
gn_param_names = []
bias_params = []
bias_param_names = []
nonbias_params = []
nonbias_param_names = []
nograd_param_names = []
for key, value in maskRCNN.named_parameters():
if value.requires_grad:
if 'bias' in key:
bias_params.append(value)
bias_param_names.append(key)
elif key in gn_param_nameset:
gn_params.append(value)
gn_param_names.append(key)
else:
nonbias_params.append(value)
nonbias_param_names.append(key)
else:
nograd_param_names.append(key)
assert (gn_param_nameset - set(nograd_param_names) - set(bias_param_names)) == set(gn_param_names)
# Learning rate of 0 is a dummy value to be set properly at the start of training
params = [
{'params': nonbias_params,
'lr': 0,
'weight_decay': cfg.SOLVER.WEIGHT_DECAY},
{'params': bias_params,
'lr': 0 * (cfg.SOLVER.BIAS_DOUBLE_LR + 1),
'weight_decay': cfg.SOLVER.WEIGHT_DECAY if cfg.SOLVER.BIAS_WEIGHT_DECAY else 0},
{'params': gn_params,
'lr': 0,
'weight_decay': cfg.SOLVER.WEIGHT_DECAY_GN}
]
# names of parameters for each parameter group
param_names = [nonbias_param_names, bias_param_names, gn_param_names]
if cfg.SOLVER.TYPE == "SGD":
optimizer = torch.optim.SGD(params, momentum=cfg.SOLVER.MOMENTUM)
elif cfg.SOLVER.TYPE == "Adam":
optimizer = torch.optim.Adam(params)
### Load checkpoint
if args.load_ckpt:
load_name = args.load_ckpt
logging.info("loading checkpoint %s", load_name)
checkpoint = torch.load(load_name, map_location=lambda storage, loc: storage)
net_utils.load_ckpt(maskRCNN, checkpoint['model'])
if args.resume:
args.start_step = checkpoint['step'] + 1
if 'train_size' in checkpoint: # For backward compatibility
if checkpoint['train_size'] != train_size:
print('train_size value: %d different from the one in checkpoint: %d'
% (train_size, checkpoint['train_size']))
# reorder the params in optimizer checkpoint's params_groups if needed
# misc_utils.ensure_optimizer_ckpt_params_order(param_names, checkpoint)
# There is a bug in optimizer.load_state_dict on Pytorch 0.3.1.
# However it's fixed on master.
optimizer.load_state_dict(checkpoint['optimizer'])
# misc_utils.load_optimizer_state_dict(optimizer, checkpoint['optimizer'])
del checkpoint
torch.cuda.empty_cache()
if args.load_detectron: #TODO resume for detectron weights (load sgd momentum values)
logging.info("loading Detectron weights %s", args.load_detectron)
load_detectron_weight(maskRCNN, args.load_detectron)
lr = optimizer.param_groups[0]['lr'] # lr of non-bias parameters, for command line outputs.
maskRCNN = mynn.DataParallel(maskRCNN, cpu_keywords=['im_info', 'roidb'],
minibatch=True)
### Training Setups ###
args.run_name = misc_utils.get_run_name() + '_step'
output_dir = misc_utils.get_output_dir(args, args.run_name)
args.cfg_filename = os.path.basename(args.cfg_file)
if not args.no_save:
if not os.path.exists(output_dir):
os.makedirs(output_dir)
blob = {'cfg': yaml.dump(cfg), 'args': args}
with open(os.path.join(output_dir, 'config_and_args.pkl'), 'wb') as f:
pickle.dump(blob, f, pickle.HIGHEST_PROTOCOL)
if args.use_tfboard:
from tensorboardX import SummaryWriter
# Set the Tensorboard logger
tblogger = SummaryWriter(output_dir)
### Training Loop ###
maskRCNN.train()
CHECKPOINT_PERIOD = int(cfg.TRAIN.SNAPSHOT_ITERS / cfg.NUM_GPUS)
# Set index for decay steps
decay_steps_ind = None
for i in range(1, len(cfg.SOLVER.STEPS)):
if cfg.SOLVER.STEPS[i] >= args.start_step:
decay_steps_ind = i
break
if decay_steps_ind is None:
decay_steps_ind = len(cfg.SOLVER.STEPS)
training_stats = TrainingStats(
args,
args.disp_interval,
tblogger if args.use_tfboard and not args.no_save else None)
try:
logger.info('Training starts !')
step = args.start_step
for step in range(args.start_step, cfg.SOLVER.MAX_ITER):
# Warm up
if step < cfg.SOLVER.WARM_UP_ITERS:
method = cfg.SOLVER.WARM_UP_METHOD
if method == 'constant':
warmup_factor = cfg.SOLVER.WARM_UP_FACTOR
elif method == 'linear':
alpha = step / cfg.SOLVER.WARM_UP_ITERS
warmup_factor = cfg.SOLVER.WARM_UP_FACTOR * (1 - alpha) + alpha
else:
raise KeyError('Unknown SOLVER.WARM_UP_METHOD: {}'.format(method))
lr_new = cfg.SOLVER.BASE_LR * warmup_factor
net_utils.update_learning_rate(optimizer, lr, lr_new)
lr = optimizer.param_groups[0]['lr']
assert lr == lr_new
elif step == cfg.SOLVER.WARM_UP_ITERS:
net_utils.update_learning_rate(optimizer, lr, cfg.SOLVER.BASE_LR)
lr = optimizer.param_groups[0]['lr']
assert lr == cfg.SOLVER.BASE_LR
# Learning rate decay
if decay_steps_ind < len(cfg.SOLVER.STEPS) and \
step == cfg.SOLVER.STEPS[decay_steps_ind]:
logger.info('Decay the learning rate on step %d', step)
lr_new = lr * cfg.SOLVER.GAMMA
net_utils.update_learning_rate(optimizer, lr, lr_new)
lr = optimizer.param_groups[0]['lr']
assert lr == lr_new
decay_steps_ind += 1
training_stats.IterTic()
optimizer.zero_grad()
for inner_iter in range(args.iter_size):
try:
input_data = next(dataiterator)
except StopIteration:
dataiterator = iter(dataloader)
input_data = next(dataiterator)
for key in input_data:
if key != 'roidb': # roidb is a list of ndarrays with inconsistent length
input_data[key] = list(map(Variable, input_data[key]))
try:
net_outputs = maskRCNN(**input_data)
except:
continue
training_stats.UpdateIterStats(net_outputs, inner_iter)
loss = net_outputs['total_loss']
loss.backward()
optimizer.step()
training_stats.IterToc()
training_stats.LogIterStats(step, lr)
if (step+1) % CHECKPOINT_PERIOD == 0:
save_ckpt(output_dir, args, step, train_size, maskRCNN, optimizer)
# ---- Training ends ----
# Save last checkpoint
save_ckpt(output_dir, args, step, train_size, maskRCNN, optimizer)
except (RuntimeError, KeyboardInterrupt):
del dataiterator
logger.info('Save ckpt on exception ...')
save_ckpt(output_dir, args, step, train_size, maskRCNN, optimizer)
logger.info('Save ckpt done.')
stack_trace = traceback.format_exc()
print(stack_trace)
finally:
if args.use_tfboard and not args.no_save:
tblogger.close()
if __name__ == '__main__':
main()
| [
"core.config.cfg_from_file",
"logging.getLogger",
"core.config.assert_and_infer_cfg",
"utils.training_stats.TrainingStats",
"torch.cuda.device_count",
"core.config.cfg_from_list",
"torch.cuda.is_available",
"sys.exit",
"logging.info",
"utils.logging.setup_logging",
"os.path.exists",
"utils.net.load_ckpt",
"tensorboardX.SummaryWriter",
"argparse.ArgumentParser",
"utils.misc.get_run_name",
"resource.setrlimit",
"modeling.model_builder.Generalized_RCNN",
"datasets.roidb.combined_roidb_for_training",
"torch.optim.SGD",
"yaml.dump",
"utils.misc.get_output_dir",
"utils.net.update_learning_rate",
"torch.cuda.empty_cache",
"roi_data.loader.MinibatchSampler",
"nn.DataParallel",
"roi_data.loader.RoiDataLoader",
"cv2.setNumThreads",
"torch.optim.Adam",
"pickle.dump",
"os.makedirs",
"traceback.format_exc",
"resource.getrlimit",
"torch.load",
"os.path.join",
"utils.detectron_weight_helper.load_detectron_weight",
"collections.defaultdict",
"os.path.basename",
"torch.utils.data.DataLoader"
] | [((301, 321), 'cv2.setNumThreads', 'cv2.setNumThreads', (['(0)'], {}), '(0)\n', (318, 321), False, 'import cv2\n'), ((1022, 1045), 'utils.logging.setup_logging', 'setup_logging', (['__name__'], {}), '(__name__)\n', (1035, 1045), False, 'from utils.logging import setup_logging\n'), ((1188, 1230), 'resource.getrlimit', 'resource.getrlimit', (['resource.RLIMIT_NOFILE'], {}), '(resource.RLIMIT_NOFILE)\n', (1206, 1230), False, 'import resource\n'), ((1231, 1292), 'resource.setrlimit', 'resource.setrlimit', (['resource.RLIMIT_NOFILE', '(4096, rlimit[1])'], {}), '(resource.RLIMIT_NOFILE, (4096, rlimit[1]))\n', (1249, 1292), False, 'import resource\n'), ((1357, 1418), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Train a X-RCNN network"""'}), "(description='Train a X-RCNN network')\n", (1380, 1418), False, 'import argparse\n'), ((4215, 4247), 'os.path.join', 'os.path.join', (['output_dir', '"""ckpt"""'], {}), "(output_dir, 'ckpt')\n", (4227, 4247), False, 'import os\n'), ((5976, 6004), 'core.config.cfg_from_file', 'cfg_from_file', (['args.cfg_file'], {}), '(args.cfg_file)\n', (5989, 6004), False, 'from core.config import cfg, cfg_from_file, cfg_from_list, assert_and_infer_cfg\n'), ((6372, 6397), 'torch.cuda.device_count', 'torch.cuda.device_count', ([], {}), '()\n', (6395, 6397), False, 'import torch\n'), ((9181, 9203), 'core.config.assert_and_infer_cfg', 'assert_and_infer_cfg', ([], {}), '()\n', (9201, 9203), False, 'from core.config import cfg, cfg_from_file, cfg_from_list, assert_and_infer_cfg\n'), ((9218, 9236), 'collections.defaultdict', 'defaultdict', (['Timer'], {}), '(Timer)\n', (9229, 9236), False, 'from collections import defaultdict\n'), ((9321, 9394), 'datasets.roidb.combined_roidb_for_training', 'combined_roidb_for_training', (['cfg.TRAIN.DATASETS', 'cfg.TRAIN.PROPOSAL_FILES'], {}), '(cfg.TRAIN.DATASETS, cfg.TRAIN.PROPOSAL_FILES)\n', (9348, 9394), False, 'from datasets.roidb import combined_roidb_for_training\n'), ((9890, 9948), 'roi_data.loader.RoiDataLoader', 'RoiDataLoader', (['roidb', 'cfg.MODEL.NUM_CLASSES'], {'training': '(True)'}), '(roidb, cfg.MODEL.NUM_CLASSES, training=True)\n', (9903, 9948), False, 'from roi_data.loader import RoiDataLoader, MinibatchSampler, BatchSampler, collate_minibatch\n'), ((9991, 10130), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['dataset'], {'batch_sampler': 'batchSampler', 'num_workers': 'cfg.DATA_LOADER.NUM_THREADS', 'collate_fn': 'collate_minibatch'}), '(dataset, batch_sampler=batchSampler,\n num_workers=cfg.DATA_LOADER.NUM_THREADS, collate_fn=collate_minibatch)\n', (10018, 10130), False, 'import torch\n'), ((10230, 10248), 'modeling.model_builder.Generalized_RCNN', 'Generalized_RCNN', ([], {}), '()\n', (10246, 10248), False, 'from modeling.model_builder import Generalized_RCNN\n'), ((13586, 13664), 'nn.DataParallel', 'mynn.DataParallel', (['maskRCNN'], {'cpu_keywords': "['im_info', 'roidb']", 'minibatch': '(True)'}), "(maskRCNN, cpu_keywords=['im_info', 'roidb'], minibatch=True)\n", (13603, 13664), True, 'import nn as mynn\n'), ((13800, 13846), 'utils.misc.get_output_dir', 'misc_utils.get_output_dir', (['args', 'args.run_name'], {}), '(args, args.run_name)\n', (13825, 13846), True, 'import utils.misc as misc_utils\n'), ((13871, 13902), 'os.path.basename', 'os.path.basename', (['args.cfg_file'], {}), '(args.cfg_file)\n', (13887, 13902), False, 'import os\n'), ((14797, 14902), 'utils.training_stats.TrainingStats', 'TrainingStats', (['args', 'args.disp_interval', '(tblogger if 
args.use_tfboard and not args.no_save else None)'], {}), '(args, args.disp_interval, tblogger if args.use_tfboard and \n not args.no_save else None)\n', (14810, 14902), False, 'from utils.training_stats import TrainingStats\n'), ((1046, 1082), 'logging.getLogger', 'logging.getLogger', (['"""roi_data.loader"""'], {}), "('roi_data.loader')\n", (1063, 1082), False, 'import logging\n'), ((4259, 4283), 'os.path.exists', 'os.path.exists', (['ckpt_dir'], {}), '(ckpt_dir)\n', (4273, 4283), False, 'import os\n'), ((4293, 4314), 'os.makedirs', 'os.makedirs', (['ckpt_dir'], {}), '(ckpt_dir)\n', (4304, 4314), False, 'import os\n'), ((4876, 4901), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (4899, 4901), False, 'import torch\n'), ((4911, 4958), 'sys.exit', 'sys.exit', (['"""Need a CUDA device to run the code."""'], {}), "('Need a CUDA device to run the code.')\n", (4919, 4958), False, 'import sys\n'), ((6047, 6075), 'core.config.cfg_from_list', 'cfg_from_list', (['args.set_cfgs'], {}), '(args.set_cfgs)\n', (6060, 6075), False, 'from core.config import cfg, cfg_from_file, cfg_from_list, assert_and_infer_cfg\n'), ((11961, 12014), 'torch.optim.SGD', 'torch.optim.SGD', (['params'], {'momentum': 'cfg.SOLVER.MOMENTUM'}), '(params, momentum=cfg.SOLVER.MOMENTUM)\n', (11976, 12014), False, 'import torch\n'), ((12187, 12235), 'logging.info', 'logging.info', (['"""loading checkpoint %s"""', 'load_name'], {}), "('loading checkpoint %s', load_name)\n", (12199, 12235), False, 'import logging\n'), ((12257, 12321), 'torch.load', 'torch.load', (['load_name'], {'map_location': '(lambda storage, loc: storage)'}), '(load_name, map_location=lambda storage, loc: storage)\n', (12267, 12321), False, 'import torch\n'), ((12330, 12380), 'utils.net.load_ckpt', 'net_utils.load_ckpt', (['maskRCNN', "checkpoint['model']"], {}), "(maskRCNN, checkpoint['model'])\n", (12349, 12380), True, 'import utils.net as net_utils\n'), ((13219, 13243), 'torch.cuda.empty_cache', 'torch.cuda.empty_cache', ([], {}), '()\n', (13241, 13243), False, 'import torch\n'), ((13344, 13409), 'logging.info', 'logging.info', (['"""loading Detectron weights %s"""', 'args.load_detectron'], {}), "('loading Detectron weights %s', args.load_detectron)\n", (13356, 13409), False, 'import logging\n'), ((13418, 13470), 'utils.detectron_weight_helper.load_detectron_weight', 'load_detectron_weight', (['maskRCNN', 'args.load_detectron'], {}), '(maskRCNN, args.load_detectron)\n', (13439, 13470), False, 'from utils.detectron_weight_helper import load_detectron_weight\n'), ((13747, 13772), 'utils.misc.get_run_name', 'misc_utils.get_run_name', ([], {}), '()\n', (13770, 13772), True, 'import utils.misc as misc_utils\n'), ((9768, 9809), 'roi_data.loader.MinibatchSampler', 'MinibatchSampler', (['ratio_list', 'ratio_index'], {}), '(ratio_list, ratio_index)\n', (9784, 9809), False, 'from roi_data.loader import RoiDataLoader, MinibatchSampler, BatchSampler, collate_minibatch\n'), ((12071, 12095), 'torch.optim.Adam', 'torch.optim.Adam', (['params'], {}), '(params)\n', (12087, 12095), False, 'import torch\n'), ((13944, 13970), 'os.path.exists', 'os.path.exists', (['output_dir'], {}), '(output_dir)\n', (13958, 13970), False, 'import os\n'), ((13984, 14007), 'os.makedirs', 'os.makedirs', (['output_dir'], {}), '(output_dir)\n', (13995, 14007), False, 'import os\n'), ((14032, 14046), 'yaml.dump', 'yaml.dump', (['cfg'], {}), '(cfg)\n', (14041, 14046), False, 'import yaml\n'), ((14153, 14198), 'pickle.dump', 'pickle.dump', (['blob', 'f', 
'pickle.HIGHEST_PROTOCOL'], {}), '(blob, f, pickle.HIGHEST_PROTOCOL)\n', (14164, 14198), False, 'import pickle\n'), ((14344, 14369), 'tensorboardX.SummaryWriter', 'SummaryWriter', (['output_dir'], {}), '(output_dir)\n', (14357, 14369), False, 'from tensorboardX import SummaryWriter\n'), ((17997, 18019), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (18017, 18019), False, 'import traceback\n'), ((14080, 14127), 'os.path.join', 'os.path.join', (['output_dir', '"""config_and_args.pkl"""'], {}), "(output_dir, 'config_and_args.pkl')\n", (14092, 14127), False, 'import os\n'), ((15664, 15717), 'utils.net.update_learning_rate', 'net_utils.update_learning_rate', (['optimizer', 'lr', 'lr_new'], {}), '(optimizer, lr, lr_new)\n', (15694, 15717), True, 'import utils.net as net_utils\n'), ((16330, 16383), 'utils.net.update_learning_rate', 'net_utils.update_learning_rate', (['optimizer', 'lr', 'lr_new'], {}), '(optimizer, lr, lr_new)\n', (16360, 16383), True, 'import utils.net as net_utils\n'), ((15874, 15939), 'utils.net.update_learning_rate', 'net_utils.update_learning_rate', (['optimizer', 'lr', 'cfg.SOLVER.BASE_LR'], {}), '(optimizer, lr, cfg.SOLVER.BASE_LR)\n', (15904, 15939), True, 'import utils.net as net_utils\n')] |
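To make the adaptive batch-size arithmetic in main() above concrete, here is a worked sketch with made-up numbers (an original batch size of 16, a requested batch size of 8 and iter_size 2); none of these values come from a real config.
# Worked example of the linear scaling performed in main() above,
# with illustrative numbers only.
original_batch_size = 16      # cfg.NUM_GPUS * cfg.TRAIN.IMS_PER_BATCH
batch_size = 8                # --bs
iter_size = 2                 # --iter_size
effective_batch_size = batch_size * iter_size             # 16

base_lr = 0.02
scaled_lr = base_lr * batch_size / original_batch_size    # 0.01

step_scale = original_batch_size / effective_batch_size   # 1.0
steps = [60000, 80000]
scaled_steps = [int(s * step_scale + 0.5) for s in steps]  # [60000, 80000]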
# Copyright (c) 2019-2020 hippo91 <<EMAIL>>
# Copyright (c) 2020 <NAME> <<EMAIL>>
# Copyright (c) 2021 <NAME> <<EMAIL>>
# Copyright (c) 2021 <NAME> <<EMAIL>>
# Licensed under the LGPL: https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html
# For details: https://github.com/PyCQA/astroid/blob/main/LICENSE
"""Astroid hooks for numpy.core.multiarray module."""
import functools
from astroid.brain.brain_numpy_utils import infer_numpy_member, looks_like_numpy_member
from astroid.brain.helpers import register_module_extender
from astroid.builder import parse
from astroid.inference_tip import inference_tip
from astroid.manager import AstroidManager
from astroid.nodes.node_classes import Attribute, Name
def numpy_core_multiarray_transform():
return parse(
"""
# different functions defined in multiarray.py
def inner(a, b):
return numpy.ndarray([0, 0])
def vdot(a, b):
return numpy.ndarray([0, 0])
"""
)
register_module_extender(
AstroidManager(), "numpy.core.multiarray", numpy_core_multiarray_transform
)
METHODS_TO_BE_INFERRED = {
"array": """def array(object, dtype=None, copy=True, order='K', subok=False, ndmin=0):
return numpy.ndarray([0, 0])""",
"dot": """def dot(a, b, out=None):
return numpy.ndarray([0, 0])""",
"empty_like": """def empty_like(a, dtype=None, order='K', subok=True):
return numpy.ndarray((0, 0))""",
"concatenate": """def concatenate(arrays, axis=None, out=None):
return numpy.ndarray((0, 0))""",
"where": """def where(condition, x=None, y=None):
return numpy.ndarray([0, 0])""",
"empty": """def empty(shape, dtype=float, order='C'):
return numpy.ndarray([0, 0])""",
"bincount": """def bincount(x, weights=None, minlength=0):
return numpy.ndarray([0, 0])""",
"busday_count": """def busday_count(begindates, enddates, weekmask='1111100', holidays=[], busdaycal=None, out=None):
return numpy.ndarray([0, 0])""",
"busday_offset": """def busday_offset(dates, offsets, roll='raise', weekmask='1111100', holidays=None, busdaycal=None, out=None):
return numpy.ndarray([0, 0])""",
"can_cast": """def can_cast(from_, to, casting='safe'):
return True""",
"copyto": """def copyto(dst, src, casting='same_kind', where=True):
return None""",
"datetime_as_string": """def datetime_as_string(arr, unit=None, timezone='naive', casting='same_kind'):
return numpy.ndarray([0, 0])""",
"is_busday": """def is_busday(dates, weekmask='1111100', holidays=None, busdaycal=None, out=None):
return numpy.ndarray([0, 0])""",
"lexsort": """def lexsort(keys, axis=-1):
return numpy.ndarray([0, 0])""",
"may_share_memory": """def may_share_memory(a, b, max_work=None):
return True""",
# Not yet available because dtype is not yet present in those brains
# "min_scalar_type": """def min_scalar_type(a):
# return numpy.dtype('int16')""",
"packbits": """def packbits(a, axis=None, bitorder='big'):
return numpy.ndarray([0, 0])""",
# Not yet available because dtype is not yet present in those brains
# "result_type": """def result_type(*arrays_and_dtypes):
# return numpy.dtype('int16')""",
"shares_memory": """def shares_memory(a, b, max_work=None):
return True""",
"unpackbits": """def unpackbits(a, axis=None, count=None, bitorder='big'):
return numpy.ndarray([0, 0])""",
"unravel_index": """def unravel_index(indices, shape, order='C'):
return (numpy.ndarray([0, 0]),)""",
"zeros": """def zeros(shape, dtype=float, order='C'):
return numpy.ndarray([0, 0])""",
}
for method_name, function_src in METHODS_TO_BE_INFERRED.items():
inference_function = functools.partial(infer_numpy_member, function_src)
AstroidManager().register_transform(
Attribute,
inference_tip(inference_function),
functools.partial(looks_like_numpy_member, method_name),
)
AstroidManager().register_transform(
Name,
inference_tip(inference_function),
functools.partial(looks_like_numpy_member, method_name),
)
| [
"astroid.builder.parse",
"functools.partial",
"astroid.manager.AstroidManager",
"astroid.inference_tip.inference_tip"
] | [((765, 964), 'astroid.builder.parse', 'parse', (['"""\n # different functions defined in multiarray.py\n def inner(a, b):\n return numpy.ndarray([0, 0])\n\n def vdot(a, b):\n return numpy.ndarray([0, 0])\n """'], {}), '(\n """\n # different functions defined in multiarray.py\n def inner(a, b):\n return numpy.ndarray([0, 0])\n\n def vdot(a, b):\n return numpy.ndarray([0, 0])\n """\n )\n', (770, 964), False, 'from astroid.builder import parse\n'), ((1001, 1017), 'astroid.manager.AstroidManager', 'AstroidManager', ([], {}), '()\n', (1015, 1017), False, 'from astroid.manager import AstroidManager\n'), ((3899, 3950), 'functools.partial', 'functools.partial', (['infer_numpy_member', 'function_src'], {}), '(infer_numpy_member, function_src)\n', (3916, 3950), False, 'import functools\n'), ((4019, 4052), 'astroid.inference_tip.inference_tip', 'inference_tip', (['inference_function'], {}), '(inference_function)\n', (4032, 4052), False, 'from astroid.inference_tip import inference_tip\n'), ((4062, 4117), 'functools.partial', 'functools.partial', (['looks_like_numpy_member', 'method_name'], {}), '(looks_like_numpy_member, method_name)\n', (4079, 4117), False, 'import functools\n'), ((4188, 4221), 'astroid.inference_tip.inference_tip', 'inference_tip', (['inference_function'], {}), '(inference_function)\n', (4201, 4221), False, 'from astroid.inference_tip import inference_tip\n'), ((4231, 4286), 'functools.partial', 'functools.partial', (['looks_like_numpy_member', 'method_name'], {}), '(looks_like_numpy_member, method_name)\n', (4248, 4286), False, 'import functools\n'), ((3955, 3971), 'astroid.manager.AstroidManager', 'AstroidManager', ([], {}), '()\n', (3969, 3971), False, 'from astroid.manager import AstroidManager\n'), ((4129, 4145), 'astroid.manager.AstroidManager', 'AstroidManager', ([], {}), '()\n', (4143, 4145), False, 'from astroid.manager import AstroidManager\n')] |
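A quick, hedged way to see the effect of the transforms registered above (assuming astroid with these numpy brains is installed): extract a node calling one of the patched functions and ask astroid to infer it.
# Illustrative check of the registered inference tips: with the numpy brains
# active, numpy.zeros(...) should infer to an ndarray instance.
import astroid

node = astroid.extract_node("import numpy\nnumpy.zeros((2, 2))  #@")
inferred = next(node.infer())
print(inferred.pytype())  # expected to name numpy's ndarray type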
import sys
import typing
import numpy as np
def solve(a: np.ndarray, k: int) -> typing.NoReturn:
n = len(a)
def compute_dp(a: np.ndarray) -> np.ndarray:
dp = np.zeros((n + 1, k), np.bool8)
dp[0, 0] = True
for i in range(n):
dp[i + 1] = dp[i].copy()
dp[i + 1, a[i] :] |= dp[i, : -a[i]]
return dp
dp_l = compute_dp(a)
dp_r = compute_dp(a[::-1])[::-1]
dp_r = dp_r.astype(np.int64).cumsum(axis=1)
cnt = 0
for p in range(n):
l, r = dp_l[p], dp_r[n - p]
x = a[p]
for i in np.flatnonzero(l).tolist():
if (
not r[k - i - 1]
- (0 if k - x - i - 1 < 0 else r[k - x - i - 1])
>= 1
):
continue
cnt += 1
break
print(n - cnt)
def main() -> typing.NoReturn:
n, k = map(int, input().split())
a = np.array(sys.stdin.readline().split(), dtype=np.int64)
solve(a, k)
main()
| [
"sys.stdin.readline",
"numpy.zeros",
"numpy.flatnonzero"
] | [((188, 218), 'numpy.zeros', 'np.zeros', (['(n + 1, k)', 'np.bool8'], {}), '((n + 1, k), np.bool8)\n', (196, 218), True, 'import numpy as np\n'), ((605, 622), 'numpy.flatnonzero', 'np.flatnonzero', (['l'], {}), '(l)\n', (619, 622), True, 'import numpy as np\n'), ((968, 988), 'sys.stdin.readline', 'sys.stdin.readline', ([], {}), '()\n', (986, 988), False, 'import sys\n')] |
import network
def conncb(task):
print("[{}] Connected".format(task))
def disconncb(task):
print("[{}] Disconnected".format(task))
def subscb(task):
print("[{}] Subscribed".format(task))
def pubcb(pub):
print("[{}] Published: {}".format(pub[0], pub[1]))
def datacb(msg):
print("[{}] Data arrived from topic: {}, Message:\n".format(msg[0], msg[1]), msg[2])
mqtt = network.mqtt("loboris", "mqtt://loboris.eu", user="wifimcu", password="<PASSWORD>", cleansession=True, connected_cb=conncb, disconnected_cb=disconncb, subscribed_cb=subscb, published_cb=pubcb, data_cb=datacb)
# secure connection requires more memory and may not work
# mqtts = network.mqtt("eclipse", "mqtts//iot.eclipse.org", cleansession=True, connected_cb=conncb, disconnected_cb=disconncb, subscribed_cb=subscb, published_cb=pubcb, data_cb=datacb)
# wsmqtt = network.mqtt("eclipse", "ws://iot.eclipse.org:80/ws", cleansession=True, data_cb=datacb)
mqtt.start()
#mqtt.config(lwt_topic='status', lwt_msg='Disconnected')
'''
# Wait until status is: (1, 'Connected')
mqtt.subscribe('test')
mqtt.publish('test', 'Hi from Micropython')
mqtt.stop()
'''
# ==================
# ThingSpeak example
# ==================
import network
import utime
def datacb(msg):
print("[{}] Data arrived from topic: {}, Message:\n".format(msg[0], msg[1]), msg[2])
thing = network.mqtt("thingspeak", "mqtt://mqtt.thingspeak.com", user="anyName", password="<PASSWORD>", cleansession=True, data_cb=datacb)
# or secure connection
#thing = network.mqtt("thingspeak", "mqtts://mqtt.thingspeak.com", user="anyName", password="<PASSWORD>", cleansession=True, data_cb=datacb)
thingspeakChannelId = "123456" # enter Thingspeak Channel ID
thingspeakChannelWriteApiKey = "ThingspeakWriteAPIKey" # EDIT - enter Thingspeak Write API Key
thingspeakFieldNo = 1
thingSpeakChanelFormat = "json"
pubchan = "channels/{:s}/publish/{:s}".format(thingspeakChannelId, thingspeakChannelWriteApiKey)
pubfield = "channels/{:s}/publish/fields/field{}/{:s}".format(thingspeakChannelId, thingspeakFieldNo, thingspeakChannelWriteApiKey)
subchan = "channels/{:s}/subscribe/{:s}/{:s}".format(thingspeakChannelId, thingSpeakChanelFormat, thingspeakChannelWriteApiKey)
subfield = "channels/{:s}/subscribe/fields/field{}/{:s}".format(thingspeakChannelId, thingspeakFieldNo, thingspeakChannelWriteApiKey)
thing.start()
tmo = 0
while thing.status()[0] != 2:
utime.sleep_ms(100)
tmo += 1
if tmo > 80:
print("Not connected")
break
# subscribe to channel
thing.subscribe(subchan)
# subscribe to field
thing.subscribe(subfield)
# publish to channel
# Payload can include any of these fields separated by ';':
# "field1=value;field2=value;...;field8=value;latitude=value;longitude=value;elevation=value;status=value"
thing.publish(pubchan, "field1=25.2;status=On line")
# Publish to field
thing.publish(pubfield, "24.5")
| [
"network.mqtt"
] | [((390, 608), 'network.mqtt', 'network.mqtt', (['"""loboris"""', '"""mqtt://loboris.eu"""'], {'user': '"""wifimcu"""', 'password': '"""<PASSWORD>"""', 'cleansession': '(True)', 'connected_cb': 'conncb', 'disconnected_cb': 'disconncb', 'subscribed_cb': 'subscb', 'published_cb': 'pubcb', 'data_cb': 'datacb'}), "('loboris', 'mqtt://loboris.eu', user='wifimcu', password=\n '<PASSWORD>', cleansession=True, connected_cb=conncb, disconnected_cb=\n disconncb, subscribed_cb=subscb, published_cb=pubcb, data_cb=datacb)\n", (402, 608), False, 'import network\n'), ((1345, 1479), 'network.mqtt', 'network.mqtt', (['"""thingspeak"""', '"""mqtt://mqtt.thingspeak.com"""'], {'user': '"""anyName"""', 'password': '"""<PASSWORD>"""', 'cleansession': '(True)', 'data_cb': 'datacb'}), "('thingspeak', 'mqtt://mqtt.thingspeak.com', user='anyName',\n password='<PASSWORD>', cleansession=True, data_cb=datacb)\n", (1357, 1479), False, 'import network\n')] |
# flake8: NOQA E501
import ast
import random
from textwrap import dedent
from typing import List
from main.exercises import generate_list, generate_string
from main.text import ExerciseStep, MessageStep, Page, Step, VerbatimStep, search_ast
from main.utils import returns_stdout
class IntroducingLists(Page):
class first_list(VerbatimStep):
"""
It's time to learn about a powerful new type of value called lists. Here's an example:
__program_indented__
"""
def program(self):
words = ['This', 'is', 'a', 'list']
for word in words:
print(word)
class can_contain_anything(VerbatimStep):
"""
A list is a *sequence* (an ordered collection/container) of any number of values.
The values are often referred to as *elements*.
They can be anything: numbers, strings, booleans, even lists! They can also be a mixture of types.
To create a list directly, like above:
1. Write some square brackets: `[]`
2. If you don't want an empty list, write some expressions inside to be the elements.
3. Put commas (`,`) between elements to separate them.
Here's another example of making a list:
__program_indented__
"""
def program(self):
x = 1
things = ['Hello', x, x + 3]
print(things)
class numbers_sum(VerbatimStep):
"""
As you saw above, lists are *iterable*, meaning you can iterate over them with a `for loop`.
Here's a program that adds up all the numbers in a list:
__program_indented__
"""
def program(self):
numbers = [3, 1, 4, 1, 5, 9]
total = 0
for number in numbers:
total += number
print(total)
class strings_sum(ExerciseStep):
"""
Now modify the program so that it can add up a list of strings instead of numbers.
For example, given:
words = ['This', 'is', 'a', 'list']
it should print:
Thisisalist
"""
hints = """
This is very similar to the exercises you've done building up strings character by character.
The solution is very similar to the program that adds numbers.
In fact, what happens if you try running that program with a list of strings?
The problem is the starting value `total = 0`. You can't add a string to 0 because numbers and strings are incompatible.
Is there a similar concept among strings to 0? A blank initial value?
"""
@returns_stdout
def solution(self, words: List[str]):
total = ''
for word in words:
total += word
print(total)
tests = [
(['This', 'is', 'a', 'list'], 'Thisisalist'),
(['The', 'quick', 'brown', 'fox', 'jumps'], 'Thequickbrownfoxjumps'),
]
class double_numbers(ExerciseStep):
"""
Optional bonus challenge: extend the program to insert a separator string *between* each word.
For example, given
words = ['This', 'is', 'a', 'list']
separator = ' - '
it would output:
This - is - a - list
Lists and strings have a lot in common.
For example, you can add two lists to combine them together into a new list.
You can also create an empty list that has no elements.
Check for yourself:
numbers = [1, 2] + [3, 4]
print(numbers)
new_numbers = []
new_numbers += numbers
new_numbers += [5]
print(new_numbers)
With that knowledge, write a program which takes a list of numbers
and prints a list where each number has been doubled. For example, given:
numbers = [3, 1, 4, 1, 5, 9, 2, 6, 5]
it would print:
[6, 2, 8, 2, 10, 18, 4, 12, 10]
"""
hints = """
Remember that you can multiply numbers using `*`.
This program is structurally very similar to the programs you've written to build up strings character by character.
Make a new list, and then build it up element by element in a for loop.
Start with an empty list.
You can make a list with one element `x` by just writing `[x]`.
You can add an element to a list by adding a list containing one element.
"""
@returns_stdout
def solution(self, numbers: List[int]):
double = []
for number in numbers:
double += [number * 2]
print(double)
tests = [
([3, 1, 4, 1, 5, 9, 2, 6, 5], [6, 2, 8, 2, 10, 18, 4, 12, 10]),
([0, 1, 2, 3], [0, 2, 4, 6]),
]
class filter_numbers(ExerciseStep):
"""
Great!
When you want to add a single element to the end of a list, instead of:
some_list += [element]
it's actually more common to write:
some_list.append(element)
There isn't really a big difference between these, but `.append`
will be more familiar and readable to most people.
Now use `.append` to write a program which prints a list containing only the numbers bigger than 5.
For example, given:
numbers = [3, 1, 4, 1, 5, 9, 2, 6, 5]
it would print:
[9, 6]
"""
hints = """
This is very similar to the previous exercise.
The difference is that sometimes you should skip appending to the new list.
Use an `if` statement.
Use a comparison operator to test if a number is big enough to add.
"""
# TODO enforce not using +=
@returns_stdout
def solution(self, numbers: List[int]):
big_numbers = []
for number in numbers:
if number > 5:
big_numbers.append(number)
print(big_numbers)
tests = [
([3, 1, 4, 1, 5, 9, 2, 6, 5], [9, 6]),
([0, 2, 4, 6, 8, 10], [6, 8, 10]),
]
final_text = """
Fantastic! We're making great progress.
"""
class UsingBreak(Page):
title = "Using `break` to end a loop early"
class list_contains_exercise(ExerciseStep):
"""
Exercise: write a program which takes a list and a value and checks
if the list contains the value. For example, given:
things = ['This', 'is', 'a', 'list']
thing_to_find = 'is'
it should print `True`, but for
thing_to_find = 'other'
it should print `False`.
"""
hints = """
You will need a loop.
You will need an `if` statement.
You will need a comparison operator.
Specifically `==`.
You need a boolean variable that you print at the end.
If you find the element in the list you should set that variable to `True`.
Once you've found the element, you can't unfind it.
That means that once you set the variable to `True`, it should never be set to anything else after that.
Don't use an `else`.
There is no reason to ever set the variable to `False` inside the loop.
"""
@returns_stdout
def solution(self, things, thing_to_find):
found = False
for thing in things:
if thing == thing_to_find:
found = True
print(found)
tests = [
((['This', 'is', 'a', 'list'], 'is'), True),
((['This', 'is', 'a', 'list'], 'other'), False),
(([1, 2, 3, 4], 1), True),
(([1, 2, 3, 4], 0), False),
]
@classmethod
def generate_inputs(cls):
contained = random.choice([True, False])
things = generate_list(int)
if contained:
thing_to_find = random.choice(things)
else:
thing_to_find = random.choice([
min(things) - 1,
max(things) + 1,
])
return dict(
things=things,
thing_to_find=thing_to_find,
)
final_text = """
Nice!
A typical solution looks something like this:
found = False
for thing in things:
if thing == thing_to_find:
found = True
print(found)
Your solution is probably similar. It's fine, but it's a bit inefficient.
That's because it'll loop over the entire list even if it finds the element at the beginning.
You can stop any loop using a `break` statement, like so:
for thing in things:
if thing == thing_to_find:
found = True
break
This is just as correct but skips unnecessary iterations and checks once it finds the element.
You can use snoop to see the difference.
"""
class GettingElementsAtPosition(Page):
title = "Getting Elements at a Position"
class introducing_subscripting(VerbatimStep):
"""
Looping is great, but often you just want to retrieve a single element from the list at a known position.
Here's how:
__program_indented__
"""
def program(self):
words = ['This', 'is', 'a', 'list']
print(words[0])
print(words[1])
print(words[2])
print(words[3])
class index_error(Step):
"""
In general, you can get the element at the position `i` with `words[i]`. The operation is called *subscripting* or *indexing*, and the position is called the *index*.
You've probably noticed that the first index is 0, not 1. In programming, counting starts at 0. It seems weird, but that's how most programming languages do it, and it's generally agreed to be better.
This also means that the last index in this list of 4 elements is 3. What happens if you try getting an index greater than that?
"""
program = "words[4]"
def check(self):
return "IndexError" in self.result
class introducing_len_and_range(VerbatimStep):
"""
There you go. `words[4]` and beyond don't exist, so trying that will give you an error.
By the way, you can get the number of elements in a list (commonly called the *length*) using `len(words)`.
That means that the last valid index of the list is `len(words) - 1`, so the last element is `words[len(words) - 1]`. Try these for yourself.
So in general, the valid indices are:
[0, 1, 2, ..., len(words) - 2, len(words) - 1]
There's a handy built in function to give you these values, called `range`:
__program_indented__
"""
def program(self):
for i in range(10):
print(i)
class range_len(VerbatimStep):
"""
`range(n)` is similar to the list `[0, 1, 2, ..., n - 2, n - 1]`.
This gives us an alternative way to loop over a list:
__program_indented__
"""
def program(self):
words = ['This', 'is', 'a', 'list']
for index in range(len(words)):
print(index)
print(words[index])
class index_exercise(ExerciseStep):
"""
Let's get some exercise! Given a list `things` and a value `to_find`,
print the first index of `to_find` in the list, i.e. the lowest number `i` such that
`things[i]` is `to_find`. For example, for
things = ['on', 'the', 'way', 'to', 'the', 'store']
to_find = 'the'
your program should print `1`.
You can assume that `to_find` appears at least once.
"""
hints = """
You will need to look at all the possible indices of `things` and check which one is the answer.
To look at all possible indices, you will need a loop over `range(len(things))`.
To check if an index is the answer, you will need to use:
- `if`
- the index in a subscript
- `==`
Since you're looking for the first index, you need to stop the loop once you find one.
You learned how to stop a loop in the middle recently.
You need to use `break`.
"""
class all_indices(MessageStep, ExerciseStep):
"""
You're almost there! However, this prints all the indices,
not just the first one.
"""
@returns_stdout
def solution(self, things, to_find):
for i in range(len(things)):
if to_find == things[i]:
print(i)
tests = [
((['on', 'the', 'way', 'to', 'the', 'store'], 'the'), "1\n4"),
(([0, 1, 2, 3, 4, 5, 6, 6], 6), "6\n7"),
]
class last_index(MessageStep, ExerciseStep):
"""
You're almost there! However, this prints the *last* index,
not the first one.
"""
@returns_stdout
def solution(self, things, to_find):
answer = None
for i in range(len(things)):
if to_find == things[i]:
answer = i
print(answer)
tests = [
((['on', 'the', 'way', 'to', 'the', 'store'], 'the'), 4),
(([0, 1, 2, 3, 4, 5, 6, 6], 6), 7),
]
@returns_stdout
def solution(self, things, to_find):
for i in range(len(things)):
if to_find == things[i]:
print(i)
break
tests = [
((['on', 'the', 'way', 'to', 'the', 'store'], 'the'), 1),
(([0, 1, 2, 3, 4, 5, 6, 6], 6), 6),
]
@classmethod
def generate_inputs(cls):
things = generate_list(str)
to_find = generate_string()
things += [to_find] * random.randint(1, 3)
random.shuffle(things)
return dict(
things=things,
to_find=to_find,
)
class zip_exercise(ExerciseStep):
"""
Nice!
By the way, indexing and `len()` also work on strings. Try them out in the shell.
Here's another exercise. Given two strings of equal length, e.g:
string1 = "Hello"
string2 = "World"
print them vertically side by side, with a space between each character:
H W
e o
l r
l l
o d
"""
hints = """
Did you experiment with indexing and `len()` with strings in the shell?
Forget loops for a moment. How would you print just the first line, which has the first character of each of the two strings?
In the second line you want to print the second character of each string, and so on.
You will need a `for` loop.
You will need indexing (subscripting).
You will need `range`.
You will need `len`.
You will need `+`.
You will need to index both strings.
You will need to pass the same index to both strings each time to retrieve matching characters.
"""
@returns_stdout
def solution(self, string1, string2):
for i in range(len(string1)):
char1 = string1[i]
char2 = string2[i]
print(char1 + ' ' + char2)
tests = {
("Hello", "World"): dedent("""\
H W
e o
l r
l l
o d
"""),
("Having", "ablast"): dedent("""\
H a
a b
v l
i a
n s
g t
"""),
}
@classmethod
def generate_inputs(cls):
length = random.randrange(5, 11)
return dict(
string1=generate_string(length),
string2=generate_string(length),
)
class zip_longest_exercise(ExerciseStep):
"""
Incredible!
Your solution probably looks something like this:
for i in range(len(string1)):
char1 = string1[i]
char2 = string2[i]
print(char1 + ' ' + char2)
This doesn't work so well if the strings have different lengths.
In fact, it goes wrong in different ways depending on whether `string1` or `string2` is longer.
Your next challenge is to fix this problem by filling in 'missing' characters with spaces.
For example, for:
string1 = "Goodbye"
string2 = "World"
output:
G W
o o
o r
d l
b d
y
e
and for:
string1 = "Hello"
string2 = "Elizabeth"
output:
H E
e l
l i
l z
o a
b
e
t
h
"""
hints = [
"The solution has the same overall structure and "
"essential elements of the previous solution, "
"but it's significantly longer and will require "
"a few additional ideas and pieces.",
dedent("""
In particular, it should still contain something like:
for i in range(...):
...
print(char1 + ' ' + char2)
"""),
"What should go inside `range()`? Neither `len(string1)` nor `len(string2)` is good enough.",
"You want a loop iteration for every character in the longer string.",
"That means you need `range(<length of the longest string>)`",
"In other words you need to find the biggest of the two values "
"`len(string1)` and `len(string2)`. You've already done an exercise like that.",
"Once you've sorted out `for i in range(...)`, `i` will sometimes be too big "
"to be a valid index for both strings. You will need to check if it's too big before indexing.",
"Remember, the biggest valid index for `string1` is `len(string1) - 1`. "
"`len(string)` is too big.",
"You will need two `if` statements, one for each string.",
"You will need to set e.g. `char1 = ' '` when `string1[i]` is not valid.",
]
# TODO catch user writing string1 < string2
@returns_stdout
def solution(self, string1, string2):
length1 = len(string1)
length2 = len(string2)
if length1 > length2:
length = length1
else:
length = length2
for i in range(length):
if i < len(string1):
char1 = string1[i]
else:
char1 = ' '
if i < len(string2):
char2 = string2[i]
else:
char2 = ' '
print(char1 + ' ' + char2)
tests = {
("Goodbye", "World"): dedent("""\
G W
o o
o r
d l
b d
y
e
"""),
("Hello", "Elizabeth"): dedent("""\
H E
e l
l i
l z
o a
b
e
t
h
"""),
}
@classmethod
def generate_inputs(cls):
length1 = random.randrange(5, 11)
length2 = random.randrange(12, 20)
if random.choice([True, False]):
length1, length2 = length2, length1
return dict(
string1=generate_string(length1),
string2=generate_string(length2),
)
final_text = """
Magnificent! Take a break, you've earned it!
"""
class CallingFunctionsTerminology(Page):
title = "Terminology: Calling functions and methods"
class print_functions(VerbatimStep):
"""
It's time to expand your vocabulary some more.
`print` and `len` are ***functions***. See for yourself:
__program_indented__
"""
def program(self):
print(len)
print(print)
class introducing_callable(VerbatimStep):
"""
An expression like `len(things)` or `print(things)` is a function ***call*** - when you write that, you are ***calling*** the function `len` or `print`. The fact that this is possible means that functions are ***callable***:
__program_indented__
"""
def program(self):
print(callable(len))
class not_callable(VerbatimStep):
"""
Most things are not callable, so trying to call them will give you an error:
__program_indented__
"""
# noinspection PyCallingNonCallable
def program(self):
f = 'a string'
print(callable(f))
f()
class print_returns_none(VerbatimStep):
"""
In the call `len(things)`, `things` is an ***argument***. Sometimes you will also see the word ***parameter***, which means basically the same thing as argument. It's a bit like you're giving the argument to the function - specifically we say that the argument `things` is *passed* to `len`, and `len` *accepts* or *receives* the argument.
`len(things)` will evaluate to a number such as 3, in which case we say that `len` ***returned*** 3.
All calls have to return something...even if it's nothing. For example, `print`'s job is to display something on screen, not to return a useful value. So it returns something useless instead:
__program_indented__
"""
# noinspection PyNoneFunctionAssignment
def program(self):
things = [1, 2, 3]
length = len(things)
printed = print(length)
print(printed)
class len_of_none(VerbatimStep):
"""
`None` is a special 'null' value which can't do anything interesting. It's a common placeholder that represents the lack of a real useful value. Functions that don't want to return anything return `None` by default. If you see an error message about `None` or `NoneType`, it often means you assigned the wrong thing to a variable:
__program_indented__
"""
# noinspection PyNoneFunctionAssignment,PyUnusedLocal,PyTypeChecker
def program(self):
things = print([1, 2, 3])
length = len(things)
class methods_of_str(VerbatimStep):
"""
A ***method*** is a function which belongs to a type, and can be called on all values of that type using `.`. For example, `upper` and `lower` are methods of strings, which are called with e.g. `word.upper()`:
__program_indented__
"""
def program(self):
word = 'Hello'
print(word.upper)
print(word.upper())
class no_append_for_str(VerbatimStep):
"""
Another example is that `append` is a method of lists. But you can't use `.upper` on a list or `.append` on a string:
__program_indented__
"""
# noinspection PyUnresolvedReferences
def program(self):
word = 'Hello'
word.append('!')
final_text = """
The word 'attribute' in the error message refers to the use of `.` - the error actually comes just from `word.append`, without even a call.
"""
class FunctionsAndMethodsForLists(Page):
# TODO this is quite the information dump and I'd like it to be a little more interactive,
# but users don't need to know these functions off by heart.
class sum_list(Step):
"""
Let's review how to work with lists. Suppose we have a list `nums = [1, 2, 3]`. You can use:
- **`append`**: Add an element to the end of the list. `nums.append(4)` changes the list to `[1, 2, 3, 4]`.
- **`len`**: Returns the number of elements. `len(nums)` is `3`.
- **`range`**: `range(n)` is an object similar to the list of numbers from 0 to `n - 1`. In particular, `range(len(nums))` is like `[0, 1, 2]`.
- **`subscripting`**: Get a value at an index. `nums[0]` is 1, `nums[1]` is 2, `nums[2]` is 3.
- **`+`**: Concatenates lists. `nums + [4, 5]` is `[1, 2, 3, 4, 5]`.
Here's some new things. Try them out in the shell.
- **`subscript assignment`**: Set a value at an index. `nums[0] = 9` changes the list to `[9, 2, 3]`.
- **`join`**: Add a list of strings with a separator in between. This is a method of strings (the separator) which takes an iterable of strings as an argument. `'--'.join(['apples', 'oranges', 'bananas'])` returns `'apples--oranges--bananas'`. You can also use an empty string if you don't want a separator, e.g. `''.join(['apples', 'oranges', 'bananas'])` returns `'applesorangesbananas'`.
- **`sum`**: Add a list of numbers. `sum(nums)` is 6.
- **`in`**: A comparison operator that checks if a value is in a list. `2 in nums` is `True`, but `4 in nums` is `False`.
- **`index`**: Returns the first index of a value in a list. `[7, 8, 9, 8].index(8)` is 1. Raises an error if the value isn't there.
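For example, a quick session in the shell could look like this (each result shown as a comment):
nums = [1, 2, 3]
nums[0] = 9
print(nums)                        # [9, 2, 3]
print('--'.join(['a', 'b', 'c']))  # a--b--c
print(sum(nums))                   # 14
print(2 in nums)                   # True
print([7, 8, 9, 8].index(8))       # 1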
You may recognise some of these from your exercises. I assure you that those exercises were not pointless, as you've now learned valuable fundamental skills. For example, you can use `in` to check if a list contains 5, but there's no similarly easy way to check for a number bigger than 5.
It's useful to know these functions, but it's not easy to learn them all, and there's many more. A more important skill is being able to look things up. For example, here are some typical ways you might Google the above functions if you forgot their names:
- `append`
- python add element to list
- python add item at end of list
- `len`
- python size of list
- python number of elements in list
- python how many characters in string
- `join`
- python combine list of strings with separator
- python add together list of strings with string in between
- `sum`
- python add list of numbers
- python total of numbers
- `in`
- python check if list contains value
- python test if list has element
- `index`
- python get position of element
- python get index of value
Let's practice this skill now. Find a function/method that returns the value in a list which is bigger than any other value. For example, given the list `[21, 55, 4, 91, 62, 49]`, it will return `91`. You should write the answer in the shell as a single small expression. For example, if you were looking for the function `sum`, you could write `sum([21, 55, 4, 91, 62, 49])`. Don't solve this manually with a loop.
"""
hints = """
Use the words 'python' and 'list' in your search query.
In one word, what's special about `91` in the list `[21, 55, 4, 91, 62, 49]`?
'biggest' or 'largest'
'python biggest value in list'
"""
program = "max([21, 55, 4, 91, 62, 49])"
def check(self):
return search_ast(
self.stmt,
ast.Call(func=ast.Name(id='max')),
)
class list_insert(Step):
"""
Good find! Let's do one more. If you have a list:
nums = [1, 2, 3, 4, 5]
You could write `nums.append(9)` and `nums` would change to:
[1, 2, 3, 4, 5, 9]
But suppose you don't want the 9 to be at the end, you want it to go between the second and third elements:
[1, 2, 9, 3, 4, 5]
Call the right function/method in the shell to do that.
"""
hints = """
Use the words 'python' and 'list' in your search query.
Instead of putting the value at the beginning or end, we want to put it ____________?
'in the middle' or 'at an index' or 'at a particular position'
'python add value at index'
"""
program = "nums.insert(2, 9)"
def check(self):
return search_ast(
self.stmt,
ast.Call(func=ast.Attribute(attr='insert'),
args=[ast.Constant(value=2),
ast.Constant(value=9)]),
)
class dir_list(VerbatimStep):
"""
Perfect!
It can also be useful to Google things like "python list tutorial", e.g. if:
- Googling a specific method has failed so you want to find it manually.
- You're still confused about lists after this course.
- It's been a while since you learned about lists and you need a reminder.
- You're struggling to solve a problem with lists and you need to go back to basics and strengthen your foundations.
There are also ways to find information without any googling. Try `__program__` in the shell.
"""
program = "dir([])"
final_text = """
`dir()` returns a list of the argument's attributes, which are mostly methods. Many will start with `__` which you can ignore for now - scroll to the end of the list and you'll see some familiar methods.
Here are a few more useful functions/methods. Suppose `nums = [28, 99, 10, 81, 59, 64]`
- **`sorted`**: Takes an iterable and returns a list of the elements in order. `sorted(nums)` returns `[10, 28, 59, 64, 81, 99]`.
- **`pop`**: Removes and returns an element at a given index. `nums.pop(3)` removes `nums[3]` (`81`) from the list and returns it. Without an argument, i.e. just `nums.pop()`, it will remove and return the last element.
- **`remove`**: Removes the first occurrence of the given element. `nums.remove(10)` will leave `nums` as `[28, 99, 81, 59, 64]`. Raises an error if the value doesn't exist. Equivalent to `nums.pop(nums.index(10))`.
- **`count`**: Returns the number of times the argument appears in the list. `[1, 2, 3, 2, 7, 2, 5].count(2)` is 3.
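For example, running these on the same list one after another (each result shown as a comment):
nums = [28, 99, 10, 81, 59, 64]
print(sorted(nums))    # [10, 28, 59, 64, 81, 99]
print(nums.pop(3))     # 81, leaving nums as [28, 99, 10, 59, 64]
nums.remove(10)        # nums is now [28, 99, 59, 64]
print(nums.count(99))  # 1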
You've already seen that `len` and subscripting work with strings, a bit as if strings are lists of characters. Strings also support some of the new methods we've learned, not just for characters but for any substring. For example:
- `'the' in 'feed the dog and the cat'` is `True`
- `'feed the dog and the cat'.count('the')` is 2
- `'feed the dog and the cat'.index('the')` is 5
Note that in most cases, methods which modify a list in place (`append`, `insert`, `remove`) merely return `None`, while the remaining functions/methods return a new useful value without changing the original argument. The only exception is the `pop` method.
Modifying a value directly is called *mutation* - types of values which can be mutated are *mutable*, while those that can't are *immutable*. Strings are immutable - they don't have any methods like `append` or even subscript assignment. You simply can't change a string - you can only create new strings and use those instead. That means that this is a useless statement on its own:
word.upper()
The string referred to by `word` isn't modified, instead `word.upper()` returned a new string which was immediately discarded. If you want to change the value that `word` refers to, you have to assign a new value to the variable:
word = word.upper()
Or you can use `word.upper()` immediately in a larger expression, e.g.
if word.lower() == 'yes':
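To see the difference concretely:
word = 'hello'
word.upper()         # returns 'HELLO', which is immediately discarded
print(word)          # hello
word = word.upper()
print(word)          # HELLO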
"""
class UnderstandingProgramsWithPythonTutor(Page):
final_text = """
It's time to learn about another tool to explore programs. Put some code in the editor and then click the new "Python Tutor" button. Here's some example code if you want:
all_numbers = [2, 4, 8, 1, 9, 7]
small_numbers = []
big_numbers = []
for number in all_numbers:
if number <= 5:
small_numbers.append(number)
else:
big_numbers.append(number)
print(small_numbers)
print(big_numbers)
The button will open a new tab with a visualisation from [pythontutor.com](http://pythontutor.com).
There you can navigate through the program step by step with the "Prev" or "Next" buttons, or drag
the slider left or right. You can also see the values of variables on the right.
"""
class EqualsVsIs(Page):
title = "`==` vs `is`"
class two_separate_lists(VerbatimStep):
"""
It's time to learn some technical details that are often misunderstood and lead to errors.
Run this program:
__program_indented__
"""
def program(self):
list1 = [1, 2, 3]
list2 = [1, 2, 3]
print(list1)
print(list2)
print(list1 == list2)
print(list1 is list2)
list1.append(4)
print(list1)
print(list2)
class same_list(VerbatimStep):
"""
This program is quite straightforward and mostly consists of things you're familiar with.
We create two variables which refer to lists.
The lists have the same elements, so they are equal: `list1 == list2` is `True`.
But then there's a new comparison operator: `is`. Here `list1 is list2` is `False`.
That means that regardless of the two lists being equal,
they are still two separate, distinct, individual lists.
As a result, when you append 4 to `list1`, only `list1` changes.
Now change `list2 = [1, 2, 3]` to `list2 = list1` and see what difference it makes.
"""
program_in_text = False
def program(self):
list1 = [1, 2, 3]
list2 = list1
print(list1)
print(list2)
print(list1 == list2)
print(list1 is list2)
list1.append(4)
print(list1)
print(list2)
final_text = """
Now `list1 is list2` is `True`, because *there is only one list*, and the two variables
`list1` and `list2` both refer to that same list. `list1.append(4)` appends to the one list
and the result can be seen in both `print(list1)` and `print(list2)` because both lines
are now just different ways of printing the same list.
I recommend running both versions with Python Tutor to see how it visualises the difference.
In the second case, the two variables both have arrows pointing to a single list object.
`list2 = list1` doesn't create an eternal link between the variables. If you assign a new value
to *either* of the variables, e.g. `list1 = [7, 8, 9]`, the other variable will be unaffected
and will still point to the original list.
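For example:
list1 = [1, 2, 3]
list2 = list1
list1 = [7, 8, 9]
print(list1)  # [7, 8, 9]
print(list2)  # [1, 2, 3] - still the original list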
Basically, an assignment like:
list2 = <expression>
means 'make the variable `list2` refer to whatever `<expression>` evaluates to'.
It doesn't make a copy of that value, which is how both variables can end up pointing to the same list.
But as we've learned before, `list2` doesn't remember `<expression>`, only the value.
It doesn't know about other variables.
You can copy a list with the `copy` method:
list2 = list1.copy()
This will make the program behave like the first version again.
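In other words:
list1 = [1, 2, 3]
list2 = list1.copy()
print(list1 is list2)  # False - two separate, equal lists
list1.append(4)
print(list1)           # [1, 2, 3, 4]
print(list2)           # [1, 2, 3]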
If you come across this kind of problem and you're still having trouble understanding this stuff, read the essay [Facts and myths about Python names and values](https://nedbatchelder.com/text/names.html).
"""
class ModifyingWhileIterating(Page):
final_text = """
Consider this program. It loops through a list of numbers and removes the ones that are 10 or smaller. Or at least, it tries to. I recommend running it with Python Tutor.
numbers = [10, 7, 8, 3, 12, 15]
for i in range(len(numbers)):
number = numbers[i]
if number <= 10:
numbers.pop(i)
print(numbers)
(remember that `numbers.pop(i)` removes the element from `numbers` at index `i`)
As it runs, it clearly skips even looking at 7 or 3 and doesn't remove them, and at the end it fails when it tries to access an index that's too high. Can you see why this happens?
The index variable `i` runs through the usual values 0, 1, 2, ... as it's supposed to, but as the list changes those are no longer the positions we want. For example in the first iteration `i` is 0 and `number` is 10, which gets removed. This shifts the rest of the numbers left one position, so now 7 is in position 0. But then in the next iteration `i` is 1, and `numbers[i]` is 8. 7 got skipped.
We could try writing the program to use `remove` instead of `pop` so we don't have to use indices. It even looks nicer this way.
numbers = [10, 7, 8, 3, 12, 15]
for number in numbers:
if number <= 10:
numbers.remove(number)
print(numbers)
But it turns out this does the same thing, for the same reason. Iterating over a list still goes through the indices under the hood.
The lesson here is to ***never modify something while you iterate over it***. Keep mutation and looping separate.
The good news is that there are many ways to solve this. You can instead just loop over a copy, as in:
for number in numbers.copy():
Now the list being modified and the list being iterated over are separate objects, even if they start out with equal contents.
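Written out in full, that looks like:
numbers = [10, 7, 8, 3, 12, 15]
for number in numbers.copy():
    if number <= 10:
        numbers.remove(number)
print(numbers)  # [12, 15]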
Similarly, you could loop over the original and modify a copy:
numbers = [10, 7, 8, 3, 12, 15]
big_numbers = numbers.copy()
for number in numbers:
if number <= 10:
big_numbers.remove(number)
print(big_numbers)
Or you could build up a new list from scratch. In this case, we've already done a similar thing in an exercise:
numbers = [10, 7, 8, 3, 12, 15]
big_numbers = []
for number in numbers:
if number > 10:
big_numbers.append(number)
print(big_numbers)
"""
| [
"ast.Attribute",
"textwrap.dedent",
"ast.Constant",
"random.choice",
"random.shuffle",
"main.exercises.generate_list",
"random.randrange",
"main.exercises.generate_string",
"ast.Name",
"random.randint"
] | [((7157, 7185), 'random.choice', 'random.choice', (['[True, False]'], {}), '([True, False])\n', (7170, 7185), False, 'import random\n'), ((7207, 7225), 'main.exercises.generate_list', 'generate_list', (['int'], {}), '(int)\n', (7220, 7225), False, 'from main.exercises import generate_list, generate_string\n'), ((12998, 13016), 'main.exercises.generate_list', 'generate_list', (['str'], {}), '(str)\n', (13011, 13016), False, 'from main.exercises import generate_list, generate_string\n'), ((13039, 13056), 'main.exercises.generate_string', 'generate_string', ([], {}), '()\n', (13054, 13056), False, 'from main.exercises import generate_list, generate_string\n'), ((13124, 13146), 'random.shuffle', 'random.shuffle', (['things'], {}), '(things)\n', (13138, 13146), False, 'import random\n'), ((14476, 14640), 'textwrap.dedent', 'dedent', (['""" H W\n e o\n l r\n l l\n o d\n """'], {}), '(\n """ H W\n e o\n l r\n l l\n o d\n """\n )\n', (14482, 14640), False, 'from textwrap import dedent\n'), ((14668, 14856), 'textwrap.dedent', 'dedent', (['""" H a\n a b\n v l\n i a\n n s\n g t\n """'], {}), '(\n """ H a\n a b\n v l\n i a\n n s\n g t\n """\n )\n', (14674, 14856), False, 'from textwrap import dedent\n'), ((14937, 14960), 'random.randrange', 'random.randrange', (['(5)', '(11)'], {}), '(5, 11)\n', (14953, 14960), False, 'import random\n'), ((16148, 16361), 'textwrap.dedent', 'dedent', (['"""\n In particular, it should still contain something like:\n\n for i in range(...):\n ...\n print(char1 + \' \' + char2)\n """'], {}), '(\n """\n In particular, it should still contain something like:\n\n for i in range(...):\n ...\n print(char1 + \' \' + char2)\n """\n )\n', (16154, 16361), False, 'from textwrap import dedent\n'), ((17990, 18202), 'textwrap.dedent', 'dedent', (['""" G W\n o o\n o r\n d l\n b d\n y \n e \n """'], {}), '(\n """ G W\n o o\n o r\n d l\n b d\n y \n e \n """\n )\n', (17996, 18202), False, 'from textwrap import dedent\n'), ((18232, 18492), 'textwrap.dedent', 'dedent', (['""" H E\n e l\n l i\n l z\n o a\n b\n e\n t\n h\n """'], {}), '(\n """ H E\n e l\n l i\n l z\n o a\n b\n e\n t\n h\n """\n )\n', (18238, 18492), False, 'from textwrap import dedent\n'), ((18574, 18597), 'random.randrange', 'random.randrange', (['(5)', '(11)'], {}), '(5, 11)\n', (18590, 18597), False, 'import random\n'), ((18620, 18644), 'random.randrange', 'random.randrange', (['(12)', '(20)'], {}), '(12, 20)\n', (18636, 18644), False, 'import random\n'), ((18660, 18688), 'random.choice', 'random.choice', (['[True, False]'], {}), '([True, False])\n', (18673, 18688), False, 'import random\n'), ((7284, 7305), 'random.choice', 'random.choice', (['things'], {}), '(things)\n', (7297, 7305), False, 'import random\n'), ((13091, 13111), 'random.randint', 'random.randint', (['(1)', '(3)'], {}), '(1, 3)\n', (13105, 13111), False, 'import random\n'), ((15010, 15033), 'main.exercises.generate_string', 'generate_string', (['length'], {}), '(length)\n', (15025, 15033), False, 'from main.exercises import generate_list, generate_string\n'), ((15059, 15082), 'main.exercises.generate_string', 'generate_string', (['length'], {}), '(length)\n', (15074, 15082), False, 'from main.exercises import generate_list, generate_string\n'), ((18791, 18815), 'main.exercises.generate_string', 'generate_string', (['length1'], {}), '(length1)\n', (18806, 18815), False, 'from main.exercises import generate_list, generate_string\n'), ((18841, 18865), 'main.exercises.generate_string', 'generate_string', (['length2'], {}), '(length2)\n', (18856, 18865), 
False, 'from main.exercises import generate_list, generate_string\n'), ((26048, 26066), 'ast.Name', 'ast.Name', ([], {'id': '"""max"""'}), "(id='max')\n", (26056, 26066), False, 'import ast\n'), ((26903, 26931), 'ast.Attribute', 'ast.Attribute', ([], {'attr': '"""insert"""'}), "(attr='insert')\n", (26916, 26931), False, 'import ast\n'), ((26964, 26985), 'ast.Constant', 'ast.Constant', ([], {'value': '(2)'}), '(value=2)\n', (26976, 26985), False, 'import ast\n'), ((27018, 27039), 'ast.Constant', 'ast.Constant', ([], {'value': '(9)'}), '(value=9)\n', (27030, 27039), False, 'import ast\n')] |
##########################################################################
#
# Copyright (c) 2013, <NAME>. All rights reserved.
# Copyright (c) 2013, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of <NAME> nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import weakref
import imath
import Gaffer
import GafferUI
class ColorSwatchPlugValueWidget( GafferUI.PlugValueWidget ) :
def __init__( self, plugs, **kw ) :
self.__swatch = GafferUI.ColorSwatch()
GafferUI.PlugValueWidget.__init__( self, self.__swatch, plugs, **kw )
## \todo How do we set the maximum height with a public API?
self.__swatch._qtWidget().setMaximumHeight( 20 )
self._addPopupMenu( self.__swatch )
self.__swatch.buttonPressSignal().connect( Gaffer.WeakMethod( self.__buttonPress ), scoped = False )
self.__swatch.dragBeginSignal().connect( Gaffer.WeakMethod( self.__dragBegin ), scoped = False )
self.__swatch.dragEndSignal().connect( Gaffer.WeakMethod( self.__dragEnd ), scoped = False )
self.__swatch.buttonReleaseSignal().connect( Gaffer.WeakMethod( self.__buttonRelease ), scoped = False )
self._updateFromPlugs()
def setHighlighted( self, highlighted ) :
GafferUI.PlugValueWidget.setHighlighted( self, highlighted )
self.__swatch.setHighlighted( highlighted )
def _updateFromPlugs( self ) :
with self.getContext() :
value = _colorFromPlugs( self.getPlugs() )
self.__swatch.setColor( value )
def __buttonPress( self, widget, event ) :
if event.buttons == event.Buttons.Left :
return True
return False
def __dragBegin( self, widget, event ) :
GafferUI.Pointer.setCurrent( "rgba" )
return self.__swatch.getColor()
def __dragEnd( self, widget, event ) :
GafferUI.Pointer.setCurrent( None )
def __buttonRelease( self, widget, event ) :
if event.button != event.Buttons.Left :
return False
if not self._editable() :
return False
_ColorPlugValueDialogue.acquire( self.getPlugs() )
return True
def _colorFromPlugs( plugs ) :
if not len( plugs ) :
return imath.Color4f( 0 )
# ColorSwatch only supports one colour, and doesn't have
# an "indeterminate" state, so when we have multiple plugs
# the best we can do is take an average.
return sum( p.getValue() for p in plugs ) / len( plugs )
## \todo Perhaps we could make this a part of the public API? Perhaps we could also make a
# PlugValueDialogue base class to share some of the work with the dialogue made by the
# SplinePlugValueWidget. Or perhaps the `acquire()` here and `NodeSetEditor.acquire()` should
# actually be functionality of CompoundEditor?
class _ColorPlugValueDialogue( GafferUI.ColorChooserDialogue ) :
def __init__( self, plugs, parentWindow ) :
GafferUI.ColorChooserDialogue.__init__(
self,
color = _colorFromPlugs( plugs )
)
# we use these to decide which actions to merge into a single undo
self.__lastChangedReason = None
self.__mergeGroupId = 0
self.__colorChangedConnection = self.colorChooser().colorChangedSignal().connect( Gaffer.WeakMethod( self.__colorChanged ), scoped = False )
self.confirmButton.clickedSignal().connect( Gaffer.WeakMethod( self.__buttonClicked ), scoped = False )
self.cancelButton.clickedSignal().connect( Gaffer.WeakMethod( self.__buttonClicked ), scoped = False )
self.__plugs = plugs
self.__initialValues = { p : p.getValue() for p in self.__plugs }
nodes = { p.node() for p in self.__plugs }
self.__plugSetConnections = [ n.plugSetSignal().connect( Gaffer.WeakMethod( self.__plugSet ), scoped = False ) for n in nodes ]
for node in nodes :
node.parentChangedSignal().connect( Gaffer.WeakMethod( self.__destroy ), scoped = False )
plug = next( iter( self.__plugs ) )
if len( self.__plugs ) == 1 :
self.setTitle( plug.relativeName( plug.ancestor( Gaffer.ScriptNode ) ) )
else :
self.setTitle( "{} plugs".format( len( self.__plugs ) ) )
self.__plugSet( plug )
parentWindow.addChildWindow( self, removeOnClose = True )
@classmethod
def acquire( cls, plugs ) :
plug = next( iter( plugs ) )
script = plug.node().scriptNode()
scriptWindow = GafferUI.ScriptWindow.acquire( script )
for window in scriptWindow.childWindows() :
if isinstance( window, cls ) and window.__plugs == plugs :
window.setVisible( True )
return window
window = _ColorPlugValueDialogue( plugs, scriptWindow )
window.setVisible( True )
return window
def __plugSet( self, plug ) :
if plug in self.__plugs :
with Gaffer.BlockedConnection( self.__colorChangedConnection ) :
self.colorChooser().setColor( _colorFromPlugs( self.__plugs ) )
def __colorChanged( self, colorChooser, reason ) :
if not GafferUI.ColorChooser.changesShouldBeMerged( self.__lastChangedReason, reason ) :
self.__mergeGroupId += 1
self.__lastChangedReason = reason
with Gaffer.UndoScope(
next( iter( self.__plugs ) ).ancestor( Gaffer.ScriptNode ),
mergeGroup = "ColorPlugValueDialogue%d%d" % ( id( self, ), self.__mergeGroupId )
) :
with Gaffer.BlockedConnection( self.__plugSetConnections ) :
for plug in self.__plugs :
plug.setValue( self.colorChooser().getColor() )
def __buttonClicked( self, button ) :
if button is self.cancelButton :
with Gaffer.UndoScope( next( iter( self.__plugs ) ).ancestor( Gaffer.ScriptNode ) ) :
for p, v in self.__initialValues.items() :
p.setValue( v )
self.parent().removeChild( self )
# Workaround for https://bugreports.qt-project.org/browse/QTBUG-26761.
assert( not self.visible() )
GafferUI.WidgetAlgo.keepUntilIdle( self )
def __destroy( self, *unused ) :
self.parent().removeChild( self )
| [
"GafferUI.PlugValueWidget.__init__",
"GafferUI.PlugValueWidget.setHighlighted",
"imath.Color4f",
"GafferUI.ScriptWindow.acquire",
"GafferUI.ColorChooser.changesShouldBeMerged",
"Gaffer.WeakMethod",
"GafferUI.WidgetAlgo.keepUntilIdle",
"GafferUI.ColorSwatch",
"GafferUI.Pointer.setCurrent",
"Gaffer.BlockedConnection"
] | [((2030, 2052), 'GafferUI.ColorSwatch', 'GafferUI.ColorSwatch', ([], {}), '()\n', (2050, 2052), False, 'import GafferUI\n'), ((2056, 2123), 'GafferUI.PlugValueWidget.__init__', 'GafferUI.PlugValueWidget.__init__', (['self', 'self.__swatch', 'plugs'], {}), '(self, self.__swatch, plugs, **kw)\n', (2089, 2123), False, 'import GafferUI\n'), ((2752, 2810), 'GafferUI.PlugValueWidget.setHighlighted', 'GafferUI.PlugValueWidget.setHighlighted', (['self', 'highlighted'], {}), '(self, highlighted)\n', (2791, 2810), False, 'import GafferUI\n'), ((3168, 3203), 'GafferUI.Pointer.setCurrent', 'GafferUI.Pointer.setCurrent', (['"""rgba"""'], {}), "('rgba')\n", (3195, 3203), False, 'import GafferUI\n'), ((3285, 3318), 'GafferUI.Pointer.setCurrent', 'GafferUI.Pointer.setCurrent', (['None'], {}), '(None)\n', (3312, 3318), False, 'import GafferUI\n'), ((3606, 3622), 'imath.Color4f', 'imath.Color4f', (['(0)'], {}), '(0)\n', (3619, 3622), False, 'import imath\n'), ((5669, 5706), 'GafferUI.ScriptWindow.acquire', 'GafferUI.ScriptWindow.acquire', (['script'], {}), '(script)\n', (5698, 5706), False, 'import GafferUI\n'), ((7078, 7117), 'GafferUI.WidgetAlgo.keepUntilIdle', 'GafferUI.WidgetAlgo.keepUntilIdle', (['self'], {}), '(self)\n', (7111, 7117), False, 'import GafferUI\n'), ((2319, 2356), 'Gaffer.WeakMethod', 'Gaffer.WeakMethod', (['self.__buttonPress'], {}), '(self.__buttonPress)\n', (2336, 2356), False, 'import Gaffer\n'), ((2420, 2455), 'Gaffer.WeakMethod', 'Gaffer.WeakMethod', (['self.__dragBegin'], {}), '(self.__dragBegin)\n', (2437, 2455), False, 'import Gaffer\n'), ((2517, 2550), 'Gaffer.WeakMethod', 'Gaffer.WeakMethod', (['self.__dragEnd'], {}), '(self.__dragEnd)\n', (2534, 2550), False, 'import Gaffer\n'), ((2618, 2657), 'Gaffer.WeakMethod', 'Gaffer.WeakMethod', (['self.__buttonRelease'], {}), '(self.__buttonRelease)\n', (2635, 2657), False, 'import Gaffer\n'), ((4582, 4620), 'Gaffer.WeakMethod', 'Gaffer.WeakMethod', (['self.__colorChanged'], {}), '(self.__colorChanged)\n', (4599, 4620), False, 'import Gaffer\n'), ((4687, 4726), 'Gaffer.WeakMethod', 'Gaffer.WeakMethod', (['self.__buttonClicked'], {}), '(self.__buttonClicked)\n', (4704, 4726), False, 'import Gaffer\n'), ((4792, 4831), 'Gaffer.WeakMethod', 'Gaffer.WeakMethod', (['self.__buttonClicked'], {}), '(self.__buttonClicked)\n', (4809, 4831), False, 'import Gaffer\n'), ((6228, 6305), 'GafferUI.ColorChooser.changesShouldBeMerged', 'GafferUI.ColorChooser.changesShouldBeMerged', (['self.__lastChangedReason', 'reason'], {}), '(self.__lastChangedReason, reason)\n', (6271, 6305), False, 'import GafferUI\n'), ((5049, 5082), 'Gaffer.WeakMethod', 'Gaffer.WeakMethod', (['self.__plugSet'], {}), '(self.__plugSet)\n', (5066, 5082), False, 'import Gaffer\n'), ((5181, 5214), 'Gaffer.WeakMethod', 'Gaffer.WeakMethod', (['self.__destroy'], {}), '(self.__destroy)\n', (5198, 5214), False, 'import Gaffer\n'), ((6037, 6092), 'Gaffer.BlockedConnection', 'Gaffer.BlockedConnection', (['self.__colorChangedConnection'], {}), '(self.__colorChangedConnection)\n', (6061, 6092), False, 'import Gaffer\n'), ((6562, 6613), 'Gaffer.BlockedConnection', 'Gaffer.BlockedConnection', (['self.__plugSetConnections'], {}), '(self.__plugSetConnections)\n', (6586, 6613), False, 'import Gaffer\n')] |
#!/usr/bin/env python
# Copyright 2016 <NAME>. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
"""
cmd_exec_test.py: Tests for cmd_exec.py
"""
import unittest
from core import test_lib
from core.meta import syntax_asdl, Id
from osh import state
suffix_op = syntax_asdl.suffix_op
osh_word = syntax_asdl.word
word_part = syntax_asdl.word_part
def InitEvaluator():
word_ev = test_lib.MakeTestEvaluator()
state.SetLocalString(word_ev.mem, 'x', 'xxx')
state.SetLocalString(word_ev.mem, 'y', 'yyy')
return word_ev
class ExpansionTest(unittest.TestCase):
def testBraceExpand(self):
arena = test_lib.MakeArena('<cmd_exec_test.py>')
c_parser = test_lib.InitCommandParser('echo _{a,b}_', arena=arena)
node = c_parser._ParseCommandLine()
print(node)
ex = test_lib.InitExecutor(arena=arena)
#print(ex.Execute(node))
#print(ex._ExpandWords(node.words))
class VarOpTest(unittest.TestCase):
def testVarOps(self):
ev = InitEvaluator() # initializes x=xxx and y=yyy
unset_sub = word_part.BracedVarSub(syntax_asdl.token(Id.VSub_Name, 'unset'))
part_vals = []
ev._EvalWordPart(unset_sub, part_vals)
print(part_vals)
set_sub = word_part.BracedVarSub(syntax_asdl.token(Id.VSub_Name, 'x'))
part_vals = []
ev._EvalWordPart(set_sub, part_vals)
print(part_vals)
# Now add some ops
part = word_part.LiteralPart(syntax_asdl.token(Id.Lit_Chars, 'default'))
arg_word = osh_word.CompoundWord([part])
test_op = suffix_op.StringUnary(Id.VTest_ColonHyphen, arg_word)
unset_sub.suffix_op = test_op
set_sub.suffix_op = test_op
part_vals = []
ev._EvalWordPart(unset_sub, part_vals)
print(part_vals)
part_vals = []
ev._EvalWordPart(set_sub, part_vals)
print(part_vals)
if __name__ == '__main__':
unittest.main()
| [
"osh.state.SetLocalString",
"core.test_lib.InitExecutor",
"core.test_lib.InitCommandParser",
"core.meta.syntax_asdl.token",
"unittest.main",
"core.test_lib.MakeTestEvaluator",
"core.test_lib.MakeArena"
] | [((576, 604), 'core.test_lib.MakeTestEvaluator', 'test_lib.MakeTestEvaluator', ([], {}), '()\n', (602, 604), False, 'from core import test_lib\n'), ((607, 652), 'osh.state.SetLocalString', 'state.SetLocalString', (['word_ev.mem', '"""x"""', '"""xxx"""'], {}), "(word_ev.mem, 'x', 'xxx')\n", (627, 652), False, 'from osh import state\n'), ((655, 700), 'osh.state.SetLocalString', 'state.SetLocalString', (['word_ev.mem', '"""y"""', '"""yyy"""'], {}), "(word_ev.mem, 'y', 'yyy')\n", (675, 700), False, 'from osh import state\n'), ((2001, 2016), 'unittest.main', 'unittest.main', ([], {}), '()\n', (2014, 2016), False, 'import unittest\n'), ((802, 842), 'core.test_lib.MakeArena', 'test_lib.MakeArena', (['"""<cmd_exec_test.py>"""'], {}), "('<cmd_exec_test.py>')\n", (820, 842), False, 'from core import test_lib\n'), ((858, 913), 'core.test_lib.InitCommandParser', 'test_lib.InitCommandParser', (['"""echo _{a,b}_"""'], {'arena': 'arena'}), "('echo _{a,b}_', arena=arena)\n", (884, 913), False, 'from core import test_lib\n'), ((980, 1014), 'core.test_lib.InitExecutor', 'test_lib.InitExecutor', ([], {'arena': 'arena'}), '(arena=arena)\n', (1001, 1014), False, 'from core import test_lib\n'), ((1242, 1282), 'core.meta.syntax_asdl.token', 'syntax_asdl.token', (['Id.VSub_Name', '"""unset"""'], {}), "(Id.VSub_Name, 'unset')\n", (1259, 1282), False, 'from core.meta import syntax_asdl, Id\n'), ((1405, 1441), 'core.meta.syntax_asdl.token', 'syntax_asdl.token', (['Id.VSub_Name', '"""x"""'], {}), "(Id.VSub_Name, 'x')\n", (1422, 1441), False, 'from core.meta import syntax_asdl, Id\n'), ((1581, 1623), 'core.meta.syntax_asdl.token', 'syntax_asdl.token', (['Id.Lit_Chars', '"""default"""'], {}), "(Id.Lit_Chars, 'default')\n", (1598, 1623), False, 'from core.meta import syntax_asdl, Id\n')] |
# Generated by Django 2.0.8 on 2019-05-29 16:00
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('blitz_api', '0019_merge_20190524_1719'),
]
operations = [
migrations.AlterField(
model_name='exportmedia',
name='file',
field=models.FileField(upload_to='export/%Y/%m/', verbose_name='file'),
),
]
| [
"django.db.models.FileField"
] | [((342, 406), 'django.db.models.FileField', 'models.FileField', ([], {'upload_to': '"""export/%Y/%m/"""', 'verbose_name': '"""file"""'}), "(upload_to='export/%Y/%m/', verbose_name='file')\n", (358, 406), False, 'from django.db import migrations, models\n')] |
import gi
gi.require_version('Gtk', '3.0')
from gi.repository import Gio, Gtk, Gdk
class AddFriendWidget(Gtk.Box):
def __init__(self, main_window, fchat_prv, friend_list):
Gtk.Box.__init__(self, spacing=7, orientation = Gtk.Orientation.VERTICAL)
self.fchat_prv = fchat_prv
self.main_window = main_window
self.friend_list = friend_list
self.fchat_prv.add_friend_gui = self
self.generate_keys_bt = Gtk.Button('Generate Key')
self.generate_keys_bt.connect('clicked', self.on_generate_keys)
self.save_bt = Gtk.Button('Save')
self.save_bt.connect('clicked', self.on_save)
self.cancel_bt = Gtk.Button('Cancel')
self.cancel_bt.connect('clicked', self.on_cancel)
self.close_bt = Gtk.Button('Close')
self.close_bt.connect('clicked', self.on_close)
self.owner_info = Gtk.Entry()
self.owner_info.set_sensitive(False)
self.copy_clipboard_bt = Gtk.Button(label='Copy to clipboard')
self.clipboard = Gtk.Clipboard.get(Gdk.SELECTION_CLIPBOARD)
self.copy_clipboard_bt.connect('clicked', self.on_copy_clipboard)
h_owner = Gtk.Box(spacing=5)
h_owner.pack_start(self.owner_info, True, True, 1)
h_owner.pack_start(self.copy_clipboard_bt, False, False, 1)
self.friend_info = Gtk.Entry()
self.friend_info.set_placeholder_text('Key of your friend')
self.spinner = Gtk.Spinner()
self.pack_start(h_owner, True, False, 7)
self.pack_start(self.friend_info, True, False, 7)
self.pack_start(self.spinner, True, False, 7)
h_bt = Gtk.Box()
h_bt.pack_start(self.generate_keys_bt, True, False, 7)
h_bt.pack_start(self.save_bt, True, False, 7)
h_bt.pack_start(self.cancel_bt, True, False, 7)
h_bt.pack_start(self.close_bt, True, False, 7)
self.pack_start(h_bt, True, False, 7)
self.job = None
def on_generate_keys(self, button):
self.pub, self.prv, self.pub_info_key, self.job = self.fchat_prv.generate_key_for_friend()
self.owner_info.set_text(self.pub_info_key)
self.on_generate_keys_start()
def on_generate_keys_start(self):
self.spinner.show()
self.spinner.start()
self.friend_info.set_sensitive(False)
self.save_bt.set_sensitive(False)
self.close_bt.set_sensitive(False)
self.generate_keys_bt.set_sensitive(False)
self.copy_clipboard_bt.set_sensitive(False)
def on_generate_keys_ok(self):
self.spinner.hide()
self.spinner.stop()
self.friend_info.set_sensitive(True)
self.save_bt.set_sensitive(True)
self.close_bt.set_sensitive(True)
self.generate_keys_bt.set_sensitive(True)
self.copy_clipboard_bt.set_sensitive(True)
def on_generate_keys_faild(self, text):
self.spinner.hide()
self.spinner.stop()
self.friend_info.set_sensitive(True)
self.save_bt.set_sensitive(True)
self.close_bt.set_sensitive(True)
self.generate_keys_bt.set_sensitive(True)
self.copy_clipboard_bt.set_sensitive(True)
def on_cancel(self, button):
if self.job:
self.job.remove_from_queue_when_finish()
def on_close(self, button):
self.main_window.application.back_main_window_or_friend_list()
def on_save(self, button):
if self.owner_info.get_text() == '':
self.msg_info('You should generate a key that contains your info')
return
if self.friend_info.get_text() == '':
self.msg_info('Friend info is required')
return
self.fchat_prv.add_friend(self.pub, self.prv, self.friend_info.get_text())
self.on_save_start()
def on_save_start(self):
self.spinner.show()
self.spinner.start()
self.friend_info.set_sensitive(False)
self.save_bt.set_sensitive(False)
self.close_bt.set_sensitive(False)
self.generate_keys_bt.set_sensitive(False)
self.copy_clipboard_bt.set_sensitive(False)
def on_save_start_ok(self):
self.spinner.hide()
self.spinner.stop()
self.friend_info.set_sensitive(True)
self.save_bt.set_sensitive(True)
self.close_bt.set_sensitive(True)
self.generate_keys_bt.set_sensitive(True)
self.copy_clipboard_bt.set_sensitive(True)
self.friend_list.sync_friends_list()
def on_save_start_duplicate(self, text):
self.msg_info(text)
def on_save_start_faild(self):
dialog = Gtk.MessageDialog(self.main_window, 0, Gtk.MessageType.ERROR, Gtk.ButtonsType.OK, "ERROR")
dialog.format_secondary_text("Error adding friend please try later")
dialog.run()
dialog.destroy()
self.spinner.hide()
self.spinner.stop()
self.friend_info.set_sensitive(True)
self.save_bt.set_sensitive(True)
self.close_bt.set_sensitive(True)
self.generate_keys_bt.set_sensitive(True)
self.copy_clipboard_bt.set_sensitive(True)
def on_copy_clipboard(self, button):
self.clipboard.set_text(self.owner_info.get_text(), -1)
def msg_info(self, text):
dialog = Gtk.MessageDialog(self.main_window, 0, Gtk.MessageType.INFO, Gtk.ButtonsType.OK, "Info")
dialog.format_secondary_text(text)
dialog.run()
dialog.destroy()
| [
"gi.repository.Gtk.Box",
"gi.repository.Gtk.Box.__init__",
"gi.repository.Gtk.Spinner",
"gi.require_version",
"gi.repository.Gtk.Entry",
"gi.repository.Gtk.MessageDialog",
"gi.repository.Gtk.Clipboard.get",
"gi.repository.Gtk.Button"
] | [((10, 42), 'gi.require_version', 'gi.require_version', (['"""Gtk"""', '"""3.0"""'], {}), "('Gtk', '3.0')\n", (28, 42), False, 'import gi\n'), ((186, 257), 'gi.repository.Gtk.Box.__init__', 'Gtk.Box.__init__', (['self'], {'spacing': '(7)', 'orientation': 'Gtk.Orientation.VERTICAL'}), '(self, spacing=7, orientation=Gtk.Orientation.VERTICAL)\n', (202, 257), False, 'from gi.repository import Gio, Gtk, Gdk\n'), ((453, 479), 'gi.repository.Gtk.Button', 'Gtk.Button', (['"""Generate Key"""'], {}), "('Generate Key')\n", (463, 479), False, 'from gi.repository import Gio, Gtk, Gdk\n'), ((576, 594), 'gi.repository.Gtk.Button', 'Gtk.Button', (['"""Save"""'], {}), "('Save')\n", (586, 594), False, 'from gi.repository import Gio, Gtk, Gdk\n'), ((675, 695), 'gi.repository.Gtk.Button', 'Gtk.Button', (['"""Cancel"""'], {}), "('Cancel')\n", (685, 695), False, 'from gi.repository import Gio, Gtk, Gdk\n'), ((779, 798), 'gi.repository.Gtk.Button', 'Gtk.Button', (['"""Close"""'], {}), "('Close')\n", (789, 798), False, 'from gi.repository import Gio, Gtk, Gdk\n'), ((883, 894), 'gi.repository.Gtk.Entry', 'Gtk.Entry', ([], {}), '()\n', (892, 894), False, 'from gi.repository import Gio, Gtk, Gdk\n'), ((982, 1019), 'gi.repository.Gtk.Button', 'Gtk.Button', ([], {'label': '"""Copy to clipboard"""'}), "(label='Copy to clipboard')\n", (992, 1019), False, 'from gi.repository import Gio, Gtk, Gdk\n'), ((1045, 1087), 'gi.repository.Gtk.Clipboard.get', 'Gtk.Clipboard.get', (['Gdk.SELECTION_CLIPBOARD'], {}), '(Gdk.SELECTION_CLIPBOARD)\n', (1062, 1087), False, 'from gi.repository import Gio, Gtk, Gdk\n'), ((1189, 1207), 'gi.repository.Gtk.Box', 'Gtk.Box', ([], {'spacing': '(5)'}), '(spacing=5)\n', (1196, 1207), False, 'from gi.repository import Gio, Gtk, Gdk\n'), ((1364, 1375), 'gi.repository.Gtk.Entry', 'Gtk.Entry', ([], {}), '()\n', (1373, 1375), False, 'from gi.repository import Gio, Gtk, Gdk\n'), ((1467, 1480), 'gi.repository.Gtk.Spinner', 'Gtk.Spinner', ([], {}), '()\n', (1478, 1480), False, 'from gi.repository import Gio, Gtk, Gdk\n'), ((1658, 1667), 'gi.repository.Gtk.Box', 'Gtk.Box', ([], {}), '()\n', (1665, 1667), False, 'from gi.repository import Gio, Gtk, Gdk\n'), ((4643, 4738), 'gi.repository.Gtk.MessageDialog', 'Gtk.MessageDialog', (['self.main_window', '(0)', 'Gtk.MessageType.ERROR', 'Gtk.ButtonsType.OK', '"""ERROR"""'], {}), "(self.main_window, 0, Gtk.MessageType.ERROR, Gtk.\n ButtonsType.OK, 'ERROR')\n", (4660, 4738), False, 'from gi.repository import Gio, Gtk, Gdk\n'), ((5297, 5390), 'gi.repository.Gtk.MessageDialog', 'Gtk.MessageDialog', (['self.main_window', '(0)', 'Gtk.MessageType.INFO', 'Gtk.ButtonsType.OK', '"""Info"""'], {}), "(self.main_window, 0, Gtk.MessageType.INFO, Gtk.\n ButtonsType.OK, 'Info')\n", (5314, 5390), False, 'from gi.repository import Gio, Gtk, Gdk\n')] |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# Import Local Modules
import marvin
from nose.plugins.attrib import attr
from marvin.cloudstackTestCase import cloudstackTestCase
import unittest
from marvin.cloudstackAPI import *
from marvin.lib.utils import *
from marvin.lib.base import *
from marvin.lib.common import *
from marvin.codes import PASS, FAILED, SUCCESS, XEN_SERVER
from marvin.sshClient import SshClient
import requests
requests.packages.urllib3.disable_warnings()
import random
import string
import telnetlib
import os
import urllib.request, urllib.parse, urllib.error
import time
import tempfile
_multiprocess_shared_ = True
class TestBrowseUploadTemplate(cloudstackTestCase):
"""
Tests for browser based upload template feature. Once all issues in test_browse_templates.py are fixed, this should be merged back
"""
@classmethod
def setUpClass(cls):
cls.testClient = super(TestBrowseUploadTemplate, cls).getClsTestClient()
cls.testdata = cls.testClient.getParsedTestDataConfig()
cls.apiclient = cls.testClient.getApiClient()
cls.hypervisor = cls.testClient.getHypervisorInfo()
cls._cleanup = []
cls.cleanup = []
hosts = list_hosts(
cls.apiclient,
type="Routing"
)
if hosts is None:
raise unittest.SkipTest(
"There are no hypervisors available. Check list hosts response")
cls.uploadtemplateformat = "VHD"
cls.templatename = "test"
cls.templatehypervisor = "XenServer"
cls.templateostypeid = 142
cls.zone = get_zone(cls.apiclient, cls.testClient.getZoneForTests())
cls.domain = get_domain(cls.apiclient)
cls.pod = get_pod(cls.apiclient, cls.zone.id)
cls.account = Account.create(
cls.apiclient,
cls.testdata["account"],
domainid=cls.domain.id
)
cls._cleanup = [
cls.account
]
def waitForSystemVMAgent(self, vmname):
timeout = self.testdata["timeout"]
while True:
list_host_response = list_hosts(
self.apiclient,
name=vmname
)
if list_host_response and list_host_response[0].state == 'Up':
break
if timeout == 0:
raise Exception("Timed out waiting for SSVM agent to be Up")
time.sleep(self.testdata["sleep"])
timeout = timeout - 1
def destroy_ssvm(self):
list_ssvm_response = list_ssvms(
self.apiclient,
systemvmtype='secondarystoragevm',
state='Running',
zoneid=self.zone.id
)
self.assertEqual(
isinstance(list_ssvm_response, list),
True,
"Check list response returns a valid list"
)
ssvm_response = list_ssvm_response[0]
old_name = ssvm_response.name
self.debug("Destroying SSVM: %s" % ssvm_response.id)
cmd = destroySystemVm.destroySystemVmCmd()
cmd.id = ssvm_response.id
self.apiclient.destroySystemVm(cmd)
timeout = self.testdata["timeout"]
while True:
list_ssvm_response = list_ssvms(
self.apiclient,
zoneid=self.zone.id,
systemvmtype='secondarystoragevm'
)
if isinstance(list_ssvm_response, list):
if list_ssvm_response[0].state == 'Running':
break
if timeout == 0:
raise Exception("List SSVM call failed!")
time.sleep(self.testdata["sleep"])
timeout = timeout - 1
ssvm_response = list_ssvm_response[0]
# Verify Name, Public IP, Private IP and Link local IP
# for newly created SSVM
self.assertNotEqual(
ssvm_response.name,
old_name,
"Check SSVM new name with name of destroyed SSVM"
)
self.assertEqual(
hasattr(ssvm_response, 'privateip'),
True,
"Check whether SSVM has private IP field"
)
self.assertEqual(
hasattr(ssvm_response, 'linklocalip'),
True,
"Check whether SSVM has link local IP field"
)
self.assertEqual(
hasattr(ssvm_response, 'publicip'),
True,
"Check whether SSVM has public IP field"
)
# Wait for the agent to be up
self.waitForSystemVMAgent(ssvm_response.name)
return
@attr(tags = ["advanced", "advancedns", "smoke", "basic"], required_hardware="false")
def test_browser_upload_template_incomplete(self):
"""
Test browser based incomplete template upload, followed by SSVM destroy. Template should go to UploadAbandoned state and get cleaned up.
"""
try:
self.debug("========================= Test browser based incomplete template upload ========================")
#Only register template, without uploading
cmd = getUploadParamsForTemplate.getUploadParamsForTemplateCmd()
cmd.zoneid = self.zone.id
cmd.format = self.uploadtemplateformat
cmd.name=self.templatename+self.account.name+(random.choice(string.ascii_uppercase))
cmd.account=self.account.name
cmd.domainid=self.domain.id
cmd.displaytext=cmd.name
cmd.hypervisor=self.templatehypervisor
cmd.ostypeid=self.templateostypeid
template_response=self.apiclient.getUploadParamsForTemplate(cmd)
#Destroy SSVM, and wait for new one to start
self.destroy_ssvm()
wait_for_cleanup(self.apiclient, ["expunge.delay", "expunge.interval"])
#Verify that the template is cleaned up as part of sync-up during new SSVM start
list_template_response=Template.list(
self.apiclient,
id=template_response.id,
templatefilter="all",
zoneid=self.zone.id)
self.assertEqual(list_template_response, None, "Template is not cleaned up, some issue with template sync-up")
except Exception as e:
self.fail("Exception occurred : %s" % e)
return
@classmethod
def tearDownClass(self):
try:
self.apiclient = super(TestBrowseUploadTemplate, self).getClsTestClient().getApiClient()
cleanup_resources(self.apiclient, self._cleanup)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
return
| [
"random.choice",
"requests.packages.urllib3.disable_warnings",
"time.sleep",
"nose.plugins.attrib.attr"
] | [((1176, 1220), 'requests.packages.urllib3.disable_warnings', 'requests.packages.urllib3.disable_warnings', ([], {}), '()\n', (1218, 1220), False, 'import requests\n'), ((5319, 5406), 'nose.plugins.attrib.attr', 'attr', ([], {'tags': "['advanced', 'advancedns', 'smoke', 'basic']", 'required_hardware': '"""false"""'}), "(tags=['advanced', 'advancedns', 'smoke', 'basic'], required_hardware=\n 'false')\n", (5323, 5406), False, 'from nose.plugins.attrib import attr\n'), ((3157, 3191), 'time.sleep', 'time.sleep', (["self.testdata['sleep']"], {}), "(self.testdata['sleep'])\n", (3167, 3191), False, 'import time\n'), ((4360, 4394), 'time.sleep', 'time.sleep', (["self.testdata['sleep']"], {}), "(self.testdata['sleep'])\n", (4370, 4394), False, 'import time\n'), ((6044, 6081), 'random.choice', 'random.choice', (['string.ascii_uppercase'], {}), '(string.ascii_uppercase)\n', (6057, 6081), False, 'import random\n')] |
"""Test OpenZWave Websocket API."""
from unittest.mock import patch
from openzwavemqtt.const import (
ATTR_CODE_SLOT,
ATTR_LABEL,
ATTR_OPTIONS,
ATTR_POSITION,
ATTR_VALUE,
ValueType,
)
from openpeerpower.components.ozw.const import ATTR_CONFIG_PARAMETER
from openpeerpower.components.ozw.lock import ATTR_USERCODE
from openpeerpower.components.ozw.websocket_api import (
ATTR_IS_AWAKE,
ATTR_IS_BEAMING,
ATTR_IS_FAILED,
ATTR_IS_FLIRS,
ATTR_IS_ROUTING,
ATTR_IS_SECURITYV1,
ATTR_IS_ZWAVE_PLUS,
ATTR_NEIGHBORS,
ATTR_NODE_BASIC_STRING,
ATTR_NODE_BAUD_RATE,
ATTR_NODE_GENERIC_STRING,
ATTR_NODE_QUERY_STAGE,
ATTR_NODE_SPECIFIC_STRING,
ID,
NODE_ID,
OZW_INSTANCE,
PARAMETER,
SCHEMA,
TYPE,
VALUE,
)
from openpeerpower.components.websocket_api.const import (
ERR_INVALID_FORMAT,
ERR_NOT_FOUND,
ERR_NOT_SUPPORTED,
)
from .common import MQTTMessage, setup_ozw
async def test_websocket_api(opp, generic_data, opp_ws_client):
"""Test the ozw websocket api."""
await setup_ozw(opp, fixture=generic_data)
client = await opp_ws_client(opp)
# Test instance list
await client.send_json({ID: 4, TYPE: "ozw/get_instances"})
msg = await client.receive_json()
assert len(msg["result"]) == 1
result = msg["result"][0]
assert result[OZW_INSTANCE] == 1
assert result["Status"] == "driverAllNodesQueried"
assert result["OpenZWave_Version"] == "1.6.1008"
# Test network status
await client.send_json({ID: 5, TYPE: "ozw/network_status"})
msg = await client.receive_json()
result = msg["result"]
assert result["Status"] == "driverAllNodesQueried"
assert result[OZW_INSTANCE] == 1
# Test node status
await client.send_json({ID: 6, TYPE: "ozw/node_status", NODE_ID: 32})
msg = await client.receive_json()
result = msg["result"]
assert result[OZW_INSTANCE] == 1
assert result[NODE_ID] == 32
assert result[ATTR_NODE_QUERY_STAGE] == "Complete"
assert result[ATTR_IS_ZWAVE_PLUS]
assert result[ATTR_IS_AWAKE]
assert not result[ATTR_IS_FAILED]
assert result[ATTR_NODE_BAUD_RATE] == 100000
assert result[ATTR_IS_BEAMING]
assert not result[ATTR_IS_FLIRS]
assert result[ATTR_IS_ROUTING]
assert not result[ATTR_IS_SECURITYV1]
assert result[ATTR_NODE_BASIC_STRING] == "Routing Slave"
assert result[ATTR_NODE_GENERIC_STRING] == "Binary Switch"
assert result[ATTR_NODE_SPECIFIC_STRING] == "Binary Power Switch"
assert result[ATTR_NEIGHBORS] == [1, 33, 36, 37, 39]
await client.send_json({ID: 7, TYPE: "ozw/node_status", NODE_ID: 999})
msg = await client.receive_json()
result = msg["error"]
assert result["code"] == ERR_NOT_FOUND
# Test node statistics
await client.send_json({ID: 8, TYPE: "ozw/node_statistics", NODE_ID: 39})
msg = await client.receive_json()
result = msg["result"]
assert result[OZW_INSTANCE] == 1
assert result[NODE_ID] == 39
assert result["send_count"] == 57
assert result["sent_failed"] == 0
assert result["retries"] == 1
assert result["last_request_rtt"] == 26
assert result["last_response_rtt"] == 38
assert result["average_request_rtt"] == 29
assert result["average_response_rtt"] == 37
assert result["received_packets"] == 3594
assert result["received_dup_packets"] == 12
assert result["received_unsolicited"] == 3546
# Test node metadata
await client.send_json({ID: 9, TYPE: "ozw/node_metadata", NODE_ID: 39})
msg = await client.receive_json()
result = msg["result"]
assert result["metadata"]["ProductPic"] == "images/aeotec/zwa002.png"
await client.send_json({ID: 10, TYPE: "ozw/node_metadata", NODE_ID: 999})
msg = await client.receive_json()
result = msg["error"]
assert result["code"] == ERR_NOT_FOUND
# Test network statistics
await client.send_json({ID: 11, TYPE: "ozw/network_statistics"})
msg = await client.receive_json()
result = msg["result"]
assert result["readCnt"] == 92220
assert result[OZW_INSTANCE] == 1
assert result["node_count"] == 5
# Test get nodes
await client.send_json({ID: 12, TYPE: "ozw/get_nodes"})
msg = await client.receive_json()
result = msg["result"]
assert len(result) == 5
assert result[2][ATTR_IS_AWAKE]
assert not result[1][ATTR_IS_FAILED]
# Test get config parameters
await client.send_json({ID: 13, TYPE: "ozw/get_config_parameters", NODE_ID: 39})
msg = await client.receive_json()
result = msg["result"]
assert len(result) == 8
for config_param in result:
assert config_param["type"] in (
ValueType.LIST.value,
ValueType.BOOL.value,
ValueType.INT.value,
ValueType.BYTE.value,
ValueType.SHORT.value,
ValueType.BITSET.value,
)
# Test set config parameter
config_param = result[0]
current_val = config_param[ATTR_VALUE]
new_val = next(
option[0]
for option in config_param[SCHEMA][0][ATTR_OPTIONS]
if option[0] != current_val
)
new_label = next(
option[1]
for option in config_param[SCHEMA][0][ATTR_OPTIONS]
if option[1] != current_val and option[0] != new_val
)
await client.send_json(
{
ID: 14,
TYPE: "ozw/set_config_parameter",
NODE_ID: 39,
PARAMETER: config_param[ATTR_CONFIG_PARAMETER],
VALUE: new_val,
}
)
msg = await client.receive_json()
assert msg["success"]
await client.send_json(
{
ID: 15,
TYPE: "ozw/set_config_parameter",
NODE_ID: 39,
PARAMETER: config_param[ATTR_CONFIG_PARAMETER],
VALUE: new_label,
}
)
msg = await client.receive_json()
assert msg["success"]
# Test OZW Instance not found error
await client.send_json(
{ID: 16, TYPE: "ozw/get_config_parameters", OZW_INSTANCE: 999, NODE_ID: 1}
)
msg = await client.receive_json()
result = msg["error"]
assert result["code"] == ERR_NOT_FOUND
# Test OZW Node not found error
await client.send_json(
{
ID: 18,
TYPE: "ozw/set_config_parameter",
NODE_ID: 999,
PARAMETER: 0,
VALUE: "test",
}
)
msg = await client.receive_json()
result = msg["error"]
assert result["code"] == ERR_NOT_FOUND
# Test parameter not found
await client.send_json(
{
ID: 19,
TYPE: "ozw/set_config_parameter",
NODE_ID: 39,
PARAMETER: 45,
VALUE: "test",
}
)
msg = await client.receive_json()
result = msg["error"]
assert result["code"] == ERR_NOT_FOUND
# Test list value not found
await client.send_json(
{
ID: 20,
TYPE: "ozw/set_config_parameter",
NODE_ID: 39,
PARAMETER: config_param[ATTR_CONFIG_PARAMETER],
VALUE: "test",
}
)
msg = await client.receive_json()
result = msg["error"]
assert result["code"] == ERR_NOT_FOUND
# Test value type invalid
await client.send_json(
{
ID: 21,
TYPE: "ozw/set_config_parameter",
NODE_ID: 39,
PARAMETER: 3,
VALUE: 0,
}
)
msg = await client.receive_json()
result = msg["error"]
assert result["code"] == ERR_NOT_SUPPORTED
# Test invalid bitset format
await client.send_json(
{
ID: 22,
TYPE: "ozw/set_config_parameter",
NODE_ID: 39,
PARAMETER: 3,
VALUE: {ATTR_POSITION: 1, ATTR_VALUE: True, ATTR_LABEL: "test"},
}
)
msg = await client.receive_json()
result = msg["error"]
assert result["code"] == ERR_INVALID_FORMAT
# Test valid bitset format passes validation
await client.send_json(
{
ID: 23,
TYPE: "ozw/set_config_parameter",
NODE_ID: 39,
PARAMETER: 10000,
VALUE: {ATTR_POSITION: 1, ATTR_VALUE: True},
}
)
msg = await client.receive_json()
result = msg["error"]
assert result["code"] == ERR_NOT_FOUND
async def test_ws_locks(opp, lock_data, opp_ws_client):
"""Test lock websocket apis."""
await setup_ozw(opp, fixture=lock_data)
client = await opp_ws_client(opp)
await client.send_json(
{
ID: 1,
TYPE: "ozw/get_code_slots",
NODE_ID: 10,
}
)
msg = await client.receive_json()
assert msg["success"]
await client.send_json(
{
ID: 2,
TYPE: "ozw/set_usercode",
NODE_ID: 10,
ATTR_CODE_SLOT: 1,
ATTR_USERCODE: "1234",
}
)
msg = await client.receive_json()
assert msg["success"]
await client.send_json(
{
ID: 3,
TYPE: "ozw/clear_usercode",
NODE_ID: 10,
ATTR_CODE_SLOT: 1,
}
)
msg = await client.receive_json()
assert msg["success"]
async def test_refresh_node(opp, generic_data, sent_messages, opp_ws_client):
"""Test the ozw refresh node api."""
receive_message = await setup_ozw(opp, fixture=generic_data)
client = await opp_ws_client(opp)
# Send the refresh_node_info command
await client.send_json({ID: 9, TYPE: "ozw/refresh_node_info", NODE_ID: 39})
msg = await client.receive_json()
assert len(sent_messages) == 1
assert msg["success"]
# Receive a mock status update from OZW
message = MQTTMessage(
topic="OpenZWave/1/node/39/",
payload={"NodeID": 39, "NodeQueryStage": "initializing"},
)
message.encode()
receive_message(message)
# Verify we got expected data on the websocket
msg = await client.receive_json()
result = msg["event"]
assert result["type"] == "node_updated"
assert result["node_query_stage"] == "initializing"
# Send another mock status update from OZW
message = MQTTMessage(
topic="OpenZWave/1/node/39/",
payload={"NodeID": 39, "NodeQueryStage": "versions"},
)
message.encode()
receive_message(message)
# Send a mock status update for a different node
message = MQTTMessage(
topic="OpenZWave/1/node/35/",
payload={"NodeID": 35, "NodeQueryStage": "fake_shouldnt_be_received"},
)
message.encode()
receive_message(message)
# Verify we received the message for node 39 but not for node 35
msg = await client.receive_json()
result = msg["event"]
assert result["type"] == "node_updated"
assert result["node_query_stage"] == "versions"
async def test_refresh_node_unsubscribe(opp, generic_data, opp_ws_client):
"""Test unsubscribing the ozw refresh node api."""
await setup_ozw(opp, fixture=generic_data)
client = await opp_ws_client(opp)
with patch("openzwavemqtt.OZWOptions.listen") as mock_listen:
# Send the refresh_node_info command
await client.send_json({ID: 9, TYPE: "ozw/refresh_node_info", NODE_ID: 39})
await client.receive_json()
# Send the unsubscribe command
await client.send_json({ID: 10, TYPE: "unsubscribe_events", "subscription": 9})
await client.receive_json()
assert mock_listen.return_value.called
| [
"unittest.mock.patch"
] | [((11067, 11107), 'unittest.mock.patch', 'patch', (['"""openzwavemqtt.OZWOptions.listen"""'], {}), "('openzwavemqtt.OZWOptions.listen')\n", (11072, 11107), False, 'from unittest.mock import patch\n')] |
import re
import datetime
from javaccflab.lexer import parse
from javaccflab.java_token import TokenType, Token, update_token_value
class Formatter:
def __init__(self, files):
self.__files = files
self.__file = None
self.__tokens = []
self.__to_fix = dict()
def process(self):
tokens = []
for file in self.__files:
tokens.append(parse(open(file, 'r').read()))
i = 0
while i < len(tokens):
self.__tokens = tokens[i]
self.__file = self.__files[i]
self.__find_to_fix()
tokens[i] = self.__tokens
i += 1
i = 0
while i < len(tokens):
self.__tokens = tokens[i]
self.__file = self.__files[i]
self.__fix()
self.__fix_comments()
tokens[i] = self.__tokens
i += 1
return tokens
def __find_to_fix(self):
i = 0
while i < len(self.__tokens):
token = self.__tokens[i]
if token.get_value() == 'package':
i = self.__fix_package(i)
elif token.get_value() in ('class', 'interface') and self.__tokens[i - 1].get_value() != '.':
i = self.__skip_ws_tokens(i + 1)
if not Formatter.is_camel_upper_case(self.__tokens[i].get_value()):
self.__to_fix[self.__tokens[i].get_value()] = Formatter.to_camel_upper_case(
self.__tokens[i].get_value())
i = self.__fix_class_body(i, self.__tokens[i].get_value())
i += 1
def __fix_package(self, pos):
pos = self.__skip_ws_tokens(pos)
while self.__tokens[pos].get_value() != ';':
if self.__tokens[pos].get_type() == TokenType.IDENTIFIER and not Formatter.is_lower_case(
self.__tokens[pos].get_value()):
self.__to_fix[self.__tokens[pos].get_value()] = Formatter.to_lower_case(
(self.__tokens[pos].get_value()))
pos += 1
return pos
def __fix_class_body(self, pos, class_name):
while self.__tokens[pos].get_value() != '{':
pos += 1
count = 1
pos += 1
while count != 0:
if self.__tokens[pos].get_value() == '{':
count += 1
elif self.__tokens[pos].get_value() == '}':
count -= 1
elif self.__tokens[pos].get_value() == 'static':
i = self.__skip_ws_tokens(pos + 1)
if self.__tokens[i].get_value() == '{':
pos = i + 1
count += 1
continue
elif self.__tokens[pos].get_type() in (TokenType.IDENTIFIER, TokenType.KEYWORD):
if self.__is_parameter(pos):
parameters, i = self.__get_field_names(pos)
if self.__is_final(pos):
for parameter in parameters:
if not Formatter.is_snake_upper_case(parameter):
self.__to_fix[parameter] = Formatter.to_snake_upper_case(parameter)
else:
for parameter in parameters:
if not Formatter.is_camel_lower_case(parameter):
self.__to_fix[parameter] = Formatter.to_camel_lower_case(parameter)
pos = i
else:
self.__fix_method_name(pos, class_name)
parameters = self.__get_method_parameters(pos)
pos = self.__fix_method_body(pos, parameters)
pos += 1
return pos
def __fix_method_name(self, i, class_name):
while self.__tokens[i].get_value() not in ('(', ';'):
i += 1
i -= 1
while self.__tokens[i].get_type() == TokenType.WHITESPACE:
i -= 1
if self.__tokens[i].get_value() != class_name and not Formatter.is_snake_lower_case(
self.__tokens[i].get_value()):
self.__to_fix[self.__tokens[i].get_value()] = Formatter.to_snake_lower_case(self.__tokens[i].get_value())
def __get_method_parameters(self, i):
parameters = dict()
while self.__tokens[i].get_value() != '(':
i += 1
while self.__tokens[i].get_value() != ')':
if self.__tokens[i + 1].get_value() in (')', ','):
pos = i
while self.__tokens[pos].get_type() == TokenType.WHITESPACE:
pos -= 1
if not Formatter.is_camel_lower_case(self.__tokens[pos].get_value()):
fixed_value = Formatter.to_camel_lower_case(self.__tokens[pos].get_value())
parameters[self.__tokens[pos].get_value()] = fixed_value
update_token_value(self.__file, self.__tokens[pos], fixed_value)
i += 1
return parameters
def __fix_method_body(self, i, method_parameters):
params = dict()
while self.__tokens[i].get_value() not in ('{', ';'):
if self.__tokens[i].get_value() in method_parameters.keys():
update_token_value(self.__file, self.__tokens[i], method_parameters[self.__tokens[i].get_value()])
i += 1
if self.__tokens[i].get_value() == ';':
return i + 1
brace_count = 1
i += 1
while brace_count != 0:
if self.__tokens[i].get_value() == '{':
brace_count += 1
elif self.__tokens[i].get_value() == '}':
brace_count -= 1
elif self.__tokens[i].get_value() in ('=', ';'):
naming_pos = i - 1
while self.__tokens[naming_pos].get_type() == TokenType.WHITESPACE:
naming_pos -= 1
if self.__tokens[naming_pos].get_type() == TokenType.IDENTIFIER:
type_pos = naming_pos - 1
while self.__tokens[type_pos].get_type() == TokenType.WHITESPACE:
type_pos -= 1
if (self.__tokens[type_pos].get_type() in (TokenType.IDENTIFIER, TokenType.KEYWORD) and \
self.__tokens[type_pos].get_value() not in ('class', 'identifier')) or self.__tokens[
type_pos].get_value() == ',':
if not Formatter.is_camel_lower_case(self.__tokens[naming_pos].get_value()):
fixed_value = Formatter.to_camel_lower_case(self.__tokens[naming_pos].get_value())
params[self.__tokens[naming_pos].get_value()] = fixed_value
update_token_value(self.__file, self.__tokens[naming_pos], fixed_value)
elif self.__tokens[i].get_type() == TokenType.IDENTIFIER and self.__tokens[
i].get_value() in params.keys():
update_token_value(self.__file, self.__tokens[i], params[self.__tokens[i].get_value()])
elif self.__tokens[i].get_type() == TokenType.IDENTIFIER and self.__tokens[
i].get_value() in method_parameters.keys():
update_token_value(self.__file, self.__tokens[i], method_parameters[self.__tokens[i].get_value()])
i += 1
return i
def __get_field_names(self, i):
params = []
while self.__tokens[i].get_value() != ';':
if self.__tokens[i + 1].get_value() in (';', '=', ','):
pos = i
while self.__tokens[pos].get_type() == TokenType.WHITESPACE:
pos -= 1
field_name = self.__tokens[pos].get_value()
is_value = False
if self.__tokens[i + 1].get_value() in (';', ','):
while pos > 0 and self.__tokens[pos].get_value() not in (';', '}'):
if self.__tokens[pos].get_value() == '=':
is_value = True
pos -= 1
if not is_value:
params.append(field_name)
i += 1
end = i
return params, end
def __is_final(self, i):
while self.__tokens[i].get_value() not in (';', '=', '('):
if self.__tokens[i].get_value() == 'final':
return True
i += 1
return False
def __is_parameter(self, pos):
while self.__tokens[pos].get_value() != ';' and pos < len(self.__tokens):
if self.__tokens[pos].get_value() == '=':
return True
elif self.__tokens[pos].get_value() in ('class', 'interface', '(', ')'):
return False
pos += 1
return True
def __fix(self):
for token in self.__tokens:
if token.get_value() in self.__to_fix and not token.is_fixed():
update_token_value(self.__file, token, self.__to_fix[token.get_value()])
def __fix_comments(self):
self.__add_start_comment()
i = 0
while i < len(self.__tokens):
if self.__tokens[i].get_value() in ('class', 'interface'):
i = self.__fix_class_comments(i)
i += 1
i += 1
# Fix start comment
def __add_start_comment(self):
if not self.__is_start_comment_exists():
comment_token = Token(None, TokenType.COMMENT)
comment_string = f'/*\n' \
f' * {self.__find_class_name()}\n' \
f' *\n' \
f' * {datetime.date.today().strftime("%B %d, %Y")}\n' \
f' */'
update_token_value(self.__file, comment_token, comment_string)
self.__tokens.insert(0, comment_token)
self.__tokens.insert(1, Token('\n', TokenType.WHITESPACE))
self.__tokens.insert(1, Token('\n', TokenType.WHITESPACE))
def __is_start_comment_exists(self):
i = self.__skip_ws_tokens(0)
return self.__tokens[i].get_type() == TokenType.COMMENT
def __find_class_name(self, i=0):
while self.__tokens[i].get_value() not in ('class', 'interface') and self.__tokens[i - 1].get_value() != '.':
i += 1
i = self.__skip_ws_tokens(i + 1)
return self.__tokens[i].get_value()
# Fix class comment
def __fix_class_comments(self, pos):
comment_token = self.__find_doc_comment_before(pos)
if comment_token is None:
comment_token = Token(None, TokenType.COMMENT)
comment_string = f'/**\n' \
f' * Implementation of {self.__find_class_name(pos)}\n' \
f' */'
update_token_value(self.__file, comment_token, comment_string)
insert_pos = self.__find_token_before(pos, '\n')
self.__tokens.insert(insert_pos, Token('\n', TokenType.WHITESPACE))
self.__tokens.insert(insert_pos + 1, comment_token)
else:
self.__fix_comment_links(comment_token)
return self.__fix_class_body_comments(pos)
# Fix comments for methods and fields
def __fix_class_body_comments(self, pos):
while self.__tokens[pos].get_value() != '{':
pos += 1
count = 1
pos += 1
while count != 0:
if self.__tokens[pos].get_value() == '{':
count += 1
elif self.__tokens[pos].get_value() == '}':
count -= 1
elif self.__tokens[pos].get_value() == 'static':
i = self.__skip_ws_tokens(pos + 1)
if self.__tokens[i].get_value() == '{':
pos = i + 1
count += 1
continue
elif self.__tokens[pos].get_type() in (TokenType.IDENTIFIER, TokenType.KEYWORD) and self.__tokens[
pos + 1].get_value() != '.' and self.__tokens[pos].get_value() not in ('class', 'interface'):
if self.__is_parameter(pos):
pos = self.__fix_field_comment(pos)
else:
pos = self.__fix_method_comment(pos)
pos += 1
return pos
def __fix_field_comment(self, pos):
comment_token = self.__find_doc_comment_before(pos)
indent = self.__get_indent(pos)
if comment_token is None:
field_names = ', '.join(self.__get_field_names(pos)[0])
visibility = self.__find_visibility(pos)
comment_token = Token(None, TokenType.COMMENT)
            comment_string = f'{indent}/**\n' \
f'{indent} * The {visibility} {field_names} {"constant" if self.__is_final(pos) else "variable"}{"s" if len(field_names) > 0 else ""}\n' \
f'{indent} */'
update_token_value(self.__file, comment_token, comment_string)
insert_pos = self.__find_token_before(pos, '\n')
self.__tokens.insert(insert_pos, Token('\n', TokenType.WHITESPACE))
self.__tokens.insert(insert_pos + 1, comment_token)
else:
self.__fix_comment_links(comment_token)
return self.__find_token_after(pos, ';')
def __find_visibility(self, pos):
pos = self.__find_token_before(pos, '\n')
while self.__tokens[pos].get_value() not in ('=', ';', '('):
if self.__tokens[pos].get_value() in ('private', 'public', 'protected'):
return self.__tokens[pos].get_value()
pos += 1
return 'package-private'
def __fix_method_comment(self, pos):
comment_token = self.__find_doc_comment_before(pos)
indent = self.__get_indent(pos)
all_params = []
if comment_token is None:
params = self.__get_parameter_list(pos)
params.extend(self.__get_type_parameter_list(pos))
if len(params) > 0:
all_params.append("\n".join([f"{indent} * @param {param}" for param in params]))
throws = self.__get_throws(pos)
if len(throws) > 0:
all_params.append("\n".join([f"{indent} * @throws {param}" for param in throws]))
return_type = self.__get_return_type(pos)
if len(return_type) > 0:
all_params.append(f"{indent} * @return {self.__get_return_type(pos)}")
comment_token = Token(None, TokenType.COMMENT)
comment_string = f'{indent}/**\n' + \
'\n'.join(all_params) + \
('' if len(params) <= 0 else ' ') + \
f'\n{indent} */'
update_token_value(self.__file, comment_token, comment_string)
insert_pos = self.__find_token_before(pos, '\n')
self.__tokens.insert(insert_pos, Token('\n', TokenType.WHITESPACE))
self.__tokens.insert(insert_pos + 1, comment_token)
else:
self.__fix_comment_links(comment_token)
params_list = self.__get_parameter_list(pos)
params_list.extend(self.__get_type_parameter_list(pos))
throws_list = self.__get_throws(pos)
return_type_value = self.__get_return_type(pos)
params, throws, return_type = self.__fix_comment_params(comment_token)
comment_string = comment_token.get_value()
append_string = ''
i = 0
if len(params) < len(params_list):
append_string += "\n" + "\n".join(
[f"{indent} * @param {param}" for param in Formatter.get_missing(params, params_list)])
i = comment_string.rfind('@param')
if i != -1:
i = comment_string.find('\n', i) if comment_string.find('\n',
i) != -1 else comment_string.find('*',
i) - 1
comment_string = comment_string[:i] + append_string + comment_string[i:]
append_string = ''
if len(throws) < len(throws_list):
append_string += "\n" + "\n".join(
[f"{indent} * @throws {param}" for param in Formatter.get_missing(throws, throws_list)])
i = comment_string.rfind('@throws')
if i != -1:
i = comment_string.find('\n', i) if comment_string.find('\n',
i) != -1 else comment_string.find('*',
i) - 1
comment_string = comment_string[:i] + append_string + comment_string[i:]
append_string = ''
i = comment_string.find('\n', i)
            if len(return_type) == 0:
append_string += "\n" + f"\n{indent} * @return {return_type_value}"
else:
i = comment_string.rfind('@return')
while comment_string[i] != '\n':
i -= 1
comment_string = comment_string[:i] + append_string + comment_string[i:]
if comment_string != comment_token.get_value():
update_token_value(self.__file, comment_token, comment_string)
return self.__skip_method(pos)
@staticmethod
def get_missing(before, after):
missing_params = []
for value in after:
if value not in before:
missing_params.append(value)
return missing_params
def __get_parameter_list(self, pos):
parameters = []
while self.__tokens[pos].get_value() != '(':
pos += 1
while self.__tokens[pos].get_value() != ')':
if self.__tokens[pos + 1].get_value() in (')', ','):
i = pos
while self.__tokens[i].get_type() == TokenType.WHITESPACE:
i -= 1
parameters.append(self.__tokens[i].get_value())
pos += 1
return parameters
def __get_type_parameter_list(self, pos):
parameters = []
while self.__tokens[pos].get_value() != '<':
if self.__tokens[pos].get_value() == '(':
return parameters
pos += 1
i = pos - 1
while self.__tokens[i].get_type() == TokenType.WHITESPACE:
i -= 1
if self.__tokens[i].get_type() != TokenType.KEYWORD or self.__tokens[i].get_value() not in ('}', ';'):
return parameters
while self.__tokens[pos].get_value() != '>':
if self.__tokens[pos - 1].get_value() in ('<', ','):
i = pos
while self.__tokens[i].get_type() == TokenType.WHITESPACE:
i += 1
parameters.append(self.__tokens[i].get_value())
pos += 1
return parameters
def __get_throws(self, pos):
throws = []
is_throws = False
while self.__tokens[pos].get_value() not in ('{', ';'):
if self.__tokens[pos].get_value() == 'throws':
is_throws = True
elif is_throws and self.__tokens[pos].get_type() == TokenType.IDENTIFIER:
throws.append(self.__tokens[pos].get_value())
pos += 1
return throws
def __get_return_type(self, pos):
return_type = []
while self.__tokens[pos].get_value() != '(':
pos += 1
pos -= 1
while self.__tokens[pos].get_type() == TokenType.WHITESPACE:
pos -= 1
while self.__tokens[pos].get_type() != TokenType.WHITESPACE:
pos -= 1
while self.__tokens[pos].get_type() == TokenType.WHITESPACE:
pos -= 1
if self.__tokens[pos].get_value() == '>':
while self.__tokens[pos].get_value() != '<':
return_type.append(self.__tokens[pos].get_value())
pos -= 1
return_type.append(self.__tokens[pos].get_value())
pos -= 1
while self.__tokens[pos].get_type() == TokenType.WHITESPACE:
return_type.append(self.__tokens[pos].get_value())
pos -= 1
return_type.append(self.__tokens[pos].get_value())
return_type.reverse()
return ''.join(return_type)
def __fix_comment_params(self, comment_token):
i = 0
params = []
throws = []
return_type = ''
comment_string = comment_token.get_value()
while i < len(comment_string):
if comment_string[i] == '@':
start = comment_string.find(' ', i)
macro = comment_string[i:start]
end = min(comment_string.find(' ', start + 1), comment_string.find('\n', start + 1))
end = end if end >= 0 else max(comment_string.find(' ', start + 1),
comment_string.find('\n', start + 1))
if end > 0:
value = comment_string[start + 1:end]
new_value = self.__fix_link(value)
if value != new_value:
comment_string = comment_string.replace(value, new_value)
update_token_value(self.__file, comment_token, comment_string)
value = new_value
if macro == '@param':
params.append(value)
elif macro == '@throws':
throws.append(value)
elif macro == '@return':
return_type = value
i += 1
return params, throws, return_type
def __skip_method(self, pos):
while self.__tokens[pos].get_value() != '{':
if self.__tokens[pos].get_value() == ';':
return pos + 1
pos += 1
count = 1
pos += 1
while count != 0:
if self.__tokens[pos].get_value() == '{':
count += 1
elif self.__tokens[pos].get_value() == '}':
count -= 1
pos += 1
return pos
def __find_doc_comment_before(self, pos):
while self.__tokens[pos].get_value() != '\n':
pos -= 1
while pos > 0 and self.__tokens[pos].get_type() == TokenType.WHITESPACE:
pos -= 1
if self.__tokens[pos].get_type() == TokenType.COMMENT and self.__tokens[pos].get_value().startswith('/**'):
return self.__tokens[pos]
return None
def __find_token_before(self, pos, value):
while pos > 0 and self.__tokens[pos].get_value() != value:
pos -= 1
return pos
def __find_token_after(self, pos, value):
while pos < len(self.__tokens) and self.__tokens[pos].get_value() != value:
pos += 1
return pos
def __fix_comment_links(self, comment_token):
i = 0
link = None
comment_string = comment_token.get_value()
while i < len(comment_string):
if comment_string[i] == '@':
start = comment_string.find(' ', i)
if comment_string[i:start] != '@see':
i += 1
continue
end = comment_string.find('\n', i)
link = comment_string[start:end]
elif comment_string[i] == '{':
start = comment_string.find(' ', i)
end = comment_string.find('}', i)
link = comment_string[start:end]
if link is not None:
new_link = self.__fix_link(link)
comment_string = comment_string.replace(link, new_link)
link = None
i += 1
if comment_string != comment_token.get_value():
update_token_value(self.__file, comment_token, comment_string)
def __fix_link(self, link):
for name in self.__to_fix.keys():
pos = link.find(name)
if pos != -1 and not (link[pos - 1].isalpha() or link[
pos - 1].isdigit() or link[pos - 1] == '_'):
link = link.replace(name, self.__to_fix[name])
return link
def __get_indent(self, pos):
pos = self.__find_token_before(pos, '\n')
count = 0
while self.__tokens[pos].get_type() == TokenType.WHITESPACE:
if self.__tokens[pos].get_value() == ' ':
count += 1
pos += 1
return ' ' * count
def __skip_ws_tokens(self, pos):
while self.__tokens[pos].get_type() == TokenType.WHITESPACE:
pos += 1
return pos
@staticmethod
def is_lower_case(naming):
return naming.find('_') == -1 and naming.islower()
@staticmethod
def to_lower_case(naming):
return ''.join([component.lower() for component in naming.split('_')])
@staticmethod
def is_camel_lower_case(naming):
return naming.find('_') == -1 and not naming.isupper() and not naming[0].isupper()
@staticmethod
def to_camel_lower_case(naming):
naming = Formatter.remove_underscores_around(naming)
components = [
component[0] + component[1:].lower() if component.isupper() else component[0].upper() + component[1:] for
component in naming.split('_')]
return components[0][0].lower() + components[0][1:] + ''.join(components[1:])
@staticmethod
def is_camel_upper_case(naming):
return naming.find('_') == -1 and not naming.isupper() and naming[0].isupper()
@staticmethod
def to_camel_upper_case(naming):
lower = Formatter.to_camel_lower_case(naming)
return lower[0].upper() + lower[1:]
@staticmethod
def is_snake_lower_case(naming):
return naming.islower()
@staticmethod
def to_snake_lower_case(naming):
naming = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', naming)
return re.sub('([a-z0-9])([A-Z])', r'\1_\2', naming).lower()
@staticmethod
def is_snake_upper_case(naming):
return naming.isupper()
@staticmethod
def to_snake_upper_case(naming):
return Formatter.to_snake_lower_case(naming).upper()
@staticmethod
def remove_underscores_around(naming):
i = 0
while naming[i] == '_':
i += 1
naming = naming[i:]
j = len(naming) - 1
while naming[j] == '_':
            j -= 1
        naming = naming[:j + 1]
return naming
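# Illustrative check of the naming-convention helpers above; the sample identifier
# strings are assumptions chosen for demonstration, not taken from any real Java source.
if __name__ == '__main__':
    print(Formatter.to_snake_lower_case('myFieldName'))    # my_field_name
    print(Formatter.to_snake_upper_case('myFieldName'))    # MY_FIELD_NAME
    print(Formatter.to_camel_upper_case('my_field_name'))  # MyFieldName
    print(Formatter.is_camel_lower_case('myFieldName'))    # True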
| [
"javaccflab.java_token.update_token_value",
"re.sub",
"datetime.date.today",
"javaccflab.java_token.Token"
] | [((26009, 26054), 're.sub', 're.sub', (['"""(.)([A-Z][a-z]+)"""', '"""\\\\1_\\\\2"""', 'naming'], {}), "('(.)([A-Z][a-z]+)', '\\\\1_\\\\2', naming)\n", (26015, 26054), False, 'import re\n'), ((9361, 9391), 'javaccflab.java_token.Token', 'Token', (['None', 'TokenType.COMMENT'], {}), '(None, TokenType.COMMENT)\n', (9366, 9391), False, 'from javaccflab.java_token import TokenType, Token, update_token_value\n'), ((9669, 9731), 'javaccflab.java_token.update_token_value', 'update_token_value', (['self.__file', 'comment_token', 'comment_string'], {}), '(self.__file, comment_token, comment_string)\n', (9687, 9731), False, 'from javaccflab.java_token import TokenType, Token, update_token_value\n'), ((10517, 10547), 'javaccflab.java_token.Token', 'Token', (['None', 'TokenType.COMMENT'], {}), '(None, TokenType.COMMENT)\n', (10522, 10547), False, 'from javaccflab.java_token import TokenType, Token, update_token_value\n'), ((10723, 10785), 'javaccflab.java_token.update_token_value', 'update_token_value', (['self.__file', 'comment_token', 'comment_string'], {}), '(self.__file, comment_token, comment_string)\n', (10741, 10785), False, 'from javaccflab.java_token import TokenType, Token, update_token_value\n'), ((12524, 12554), 'javaccflab.java_token.Token', 'Token', (['None', 'TokenType.COMMENT'], {}), '(None, TokenType.COMMENT)\n', (12529, 12554), False, 'from javaccflab.java_token import TokenType, Token, update_token_value\n'), ((12878, 12940), 'javaccflab.java_token.update_token_value', 'update_token_value', (['self.__file', 'comment_token', 'comment_string'], {}), '(self.__file, comment_token, comment_string)\n', (12896, 12940), False, 'from javaccflab.java_token import TokenType, Token, update_token_value\n'), ((14438, 14468), 'javaccflab.java_token.Token', 'Token', (['None', 'TokenType.COMMENT'], {}), '(None, TokenType.COMMENT)\n', (14443, 14468), False, 'from javaccflab.java_token import TokenType, Token, update_token_value\n'), ((14699, 14761), 'javaccflab.java_token.update_token_value', 'update_token_value', (['self.__file', 'comment_token', 'comment_string'], {}), '(self.__file, comment_token, comment_string)\n', (14717, 14761), False, 'from javaccflab.java_token import TokenType, Token, update_token_value\n'), ((23947, 24009), 'javaccflab.java_token.update_token_value', 'update_token_value', (['self.__file', 'comment_token', 'comment_string'], {}), '(self.__file, comment_token, comment_string)\n', (23965, 24009), False, 'from javaccflab.java_token import TokenType, Token, update_token_value\n'), ((9819, 9852), 'javaccflab.java_token.Token', 'Token', (['"""\n"""', 'TokenType.WHITESPACE'], {}), "('\\n', TokenType.WHITESPACE)\n", (9824, 9852), False, 'from javaccflab.java_token import TokenType, Token, update_token_value\n'), ((9890, 9923), 'javaccflab.java_token.Token', 'Token', (['"""\n"""', 'TokenType.WHITESPACE'], {}), "('\\n', TokenType.WHITESPACE)\n", (9895, 9923), False, 'from javaccflab.java_token import TokenType, Token, update_token_value\n'), ((10893, 10926), 'javaccflab.java_token.Token', 'Token', (['"""\n"""', 'TokenType.WHITESPACE'], {}), "('\\n', TokenType.WHITESPACE)\n", (10898, 10926), False, 'from javaccflab.java_token import TokenType, Token, update_token_value\n'), ((13048, 13081), 'javaccflab.java_token.Token', 'Token', (['"""\n"""', 'TokenType.WHITESPACE'], {}), "('\\n', TokenType.WHITESPACE)\n", (13053, 13081), False, 'from javaccflab.java_token import TokenType, Token, update_token_value\n'), ((14868, 14901), 'javaccflab.java_token.Token', 'Token', (['"""\n"""', 
'TokenType.WHITESPACE'], {}), "('\\n', TokenType.WHITESPACE)\n", (14873, 14901), False, 'from javaccflab.java_token import TokenType, Token, update_token_value\n'), ((17396, 17458), 'javaccflab.java_token.update_token_value', 'update_token_value', (['self.__file', 'comment_token', 'comment_string'], {}), '(self.__file, comment_token, comment_string)\n', (17414, 17458), False, 'from javaccflab.java_token import TokenType, Token, update_token_value\n'), ((26069, 26115), 're.sub', 're.sub', (['"""([a-z0-9])([A-Z])"""', '"""\\\\1_\\\\2"""', 'naming'], {}), "('([a-z0-9])([A-Z])', '\\\\1_\\\\2', naming)\n", (26075, 26115), False, 'import re\n'), ((4847, 4911), 'javaccflab.java_token.update_token_value', 'update_token_value', (['self.__file', 'self.__tokens[pos]', 'fixed_value'], {}), '(self.__file, self.__tokens[pos], fixed_value)\n', (4865, 4911), False, 'from javaccflab.java_token import TokenType, Token, update_token_value\n'), ((21386, 21448), 'javaccflab.java_token.update_token_value', 'update_token_value', (['self.__file', 'comment_token', 'comment_string'], {}), '(self.__file, comment_token, comment_string)\n', (21404, 21448), False, 'from javaccflab.java_token import TokenType, Token, update_token_value\n'), ((9571, 9592), 'datetime.date.today', 'datetime.date.today', ([], {}), '()\n', (9590, 9592), False, 'import datetime\n'), ((6692, 6763), 'javaccflab.java_token.update_token_value', 'update_token_value', (['self.__file', 'self.__tokens[naming_pos]', 'fixed_value'], {}), '(self.__file, self.__tokens[naming_pos], fixed_value)\n', (6710, 6763), False, 'from javaccflab.java_token import TokenType, Token, update_token_value\n')] |
import sqlite3
class ManageData:
def __init__(self, queue_tracker_db, email_tracker_db, delivery_tracker_db):
self.queue_tracker_db = queue_tracker_db
self.email_tracker_db = email_tracker_db
self.delivery_tracker_db = delivery_tracker_db
def manage_queue_tracker(self, fields):
"""
Receive one of the following located groups as <fields>:
[('ID', <id>), ('client_email', <email>)];
[('ID', <id>), ('receivers', <email>), ('status', <status>)];
[('ID', <id>)];
and manage the <queue_tracker_db> accordingly.
"""
if len(fields) == 1:
ID = fields[0][1]
self.manage_email_tracker(ID)
self.manage_delivery_tracker(ID)
del self.queue_tracker_db[ID]
elif len(fields) == 2:
ID, client_email = (f[1] for f in fields)
self.queue_tracker_db[ID]['client_email'] = client_email
elif len(fields) == 3:
ID, receiver, status = (f[1] for f in fields)
if status == 'sent':
code = 1
else:
code = 0
self.queue_tracker_db[ID]['receivers'][receiver] = code
def manage_email_tracker(self, ID):
"""
Retrieve client's email from the <queue_tracker_db> by <ID>
with the amount of 'receivers' whose 'status' == 1
and store it in the <email_tracker_db>.
"""
client_email = self.queue_tracker_db[ID]['client_email']
receivers = self.queue_tracker_db[ID]['receivers']
delivered_mail = [r for r in receivers if receivers[r] == 1]
if client_email in self.email_tracker_db:
self.email_tracker_db[client_email] += len(delivered_mail)
else:
self.email_tracker_db[client_email] = len(delivered_mail)
def manage_delivery_tracker(self, ID):
"""
Go through all receivers of <ID> queue of <queue_tracker_db>,
and add their delivery statuses to the <delivery_tracker_db> counter
"""
receivers = self.queue_tracker_db[ID]['receivers']
for receiver in receivers:
if receivers[receiver] == 1:
self.delivery_tracker_db['delivered'] += 1
else:
self.delivery_tracker_db['undelivered'] += 1
class ManageDatabase(ManageData):
def __init__(self, path, *args, **kwargs):
self.path = path
super().__init__(*args, **kwargs)
def _execute_command(self, *command):
con = sqlite3.connect(self.path)
cursor = con.cursor()
result = cursor.execute(*command)
if result:
result = result.fetchall()
con.commit()
con.close()
return result
def create_db(self):
self._execute_command('''CREATE TABLE IF NOT EXISTS email_tracker
(client_email TEXT PRIMARY KEY, num_of_letters_sent INTEGER)''')
def transfer_data(self):
for email, num_of_letters in self.email_tracker_db.items():
self._execute_command('''INSERT INTO email_tracker VALUES
(?, ?)''', (email, num_of_letters))
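# Minimal illustrative usage of ManageData with in-memory dictionaries; the queue ID,
# addresses and field tuples below are assumed values for demonstration only.
if __name__ == '__main__':
    queue_db = {'q1': {'client_email': None, 'receivers': {}}}
    manager = ManageData(queue_db, email_tracker_db={}, delivery_tracker_db={'delivered': 0, 'undelivered': 0})
    manager.manage_queue_tracker([('ID', 'q1'), ('client_email', 'user@example.com')])
    manager.manage_queue_tracker([('ID', 'q1'), ('receivers', 'rcpt@example.com'), ('status', 'sent')])
    manager.manage_queue_tracker([('ID', 'q1')])  # finalises the entry and updates both trackers
    print(manager.email_tracker_db, manager.delivery_tracker_db)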
| [
"sqlite3.connect"
] | [((2551, 2577), 'sqlite3.connect', 'sqlite3.connect', (['self.path'], {}), '(self.path)\n', (2566, 2577), False, 'import sqlite3\n')] |
import math

import torch
from torch import autograd
def g_path_regularize(fake_img, latents, mean_path_length, decay=0.01):
noise = torch.randn_like(fake_img) / math.sqrt(
fake_img.shape[2] * fake_img.shape[3]
)
grad, = autograd.grad(
outputs=(fake_img * noise).sum(), inputs=latents, create_graph=True
)
path_lengths = torch.sqrt(grad.pow(2).sum(2).mean(1))
path_mean = mean_path_length + decay * (path_lengths.mean() - mean_path_length)
path_penalty = (path_lengths - path_mean).pow(2).mean()
return path_penalty, path_mean.detach(), path_lengths
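# Minimal illustrative sketch of calling g_path_regularize; the latent shape and the toy
# linear "generator" below are assumptions for demonstration, not part of the real model.
if __name__ == '__main__':
    latents = torch.randn(2, 1, 512, requires_grad=True)
    weight = torch.randn(512, 3 * 16 * 16)
    fake_img = (latents.squeeze(1) @ weight).view(2, 3, 16, 16)
    penalty, mean_path, lengths = g_path_regularize(fake_img, latents, mean_path_length=0.0)
    print(penalty.item(), mean_path.item(), lengths.shape)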
| [
"math.sqrt"
] | [((126, 174), 'math.sqrt', 'math.sqrt', (['(fake_img.shape[2] * fake_img.shape[3])'], {}), '(fake_img.shape[2] * fake_img.shape[3])\n', (135, 174), False, 'import math\n')] |
import numpy as np
from typing import Tuple, Union, Optional
from autoarray.structures.arrays.two_d import array_2d_util
from autoarray.geometry import geometry_util
from autoarray import numba_util
from autoarray.mask import mask_2d_util
@numba_util.jit()
def grid_2d_centre_from(grid_2d_slim: np.ndarray) -> Tuple[float, float]:
"""
Returns the centre of a grid from a 1D grid.
Parameters
----------
grid_2d_slim
The 1D grid of values which are mapped to a 2D array.
Returns
-------
(float, float)
The (y,x) central coordinates of the grid.
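    Examples
    --------
    An illustrative call; the coordinate values below are assumed for demonstration only.
    grid_2d_slim = np.array([[1.0, 1.0], [1.0, 3.0], [3.0, 1.0], [3.0, 3.0]])
    centre_y, centre_x = grid_2d_centre_from(grid_2d_slim=grid_2d_slim)  # (2.0, 2.0)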
"""
centre_y = (np.max(grid_2d_slim[:, 0]) + np.min(grid_2d_slim[:, 0])) / 2.0
centre_x = (np.max(grid_2d_slim[:, 1]) + np.min(grid_2d_slim[:, 1])) / 2.0
return centre_y, centre_x
@numba_util.jit()
def grid_2d_slim_via_mask_from(
mask_2d: np.ndarray,
pixel_scales: Union[float, Tuple[float, float]],
sub_size: int,
origin: Tuple[float, float] = (0.0, 0.0),
) -> np.ndarray:
"""
For a sub-grid, every unmasked pixel of its 2D mask with shape (total_y_pixels, total_x_pixels) is divided into
a finer uniform grid of shape (total_y_pixels*sub_size, total_x_pixels*sub_size). This routine computes the (y,x)
    scaled coordinates at the centre of every sub-pixel defined by this 2D mask array.
    The sub-grid is returned as an array of shape (total_unmasked_pixels*sub_size**2, 2). y coordinates are
stored in the 0 index of the second dimension, x coordinates in the 1 index. Masked coordinates are therefore
removed and not included in the slimmed grid.
Grid2D are defined from the top-left corner, where the first unmasked sub-pixel corresponds to index 0.
Sub-pixels that are part of the same mask array pixel are indexed next to one another, such that the second
sub-pixel in the first pixel has index 1, its next sub-pixel has index 2, and so forth.
Parameters
----------
mask_2d
A 2D array of bools, where `False` values are unmasked and therefore included as part of the calculated
sub-grid.
pixel_scales
The (y,x) scaled units to pixel units conversion factor of the 2D mask array.
sub_size
The size of the sub-grid that each pixel of the 2D mask array is divided into.
    origin : (float, float)
The (y,x) origin of the 2D array, which the sub-grid is shifted around.
Returns
-------
ndarray
        A slimmed sub grid of (y,x) scaled coordinates at the centre of every sub-pixel of every unmasked pixel on the 2D mask
array. The sub grid array has dimensions (total_unmasked_pixels*sub_size**2, 2).
Examples
--------
    mask = np.array([[True, False, True],
                     [False, False, False],
                     [True, False, True]])
    grid_slim = grid_2d_slim_via_mask_from(mask_2d=mask, pixel_scales=(0.5, 0.5), sub_size=1, origin=(0.0, 0.0))
"""
total_sub_pixels = mask_2d_util.total_sub_pixels_2d_from(mask_2d, sub_size)
grid_slim = np.zeros(shape=(total_sub_pixels, 2))
centres_scaled = geometry_util.central_scaled_coordinate_2d_from(
shape_native=mask_2d.shape, pixel_scales=pixel_scales, origin=origin
)
sub_index = 0
y_sub_half = pixel_scales[0] / 2
y_sub_step = pixel_scales[0] / (sub_size)
x_sub_half = pixel_scales[1] / 2
x_sub_step = pixel_scales[1] / (sub_size)
for y in range(mask_2d.shape[0]):
for x in range(mask_2d.shape[1]):
if not mask_2d[y, x]:
y_scaled = (y - centres_scaled[0]) * pixel_scales[0]
x_scaled = (x - centres_scaled[1]) * pixel_scales[1]
for y1 in range(sub_size):
for x1 in range(sub_size):
grid_slim[sub_index, 0] = -(
y_scaled - y_sub_half + y1 * y_sub_step + (y_sub_step / 2.0)
)
grid_slim[sub_index, 1] = (
x_scaled - x_sub_half + x1 * x_sub_step + (x_sub_step / 2.0)
)
sub_index += 1
return grid_slim
def grid_2d_via_mask_from(
mask_2d: np.ndarray,
pixel_scales: Union[float, Tuple[float, float]],
sub_size: int,
origin: Tuple[float, float] = (0.0, 0.0),
) -> np.ndarray:
"""
For a sub-grid, every unmasked pixel of its 2D mask with shape (total_y_pixels, total_x_pixels) is divided into a
finer uniform grid of shape (total_y_pixels*sub_size, total_x_pixels*sub_size). This routine computes the (y,x)
scaled coordinates at the centre of every sub-pixel defined by this 2D mask array.
The sub-grid is returned in its native dimensions with shape (total_y_pixels*sub_size, total_x_pixels*sub_size).
y coordinates are stored in the 0 index of the second dimension, x coordinates in the 1 index. Masked pixels are
given values (0.0, 0.0).
Grids are defined from the top-left corner, where the first unmasked sub-pixel corresponds to index 0.
Sub-pixels that are part of the same mask array pixel are indexed next to one another, such that the second
sub-pixel in the first pixel has index 1, its next sub-pixel has index 2, and so forth.
Parameters
----------
mask_2d
A 2D array of bools, where `False` values are unmasked and therefore included as part of the calculated
sub-grid.
pixel_scales
The (y,x) scaled units to pixel units conversion factor of the 2D mask array.
sub_size
The size of the sub-grid that each pixel of the 2D mask array is divided into.
    origin : (float, float)
The (y,x) origin of the 2D array, which the sub-grid is shifted around.
Returns
-------
ndarray
        A sub grid of (y,x) scaled coordinates at the centre of every sub-pixel of every unmasked pixel on the 2D mask
array. The sub grid array has dimensions (total_y_pixels*sub_size, total_x_pixels*sub_size).
Examples
--------
    mask = np.array([[True, False, True],
                     [False, False, False],
                     [True, False, True]])
    grid_2d = grid_2d_via_mask_from(mask_2d=mask, pixel_scales=(0.5, 0.5), sub_size=1, origin=(0.0, 0.0))
"""
grid_2d_slim = grid_2d_slim_via_mask_from(
mask_2d=mask_2d, pixel_scales=pixel_scales, sub_size=sub_size, origin=origin
)
return grid_2d_native_from(
grid_2d_slim=grid_2d_slim, mask_2d=mask_2d, sub_size=sub_size
)
def grid_2d_slim_via_shape_native_from(
shape_native: Tuple[int, int],
pixel_scales: Union[float, Tuple[float, float]],
sub_size: int,
origin: Tuple[float, float] = (0.0, 0.0),
) -> np.ndarray:
"""
For a sub-grid, every unmasked pixel of its 2D mask with shape (total_y_pixels, total_x_pixels) is divided into a
finer uniform grid of shape (total_y_pixels*sub_size, total_x_pixels*sub_size). This routine computes the (y,x)
scaled coordinates at the centre of every sub-pixel defined by this 2D mask array.
The sub-grid is returned in its slimmed dimensions with shape (total_pixels**2*sub_size**2, 2). y coordinates are
stored in the 0 index of the second dimension, x coordinates in the 1 index.
Grid2D are defined from the top-left corner, where the first sub-pixel corresponds to index [0,0].
Sub-pixels that are part of the same mask array pixel are indexed next to one another, such that the second
sub-pixel in the first pixel has index 1, its next sub-pixel has index 2, and so forth.
Parameters
----------
shape_native
The (y,x) shape of the 2D array the sub-grid of coordinates is computed for.
pixel_scales
The (y,x) scaled units to pixel units conversion factor of the 2D mask array.
sub_size
The size of the sub-grid that each pixel of the 2D mask array is divided into.
origin
The (y,x) origin of the 2D array, which the sub-grid is shifted around.
Returns
-------
ndarray
        A sub grid of (y,x) scaled coordinates at the centre of every sub-pixel of every unmasked pixel on the 2D mask
array. The sub grid is slimmed and has dimensions (total_unmasked_pixels*sub_size**2, 2).
Examples
--------
    mask = np.array([[True, False, True],
                     [False, False, False],
                     [True, False, True]])
grid_2d_slim = grid_2d_slim_via_shape_native_from(shape_native=(3,3), pixel_scales=(0.5, 0.5), sub_size=2, origin=(0.0, 0.0))
"""
return grid_2d_slim_via_mask_from(
mask_2d=np.full(fill_value=False, shape=shape_native),
pixel_scales=pixel_scales,
sub_size=sub_size,
origin=origin,
)
def grid_2d_via_shape_native_from(
shape_native: Tuple[int, int],
pixel_scales: Union[float, Tuple[float, float]],
sub_size: int,
origin: Tuple[float, float] = (0.0, 0.0),
) -> np.ndarray:
"""
For a sub-grid, every unmasked pixel of its 2D mask with shape (total_y_pixels, total_x_pixels) is divided
into a finer uniform grid of shape (total_y_pixels*sub_size, total_x_pixels*sub_size). This routine computes
the (y,x) scaled coordinates at the centre of every sub-pixel defined by this 2D mask array.
The sub-grid is returned in its native dimensions with shape (total_y_pixels*sub_size, total_x_pixels*sub_size).
y coordinates are stored in the 0 index of the second dimension, x coordinates in the 1 index.
Grids are defined from the top-left corner, where the first sub-pixel corresponds to index [0,0].
Sub-pixels that are part of the same mask array pixel are indexed next to one another, such that the second
sub-pixel in the first pixel has index 1, its next sub-pixel has index 2, and so forth.
Parameters
----------
shape_native
The (y,x) shape of the 2D array the sub-grid of coordinates is computed for.
pixel_scales
The (y,x) scaled units to pixel units conversion factor of the 2D mask array.
sub_size
The size of the sub-grid that each pixel of the 2D mask array is divided into.
    origin : (float, float)
The (y,x) origin of the 2D array, which the sub-grid is shifted around.
Returns
-------
ndarray
        A sub grid of (y,x) scaled coordinates at the centre of every sub-pixel of every unmasked pixel on the 2D mask
array. The sub grid array has dimensions (total_y_pixels*sub_size, total_x_pixels*sub_size).
Examples
--------
grid_2d = grid_2d_via_shape_native_from(shape_native=(3, 3), pixel_scales=(1.0, 1.0), sub_size=2, origin=(0.0, 0.0))
"""
return grid_2d_via_mask_from(
mask_2d=np.full(fill_value=False, shape=shape_native),
pixel_scales=pixel_scales,
sub_size=sub_size,
origin=origin,
)
@numba_util.jit()
def grid_scaled_2d_slim_radial_projected_from(
extent: np.ndarray,
centre: Tuple[float, float],
pixel_scales: Union[float, Tuple[float, float]],
sub_size: int,
shape_slim: Optional[int] = 0,
) -> np.ndarray:
"""
Determine a projected radial grid of points from a 2D region of coordinates defined by an
    extent [xmin, xmax, ymin, ymax] and with a (y,x) centre. This function operates as follows:
1) Given the region defined by the extent [xmin, xmax, ymin, ymax], the algorithm finds the longest 1D distance of
the 4 paths from the (y,x) centre to the edge of the region (e.g. following the positive / negative y and x axes).
2) Use the pixel-scale corresponding to the direction chosen (e.g. if the positive x-axis was the longest, the
pixel_scale in the x dimension is used).
3) Determine the number of pixels between the centre and the edge of the region using the longest path between the
two chosen above.
4) Create a (y,x) grid of radial points where all points are at the centre's y value = 0.0 and the x values iterate
from the centre in increasing steps of the pixel-scale.
5) Rotate these radial coordinates by the input `angle` clockwise.
    A schematic is shown below:
-------------------
| |
|<- - - - ->x | x = centre
| | <-> = longest radial path from centre to extent edge
| |
-------------------
Using the centre x above, this function finds the longest radial path to the edge of the extent window.
The returned `grid_radii` represents a radial set of points that in 1D sample the 2D grid outwards from its centre.
This grid stores the radial coordinates as (y,x) values (where all y values are the same) as opposed to a 1D data
structure so that it can be used in functions which require that a 2D grid structure is input.
Parameters
----------
extent
The extent of the grid the radii grid is computed using, with format [xmin, xmax, ymin, ymax]
    centre : (float, float)
The (y,x) central coordinate which the radial grid is traced outwards from.
pixel_scales
The (y,x) scaled units to pixel units conversion factor of the 2D mask array.
sub_size
The size of the sub-grid that each pixel of the 2D mask array is divided into.
shape_slim
Manually choose the shape of the 1D projected grid that is returned. If 0, the border based on the 2D grid is
used (due to numba None cannot be used as a default value).
Returns
-------
ndarray
        A radial set of points sampling the longest distance from the centre to the edge of the extent along the
positive x-axis.
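    Examples
    --------
    An illustrative call; the extent, centre and pixel scales below are assumed values.
    grid_radii = grid_scaled_2d_slim_radial_projected_from(
        extent=np.array([-1.0, 1.0, -1.0, 1.0]), centre=(0.0, 0.0), pixel_scales=(0.1, 0.1), sub_size=1
    )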
"""
distance_to_positive_x = extent[1] - centre[1]
distance_to_positive_y = extent[3] - centre[0]
distance_to_negative_x = centre[1] - extent[0]
distance_to_negative_y = centre[0] - extent[2]
scaled_distance = max(
[
distance_to_positive_x,
distance_to_positive_y,
distance_to_negative_x,
distance_to_negative_y,
]
)
if (scaled_distance == distance_to_positive_y) or (
scaled_distance == distance_to_negative_y
):
pixel_scale = pixel_scales[0]
else:
pixel_scale = pixel_scales[1]
if shape_slim == 0:
shape_slim = sub_size * int((scaled_distance / pixel_scale)) + 1
grid_scaled_2d_slim_radii = np.zeros((shape_slim, 2))
grid_scaled_2d_slim_radii[:, 0] += centre[0]
radii = centre[1]
for slim_index in range(shape_slim):
grid_scaled_2d_slim_radii[slim_index, 1] = radii
radii += pixel_scale / sub_size
return grid_scaled_2d_slim_radii
@numba_util.jit()
def grid_pixels_2d_slim_from(
grid_scaled_2d_slim: np.ndarray,
shape_native: Tuple[int, int],
pixel_scales: Union[float, Tuple[float, float]],
origin: Tuple[float, float] = (0.0, 0.0),
) -> np.ndarray:
"""
Convert a slimmed grid of 2d (y,x) scaled coordinates to a slimmed grid of 2d (y,x) pixel coordinate values. Pixel
coordinates are returned as floats such that they include the decimal offset from each pixel's top-left corner
relative to the input scaled coordinate.
The input and output grids are both slimmed and therefore shape (total_pixels, 2).
The pixel coordinate origin is at the top left corner of the grid, such that the pixel [0,0] corresponds to
    the highest (most positive) y scaled coordinate and lowest (most negative) x scaled coordinate on the grid.
The scaled grid is defined by an origin and coordinates are shifted to this origin before computing their
1D grid pixel coordinate values.
Parameters
----------
grid_scaled_2d_slim: np.ndarray
The slimmed grid of 2D (y,x) coordinates in scaled units which are converted to pixel value coordinates.
shape_native
The (y,x) shape of the original 2D array the scaled coordinates were computed on.
pixel_scales
The (y,x) scaled units to pixel units conversion factor of the original 2D array.
    origin : (float, float)
The (y,x) origin of the grid, which the scaled grid is shifted to.
Returns
-------
ndarray
A slimmed grid of 2D (y,x) pixel-value coordinates with dimensions (total_pixels, 2).
Examples
--------
grid_scaled_2d_slim = np.array([[1.0, 1.0], [2.0, 2.0], [3.0, 3.0], [4.0, 4.0]])
    grid_pixels_2d_slim = grid_pixels_2d_slim_from(grid_scaled_2d_slim=grid_scaled_2d_slim, shape_native=(2,2),
                                                   pixel_scales=(0.5, 0.5), origin=(0.0, 0.0))
"""
grid_pixels_2d_slim = np.zeros((grid_scaled_2d_slim.shape[0], 2))
centres_scaled = geometry_util.central_scaled_coordinate_2d_from(
shape_native=shape_native, pixel_scales=pixel_scales, origin=origin
)
for slim_index in range(grid_scaled_2d_slim.shape[0]):
grid_pixels_2d_slim[slim_index, 0] = (
(-grid_scaled_2d_slim[slim_index, 0] / pixel_scales[0])
+ centres_scaled[0]
+ 0.5
)
grid_pixels_2d_slim[slim_index, 1] = (
(grid_scaled_2d_slim[slim_index, 1] / pixel_scales[1])
+ centres_scaled[1]
+ 0.5
)
return grid_pixels_2d_slim
@numba_util.jit()
def grid_pixel_centres_2d_slim_from(
grid_scaled_2d_slim: np.ndarray,
shape_native: Tuple[int, int],
pixel_scales: Union[float, Tuple[float, float]],
origin: Tuple[float, float] = (0.0, 0.0),
) -> np.ndarray:
"""
Convert a slimmed grid of 2D (y,x) scaled coordinates to a slimmed grid of 2D (y,x) pixel values. Pixel coordinates
are returned as integers such that they map directly to the pixel they are contained within.
The input and output grids are both slimmed and therefore shape (total_pixels, 2).
The pixel coordinate origin is at the top left corner of the grid, such that the pixel [0,0] corresponds to
    the highest (most positive) y scaled coordinate and lowest (most negative) x scaled coordinate on the grid.
The scaled coordinate grid is defined by the class attribute origin, and coordinates are shifted to this
origin before computing their 1D grid pixel indexes.
Parameters
----------
grid_scaled_2d_slim: np.ndarray
The slimmed grid of 2D (y,x) coordinates in scaled units which is converted to pixel indexes.
shape_native
The (y,x) shape of the original 2D array the scaled coordinates were computed on.
pixel_scales
The (y,x) scaled units to pixel units conversion factor of the original 2D array.
    origin : (float, float)
The (y,x) origin of the grid, which the scaled grid is shifted
Returns
-------
ndarray
A slimmed grid of 2D (y,x) pixel indexes with dimensions (total_pixels, 2).
Examples
--------
grid_scaled_2d_slim = np.array([[1.0, 1.0], [2.0, 2.0], [3.0, 3.0], [4.0, 4.0]])
    grid_pixel_centres_2d_slim = grid_pixel_centres_2d_slim_from(grid_scaled_2d_slim=grid_scaled_2d_slim, shape_native=(2,2),
                                                                  pixel_scales=(0.5, 0.5), origin=(0.0, 0.0))
"""
grid_pixels_2d_slim = np.zeros((grid_scaled_2d_slim.shape[0], 2))
centres_scaled = geometry_util.central_scaled_coordinate_2d_from(
shape_native=shape_native, pixel_scales=pixel_scales, origin=origin
)
for slim_index in range(grid_scaled_2d_slim.shape[0]):
grid_pixels_2d_slim[slim_index, 0] = int(
(-grid_scaled_2d_slim[slim_index, 0] / pixel_scales[0])
+ centres_scaled[0]
+ 0.5
)
grid_pixels_2d_slim[slim_index, 1] = int(
(grid_scaled_2d_slim[slim_index, 1] / pixel_scales[1])
+ centres_scaled[1]
+ 0.5
)
return grid_pixels_2d_slim
@numba_util.jit()
def grid_pixel_indexes_2d_slim_from(
grid_scaled_2d_slim: np.ndarray,
shape_native: Tuple[int, int],
pixel_scales: Union[float, Tuple[float, float]],
origin: Tuple[float, float] = (0.0, 0.0),
) -> np.ndarray:
"""
Convert a slimmed grid of 2D (y,x) scaled coordinates to a slimmed grid of pixel indexes. Pixel coordinates are
returned as integers such that they are the pixel from the top-left of the 2D grid going rights and then downwards.
The input and output grids are both slimmed and have shapes (total_pixels, 2) and (total_pixels,).
For example:
The pixel at the top-left, whose native index is [0,0], corresponds to slimmed pixel index 0.
The fifth pixel on the top row, whose native index is [0,5], corresponds to slimmed pixel index 4.
The first pixel on the second row, whose native index is [0,1], has slimmed pixel index 10 if a row has 10 pixels.
The scaled coordinate grid is defined by the class attribute origin, and coordinates are shifted to this
origin before computing their 1D grid pixel indexes.
The input and output grids are both of shape (total_pixels, 2).
Parameters
----------
grid_scaled_2d_slim: np.ndarray
The slimmed grid of 2D (y,x) coordinates in scaled units which is converted to slimmed pixel indexes.
shape_native
The (y,x) shape of the original 2D array the scaled coordinates were computed on.
pixel_scales
The (y,x) scaled units to pixel units conversion factor of the original 2D array.
    origin : (float, float)
The (y,x) origin of the grid, which the scaled grid is shifted.
Returns
-------
ndarray
A grid of slimmed pixel indexes with dimensions (total_pixels,).
Examples
--------
grid_scaled_2d_slim = np.array([[1.0, 1.0], [2.0, 2.0], [3.0, 3.0], [4.0, 4.0]])
    grid_pixel_indexes_2d_slim = grid_pixel_indexes_2d_slim_from(grid_scaled_2d_slim=grid_scaled_2d_slim, shape_native=(2,2),
                                                                  pixel_scales=(0.5, 0.5), origin=(0.0, 0.0))
"""
grid_pixels_2d_slim = grid_pixel_centres_2d_slim_from(
grid_scaled_2d_slim=grid_scaled_2d_slim,
shape_native=shape_native,
pixel_scales=pixel_scales,
origin=origin,
)
grid_pixel_indexes_2d_slim = np.zeros(grid_pixels_2d_slim.shape[0])
for slim_index in range(grid_pixels_2d_slim.shape[0]):
grid_pixel_indexes_2d_slim[slim_index] = int(
grid_pixels_2d_slim[slim_index, 0] * shape_native[1]
+ grid_pixels_2d_slim[slim_index, 1]
)
return grid_pixel_indexes_2d_slim
@numba_util.jit()
def grid_scaled_2d_slim_from(
grid_pixels_2d_slim: np.ndarray,
shape_native: Tuple[int, int],
pixel_scales: Union[float, Tuple[float, float]],
origin: Tuple[float, float] = (0.0, 0.0),
) -> np.ndarray:
"""
Convert a slimmed grid of 2D (y,x) pixel coordinates to a slimmed grid of 2D (y,x) scaled values.
The input and output grids are both slimmed and therefore shape (total_pixels, 2).
The pixel coordinate origin is at the top left corner of the grid, such that the pixel [0,0] corresponds to
    the highest (most positive) y scaled coordinate and lowest (most negative) x scaled coordinate on the grid.
The scaled coordinate origin is defined by the class attribute origin, and coordinates are shifted to this
origin after computing their values from the 1D grid pixel indexes.
Parameters
----------
grid_pixels_2d_slim: np.ndarray
The slimmed grid of (y,x) coordinates in pixel values which is converted to scaled coordinates.
shape_native
The (y,x) shape of the original 2D array the scaled coordinates were computed on.
pixel_scales
The (y,x) scaled units to pixel units conversion factor of the original 2D array.
    origin : (float, float)
The (y,x) origin of the grid, which the scaled grid is shifted.
Returns
-------
ndarray
A slimmed grid of 2d scaled coordinates with dimensions (total_pixels, 2).
Examples
--------
    grid_pixels_2d_slim = np.array([[0,0], [0,1], [1,0], [1,1]])
    grid_scaled_2d_slim = grid_scaled_2d_slim_from(grid_pixels_2d_slim=grid_pixels_2d_slim, shape_native=(2,2),
                                                   pixel_scales=(0.5, 0.5), origin=(0.0, 0.0))
"""
grid_scaled_2d_slim = np.zeros((grid_pixels_2d_slim.shape[0], 2))
centres_scaled = geometry_util.central_scaled_coordinate_2d_from(
shape_native=shape_native, pixel_scales=pixel_scales, origin=origin
)
for slim_index in range(grid_scaled_2d_slim.shape[0]):
grid_scaled_2d_slim[slim_index, 0] = (
-(grid_pixels_2d_slim[slim_index, 0] - centres_scaled[0] - 0.5)
* pixel_scales[0]
)
grid_scaled_2d_slim[slim_index, 1] = (
grid_pixels_2d_slim[slim_index, 1] - centres_scaled[1] - 0.5
) * pixel_scales[1]
return grid_scaled_2d_slim
@numba_util.jit()
def grid_pixel_centres_2d_from(
grid_scaled_2d: np.ndarray,
shape_native: Tuple[int, int],
pixel_scales: Union[float, Tuple[float, float]],
origin: Tuple[float, float] = (0.0, 0.0),
) -> np.ndarray:
"""
Convert a native grid of 2D (y,x) scaled coordinates to a native grid of 2D (y,x) pixel values. Pixel coordinates
are returned as integers such that they map directly to the pixel they are contained within.
The input and output grids are both native resolution and therefore have shape (y_pixels, x_pixels, 2).
The pixel coordinate origin is at the top left corner of the grid, such that the pixel [0,0] corresponds to
    the highest (most positive) y scaled coordinate and lowest (most negative) x scaled coordinate on the grid.
The scaled coordinate grid is defined by the class attribute origin, and coordinates are shifted to this
origin before computing their 1D grid pixel indexes.
Parameters
----------
grid_scaled_2d: np.ndarray
The native grid of 2D (y,x) coordinates in scaled units which is converted to pixel indexes.
shape_native
The (y,x) shape of the original 2D array the scaled coordinates were computed on.
pixel_scales
The (y,x) scaled units to pixel units conversion factor of the original 2D array.
    origin : (float, float)
The (y,x) origin of the grid, which the scaled grid is shifted
Returns
-------
ndarray
A native grid of 2D (y,x) pixel indexes with dimensions (y_pixels, x_pixels, 2).
Examples
--------
grid_scaled_2d = np.array([[1.0, 1.0], [2.0, 2.0], [3.0, 3.0], [4.0, 4.0]])
    grid_pixel_centres_2d = grid_pixel_centres_2d_from(grid_scaled_2d=grid_scaled_2d, shape_native=(2,2),
                                                        pixel_scales=(0.5, 0.5), origin=(0.0, 0.0))
"""
grid_pixels_2d = np.zeros((grid_scaled_2d.shape[0], grid_scaled_2d.shape[1], 2))
centres_scaled = geometry_util.central_scaled_coordinate_2d_from(
shape_native=shape_native, pixel_scales=pixel_scales, origin=origin
)
for y in range(grid_scaled_2d.shape[0]):
for x in range(grid_scaled_2d.shape[1]):
grid_pixels_2d[y, x, 0] = int(
(-grid_scaled_2d[y, x, 0] / pixel_scales[0]) + centres_scaled[0] + 0.5
)
grid_pixels_2d[y, x, 1] = int(
(grid_scaled_2d[y, x, 1] / pixel_scales[1]) + centres_scaled[1] + 0.5
)
return grid_pixels_2d
@numba_util.jit()
def relocated_grid_via_jit_from(grid, border_grid):
"""
Relocate the coordinates of a grid to its border if they are outside the border, where the border is
defined as all pixels at the edge of the grid's mask (see *mask._border_1d_indexes*).
This is performed as follows:
1: Use the mean value of the grid's y and x coordinates to determine the origin of the grid.
2: Compute the radial distance of every grid coordinate from the origin.
3: For every coordinate, find its nearest pixel in the border.
4: Determine if it is outside the border, by comparing its radial distance from the origin to its paired
border pixel's radial distance.
5: If its radial distance is larger, use the ratio of radial distances to move the coordinate to the
    border (if it's inside the border, do nothing).
    The method can be used on uniform or irregular grids; however, for irregular grids the border of the
    'image-plane' mask is used to define border pixels.
Parameters
----------
grid : Grid2D
The grid (uniform or irregular) whose pixels are to be relocated to the border edge if outside it.
border_grid : Grid2D
The grid of border (y,x) coordinates.
"""
grid_relocated = np.zeros(grid.shape)
grid_relocated[:, :] = grid[:, :]
border_origin = np.zeros(2)
border_origin[0] = np.mean(border_grid[:, 0])
border_origin[1] = np.mean(border_grid[:, 1])
border_grid_radii = np.sqrt(
np.add(
np.square(np.subtract(border_grid[:, 0], border_origin[0])),
np.square(np.subtract(border_grid[:, 1], border_origin[1])),
)
)
border_min_radii = np.min(border_grid_radii)
grid_radii = np.sqrt(
np.add(
np.square(np.subtract(grid[:, 0], border_origin[0])),
np.square(np.subtract(grid[:, 1], border_origin[1])),
)
)
for pixel_index in range(grid.shape[0]):
if grid_radii[pixel_index] > border_min_radii:
closest_pixel_index = np.argmin(
np.square(grid[pixel_index, 0] - border_grid[:, 0])
+ np.square(grid[pixel_index, 1] - border_grid[:, 1])
)
move_factor = (
border_grid_radii[closest_pixel_index] / grid_radii[pixel_index]
)
if move_factor < 1.0:
grid_relocated[pixel_index, :] = (
move_factor * (grid[pixel_index, :] - border_origin[:])
+ border_origin[:]
)
return grid_relocated
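# Illustrative usage sketch (not from the original source): with a unit-radius border centred on the
# origin, a coordinate outside the border is pulled back onto it while an interior coordinate is
# left unchanged.
#
#     border_grid = np.array([[1.0, 0.0], [0.0, 1.0], [-1.0, 0.0], [0.0, -1.0]])
#     grid = np.array([[0.0, 2.0], [0.1, 0.0]])
#     relocated_grid_via_jit_from(grid=grid, border_grid=border_grid)
#     # -> array([[0.0, 1.0], [0.1, 0.0]])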
@numba_util.jit()
def furthest_grid_2d_slim_index_from(
grid_2d_slim: np.ndarray, slim_indexes: np.ndarray, coordinate: Tuple[float, float]
) -> int:
distance_to_centre = 0.0
for slim_index in slim_indexes:
y = grid_2d_slim[slim_index, 0]
x = grid_2d_slim[slim_index, 1]
distance_to_centre_new = (x - coordinate[1]) ** 2 + (y - coordinate[0]) ** 2
if distance_to_centre_new >= distance_to_centre:
distance_to_centre = distance_to_centre_new
furthest_grid_2d_slim_index = slim_index
return furthest_grid_2d_slim_index
def grid_2d_slim_from(
grid_2d_native: np.ndarray, mask: np.ndarray, sub_size: int
) -> np.ndarray:
"""
For a native 2D grid and mask of shape [total_y_pixels, total_x_pixels, 2], map the values of all unmasked
pixels to a slimmed grid of shape [total_unmasked_pixels, 2].
    The pixel coordinate origin is at the top left corner of the native grid and goes rightwards and downwards, such
    that for a grid of shape (3,3) where all pixels are unmasked:
- pixel [0,0] of the 2D grid will correspond to index 0 of the 1D grid.
- pixel [0,1] of the 2D grid will correspond to index 1 of the 1D grid.
- pixel [1,0] of the 2D grid will correspond to index 4 of the 1D grid.
Parameters
----------
grid_2d_native : ndarray
The native grid of (y,x) values which are mapped to the slimmed grid.
    mask
A 2D array of bools, where `False` values mean unmasked and are included in the mapping.
sub_size
        The size (sub_size x sub_size) of each unmasked pixel's sub-array.
Returns
-------
ndarray
A 1D grid of values mapped from the 2D grid with dimensions (total_unmasked_pixels).
"""
grid_1d_slim_y = array_2d_util.array_2d_slim_from(
array_2d_native=grid_2d_native[:, :, 0], mask_2d=mask, sub_size=sub_size
)
grid_1d_slim_x = array_2d_util.array_2d_slim_from(
array_2d_native=grid_2d_native[:, :, 1], mask_2d=mask, sub_size=sub_size
)
return np.stack((grid_1d_slim_y, grid_1d_slim_x), axis=-1)
def grid_2d_native_from(
grid_2d_slim: np.ndarray, mask_2d: np.ndarray, sub_size: int
) -> np.ndarray:
"""
For a slimmed 2D grid of shape [total_unmasked_pixels, 2], that was computed by extracting the unmasked values
from a native 2D grid of shape [total_y_pixels, total_x_pixels, 2], map the slimmed grid's coordinates back to the
native 2D grid where masked values are set to zero.
This uses a 1D array 'slim_to_native' where each index gives the 2D pixel indexes of the grid's native unmasked
pixels, for example:
- If slim_to_native[0] = [0,0], the first value of the 1D array maps to the pixels [0,0,:] of the native 2D grid.
- If slim_to_native[1] = [0,1], the second value of the 1D array maps to the pixels [0,1,:] of the native 2D grid.
- If slim_to_native[4] = [1,1], the fifth value of the 1D array maps to the pixels [1,1,:] of the native 2D grid.
Parameters
----------
grid_2d_slim
The (y,x) values of the slimmed 2D grid which are mapped to the native 2D grid.
mask_2d
A 2D array of bools, where `False` values mean unmasked and are included in the mapping.
sub_size
        The size (sub_size x sub_size) of each unmasked pixel's sub-array.
Returns
-------
ndarray
        A NumPy array of shape [total_y_pixels, total_x_pixels, 2] corresponding to the (y,x) values of the native 2D
        grid mapped from the slimmed grid.
"""
grid_2d_native_y = array_2d_util.array_2d_native_from(
array_2d_slim=grid_2d_slim[:, 0], mask_2d=mask_2d, sub_size=sub_size
)
grid_2d_native_x = array_2d_util.array_2d_native_from(
array_2d_slim=grid_2d_slim[:, 1], mask_2d=mask_2d, sub_size=sub_size
)
return np.stack((grid_2d_native_y, grid_2d_native_x), axis=-1)
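# Illustrative round-trip sketch (not from the original source): for a fully unmasked 2x2 mask with
# sub_size=1, slimming and then mapping back to native recovers the input grid.
#
#     mask = np.full(fill_value=False, shape=(2, 2))
#     grid_native = np.arange(8, dtype="float").reshape(2, 2, 2)
#     grid_slim = grid_2d_slim_from(grid_2d_native=grid_native, mask=mask, sub_size=1)   # shape (4, 2)
#     grid_2d_native_from(grid_2d_slim=grid_slim, mask_2d=mask, sub_size=1)              # shape (2, 2, 2)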
@numba_util.jit()
def grid_2d_slim_upscaled_from(
grid_slim: np.ndarray,
upscale_factor: int,
pixel_scales: Union[float, Tuple[float, float]],
) -> np.ndarray:
"""
From an input slimmed 2D grid, return an upscaled slimmed 2D grid where (y,x) coordinates are added at an
upscaled resolution to each grid coordinate, analogous to a sub-grid.
Parameters
----------
grid_slim
The slimmed grid of (y,x) coordinates over which a square uniform grid is overlaid.
upscale_factor
The upscaled resolution at which the new grid coordinates are computed.
pixel_scales
        The pixel scale of the uniform grid that is laid over the irregular grid of (y,x) coordinates.
"""
grid_2d_slim_upscaled = np.zeros(
shape=(grid_slim.shape[0] * upscale_factor ** 2, 2)
)
upscale_index = 0
y_upscale_half = pixel_scales[0] / 2
y_upscale_step = pixel_scales[0] / upscale_factor
x_upscale_half = pixel_scales[1] / 2
x_upscale_step = pixel_scales[1] / upscale_factor
for slim_index in range(grid_slim.shape[0]):
y_grid = grid_slim[slim_index, 0]
x_grid = grid_slim[slim_index, 1]
for y in range(upscale_factor):
for x in range(upscale_factor):
grid_2d_slim_upscaled[upscale_index, 0] = (
y_grid
+ y_upscale_half
- y * y_upscale_step
- (y_upscale_step / 2.0)
)
grid_2d_slim_upscaled[upscale_index, 1] = (
x_grid
- x_upscale_half
+ x * x_upscale_step
+ (x_upscale_step / 2.0)
)
upscale_index += 1
return grid_2d_slim_upscaled
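# Illustrative sketch (not from the original source): each input coordinate is replaced by
# upscale_factor**2 coordinates laid out uniformly within its pixel, so one coordinate upscaled by a
# factor of 2 yields four.
#
#     grid_slim = np.array([[0.0, 0.0]])
#     grid_2d_slim_upscaled_from(grid_slim=grid_slim, upscale_factor=2, pixel_scales=(1.0, 1.0)).shape
#     # -> (4, 2)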
def grid_2d_of_points_within_radius(
radius: float, centre: Tuple[float, float], grid_2d: np.ndarray
):
y_inside = []
x_inside = []
for i in range(len(grid_2d[:, 0])):
if (grid_2d[i, 0] - centre[0]) ** 2 + (
grid_2d[i, 1] - centre[1]
        ) ** 2 < radius ** 2:  # keep points inside the radius, as the function name implies
y_inside.append(grid_2d[i, 0])
x_inside.append(grid_2d[i, 1])
    # np.asarray's second positional argument is the dtype, so passing x_inside there would raise;
    # wrap both lists instead.
    return np.asarray([y_inside, x_inside])
def compute_polygon_area(points):
x = points[:, 1]
y = points[:, 0]
return 0.5 * np.abs(np.dot(x, np.roll(y, 1)) - np.dot(y, np.roll(x, 1)))
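# A minimal self-contained check of the shoelace formula above (illustrative, not from the original
# source): the (y,x) vertices of the unit square give an area of 1.0.
if __name__ == "__main__":
    unit_square = np.array([[0.0, 0.0], [0.0, 1.0], [1.0, 1.0], [1.0, 0.0]])
    print(compute_polygon_area(points=unit_square))  # 1.0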
| [
"autoarray.geometry.geometry_util.central_scaled_coordinate_2d_from",
"numpy.mean",
"autoarray.structures.arrays.two_d.array_2d_util.array_2d_native_from",
"numpy.roll",
"numpy.asarray",
"autoarray.numba_util.jit",
"numpy.max",
"numpy.subtract",
"numpy.stack",
"numpy.zeros",
"numpy.square",
"autoarray.structures.arrays.two_d.array_2d_util.array_2d_slim_from",
"autoarray.mask.mask_2d_util.total_sub_pixels_2d_from",
"numpy.min",
"numpy.full"
] | [((252, 268), 'autoarray.numba_util.jit', 'numba_util.jit', ([], {}), '()\n', (266, 268), False, 'from autoarray import numba_util\n'), ((824, 840), 'autoarray.numba_util.jit', 'numba_util.jit', ([], {}), '()\n', (838, 840), False, 'from autoarray import numba_util\n'), ((11036, 11052), 'autoarray.numba_util.jit', 'numba_util.jit', ([], {}), '()\n', (11050, 11052), False, 'from autoarray import numba_util\n'), ((14926, 14942), 'autoarray.numba_util.jit', 'numba_util.jit', ([], {}), '()\n', (14940, 14942), False, 'from autoarray import numba_util\n'), ((17600, 17616), 'autoarray.numba_util.jit', 'numba_util.jit', ([], {}), '()\n', (17614, 17616), False, 'from autoarray import numba_util\n'), ((20218, 20234), 'autoarray.numba_util.jit', 'numba_util.jit', ([], {}), '()\n', (20232, 20234), False, 'from autoarray import numba_util\n'), ((22955, 22971), 'autoarray.numba_util.jit', 'numba_util.jit', ([], {}), '()\n', (22969, 22971), False, 'from autoarray import numba_util\n'), ((25401, 25417), 'autoarray.numba_util.jit', 'numba_util.jit', ([], {}), '()\n', (25415, 25417), False, 'from autoarray import numba_util\n'), ((27988, 28004), 'autoarray.numba_util.jit', 'numba_util.jit', ([], {}), '()\n', (28002, 28004), False, 'from autoarray import numba_util\n'), ((30644, 30660), 'autoarray.numba_util.jit', 'numba_util.jit', ([], {}), '()\n', (30658, 30660), False, 'from autoarray import numba_util\n'), ((34662, 34678), 'autoarray.numba_util.jit', 'numba_util.jit', ([], {}), '()\n', (34676, 34678), False, 'from autoarray import numba_util\n'), ((3015, 3071), 'autoarray.mask.mask_2d_util.total_sub_pixels_2d_from', 'mask_2d_util.total_sub_pixels_2d_from', (['mask_2d', 'sub_size'], {}), '(mask_2d, sub_size)\n', (3052, 3071), False, 'from autoarray.mask import mask_2d_util\n'), ((3091, 3128), 'numpy.zeros', 'np.zeros', ([], {'shape': '(total_sub_pixels, 2)'}), '(shape=(total_sub_pixels, 2))\n', (3099, 3128), True, 'import numpy as np\n'), ((3153, 3274), 'autoarray.geometry.geometry_util.central_scaled_coordinate_2d_from', 'geometry_util.central_scaled_coordinate_2d_from', ([], {'shape_native': 'mask_2d.shape', 'pixel_scales': 'pixel_scales', 'origin': 'origin'}), '(shape_native=mask_2d.shape,\n pixel_scales=pixel_scales, origin=origin)\n', (3200, 3274), False, 'from autoarray.geometry import geometry_util\n'), ((14632, 14657), 'numpy.zeros', 'np.zeros', (['(shape_slim, 2)'], {}), '((shape_slim, 2))\n', (14640, 14657), True, 'import numpy as np\n'), ((16936, 16979), 'numpy.zeros', 'np.zeros', (['(grid_scaled_2d_slim.shape[0], 2)'], {}), '((grid_scaled_2d_slim.shape[0], 2))\n', (16944, 16979), True, 'import numpy as np\n'), ((17004, 17124), 'autoarray.geometry.geometry_util.central_scaled_coordinate_2d_from', 'geometry_util.central_scaled_coordinate_2d_from', ([], {'shape_native': 'shape_native', 'pixel_scales': 'pixel_scales', 'origin': 'origin'}), '(shape_native=shape_native,\n pixel_scales=pixel_scales, origin=origin)\n', (17051, 17124), False, 'from autoarray.geometry import geometry_util\n'), ((19548, 19591), 'numpy.zeros', 'np.zeros', (['(grid_scaled_2d_slim.shape[0], 2)'], {}), '((grid_scaled_2d_slim.shape[0], 2))\n', (19556, 19591), True, 'import numpy as np\n'), ((19616, 19736), 'autoarray.geometry.geometry_util.central_scaled_coordinate_2d_from', 'geometry_util.central_scaled_coordinate_2d_from', ([], {'shape_native': 'shape_native', 'pixel_scales': 'pixel_scales', 'origin': 'origin'}), '(shape_native=shape_native,\n pixel_scales=pixel_scales, origin=origin)\n', (19663, 19736), False, 'from 
autoarray.geometry import geometry_util\n'), ((22623, 22661), 'numpy.zeros', 'np.zeros', (['grid_pixels_2d_slim.shape[0]'], {}), '(grid_pixels_2d_slim.shape[0])\n', (22631, 22661), True, 'import numpy as np\n'), ((24778, 24821), 'numpy.zeros', 'np.zeros', (['(grid_pixels_2d_slim.shape[0], 2)'], {}), '((grid_pixels_2d_slim.shape[0], 2))\n', (24786, 24821), True, 'import numpy as np\n'), ((24846, 24966), 'autoarray.geometry.geometry_util.central_scaled_coordinate_2d_from', 'geometry_util.central_scaled_coordinate_2d_from', ([], {'shape_native': 'shape_native', 'pixel_scales': 'pixel_scales', 'origin': 'origin'}), '(shape_native=shape_native,\n pixel_scales=pixel_scales, origin=origin)\n', (24893, 24966), False, 'from autoarray.geometry import geometry_util\n'), ((27341, 27404), 'numpy.zeros', 'np.zeros', (['(grid_scaled_2d.shape[0], grid_scaled_2d.shape[1], 2)'], {}), '((grid_scaled_2d.shape[0], grid_scaled_2d.shape[1], 2))\n', (27349, 27404), True, 'import numpy as np\n'), ((27429, 27549), 'autoarray.geometry.geometry_util.central_scaled_coordinate_2d_from', 'geometry_util.central_scaled_coordinate_2d_from', ([], {'shape_native': 'shape_native', 'pixel_scales': 'pixel_scales', 'origin': 'origin'}), '(shape_native=shape_native,\n pixel_scales=pixel_scales, origin=origin)\n', (27476, 27549), False, 'from autoarray.geometry import geometry_util\n'), ((29283, 29303), 'numpy.zeros', 'np.zeros', (['grid.shape'], {}), '(grid.shape)\n', (29291, 29303), True, 'import numpy as np\n'), ((29366, 29377), 'numpy.zeros', 'np.zeros', (['(2)'], {}), '(2)\n', (29374, 29377), True, 'import numpy as np\n'), ((29402, 29428), 'numpy.mean', 'np.mean', (['border_grid[:, 0]'], {}), '(border_grid[:, 0])\n', (29409, 29428), True, 'import numpy as np\n'), ((29453, 29479), 'numpy.mean', 'np.mean', (['border_grid[:, 1]'], {}), '(border_grid[:, 1])\n', (29460, 29479), True, 'import numpy as np\n'), ((29721, 29746), 'numpy.min', 'np.min', (['border_grid_radii'], {}), '(border_grid_radii)\n', (29727, 29746), True, 'import numpy as np\n'), ((32486, 32596), 'autoarray.structures.arrays.two_d.array_2d_util.array_2d_slim_from', 'array_2d_util.array_2d_slim_from', ([], {'array_2d_native': 'grid_2d_native[:, :, 0]', 'mask_2d': 'mask', 'sub_size': 'sub_size'}), '(array_2d_native=grid_2d_native[:, :, 0],\n mask_2d=mask, sub_size=sub_size)\n', (32518, 32596), False, 'from autoarray.structures.arrays.two_d import array_2d_util\n'), ((32633, 32743), 'autoarray.structures.arrays.two_d.array_2d_util.array_2d_slim_from', 'array_2d_util.array_2d_slim_from', ([], {'array_2d_native': 'grid_2d_native[:, :, 1]', 'mask_2d': 'mask', 'sub_size': 'sub_size'}), '(array_2d_native=grid_2d_native[:, :, 1],\n mask_2d=mask, sub_size=sub_size)\n', (32665, 32743), False, 'from autoarray.structures.arrays.two_d import array_2d_util\n'), ((32770, 32821), 'numpy.stack', 'np.stack', (['(grid_1d_slim_y, grid_1d_slim_x)'], {'axis': '(-1)'}), '((grid_1d_slim_y, grid_1d_slim_x), axis=-1)\n', (32778, 32821), True, 'import numpy as np\n'), ((34318, 34426), 'autoarray.structures.arrays.two_d.array_2d_util.array_2d_native_from', 'array_2d_util.array_2d_native_from', ([], {'array_2d_slim': 'grid_2d_slim[:, 0]', 'mask_2d': 'mask_2d', 'sub_size': 'sub_size'}), '(array_2d_slim=grid_2d_slim[:, 0],\n mask_2d=mask_2d, sub_size=sub_size)\n', (34352, 34426), False, 'from autoarray.structures.arrays.two_d import array_2d_util\n'), ((34465, 34573), 'autoarray.structures.arrays.two_d.array_2d_util.array_2d_native_from', 'array_2d_util.array_2d_native_from', ([], 
{'array_2d_slim': 'grid_2d_slim[:, 1]', 'mask_2d': 'mask_2d', 'sub_size': 'sub_size'}), '(array_2d_slim=grid_2d_slim[:, 1],\n mask_2d=mask_2d, sub_size=sub_size)\n', (34499, 34573), False, 'from autoarray.structures.arrays.two_d import array_2d_util\n'), ((34600, 34655), 'numpy.stack', 'np.stack', (['(grid_2d_native_y, grid_2d_native_x)'], {'axis': '(-1)'}), '((grid_2d_native_y, grid_2d_native_x), axis=-1)\n', (34608, 34655), True, 'import numpy as np\n'), ((35435, 35496), 'numpy.zeros', 'np.zeros', ([], {'shape': '(grid_slim.shape[0] * upscale_factor ** 2, 2)'}), '(shape=(grid_slim.shape[0] * upscale_factor ** 2, 2))\n', (35443, 35496), True, 'import numpy as np\n'), ((36925, 36955), 'numpy.asarray', 'np.asarray', (['y_inside', 'x_inside'], {}), '(y_inside, x_inside)\n', (36935, 36955), True, 'import numpy as np\n'), ((644, 670), 'numpy.max', 'np.max', (['grid_2d_slim[:, 0]'], {}), '(grid_2d_slim[:, 0])\n', (650, 670), True, 'import numpy as np\n'), ((673, 699), 'numpy.min', 'np.min', (['grid_2d_slim[:, 0]'], {}), '(grid_2d_slim[:, 0])\n', (679, 699), True, 'import numpy as np\n'), ((724, 750), 'numpy.max', 'np.max', (['grid_2d_slim[:, 1]'], {}), '(grid_2d_slim[:, 1])\n', (730, 750), True, 'import numpy as np\n'), ((753, 779), 'numpy.min', 'np.min', (['grid_2d_slim[:, 1]'], {}), '(grid_2d_slim[:, 1])\n', (759, 779), True, 'import numpy as np\n'), ((8748, 8793), 'numpy.full', 'np.full', ([], {'fill_value': '(False)', 'shape': 'shape_native'}), '(fill_value=False, shape=shape_native)\n', (8755, 8793), True, 'import numpy as np\n'), ((10888, 10933), 'numpy.full', 'np.full', ([], {'fill_value': '(False)', 'shape': 'shape_native'}), '(fill_value=False, shape=shape_native)\n', (10895, 10933), True, 'import numpy as np\n'), ((29554, 29602), 'numpy.subtract', 'np.subtract', (['border_grid[:, 0]', 'border_origin[0]'], {}), '(border_grid[:, 0], border_origin[0])\n', (29565, 29602), True, 'import numpy as np\n'), ((29628, 29676), 'numpy.subtract', 'np.subtract', (['border_grid[:, 1]', 'border_origin[1]'], {}), '(border_grid[:, 1], border_origin[1])\n', (29639, 29676), True, 'import numpy as np\n'), ((29816, 29857), 'numpy.subtract', 'np.subtract', (['grid[:, 0]', 'border_origin[0]'], {}), '(grid[:, 0], border_origin[0])\n', (29827, 29857), True, 'import numpy as np\n'), ((29883, 29924), 'numpy.subtract', 'np.subtract', (['grid[:, 1]', 'border_origin[1]'], {}), '(grid[:, 1], border_origin[1])\n', (29894, 29924), True, 'import numpy as np\n'), ((30116, 30167), 'numpy.square', 'np.square', (['(grid[pixel_index, 0] - border_grid[:, 0])'], {}), '(grid[pixel_index, 0] - border_grid[:, 0])\n', (30125, 30167), True, 'import numpy as np\n'), ((30187, 30238), 'numpy.square', 'np.square', (['(grid[pixel_index, 1] - border_grid[:, 1])'], {}), '(grid[pixel_index, 1] - border_grid[:, 1])\n', (30196, 30238), True, 'import numpy as np\n'), ((37078, 37091), 'numpy.roll', 'np.roll', (['y', '(1)'], {}), '(y, 1)\n', (37085, 37091), True, 'import numpy as np\n'), ((37105, 37118), 'numpy.roll', 'np.roll', (['x', '(1)'], {}), '(x, 1)\n', (37112, 37118), True, 'import numpy as np\n')] |
# coding: utf-8
import requests, math
import gevent
from gevent.queue import Queue
from gevent import monkey; monkey.patch_all()
from pyquery import PyQuery
class Proxies():
def __init__(self):
self.domestic_gn_url = 'http://www.kuaidaili.com/free/inha/{0}/'
self.domestic_pt_url = 'http://www.kuaidaili.com/free/intr/{0}/'
self.abroad_gn_url = 'http://www.kuaidaili.com/free/outha/{0}/'
self.abroad_pt_url = 'http://www.kuaidaili.com/free/outtr/{0}/'
self.result_arr = []
self.s = requests.Session()
self.headers = {
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36',
'Referer': 'http://www.kuaidaili.com/'
}
def fetch_urls(self, queue, quantity):
while not queue.empty():
url = queue.get()
html = self.s.get(url, headers=self.headers).text
pq = PyQuery(html)
size = pq.find('tbody tr').size()
for index in range(size):
item = pq.find('tbody tr').eq(index)
ip = item.find('td').eq(0).text()
port = item.find('td').eq(1).text()
_type = item.find('td').eq(3).text()
self.result_arr.append({
str(_type).lower(): '{0}://{1}:{2}'.format(str(_type).lower(), ip, port)
})
if len(self.result_arr) >= quantity:
break
def get_proxies(self, quantity, type):
'''
        quantity: number of proxies to fetch
        type: proxy type
            1. domestic high-anonymity proxies
            2. domestic regular proxies
            3. foreign high-anonymity proxies
            4. foreign regular proxies
'''
url_queue = Queue()
need_pages = int(math.ceil(quantity/15))
        # Determine which proxy type was requested
if type == 1:
            # Domestic high-anonymity proxies
base_url = self.domestic_gn_url
elif type == 2:
            # Domestic regular proxies
base_url = self.domestic_pt_url
elif type == 3:
            # Foreign high-anonymity proxies
base_url = self.abroad_gn_url
elif type == 4:
            # Foreign regular proxies
base_url = self.abroad_pt_url
        # Build the page URLs that need to be fetched
for index in range(need_pages):
url = base_url.format(index+1)
url_queue.put(url)
        # Process all URLs using 2 greenlets
gevent_list = []
for index in range(2):
gevent_list.append(
gevent.spawn(self.fetch_urls, url_queue, quantity)
)
gevent.joinall(gevent_list)
def get_result(self):
return self.result_arr
if __name__ == '__main__':
p = Proxies()
p.get_proxies(20, 1)
result = p.get_result()
print(result)
| [
"math.ceil",
"requests.Session",
"gevent.monkey.patch_all",
"gevent.queue.Queue",
"gevent.joinall",
"pyquery.PyQuery",
"gevent.spawn"
] | [((111, 129), 'gevent.monkey.patch_all', 'monkey.patch_all', ([], {}), '()\n', (127, 129), False, 'from gevent import monkey\n'), ((536, 554), 'requests.Session', 'requests.Session', ([], {}), '()\n', (552, 554), False, 'import requests, math\n'), ((1726, 1733), 'gevent.queue.Queue', 'Queue', ([], {}), '()\n', (1731, 1733), False, 'from gevent.queue import Queue\n'), ((2486, 2513), 'gevent.joinall', 'gevent.joinall', (['gevent_list'], {}), '(gevent_list)\n', (2500, 2513), False, 'import gevent\n'), ((978, 991), 'pyquery.PyQuery', 'PyQuery', (['html'], {}), '(html)\n', (985, 991), False, 'from pyquery import PyQuery\n'), ((1759, 1783), 'math.ceil', 'math.ceil', (['(quantity / 15)'], {}), '(quantity / 15)\n', (1768, 1783), False, 'import requests, math\n'), ((2413, 2463), 'gevent.spawn', 'gevent.spawn', (['self.fetch_urls', 'url_queue', 'quantity'], {}), '(self.fetch_urls, url_queue, quantity)\n', (2425, 2463), False, 'import gevent\n')] |
# coding: utf-8
"""
Name: upper_air_humidity.py
Make upper level weather chart.
Usage: python3 upper_air_humidity.py --file <ncfile>
Author: <NAME>
Date: 2022/01/07
"""
import argparse
from ncmagics import fetchtime, japanmap, meteotool
def parse_args() -> dict:
"""parse_args.
set file path.
Args:
Returns:
dict:
"""
parser = argparse.ArgumentParser()
parser.add_argument("-f", "--file", help="set ncfile.", type=str)
p = parser.parse_args()
args = {"file": p.file}
return args
def output_name(ncfile: str, isobaric_surface: int) -> str:
"""output_name.
Args:
ncfile (str): ncfile
isobaric_surface (int): isobaric_surface
Returns:
str:
"""
date_time = fetchtime.fetch_time(ncfile)
outname = (date_time + "_" + str(isobaric_surface))
return outname
def main():
"""main.
"""
args = parse_args()
meteo_tool = meteotool.MeteoTools(args["file"])
lat, lon = meteo_tool.get_lat_lon()
isobaric_surface = (850, 500, 300)
#label_upper = (30, 0)
#lebel_min = (-30, -60)
for i, pressure in enumerate(isobaric_surface):
# get parameter
temp_c = meteo_tool.get_parameter('t', isobaric_surface=pressure) - 273.15
rh = meteo_tool.get_parameter('r', isobaric_surface=pressure)
height_gpm = meteo_tool.get_parameter('gh', isobaric_surface=pressure)
u_wind = meteo_tool.get_parameter('u', isobaric_surface=pressure)
v_wind = meteo_tool.get_parameter('v', isobaric_surface=pressure)
jp_map = japanmap.JpMap()
jp_map.contour_plot(lon, lat, height_gpm)
#jp_map.shade_plot(lon, lat, temp_c,
# label="2m temperature ($^\circ$C)",
# color_bar_label_max=label_upper[i],
# color_bar_label_min=lebel_min[i],
# color_map_type="temperature",
# double_color_bar=True,)
jp_map.shade_plot(lon, lat, rh,
label="relative humidity (%)",
color_bar_label_max=100,
color_bar_label_min=0,
color_map_type="gray",
double_color_bar=False,)
jp_map.vector_plot(lon, lat, u_wind, v_wind,
vector_interval=5, vector_scale=10, mode="wind")
#jp_map.gray_shade(lon, lat, rh,
# label="relative humidity (%)",
# color_bar_label_max=100,
# color_bar_label_min=0,
# )
if pressure == 850:
jp_map.color_line(lon, lat, temp_c, line_value=-6, color='#0000ff')
if pressure == 500:
jp_map.color_line(lon, lat, temp_c, line_value=-36, color='#b22222')
outname = output_name(args["file"], pressure)
print(outname)
jp_map.save_fig(outname, str(pressure) + "hPa")
if __name__ == "__main__":
main()
| [
"ncmagics.japanmap.JpMap",
"ncmagics.fetchtime.fetch_time",
"argparse.ArgumentParser",
"ncmagics.meteotool.MeteoTools"
] | [((366, 391), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (389, 391), False, 'import argparse\n'), ((756, 784), 'ncmagics.fetchtime.fetch_time', 'fetchtime.fetch_time', (['ncfile'], {}), '(ncfile)\n', (776, 784), False, 'from ncmagics import fetchtime, japanmap, meteotool\n'), ((937, 971), 'ncmagics.meteotool.MeteoTools', 'meteotool.MeteoTools', (["args['file']"], {}), "(args['file'])\n", (957, 971), False, 'from ncmagics import fetchtime, japanmap, meteotool\n'), ((1581, 1597), 'ncmagics.japanmap.JpMap', 'japanmap.JpMap', ([], {}), '()\n', (1595, 1597), False, 'from ncmagics import fetchtime, japanmap, meteotool\n')] |
from datetime import date
from django.core.cache import cache
from django.db.models import Q, F
from django.shortcuts import render
from django.shortcuts import get_object_or_404
from django.views.generic import ListView, DetailView
#from silk.profiling.profiler import silk_profile
from config.models import SideBar
from .models import Post, Tag, Category
from comment.models import Comment
class CommonViewMinxin:
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context.update({
'sidebars': self.get_sidebars(),
})
context.update(self.get_navs())
return context
def get_sidebars(self):
return SideBar.objects.filter(status=SideBar.STATUS_SHOW)
def get_navs(self):
categories = Category.objects.filter(status=Category.STATUS_NORMAL)
nav_categories = []
normal_categories = []
for cate in categories:
if cate.is_nav:
nav_categories.append(cate)
else:
normal_categories.append(cate)
return {
'navs': nav_categories,
'categories': normal_categories,
}
class IndexView(CommonViewMinxin, ListView):
queryset = Post.latest_posts()
paginate_by = 5
context_object_name = 'post_list'
template_name = 'blog/list.html'
class CategoryView(IndexView):
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
category_id = self.kwargs.get('category_id')
category = get_object_or_404(Category, pk=category_id)
context.update({
'category': category,
})
return context
def get_queryset(self):
        '''Override the queryset to filter by category.'''
queryset = super().get_queryset()
category_id = self.kwargs.get('category_id')
return queryset.filter(category_id=category_id)
class TagView(IndexView):
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
tag_id = self.kwargs.get('tag_id')
tag = get_object_or_404(Tag, pk=tag_id)
context.update({
'tag': tag,
})
return context
def get_queryset(self):
        '''Override the queryset to filter by tag.'''
queryset = super().get_queryset()
tag_id = self.kwargs.get('tag_id')
return queryset.filter(tag__id=tag_id)
class PostDetailView(CommonViewMinxin, DetailView):
queryset = Post.latest_posts()
template_name = 'blog/detail.html'
context_object_name = 'post'
pk_url_kwarg = 'post_id'
def get(self, request, *args, **kwargs):
response = super().get(request, *args, **kwargs)
self.handle_visited()
return response
def handle_visited(self):
increase_pv = False
increase_uv = False
uid = self.request.uid
pv_key = 'pv:%s:%s' % (uid, self.request.path)
uv_key = 'uv:%s:%s:%s' % (uid, str(date.today()), self.request.path)
if not cache.get(pv_key):
increase_pv = True
            cache.set(pv_key, 1, 1*60)  # valid for 1 minute
if not cache.get(uv_key):
increase_uv = True
cache.set(uv_key, 1, 24*60*60)
if increase_pv and increase_uv:
Post.objects.filter(pk=self.object.id).update(pv=F('pv') + 1,
uv=F('uv') + 1)
elif increase_pv:
Post.objects.filter(pk=self.object.id).update(pv=F('pv') + 1)
        elif increase_uv:
            Post.objects.filter(pk=self.object.id).update(uv=F('uv') + 1)
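# Descriptive note (illustrative, not from the original source): handle_visited() counts page views
# (pv) and daily unique visitors (uv) with per-user cache keys, e.g. for uid "abc" visiting /post/1/
# on 2022-01-07:
#
#     pv:abc:/post/1/                  expires after 1 minute, throttling repeated refreshes
#     uv:abc:2022-01-07:/post/1/       expires after 24 hours, so a user adds at most one uv per day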
class SearchView(IndexView):
def get_context_data(self):
context = super().get_context_data()
context.update({
'keyword': self.request.GET.get('keyword', '')
})
return context
def get_queryset(self):
queryset = super().get_queryset()
keyword = self.request.GET.get('keyword')
if not keyword:
return queryset
        return queryset.filter(Q(title__icontains=keyword) | Q(desc__icontains=keyword))
class AuthorView(IndexView):
def get_queryset(self):
queryset = super().get_queryset()
author_id = self.kwargs.get('owner_id')
return queryset.filter(owner_id=author_id)
'''
def post_list(request, category_id=None, tag_id=None):
tag = None
category = None
if tag_id:
post_list, tag = Post.get_by_tag(tag_id)
elif category_id:
post_list, category=Post.get_by_category(category_id)
else:
post_list = Post.latest_posts()
context = {
'category': category,
'tag': tag,
'post_list': post_list,
'sidebars': SideBar.get_all(),
}
context.update(Category.get_navs())
return render(request, 'blog/list.html', context=context)
def post_detail(request, post_id=None):
try:
post = Post.objects.get(id=post_id)
except Post.DoesNotExist:
raise Http404('Post does not exist!')
context={
'post': post,
'sidebars': SideBar.get_all(),
}
context.update(Category.get_navs())
return render(request, 'blog/detail.html', context=context)
'''
| [
"django.shortcuts.get_object_or_404",
"django.db.models.F",
"datetime.date.today",
"django.core.cache.cache.set",
"django.db.models.Q",
"config.models.SideBar.objects.filter",
"django.core.cache.cache.get"
] | [((653, 703), 'config.models.SideBar.objects.filter', 'SideBar.objects.filter', ([], {'status': 'SideBar.STATUS_SHOW'}), '(status=SideBar.STATUS_SHOW)\n', (675, 703), False, 'from config.models import SideBar\n'), ((1387, 1430), 'django.shortcuts.get_object_or_404', 'get_object_or_404', (['Category'], {'pk': 'category_id'}), '(Category, pk=category_id)\n', (1404, 1430), False, 'from django.shortcuts import get_object_or_404\n'), ((1843, 1876), 'django.shortcuts.get_object_or_404', 'get_object_or_404', (['Tag'], {'pk': 'tag_id'}), '(Tag, pk=tag_id)\n', (1860, 1876), False, 'from django.shortcuts import get_object_or_404\n'), ((2642, 2659), 'django.core.cache.cache.get', 'cache.get', (['pv_key'], {}), '(pv_key)\n', (2651, 2659), False, 'from django.core.cache import cache\n'), ((2686, 2714), 'django.core.cache.cache.set', 'cache.set', (['pv_key', '(1)', '(1 * 60)'], {}), '(pv_key, 1, 1 * 60)\n', (2695, 2714), False, 'from django.core.cache import cache\n'), ((2733, 2750), 'django.core.cache.cache.get', 'cache.get', (['uv_key'], {}), '(uv_key)\n', (2742, 2750), False, 'from django.core.cache import cache\n'), ((2777, 2811), 'django.core.cache.cache.set', 'cache.set', (['uv_key', '(1)', '(24 * 60 * 60)'], {}), '(uv_key, 1, 24 * 60 * 60)\n', (2786, 2811), False, 'from django.core.cache import cache\n'), ((3457, 3484), 'django.db.models.Q', 'Q', ([], {'title__icontains': 'keyword'}), '(title__icontains=keyword)\n', (3458, 3484), False, 'from django.db.models import Q, F\n'), ((3487, 3513), 'django.db.models.Q', 'Q', ([], {'desc__icontains': 'keyword'}), '(desc__icontains=keyword)\n', (3488, 3513), False, 'from django.db.models import Q, F\n'), ((2599, 2611), 'datetime.date.today', 'date.today', ([], {}), '()\n', (2609, 2611), False, 'from datetime import date\n'), ((2895, 2902), 'django.db.models.F', 'F', (['"""pv"""'], {}), "('pv')\n", (2896, 2902), False, 'from django.db.models import Q, F\n'), ((2915, 2922), 'django.db.models.F', 'F', (['"""uv"""'], {}), "('uv')\n", (2916, 2922), False, 'from django.db.models import Q, F\n'), ((3000, 3007), 'django.db.models.F', 'F', (['"""pv"""'], {}), "('pv')\n", (3001, 3007), False, 'from django.db.models import Q, F\n'), ((3085, 3092), 'django.db.models.F', 'F', (['"""uv"""'], {}), "('uv')\n", (3086, 3092), False, 'from django.db.models import Q, F\n')] |
# -*- coding: utf-8 -*-
# Copyright (c) 2014, Vispy Development Team.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
# Adapted from PyQtGraph
import sys
from . import ptime
from .. import config
class Profiler(object):
"""Simple profiler allowing directed, hierarchical measurement of time
intervals.
By default, profilers are disabled. To enable profiling, set the
environment variable `VISPYPROFILE` to a comma-separated list of
fully-qualified names of profiled functions.
Calling a profiler registers a message (defaulting to an increasing
counter) that contains the time elapsed since the last call. When the
profiler is about to be garbage-collected, the messages are passed to the
outer profiler if one is running, or printed to stdout otherwise.
If `delayed` is set to False, messages are immediately printed instead.
Example:
def function(...):
profiler = Profiler()
... do stuff ...
profiler('did stuff')
... do other stuff ...
profiler('did other stuff')
# profiler is garbage-collected and flushed at function end
If this function is a method of class C, setting `VISPYPROFILE` to
"C.function" (without the module name) will enable this profiler.
For regular functions, use the qualified name of the function, stripping
only the initial "vispy.." prefix from the module.
"""
_profilers = (config['profile'].split(",") if config['profile'] is not None
else [])
_depth = 0
_msgs = []
# set this flag to disable all or individual profilers at runtime
disable = False
class DisabledProfiler(object):
def __init__(self, *args, **kwds):
pass
def __call__(self, *args):
pass
def finish(self):
pass
def mark(self, msg=None):
pass
_disabled_profiler = DisabledProfiler()
def __new__(cls, msg=None, disabled='env', delayed=True):
"""Optionally create a new profiler based on caller's qualname.
"""
if (disabled is True or
(disabled == 'env' and len(cls._profilers) == 0)):
return cls._disabled_profiler
# determine the qualified name of the caller function
caller_frame = sys._getframe(1)
try:
caller_object_type = type(caller_frame.f_locals["self"])
except KeyError: # we are in a regular function
qualifier = caller_frame.f_globals["__name__"].split(".", 1)[1]
else: # we are in a method
qualifier = caller_object_type.__name__
func_qualname = qualifier + "." + caller_frame.f_code.co_name
if (disabled == 'env' and func_qualname not in cls._profilers and
'all' not in cls._profilers): # don't do anything
return cls._disabled_profiler
# create an actual profiling object
cls._depth += 1
obj = super(Profiler, cls).__new__(cls)
obj._name = msg or func_qualname
obj._delayed = delayed
obj._mark_count = 0
obj._finished = False
obj._firstTime = obj._last_time = ptime.time()
obj._new_msg("> Entering " + obj._name)
return obj
def __call__(self, msg=None, *args):
"""Register or print a new message with timing information.
"""
if self.disable:
return
if msg is None:
msg = str(self._mark_count)
self._mark_count += 1
new_time = ptime.time()
elapsed = (new_time - self._last_time) * 1000
self._new_msg(" " + msg + ": %0.4f ms", *(args + (elapsed,)))
self._last_time = new_time
def mark(self, msg=None):
self(msg)
def _new_msg(self, msg, *args):
msg = " " * (self._depth - 1) + msg
if self._delayed:
self._msgs.append((msg, args))
else:
self.flush()
print(msg % args)
def __del__(self):
self.finish()
def finish(self, msg=None):
"""Add a final message; flush the message list if no parent profiler.
"""
if self._finished or self.disable:
return
self._finished = True
if msg is not None:
self(msg)
self._new_msg("< Exiting %s, total time: %0.4f ms",
self._name, (ptime.time() - self._firstTime) * 1000)
type(self)._depth -= 1
if self._depth < 1:
self.flush()
def flush(self):
if self._msgs:
print("\n".join([m[0] % m[1] for m in self._msgs]))
type(self)._msgs = []
| [
"sys._getframe"
] | [((2435, 2451), 'sys._getframe', 'sys._getframe', (['(1)'], {}), '(1)\n', (2448, 2451), False, 'import sys\n')] |
from django.contrib.auth import get_user_model
from djangosaml2idp.processors import BaseProcessor
User = get_user_model()
class TestBaseProcessor:
def test_extract_user_id_configure_by_user_class(self):
user = User()
user.USERNAME_FIELD = 'email'
user.email = 'test_email'
assert BaseProcessor('entity-id').get_user_id(user) == 'test_email'
def test_extract_user_id_configure_by_settings(self, settings):
"""Should use `settings.SAML_IDP_DJANGO_USERNAME_FIELD` to determine the user id field"""
settings.SAML_IDP_DJANGO_USERNAME_FIELD = 'first_name'
user = User()
user.first_name = 'test_first_name'
assert BaseProcessor('entity-id').get_user_id(user) == 'test_first_name'
| [
"django.contrib.auth.get_user_model",
"djangosaml2idp.processors.BaseProcessor"
] | [((107, 123), 'django.contrib.auth.get_user_model', 'get_user_model', ([], {}), '()\n', (121, 123), False, 'from django.contrib.auth import get_user_model\n'), ((323, 349), 'djangosaml2idp.processors.BaseProcessor', 'BaseProcessor', (['"""entity-id"""'], {}), "('entity-id')\n", (336, 349), False, 'from djangosaml2idp.processors import BaseProcessor\n'), ((698, 724), 'djangosaml2idp.processors.BaseProcessor', 'BaseProcessor', (['"""entity-id"""'], {}), "('entity-id')\n", (711, 724), False, 'from djangosaml2idp.processors import BaseProcessor\n')] |
from datetime import datetime
from typing import Any, List, Optional, Union
from pydantic import BaseModel, Field, HttpUrl, validator
from pydantic.dataclasses import dataclass
class Index(BaseModel):
id: str
name: str
time_gate: HttpUrl = Field(alias="timegate")
cdx_api: HttpUrl = Field(alias="cdx-api")
@dataclass(frozen=True)
class ResultBody:
mime_detected: Optional[str]
data: Optional[str]
text: Optional[List[str]]
@dataclass(frozen=True)
class ResultMeta:
# todo: these are still raw strings
warc_request_meta: Optional[str]
response_header: Optional[str]
class Result(BaseModel):
url_key: str = Field(alias="urlkey")
timestamp: datetime
url: str
mime: str
mime_detected: str = Field(alias="mime-detected")
status: int
digest: str
length: int
offset: int
filename: str
languages: Optional[str]
encoding: Optional[str]
index_id: Optional[str]
body: Optional[ResultBody]
meta: Optional[ResultMeta]
@validator("timestamp", pre=True)
def parse_timestamp(cls, value: Any) -> Union[datetime, Any]:
if isinstance(value, str):
datetime_value = datetime.strptime(value, "%Y%m%d%H%M%S")
return datetime_value
return value
class SearchPagesRequest(BaseModel):
"""Request existing pages on one index for a given url."""
index: Index
url: str
show_num_pages: str = Field(alias="showNumPages", default="true", const=True)
output: str = "json"
class SearchPagesResponse(BaseModel):
"""Response with the total number of pages in this index for a given url."""
index: Index
url: str
pages: int
class SearchIndexRequest(BaseModel):
"""One page that contains records to be fetched."""
index: Index
url: str
page: int
output: str = "json"
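if __name__ == "__main__":
    # Illustrative sketch, not part of the original module. The index values are made up, and the
    # snippet assumes pydantic v1 behaviour (parse_obj, populate-by-alias, .json()).
    index = Index.parse_obj({
        "id": "CC-MAIN-2021-04",
        "name": "January 2021 Index",
        "timegate": "https://index.commoncrawl.org/CC-MAIN-2021-04/",
        "cdx-api": "https://index.commoncrawl.org/CC-MAIN-2021-04-index",
    })
    request = SearchPagesRequest(index=index, url="example.com/*")
    print(request.json(by_alias=True))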
| [
"datetime.datetime.strptime",
"pydantic.Field",
"pydantic.validator",
"pydantic.dataclasses.dataclass"
] | [((328, 350), 'pydantic.dataclasses.dataclass', 'dataclass', ([], {'frozen': '(True)'}), '(frozen=True)\n', (337, 350), False, 'from pydantic.dataclasses import dataclass\n'), ((459, 481), 'pydantic.dataclasses.dataclass', 'dataclass', ([], {'frozen': '(True)'}), '(frozen=True)\n', (468, 481), False, 'from pydantic.dataclasses import dataclass\n'), ((255, 278), 'pydantic.Field', 'Field', ([], {'alias': '"""timegate"""'}), "(alias='timegate')\n", (260, 278), False, 'from pydantic import BaseModel, Field, HttpUrl, validator\n'), ((302, 324), 'pydantic.Field', 'Field', ([], {'alias': '"""cdx-api"""'}), "(alias='cdx-api')\n", (307, 324), False, 'from pydantic import BaseModel, Field, HttpUrl, validator\n'), ((658, 679), 'pydantic.Field', 'Field', ([], {'alias': '"""urlkey"""'}), "(alias='urlkey')\n", (663, 679), False, 'from pydantic import BaseModel, Field, HttpUrl, validator\n'), ((756, 784), 'pydantic.Field', 'Field', ([], {'alias': '"""mime-detected"""'}), "(alias='mime-detected')\n", (761, 784), False, 'from pydantic import BaseModel, Field, HttpUrl, validator\n'), ((1020, 1052), 'pydantic.validator', 'validator', (['"""timestamp"""'], {'pre': '(True)'}), "('timestamp', pre=True)\n", (1029, 1052), False, 'from pydantic import BaseModel, Field, HttpUrl, validator\n'), ((1438, 1493), 'pydantic.Field', 'Field', ([], {'alias': '"""showNumPages"""', 'default': '"""true"""', 'const': '(True)'}), "(alias='showNumPages', default='true', const=True)\n", (1443, 1493), False, 'from pydantic import BaseModel, Field, HttpUrl, validator\n'), ((1183, 1223), 'datetime.datetime.strptime', 'datetime.strptime', (['value', '"""%Y%m%d%H%M%S"""'], {}), "(value, '%Y%m%d%H%M%S')\n", (1200, 1223), False, 'from datetime import datetime\n')] |
import re
import os
from bs4 import BeautifulSoup
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.core.files import File
from pages.models import Page, Image
PEP_TEMPLATE = 'pages/pep-page.html'
pep_url = lambda num: 'dev/peps/pep-{}/'.format(num)
def check_paths():
""" Checks to ensure our PEP_REPO_PATH is setup correctly """
if not hasattr(settings, 'PEP_REPO_PATH'):
raise ImproperlyConfigured("No PEP_REPO_PATH in settings")
if not os.path.exists(settings.PEP_REPO_PATH):
raise ImproperlyConfigured("PEP_REPO_PATH in settings does not exist")
def convert_pep0():
"""
Take existing generated pep-0000.html and convert to something suitable
for a Python.org Page returns the core body HTML necessary only
"""
check_paths()
pep0_path = os.path.join(settings.PEP_REPO_PATH, 'pep-0000.html')
pep0_content = open(pep0_path).read()
soup = BeautifulSoup(pep0_content)
body_children = list(soup.body.children)
# Grab header and PEP body
header = body_children[3]
pep_content = body_children[7]
# Fix PEP links
body_links = pep_content.find_all("a")
pep_href_re = re.compile(r'pep-(\d+)\.html')
for b in body_links:
m = pep_href_re.search(b.attrs['href'])
# Skip anything not matching 'pep-XXXX.html'
if not m:
continue
b.attrs['href'] = '/dev/peps/pep-{}/'.format(m.group(1))
# Remove Version from header
header_rows = header.find_all('th')
for t in header_rows:
if 'Version:' in t.text and 'N/A' in t.next_sibling.text:
t.parent.extract()
return ''.join([header.prettify(), pep_content.prettify()])
def get_pep0_page(commit=True):
"""
Using convert_pep0 above, create a CMS ready pep0 page and return it
pep0 is used as the directory index, but it's also an actual pep, so we
return both Page objects.
"""
pep0_content = convert_pep0()
pep0_page, _ = Page.objects.get_or_create(path='dev/peps/')
pep0000_page, _ = Page.objects.get_or_create(path='dev/peps/pep-0000/')
for page in [pep0_page, pep0000_page]:
page.content = pep0_content
page.content_markup_type = 'html'
page.title = "PEP 0 -- Index of Python Enhancement Proposals (PEPs)"
page.template_name = PEP_TEMPLATE
if commit:
page.save()
return pep0_page, pep0000_page
def fix_headers(soup, data):
""" Remove empty or unwanted headers and find our title """
header_rows = soup.find_all('th')
for t in header_rows:
if 'Version:' in t.text:
if t.next_sibling.text == '$Revision$':
t.parent.extract()
if t.next_sibling.text == '':
t.parent.extract()
if 'Last-Modified:' in t.text:
            if '$Date$' in t.next_sibling.text:
t.parent.extract()
if t.next_sibling.text == '':
t.parent.extract()
if t.text == 'Title:':
data['title'] = t.next_sibling.text
if t.text == 'Content-Type:':
t.parent.extract()
if 'Version:' in t.text and 'N/A' in t.next_sibling.text:
t.parent.extract()
return soup, data
def convert_pep_page(pep_number, content):
"""
Handle different formats that pep2html.py outputs
"""
check_paths()
data = {
'title': None,
}
if '<html>' in content:
soup = BeautifulSoup(content)
data['title'] = soup.title.text
if not re.search(r'PEP \d+', data['title']):
data['title'] = 'PEP {} -- {}'.format(
pep_number,
soup.title.text,
)
header = soup.body.find('div', class_="header")
header, data = fix_headers(header, data)
data['header'] = header.prettify()
main_content = soup.body.find('div', class_="content")
data['main_content'] = main_content.prettify()
data['content'] = ''.join([
data['header'],
data['main_content']
])
else:
soup = BeautifulSoup(content)
soup, data = fix_headers(soup, data)
if not data['title']:
data['title'] = "PEP {} -- ".format(pep_number)
else:
if not re.search(r'PEP \d+', data['title']):
data['title'] = "PEP {} -- {}".format(
pep_number,
data['title'],
)
data['content'] = soup.prettify()
# Fix PEP links
pep_content = BeautifulSoup(data['content'])
body_links = pep_content.find_all("a")
pep_href_re = re.compile(r'pep-(\d+)\.html')
for b in body_links:
m = pep_href_re.search(b.attrs['href'])
# Skip anything not matching 'pep-XXXX.html'
if not m:
continue
b.attrs['href'] = '/dev/peps/pep-{}/'.format(m.group(1))
data['content'] = pep_content.prettify()
hg_link = "https://hg.python.org/peps/file/tip/pep-{0}.txt".format(pep_number)
data['content'] += """Source: <a href="{0}">{0}</a>""".format(hg_link)
return data
def get_pep_page(pep_number, commit=True):
"""
Given a pep_number retrieve original PEP source text, rst, or html.
Get or create the associated Page and return it
"""
pep_path = os.path.join(settings.PEP_REPO_PATH, 'pep-{}.html'.format(pep_number))
    if not os.path.exists(pep_path):
        print("PEP Path '{}' does not exist, skipping".format(pep_path))
        return None
pep_content = convert_pep_page(pep_number, open(pep_path).read())
pep_page, _ = Page.objects.get_or_create(path=pep_url(pep_number))
# Remove leading zeros from PEP number for display purposes
pep_number_string = str(pep_number)
pep_number_string = re.sub(r'^0+', '', pep_number_string)
pep_page.title = pep_content['title']
pep_page.content = pep_content['content']
pep_page.content_markup_type = 'html'
pep_page.template_name = PEP_TEMPLATE
if commit:
pep_page.save()
return pep_page
def add_pep_image(pep_number, path):
image_path = os.path.join(settings.PEP_REPO_PATH, path)
    if not os.path.exists(image_path):
        print("Image Path '{}' does not exist, skipping".format(image_path))
        return None
try:
page = Page.objects.get(path=pep_url(pep_number))
except Page.DoesNotExist:
print("Could not find backing PEP {}".format(pep_number))
return
# Find existing images, we have to loop here as we can't use the ORM
# to query against image__path
existing_images = Image.objects.filter(page=page)
MISSING = False
FOUND = False
for image in existing_images:
image_root_path = os.path.join(settings.MEDIA_ROOT, page.path, path)
if image.image.path.endswith(path):
FOUND = True
# File is missing on disk, recreate
if not os.path.exists(image_root_path):
MISSING = image
break
if not FOUND or MISSING:
image = None
if MISSING:
image = MISSING
else:
image = Image(page=page)
with open(image_path, 'rb') as image_obj:
image.image.save(path, File(image_obj))
image.save()
# Old images used to live alongside html, but now they're in different
# places, so update the page accordingly.
soup = BeautifulSoup(page.content.raw)
for img_tag in soup.findAll('img'):
if img_tag['src'] == path:
img_tag['src'] = os.path.join(settings.MEDIA_URL, page.path, path)
page.content.raw = soup.prettify()
page.save()
return image
def get_peps_rss():
rss_feed = os.path.join(settings.PEP_REPO_PATH, 'peps.rss')
if not os.path.exists(rss_feed):
return
page, _ = Page.objects.get_or_create(
path="dev/peps/peps.rss",
template_name="pages/raw.html",
)
with open(rss_feed, "r") as rss_content:
content = rss_content.read()
page.content = content
page.is_published = True
page.content_type = "application/rss+xml"
page.save()
return page
| [
"os.path.exists",
"django.core.files.File",
"re.compile",
"os.path.join",
"pages.models.Image.objects.filter",
"pages.models.Image",
"bs4.BeautifulSoup",
"re.sub",
"pages.models.Page.objects.get_or_create",
"django.core.exceptions.ImproperlyConfigured",
"re.search"
] | [((854, 907), 'os.path.join', 'os.path.join', (['settings.PEP_REPO_PATH', '"""pep-0000.html"""'], {}), "(settings.PEP_REPO_PATH, 'pep-0000.html')\n", (866, 907), False, 'import os\n'), ((962, 989), 'bs4.BeautifulSoup', 'BeautifulSoup', (['pep0_content'], {}), '(pep0_content)\n', (975, 989), False, 'from bs4 import BeautifulSoup\n'), ((1216, 1247), 're.compile', 're.compile', (['"""pep-(\\\\d+)\\\\.html"""'], {}), "('pep-(\\\\d+)\\\\.html')\n", (1226, 1247), False, 'import re\n'), ((2025, 2069), 'pages.models.Page.objects.get_or_create', 'Page.objects.get_or_create', ([], {'path': '"""dev/peps/"""'}), "(path='dev/peps/')\n", (2051, 2069), False, 'from pages.models import Page, Image\n'), ((2092, 2145), 'pages.models.Page.objects.get_or_create', 'Page.objects.get_or_create', ([], {'path': '"""dev/peps/pep-0000/"""'}), "(path='dev/peps/pep-0000/')\n", (2118, 2145), False, 'from pages.models import Page, Image\n'), ((4605, 4635), 'bs4.BeautifulSoup', 'BeautifulSoup', (["data['content']"], {}), "(data['content'])\n", (4618, 4635), False, 'from bs4 import BeautifulSoup\n'), ((4698, 4729), 're.compile', 're.compile', (['"""pep-(\\\\d+)\\\\.html"""'], {}), "('pep-(\\\\d+)\\\\.html')\n", (4708, 4729), False, 'import re\n'), ((5836, 5872), 're.sub', 're.sub', (['"""^0+"""', '""""""', 'pep_number_string'], {}), "('^0+', '', pep_number_string)\n", (5842, 5872), False, 'import re\n'), ((6165, 6207), 'os.path.join', 'os.path.join', (['settings.PEP_REPO_PATH', 'path'], {}), '(settings.PEP_REPO_PATH, path)\n', (6177, 6207), False, 'import os\n'), ((6634, 6665), 'pages.models.Image.objects.filter', 'Image.objects.filter', ([], {'page': 'page'}), '(page=page)\n', (6654, 6665), False, 'from pages.models import Page, Image\n'), ((7444, 7475), 'bs4.BeautifulSoup', 'BeautifulSoup', (['page.content.raw'], {}), '(page.content.raw)\n', (7457, 7475), False, 'from bs4 import BeautifulSoup\n'), ((7741, 7789), 'os.path.join', 'os.path.join', (['settings.PEP_REPO_PATH', '"""peps.rss"""'], {}), "(settings.PEP_REPO_PATH, 'peps.rss')\n", (7753, 7789), False, 'import os\n'), ((7857, 7946), 'pages.models.Page.objects.get_or_create', 'Page.objects.get_or_create', ([], {'path': '"""dev/peps/peps.rss"""', 'template_name': '"""pages/raw.html"""'}), "(path='dev/peps/peps.rss', template_name=\n 'pages/raw.html')\n", (7883, 7946), False, 'from pages.models import Page, Image\n'), ((453, 505), 'django.core.exceptions.ImproperlyConfigured', 'ImproperlyConfigured', (['"""No PEP_REPO_PATH in settings"""'], {}), "('No PEP_REPO_PATH in settings')\n", (473, 505), False, 'from django.core.exceptions import ImproperlyConfigured\n'), ((518, 556), 'os.path.exists', 'os.path.exists', (['settings.PEP_REPO_PATH'], {}), '(settings.PEP_REPO_PATH)\n', (532, 556), False, 'import os\n'), ((572, 636), 'django.core.exceptions.ImproperlyConfigured', 'ImproperlyConfigured', (['"""PEP_REPO_PATH in settings does not exist"""'], {}), "('PEP_REPO_PATH in settings does not exist')\n", (592, 636), False, 'from django.core.exceptions import ImproperlyConfigured\n'), ((3507, 3529), 'bs4.BeautifulSoup', 'BeautifulSoup', (['content'], {}), '(content)\n', (3520, 3529), False, 'from bs4 import BeautifulSoup\n'), ((4153, 4175), 'bs4.BeautifulSoup', 'BeautifulSoup', (['content'], {}), '(content)\n', (4166, 4175), False, 'from bs4 import BeautifulSoup\n'), ((5465, 5489), 'os.path.exists', 'os.path.exists', (['pep_path'], {}), '(pep_path)\n', (5479, 5489), False, 'import os\n'), ((6219, 6245), 'os.path.exists', 'os.path.exists', (['image_path'], {}), 
'(image_path)\n', (6233, 6245), False, 'import os\n'), ((6765, 6815), 'os.path.join', 'os.path.join', (['settings.MEDIA_ROOT', 'page.path', 'path'], {}), '(settings.MEDIA_ROOT, page.path, path)\n', (6777, 6815), False, 'import os\n'), ((7801, 7825), 'os.path.exists', 'os.path.exists', (['rss_feed'], {}), '(rss_feed)\n', (7815, 7825), False, 'import os\n'), ((3586, 3622), 're.search', 're.search', (['"""PEP \\\\d+"""', "data['title']"], {}), "('PEP \\\\d+', data['title'])\n", (3595, 3622), False, 'import re\n'), ((7170, 7186), 'pages.models.Image', 'Image', ([], {'page': 'page'}), '(page=page)\n', (7175, 7186), False, 'from pages.models import Page, Image\n'), ((7580, 7629), 'os.path.join', 'os.path.join', (['settings.MEDIA_URL', 'page.path', 'path'], {}), '(settings.MEDIA_URL, page.path, path)\n', (7592, 7629), False, 'import os\n'), ((4345, 4381), 're.search', 're.search', (['"""PEP \\\\d+"""', "data['title']"], {}), "('PEP \\\\d+', data['title'])\n", (4354, 4381), False, 'import re\n'), ((6953, 6984), 'os.path.exists', 'os.path.exists', (['image_root_path'], {}), '(image_root_path)\n', (6967, 6984), False, 'import os\n'), ((7273, 7288), 'django.core.files.File', 'File', (['image_obj'], {}), '(image_obj)\n', (7277, 7288), False, 'from django.core.files import File\n')] |
from flask import Flask
app = Flask(__name__, static_folder='static')
from app import routes
| [
"flask.Flask"
] | [((31, 70), 'flask.Flask', 'Flask', (['__name__'], {'static_folder': '"""static"""'}), "(__name__, static_folder='static')\n", (36, 70), False, 'from flask import Flask\n')] |
import logging
import george
import numpy as np
from robo.priors.default_priors import DefaultPrior
from robo.models.gaussian_process import GaussianProcess
from robo.models.gaussian_process_mcmc import GaussianProcessMCMC
from robo.maximizers.random_sampling import RandomSampling
from robo.maximizers.scipy_optimizer import SciPyOptimizer
from robo.maximizers.differential_evolution import DifferentialEvolution
from robo.solver.bayesian_optimization import BayesianOptimization
from robo.acquisition_functions.information_gain import InformationGain
from robo.acquisition_functions.ei import EI
from robo.acquisition_functions.marginalization import MarginalizationGPMCMC
from robo.initial_design import init_latin_hypercube_sampling
logger = logging.getLogger(__name__)
def entropy_search(objective_function, lower, upper, num_iterations=30,
maximizer="random", model="gp_mcmc",
n_init=3, output_path=None, rng=None):
"""
Entropy search for global black box optimization problems. This is a reimplemenation of the entropy search
algorithm by Henning and Schuler[1].
[1] Entropy search for information-efficient global optimization.
<NAME> and <NAME>.
JMLR, (1), 2012.
Parameters
----------
objective_function: function
The objective function that is minimized. This function gets a numpy array (D,) as input and returns
the function value (scalar)
lower: np.ndarray (D,)
The lower bound of the search space
upper: np.ndarray (D,)
The upper bound of the search space
num_iterations: int
The number of iterations (initial design + BO)
maximizer: {"random", "scipy", "differential_evolution"}
Defines how the acquisition function is maximized.
model: {"gp", "gp_mcmc"}
The model for the objective function.
n_init: int
Number of points for the initial design. Make sure that it is <= num_iterations.
output_path: string
Specifies the path where the intermediate output after each iteration will be saved.
If None no output will be saved to disk.
rng: numpy.random.RandomState
Random number generator
Returns
-------
dict with all results
"""
    assert upper.shape[0] == lower.shape[0], "Dimension mismatch"
    assert np.all(lower < upper), "Lower bound >= upper bound"
    assert n_init <= num_iterations, "Number of initial design points has to be <= the number of iterations"
if rng is None:
rng = np.random.RandomState(np.random.randint(0, 10000))
cov_amp = 2
n_dims = lower.shape[0]
initial_ls = np.ones([n_dims])
exp_kernel = george.kernels.Matern52Kernel(initial_ls,
ndim=n_dims)
kernel = cov_amp * exp_kernel
prior = DefaultPrior(len(kernel) + 1)
n_hypers = 3 * len(kernel)
if n_hypers % 2 == 1:
n_hypers += 1
if model == "gp":
gp = GaussianProcess(kernel, prior=prior, rng=rng,
normalize_output=False, normalize_input=True,
lower=lower, upper=upper)
elif model == "gp_mcmc":
gp = GaussianProcessMCMC(kernel, prior=prior,
n_hypers=n_hypers,
chain_length=200,
burnin_steps=100,
normalize_input=True,
normalize_output=False,
rng=rng, lower=lower, upper=upper)
else:
print("ERROR: %s is not a valid model!" % model)
return
a = InformationGain(gp, lower=lower, upper=upper, sampling_acquisition=EI)
if model == "gp":
acquisition_func = a
elif model == "gp_mcmc":
acquisition_func = MarginalizationGPMCMC(a)
if maximizer == "random":
max_func = RandomSampling(acquisition_func, lower, upper, rng=rng)
elif maximizer == "scipy":
max_func = SciPyOptimizer(acquisition_func, lower, upper, rng=rng)
elif maximizer == "differential_evolution":
max_func = DifferentialEvolution(acquisition_func, lower, upper, rng=rng)
else:
print("ERROR: %s is not a valid function to maximize the acquisition function!" % maximizer)
return
bo = BayesianOptimization(objective_function, lower, upper, acquisition_func, gp, max_func,
initial_design=init_latin_hypercube_sampling,
initial_points=n_init, rng=rng, output_path=output_path)
x_best, f_min = bo.run(num_iterations)
results = dict()
results["x_opt"] = x_best
results["f_opt"] = f_min
results["incumbents"] = [inc for inc in bo.incumbents]
results["incumbent_values"] = [val for val in bo.incumbents_values]
results["runtime"] = bo.runtime
results["overhead"] = bo.time_overhead
results["X"] = [x.tolist() for x in bo.X]
results["y"] = [y for y in bo.y]
return results
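if __name__ == "__main__":
    # A minimal usage sketch, not part of the original RoBO module. It minimises a toy 1-D quadratic
    # with a small budget, using the plain GP model to keep the run fast.
    def objective(x):
        return float((x[0] - 0.3) ** 2)

    lower = np.array([0.0])
    upper = np.array([1.0])
    results = entropy_search(objective, lower, upper, num_iterations=10, model="gp", n_init=3)
    print(results["x_opt"], results["f_opt"])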
| [
"logging.getLogger",
"george.kernels.Matern52Kernel",
"numpy.ones",
"robo.maximizers.random_sampling.RandomSampling",
"robo.models.gaussian_process_mcmc.GaussianProcessMCMC",
"robo.maximizers.differential_evolution.DifferentialEvolution",
"robo.acquisition_functions.information_gain.InformationGain",
"numpy.random.randint",
"robo.models.gaussian_process.GaussianProcess",
"robo.solver.bayesian_optimization.BayesianOptimization",
"robo.acquisition_functions.marginalization.MarginalizationGPMCMC",
"numpy.all",
"robo.maximizers.scipy_optimizer.SciPyOptimizer"
] | [((748, 775), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (765, 775), False, 'import logging\n'), ((2351, 2372), 'numpy.all', 'np.all', (['(lower < upper)'], {}), '(lower < upper)\n', (2357, 2372), True, 'import numpy as np\n'), ((2665, 2682), 'numpy.ones', 'np.ones', (['[n_dims]'], {}), '([n_dims])\n', (2672, 2682), True, 'import numpy as np\n'), ((2700, 2754), 'george.kernels.Matern52Kernel', 'george.kernels.Matern52Kernel', (['initial_ls'], {'ndim': 'n_dims'}), '(initial_ls, ndim=n_dims)\n', (2729, 2754), False, 'import george\n'), ((3679, 3749), 'robo.acquisition_functions.information_gain.InformationGain', 'InformationGain', (['gp'], {'lower': 'lower', 'upper': 'upper', 'sampling_acquisition': 'EI'}), '(gp, lower=lower, upper=upper, sampling_acquisition=EI)\n', (3694, 3749), False, 'from robo.acquisition_functions.information_gain import InformationGain\n'), ((4361, 4559), 'robo.solver.bayesian_optimization.BayesianOptimization', 'BayesianOptimization', (['objective_function', 'lower', 'upper', 'acquisition_func', 'gp', 'max_func'], {'initial_design': 'init_latin_hypercube_sampling', 'initial_points': 'n_init', 'rng': 'rng', 'output_path': 'output_path'}), '(objective_function, lower, upper, acquisition_func, gp,\n max_func, initial_design=init_latin_hypercube_sampling, initial_points=\n n_init, rng=rng, output_path=output_path)\n', (4381, 4559), False, 'from robo.solver.bayesian_optimization import BayesianOptimization\n'), ((2995, 3116), 'robo.models.gaussian_process.GaussianProcess', 'GaussianProcess', (['kernel'], {'prior': 'prior', 'rng': 'rng', 'normalize_output': '(False)', 'normalize_input': '(True)', 'lower': 'lower', 'upper': 'upper'}), '(kernel, prior=prior, rng=rng, normalize_output=False,\n normalize_input=True, lower=lower, upper=upper)\n', (3010, 3116), False, 'from robo.models.gaussian_process import GaussianProcess\n'), ((3933, 3988), 'robo.maximizers.random_sampling.RandomSampling', 'RandomSampling', (['acquisition_func', 'lower', 'upper'], {'rng': 'rng'}), '(acquisition_func, lower, upper, rng=rng)\n', (3947, 3988), False, 'from robo.maximizers.random_sampling import RandomSampling\n'), ((2573, 2600), 'numpy.random.randint', 'np.random.randint', (['(0)', '(10000)'], {}), '(0, 10000)\n', (2590, 2600), True, 'import numpy as np\n'), ((3213, 3398), 'robo.models.gaussian_process_mcmc.GaussianProcessMCMC', 'GaussianProcessMCMC', (['kernel'], {'prior': 'prior', 'n_hypers': 'n_hypers', 'chain_length': '(200)', 'burnin_steps': '(100)', 'normalize_input': '(True)', 'normalize_output': '(False)', 'rng': 'rng', 'lower': 'lower', 'upper': 'upper'}), '(kernel, prior=prior, n_hypers=n_hypers, chain_length=\n 200, burnin_steps=100, normalize_input=True, normalize_output=False,\n rng=rng, lower=lower, upper=upper)\n', (3232, 3398), False, 'from robo.models.gaussian_process_mcmc import GaussianProcessMCMC\n'), ((3858, 3882), 'robo.acquisition_functions.marginalization.MarginalizationGPMCMC', 'MarginalizationGPMCMC', (['a'], {}), '(a)\n', (3879, 3882), False, 'from robo.acquisition_functions.marginalization import MarginalizationGPMCMC\n'), ((4039, 4094), 'robo.maximizers.scipy_optimizer.SciPyOptimizer', 'SciPyOptimizer', (['acquisition_func', 'lower', 'upper'], {'rng': 'rng'}), '(acquisition_func, lower, upper, rng=rng)\n', (4053, 4094), False, 'from robo.maximizers.scipy_optimizer import SciPyOptimizer\n'), ((4162, 4224), 'robo.maximizers.differential_evolution.DifferentialEvolution', 'DifferentialEvolution', (['acquisition_func', 'lower', 
'upper'], {'rng': 'rng'}), '(acquisition_func, lower, upper, rng=rng)\n', (4183, 4224), False, 'from robo.maximizers.differential_evolution import DifferentialEvolution\n')] |
# Generated by Django 3.1.13 on 2021-10-29 11:07
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('app', '0095_bisericapage_utitle'),
]
operations = [
migrations.AddField(
model_name='bisericapage',
name='datare_an',
field=models.IntegerField(blank=True, null=True),
),
]
| [
"django.db.models.IntegerField"
] | [((341, 383), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (360, 383), False, 'from django.db import migrations, models\n')] |
"""
Functions for loading input data.
Author: <NAME> <<EMAIL>>
"""
import os
import numpy as np
def load_img(path: str, img_nums: list, shape: tuple) -> np.array:
"""
    Loads images in human-readable format.
    Args:
        path:
            The path to the folder with the image files.
        img_nums:
            A list with the numbers of the images we want to load.
        shape:
            The shape of a single image.
    Returns:
        The images as a numpy array of shape (M, *shape).
"""
images = np.zeros((len(img_nums), *shape), dtype=float)
for idx, i in enumerate(img_nums):
file = os.path.join(path, "image" + str(i))
with open(file, "r") as f:
data = [float(pixel) for pixel in f.readlines()[0].split(",")[:-1]]
images[idx, :, :] = np.array(data).reshape(*shape)
return images
def load_mnist_human_readable(path: str, img_nums: list) -> np.array:
"""
    Loads MNIST images from the neurify dataset.
    Args:
        path:
            The path to the folder with the MNIST images.
        img_nums:
            A list with the numbers of the images we want to load.
    Returns:
        The images as an Mx28x28 numpy array.
"""
return load_img(path, img_nums, (28, 28))
def load_cifar10_human_readable(path: str, img_nums: list) -> np.array:
"""
    Loads the CIFAR-10 images in human-readable format.
    Args:
        path:
            The path to the folder with the CIFAR-10 images.
        img_nums:
            A list with the numbers of the images we want to load.
    Returns:
        The images as an Mx3x32x32 numpy array.
"""
return load_img(path, img_nums, (3, 32, 32))
def load_images_eran(img_csv: str = "../../resources/images/cifar10_test.csv", num_images: int = 100,
image_shape: tuple = (3, 32, 32)) -> tuple:
"""
    Loads the images from the ERAN csv.
    Args:
        img_csv:
            The path to the csv file.
        num_images:
            The number of images to load from the csv.
        image_shape:
            The shape of a single image.
    Returns:
        images, targets
    """
images_array = np.zeros((num_images, np.prod(image_shape)), dtype=np.float32)
targets_array = np.zeros(num_images, dtype=int)
with open(img_csv, "r") as file:
for j in range(num_images):
line_arr = file.readline().split(",")
targets_array[j] = int(line_arr[0])
images_array[j] = [float(pixel) for pixel in line_arr[1:]]
return images_array.reshape((num_images, *image_shape)), targets_array
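# Usage sketch (added for illustration, not part of the original module). The
# dataset paths below are placeholders and must point at real files:
#
#   imgs = load_mnist_human_readable("path/to/mnist_images", img_nums=[0, 1, 2])
#   print(imgs.shape)                    # -> (3, 28, 28)
#
#   images, targets = load_images_eran(img_csv="path/to/cifar10_test.csv",
#                                      num_images=10)
#   print(images.shape, targets.shape)   # -> (10, 3, 32, 32) (10,)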
| [
"numpy.array",
"numpy.prod",
"numpy.zeros"
] | [((2132, 2163), 'numpy.zeros', 'np.zeros', (['num_images'], {'dtype': 'int'}), '(num_images, dtype=int)\n', (2140, 2163), True, 'import numpy as np\n'), ((2071, 2091), 'numpy.prod', 'np.prod', (['image_shape'], {}), '(image_shape)\n', (2078, 2091), True, 'import numpy as np\n'), ((821, 835), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (829, 835), True, 'import numpy as np\n')] |
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the Apache 2.0 License.
import infra.e2e_args
import infra.ccf
import infra.jsonrpc
import logging
from time import gmtime, strftime
import csv
import random
from loguru import logger as LOG
class AppUser:
def __init__(self, network, name, country, curve):
self.name = name
self.country = country
primary, _ = network.find_primary()
network.create_users([self.name], curve)
network.consortium.add_users(primary, [self.name])
with primary.user_client(user_id=self.name) as client:
self.ccf_id = client.rpc("whoAmI", {}).result["caller_id"]
def __str__(self):
return f"{self.ccf_id} ({self.name})"
def run(args):
hosts = ["localhost"]
with infra.ccf.network(
hosts, args.build_dir, args.debug_nodes, args.perf_nodes, pdb=args.pdb
) as network:
check = infra.checker.Checker()
network.start_and_join(args)
primary, others = network.find_nodes()
script = "if tonumber(amt) > 200000 then return true else return false end"
if args.lua_script is not None:
data = []
with open(args.lua_script, "r") as f:
data = f.readlines()
script = "".join(data)
manager = AppUser(network, "manager", "GB", args.default_curve)
regulator = AppUser(network, "auditor", "GB", args.default_curve)
banks = [
AppUser(network, f"bank{country}", country, args.default_curve)
for country in ("US", "GB", "GR", "FR")
]
transactions = []
with open(args.datafile, newline="") as f:
datafile = csv.DictReader(f)
for i, row in enumerate(datafile):
                # read only the first few rows (indices 0-10)
if i > 10:
break
json_tx = {
"src": row["origin"],
"dst": row["destination"],
"amt": row["amount"],
"type": row["type"],
"timestamp": strftime("%a, %d %b %Y %H:%M:%S +0000", gmtime()),
"src_country": row["src_country"],
"dst_country": row["dst_country"],
}
transactions.append(json_tx)
# Manager is granted special privileges by members, which is later read by app to enforce access restrictions
proposal_result, error = network.consortium.propose(
0,
primary,
f"""
return Calls:call(
"set_user_data",
{{
user_id = {manager.ccf_id},
user_data = {{
privileges = {{
REGISTER_REGULATORS = true,
REGISTER_BANKS = true,
}}
}}
}}
)
""",
)
network.consortium.vote_using_majority(primary, proposal_result["id"])
# Check permissions are enforced
with primary.user_client(user_id=regulator.name) as c:
check(
c.rpc("REG_register", {}),
error=lambda e: e is not None
and e["code"] == infra.jsonrpc.ErrorCode.INVALID_CALLER_ID.value,
)
check(
c.rpc("BK_register", {}),
error=lambda e: e is not None
and e["code"] == infra.jsonrpc.ErrorCode.INVALID_CALLER_ID.value,
)
with primary.user_client(user_id=banks[0].name) as c:
check(
c.rpc("REG_register", {}),
error=lambda e: e is not None
and e["code"] == infra.jsonrpc.ErrorCode.INVALID_CALLER_ID.value,
)
check(
c.rpc("BK_register", {}),
error=lambda e: e is not None
and e["code"] == infra.jsonrpc.ErrorCode.INVALID_CALLER_ID.value,
)
# As permissioned manager, register regulator and banks
with primary.node_client() as mc:
check_commit = infra.checker.Checker(mc)
with primary.user_client(format="msgpack", user_id=manager.name) as c:
check(
c.rpc(
"REG_register",
{
"regulator_id": regulator.ccf_id,
"country": regulator.country,
"script": script,
},
),
result=regulator.ccf_id,
)
check(
c.rpc("REG_get", {"id": regulator.ccf_id}),
result=[regulator.country, script],
)
check(
c.rpc(
"BK_register",
{"bank_id": regulator.ccf_id, "country": regulator.country},
),
error=lambda e: e is not None
and e["code"] == infra.jsonrpc.ErrorCode.INVALID_PARAMS.value,
)
LOG.debug(f"User {regulator} successfully registered as regulator")
for bank in banks:
check(
c.rpc(
"BK_register",
{"bank_id": bank.ccf_id, "country": bank.country},
),
result=bank.ccf_id,
)
check(c.rpc("BK_get", {"id": bank.ccf_id}), result=bank.country)
check(
c.rpc(
"REG_register",
{"regulator_id": bank.ccf_id, "country": bank.country},
),
error=lambda e: e is not None
and e["code"] == infra.jsonrpc.ErrorCode.INVALID_PARAMS.value,
)
LOG.debug(f"User {bank} successfully registered as bank")
        LOG.success(f"1 regulator and {len(banks)} bank(s) successfully set up")
tx_id = 0 # Tracks how many transactions have been issued
# tracks flagged/non flagged and revealed/non revealed transactions for validation
flagged_txs = {}
revealed_tx_ids = []
flagged_ids = []
non_flagged_ids = []
flagged_amt = 200000
for i, bank in enumerate(banks):
with primary.user_client(format="msgpack", user_id=bank.name) as c:
# Destination account is the next one in the list of banks
for transaction in transactions:
print(transaction)
amount = transaction["amt"]
check(c.rpc("TX_record", transaction), result=tx_id)
check(
c.rpc("TX_get", {"tx_id": tx_id}),
result={
"amt": amount,
"bank_id": bank.ccf_id,
"dst": transaction["dst"],
"dst_country": transaction["dst_country"],
"src": transaction["src"],
"src_country": transaction["src_country"],
"timestamp": transaction["timestamp"],
"type": transaction["type"],
},
)
if float(amount) > flagged_amt:
check(
c.rpc("FLAGGED_TX_get", {"tx_id": tx_id}),
result=[regulator.ccf_id, False, transaction["timestamp"]],
)
flagged_tx = {
"amt": amount,
"bank_id": bank.ccf_id,
"dst": transaction["dst"],
"dst_country": transaction["dst_country"],
"src": transaction["src"],
"src_country": transaction["src_country"],
"timestamp": transaction["timestamp"],
"tx_id": tx_id,
"type": transaction["type"],
}
flagged_ids.append(tx_id)
flagged_txs[tx_id] = flagged_tx
else:
check(
c.rpc("FLAGGED_TX_get", {"tx_id": tx_id}),
error=lambda e: e is not None
and e["code"]
== infra.jsonrpc.ErrorCode.INVALID_PARAMS.value,
)
non_flagged_ids.append(tx_id)
tx_id += 1
LOG.success(f"{tx_id} transactions have been successfully issued")
# bank that issued first flagged transaction
with primary.user_client(format="msgpack", user_id=bank.name) as c:
# try to poll flagged but fail as you are not a regulator
check(
c.rpc("REG_poll_flagged", {}),
error=lambda e: e is not None
and e["code"] == infra.jsonrpc.ErrorCode.INVALID_CALLER_ID.value,
)
# bank reveal some transactions that were flagged
for i, tx_id in enumerate(flagged_ids):
if i % 2 == 0:
check(c.rpc("TX_reveal", {"tx_id": tx_id}), result=True)
revealed_tx_ids.append(tx_id)
# bank try to reveal non flagged txs
for tx_id in non_flagged_ids:
check(
c.rpc("TX_reveal", {"tx_id": tx_id}),
error=lambda e: e is not None
and e["code"] == infra.jsonrpc.ErrorCode.INVALID_PARAMS.value,
)
# regulator poll for transactions that are flagged
with primary.node_client() as mc:
with primary.user_client(format="msgpack", user_id=regulator.name) as c:
# assert that the flagged txs that we poll for are correct
resp = c.rpc("REG_poll_flagged", {})
poll_flagged_ids = []
for poll_flagged in resp.result:
# poll flagged is a list [tx_id, regulator_id]
poll_flagged_ids.append(poll_flagged[0])
poll_flagged_ids.sort()
assert poll_flagged_ids == flagged_ids
for tx_id in flagged_ids:
# get from flagged txs, try to get the flagged one that was not revealed
if tx_id not in revealed_tx_ids:
check(
c.rpc("REG_get_revealed", {"tx_id": tx_id}),
error=lambda e: e is not None
and e["code"]
== infra.jsonrpc.ErrorCode.INVALID_PARAMS.value,
)
# get from flagged txs, try to get the flagged ones that were revealed
for tx_id in revealed_tx_ids:
check(
c.rpc("REG_get_revealed", {"tx_id": tx_id}),
result=flagged_txs[tx_id],
)
if __name__ == "__main__":
def add(parser):
parser.add_argument(
"--lua-script", help="Regulator checker loaded as lua script file", type=str
)
parser.add_argument(
"--datafile", help="Load an existing scenario file (csv)", type=str
)
args = infra.e2e_args.cli_args(add)
args.package = args.app_script and "libluageneric" or "liblogging"
run(args)
| [
"loguru.logger.success",
"csv.DictReader",
"loguru.logger.debug",
"time.gmtime"
] | [((8939, 9005), 'loguru.logger.success', 'LOG.success', (['f"""{tx_id} transactions have been successfully issued"""'], {}), "(f'{tx_id} transactions have been successfully issued')\n", (8950, 9005), True, 'from loguru import logger as LOG\n'), ((1723, 1740), 'csv.DictReader', 'csv.DictReader', (['f'], {}), '(f)\n', (1737, 1740), False, 'import csv\n'), ((5218, 5285), 'loguru.logger.debug', 'LOG.debug', (['f"""User {regulator} successfully registered as regulator"""'], {}), "(f'User {regulator} successfully registered as regulator')\n", (5227, 5285), True, 'from loguru import logger as LOG\n'), ((6077, 6134), 'loguru.logger.debug', 'LOG.debug', (['f"""User {bank} successfully registered as bank"""'], {}), "(f'User {bank} successfully registered as bank')\n", (6086, 6134), True, 'from loguru import logger as LOG\n'), ((2152, 2160), 'time.gmtime', 'gmtime', ([], {}), '()\n', (2158, 2160), False, 'from time import gmtime, strftime\n')] |
# Generated by Django 3.0.2 on 2020-03-17 08:44
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('myApp', '0016_usergroup_buyer'),
]
operations = [
migrations.CreateModel(
name='Chat',
fields=[
('id', models.CharField(max_length=31, primary_key=True, serialize=False)),
('chatinfo', models.CharField(max_length=20000)),
('shopid', models.CharField(max_length=30)),
('user1', models.CharField(max_length=50)),
('user2', models.CharField(max_length=50)),
('name1', models.CharField(max_length=50)),
('name2', models.CharField(max_length=50)),
],
),
]
| [
"django.db.models.CharField"
] | [((323, 389), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(31)', 'primary_key': '(True)', 'serialize': '(False)'}), '(max_length=31, primary_key=True, serialize=False)\n', (339, 389), False, 'from django.db import migrations, models\n'), ((421, 455), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(20000)'}), '(max_length=20000)\n', (437, 455), False, 'from django.db import migrations, models\n'), ((485, 516), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(30)'}), '(max_length=30)\n', (501, 516), False, 'from django.db import migrations, models\n'), ((545, 576), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(50)'}), '(max_length=50)\n', (561, 576), False, 'from django.db import migrations, models\n'), ((605, 636), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(50)'}), '(max_length=50)\n', (621, 636), False, 'from django.db import migrations, models\n'), ((665, 696), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(50)'}), '(max_length=50)\n', (681, 696), False, 'from django.db import migrations, models\n'), ((725, 756), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(50)'}), '(max_length=50)\n', (741, 756), False, 'from django.db import migrations, models\n')] |
# Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Controllers for miscellaneous services."""
__author__ = '<NAME>'
import base64
import json
from core.controllers import base
class FileReadHandler(base.BaseHandler):
"""Returns a base64-encoded ascii string with uploaded file's content."""
def post(self):
raw_file_content = self.request.get('file')
encoded_content = base64.b64encode(raw_file_content)
self.response.headers['Content-Type'] = 'application/json'
response = {
'base64_file_content': encoded_content,
}
self.response.out.write(json.dumps(response))
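# Client-side sketch of the payload produced above (added for illustration,
# not part of the original handler). `response_body` stands for the JSON text
# written by FileReadHandler.post():
#
#   payload = json.loads(response_body)
#   raw_file_content = base64.b64decode(payload['base64_file_content'])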
| [
"base64.b64encode",
"json.dumps"
] | [((950, 984), 'base64.b64encode', 'base64.b64encode', (['raw_file_content'], {}), '(raw_file_content)\n', (966, 984), False, 'import base64\n'), ((1168, 1188), 'json.dumps', 'json.dumps', (['response'], {}), '(response)\n', (1178, 1188), False, 'import json\n')] |
# -*- coding: utf-8 -*-
from __future__ import division, print_function, absolute_import
import os
import tensorflow as tf
'''
gluoncv backbone + multi_gpu
'''
# ------------------------------------------------
VERSION = 'Cascade_FPN_Res50_COCO_1x_20190421_v3'
NET_NAME = 'resnet50_v1d'
ADD_BOX_IN_TENSORBOARD = True
# ---------------------------------------- System_config
ROOT_PATH = os.path.abspath('../')
print(20*"++--")
print(ROOT_PATH)
GPU_GROUP = "0,1,2,3,4,5,6,7"
NUM_GPU = len(GPU_GROUP.strip().split(','))
SHOW_TRAIN_INFO_INTE = 20
SMRY_ITER = 200
SAVE_WEIGHTS_INTE = 80000
SUMMARY_PATH = ROOT_PATH + '/output/summary'
TEST_SAVE_PATH = ROOT_PATH + '/tools/test_result'
INFERENCE_IMAGE_PATH = ROOT_PATH + '/tools/inference_image'
INFERENCE_SAVE_PATH = ROOT_PATH + '/tools/inference_results'
if NET_NAME.startswith("resnet"):
weights_name = NET_NAME
elif NET_NAME.startswith("MobilenetV2"):
weights_name = "mobilenet/mobilenet_v2_1.0_224"
else:
raise NotImplementedError
PRETRAINED_CKPT = ROOT_PATH + '/data/pretrained_weights/' + weights_name + '.ckpt'
TRAINED_CKPT = os.path.join(ROOT_PATH, 'output/trained_weights')
EVALUATE_DIR = ROOT_PATH + '/output/evaluate_result_pickle/'
# ------------------------------------------ Train config
RESTORE_FROM_RPN = False
IS_FILTER_OUTSIDE_BOXES = False
FIXED_BLOCKS = 0 # allow 0~3
FREEZE_BLOCKS = [True, False, False, False, False] # for gluoncv backbone
USE_07_METRIC = True
CUDA9 = True
EVAL_THRESHOLD = 0.5
RPN_LOCATION_LOSS_WEIGHT = 1.
RPN_CLASSIFICATION_LOSS_WEIGHT = 1.0
FAST_RCNN_LOCATION_LOSS_WEIGHT = 1.0
FAST_RCNN_CLASSIFICATION_LOSS_WEIGHT = 1.0
RPN_SIGMA = 3.0
FASTRCNN_SIGMA = 1.0
MUTILPY_BIAS_GRADIENT = None  # 2.0  # if None, will not multiply
GRADIENT_CLIPPING_BY_NORM = None # 10.0 if None, will not clip
EPSILON = 1e-5
MOMENTUM = 0.9
BATCH_SIZE = 1
WARM_SETP = int(0.25 * SAVE_WEIGHTS_INTE)
LR = 5e-4 * 2 * 1.25 * NUM_GPU * BATCH_SIZE
DECAY_STEP = [11*SAVE_WEIGHTS_INTE, 16*SAVE_WEIGHTS_INTE, 20*SAVE_WEIGHTS_INTE] # 50000, 70000
MAX_ITERATION = 20*SAVE_WEIGHTS_INTE
# -------------------------------------------- Data_preprocess_config
DATASET_NAME = 'coco' # 'pascal', 'coco'
PIXEL_MEAN = [123.68, 116.779, 103.939] # R, G, B. In tf, channel is RGB. In openCV, channel is BGR
PIXEL_MEAN_ = [0.485, 0.456, 0.406]
PIXEL_STD = [0.229, 0.224, 0.225] # R, G, B. In tf, channel is RGB. In openCV, channel is BGR
IMG_SHORT_SIDE_LEN = 800
IMG_MAX_LENGTH = 1333
CLASS_NUM = 80
# --------------------------------------------- Network_config
INITIALIZER = tf.random_normal_initializer(mean=0.0, stddev=0.01)
BBOX_INITIALIZER = tf.random_normal_initializer(mean=0.0, stddev=0.001)
WEIGHT_DECAY = 0.00004 if NET_NAME.startswith('Mobilenet') else 0.0001
IS_ASSIGN = True
# ---------------------------------------------Anchor config
USE_CENTER_OFFSET = True
LEVLES = ['P2', 'P3', 'P4', 'P5', 'P6']
BASE_ANCHOR_SIZE_LIST = [32, 64, 128, 256, 512]
ANCHOR_STRIDE_LIST = [4, 8, 16, 32, 64]
ANCHOR_SCALES = [1.0]
ANCHOR_RATIOS = [0.5, 1., 2.0]
ROI_SCALE_FACTORS = [[10., 10., 5.0, 5.0], [20., 20., 10.0, 10.0], [40., 40., 20.0, 20.0]]
ANCHOR_SCALE_FACTORS = [10., 10., 5.0, 5.0]
# --------------------------------------------FPN config
SHARE_HEADS = True
KERNEL_SIZE = 3
RPN_IOU_POSITIVE_THRESHOLD = 0.7
RPN_IOU_NEGATIVE_THRESHOLD = 0.3
TRAIN_RPN_CLOOBER_POSITIVES = False
RPN_MINIBATCH_SIZE = 256
RPN_POSITIVE_RATE = 0.5
RPN_NMS_IOU_THRESHOLD = 0.7
RPN_TOP_K_NMS_TRAIN = 12000
RPN_MAXIMUM_PROPOSAL_TARIN = 2000
RPN_TOP_K_NMS_TEST = 6000
RPN_MAXIMUM_PROPOSAL_TEST = 1000
# -------------------------------------------Fast-RCNN config
ROI_SIZE = 14
ROI_POOL_KERNEL_SIZE = 2
USE_DROPOUT = False
KEEP_PROB = 1.0
SHOW_SCORE_THRSHOLD = 0.6 # only show in tensorboard
FAST_RCNN_NMS_IOU_THRESHOLD = 0.5 # 0.6
FAST_RCNN_NMS_MAX_BOXES_PER_CLASS = 100
FAST_RCNN_IOU_POSITIVE_THRESHOLD = 0.5
FAST_RCNN_IOU_NEGATIVE_THRESHOLD = 0.0 # 0.1 < IOU < 0.5 is negative
FAST_RCNN_MINIBATCH_SIZE = 512 # if is -1, that is train with OHEM
FAST_RCNN_POSITIVE_RATE = 0.25
ADD_GTBOXES_TO_TRAIN = False
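if __name__ == "__main__":
    # Illustrative sanity check (not part of the original config): print the
    # schedule values derived above from SAVE_WEIGHTS_INTE, NUM_GPU and BATCH_SIZE.
    print("WARM_SETP:", WARM_SETP)            # 20000
    print("LR:", LR)                          # ~0.01 for 8 GPUs, batch size 1
    print("DECAY_STEP:", DECAY_STEP)          # [880000, 1280000, 1600000]
    print("MAX_ITERATION:", MAX_ITERATION)    # 1600000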
| [
"os.path.abspath",
"os.path.join",
"tensorflow.random_normal_initializer"
] | [((389, 411), 'os.path.abspath', 'os.path.abspath', (['"""../"""'], {}), "('../')\n", (404, 411), False, 'import os\n'), ((1096, 1145), 'os.path.join', 'os.path.join', (['ROOT_PATH', '"""output/trained_weights"""'], {}), "(ROOT_PATH, 'output/trained_weights')\n", (1108, 1145), False, 'import os\n'), ((2554, 2605), 'tensorflow.random_normal_initializer', 'tf.random_normal_initializer', ([], {'mean': '(0.0)', 'stddev': '(0.01)'}), '(mean=0.0, stddev=0.01)\n', (2582, 2605), True, 'import tensorflow as tf\n'), ((2625, 2677), 'tensorflow.random_normal_initializer', 'tf.random_normal_initializer', ([], {'mean': '(0.0)', 'stddev': '(0.001)'}), '(mean=0.0, stddev=0.001)\n', (2653, 2677), True, 'import tensorflow as tf\n')] |
import numpy as np
from stumpff import C, S
from CelestialBody import BODIES
from numerical import newton, laguerre
from lagrange import calc_f, calc_fd, calc_g, calc_gd
def kepler_chi(chi, alpha, r0, vr0, mu, dt):
''' Kepler's Equation of the universal anomaly, modified
for use in numerical solvers. '''
z = alpha*chi**2
return (r0*vr0/np.sqrt(mu))*chi**2*C(z) + \
(1 - alpha*r0)*chi**3*S(z) + \
r0*chi - np.sqrt(mu)*dt
def dkepler_dchi(chi, alpha, r0, vr0, mu, dt):
''' Derivative of Kepler's Equation of the universal anomaly,
modified for use in numerical solvers. '''
z = alpha*chi**2
return (r0*vr0/np.sqrt(mu))*chi*(1 - alpha*chi**2*S(z)) + \
(1 - alpha*r0)*chi**2*C(z) + r0
def d2kepler_dchi2(chi, alpha, r0, vr0, mu, dt):
''' Second derivative of Kepler's Equation of the universal
anomaly, modified for use in numerical solvers. '''
z = alpha*chi**2
S_ = S(z)
return (r0*vr0/np.sqrt(mu))*(1 - 3*z*S_ + z*(C(z) - 3*S_)) + \
chi*(1 - z*S_)*(1 - alpha*r0)
def solve_kepler_chi(r_0, v_0, dt, body=BODIES['Earth'], method='laguerre', tol=1e-7, max_iters=100):
''' Solve Kepler's Equation of the universal anomaly chi using the specified
numerical method. Applies Algorithm 3.4 from Orbital Mechanics for Engineering
Students, 4 ed, Curtis.
:param r_0: `iterable` (km) initial position 3-vector
:param v_0: `iterable` (km/s) initial velocity 3-vector
:param dt: `float` (s) time after initial state to solve for r, v as 3-vectors
:param body: `CelestialBody` (--) the celestial body to use for orbital parameters
:param method: `str` (--) which numerical method to use to solve Kepler's Equation
    :param tol: `float` (--) decimal tolerance for numerical method (default 1e-7 is IEEE 754 single precision)
:param max_iters: `int` (--) maximum number of iterations in numerical method before breaking
:return: (km) final position 3-vector, (km/s) final velocity 3-vector
'''
VALID_METHODS = ('laguerre', 'newton')
mu = body.mu # (km**3/s**2) gravitational parameter of the specified primary body
r0 = np.linalg.norm(r_0) # (km) initial position magnitude
v0 = np.linalg.norm(v_0) # (km/s) initial velocity magnitude
vr0 = np.dot(v_0, r_0)/r0 # (km/s) initial radial velocity magnitude
alpha = 2/r0 - v0**2/mu # (1/km) inverse of semi-major axis
chi0 = np.sqrt(mu)*np.abs(alpha)*dt
if method not in VALID_METHODS:
print(f'Method \'{method}\' is not valid, must be one of {VALID_METHODS}.\nDefaulting to laguerre method.')
chi, _, _ = laguerre(chi0, kepler_chi, dkepler_dchi, d2kepler_dchi2, alpha, r0, vr0, mu, dt)
elif method == 'newton':
chi, _, _ = newton(chi0, kepler_chi, dkepler_dchi, alpha, r0, vr0, mu, dt)
else: # method == 'laguerre'
chi, _, _ = laguerre(chi0, kepler_chi, dkepler_dchi, d2kepler_dchi2, alpha, r0, vr0, mu, dt)
f = calc_f(chi, r0, alpha)
g = calc_g(dt, mu, chi, alpha)
r_1 = f*r_0 + g*v_0
r1 = np.linalg.norm(r_1)
fd = calc_fd(mu, r1, r0, alpha, chi)
gd = calc_gd(chi, r1, alpha)
v_1 = fd*r_0 + gd*v_0
return r_1, v_1
def solve_kepler_E(e, Me, tol=1e-7, max_iters=100):
''' Solve Kepler's Equation in the form containing Eccentric Anomaly (E),
eccentricity (e), and Mean Anomaly of Ellipse (Me). Uses Algorithm 3.1 from Orbital
Mechanics for Engineering Students, 4 ed, Curtis. '''
# TODO: have this function make use of one of the numerical methods in numerical.py
def f(E, e, Me):
return E - e*np.sin(E) - Me
def fp(E, e):
return 1 - e*np.cos(E)
E = Me + e/2 if Me < np.pi else Me - e/2
ratio = f(E, e, Me)/fp(E, e)
iters = 0
while abs(ratio) > tol and iters < max_iters:
E -= ratio
ratio = f(E, e, Me)/fp(E, e)
iters += 1
E -= ratio
converged = np.abs(ratio) <= tol
return E, iters, converged
def test():
''' Test the functionality of solve_kepler_chi
and solve_kepler_laguerre using Problem 3.20 from
Orbital Mechanics for Engineering Students, 4 ed, Curtis.
'''
# given starting information
Earth = BODIES['Earth'] # `CelestialBody` (--) Earth and all the Earth things
r_0 = np.array([20000, -105000, -19000]) # (km) initial position vector
v_0 = np.array([0.9, -3.4, -1.5]) # (km/s) initial velocity vector
dt = 2*60*60 # (s) time of interest after initial time
# given correct answer from textbook
correct_r_1 = np.array([26338, -128750, -29656]) # (km) final position vector
correct_v_1 = np.array([0.86280, -3.2116, -1.4613]) # (km/s) final velocity vector
# solve using above methods
r_n, v_n = solve_kepler_chi(r_0, v_0, dt, Earth, method='newton')
r_l, v_l = solve_kepler_chi(r_0, v_0, dt, Earth, method='laguerre')
# check correctness
# tolerance based on significant figures of given answers
newton_valid = np.allclose(r_n, correct_r_1, atol=1) and np.allclose(v_n, correct_v_1, atol=1e-4)
laguerre_valid = np.allclose(r_l, correct_r_1, atol=1) and np.allclose(v_l, correct_v_1, atol=1e-4)
return all([newton_valid, laguerre_valid])
if __name__ == '__main__':
print(test())
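    # Illustrative addition (not part of the original script): solve Kepler's
    # equation E - e*sin(E) = Me directly for a mildly eccentric orbit.
    E, iters, converged = solve_kepler_E(e=0.1, Me=np.pi / 4)
    print(f"E = {E:.6f} rad after {iters} iteration(s), converged: {converged}")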
| [
"numpy.abs",
"numpy.allclose",
"numpy.sqrt",
"lagrange.calc_f",
"numerical.laguerre",
"lagrange.calc_g",
"numpy.array",
"numpy.dot",
"numerical.newton",
"lagrange.calc_fd",
"lagrange.calc_gd",
"numpy.cos",
"numpy.linalg.norm",
"numpy.sin",
"stumpff.S",
"stumpff.C"
] | [((953, 957), 'stumpff.S', 'S', (['z'], {}), '(z)\n', (954, 957), False, 'from stumpff import C, S\n'), ((2171, 2190), 'numpy.linalg.norm', 'np.linalg.norm', (['r_0'], {}), '(r_0)\n', (2185, 2190), True, 'import numpy as np\n'), ((2235, 2254), 'numpy.linalg.norm', 'np.linalg.norm', (['v_0'], {}), '(v_0)\n', (2249, 2254), True, 'import numpy as np\n'), ((2982, 3004), 'lagrange.calc_f', 'calc_f', (['chi', 'r0', 'alpha'], {}), '(chi, r0, alpha)\n', (2988, 3004), False, 'from lagrange import calc_f, calc_fd, calc_g, calc_gd\n'), ((3013, 3039), 'lagrange.calc_g', 'calc_g', (['dt', 'mu', 'chi', 'alpha'], {}), '(dt, mu, chi, alpha)\n', (3019, 3039), False, 'from lagrange import calc_f, calc_fd, calc_g, calc_gd\n'), ((3073, 3092), 'numpy.linalg.norm', 'np.linalg.norm', (['r_1'], {}), '(r_1)\n', (3087, 3092), True, 'import numpy as np\n'), ((3103, 3134), 'lagrange.calc_fd', 'calc_fd', (['mu', 'r1', 'r0', 'alpha', 'chi'], {}), '(mu, r1, r0, alpha, chi)\n', (3110, 3134), False, 'from lagrange import calc_f, calc_fd, calc_g, calc_gd\n'), ((3144, 3167), 'lagrange.calc_gd', 'calc_gd', (['chi', 'r1', 'alpha'], {}), '(chi, r1, alpha)\n', (3151, 3167), False, 'from lagrange import calc_f, calc_fd, calc_g, calc_gd\n'), ((4309, 4343), 'numpy.array', 'np.array', (['[20000, -105000, -19000]'], {}), '([20000, -105000, -19000])\n', (4317, 4343), True, 'import numpy as np\n'), ((4386, 4413), 'numpy.array', 'np.array', (['[0.9, -3.4, -1.5]'], {}), '([0.9, -3.4, -1.5])\n', (4394, 4413), True, 'import numpy as np\n'), ((4568, 4602), 'numpy.array', 'np.array', (['[26338, -128750, -29656]'], {}), '([26338, -128750, -29656])\n', (4576, 4602), True, 'import numpy as np\n'), ((4651, 4687), 'numpy.array', 'np.array', (['[0.8628, -3.2116, -1.4613]'], {}), '([0.8628, -3.2116, -1.4613])\n', (4659, 4687), True, 'import numpy as np\n'), ((2302, 2318), 'numpy.dot', 'np.dot', (['v_0', 'r_0'], {}), '(v_0, r_0)\n', (2308, 2318), True, 'import numpy as np\n'), ((2645, 2730), 'numerical.laguerre', 'laguerre', (['chi0', 'kepler_chi', 'dkepler_dchi', 'd2kepler_dchi2', 'alpha', 'r0', 'vr0', 'mu', 'dt'], {}), '(chi0, kepler_chi, dkepler_dchi, d2kepler_dchi2, alpha, r0, vr0, mu, dt\n )\n', (2653, 2730), False, 'from numerical import newton, laguerre\n'), ((3940, 3953), 'numpy.abs', 'np.abs', (['ratio'], {}), '(ratio)\n', (3946, 3953), True, 'import numpy as np\n'), ((5002, 5039), 'numpy.allclose', 'np.allclose', (['r_n', 'correct_r_1'], {'atol': '(1)'}), '(r_n, correct_r_1, atol=1)\n', (5013, 5039), True, 'import numpy as np\n'), ((5044, 5086), 'numpy.allclose', 'np.allclose', (['v_n', 'correct_v_1'], {'atol': '(0.0001)'}), '(v_n, correct_v_1, atol=0.0001)\n', (5055, 5086), True, 'import numpy as np\n'), ((5106, 5143), 'numpy.allclose', 'np.allclose', (['r_l', 'correct_r_1'], {'atol': '(1)'}), '(r_l, correct_r_1, atol=1)\n', (5117, 5143), True, 'import numpy as np\n'), ((5148, 5190), 'numpy.allclose', 'np.allclose', (['v_l', 'correct_v_1'], {'atol': '(0.0001)'}), '(v_l, correct_v_1, atol=0.0001)\n', (5159, 5190), True, 'import numpy as np\n'), ((447, 458), 'numpy.sqrt', 'np.sqrt', (['mu'], {}), '(mu)\n', (454, 458), True, 'import numpy as np\n'), ((2443, 2454), 'numpy.sqrt', 'np.sqrt', (['mu'], {}), '(mu)\n', (2450, 2454), True, 'import numpy as np\n'), ((2455, 2468), 'numpy.abs', 'np.abs', (['alpha'], {}), '(alpha)\n', (2461, 2468), True, 'import numpy as np\n'), ((2775, 2837), 'numerical.newton', 'newton', (['chi0', 'kepler_chi', 'dkepler_dchi', 'alpha', 'r0', 'vr0', 'mu', 'dt'], {}), '(chi0, kepler_chi, dkepler_dchi, alpha, r0, 
vr0, mu, dt)\n', (2781, 2837), False, 'from numerical import newton, laguerre\n'), ((2892, 2977), 'numerical.laguerre', 'laguerre', (['chi0', 'kepler_chi', 'dkepler_dchi', 'd2kepler_dchi2', 'alpha', 'r0', 'vr0', 'mu', 'dt'], {}), '(chi0, kepler_chi, dkepler_dchi, d2kepler_dchi2, alpha, r0, vr0, mu, dt\n )\n', (2900, 2977), False, 'from numerical import newton, laguerre\n'), ((742, 746), 'stumpff.C', 'C', (['z'], {}), '(z)\n', (743, 746), False, 'from stumpff import C, S\n'), ((977, 988), 'numpy.sqrt', 'np.sqrt', (['mu'], {}), '(mu)\n', (984, 988), True, 'import numpy as np\n'), ((3680, 3689), 'numpy.cos', 'np.cos', (['E'], {}), '(E)\n', (3686, 3689), True, 'import numpy as np\n'), ((376, 380), 'stumpff.C', 'C', (['z'], {}), '(z)\n', (377, 380), False, 'from stumpff import C, S\n'), ((418, 422), 'stumpff.S', 'S', (['z'], {}), '(z)\n', (419, 422), False, 'from stumpff import C, S\n'), ((3625, 3634), 'numpy.sin', 'np.sin', (['E'], {}), '(E)\n', (3631, 3634), True, 'import numpy as np\n'), ((664, 675), 'numpy.sqrt', 'np.sqrt', (['mu'], {}), '(mu)\n', (671, 675), True, 'import numpy as np\n'), ((699, 703), 'stumpff.S', 'S', (['z'], {}), '(z)\n', (700, 703), False, 'from stumpff import C, S\n'), ((1007, 1011), 'stumpff.C', 'C', (['z'], {}), '(z)\n', (1008, 1011), False, 'from stumpff import C, S\n'), ((356, 367), 'numpy.sqrt', 'np.sqrt', (['mu'], {}), '(mu)\n', (363, 367), True, 'import numpy as np\n')] |
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test Subtokenizer and string helper methods."""
import collections
import tempfile
import tensorflow as tf
from official.nlp.transformer.utils import tokenizer
class SubtokenizerTest(tf.test.TestCase):
def _init_subtokenizer(self, vocab_list):
temp_file = tempfile.NamedTemporaryFile(delete=False)
with tf.io.gfile.GFile(temp_file.name, "w") as w:
for subtoken in vocab_list:
w.write("'%s'" % subtoken)
w.write("\n")
return tokenizer.Subtokenizer(temp_file.name, reserved_tokens=[])
def test_encode(self):
vocab_list = ["123_", "test", "ing_"]
subtokenizer = self._init_subtokenizer(vocab_list)
s = "testing 123"
encoded_list = subtokenizer.encode(s)
self.assertEqual([1, 2, 0], encoded_list)
def test_decode(self):
vocab_list = ["123_", "test", "ing_"]
subtokenizer = self._init_subtokenizer(vocab_list)
encoded_list = [1, 2, 0] # testing 123
decoded_str = subtokenizer.decode(encoded_list)
self.assertEqual("testing 123", decoded_str)
def test_subtoken_ids_to_tokens(self):
vocab_list = ["123_", "test", "ing_"]
subtokenizer = self._init_subtokenizer(vocab_list)
encoded_list = [1, 2, 0] # testing 123
token_list = subtokenizer._subtoken_ids_to_tokens(encoded_list)
self.assertEqual([u"testing", u"123"], token_list)
class StringHelperTest(tf.test.TestCase):
def test_split_string_to_tokens(self):
text = "test? testing 123."
tokens = tokenizer._split_string_to_tokens(text,
tokenizer._ALPHANUMERIC_CHAR_SET)
self.assertEqual(["test", "? ", "testing", "123", "."], tokens)
def test_join_tokens_to_string(self):
tokens = ["test", "? ", "testing", "123", "."]
s = tokenizer._join_tokens_to_string(tokens,
tokenizer._ALPHANUMERIC_CHAR_SET)
self.assertEqual("test? testing 123.", s)
def test_escape_token(self):
token = u"abc_\\4"
alphabet = set("abc_\\u;")
escaped_token = tokenizer._escape_token(token, alphabet)
self.assertEqual("abc\\u\\\\\\52;_", escaped_token)
def test_unescape_token(self):
escaped_token = u"Underline: \\u, Backslash: \\\\, Unicode: \\52;"
unescaped_token = tokenizer._unescape_token(escaped_token)
self.assertEqual("Underline: _, Backslash: \\, Unicode: 4", unescaped_token)
def test_list_to_index_dict(self):
lst = ["test", "strings"]
d = tokenizer._list_to_index_dict(lst)
self.assertDictEqual({"test": 0, "strings": 1}, d)
def test_split_token_to_subtokens(self):
token = "abc"
subtoken_dict = {"a": 0, "b": 1, "c": 2, "ab": 3}
max_subtoken_length = 2
subtokens = tokenizer._split_token_to_subtokens(token, subtoken_dict,
max_subtoken_length)
self.assertEqual(["ab", "c"], subtokens)
def test_generate_alphabet_dict(self):
s = ["testing", "123"]
reserved_tokens = ["???"]
alphabet = tokenizer._generate_alphabet_dict(s, reserved_tokens)
self.assertIn("?", alphabet)
self.assertIn("t", alphabet)
self.assertIn("e", alphabet)
self.assertIn("s", alphabet)
self.assertIn("i", alphabet)
self.assertIn("n", alphabet)
self.assertIn("g", alphabet)
self.assertIn("1", alphabet)
self.assertIn("2", alphabet)
self.assertIn("3", alphabet)
def test_count_and_gen_subtokens(self):
token_counts = {"abc": 5}
alphabet = set("abc_")
subtoken_dict = {"a": 0, "b": 1, "c": 2, "_": 3}
max_subtoken_length = 2
subtoken_counts = tokenizer._count_and_gen_subtokens(
token_counts, alphabet, subtoken_dict, max_subtoken_length)
self.assertIsInstance(subtoken_counts, collections.defaultdict)
self.assertDictEqual(
{
"a": 5,
"b": 5,
"c": 5,
"_": 5,
"ab": 5,
"bc": 5,
"c_": 5,
"abc": 5,
"bc_": 5,
"abc_": 5
}, subtoken_counts)
def test_filter_and_bucket_subtokens(self):
subtoken_counts = collections.defaultdict(int, {
"a": 2,
"b": 4,
"c": 1,
"ab": 6,
"ac": 3,
"abbc": 5
})
min_count = 3
subtoken_buckets = tokenizer._filter_and_bucket_subtokens(
subtoken_counts, min_count)
self.assertEqual(len(subtoken_buckets[0]), 0)
self.assertEqual(set("b"), subtoken_buckets[1])
self.assertEqual(set(["ab", "ac"]), subtoken_buckets[2])
self.assertEqual(len(subtoken_buckets[3]), 0)
self.assertEqual(set(["abbc"]), subtoken_buckets[4])
def test_gen_new_subtoken_list(self):
subtoken_counts = collections.defaultdict(int, {
"translate": 10,
"t": 40,
"tr": 16,
"tra": 12
})
min_count = 5
alphabet = set("translate")
reserved_tokens = ["reserved", "tokens"]
subtoken_list, max_token_length = tokenizer._gen_new_subtoken_list(
subtoken_counts, min_count, alphabet, reserved_tokens)
    # Check that "tra" isn't in the list (its count should be decremented to 2,
    # so it should not be added to the candidate list).
self.assertNotIn("tra", subtoken_list)
self.assertIn("tr", subtoken_list)
self.assertIn("t", subtoken_list)
self.assertEqual(len("translate"), max_token_length)
def test_generate_subtokens(self):
token_counts = {"ab": 1, "bc": 3, "abc": 5}
alphabet = set("abc_")
min_count = 100
num_iterations = 1
reserved_tokens = ["reserved", "tokens"]
vocab_list = tokenizer._generate_subtokens(token_counts, alphabet,
min_count, num_iterations,
reserved_tokens)
# Check that reserved tokens are at the front of the list
self.assertEqual(vocab_list[:2], reserved_tokens)
# Check that each character in alphabet is in the vocab list
for c in alphabet:
self.assertIn(c, vocab_list)
if __name__ == "__main__":
tf.test.main()
| [
"official.nlp.transformer.utils.tokenizer._escape_token",
"official.nlp.transformer.utils.tokenizer._unescape_token",
"tensorflow.io.gfile.GFile",
"official.nlp.transformer.utils.tokenizer._join_tokens_to_string",
"official.nlp.transformer.utils.tokenizer._list_to_index_dict",
"official.nlp.transformer.utils.tokenizer._count_and_gen_subtokens",
"official.nlp.transformer.utils.tokenizer._generate_subtokens",
"tensorflow.test.main",
"official.nlp.transformer.utils.tokenizer._filter_and_bucket_subtokens",
"official.nlp.transformer.utils.tokenizer._split_token_to_subtokens",
"official.nlp.transformer.utils.tokenizer.Subtokenizer",
"collections.defaultdict",
"tempfile.NamedTemporaryFile",
"official.nlp.transformer.utils.tokenizer._gen_new_subtoken_list",
"official.nlp.transformer.utils.tokenizer._generate_alphabet_dict",
"official.nlp.transformer.utils.tokenizer._split_string_to_tokens"
] | [((6843, 6857), 'tensorflow.test.main', 'tf.test.main', ([], {}), '()\n', (6855, 6857), True, 'import tensorflow as tf\n'), ((907, 948), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', ([], {'delete': '(False)'}), '(delete=False)\n', (934, 948), False, 'import tempfile\n'), ((1110, 1168), 'official.nlp.transformer.utils.tokenizer.Subtokenizer', 'tokenizer.Subtokenizer', (['temp_file.name'], {'reserved_tokens': '[]'}), '(temp_file.name, reserved_tokens=[])\n', (1132, 1168), False, 'from official.nlp.transformer.utils import tokenizer\n'), ((2137, 2210), 'official.nlp.transformer.utils.tokenizer._split_string_to_tokens', 'tokenizer._split_string_to_tokens', (['text', 'tokenizer._ALPHANUMERIC_CHAR_SET'], {}), '(text, tokenizer._ALPHANUMERIC_CHAR_SET)\n', (2170, 2210), False, 'from official.nlp.transformer.utils import tokenizer\n'), ((2434, 2508), 'official.nlp.transformer.utils.tokenizer._join_tokens_to_string', 'tokenizer._join_tokens_to_string', (['tokens', 'tokenizer._ALPHANUMERIC_CHAR_SET'], {}), '(tokens, tokenizer._ALPHANUMERIC_CHAR_SET)\n', (2466, 2508), False, 'from official.nlp.transformer.utils import tokenizer\n'), ((2711, 2751), 'official.nlp.transformer.utils.tokenizer._escape_token', 'tokenizer._escape_token', (['token', 'alphabet'], {}), '(token, alphabet)\n', (2734, 2751), False, 'from official.nlp.transformer.utils import tokenizer\n'), ((2942, 2982), 'official.nlp.transformer.utils.tokenizer._unescape_token', 'tokenizer._unescape_token', (['escaped_token'], {}), '(escaped_token)\n', (2967, 2982), False, 'from official.nlp.transformer.utils import tokenizer\n'), ((3147, 3181), 'official.nlp.transformer.utils.tokenizer._list_to_index_dict', 'tokenizer._list_to_index_dict', (['lst'], {}), '(lst)\n', (3176, 3181), False, 'from official.nlp.transformer.utils import tokenizer\n'), ((3406, 3484), 'official.nlp.transformer.utils.tokenizer._split_token_to_subtokens', 'tokenizer._split_token_to_subtokens', (['token', 'subtoken_dict', 'max_subtoken_length'], {}), '(token, subtoken_dict, max_subtoken_length)\n', (3441, 3484), False, 'from official.nlp.transformer.utils import tokenizer\n'), ((3705, 3758), 'official.nlp.transformer.utils.tokenizer._generate_alphabet_dict', 'tokenizer._generate_alphabet_dict', (['s', 'reserved_tokens'], {}), '(s, reserved_tokens)\n', (3738, 3758), False, 'from official.nlp.transformer.utils import tokenizer\n'), ((4311, 4409), 'official.nlp.transformer.utils.tokenizer._count_and_gen_subtokens', 'tokenizer._count_and_gen_subtokens', (['token_counts', 'alphabet', 'subtoken_dict', 'max_subtoken_length'], {}), '(token_counts, alphabet, subtoken_dict,\n max_subtoken_length)\n', (4345, 4409), False, 'from official.nlp.transformer.utils import tokenizer\n'), ((4845, 4932), 'collections.defaultdict', 'collections.defaultdict', (['int', "{'a': 2, 'b': 4, 'c': 1, 'ab': 6, 'ac': 3, 'abbc': 5}"], {}), "(int, {'a': 2, 'b': 4, 'c': 1, 'ab': 6, 'ac': 3,\n 'abbc': 5})\n", (4868, 4932), False, 'import collections\n'), ((5035, 5101), 'official.nlp.transformer.utils.tokenizer._filter_and_bucket_subtokens', 'tokenizer._filter_and_bucket_subtokens', (['subtoken_counts', 'min_count'], {}), '(subtoken_counts, min_count)\n', (5073, 5101), False, 'from official.nlp.transformer.utils import tokenizer\n'), ((5455, 5532), 'collections.defaultdict', 'collections.defaultdict', (['int', "{'translate': 10, 't': 40, 'tr': 16, 'tra': 12}"], {}), "(int, {'translate': 10, 't': 40, 'tr': 16, 'tra': 12})\n", (5478, 5532), False, 'import collections\n'), ((5715, 5806), 
'official.nlp.transformer.utils.tokenizer._gen_new_subtoken_list', 'tokenizer._gen_new_subtoken_list', (['subtoken_counts', 'min_count', 'alphabet', 'reserved_tokens'], {}), '(subtoken_counts, min_count, alphabet,\n reserved_tokens)\n', (5747, 5806), False, 'from official.nlp.transformer.utils import tokenizer\n'), ((6366, 6467), 'official.nlp.transformer.utils.tokenizer._generate_subtokens', 'tokenizer._generate_subtokens', (['token_counts', 'alphabet', 'min_count', 'num_iterations', 'reserved_tokens'], {}), '(token_counts, alphabet, min_count,\n num_iterations, reserved_tokens)\n', (6395, 6467), False, 'from official.nlp.transformer.utils import tokenizer\n'), ((959, 997), 'tensorflow.io.gfile.GFile', 'tf.io.gfile.GFile', (['temp_file.name', '"""w"""'], {}), "(temp_file.name, 'w')\n", (976, 997), True, 'import tensorflow as tf\n')] |
# -*- coding: utf-8 -*-
# Thanks to @skelsec for his awesome tool Pypykatz
# Checks his project here: https://github.com/skelsec/pypykatz
import codecs
import traceback
from lazagne.config.module_info import ModuleInfo
from lazagne.config.constant import constant
from pypykatz.pypykatz import pypykatz
class Pypykatz(ModuleInfo):
"""
Pypykatz dumps all secrets from the lsass.exe memory
It does not work if:
- LSASS is running as a protected process
- A security product blocks this access
"""
def __init__(self):
ModuleInfo.__init__(self, 'pypykatz', 'windows', system_module=True)
def run(self):
mimi = None
try:
mimi = pypykatz.go_live()
except Exception:
self.debug(traceback.format_exc())
if mimi:
results = {}
logon_sessions = mimi.to_dict().get('logon_sessions', [])
for logon_session in logon_sessions:
# Right now kerberos_creds, dpapi_creds results are not used
user = logon_sessions[logon_session]
# Get cleartext password
for i in ['credman_creds', 'ssp_creds', 'livessp_creds', 'tspkg_creds', 'wdigest_creds']:
for data in user.get(i, []):
if all((data['username'], data['password'])):
login = data['username']
if login not in results:
results[login] = {}
results[login]['Type'] = i
results[login]['Domain'] = data.get('domainname', 'N/A')
results[login]['Password'] = data['password']
# msv_creds to get sha1 user hash
for data in user.get('msv_creds', []):
if data['username']:
login = data['username']
else:
login = user['username']
if login not in results:
results[login] = {}
if data['SHAHash']:
results[login]['Shahash'] = codecs.encode(data['SHAHash'], 'hex')
if data['LMHash']:
results[login]['Lmhash'] = codecs.encode(data['LMHash'], 'hex')
if data['NThash']:
results[login]['Nthash'] = codecs.encode(data['NThash'], 'hex')
constant.pypykatz_result = results
pwd_found = []
for user in results:
results[user]['Login'] = user
pwd_found.append(results[user])
return pwd_found
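# Usage sketch (illustrative only, not part of the original module). LaZagne
# drives modules through their run() method; dumping LSASS requires elevated
# privileges and an unprotected LSASS process, as noted in the class docstring:
#
#   creds = Pypykatz().run()
#   for cred in creds or []:
#       print(cred.get('Login'), cred.get('Type'), cred.get('Password'))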
| [
"traceback.format_exc",
"codecs.encode",
"lazagne.config.module_info.ModuleInfo.__init__",
"pypykatz.pypykatz.pypykatz.go_live"
] | [((579, 647), 'lazagne.config.module_info.ModuleInfo.__init__', 'ModuleInfo.__init__', (['self', '"""pypykatz"""', '"""windows"""'], {'system_module': '(True)'}), "(self, 'pypykatz', 'windows', system_module=True)\n", (598, 647), False, 'from lazagne.config.module_info import ModuleInfo\n'), ((725, 743), 'pypykatz.pypykatz.pypykatz.go_live', 'pypykatz.go_live', ([], {}), '()\n', (741, 743), False, 'from pypykatz.pypykatz import pypykatz\n'), ((795, 817), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (815, 817), False, 'import traceback\n'), ((2239, 2276), 'codecs.encode', 'codecs.encode', (["data['SHAHash']", '"""hex"""'], {}), "(data['SHAHash'], 'hex')\n", (2252, 2276), False, 'import codecs\n'), ((2369, 2405), 'codecs.encode', 'codecs.encode', (["data['LMHash']", '"""hex"""'], {}), "(data['LMHash'], 'hex')\n", (2382, 2405), False, 'import codecs\n'), ((2498, 2534), 'codecs.encode', 'codecs.encode', (["data['NThash']", '"""hex"""'], {}), "(data['NThash'], 'hex')\n", (2511, 2534), False, 'import codecs\n')] |
from django.contrib.auth.models import Permission, User
from django.db import models
class Album(models.Model):
user = models.ForeignKey(User, default=1,on_delete=models.CASCADE)
artist = models.CharField(max_length=250)
album_title = models.CharField(max_length=500)
genre = models.CharField(max_length=100)
album_logo = models.FileField(default="avatar.jpg")
album_visibility = models.CharField(max_length=100, default="private")
is_favorite = models.BooleanField(default=False)
def __str__(self):
return self.album_title + '-' + self.artist + '-' + self.genre
class Song(models.Model):
user = models.ForeignKey(User, default=1,on_delete=models.CASCADE)
album = models.ForeignKey(Album, on_delete=models.CASCADE, null=True)
song_title = models.CharField(max_length=250)
audio_file = models.FileField(default='')
song_visibility = models.CharField(max_length=100, default="private")
is_favorite = models.BooleanField(default=False)
def __str__(self):
        return self.song_title
| [
"django.db.models.FileField",
"django.db.models.CharField",
"django.db.models.BooleanField",
"django.db.models.ForeignKey"
] | [((125, 185), 'django.db.models.ForeignKey', 'models.ForeignKey', (['User'], {'default': '(1)', 'on_delete': 'models.CASCADE'}), '(User, default=1, on_delete=models.CASCADE)\n', (142, 185), False, 'from django.db import models\n'), ((198, 230), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(250)'}), '(max_length=250)\n', (214, 230), False, 'from django.db import models\n'), ((249, 281), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(500)'}), '(max_length=500)\n', (265, 281), False, 'from django.db import models\n'), ((294, 326), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)'}), '(max_length=100)\n', (310, 326), False, 'from django.db import models\n'), ((344, 382), 'django.db.models.FileField', 'models.FileField', ([], {'default': '"""avatar.jpg"""'}), "(default='avatar.jpg')\n", (360, 382), False, 'from django.db import models\n'), ((406, 457), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)', 'default': '"""private"""'}), "(max_length=100, default='private')\n", (422, 457), False, 'from django.db import models\n'), ((476, 510), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (495, 510), False, 'from django.db import models\n'), ((646, 706), 'django.db.models.ForeignKey', 'models.ForeignKey', (['User'], {'default': '(1)', 'on_delete': 'models.CASCADE'}), '(User, default=1, on_delete=models.CASCADE)\n', (663, 706), False, 'from django.db import models\n'), ((718, 779), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Album'], {'on_delete': 'models.CASCADE', 'null': '(True)'}), '(Album, on_delete=models.CASCADE, null=True)\n', (735, 779), False, 'from django.db import models\n'), ((797, 829), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(250)'}), '(max_length=250)\n', (813, 829), False, 'from django.db import models\n'), ((847, 875), 'django.db.models.FileField', 'models.FileField', ([], {'default': '""""""'}), "(default='')\n", (863, 875), False, 'from django.db import models\n'), ((898, 949), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)', 'default': '"""private"""'}), "(max_length=100, default='private')\n", (914, 949), False, 'from django.db import models\n'), ((968, 1002), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (987, 1002), False, 'from django.db import models\n')] |
"""Configures a Kafka Connector for Postgres Station data"""
import json
import logging
import requests
from settings import Settings
logger = logging.getLogger(__name__)
KAFKA_CONNECT_URL = f"{Settings.URLs.KAFKA_CONNECT_URL}/connectors"
CONNECTOR_NAME = "stations"
def configure_connector():
"""Starts and configures the Kafka Connect connector"""
logging.debug("Creating or updating kafka connect connector...")
resp = requests.get(f"{KAFKA_CONNECT_URL}/{CONNECTOR_NAME}")
if resp.status_code == 200:
logging.debug("Connector already created skipping recreation")
return
config = {
"connector.class": "io.confluent.connect.jdbc.JdbcSourceConnector",
"key.converter": "org.apache.kafka.connect.json.JsonConverter",
"key.converter.schemas.enable": "false",
"value.converter": "org.apache.kafka.connect.json.JsonConverter",
"value.converter.schemas.enable": "false",
"topic.prefix": "com.connect.transportation.",
"connection.url": "jdbc:postgresql://postgres:5432/cta",
"connection.user": "cta_admin",
"connection.password": "<PASSWORD>",
"batch.max.rows": "500",
"table.whitelist": "stations",
"poll.interval.ms": "5000", # Poll every 5 seconds
"mode": "incrementing",
"incrementing.column.name": "stop_id",
}
    # The JDBC Source Connector configured above connects to Postgres and loads the
    # `stations` table in incrementing mode, using `stop_id` as the incrementing
    # column; results are published under the `com.connect.transportation.` prefix.
data = json.dumps({"name": CONNECTOR_NAME, "config": config})
resp = requests.post(
KAFKA_CONNECT_URL,
headers={"Content-Type": "application/json"},
data=data,
)
# Ensure a healthy response was given
resp.raise_for_status()
logging.info("-------Connector created successfully-------")
if __name__ == "__main__":
configure_connector()
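    # Illustrative addition (not part of the original script): report the
    # connector state through the Kafka Connect REST API once it exists.
    status = requests.get(f"{KAFKA_CONNECT_URL}/{CONNECTOR_NAME}/status").json()
    logging.info("Connector state: %s", status["connector"]["state"])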
| [
"logging.getLogger",
"requests.post",
"logging.debug",
"json.dumps",
"requests.get",
"logging.info"
] | [((145, 172), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (162, 172), False, 'import logging\n'), ((364, 428), 'logging.debug', 'logging.debug', (['"""Creating or updating kafka connect connector..."""'], {}), "('Creating or updating kafka connect connector...')\n", (377, 428), False, 'import logging\n'), ((441, 494), 'requests.get', 'requests.get', (['f"""{KAFKA_CONNECT_URL}/{CONNECTOR_NAME}"""'], {}), "(f'{KAFKA_CONNECT_URL}/{CONNECTOR_NAME}')\n", (453, 494), False, 'import requests\n'), ((1778, 1832), 'json.dumps', 'json.dumps', (["{'name': CONNECTOR_NAME, 'config': config}"], {}), "({'name': CONNECTOR_NAME, 'config': config})\n", (1788, 1832), False, 'import json\n'), ((1845, 1938), 'requests.post', 'requests.post', (['KAFKA_CONNECT_URL'], {'headers': "{'Content-Type': 'application/json'}", 'data': 'data'}), "(KAFKA_CONNECT_URL, headers={'Content-Type':\n 'application/json'}, data=data)\n", (1858, 1938), False, 'import requests\n'), ((2041, 2101), 'logging.info', 'logging.info', (['"""-------Connector created successfully-------"""'], {}), "('-------Connector created successfully-------')\n", (2053, 2101), False, 'import logging\n'), ((535, 597), 'logging.debug', 'logging.debug', (['"""Connector already created skipping recreation"""'], {}), "('Connector already created skipping recreation')\n", (548, 597), False, 'import logging\n')] |
import io
import logging
import json
import numpy
import torch
import numpy as np
from tqdm import tqdm
from clie.inputters import constant
from clie.objects import Sentence
from torch.utils.data import Dataset
from torch.utils.data.sampler import Sampler
logger = logging.getLogger(__name__)
def load_word_embeddings(file):
embeddings_index = {}
fin = io.open(file, 'r', encoding='utf-8', newline='\n', errors='ignore')
n, d = map(int, fin.readline().split())
for i, line in tqdm(enumerate(fin), total=n):
tokens = line.rstrip().split(' ')
v = numpy.array(tokens[1:], dtype=float)
embeddings_index[tokens[0]] = v
return embeddings_index
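# Usage sketch (illustrative, not part of the original module): the loader
# expects fastText-style text vectors whose first line is "<vocab_size> <dim>".
# The file name below is a placeholder.
#
#   embeddings = load_word_embeddings('wiki.en.vec')
#   print(len(embeddings), embeddings['the'].shape)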
# ------------------------------------------------------------------------------
# Data loading
# ------------------------------------------------------------------------------
def load_data(filename, src_lang, tgt_lang, knn_file,
knn_size, max_examples=-1):
examples = []
wrong_subj_pos, wrong_obj_pos = 0, 0
with open(filename) as f:
data = json.load(f)
knn_dict = None
if knn_file:
with open(knn_file) as f:
knn_dict = json.load(f)
for idx, ex in enumerate(tqdm(data, total=len(data))):
sentence = Sentence(ex['id'])
sentence.language = src_lang
sentence.words = ex['token']
sentence.pos = ex['stanford_pos']
sentence.ner = ex['stanford_ner']
sentence.deprel = ex['stanford_deprel']
sentence.head = [int(x) for x in ex['stanford_head']]
sentence.subj_type = ex['subj_type']
sentence.obj_type = ex['obj_type']
sentence.relation = ex['relation']
if ex['subj_end'] - ex['subj_start'] < 0:
# we swap the start and end index
wrong_subj_pos += 1
sentence.subject = [ex['subj_end'], ex['subj_start']]
else:
sentence.subject = [ex['subj_start'], ex['subj_end']]
if ex['obj_end'] - ex['obj_start'] < 0:
# we swap the start and end index
wrong_obj_pos += 1
sentence.object = [ex['obj_end'], ex['obj_start']]
else:
sentence.object = [ex['obj_start'], ex['obj_end']]
# store KNN word info
if knn_dict:
sentence.tgt_lang = tgt_lang
knn_words = []
for w in ex['token']:
w = '!{}_{}'.format(src_lang, w)
if w in knn_dict:
assert len(knn_dict[w]) == knn_size
knn_words.append(knn_dict[w])
else:
knn_words.append([constant.UNK_WORD] * knn_size)
sentence.knn_words = knn_words
examples.append(sentence)
if max_examples != -1 and len(examples) > max_examples:
break
if wrong_subj_pos > 0 or wrong_obj_pos > 0:
logger.info('{} and {} wrong subject and object positions found!'.format(
wrong_subj_pos, wrong_obj_pos))
return examples
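# Hedged usage sketch (assumption, not in the original file): load_data expects a
# JSON list of TACRED-style examples; the path and language codes below are
# hypothetical.
#
#     train_exs = load_data('data/ace05/english_train.json', src_lang='en',
#                           tgt_lang='ar', knn_file=None, knn_size=5)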
def vectorize(ex, model, iseval):
"""Torchify a single example."""
words = ['!{}_{}'.format(ex.language, w) for w in ex.words]
words = [model.word_dict[w] for w in words]
knn_word = None
if ex.knn_words:
knn_word = [[model.word_dict[w] for w in knn]
for knn in ex.knn_words]
knn_word = torch.LongTensor(knn_word)
word = torch.LongTensor(words)
pos = torch.LongTensor([model.pos_dict[p] for p in ex.pos])
ner = torch.LongTensor([model.ner_dict[n] for n in ex.ner])
deprel = torch.LongTensor([model.deprel_dict[d] for d in ex.deprel])
assert any([x == 0 for x in ex.head])
head = torch.LongTensor(ex.head)
subj_position = torch.LongTensor(ex.subj_position)
obj_position = torch.LongTensor(ex.obj_position)
type = [0] * len(ex.words)
ttype = model.type_dict[ex.subj_type]
start, end = ex.subject
type[start: end + 1] = [ttype] * (end - start + 1)
atype = model.type_dict[ex.obj_type]
start, end = ex.object
type[start: end + 1] = [atype] * (end - start + 1)
type = torch.LongTensor(type)
return {
'id': ex.id,
'language': ex.language,
'word': word,
'pos': pos,
'ner': ner,
'deprel': deprel,
'type': type,
'head': head,
'subject': ex.subj_text,
'object': ex.obj_text,
'subject_pos': subj_position,
'object_pos': obj_position,
'relation': model.label_dict[ex.relation],
'knn_word': knn_word
}
def batchify(batch):
"""Gather a batch of individual examples into one batch."""
# batch is a list of vectorized examples
batch_size = len(batch)
ids = [ex['id'] for ex in batch]
language = [ex['language'] for ex in batch]
use_knn = batch[0]['knn_word'] is not None
# NOTE. batch[0]['knn_word'] is a 2d list
knn_size = len(batch[0]['knn_word'][0]) if use_knn else 0
# --------- Prepare Code tensors ---------
max_len = max([ex['word'].size(0) for ex in batch])
# Batch Code Representations
len_rep = torch.LongTensor(batch_size).fill_(constant.PAD)
word_rep = torch.LongTensor(batch_size, max_len).fill_(constant.PAD)
head_rep = torch.LongTensor(batch_size, max_len).fill_(constant.PAD)
subject_pos_rep = torch.LongTensor(batch_size, max_len).fill_(constant.PAD)
object_pos_rep = torch.LongTensor(batch_size, max_len).fill_(constant.PAD)
pos_rep = torch.LongTensor(batch_size, max_len).fill_(constant.PAD)
ner_rep = torch.LongTensor(batch_size, max_len).fill_(constant.PAD)
deprel_rep = torch.LongTensor(batch_size, max_len).fill_(constant.PAD)
type_rep = torch.LongTensor(batch_size, max_len).fill_(constant.PAD)
labels = torch.LongTensor(batch_size)
subject = []
object = []
knn_rep = None
if use_knn:
knn_rep = torch.LongTensor(batch_size, max_len, knn_size).fill_(constant.PAD)
for i, ex in enumerate(batch):
len_rep[i] = ex['word'].size(0)
labels[i] = ex['relation']
word_rep[i, :len_rep[i]] = ex['word']
head_rep[i, :len_rep[i]] = ex['head']
subject_pos_rep[i, :len_rep[i]] = ex['subject_pos']
object_pos_rep[i, :len_rep[i]] = ex['object_pos']
pos_rep[i, :len_rep[i]] = ex['pos']
ner_rep[i, :len_rep[i]] = ex['ner']
deprel_rep[i, :len_rep[i]] = ex['deprel']
type_rep[i, :len_rep[i]] = ex['type']
subject.append(ex['subject'])
object.append(ex['object'])
if use_knn:
knn_rep[i, :len_rep[i]] = ex['knn_word']
return {
'ids': ids,
'language': language,
'batch_size': batch_size,
'len_rep': len_rep,
'word_rep': word_rep,
'knn_rep': knn_rep,
'head_rep': head_rep,
'subject': subject,
'object': object,
'subject_pos_rep': subject_pos_rep,
'object_pos_rep': object_pos_rep,
'labels': labels,
'pos_rep': pos_rep,
'ner_rep': ner_rep,
'deprel_rep': deprel_rep,
'type_rep': type_rep
}
class ACE05Dataset(Dataset):
def __init__(self, examples, model, evaluation=False):
self.model = model
self.examples = examples
self.evaluation = evaluation
def __len__(self):
return len(self.examples)
def __getitem__(self, index):
return vectorize(self.examples[index], self.model,
iseval=self.evaluation)
def lengths(self):
return [len(ex.words) for ex in self.examples]
class SortedBatchSampler(Sampler):
def __init__(self, lengths, batch_size, shuffle=True):
self.lengths = lengths
self.batch_size = batch_size
self.shuffle = shuffle
def __iter__(self):
lengths = np.array(
[(-l, np.random.random()) for l in self.lengths],
dtype=[('l1', np.int_), ('rand', np.float_)]
)
indices = np.argsort(lengths, order=('l1', 'rand'))
batches = [indices[i:i + self.batch_size]
for i in range(0, len(indices), self.batch_size)]
if self.shuffle:
np.random.shuffle(batches)
return iter([i for batch in batches for i in batch])
def __len__(self):
return len(self.lengths)
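# Hedged usage sketch (assumption): the pieces above are typically wired into a
# torch DataLoader, with SortedBatchSampler ordering indices so batches contain
# similar-length sentences and batchify collating the vectorized examples.
# `examples` and `model` are hypothetical objects from the surrounding project.
#
#     dataset = ACE05Dataset(examples, model, evaluation=False)
#     sampler = SortedBatchSampler(dataset.lengths(), batch_size=32, shuffle=True)
#     loader = torch.utils.data.DataLoader(dataset, batch_size=32,
#                                          sampler=sampler, collate_fn=batchify)
#     for batch in loader:
#         word_rep, labels = batch['word_rep'], batch['labels']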
| [
"logging.getLogger",
"numpy.random.random",
"torch.LongTensor",
"clie.objects.Sentence",
"io.open",
"numpy.argsort",
"numpy.array",
"json.load",
"numpy.random.shuffle"
] | [((266, 293), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (283, 293), False, 'import logging\n'), ((364, 431), 'io.open', 'io.open', (['file', '"""r"""'], {'encoding': '"""utf-8"""', 'newline': '"""\n"""', 'errors': '"""ignore"""'}), "(file, 'r', encoding='utf-8', newline='\\n', errors='ignore')\n", (371, 431), False, 'import io\n'), ((3566, 3589), 'torch.LongTensor', 'torch.LongTensor', (['words'], {}), '(words)\n', (3582, 3589), False, 'import torch\n'), ((3600, 3653), 'torch.LongTensor', 'torch.LongTensor', (['[model.pos_dict[p] for p in ex.pos]'], {}), '([model.pos_dict[p] for p in ex.pos])\n', (3616, 3653), False, 'import torch\n'), ((3664, 3717), 'torch.LongTensor', 'torch.LongTensor', (['[model.ner_dict[n] for n in ex.ner]'], {}), '([model.ner_dict[n] for n in ex.ner])\n', (3680, 3717), False, 'import torch\n'), ((3731, 3790), 'torch.LongTensor', 'torch.LongTensor', (['[model.deprel_dict[d] for d in ex.deprel]'], {}), '([model.deprel_dict[d] for d in ex.deprel])\n', (3747, 3790), False, 'import torch\n'), ((3844, 3869), 'torch.LongTensor', 'torch.LongTensor', (['ex.head'], {}), '(ex.head)\n', (3860, 3869), False, 'import torch\n'), ((3890, 3924), 'torch.LongTensor', 'torch.LongTensor', (['ex.subj_position'], {}), '(ex.subj_position)\n', (3906, 3924), False, 'import torch\n'), ((3944, 3977), 'torch.LongTensor', 'torch.LongTensor', (['ex.obj_position'], {}), '(ex.obj_position)\n', (3960, 3977), False, 'import torch\n'), ((4269, 4291), 'torch.LongTensor', 'torch.LongTensor', (['type'], {}), '(type)\n', (4285, 4291), False, 'import torch\n'), ((5928, 5956), 'torch.LongTensor', 'torch.LongTensor', (['batch_size'], {}), '(batch_size)\n', (5944, 5956), False, 'import torch\n'), ((580, 616), 'numpy.array', 'numpy.array', (['tokens[1:]'], {'dtype': 'float'}), '(tokens[1:], dtype=float)\n', (591, 616), False, 'import numpy\n'), ((1067, 1079), 'json.load', 'json.load', (['f'], {}), '(f)\n', (1076, 1079), False, 'import json\n'), ((3527, 3553), 'torch.LongTensor', 'torch.LongTensor', (['knn_word'], {}), '(knn_word)\n', (3543, 3553), False, 'import torch\n'), ((8131, 8172), 'numpy.argsort', 'np.argsort', (['lengths'], {'order': "('l1', 'rand')"}), "(lengths, order=('l1', 'rand'))\n", (8141, 8172), True, 'import numpy as np\n'), ((1289, 1307), 'clie.objects.Sentence', 'Sentence', (["ex['id']"], {}), "(ex['id'])\n", (1297, 1307), False, 'from clie.objects import Sentence\n'), ((5269, 5297), 'torch.LongTensor', 'torch.LongTensor', (['batch_size'], {}), '(batch_size)\n', (5285, 5297), False, 'import torch\n'), ((5333, 5370), 'torch.LongTensor', 'torch.LongTensor', (['batch_size', 'max_len'], {}), '(batch_size, max_len)\n', (5349, 5370), False, 'import torch\n'), ((5406, 5443), 'torch.LongTensor', 'torch.LongTensor', (['batch_size', 'max_len'], {}), '(batch_size, max_len)\n', (5422, 5443), False, 'import torch\n'), ((5486, 5523), 'torch.LongTensor', 'torch.LongTensor', (['batch_size', 'max_len'], {}), '(batch_size, max_len)\n', (5502, 5523), False, 'import torch\n'), ((5565, 5602), 'torch.LongTensor', 'torch.LongTensor', (['batch_size', 'max_len'], {}), '(batch_size, max_len)\n', (5581, 5602), False, 'import torch\n'), ((5637, 5674), 'torch.LongTensor', 'torch.LongTensor', (['batch_size', 'max_len'], {}), '(batch_size, max_len)\n', (5653, 5674), False, 'import torch\n'), ((5709, 5746), 'torch.LongTensor', 'torch.LongTensor', (['batch_size', 'max_len'], {}), '(batch_size, max_len)\n', (5725, 5746), False, 'import torch\n'), ((5784, 5821), 'torch.LongTensor', 
'torch.LongTensor', (['batch_size', 'max_len'], {}), '(batch_size, max_len)\n', (5800, 5821), False, 'import torch\n'), ((5857, 5894), 'torch.LongTensor', 'torch.LongTensor', (['batch_size', 'max_len'], {}), '(batch_size, max_len)\n', (5873, 5894), False, 'import torch\n'), ((8329, 8355), 'numpy.random.shuffle', 'np.random.shuffle', (['batches'], {}), '(batches)\n', (8346, 8355), True, 'import numpy as np\n'), ((1190, 1202), 'json.load', 'json.load', (['f'], {}), '(f)\n', (1199, 1202), False, 'import json\n'), ((6044, 6091), 'torch.LongTensor', 'torch.LongTensor', (['batch_size', 'max_len', 'knn_size'], {}), '(batch_size, max_len, knn_size)\n', (6060, 6091), False, 'import torch\n'), ((8002, 8020), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (8018, 8020), True, 'import numpy as np\n')] |
from distutils.extension import Extension
cmdclass = {}
try:
# with Cython
from Cython.Build import build_ext
cmdclass["build_ext"] = build_ext
module_src = "cgranges/python/cgranges.pyx"
except ImportError: # without Cython
module_src = "cgranges/python/cgranges.c"
def build(setup_kwargs):
"""
This function is mandatory in order to build the extensions.
"""
setup_kwargs.update(
{
"ext_modules": [
Extension(
"cgranges",
sources=[module_src, "cgranges/cgranges.c"],
depends=[
"cgranges/cgranges.h",
"cgranges/khash.h",
"cgranges/python/cgranges.pyx"
],
include_dirs=["cgranges"]
)
],
"cmdclass": cmdclass
}
)
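# Hedged usage note (assumption, not stated in this file): Poetry-style build
# scripts like this one are handed a dict of setup() keyword arguments; calling
# the hook manually would look roughly like:
#
#     kwargs = {}
#     build(kwargs)
#     # kwargs now carries "ext_modules" and "cmdclass" for setuptools.setup()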
| [
"distutils.extension.Extension"
] | [((479, 666), 'distutils.extension.Extension', 'Extension', (['"""cgranges"""'], {'sources': "[module_src, 'cgranges/cgranges.c']", 'depends': "['cgranges/cgranges.h', 'cgranges/khash.h', 'cgranges/python/cgranges.pyx']", 'include_dirs': "['cgranges']"}), "('cgranges', sources=[module_src, 'cgranges/cgranges.c'], depends=\n ['cgranges/cgranges.h', 'cgranges/khash.h',\n 'cgranges/python/cgranges.pyx'], include_dirs=['cgranges'])\n", (488, 666), False, 'from distutils.extension import Extension\n')] |
# coding: UTF-8
import time
import torch
import numpy as np
from train_eval import train, init_network
from importlib import import_module
import argparse
parser = argparse.ArgumentParser(description='Chinese Text Classification')
parser.add_argument('--model', type=str, required=True, help='choose a model: TextCNN')
parser.add_argument('--embedding', default='pre_trained', type=str, help='random or pre_trained')
parser.add_argument('--word', default=False, type=bool, help='True for word, False for char')
args = parser.parse_args()
if __name__ == '__main__':
    dataset = 'THUCNews'  # dataset
    # Sogou News: embedding_SougouNews.npz, Tencent: embedding_Tencent.npz, random init: random
# embedding = 'random'
model_name = args.model # TextCNN
from utils import build_dataset, build_iterator, get_time_dif
x = import_module('models.' + model_name)
from config import Config
config = Config(dataset)
np.random.seed(1)
torch.manual_seed(1)
torch.cuda.manual_seed_all(1)
    torch.backends.cudnn.deterministic = True  # make results reproducible across runs
start_time = time.time()
print("Loading data...")
vocab, train_data, dev_data, test_data = build_dataset(config, args.word)
train_iter = build_iterator(train_data, config)
dev_iter = build_iterator(dev_data, config)
test_iter = build_iterator(test_data, config)
time_dif = get_time_dif(start_time)
print("Time usage:", time_dif)
# train
config.n_vocab = len(vocab)
model = x.Model().to(config.device)
init_network(model)
print(model.parameters)
train(config, model, train_iter, dev_iter, test_iter)
| [
"torch.cuda.manual_seed_all",
"torch.manual_seed",
"utils.get_time_dif",
"importlib.import_module",
"argparse.ArgumentParser",
"config.Config",
"train_eval.init_network",
"train_eval.train",
"utils.build_iterator",
"utils.build_dataset",
"numpy.random.seed",
"time.time"
] | [((165, 231), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Chinese Text Classification"""'}), "(description='Chinese Text Classification')\n", (188, 231), False, 'import argparse\n'), ((820, 857), 'importlib.import_module', 'import_module', (["('models.' + model_name)"], {}), "('models.' + model_name)\n", (833, 857), False, 'from importlib import import_module\n'), ((901, 916), 'config.Config', 'Config', (['dataset'], {}), '(dataset)\n', (907, 916), False, 'from config import Config\n'), ((921, 938), 'numpy.random.seed', 'np.random.seed', (['(1)'], {}), '(1)\n', (935, 938), True, 'import numpy as np\n'), ((943, 963), 'torch.manual_seed', 'torch.manual_seed', (['(1)'], {}), '(1)\n', (960, 963), False, 'import torch\n'), ((968, 997), 'torch.cuda.manual_seed_all', 'torch.cuda.manual_seed_all', (['(1)'], {}), '(1)\n', (994, 997), False, 'import torch\n'), ((1074, 1085), 'time.time', 'time.time', ([], {}), '()\n', (1083, 1085), False, 'import time\n'), ((1160, 1192), 'utils.build_dataset', 'build_dataset', (['config', 'args.word'], {}), '(config, args.word)\n', (1173, 1192), False, 'from utils import build_dataset, build_iterator, get_time_dif\n'), ((1210, 1244), 'utils.build_iterator', 'build_iterator', (['train_data', 'config'], {}), '(train_data, config)\n', (1224, 1244), False, 'from utils import build_dataset, build_iterator, get_time_dif\n'), ((1260, 1292), 'utils.build_iterator', 'build_iterator', (['dev_data', 'config'], {}), '(dev_data, config)\n', (1274, 1292), False, 'from utils import build_dataset, build_iterator, get_time_dif\n'), ((1309, 1342), 'utils.build_iterator', 'build_iterator', (['test_data', 'config'], {}), '(test_data, config)\n', (1323, 1342), False, 'from utils import build_dataset, build_iterator, get_time_dif\n'), ((1358, 1382), 'utils.get_time_dif', 'get_time_dif', (['start_time'], {}), '(start_time)\n', (1370, 1382), False, 'from utils import build_dataset, build_iterator, get_time_dif\n'), ((1507, 1526), 'train_eval.init_network', 'init_network', (['model'], {}), '(model)\n', (1519, 1526), False, 'from train_eval import train, init_network\n'), ((1559, 1612), 'train_eval.train', 'train', (['config', 'model', 'train_iter', 'dev_iter', 'test_iter'], {}), '(config, model, train_iter, dev_iter, test_iter)\n', (1564, 1612), False, 'from train_eval import train, init_network\n')] |
#!/usr/bin/python3
# -*- coding: UTF-8 -*-
import time
busyTime = 10    # busy phase length, in milliseconds
idleTime = busyTime    # intended idle phase length (same units), ~50% CPU duty cycle
while True:
    # time.clock() returns seconds (and was removed in Python 3.8, where
    # time.perf_counter() is the drop-in replacement), so busyTime is converted
    # from milliseconds before the comparison.
    start = time.clock()
    while time.clock() - start < busyTime / 1000:
        pass
    time.sleep(busyTime / 1000)
| [
"time.sleep",
"time.clock"
] | [((117, 129), 'time.clock', 'time.clock', ([], {}), '()\n', (127, 129), False, 'import time\n'), ((190, 217), 'time.sleep', 'time.sleep', (['(busyTime / 1000)'], {}), '(busyTime / 1000)\n', (200, 217), False, 'import time\n'), ((140, 152), 'time.clock', 'time.clock', ([], {}), '()\n', (150, 152), False, 'import time\n')] |
"""
Module for working with named and anonymous maps
.. module:: carto.maps
:platform: Unix, Windows
:synopsis: Module for working with named and anonymous maps
.. moduleauthor:: <NAME> <<EMAIL>>
.. moduleauthor:: <NAME> <<EMAIL>>
"""
try:
from urllib.parse import urljoin
except ImportError:
from urlparse import urljoin
from pyrestcli.resources import Manager, Resource
from .exceptions import CartoException, CartoRateLimitException
API_VERSION = "v1"
NAMED_API_ENDPOINT = "api/{api_version}/map/named/"
ANONYMOUS_API_ENDPOINT = "api/{api_version}/map/"
class BaseMap(Resource):
"""
Base class for NamedMap and AnonymousMap
"""
def __init__(self, auth_client):
"""
Initializes a BaseMap instance
:param auth_client: Auth client
"""
super(BaseMap, self).__init__(auth_client)
def get_tile_url(self, x, y, z, layer_id=None, feature_id=None,
filter=None, extension="png"):
"""
Prepares a URL to get data (raster or vector) from a NamedMap or
AnonymousMap
:param x: The x tile
:param y: The y tile
:param z: The zoom level
:param layer_id: Can be a number (referring to the # layer of your \
map), all layers of your map, or a list of layers.
To show just the basemap layer, enter the value 0
To show the first layer, enter the value 1
To show all layers, enter the value 'all'
To show a list of layers, enter the comma separated \
layer value as '0,1,2'
:param feature_id: The id of the feature
:param filter: The filter to be applied to the layer
:param extension: The format of the data to be retrieved: png, mvt, ...
:type x: int
:type y: int
:type z: int
:type layer_id: str
:type feature_id: str
:type filter: str
:type extension: str
:return: A URL to download data
:rtype: str
:raise: CartoException
"""
base_url = self.client.base_url + self.Meta.collection_endpoint
template_id = self.template_id if hasattr(self, 'template_id') \
else self.layergroupid
if layer_id is not None and feature_id is not None:
url = urljoin(base_url,
"{template_id}/{layer}/attributes/{feature_id}"). \
format(template_id=template_id,
layer=layer_id,
feature_id=feature_id)
elif layer_id is not None and filter is not None:
url = urljoin(base_url,
"{template_id}/{filter}/{z}/{x}/{y}.{extension}"). \
format(template_id=template_id,
filter=filter,
z=z, x=x, y=y,
extension=extension)
elif layer_id is not None:
url = urljoin(base_url,
"{template_id}/{layer}/{z}/{x}/{y}.{extension}"). \
format(template_id=template_id,
layer=layer_id,
z=z, x=x, y=y,
extension=extension)
else:
url = urljoin(base_url, "{template_id}/{z}/{x}/{y}.{extension}"). \
format(
template_id=template_id,
z=z, x=x, y=y,
extension=extension)
if hasattr(self, 'auth') and self.auth is not None \
and len(self.auth['valid_tokens']) > 0:
url = urljoin(url, "?auth_token={auth_token}"). \
format(auth_token=self.auth['valid_tokens'][0])
return url
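    # Hedged usage sketch (not part of the library source): fetching a raster
    # tile for layer 1 of a map that has already been created/instantiated.
    # `named` stands for any NamedMap or AnonymousMap instance.
    #
    #     tile_url = named.get_tile_url(x=301, y=384, z=10, layer_id=1,
    #                                   extension="png")
    #     # the URL can then be downloaded with any HTTP client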
class NamedMap(BaseMap):
"""
Equivalent to creating a named map in CARTO.
"""
class Meta:
collection_endpoint = NAMED_API_ENDPOINT.format(
api_version=API_VERSION)
id_field = "template_id"
name_field = "name"
def __str__(self):
try:
return unicode(self.name).encode("utf-8")
        except (AttributeError, NameError):
            # Python 3 has no unicode() builtin, so fall back to the default repr
            return super(NamedMap, self).__repr__()
def __init__(self, auth_client):
"""
Initializes a NamedMap instance
:param auth_client: Auth client
"""
self.fields = ["version",
"name",
"auth",
"placeholders",
"layergroup",
"view"]
        # Optional fields can be assigned from the responses of some calls
        # (create, instantiate), but are not saved to the backend
self.optional_fields = ["template_id", "layergroupid", "last_updated"]
super(NamedMap, self).__init__(auth_client)
def instantiate(self, params, auth=None):
"""
Allows you to fetch the map tiles of a created map
:param params: The json with the styling info for the named map
:param auth: The auth client
:type params: dict
:type auth: :class:`carto.auth.APIKeyAuthClient`
:return:
:raise: CartoException
"""
try:
endpoint = (self.Meta.collection_endpoint
+ "{template_id}"). \
format(template_id=self.template_id)
if (auth is not None):
endpoint = (endpoint + "?auth_token={auth_token}"). \
format(auth_token=auth)
self.send(endpoint, "POST", json=params)
except CartoRateLimitException as e:
raise e
except Exception as e:
raise CartoException(e)
def update_from_dict(self, attribute_dict):
"""
Method overriden from the base class
"""
if 'template' in attribute_dict:
self.update_from_dict(attribute_dict['template'])
setattr(self,
self.Meta.id_field, attribute_dict['template']['name'])
return
try:
for k, v in attribute_dict.items():
if k in self.fields + self.optional_fields:
setattr(self, k, v)
except Exception:
setattr(self, self.Meta.id_field, attribute_dict)
class AnonymousMap(BaseMap):
"""
Equivalent to creating an anonymous map in CARTO.
"""
class Meta:
collection_endpoint = ANONYMOUS_API_ENDPOINT.format(
api_version=API_VERSION)
def __init__(self, auth_client):
"""
Initializes an AnonymousMap instance
:param auth_client: Auth client
"""
self.optional_fields = ['cdn_url', 'last_updated', 'layergroupid', 'metadata']
super(AnonymousMap, self).__init__(auth_client)
def instantiate(self, params):
"""
Allows you to fetch the map tiles of a created map
:param params: The json with the styling info for the named map
:type params: dict
:return:
:raise: CartoException
"""
try:
self.send(self.Meta.collection_endpoint, "POST", json=params)
except CartoRateLimitException as e:
raise e
except Exception as e:
raise CartoException(e)
def update_from_dict(self, attribute_dict):
for k, v in attribute_dict.items():
if k in self.fields + self.optional_fields:
setattr(self, k, v)
class NamedMapManager(Manager):
"""
Manager for the NamedMap class
"""
resource_class = NamedMap
json_collection_attribute = "template_ids"
def create(self, **kwargs):
"""
Creates a named map
:param kwargs: Attributes for creating the named map. Specifically
an attribute `template` must contain the JSON object
defining the named map
:type kwargs: kwargs
:return: New named map object
:rtype: NamedMap
:raise: CartoException
"""
resource = self.resource_class(self.client)
resource.update_from_dict(kwargs['template'])
resource.save(force_create=True)
return resource
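    # Hedged usage sketch (assumption): creating and instantiating a named map
    # from a template dict. `auth_client` and `template_json` are hypothetical
    # placeholders for an authenticated client and a named-map template.
    #
    #     named_map_manager = NamedMapManager(auth_client)
    #     named_map = named_map_manager.create(template=template_json)
    #     named_map.instantiate({"some_placeholder": "value"})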
| [
"urlparse.urljoin"
] | [((2391, 2457), 'urlparse.urljoin', 'urljoin', (['base_url', '"""{template_id}/{layer}/attributes/{feature_id}"""'], {}), "(base_url, '{template_id}/{layer}/attributes/{feature_id}')\n", (2398, 2457), False, 'from urlparse import urljoin\n'), ((3802, 3842), 'urlparse.urljoin', 'urljoin', (['url', '"""?auth_token={auth_token}"""'], {}), "(url, '?auth_token={auth_token}')\n", (3809, 3842), False, 'from urlparse import urljoin\n'), ((2726, 2793), 'urlparse.urljoin', 'urljoin', (['base_url', '"""{template_id}/{filter}/{z}/{x}/{y}.{extension}"""'], {}), "(base_url, '{template_id}/{filter}/{z}/{x}/{y}.{extension}')\n", (2733, 2793), False, 'from urlparse import urljoin\n'), ((3084, 3150), 'urlparse.urljoin', 'urljoin', (['base_url', '"""{template_id}/{layer}/{z}/{x}/{y}.{extension}"""'], {}), "(base_url, '{template_id}/{layer}/{z}/{x}/{y}.{extension}')\n", (3091, 3150), False, 'from urlparse import urljoin\n'), ((3421, 3479), 'urlparse.urljoin', 'urljoin', (['base_url', '"""{template_id}/{z}/{x}/{y}.{extension}"""'], {}), "(base_url, '{template_id}/{z}/{x}/{y}.{extension}')\n", (3428, 3479), False, 'from urlparse import urljoin\n')] |
from kv_client.kv_client import KVClient
def main():
kvSlave = KVClient(1, "127.0.0.1", 3456)
kvSlave.start()
if __name__ == "__main__":
main() | [
"kv_client.kv_client.KVClient"
] | [((68, 98), 'kv_client.kv_client.KVClient', 'KVClient', (['(1)', '"""127.0.0.1"""', '(3456)'], {}), "(1, '127.0.0.1', 3456)\n", (76, 98), False, 'from kv_client.kv_client import KVClient\n')] |
"""Python interfaces to DGL farthest point sampler."""
from dgl._ffi.base import DGLError
import numpy as np
from .._ffi.function import _init_api
from .. import backend as F
from .. import ndarray as nd
def _farthest_point_sampler(data, batch_size, sample_points, dist, start_idx, result):
r"""Farthest Point Sampler
Parameters
----------
data : tensor
A tensor of shape (N, d) where N is the number of points and d is the dimension.
batch_size : int
The number of batches in the ``data``. N should be divisible by batch_size.
sample_points : int
The number of points to sample in each batch.
dist : tensor
        Pre-allocated tensor of shape (N, ) holding each point's distance to the sampled set.
start_idx : tensor of int
Pre-allocated tensor of shape (batch_size, ) for the starting sample in each batch.
result : tensor of int
Pre-allocated tensor of shape (sample_points * batch_size, ) for the sampled index.
Returns
-------
    No return value. The input variable ``result`` will be overwritten with the sampled indices.
"""
assert F.shape(data)[0] >= sample_points * batch_size
assert F.shape(data)[0] % batch_size == 0
_CAPI_FarthestPointSampler(F.zerocopy_to_dgl_ndarray(data),
batch_size, sample_points,
F.zerocopy_to_dgl_ndarray(dist),
F.zerocopy_to_dgl_ndarray(start_idx),
F.zerocopy_to_dgl_ndarray(result))
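# Hedged usage sketch (assumption, mirroring how the public dgl wrapper calls
# this helper): every output buffer is pre-allocated by the caller and then
# overwritten in place; shapes follow the docstring above and the names are
# made up.
#
#     B, K = 2, 64                          # batches and samples per batch
#     ctx = F.context(data)
#     dist = F.zeros((F.shape(data)[0],), F.dtype(data), ctx)
#     start_idx = F.zeros((B,), F.int64, ctx)
#     result = F.zeros((B * K,), F.int64, ctx)
#     _farthest_point_sampler(data, B, K, dist, start_idx, result)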
def _neighbor_matching(graph_idx, num_nodes, edge_weights=None, relabel_idx=True):
"""
Description
-----------
The neighbor matching procedure of edge coarsening used in
`Metis <http://cacs.usc.edu/education/cs653/Karypis-METIS-SIAMJSC98.pdf>`__
and
`Graclus <https://www.cs.utexas.edu/users/inderjit/public_papers/multilevel_pami.pdf>`__
for homogeneous graph coarsening. This procedure keeps picking an unmarked
vertex and matching it with one its unmarked neighbors (that maximizes its
edge weight) until no match can be done.
If no edge weight is given, this procedure will randomly pick neighbor for each
vertex.
The GPU implementation is based on `A GPU Algorithm for Greedy Graph Matching
<http://www.staff.science.uu.nl/~bisse101/Articles/match12.pdf>`__
NOTE: The input graph must be bi-directed (undirected) graph. Call :obj:`dgl.to_bidirected`
if you are not sure your graph is bi-directed.
Parameters
----------
    graph_idx : HeteroGraphIndex
The input homogeneous graph.
num_nodes : int
The number of nodes in this homogeneous graph.
    edge_weights : tensor, optional
The edge weight tensor holding non-negative scalar weight for each edge.
default: :obj:`None`
relabel_idx : bool, optional
If true, relabel resulting node labels to have consecutive node ids.
default: :obj:`True`
Returns
-------
a 1-D tensor
A vector with each element that indicates the cluster ID of a vertex.
"""
edge_weight_capi = nd.NULL["int64"]
if edge_weights is not None:
edge_weight_capi = F.zerocopy_to_dgl_ndarray(edge_weights)
node_label = F.full_1d(
num_nodes, -1, getattr(F, graph_idx.dtype), F.to_backend_ctx(graph_idx.ctx))
node_label_capi = F.zerocopy_to_dgl_ndarray_for_write(node_label)
_CAPI_NeighborMatching(graph_idx, edge_weight_capi, node_label_capi)
if F.reduce_sum(node_label < 0).item() != 0:
raise DGLError("Find unmatched node")
# reorder node id
# TODO: actually we can add `return_inverse` option for `unique`
# function in backend for efficiency.
if relabel_idx:
node_label_np = F.zerocopy_to_numpy(node_label)
_, node_label_np = np.unique(node_label_np, return_inverse=True)
return F.tensor(node_label_np)
else:
return node_label
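# Illustrative note (assumption): for a bi-directed path graph 0-1-2-3 with no
# edge weights, one valid matching pairs vertices (0, 1) and (2, 3); with
# relabel_idx=True the returned vector would then look like [0, 0, 1, 1], i.e.
# matched vertices share a cluster id.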
_init_api('dgl.geometry', __name__)
| [
"dgl._ffi.base.DGLError",
"numpy.unique"
] | [((3538, 3569), 'dgl._ffi.base.DGLError', 'DGLError', (['"""Find unmatched node"""'], {}), "('Find unmatched node')\n", (3546, 3569), False, 'from dgl._ffi.base import DGLError\n'), ((3813, 3858), 'numpy.unique', 'np.unique', (['node_label_np'], {'return_inverse': '(True)'}), '(node_label_np, return_inverse=True)\n', (3822, 3858), True, 'import numpy as np\n')] |
import shutil
import hashlib
from pathlib import Path
from typing import TextIO, BinaryIO, IO, Union
from datetime import datetime
from os.path import getmtime
from .low import ObservableDict
class Data:
def __init__(self, data_name: str, parent, bucket,
protected_parent_methods: Union[None, dict] = None):
self.__data_name__ = data_name
self.__parent__ = parent
self.__bucket__ = bucket
self.__protected_parent_methods__ = protected_parent_methods
self.__protected_parent_methods__['increase_data_count']()
self.init_metadata()
self.init_properties()
@property
def database(self):
return self.__bucket__.db
@property
def db(self):
return self.__bucket__.db
@property
def bucket(self):
return self.__bucket__
def init_metadata(self):
if self.__data_name__ not in self.__parent__.metadata:
self.__parent__.metadata[self.__data_name__] = dict()
def init_properties(self):
if self.__data_name__ not in self.__parent__.properties:
self.__parent__.properties[self.__data_name__] = dict()
def set_metadata(self, metadata: Union[None, dict], merge: bool = True):
if metadata is None:
return
if merge:
metadata = {**self.metadata, **metadata}
self.__parent__.metadata[self.__data_name__] = metadata
def set_properties(self, properties: Union[None, dict], merge: bool = True):
if properties is None:
return
if merge:
properties = {**self.properties, **properties}
self.__parent__.properties[self.__data_name__] = properties
@property
def parent(self):
return self.__parent__
@property
def path(self) -> Path:
return self.__parent__.path / self.__data_name__
@property
def name(self) -> str:
return self.__data_name__
@property
def metadata(self) -> ObservableDict:
return self.__parent__.metadata[self.__data_name__]
@property
def properties(self) -> ObservableDict:
return self.__parent__.properties[self.__data_name__]
def rename(self, new_name: str):
shutil.move(str(self.path), str(self.__parent__.path / new_name))
self.__data_name__ = new_name
def reader(self, binary: bool = False, **kwargs) -> [IO, BinaryIO, TextIO, None]:
mode = 'r'
mode += 'b' if binary else ''
return open(str(self.path), mode=mode, **kwargs)
def creator(self,
binary: bool = False,
confirm: bool = False,
feedback: bool = False,
**kwargs) -> [IO, BinaryIO, TextIO, None]:
if confirm and not feedback:
return None
mode = 'x'
mode += 'b' if binary else ''
return open(str(self.path), mode=mode, **kwargs)
def writer(self,
binary: bool = False,
append: bool = True,
allow_overwrite: bool = False,
confirm: bool = True,
feedback: bool = False,
**kwargs) -> [IO, BinaryIO, TextIO, None]:
if not allow_overwrite and not append:
raise PermissionError('Trying to overwrite existed data.')
if confirm and not feedback:
return
mode = 'a' if append else 'w'
mode += 'b' if binary else ''
return open(str(self.path), mode=mode, **kwargs)
def __repr__(self):
return f"Data('{self.__data_name__}')"
def import_file(self, src_path: [str, Path], allow_overwrite=False, confirm=True, feedback=False):
if self.path.exists() and not allow_overwrite:
return
if confirm and not feedback:
return
shutil.copyfile(str(src_path), str(self.path))
def export_file(self, dst_path: [str, Path], allow_overwrite=False):
if Path(dst_path).exists() and not allow_overwrite:
return
shutil.copyfile(str(self.path), str(dst_path))
def __calc_hash__(self, h, buffer_size: int = 131072):
if not self.path.exists():
return None
with open(str(self.path), 'rb') as file_reader:
while True:
data = file_reader.read(buffer_size)
if not data:
break
h.update(data)
return h.hexdigest()
def md5(self, buffer_size: int = 131072, require_update: bool = False) -> [str, None]:
if not self.path.exists():
return None
last_modified_time = getmtime(str(self.path))
if require_update \
or 'md5' not in self.metadata \
or 'md5-timestamp' not in self.metadata \
or self.metadata['md5-timestamp'] < last_modified_time:
result = self.__calc_hash__(hashlib.md5(), buffer_size)
self.metadata['md5'] = result
self.metadata['md5-timestamp'] = datetime.now().timestamp()
return result
else:
return self.metadata['md5']
def sha1(self, buffer_size: int = 131072, require_update: bool = False) -> [str, None]:
if not self.path.exists():
return None
last_modified_time = getmtime(str(self.path))
if require_update \
or 'sha1' not in self.metadata \
or 'sha1-timestamp' not in self.metadata \
or self.metadata['sha1-timestamp'] < last_modified_time:
result = self.__calc_hash__(hashlib.sha1(), buffer_size)
self.metadata['sha1'] = result
self.metadata['sha1-timestamp'] = datetime.now().timestamp()
return result
else:
return self.metadata['sha1']
def sha256(self, buffer_size: int = 131072, require_update: bool = False) -> [str, None]:
if not self.path.exists():
return None
last_modified_time = getmtime(str(self.path))
if require_update \
or 'sha256' not in self.metadata \
or 'sha256-timestamp' not in self.metadata \
or self.metadata['sha256-timestamp'] < last_modified_time:
result = self.__calc_hash__(hashlib.sha256(), buffer_size)
self.metadata['sha256'] = result
self.metadata['sha256-timestamp'] = datetime.now().timestamp()
return result
else:
return self.metadata['sha256']
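    # Hedged usage sketch (not part of the original class): the hash helpers
    # cache their digests in metadata and only recompute when the file's
    # modification time is newer than the stored '*-timestamp' entry.
    #
    #     digest = data.md5()                    # hashed once, then cached
    #     digest_again = data.md5()              # served from metadata
    #     fresh = data.md5(require_update=True)  # force re-hashing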
| [
"hashlib.sha256",
"hashlib.md5",
"pathlib.Path",
"datetime.datetime.now",
"hashlib.sha1"
] | [((4878, 4891), 'hashlib.md5', 'hashlib.md5', ([], {}), '()\n', (4889, 4891), False, 'import hashlib\n'), ((5555, 5569), 'hashlib.sha1', 'hashlib.sha1', ([], {}), '()\n', (5567, 5569), False, 'import hashlib\n'), ((6244, 6260), 'hashlib.sha256', 'hashlib.sha256', ([], {}), '()\n', (6258, 6260), False, 'import hashlib\n'), ((3937, 3951), 'pathlib.Path', 'Path', (['dst_path'], {}), '(dst_path)\n', (3941, 3951), False, 'from pathlib import Path\n'), ((4993, 5007), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (5005, 5007), False, 'from datetime import datetime\n'), ((5673, 5687), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (5685, 5687), False, 'from datetime import datetime\n'), ((6368, 6382), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (6380, 6382), False, 'from datetime import datetime\n')] |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Version 2 of class Optimizer."""
# pylint: disable=g-bad-name
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import functools
import six
from tensorflow.python.distribute import distribution_strategy_context as distribute_ctx
from tensorflow.python.distribute import reduce_util as ds_reduce_util
from tensorflow.python.eager import backprop
from tensorflow.python.eager import context
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_util
from tensorflow.python.keras import backend
from tensorflow.python.keras import initializers
from tensorflow.python.keras.engine import base_layer_utils
from tensorflow.python.keras.optimizer_v2 import learning_rate_schedule
from tensorflow.python.keras.utils import tf_utils
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import clip_ops
from tensorflow.python.ops import gradients
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variables as tf_variables
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.saved_model import revived_types
from tensorflow.python.training.tracking import base as trackable
from tensorflow.python.util import nest
from tensorflow.python.util.tf_export import keras_export
def _deduplicate_indexed_slices(values, indices):
"""Sums `values` associated with any non-unique `indices`.
Args:
values: A `Tensor` with rank >= 1.
indices: A one-dimensional integer `Tensor`, indexing into the first
dimension of `values` (as in an IndexedSlices object).
Returns:
A tuple of (`summed_values`, `unique_indices`) where `unique_indices` is a
de-duplicated version of `indices` and `summed_values` contains the sum of
`values` slices associated with each unique index.
"""
unique_indices, new_index_positions = array_ops.unique(indices)
summed_values = math_ops.unsorted_segment_sum(
values, new_index_positions,
array_ops.shape(unique_indices)[0])
return (summed_values, unique_indices)
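# Worked example (illustration only): with values=[[1.], [2.], [3.]] and
# indices=[0, 2, 0], the two slices that hit index 0 are summed, so this helper
# returns summed_values=[[4.], [2.]] and unique_indices=[0, 2].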
@six.add_metaclass(abc.ABCMeta)
@keras_export("keras.optimizers.Optimizer")
class OptimizerV2(trackable.Trackable):
"""Updated base class for optimizers.
This class defines the API to add Ops to train a model. You never use this
class directly, but instead instantiate one of its subclasses such as
`tf.keras.optimizers.SGD`, `tf.keras.optimizers.Adam`.
### Usage
```python
# Create an optimizer with the desired parameters.
opt = tf.keras.optimizers.SGD(learning_rate=0.1)
# `loss` is a callable that takes no argument and returns the value
# to minimize.
loss = lambda: 3 * var1 * var1 + 2 * var2 * var2
# In graph mode, returns op that minimizes the loss by updating the listed
# variables.
opt_op = opt.minimize(loss, var_list=[var1, var2])
opt_op.run()
# In eager mode, simply call minimize to update the list of variables.
opt.minimize(loss, var_list=[var1, var2])
```
### Custom training loop with Keras models
In Keras models, sometimes variables are created when the model is first
called, instead of construction time. Examples include 1) sequential models
without input shape pre-defined, or 2) subclassed models. Pass var_list as
callable in these cases.
Example:
```python
opt = tf.keras.optimizers.SGD(learning_rate=0.1)
model = tf.keras.Sequential()
model.add(tf.keras.layers.Dense(num_hidden, activation='relu'))
  model.add(tf.keras.layers.Dense(num_classes, activation='sigmoid'))
loss_fn = lambda: tf.keras.losses.mse(model(input), output)
var_list_fn = lambda: model.trainable_weights
for input, output in data:
opt.minimize(loss_fn, var_list_fn)
```
### Processing gradients before applying them.
Calling `minimize()` takes care of both computing the gradients and
applying them to the variables. If you want to process the gradients
before applying them you can instead use the optimizer in three steps:
1. Compute the gradients with `tf.GradientTape`.
2. Process the gradients as you wish.
3. Apply the processed gradients with `apply_gradients()`.
Example:
```python
# Create an optimizer.
opt = tf.keras.optimizers.SGD(learning_rate=0.1)
# Compute the gradients for a list of variables.
with tf.GradientTape() as tape:
loss = <call_loss_function>
vars = <list_of_variables>
grads = tape.gradient(loss, vars)
processed_grads = [process_gradient(g) for g in grads]
  grads_and_vars = zip(processed_grads, vars)
# grads_and_vars is a list of tuples (gradient, variable). Do whatever you
# need to the 'gradient' part, for example cap them, etc.
capped_grads_and_vars = [(MyCapper(gv[0]), gv[1]) for gv in grads_and_vars]
# Ask the optimizer to apply the capped gradients.
opt.apply_gradients(capped_grads_and_vars)
```
### Use with `tf.distribute.Strategy`.
This optimizer class is `tf.distribute.Strategy` aware, which means it
automatically sums gradients across all replicas. To average gradients,
you divide your loss by the global batch size, which is done
automatically if you use `tf.keras` built-in training or evaluation loops.
See the `reduction` argument of your loss which should be set to
`tf.keras.losses.Reduction.SUM_OVER_BATCH_SIZE` for averaging or
`tf.keras.losses.Reduction.SUM` for not.
If you are not using these and you want to average gradients, you should use
`tf.math.reduce_sum` to add up your per-example losses and then divide by the
global batch size. Note that when using `tf.distribute.Strategy`, the first
component of a tensor's shape is the *replica-local* batch size, which is off
by a factor equal to the number of replicas being used to compute a single
step. As a result, using `tf.math.reduce_mean` will give the wrong answer,
resulting in gradients that can be many times too big.
### Variable Constraint
All Keras optimizers respect variable constraints. If constraint function is
passed to any variable, the constraint will be applied to the variable after
the gradient has been applied to the variable.
Important: If gradient is sparse tensor, variable constraint is not supported.
### Thread Compatibility
The entire optimizer is currently thread compatible, not thread-safe. The user
needs to perform synchronization if necessary.
### Slots
Many optimizer subclasses, such as `Adam` and `Adagrad` allocate and manage
additional variables associated with the variables to train. These are called
<i>Slots</i>. Slots have names and you can ask the optimizer for the names of
the slots that it uses. Once you have a slot name you can ask the optimizer
for the variable it created to hold the slot value.
This can be useful if you want to log debug a training algorithm, report stats
about the slots, etc.
### Hyper parameters
These are arguments passed to the optimizer subclass constructor
(the `__init__` method), and then passed to `self._set_hyper()`.
They can be either regular Python values (like 1.0), tensors, or
callables. If they are callable, the callable will be called during
`apply_gradients()` to get the value for the hyper parameter.
Hyper parameters can be overwritten through user code:
Example:
```python
# Create an optimizer with the desired parameters.
opt = tf.keras.optimizers.SGD(learning_rate=0.1)
# `loss` is a callable that takes no argument and returns the value
# to minimize.
loss = lambda: 3 * var1 + 2 * var2
# In eager mode, simply call minimize to update the list of variables.
opt.minimize(loss, var_list=[var1, var2])
# update learning rate
opt.learning_rate = 0.05
opt.minimize(loss, var_list=[var1, var2])
```
### Write a customized optimizer.
If you intend to create your own optimization algorithm, simply inherit from
this class and override the following methods:
- resource_apply_dense (update variable given gradient tensor is dense)
- resource_apply_sparse (update variable given gradient tensor is sparse)
- create_slots (if your optimizer algorithm requires additional variables)
- get_config (serialization of the optimizer, include all hyper parameters)
"""
def __init__(self, name, **kwargs):
"""Create a new Optimizer.
This must be called by the constructors of subclasses.
Note that Optimizer instances should not bind to a single graph,
and so shouldn't keep Tensors as member variables. Generally
you should be able to use the _set_hyper()/state.get_hyper()
facility instead.
    This class is stateful and thread-compatible.
Args:
name: A non-empty string. The name to use for accumulators created
for the optimizer.
**kwargs: keyword arguments. Allowed to be {`clipnorm`, `clipvalue`, `lr`,
`decay`}. `clipnorm` is clip gradients by norm; `clipvalue` is clip
gradients by value, `decay` is included for backward compatibility to
allow time inverse decay of learning rate. `lr` is included for backward
compatibility, recommended to use `learning_rate` instead.
Raises:
ValueError: If name is malformed.
RuntimeError: If _create_slots has been overridden instead of
_create_vars.
"""
allowed_kwargs = {"clipnorm", "clipvalue", "lr", "decay"}
for k in kwargs:
if k not in allowed_kwargs:
raise TypeError("Unexpected keyword argument "
"passed to optimizer: " + str(k))
# checks that all keyword arguments are non-negative.
if kwargs[k] < 0:
raise ValueError("Expected {} >= 0, received: {}".format(k, kwargs[k]))
self._use_locking = True
self._name = name
self._hyper = {}
# dict: {variable name : {slot name : variable}}
self._slots = {}
self._slot_names = []
self._weights = []
self._iterations = None
# For implementing Trackable. Stores information about how to restore
# slot variables which have not yet been created
# (trackable._CheckpointPosition objects).
# {slot_name :
# {_var_key(variable_to_train): [checkpoint_position, ... ], ... },
# ... }
self._deferred_slot_restorations = {}
decay = kwargs.pop("decay", 0.0)
if decay < 0.:
raise ValueError("decay cannot be less than 0: {}".format(decay))
self._initial_decay = decay
if "clipnorm" in kwargs:
self.clipnorm = kwargs.pop("clipnorm")
if "clipvalue" in kwargs:
self.clipvalue = kwargs.pop("clipvalue")
self._hypers_created = False
def minimize(self, loss, var_list, grad_loss=None, name=None):
"""Minimize `loss` by updating `var_list`.
This method simply computes gradient using `tf.GradientTape` and calls
`apply_gradients()`. If you want to process the gradient before applying
then call `tf.GradientTape` and `apply_gradients()` explicitly instead
of using this function.
Args:
loss: A callable taking no arguments which returns the value to minimize.
var_list: list or tuple of `Variable` objects to update to minimize
`loss`, or a callable returning the list or tuple of `Variable` objects.
Use callable when the variable list would otherwise be incomplete before
`minimize` since the variables are created at the first time `loss` is
called.
grad_loss: Optional. A `Tensor` holding the gradient computed for `loss`.
name: Optional name for the returned operation.
Returns:
      An Operation that updates the variables in `var_list`. The optimizer's
      `iterations` counter is incremented as part of applying the gradients.
Raises:
ValueError: If some of the variables are not `Variable` objects.
"""
grads_and_vars = self._compute_gradients(
loss, var_list=var_list, grad_loss=grad_loss)
return self.apply_gradients(grads_and_vars, name=name)
def _compute_gradients(self, loss, var_list, grad_loss=None):
"""Compute gradients of `loss` for the variables in `var_list`.
This is the first part of `minimize()`. It returns a list
of (gradient, variable) pairs where "gradient" is the gradient
for "variable". Note that "gradient" can be a `Tensor`, an
`IndexedSlices`, or `None` if there is no gradient for the
given variable.
Args:
loss: A callable taking no arguments which returns the value to minimize.
var_list: list or tuple of `Variable` objects to update to minimize
`loss`, or a callable returning the list or tuple of `Variable` objects.
Use callable when the variable list would otherwise be incomplete before
`minimize` and the variables are created at the first time when `loss`
is called.
grad_loss: Optional. A `Tensor` holding the gradient computed for `loss`.
Returns:
A list of (gradient, variable) pairs. Variable is always present, but
gradient can be `None`.
Raises:
TypeError: If `var_list` contains anything else than `Variable` objects.
ValueError: If some arguments are invalid, or var_list is None.
"""
# TODO(josh11b): Test that we handle weight decay in a reasonable way.
with backprop.GradientTape() as tape:
if not callable(var_list):
tape.watch(var_list)
loss_value = loss()
if callable(var_list):
var_list = var_list()
var_list = nest.flatten(var_list)
grads = tape.gradient(loss_value, var_list, grad_loss)
if hasattr(self, "clipnorm"):
grads = [clip_ops.clip_by_norm(g, self.clipnorm) for g in grads]
if hasattr(self, "clipvalue"):
grads = [
clip_ops.clip_by_value(g, -self.clipvalue, self.clipvalue)
for g in grads
]
grads_and_vars = list(zip(grads, var_list))
self._assert_valid_dtypes([
v for g, v in grads_and_vars
if g is not None and v.dtype != dtypes.resource
])
return grads_and_vars
def get_gradients(self, loss, params):
"""Returns gradients of `loss` with respect to `params`.
Arguments:
loss: Loss tensor.
params: List of variables.
Returns:
List of gradient tensors.
Raises:
ValueError: In case any gradient cannot be computed (e.g. if gradient
function not implemented).
"""
params = nest.flatten(params)
with backend.get_graph().as_default():
grads = gradients.gradients(loss, params)
for grad, param in zip(grads, params):
if grad is None:
raise ValueError("Variable {} has `None` for gradient. "
"Please make sure that all of your ops have a "
"gradient defined (i.e. are differentiable). "
"Common ops without gradient: "
"K.argmax, K.round, K.eval.".format(param))
if hasattr(self, "clipnorm"):
grads = [clip_ops.clip_by_norm(g, self.clipnorm) for g in grads]
if hasattr(self, "clipvalue"):
grads = [
clip_ops.clip_by_value(g, -self.clipvalue, self.clipvalue)
for g in grads
]
return grads
def apply_gradients(self, grads_and_vars, name=None):
"""Apply gradients to variables.
This is the second part of `minimize()`. It returns an `Operation` that
applies gradients.
Args:
grads_and_vars: List of (gradient, variable) pairs.
name: Optional name for the returned operation. Default to the name
passed to the `Optimizer` constructor.
Returns:
      An `Operation` that applies the specified gradients and increments the
      optimizer's `iterations` counter.
Raises:
TypeError: If `grads_and_vars` is malformed.
ValueError: If none of the variables have gradients.
"""
grads_and_vars = _filter_grads(grads_and_vars)
var_list = [v for (_, v) in grads_and_vars]
# Create iteration if necessary.
with ops.init_scope():
_ = self.iterations
self._create_hypers()
self._create_slots(var_list)
self._prepare(var_list)
return distribute_ctx.get_replica_context().merge_call(
self._distributed_apply, args=(grads_and_vars,), kwargs={"name": name})
def _distributed_apply(self, distribution, grads_and_vars, name):
"""`apply_gradients` using a `DistributionStrategy`."""
reduced_grads = distribution.extended.batch_reduce_to(
ds_reduce_util.ReduceOp.SUM, grads_and_vars)
var_list = [v for _, v in grads_and_vars]
grads_and_vars = zip(reduced_grads, var_list)
def apply_grad_to_update_var(var, grad):
"""Apply gradient to variable."""
if isinstance(var, ops.Tensor):
raise NotImplementedError("Trying to update a Tensor ", var)
if isinstance(grad, ops.IndexedSlices):
if var.constraint is not None:
raise RuntimeError(
"Cannot use a constraint function on a sparse variable.")
return self._resource_apply_sparse_duplicate_indices(
grad.values, var, grad.indices)
update_op = self._resource_apply_dense(grad, var)
if var.constraint is not None:
with ops.control_dependencies([update_op]):
return var.assign(var.constraint(var))
else:
return update_op
update_ops = []
with backend.name_scope(name or self._name):
for grad, var in grads_and_vars:
scope_name = ("" if ops.executing_eagerly_outside_functions() else
"_" + var.op.name)
with backend.name_scope("update" + scope_name):
update_ops.extend(
distribution.extended.update(
var, apply_grad_to_update_var, args=(grad,), group=False))
any_symbolic = any(isinstance(i, ops.Operation) or
tf_utils.is_symbolic_tensor(i) for i in update_ops)
if not context.executing_eagerly() or any_symbolic:
# If the current context is graph mode or any of the update ops are
# symbolic then the step update should be carried out under a graph
# context. (eager updates execute immediately)
with ops._get_graph_from_inputs(update_ops).as_default(): # pylint: disable=protected-access
with ops.control_dependencies(update_ops):
return self._iterations.assign_add(1).op
return self._iterations.assign_add(1)
def get_updates(self, loss, params):
grads = self.get_gradients(loss, params)
grads_and_vars = list(zip(grads, params))
self._assert_valid_dtypes([
v for g, v in grads_and_vars
if g is not None and v.dtype != dtypes.resource
])
return [self.apply_gradients(grads_and_vars)]
def _set_hyper(self, name, value):
"""set hyper `name` to value. value can be callable, tensor, numeric."""
if isinstance(value, trackable.Trackable):
self._track_trackable(value, name, overwrite=True)
if name not in self._hyper:
self._hyper[name] = value
else:
prev_value = self._hyper[name]
if (callable(prev_value)
or isinstance(prev_value,
(ops.Tensor, int, float,
learning_rate_schedule.LearningRateSchedule))
or isinstance(value, learning_rate_schedule.LearningRateSchedule)):
self._hyper[name] = value
else:
backend.set_value(self._hyper[name], value)
def _get_hyper(self, name, dtype=None):
if not self._hypers_created:
self._create_hypers()
value = self._hyper[name]
if isinstance(value, learning_rate_schedule.LearningRateSchedule):
return value
if callable(value):
value = value()
if dtype:
return math_ops.cast(value, dtype)
else:
return value
def __getattribute__(self, name):
"""Overridden to support hyperparameter access."""
try:
return super(OptimizerV2, self).__getattribute__(name)
except AttributeError as e:
# Needed to avoid infinite recursion with __setattr__.
if name == "_hyper":
raise e
# Backwards compatibility with Keras optimizers.
if name == "lr":
name = "learning_rate"
if name in self._hyper:
return self._get_hyper(name)
raise e
def __setattr__(self, name, value):
"""Override setattr to support dynamic hyperparameter setting."""
# Backwards compatibility with Keras optimizers.
if name == "lr":
name = "learning_rate"
if hasattr(self, "_hyper") and name in self._hyper:
self._set_hyper(name, value)
else:
super(OptimizerV2, self).__setattr__(name, value)
def get_slot_names(self):
"""A list of names for this optimizer's slots."""
return self._slot_names
def add_slot(self, var, slot_name, initializer="zeros"):
"""Add a new slot variable for `var`."""
if slot_name not in self._slot_names:
self._slot_names.append(slot_name)
var_key = _var_key(var)
slot_dict = self._slots.setdefault(var_key, {})
weight = slot_dict.get(slot_name, None)
if weight is None:
if isinstance(initializer, six.string_types) or callable(initializer):
initializer = initializers.get(initializer)
initial_value = functools.partial(
initializer, shape=var.shape, dtype=var.dtype)
else:
initial_value = initializer
strategy = distribute_ctx.get_strategy()
with strategy.extended.colocate_vars_with(var):
weight = tf_variables.Variable(
name="%s/%s" % (var._shared_name, slot_name), # pylint: disable=protected-access
dtype=var.dtype,
trainable=False,
initial_value=initial_value)
backend.track_variable(weight)
slot_dict[slot_name] = weight
self._restore_slot_variable(
slot_name=slot_name, variable=var,
slot_variable=weight)
self._weights.append(weight)
return weight
def get_slot(self, var, slot_name):
var_key = _var_key(var)
slot_dict = self._slots[var_key]
return slot_dict[slot_name]
def _prepare(self, var_list):
pass
def _create_hypers(self):
if self._hypers_created:
return
# Iterate hyper values deterministically.
for name, value in sorted(self._hyper.items()):
if isinstance(value, ops.Tensor) or callable(value):
continue
else:
self._hyper[name] = self.add_weight(
name,
shape=[],
trainable=False,
initializer=value,
aggregation=tf_variables.VariableAggregation.ONLY_FIRST_REPLICA)
self._hypers_created = True
@property
def iterations(self):
"""Variable. The number of training steps this Optimizer has run."""
if self._iterations is None:
self._iterations = self.add_weight(
"iter",
shape=[],
dtype=dtypes.int64,
trainable=False,
aggregation=tf_variables.VariableAggregation.ONLY_FIRST_REPLICA)
self._weights.append(self._iterations)
return self._iterations
@iterations.setter
def iterations(self, variable):
if self._iterations is not None:
raise RuntimeError("Cannot set `iterations` to a new Variable after "
"the Optimizer weights have been created")
self._iterations = variable
self._weights.append(self._iterations)
def _decayed_lr(self, var_dtype):
"""Get decayed learning rate as a Tensor with dtype=var_dtype."""
lr_t = self._get_hyper("learning_rate", var_dtype)
if isinstance(lr_t, learning_rate_schedule.LearningRateSchedule):
local_step = math_ops.cast(self.iterations, var_dtype)
lr_t = math_ops.cast(lr_t(local_step), var_dtype)
if self._initial_decay > 0.:
local_step = math_ops.cast(self.iterations, var_dtype)
decay_t = self._get_hyper("decay", var_dtype)
lr_t = lr_t / (1. + decay_t * local_step)
return lr_t
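  # Worked example (illustration only): with learning_rate=0.1, decay=0.5 and
  # iterations=4, the inverse-time decay above yields
  # 0.1 / (1. + 0.5 * 4) = 0.1 / 3. ~= 0.0333.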
@abc.abstractmethod
def get_config(self):
"""Returns the config of the optimimizer.
An optimizer config is a Python dictionary (serializable)
containing the configuration of an optimizer.
The same optimizer can be reinstantiated later
(without any saved state) from this configuration.
Returns:
Python dictionary.
"""
config = {"name": self._name}
if hasattr(self, "clipnorm"):
config["clipnorm"] = self.clipnorm
if hasattr(self, "clipvalue"):
config["clipvalue"] = self.clipvalue
return config
@classmethod
def from_config(cls, config, custom_objects=None):
"""Creates an optimizer from its config.
This method is the reverse of `get_config`,
capable of instantiating the same optimizer from the config
dictionary.
Arguments:
config: A Python dictionary, typically the output of get_config.
custom_objects: A Python dictionary mapping names to additional Python
objects used to create this optimizer, such as a function used for a
hyperparameter.
Returns:
An optimizer instance.
"""
if "lr" in config:
config["learning_rate"] = config.pop("lr")
if "learning_rate" in config:
if isinstance(config["learning_rate"], dict):
config["learning_rate"] = learning_rate_schedule.deserialize(
config["learning_rate"], custom_objects=custom_objects)
return cls(**config)
def _serialize_hyperparameter(self, hyperparameter_name):
"""Serialize a hyperparameter that can be a float, callable, or Tensor."""
value = self._hyper[hyperparameter_name]
if isinstance(value, learning_rate_schedule.LearningRateSchedule):
return learning_rate_schedule.serialize(value)
if callable(value):
return value()
if tensor_util.is_tensor(value):
return backend.get_value(value)
return value
def variables(self):
"""Returns variables of this Optimizer based on the order created."""
return self._weights
@property
def weights(self):
"""Returns variables of this Optimizer based on the order created."""
return self._weights
def get_weights(self):
params = self.weights
return backend.batch_get_value(params)
# TODO(tanzheny): Maybe share this logic with base_layer.
def set_weights(self, weights):
params = self.weights
if len(params) != len(weights):
raise ValueError(
"You called `set_weights(weights)` on optimizer " + self._name +
" with a weight list of length " + str(len(weights)) +
", but the optimizer was expecting " + str(len(params)) +
" weights. Provided weights: " + str(weights)[:50] + "...")
if not params:
return
weight_value_tuples = []
param_values = backend.batch_get_value(params)
for pv, p, w in zip(param_values, params, weights):
if pv.shape != w.shape:
raise ValueError("Optimizer weight shape " + str(pv.shape) +
" not compatible with "
"provided weight shape " + str(w.shape))
weight_value_tuples.append((p, w))
backend.batch_set_value(weight_value_tuples)
def add_weight(self,
name,
shape,
dtype=None,
initializer="zeros",
trainable=None,
synchronization=tf_variables.VariableSynchronization.AUTO,
aggregation=tf_variables.VariableAggregation.NONE):
if dtype is None:
dtype = dtypes.float32
if isinstance(initializer, six.string_types) or callable(initializer):
initializer = initializers.get(initializer)
if synchronization == tf_variables.VariableSynchronization.ON_READ:
if trainable:
raise ValueError(
"Synchronization value can be set to "
"VariableSynchronization.ON_READ only for non-trainable variables. "
"You have specified trainable=True and "
"synchronization=VariableSynchronization.ON_READ.")
else:
# Set trainable to be false when variable is to be synced on read.
trainable = False
elif trainable is None:
trainable = True
variable = self._add_variable_with_custom_getter(
name=name,
shape=shape,
getter=base_layer_utils.make_variable,
overwrite=True,
initializer=initializer,
dtype=dtype,
trainable=trainable,
use_resource=True,
synchronization=synchronization,
aggregation=aggregation)
backend.track_variable(variable)
return variable
def _assert_valid_dtypes(self, tensors):
"""Asserts tensors are all valid types (see `_valid_dtypes`).
Args:
tensors: Tensors to check.
Raises:
ValueError: If any tensor is not a valid type.
"""
valid_dtypes = self._valid_dtypes()
for t in tensors:
dtype = t.dtype.base_dtype
if dtype not in valid_dtypes:
raise ValueError("Invalid type %r for %s, expected: %s." %
(dtype, t.name, [v for v in valid_dtypes]))
def _valid_dtypes(self):
"""Valid types for loss, variables and gradients.
Subclasses should override to allow other float types.
Returns:
Valid types for loss, variables and gradients.
"""
return set(
[dtypes.float16, dtypes.bfloat16, dtypes.float32, dtypes.float64])
def _call_if_callable(self, param):
"""Call the function if param is callable."""
return param() if callable(param) else param
def _resource_apply_dense(self, grad, handle):
"""Add ops to apply dense gradients to the variable `handle`.
Args:
grad: a `Tensor` representing the gradient.
handle: a `Tensor` of dtype `resource` which points to the variable to be
updated.
Returns:
An `Operation` which updates the value of the variable.
"""
raise NotImplementedError()
def _resource_apply_sparse_duplicate_indices(self, grad, handle, indices):
"""Add ops to apply sparse gradients to `handle`, with repeated indices.
Optimizers which override this method must deal with repeated indices. See
the docstring of `_apply_sparse_duplicate_indices` for details. By default
the correct behavior, to sum non-unique indices and their associated
gradients, is enforced by first pre-processing `grad` and `indices` and
passing them on to `_resource_apply_sparse`. Optimizers which deal correctly
with duplicate indices may instead override this method to avoid the
overhead of summing.
Args:
grad: a `Tensor` representing the gradient for the affected indices.
handle: a `Tensor` of dtype `resource` which points to the variable to be
updated.
indices: a `Tensor` of integral type representing the indices for which
the gradient is nonzero. Indices may be repeated.
Returns:
An `Operation` which updates the value of the variable.
"""
summed_grad, unique_indices = _deduplicate_indexed_slices(
values=grad, indices=indices)
return self._resource_apply_sparse(summed_grad, handle, unique_indices)
def _resource_apply_sparse(self, grad, handle, indices):
"""Add ops to apply sparse gradients to the variable `handle`.
Similar to `_apply_sparse`, the `indices` argument to this method has been
de-duplicated. Optimizers which deal correctly with non-unique indices may
instead override `_resource_apply_sparse_duplicate_indices` to avoid this
overhead.
Args:
grad: a `Tensor` representing the gradient for the affected indices.
handle: a `Tensor` of dtype `resource` which points to the variable to be
updated.
indices: a `Tensor` of integral type representing the indices for which
the gradient is nonzero. Indices are unique.
Returns:
An `Operation` which updates the value of the variable.
"""
raise NotImplementedError()
def _resource_scatter_add(self, x, i, v):
with ops.control_dependencies(
[resource_variable_ops.resource_scatter_add(x.handle, i, v)]):
return x.value()
def _resource_scatter_update(self, x, i, v):
with ops.control_dependencies(
[resource_variable_ops.resource_scatter_update(x.handle, i, v)]):
return x.value()
# ---------------
# For implementing the trackable interface
# ---------------
def _restore_slot_variable(self, slot_name, variable, slot_variable):
"""Restore a newly created slot variable's value."""
variable_key = _var_key(variable)
deferred_restorations = self._deferred_slot_restorations.get(
slot_name, {}).pop(variable_key, [])
# Iterate over restores, highest restore UID first to minimize the number
# of assignments.
deferred_restorations.sort(key=lambda position: position.restore_uid,
reverse=True)
for checkpoint_position in deferred_restorations:
checkpoint_position.restore(slot_variable)
def _create_or_restore_slot_variable(
self, slot_variable_position, slot_name, variable):
"""Restore a slot variable's value, possibly creating it.
Called when a variable which has an associated slot variable is created or
restored. When executing eagerly, we create the slot variable with a
restoring initializer.
No new variables are created when graph building. Instead,
_restore_slot_variable catches these after normal creation and adds restore
ops to the graph. This method is nonetheless important when graph building
for the case when a slot variable has already been created but `variable`
has just been added to a dependency graph (causing us to realize that the
slot variable needs to be restored).
Args:
slot_variable_position: A `trackable._CheckpointPosition` object
indicating the slot variable `Trackable` object to be restored.
slot_name: The name of this `Optimizer`'s slot to restore into.
variable: The variable object this slot is being created for.
"""
variable_key = _var_key(variable)
slot_dict = self._slots.get(variable_key, {})
slot_variable = slot_dict.get(slot_name, None)
if (slot_variable is None and context.executing_eagerly() and
slot_variable_position.is_simple_variable()
# Defer slot variable creation if there is an active variable creator
# scope. Generally we'd like to eagerly create/restore slot variables
# when possible, but this may mean that scopes intended to catch
# `variable` also catch its eagerly created slot variable
# unintentionally (specifically make_template would add a dependency on
# a slot variable if not for this case). Deferring is mostly harmless
# (aside from double initialization), and makes variable creator scopes
# behave the same way they do when graph building.
and not ops.get_default_graph()._variable_creator_stack): # pylint: disable=protected-access
initializer = trackable.CheckpointInitialValue(
checkpoint_position=slot_variable_position)
slot_variable = self.add_slot(
var=variable,
initializer=initializer,
slot_name=slot_name)
# Slot variables are not owned by any one object (because we don't want to
# save the slot variable if the optimizer is saved without the non-slot
# variable, or if the non-slot variable is saved without the optimizer;
# it's a dependency hypergraph with edges of the form (optimizer, non-slot
# variable, variable)). So we don't _track_ slot variables anywhere, and
# instead special-case this dependency and otherwise pretend it's a normal
# graph.
if slot_variable is not None:
# If we've either made this slot variable, or if we've pulled out an
# existing slot variable, we should restore it.
slot_variable_position.restore(slot_variable)
else:
# We didn't make the slot variable. Defer restoring until it gets created
# normally. We keep a list rather than the one with the highest restore
# UID in case slot variables have their own dependencies, in which case
# those could differ between restores.
self._deferred_slot_restorations.setdefault(
slot_name, {}).setdefault(variable_key, []).append(
slot_variable_position)
def _filter_grads(grads_and_vars):
"""Filter out iterable with grad equal to None."""
grads_and_vars = tuple(grads_and_vars)
if not grads_and_vars:
return grads_and_vars
filtered = []
vars_with_empty_grads = []
for grad, var in grads_and_vars:
if grad is None:
vars_with_empty_grads.append(var)
else:
filtered.append((grad, var))
filtered = tuple(filtered)
if not filtered:
raise ValueError("No gradients provided for any variable: %s." %
([v.name for _, v in grads_and_vars],))
if vars_with_empty_grads:
logging.warning(
("Gradients does not exist for variables %s when minimizing the loss."),
([v.name for v in vars_with_empty_grads]))
return filtered
def _var_key(var):
"""Key for representing a primary variable, for looking up slots.
In graph mode the name is derived from the var shared name.
In eager mode the name is derived from the var unique id.
If distribution strategy exists, get the primary variable first.
Args:
var: the variable.
Returns:
the unique name of the variable.
"""
# pylint: disable=protected-access
# Get the distributed variable if it exists.
if getattr(var, "_distributed_container", None) is not None:
var = var._distributed_container()
if var._in_graph_mode:
return var._shared_name
return var._unique_id
def _get_slot_key_from_var(var, slot_name):
"""Get the slot key for the variable: var_name/slot_name."""
name = _var_key(var)
return name + "/" + slot_name
class _RestoredOptimizer(OptimizerV2):
"""A non-functional Optimizer implementation for checkpoint compatibility.
Holds slot variables and hyperparameters when an optimizer is restored from a
SavedModel. These variables may be referenced in functions along with ops
created by the original optimizer, but currently we do not support using the
  optimizer object itself (e.g. through `apply_gradients`).
"""
# TODO(allenl): Make the restored optimizer functional by tracing its apply
# methods.
def __init__(self):
super(_RestoredOptimizer, self).__init__("_RestoredOptimizer")
self._hypers_created = True
def get_config(self):
# TODO(allenl): Save and restore the Optimizer's config
raise NotImplementedError(
"Restoring functional Optimzers from SavedModels is not currently "
"supported. Please file a feature request if this limitation bothers "
"you.")
revived_types.register_revived_type(
"optimizer",
lambda obj: isinstance(obj, OptimizerV2),
versions=[revived_types.VersionedTypeRegistration(
object_factory=lambda proto: _RestoredOptimizer(),
version=1,
min_producer_version=1,
min_consumer_version=1,
setter=_RestoredOptimizer._set_hyper # pylint: disable=protected-access
)])
| [
"tensorflow.python.keras.backend.get_value",
"tensorflow.python.keras.initializers.get",
"tensorflow.python.util.tf_export.keras_export",
"tensorflow.python.ops.gradients.gradients",
"tensorflow.python.framework.ops.init_scope",
"tensorflow.python.framework.ops.get_default_graph",
"tensorflow.python.ops.variables.Variable",
"tensorflow.python.keras.backend.batch_set_value",
"tensorflow.python.keras.backend.set_value",
"tensorflow.python.ops.clip_ops.clip_by_norm",
"tensorflow.python.util.nest.flatten",
"tensorflow.python.keras.optimizer_v2.learning_rate_schedule.deserialize",
"tensorflow.python.ops.resource_variable_ops.resource_scatter_update",
"tensorflow.python.framework.ops.executing_eagerly_outside_functions",
"tensorflow.python.distribute.distribution_strategy_context.get_replica_context",
"tensorflow.python.ops.math_ops.cast",
"tensorflow.python.framework.tensor_util.is_tensor",
"tensorflow.python.eager.backprop.GradientTape",
"tensorflow.python.keras.backend.track_variable",
"tensorflow.python.keras.optimizer_v2.learning_rate_schedule.serialize",
"tensorflow.python.ops.resource_variable_ops.resource_scatter_add",
"tensorflow.python.ops.array_ops.shape",
"tensorflow.python.framework.ops._get_graph_from_inputs",
"tensorflow.python.framework.ops.control_dependencies",
"tensorflow.python.keras.backend.batch_get_value",
"tensorflow.python.training.tracking.base.CheckpointInitialValue",
"tensorflow.python.eager.context.executing_eagerly",
"tensorflow.python.keras.backend.get_graph",
"six.add_metaclass",
"tensorflow.python.ops.clip_ops.clip_by_value",
"tensorflow.python.platform.tf_logging.warning",
"tensorflow.python.ops.array_ops.unique",
"functools.partial",
"tensorflow.python.keras.backend.name_scope",
"tensorflow.python.keras.utils.tf_utils.is_symbolic_tensor",
"tensorflow.python.distribute.distribution_strategy_context.get_strategy"
] | [((2908, 2938), 'six.add_metaclass', 'six.add_metaclass', (['abc.ABCMeta'], {}), '(abc.ABCMeta)\n', (2925, 2938), False, 'import six\n'), ((2940, 2982), 'tensorflow.python.util.tf_export.keras_export', 'keras_export', (['"""keras.optimizers.Optimizer"""'], {}), "('keras.optimizers.Optimizer')\n", (2952, 2982), False, 'from tensorflow.python.util.tf_export import keras_export\n'), ((2712, 2737), 'tensorflow.python.ops.array_ops.unique', 'array_ops.unique', (['indices'], {}), '(indices)\n', (2728, 2737), False, 'from tensorflow.python.ops import array_ops\n'), ((14228, 14250), 'tensorflow.python.util.nest.flatten', 'nest.flatten', (['var_list'], {}), '(var_list)\n', (14240, 14250), False, 'from tensorflow.python.util import nest\n'), ((15145, 15165), 'tensorflow.python.util.nest.flatten', 'nest.flatten', (['params'], {}), '(params)\n', (15157, 15165), False, 'from tensorflow.python.util import nest\n'), ((26486, 26514), 'tensorflow.python.framework.tensor_util.is_tensor', 'tensor_util.is_tensor', (['value'], {}), '(value)\n', (26507, 26514), False, 'from tensorflow.python.framework import tensor_util\n'), ((26890, 26921), 'tensorflow.python.keras.backend.batch_get_value', 'backend.batch_get_value', (['params'], {}), '(params)\n', (26913, 26921), False, 'from tensorflow.python.keras import backend\n'), ((27462, 27493), 'tensorflow.python.keras.backend.batch_get_value', 'backend.batch_get_value', (['params'], {}), '(params)\n', (27485, 27493), False, 'from tensorflow.python.keras import backend\n'), ((27809, 27853), 'tensorflow.python.keras.backend.batch_set_value', 'backend.batch_set_value', (['weight_value_tuples'], {}), '(weight_value_tuples)\n', (27832, 27853), False, 'from tensorflow.python.keras import backend\n'), ((29233, 29265), 'tensorflow.python.keras.backend.track_variable', 'backend.track_variable', (['variable'], {}), '(variable)\n', (29255, 29265), False, 'from tensorflow.python.keras import backend\n'), ((37664, 37800), 'tensorflow.python.platform.tf_logging.warning', 'logging.warning', (['"""Gradients does not exist for variables %s when minimizing the loss."""', '[v.name for v in vars_with_empty_grads]'], {}), "(\n 'Gradients does not exist for variables %s when minimizing the loss.',\n [v.name for v in vars_with_empty_grads])\n", (37679, 37800), True, 'from tensorflow.python.platform import tf_logging as logging\n'), ((2828, 2859), 'tensorflow.python.ops.array_ops.shape', 'array_ops.shape', (['unique_indices'], {}), '(unique_indices)\n', (2843, 2859), False, 'from tensorflow.python.ops import array_ops\n'), ((14037, 14060), 'tensorflow.python.eager.backprop.GradientTape', 'backprop.GradientTape', ([], {}), '()\n', (14058, 14060), False, 'from tensorflow.python.eager import backprop\n'), ((15223, 15256), 'tensorflow.python.ops.gradients.gradients', 'gradients.gradients', (['loss', 'params'], {}), '(loss, params)\n', (15242, 15256), False, 'from tensorflow.python.ops import gradients\n'), ((16752, 16768), 'tensorflow.python.framework.ops.init_scope', 'ops.init_scope', ([], {}), '()\n', (16766, 16768), False, 'from tensorflow.python.framework import ops\n'), ((18113, 18151), 'tensorflow.python.keras.backend.name_scope', 'backend.name_scope', (['(name or self._name)'], {}), '(name or self._name)\n', (18131, 18151), False, 'from tensorflow.python.keras import backend\n'), ((20470, 20497), 'tensorflow.python.ops.math_ops.cast', 'math_ops.cast', (['value', 'dtype'], {}), '(value, dtype)\n', (20483, 20497), False, 'from tensorflow.python.ops import math_ops\n'), ((22126, 22155), 
'tensorflow.python.distribute.distribution_strategy_context.get_strategy', 'distribute_ctx.get_strategy', ([], {}), '()\n', (22153, 22155), True, 'from tensorflow.python.distribute import distribution_strategy_context as distribute_ctx\n'), ((22449, 22479), 'tensorflow.python.keras.backend.track_variable', 'backend.track_variable', (['weight'], {}), '(weight)\n', (22471, 22479), False, 'from tensorflow.python.keras import backend\n'), ((24361, 24402), 'tensorflow.python.ops.math_ops.cast', 'math_ops.cast', (['self.iterations', 'var_dtype'], {}), '(self.iterations, var_dtype)\n', (24374, 24402), False, 'from tensorflow.python.ops import math_ops\n'), ((24511, 24552), 'tensorflow.python.ops.math_ops.cast', 'math_ops.cast', (['self.iterations', 'var_dtype'], {}), '(self.iterations, var_dtype)\n', (24524, 24552), False, 'from tensorflow.python.ops import math_ops\n'), ((26394, 26433), 'tensorflow.python.keras.optimizer_v2.learning_rate_schedule.serialize', 'learning_rate_schedule.serialize', (['value'], {}), '(value)\n', (26426, 26433), False, 'from tensorflow.python.keras.optimizer_v2 import learning_rate_schedule\n'), ((26529, 26553), 'tensorflow.python.keras.backend.get_value', 'backend.get_value', (['value'], {}), '(value)\n', (26546, 26553), False, 'from tensorflow.python.keras import backend\n'), ((28317, 28346), 'tensorflow.python.keras.initializers.get', 'initializers.get', (['initializer'], {}), '(initializer)\n', (28333, 28346), False, 'from tensorflow.python.keras import initializers\n'), ((34923, 34950), 'tensorflow.python.eager.context.executing_eagerly', 'context.executing_eagerly', ([], {}), '()\n', (34948, 34950), False, 'from tensorflow.python.eager import context\n'), ((35721, 35797), 'tensorflow.python.training.tracking.base.CheckpointInitialValue', 'trackable.CheckpointInitialValue', ([], {'checkpoint_position': 'slot_variable_position'}), '(checkpoint_position=slot_variable_position)\n', (35753, 35797), True, 'from tensorflow.python.training.tracking import base as trackable\n'), ((14360, 14399), 'tensorflow.python.ops.clip_ops.clip_by_norm', 'clip_ops.clip_by_norm', (['g', 'self.clipnorm'], {}), '(g, self.clipnorm)\n', (14381, 14399), False, 'from tensorflow.python.ops import clip_ops\n'), ((14477, 14535), 'tensorflow.python.ops.clip_ops.clip_by_value', 'clip_ops.clip_by_value', (['g', '(-self.clipvalue)', 'self.clipvalue'], {}), '(g, -self.clipvalue, self.clipvalue)\n', (14499, 14535), False, 'from tensorflow.python.ops import clip_ops\n'), ((15708, 15747), 'tensorflow.python.ops.clip_ops.clip_by_norm', 'clip_ops.clip_by_norm', (['g', 'self.clipnorm'], {}), '(g, self.clipnorm)\n', (15729, 15747), False, 'from tensorflow.python.ops import clip_ops\n'), ((15825, 15883), 'tensorflow.python.ops.clip_ops.clip_by_value', 'clip_ops.clip_by_value', (['g', '(-self.clipvalue)', 'self.clipvalue'], {}), '(g, -self.clipvalue, self.clipvalue)\n', (15847, 15883), False, 'from tensorflow.python.ops import clip_ops\n'), ((16900, 16936), 'tensorflow.python.distribute.distribution_strategy_context.get_replica_context', 'distribute_ctx.get_replica_context', ([], {}), '()\n', (16934, 16936), True, 'from tensorflow.python.distribute import distribution_strategy_context as distribute_ctx\n'), ((20129, 20172), 'tensorflow.python.keras.backend.set_value', 'backend.set_value', (['self._hyper[name]', 'value'], {}), '(self._hyper[name], value)\n', (20146, 20172), False, 'from tensorflow.python.keras import backend\n'), ((21929, 21958), 'tensorflow.python.keras.initializers.get', 'initializers.get', 
(['initializer'], {}), '(initializer)\n', (21945, 21958), False, 'from tensorflow.python.keras import initializers\n'), ((21983, 22047), 'functools.partial', 'functools.partial', (['initializer'], {'shape': 'var.shape', 'dtype': 'var.dtype'}), '(initializer, shape=var.shape, dtype=var.dtype)\n', (22000, 22047), False, 'import functools\n'), ((22227, 22362), 'tensorflow.python.ops.variables.Variable', 'tf_variables.Variable', ([], {'name': "('%s/%s' % (var._shared_name, slot_name))", 'dtype': 'var.dtype', 'trainable': '(False)', 'initial_value': 'initial_value'}), "(name='%s/%s' % (var._shared_name, slot_name), dtype=\n var.dtype, trainable=False, initial_value=initial_value)\n", (22248, 22362), True, 'from tensorflow.python.ops import variables as tf_variables\n'), ((25996, 26091), 'tensorflow.python.keras.optimizer_v2.learning_rate_schedule.deserialize', 'learning_rate_schedule.deserialize', (["config['learning_rate']"], {'custom_objects': 'custom_objects'}), "(config['learning_rate'], custom_objects=\n custom_objects)\n", (26030, 26091), False, 'from tensorflow.python.keras.optimizer_v2 import learning_rate_schedule\n'), ((15175, 15194), 'tensorflow.python.keras.backend.get_graph', 'backend.get_graph', ([], {}), '()\n', (15192, 15194), False, 'from tensorflow.python.keras import backend\n'), ((17958, 17995), 'tensorflow.python.framework.ops.control_dependencies', 'ops.control_dependencies', (['[update_op]'], {}), '([update_op])\n', (17982, 17995), False, 'from tensorflow.python.framework import ops\n'), ((18220, 18261), 'tensorflow.python.framework.ops.executing_eagerly_outside_functions', 'ops.executing_eagerly_outside_functions', ([], {}), '()\n', (18259, 18261), False, 'from tensorflow.python.framework import ops\n'), ((18321, 18362), 'tensorflow.python.keras.backend.name_scope', 'backend.name_scope', (["('update' + scope_name)"], {}), "('update' + scope_name)\n", (18339, 18362), False, 'from tensorflow.python.keras import backend\n'), ((18662, 18689), 'tensorflow.python.eager.context.executing_eagerly', 'context.executing_eagerly', ([], {}), '()\n', (18687, 18689), False, 'from tensorflow.python.eager import context\n'), ((32737, 32795), 'tensorflow.python.ops.resource_variable_ops.resource_scatter_add', 'resource_variable_ops.resource_scatter_add', (['x.handle', 'i', 'v'], {}), '(x.handle, i, v)\n', (32779, 32795), False, 'from tensorflow.python.ops import resource_variable_ops\n'), ((32914, 32975), 'tensorflow.python.ops.resource_variable_ops.resource_scatter_update', 'resource_variable_ops.resource_scatter_update', (['x.handle', 'i', 'v'], {}), '(x.handle, i, v)\n', (32959, 32975), False, 'from tensorflow.python.ops import resource_variable_ops\n'), ((35615, 35638), 'tensorflow.python.framework.ops.get_default_graph', 'ops.get_default_graph', ([], {}), '()\n', (35636, 35638), False, 'from tensorflow.python.framework import ops\n'), ((18597, 18627), 'tensorflow.python.keras.utils.tf_utils.is_symbolic_tensor', 'tf_utils.is_symbolic_tensor', (['i'], {}), '(i)\n', (18624, 18627), False, 'from tensorflow.python.keras.utils import tf_utils\n'), ((19031, 19067), 'tensorflow.python.framework.ops.control_dependencies', 'ops.control_dependencies', (['update_ops'], {}), '(update_ops)\n', (19055, 19067), False, 'from tensorflow.python.framework import ops\n'), ((18927, 18965), 'tensorflow.python.framework.ops._get_graph_from_inputs', 'ops._get_graph_from_inputs', (['update_ops'], {}), '(update_ops)\n', (18953, 18965), False, 'from tensorflow.python.framework import ops\n')] |
import boto3
import src.app as app
import csv
import psycopg2 as ps
import os
from dotenv import load_dotenv
load_dotenv()
dbname = os.environ["db"]
host = os.environ["host"]
port = os.environ["port"]
user = os.environ["user"]
password = os.environ["pass"]
connection = ps.connect(dbname=dbname,
host=host,
port=port,
user=user,
password=password)
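# Note: the database connection is opened at import time so that warm Lambda
# invocations can reuse it; it is only re-created when a new execution
# environment (cold start) loads this module.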
def handle(event, context):
cursor = connection.cursor()
cursor.execute("SELECT 1", ())
print(cursor.fetchall())
    # Get key and bucket information
key = event['Records'][0]['s3']['object']['key']
bucket = event['Records'][0]['s3']['bucket']['name']
# use boto3 library to get object from S3
s3 = boto3.client('s3')
s3_object = s3.get_object(Bucket = bucket, Key = key)
data = s3_object['Body'].read().decode('utf-8')
all_lines = []
# read CSV
# csv_data = csv.reader(data.splitlines())
# for row in csv_data:
# datestr = row[0] #.replace('/', '-')
# # print(datestr)
# date_obj = datetime.strptime(datestr, '%d/%m/%Y %H:%M')
# # print(date_obj)
# # time = str(row[0][-5:])
# location = str(row[1])
# order = str(row[3])
# total = str(row[4])
# all_lines.append({'date':date_obj, 'location':location, 'order':order, 'total':total})
# return cached_list
# print(all_lines)
app.start_app(all_lines, data)
    for line in all_lines:
        print(line)
return {"message": "success!!! Check the cloud watch logs for this lambda in cloudwatch https://eu-west-1.console.aws.amazon.com/cloudwatch/home?region=eu-west-1#logsV2:log-groups"}
# Form all the lines of data into a list of lists
# all_lines = [line for line in csv_data]
# print(data)
# print(all_lines) | [
"psycopg2.connect",
"src.app.start_app",
"boto3.client",
"dotenv.load_dotenv"
] | [((111, 124), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (122, 124), False, 'from dotenv import load_dotenv\n'), ((273, 350), 'psycopg2.connect', 'ps.connect', ([], {'dbname': 'dbname', 'host': 'host', 'port': 'port', 'user': 'user', 'password': 'password'}), '(dbname=dbname, host=host, port=port, user=user, password=password)\n', (283, 350), True, 'import psycopg2 as ps\n'), ((805, 823), 'boto3.client', 'boto3.client', (['"""s3"""'], {}), "('s3')\n", (817, 823), False, 'import boto3\n'), ((1498, 1528), 'src.app.start_app', 'app.start_app', (['all_lines', 'data'], {}), '(all_lines, data)\n', (1511, 1528), True, 'import src.app as app\n')] |
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# (C) British Crown Copyright 2017-2021 Met Office.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
""" Tests of precipitation_type utilities"""
import numpy as np
import pytest
from iris.exceptions import CoordinateNotFoundError
from improver.metadata.constants import FLOAT_DTYPE
from improver.precipitation_type.utilities import make_shower_condition_cube
from improver.synthetic_data.set_up_test_cubes import set_up_probability_cube
def set_up_test_cube(n_thresholds=1):
"""Set up a cube testing shower condition conversion"""
thresholds = np.arange(n_thresholds)
shape = [2, 2]
shape = [n_thresholds, *shape] if n_thresholds > 0 else shape
data = np.ones(shape, dtype=FLOAT_DTYPE)
cube = set_up_probability_cube(
data,
thresholds,
variable_name="texture_of_cloud_area_fraction",
threshold_units=1,
spatial_grid="equalarea",
)
return cube
def test_basic():
"""Test that with a valid input the cube is transformed into a shower
condition cube."""
cube = set_up_test_cube()
result = make_shower_condition_cube(cube)
threshold_coord = result.coord(var_name="threshold")
assert result.name() == "probability_of_shower_condition_above_threshold"
assert result.dtype == FLOAT_DTYPE
assert (result.data == cube.data).all()
assert threshold_coord.name() == "shower_condition"
assert threshold_coord.units == 1
def test_no_threshold_coord():
"""Test an exception is raised if the proxy diagnostic cube does not have
a threshold coordinate."""
cube = set_up_test_cube()
cube.remove_coord("texture_of_cloud_area_fraction")
expected = "Input has no threshold coordinate and cannot be used"
with pytest.raises(CoordinateNotFoundError, match=expected):
make_shower_condition_cube(cube)
def test_multi_valued_threshold_coord():
"""Test an exception is raised if the proxy diagnostic cube has a multi
valued threshold coordinate."""
cube = set_up_test_cube(n_thresholds=2)
expected = "Expected a single valued threshold coordinate.*"
with pytest.raises(ValueError, match=expected):
make_shower_condition_cube(cube)
| [
"numpy.ones",
"improver.synthetic_data.set_up_test_cubes.set_up_probability_cube",
"pytest.raises",
"improver.precipitation_type.utilities.make_shower_condition_cube",
"numpy.arange"
] | [((2118, 2141), 'numpy.arange', 'np.arange', (['n_thresholds'], {}), '(n_thresholds)\n', (2127, 2141), True, 'import numpy as np\n'), ((2238, 2271), 'numpy.ones', 'np.ones', (['shape'], {'dtype': 'FLOAT_DTYPE'}), '(shape, dtype=FLOAT_DTYPE)\n', (2245, 2271), True, 'import numpy as np\n'), ((2283, 2427), 'improver.synthetic_data.set_up_test_cubes.set_up_probability_cube', 'set_up_probability_cube', (['data', 'thresholds'], {'variable_name': '"""texture_of_cloud_area_fraction"""', 'threshold_units': '(1)', 'spatial_grid': '"""equalarea"""'}), "(data, thresholds, variable_name=\n 'texture_of_cloud_area_fraction', threshold_units=1, spatial_grid=\n 'equalarea')\n", (2306, 2427), False, 'from improver.synthetic_data.set_up_test_cubes import set_up_probability_cube\n'), ((2642, 2674), 'improver.precipitation_type.utilities.make_shower_condition_cube', 'make_shower_condition_cube', (['cube'], {}), '(cube)\n', (2668, 2674), False, 'from improver.precipitation_type.utilities import make_shower_condition_cube\n'), ((3297, 3351), 'pytest.raises', 'pytest.raises', (['CoordinateNotFoundError'], {'match': 'expected'}), '(CoordinateNotFoundError, match=expected)\n', (3310, 3351), False, 'import pytest\n'), ((3361, 3393), 'improver.precipitation_type.utilities.make_shower_condition_cube', 'make_shower_condition_cube', (['cube'], {}), '(cube)\n', (3387, 3393), False, 'from improver.precipitation_type.utilities import make_shower_condition_cube\n'), ((3669, 3710), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': 'expected'}), '(ValueError, match=expected)\n', (3682, 3710), False, 'import pytest\n'), ((3720, 3752), 'improver.precipitation_type.utilities.make_shower_condition_cube', 'make_shower_condition_cube', (['cube'], {}), '(cube)\n', (3746, 3752), False, 'from improver.precipitation_type.utilities import make_shower_condition_cube\n')] |
# Copyright 2014 varnishapi authors. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
import time
from feaas import storage
class Base(object):
def __init__(self, manager, interval, *locks):
self.manager = manager
self.storage = manager.storage
self.interval = interval
def init_locker(self, *lock_names):
self.locker = storage.MultiLocker(self.storage)
for lock_name in lock_names:
self.locker.init(lock_name)
def loop(self):
self.running = True
while self.running:
self.run()
time.sleep(self.interval)
def stop(self):
self.running = False
| [
"feaas.storage.MultiLocker",
"time.sleep"
] | [((440, 473), 'feaas.storage.MultiLocker', 'storage.MultiLocker', (['self.storage'], {}), '(self.storage)\n', (459, 473), False, 'from feaas import storage\n'), ((663, 688), 'time.sleep', 'time.sleep', (['self.interval'], {}), '(self.interval)\n', (673, 688), False, 'import time\n')] |
# Copyright 2014 Open Source Robotics Foundation, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
from .common import PlatformPackageDescriptor
from .http_cache import fetch_and_cache_gzip
def get_debian_repo_index(debian_repository_baseurl, target, cache_dir):
url = os.path.join(
debian_repository_baseurl, 'dists', target.os_code_name, 'main')
if target.arch == 'source':
url = os.path.join(url, 'source', 'Sources.gz')
else:
url = os.path.join(url, 'binary-%s' % target.arch, 'Packages.gz')
cache_filename = fetch_and_cache_gzip(url, cache_dir)
logging.debug('Reading file: %s' % cache_filename)
# split package blocks
with open(cache_filename, 'rb') as f:
blocks = f.read().decode('utf8').split('\n\n')
blocks = [b.splitlines() for b in blocks if b]
# extract version number of every package
package_versions = {}
for lines in blocks:
prefix = 'Package: '
assert lines[0].startswith(prefix)
debian_pkg_name = lines[0][len(prefix):]
prefix = 'Version: '
versions = [l[len(prefix):] for l in lines if l.startswith(prefix)]
version = versions[0] if len(versions) == 1 else None
prefix = 'Source: '
source_names = [l[len(prefix):] for l in lines if l.startswith(prefix)]
source_name = source_names[0] if len(source_names) == 1 else None
package_versions[debian_pkg_name] = PlatformPackageDescriptor(version, source_name)
return package_versions
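# For reference, each stanza parsed by the loop above looks roughly like the
# following (illustrative values, not taken from a real index):
#
#   Package: ros-foxy-example
#   Source: ros-foxy-example-source
#   Version: 1.2.3-1focal
#
# Stanzas are separated by blank lines, which is why the file is split on '\n\n'.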
| [
"os.path.join",
"logging.debug"
] | [((804, 881), 'os.path.join', 'os.path.join', (['debian_repository_baseurl', '"""dists"""', 'target.os_code_name', '"""main"""'], {}), "(debian_repository_baseurl, 'dists', target.os_code_name, 'main')\n", (816, 881), False, 'import os\n'), ((1127, 1177), 'logging.debug', 'logging.debug', (["('Reading file: %s' % cache_filename)"], {}), "('Reading file: %s' % cache_filename)\n", (1140, 1177), False, 'import logging\n'), ((937, 978), 'os.path.join', 'os.path.join', (['url', '"""source"""', '"""Sources.gz"""'], {}), "(url, 'source', 'Sources.gz')\n", (949, 978), False, 'import os\n'), ((1003, 1062), 'os.path.join', 'os.path.join', (['url', "('binary-%s' % target.arch)", '"""Packages.gz"""'], {}), "(url, 'binary-%s' % target.arch, 'Packages.gz')\n", (1015, 1062), False, 'import os\n')] |
# Copyright 2022 ConvolutedDog (https://github.com/ConvolutedDog/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#!/usr/bin/python3
import torch
import torch.nn as nn
import torch.nn.functional as F
from graphviz import Digraph, render
from torch.autograd import Variable
@torch.no_grad()
def cross_entropy_loss(y_predict, y_true):
print('\n=========================== Layer:'+' {0:18}'.format('cross_entropy_loss')+' Start ===========================')
print('# y_predict.shape: ', list(y_predict.shape))
print('# y_true.shape: ', list(y_true.shape))
y_shift = torch.sub(y_predict, torch.max(y_predict, dim=1, keepdim=True).values)
y_exp = torch.exp(y_shift)
y_probability = torch.div(y_exp, torch.sum(y_exp, dim=1, keepdim=True))
ypred_loss = torch.mean(-torch.sum(torch.mul(y_true, torch.log(y_probability)), dim=1, keepdim=True))
dLoss_dypred = y_probability - y_true
print('# dLoss_dypred.shape: ', list(dLoss_dypred.shape))
print('# Self calculated loss: ', ypred_loss.item())
print('=========================== Layer:'+' {0:18}'.format('cross_entropy_loss')+' End =============================')
return ypred_loss, dLoss_dypred
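# --- Illustrative sketch, not part of the original module ---
# A tiny self-check of the analytic gradient above: for softmax cross entropy,
# dLoss/dy_predict equals softmax(y_predict) - y_true. The _demo_* name below is
# an assumption introduced only for illustration and is never called automatically.
def _demo_cross_entropy_gradient_check():
	y_pred = torch.tensor([[2.0, 1.0, 0.1]])
	y_true = F.one_hot(torch.tensor([0]), num_classes=3).float()
	_, dz = cross_entropy_loss(y_pred, y_true)
	# The returned gradient should match the closed-form expression.
	return torch.allclose(dz, torch.softmax(y_pred, dim=1) - y_true)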
@torch.no_grad()
def fc_backward(dLoss_dnextz, z, w):
print('# next_dz.shape: ', list(dLoss_dnextz.shape))
print('# z.shape: ', list(z.shape))
print('# weight.shape: ', list(w.shape))
print('# bias.shape: ', '['+str(dLoss_dnextz.shape[1])+']')
N = z.shape[0]
if len(z.shape) == 4:
z = z.view(z.size(0), -1)
dLoss_dz = torch.matmul(dLoss_dnextz, w) #delta
dLoss_dfcW = torch.matmul(dLoss_dnextz.t(), z)
dLoss_dfcB = torch.sum(dLoss_dnextz, dim=0)
print('# dz.shape: ', list(dLoss_dz.shape))
print('# dweight.shape: ', list(dLoss_dfcW.shape))
print('# dbias.shape: ', list(dLoss_dfcB.shape))
return dLoss_dz, dLoss_dfcW/N, dLoss_dfcB/N
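# Note on fc_backward above: for a linear layer y = z @ W.T + b, the chain rule
# gives dLoss/dz = next_dz @ W, dLoss/dW = next_dz.T @ z and
# dLoss/db = sum(next_dz, dim=0); the weight and bias gradients are averaged
# over the batch size N before being returned.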
@torch.no_grad()
def view_backward(dLoss_dnextz, last_z, params):
print('# next_dz.shape: ', list(dLoss_dnextz.shape))
print('# last_z.shape: ', list(last_z.shape))
if params:
pooling = params[0]
stride = params[1]
padding = params[2]
output_size = (int((last_z.shape[2]-pooling[0]+2*padding[0])/stride[0]+1), \
int((last_z.shape[3]-pooling[0]+2*padding[0])/stride[0]+1))
dLoss_dz = dLoss_dnextz.reshape(last_z.shape[0], last_z.shape[1], output_size[0], output_size[1])
else:
dLoss_dz = dLoss_dnextz.reshape(last_z.shape)
print('# dz.shape: ', list(dLoss_dz.shape))
return dLoss_dz
def add_backward(dLoss_dnextz):
print('# next_dz.shape: ', list(dLoss_dnextz.shape))
dLoss_dz = dLoss_dnextz
print('# dz.shape: ', list(dLoss_dz.shape))
return dLoss_dz
@torch.no_grad()
def relu_backward(next_dz, z):
print('# next_dz.shape: ', list(next_dz.shape))
print('# z.shape: ', list(z.shape))
zeros_tensor = torch.zeros_like(next_dz)
dLoss_dz = torch.where(torch.gt(z, 0), next_dz, zeros_tensor)
print('# dz.shape: ', list(dLoss_dz.shape))
return dLoss_dz
@torch.no_grad()
def dropback_backward(next_dz, mask, p):
print('# zeros probability: ', p)
print('# next_dz.shape: ', list(next_dz.shape))
print('# mask.shape: ', list(mask.shape))
zeros_tensor = torch.zeros_like(mask)
dLoss_dz = torch.mul(torch.where(torch.eq(mask, 1.), next_dz, zeros_tensor), 1./(1.-p))
print('# dz.shape: ', list(dLoss_dz.shape))
return dLoss_dz
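# Note on dropback_backward above: this matches inverted dropout, so the surviving
# positions (mask == 1) are rescaled by 1 / (1 - p) in the backward pass as well.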
@torch.no_grad()
def max_pooling_backward(next_dz, z, pooling, strides, padding=(0, 0)):
print('# next_dz.shape: ', list(next_dz.shape))
print('# z.shape: ', list(z.shape))
print('# padding: ', padding)
print('# strides: ', strides)
N, C, H, W = z.shape
_, _, out_h, out_w = next_dz.shape
padding_z = F.pad(z, pad=(padding[1],padding[1],padding[0],\
padding[0],0,0), mode='constant', value=0)
padding_dz = torch.zeros_like(padding_z)
for n in torch.arange(N):
for c in torch.arange(C):
for i in torch.arange(out_h):
for j in torch.arange(out_w):
flat_idx = torch.argmax(padding_z[n, c,
strides[0] * i:strides[0] * i + pooling[0],
strides[1] * j:strides[1] * j + pooling[1]])
h_idx = strides[0] * i + flat_idx // pooling[1]
w_idx = strides[1] * j + flat_idx % pooling[1]
padding_dz[n, c, h_idx, w_idx] += next_dz[n, c, i, j]
dz = _remove_padding(padding_dz, padding) # padding_z[:, :, padding[0]:-padding[0], padding[1]:-padding[1]]
print('# dz.shape: ', list(dz.shape))
return dz
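# Note on max_pooling_backward above: the upstream gradient is routed only to the
# argmax position inside each pooling window, and any zero padding added in the
# forward pass is stripped again by _remove_padding.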
@torch.no_grad()
def batchnorm2d_backward(next_dz, z, eps, gamma=torch.Tensor([1.,1.,1.])):
print('# next_dz.shape: ', list(next_dz.shape))
print('# z.shape: ', list(z.shape))
print('# eps: ', eps)
print('# gamma.shape: ', list(gamma.shape))
N, C, H, W = z.shape
m = N*H*W
shape = [N,C,H,W]
import numpy as np
ax = list(np.arange(len(shape)))
shape.pop(1)
ax.pop(1)
axis = tuple(ax)
dxhut = torch.zeros_like(next_dz)
for c in range(C):
dxhut[:,c] = next_dz[:,c]*gamma[c]
dz1 = m*dxhut
mu = z.mean(axis=axis, keepdim=True)
xmu = z - mu
xmu2 = xmu**2
var = xmu2.sum(axis=axis, keepdim=True)/m
ivar = 1./torch.pow(var+eps, 0.5)
dz2 = (ivar**2)*((dxhut*xmu).sum(axis=axis, keepdim=True))*xmu
dz3 = dxhut.sum(axis=axis, keepdim=True)
dz = ivar/m*(dz1-dz2-dz3)
print('# dz.shape: ', list(dz.shape))
return dz
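# Note on batchnorm2d_backward above: with xhat = (x - mu) / sqrt(var + eps) and
# dxhat = next_dz * gamma (per channel), the closed form implemented here is
#   dx = (m * dxhat - sum(dxhat) - xhat * sum(dxhat * xhat)) / (m * sqrt(var + eps))
# where m = N * H * W and the sums run over the batch and spatial axes.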
@torch.no_grad()
def average_pooling_backward(next_dz, z, pooling, strides, padding=(0, 0)):
print('# next_dz.shape: ', list(next_dz.shape))
print('# z.shape: ', list(z.shape))
print('# padding: ', padding)
print('# strides: ', strides)
N, C, H, W = z.shape
_, _, out_h, out_w = next_dz.shape
padding_z = F.pad(z, pad=(padding[1],padding[1],padding[0],\
padding[0],0,0), mode='constant', value=0)
padding_dz = torch.zeros_like(padding_z)
for n in torch.arange(N):
for c in torch.arange(C):
for i in torch.arange(out_h):
for j in torch.arange(out_w):
padding_dz[n, c,
strides[0] * i:strides[0] * i + pooling[0],
strides[1] * j:strides[1] * j + pooling[1]] += next_dz[n, c, i, j] / (pooling[0] * pooling[1])
dz = _remove_padding(padding_dz, padding) # padding_z[:, :, padding[0]:-padding[0], padding[1]:-padding[1]]
print('# dz.shape: ', list(dz.shape))
return dz
@torch.no_grad()
def _remove_padding(z, padding):
if padding[0] > 0 and padding[1] > 0:
return z[:, :, padding[0]:-padding[0], padding[1]:-padding[1]]
elif padding[0] > 0:
return z[:, :, padding[0]:-padding[0], :]
elif padding[1] > 0:
return z[:, :, :, padding[1]:-padding[1]]
else:
return z
@torch.no_grad()
def conv_backward(next_dz, K, z, padding=(0, 0), strides=(1, 1)):
N, C, H, W = z.shape
D, C, k1, k2 = K.shape
N, D, H1, W1 = next_dz.shape
print('# next_dz.shape: ', list(next_dz.shape))
print('# z.shape: ', list(z.shape))
print('# weight.shape: ', list(K.shape))
print('# bias.shape: ', '['+str(K.shape[0])+']')
print('# padding: ', padding)
print('# strides: ', strides)
padding_next_dz = _insert_zeros(next_dz, strides)
flip_K = torch.flip(K, (2, 3))
swap_flip_K = torch.swapaxes(flip_K, 0, 1)
ppadding_next_dz = F.pad(padding_next_dz, pad=(k2-1-padding[1],k2-1-padding[1],\
k1-1-padding[0],k1-1-padding[0],0,0), mode='constant', value=0)
dz = _conv_forward(ppadding_next_dz, swap_flip_K)
swap_z = torch.swapaxes(z, 0, 1)
dK = _conv_forward(torch.swapaxes(F.pad(z, pad=(padding[1],padding[1],\
padding[0],padding[0],0,0), mode='constant', value=0), 0, 1), torch.swapaxes(padding_next_dz, 0, 1))
	db = torch.sum(torch.sum(torch.sum(next_dz, axis=-1), axis=-1), axis=0)  # sum over the height and width dimensions, then over the batch dimension
print('# dz.shape: ', list(dz.shape))
print('# dweight.shape: ', list(dK.transpose(0,1).shape))
print('# dbias.shape: ', list(db.shape))
return dz, (dK/N).transpose(0,1), db/N
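# Note on conv_backward above: the input gradient is a "full" convolution of the
# stride-upsampled upstream gradient with the spatially flipped kernels, dK is
# obtained by correlating the (padded) input with that upsampled gradient, and db
# sums the upstream gradient over the batch and spatial dimensions; dK and db are
# averaged over the batch size N.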
@torch.no_grad()
def _conv_forward(x, weight, strides=(1,1)):
n, c, h_in, w_in = x.shape
d, c, k, j = weight.shape
x_pad = x
x_pad = x_pad.unfold(2, k, strides[0])
x_pad = x_pad.unfold(3, j, strides[1])
out = torch.einsum(
'nchwkj,dckj->ndhw',
x_pad, weight)
return out
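# --- Illustrative sketch, not part of the original module ---
# Cross-checks the unfold/einsum convolution above against F.conv2d for the
# default stride. The _demo_* name is an assumption introduced only for
# illustration and is never called automatically.
def _demo_conv_forward_check():
	x = torch.randn(2, 3, 8, 8)
	w = torch.randn(4, 3, 3, 3)
	return torch.allclose(_conv_forward(x, w), F.conv2d(x, w), atol=1e-5)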
@torch.no_grad()
def _insert_zeros(dz, strides):
N, D, H, W = dz.shape
H_last = (H-1)*(strides[0]-1) + H
W_last = (W-1)*(strides[1]-1) + W
pz = torch.zeros(N, D, H_last, W_last)
for n in range(N):
for d in range(D):
for h in range(0, H_last, strides[0]):
for w in range(0, W_last, strides[1]):
pz[n,d,h,w] = dz[n,d,h//strides[0],w//strides[1]]
return pz
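# Note on _insert_zeros above: (stride - 1) zeros are inserted between neighbouring
# elements of the upstream gradient so that a stride-s forward convolution can be
# back-propagated with a plain stride-1 convolution in conv_backward.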
@torch.no_grad()
def judge_tensors_equal(tensor_A, tensor_B):
if(not tensor_A.shape == tensor_B.shape):
print('Shape of two compard tensors is not equal.')
return None
error = 0
error_tolerance = 0.001
np_A = tensor_A.detach().numpy()
np_B = tensor_B.detach().numpy()
if len(tensor_A.shape) == 4:
N, C, H, W = tensor_A.shape
for n in range(N):
for c in range(C):
for h in range(H):
for w in range(W):
if np_A[n,c,h,w]-np_B[n,c,h,w] > error_tolerance or np_B[n,c,h,w]-np_A[n,c,h,w] > error_tolerance:
error += 1
if error%20 == 0:
pass
print('error', np_A[n,c,h,w], np_B[n,c,h,w])
else:
if n*c*h*w % 20000000000000 == 0:
pass
#print('right', np_A[n,c,h,w], np_B[n,c,h,w])
#print('Error rate: ', error/(N*C*H*W))
print('4D-error-rate: ', end=' ')
return error/(N*C*H*W)
elif len(tensor_A.shape) == 1:
C = tensor_A.shape[0]
for c in range(C):
if np_A[c]-np_B[c] > error_tolerance or np_B[c]-np_A[c] > error_tolerance:
#print(np_A[c], np_B[c])
error += 1
#print('Error rate: ', error/C)
print('1D-error-rate: ', end=' ')
return error/C
elif len(tensor_A.shape) == 2:
N, C = tensor_A.shape
for n in range(N):
for c in range(C):
if np_A[n,c]-np_B[n,c] > error_tolerance or np_B[n,c]-np_A[n,c] > error_tolerance:
#print(np_A[n,c], np_B[n,c])
error += 1
#print('Error rate: ', error/(C*N))
print('2D-error-rate: ', end=' ')
return error/(C*N)
@torch.no_grad()
def get_featuremap(featuremap_dir=None):
import os
featuremap = []
if featuremap_dir == None:
pth_dir = "./tmp_file/"
else:
pth_dir = featuremap_dir
files = os.listdir(pth_dir)
file_nums = []
for i in range(len(files)):
if '.pth' in files[i]:
file_nums.append(int(files[i].split('.pth')[0]))
file_nums.sort()
for file_num in file_nums:
tensor = torch.load(pth_dir+str(file_num)+'.pth')
featuremap.append(tensor)
delete_allpths(pth_dir=None)
return featuremap
@torch.no_grad()
def get_structure_parameters_v1(model):
layers = []
for layer in model.modules():
if not ':' in str(layer):
layers.append(layer)
parameters = []
fc_conv_weights = []
for layer in layers:
if isinstance(layer, nn.Conv2d):
layer_name = 'Conv2d'
Conv2d_params = {}
Conv2d_params['layer_name'] = layer_name
# in_channel
in_channel = layer.__dict__.get('in_channels')
Conv2d_params['in_channel'] = in_channel
# out_channel
out_channel = layer.__dict__.get('out_channels')
Conv2d_params['out_channel'] = out_channel
# kernel_size
kernel_size = layer.__dict__.get('kernel_size')
if not isinstance(kernel_size, tuple):
Conv2d_params['kernel_size'] = (kernel_size, kernel_size)
else:
Conv2d_params['kernel_size'] = kernel_size
# stride
stride = layer.__dict__.get('stride')
if not isinstance(stride, tuple):
Conv2d_params['stride'] = (stride, stride)
else:
Conv2d_params['stride'] = stride
# padding
padding = layer.__dict__.get('padding')
if not isinstance(padding, tuple):
Conv2d_params['padding'] = (padding, padding)
else:
Conv2d_params['padding'] = padding
# return
fc_conv_weights.append(layer.weight)
parameters.append(Conv2d_params)
elif isinstance(layer, nn.ReLU):
layer_name = 'ReLU'
parameters.append({'layer_name': layer_name})
elif isinstance(layer, nn.MaxPool2d):
layer_name = 'MaxPool2d'
MaxPool2d_params = {}
MaxPool2d_params['layer_name'] = layer_name
# kernel_size
kernel_size = layer.__dict__.get('kernel_size')
if not isinstance(kernel_size, tuple):
MaxPool2d_params['kernel_size'] = (kernel_size, kernel_size)
else:
MaxPool2d_params['kernel_size'] = kernel_size
# stride
stride = layer.__dict__.get('stride')
if not isinstance(stride, tuple):
MaxPool2d_params['stride'] = (stride, stride)
else:
MaxPool2d_params['stride'] = stride
# padding
padding = layer.__dict__.get('padding')
if not isinstance(padding, tuple):
MaxPool2d_params['padding'] = (padding, padding)
else:
MaxPool2d_params['padding'] = padding
# return
parameters.append(MaxPool2d_params)
elif isinstance(layer, nn.AvgPool2d):
layer_name = 'AvgPool2d'
AvgPool2d_params = {}
AvgPool2d_params['layer_name'] = layer_name
# kernel_size
kernel_size = layer.__dict__.get('kernel_size')
if not isinstance(kernel_size, tuple):
AvgPool2d_params['kernel_size'] = (kernel_size, kernel_size)
else:
AvgPool2d_params['kernel_size'] = kernel_size
# stride
stride = layer.__dict__.get('stride')
if not isinstance(stride, tuple):
AvgPool2d_params['stride'] = (stride, stride)
else:
AvgPool2d_params['stride'] = stride
# padding
padding = layer.__dict__.get('padding')
if not isinstance(padding, tuple):
AvgPool2d_params['padding'] = (padding, padding)
else:
AvgPool2d_params['padding'] = padding
# return
parameters.append(AvgPool2d_params)
elif isinstance(layer, nn.Dropout):
layer_name = 'Dropout'
Dropout_params = {}
Dropout_params['layer_name'] = layer_name
# p
p = layer.__dict__.get('p')
Dropout_params['p'] = p
# return
parameters.append(Dropout_params)
elif isinstance(layer, nn.BatchNorm2d):
layer_name = 'BatchNorm2d'
BatchNorm2d_params = {}
BatchNorm2d_params['layer_name'] = layer_name
# num_features
num_features = layer.__dict__.get('num_features')
BatchNorm2d_params['num_features'] = num_features
# eps
eps = layer.__dict__.get('eps')
BatchNorm2d_params['eps'] = eps
# return
fc_conv_weights.append(layer.weight)
parameters.append(BatchNorm2d_params)
elif isinstance(layer, nn.Linear):
layer_name = 'Linear'
Linear_params = {}
Linear_params['layer_name'] = layer_name
# in_features
in_features = layer.__dict__.get('in_features')
Linear_params['in_features'] = in_features
# out_features
out_features = layer.__dict__.get('out_features')
Linear_params['out_features'] = out_features
# return
fc_conv_weights.append(layer.weight)
parameters.append(Linear_params)
elif isinstance(layer, nn.AdaptiveAvgPool2d):
layer_name = 'AdaptiveAvgPool2d'
AdaptiveAvgPool2d_params = {}
AdaptiveAvgPool2d_params['layer_name'] = layer_name
# output_size
output_size = layer.__dict__.get('output_size')
if not isinstance(output_size, tuple):
AdaptiveAvgPool2d_params['output_size'] = (output_size, output_size)
else:
AdaptiveAvgPool2d_params['output_size'] = output_size
# return
parameters.append(AdaptiveAvgPool2d_params)
else:
print('The layer has not been processed in get_structure_parameters_v1!')
return parameters, fc_conv_weights
@torch.no_grad()
def delete_allpths(pth_dir=None):
import os
if pth_dir == None:
pth_dir = "./tmp_file/"
for root, dirs, files in os.walk(pth_dir, topdown=False):
for name in files:
if name.endswith('.pth',):
os.remove(os.path.join(root, name))
@torch.no_grad()
def mul_items(tensor_size):
x = list(tensor_size)
mul = 1.
for i in range(len(x)):
mul *= x[i]
return mul
@torch.no_grad()
def gradient_backward_v1(model, img, label, num_class=1000):
return_dz = []
parameters, fc_conv_weights = get_structure_parameters_v1(model)
featuremap = get_featuremap(featuremap_dir=None)
featuremap.insert(0, img) ###
y_true = F.one_hot(label, num_classes=num_class).float()
loss, dLoss_dz = cross_entropy_loss(featuremap[-1], y_true)
print('Self calculated loss: ', loss)
featuremap.pop()
return_dz.append(dLoss_dz)
dW_dB_fc_conv = []
for i in range(len(parameters)-1, -1, -1):
layer = parameters[i]
print('\n======================== {0:3} Layer: '.format(str(i))+'{0:9}'.format(layer['layer_name'])+' Backward Start ========================')
if layer['layer_name'] == 'Conv2d':
z = featuremap[-1]
weight_z = fc_conv_weights[-1]
try:
padding = layer['padding']
except:
padding = (0, 0)
stride = layer['stride']
dLoss_dz, dLoss_dW, dLoss_dB = conv_backward(dLoss_dz, weight_z, z, padding, stride)
return_dz.append(dLoss_dz)
fc_conv_weights.pop()
if not len(featuremap) == 1:
lastpop = featuremap.pop()
if not len(dLoss_dz.shape) == len(lastpop.shape):
dLoss_dz = dLoss_dz.reshape(lastpop.shape)
elif layer['layer_name'] == 'ReLU':
z = featuremap[-1]
dLoss_dz = relu_backward(dLoss_dz, z)
return_dz.append(dLoss_dz)
lastpop = featuremap.pop()
if not len(dLoss_dz.shape) == len(lastpop.shape):
dLoss_dz = dLoss_dz.reshape(lastpop.shape)
elif layer['layer_name'] == 'MaxPool2d':
z = featuremap[-1]
pooling = layer['kernel_size']
stride = layer['stride']
padding = layer['padding']
dLoss_dz = max_pooling_backward(dLoss_dz, z, pooling, stride, padding)
return_dz.append(dLoss_dz)
lastpop = featuremap.pop()
if not len(dLoss_dz.shape) == len(lastpop.shape):
dLoss_dz = dLoss_dz.reshape(lastpop.shape)
elif layer['layer_name'] == 'AvgPool2d':
z = featuremap[-1]
pooling = layer['kernel_size']
stride = layer['stride']
padding = layer['padding']
dLoss_dz = average_pooling_backward(dLoss_dz, z, pooling, stride, padding)
return_dz.append(dLoss_dz)
lastpop = featuremap.pop()
if not len(dLoss_dz.shape) == len(lastpop.shape):
dLoss_dz = dLoss_dz.reshape(lastpop.shape)
elif layer['layer_name'] == 'Linear':
weight_z = fc_conv_weights[-1]
z = featuremap[-1]
dLoss_dz, dLoss_dW, dLoss_dB = fc_backward(dLoss_dz, z, weight_z)
return_dz.append(dLoss_dz)
fc_conv_weights.pop()
lastpop = featuremap.pop()
if not len(dLoss_dz.shape) == len(lastpop.shape):
dLoss_dz = dLoss_dz.reshape(lastpop.shape)
elif layer['layer_name'] == 'Dropout':
p = layer['p']
mask = featuremap[-1]
dLoss_dz = dropback_backward(dLoss_dz, mask, p)
return_dz.append(dLoss_dz)
featuremap.pop()
lastpop = featuremap.pop()
if not len(dLoss_dz.shape) == len(lastpop.shape):
dLoss_dz = dLoss_dz.reshape(lastpop.shape)
elif layer['layer_name'] == 'BatchNorm2d':
eps = layer['eps']
z = featuremap[-1]
gamma = fc_conv_weights[-1]
dLoss_dz = batchnorm2d_backward(dLoss_dz, z, eps, gamma)
return_dz.append(dLoss_dz)
fc_conv_weights.pop()
lastpop = featuremap.pop()
if not len(dLoss_dz.shape) == len(lastpop.shape):
dLoss_dz = dLoss_dz.reshape(lastpop.shape)
else:
print('Not completed in gradient_backward_v1!')
print('======================== {0:3} Layer: '.format(str(i))+'{0:9}'.format(layer['layer_name'])+' Backward End ==========================')
delete_allpths(pth_dir=None)
return return_dz, dLoss_dW, dLoss_dB
@torch.no_grad()
def make_dot(var, params=None):
""" Produces Graphviz representation of PyTorch autograd graph
Blue nodes are the Variables that require grad, orange are Tensors
saved for backward in torch.autograd.Function
Args:
var: output Variable
params: dict of (name, Variable) to add names to node that
require grad (TODO: make optional)
"""
if params is not None:
assert isinstance(params.values()[0], Variable)
param_map = {id(v): k for k, v in params.items()}
node_attr = dict(style='filled',
shape='box',
align='left',
fontsize='12',
ranksep='0.1',
height='0.2')
dot = Digraph(node_attr=node_attr, graph_attr=dict(size="12,12"))
seen = set()
def size_to_str(size):
return '('+(', ').join(['%d' % v for v in size])+')'
def add_nodes(var):
if var not in seen:
if torch.is_tensor(var):
dot.node(str(id(var)), size_to_str(var.size()), fillcolor='orange')
elif hasattr(var, 'variable'):
u = var.variable
name = param_map[id(u)] if params is not None else ''
node_name = '%s\n %s' % (name, size_to_str(u.size()))
dot.node(str(id(var)), node_name, fillcolor='lightblue')
else:
dot.node(str(id(var)), str(type(var).__name__))
seen.add(var)
if hasattr(var, 'next_functions'):
for u in var.next_functions:
if u[0] is not None:
dot.edge(str(id(u[0])), str(id(var)))
add_nodes(u[0])
if hasattr(var, 'saved_tensors'):
for t in var.saved_tensors:
dot.edge(str(id(t)), str(id(var)))
add_nodes(t)
print(var)
add_nodes(var.grad_fn)
return dot
def generate_g(model, x):
delete_allpths(pth_dir=None)
print('\n=========================== Store network model Results Start =========================')
y = model(x)
print('=========================== Store network model Results End ===========================\n')
if 'GoogLeNet' in str(model).split('\n')[0]:
g = make_dot(y[0])
return g
else:
g = make_dot(y)
return g
@torch.no_grad()
def exchange_name(name):
if 'Relu' in name:
return 'ReLU'
elif 'AddmmBackward' in name:
return 'Linear'
elif 'ViewBackward' in name:
return 'View'
elif 'Mean' in name or 'Avg' in name:
return 'AvgPool2d'
elif 'BatchNorm' in name:
return 'BatchNorm2d'
elif 'Conv' in name:
return 'Conv2d'
elif 'MaxPool' in name:
return 'MaxPool2d'
elif 'MulBackward' in name:
return 'Dropout_2'
elif 'DivBackward' in name:
return 'Dropout_1'
elif 'AddBackward' in name:
return 'Add'
elif 'Cat' in name:
return 'Cat'
elif 'Hardtanh' in name:
return 'ReLU6'
else:
return 'None'
@torch.no_grad()
def generate_connections(g):
graph = str(g).split('\n')
labels = {}
connections = []
for i in range(len(graph)):
if 'label' in graph[i] and graph[i][-1] == '"':
labels[(graph[i]+graph[i+1][1:]).split('\t')[1].split(' ')[0]]=\
(graph[i]+graph[i+1][1:]).split('\t')[1].split('"')[1]
if 'label' in graph[i] and graph[i][-1] == ']':
labels[graph[i].split('\t')[1].split(' ')[0]]=\
graph[i].split('\t')[1].split('=')[1].split(']')[0]
for i in range(len(graph)):
if '->' in graph[i]:
connections.append({labels[graph[i].split('\t')[1].split(' -> ')[0]]+'_'+\
graph[i].split('\t')[1].split(' -> ')[0]:\
labels[graph[i].split('\t')[1].split(' -> ')[1]]+'_'+\
graph[i].split('\t')[1].split(' -> ')[1]})
pop_index = []
for i in range(len(connections)):
item_key = list(connections[i].keys())[0]
if '(' in item_key or 'TBackward' in item_key:
pop_index.append(connections[i])
for i in range(len(pop_index)-1, -1, -1):
connections.remove(pop_index[i])
new_connections = []
for item in connections:
key, value = list(item.items())[0]
key1 = exchange_name(key.split('_')[0]) + '_' + key.split('_')[1]
value1 = exchange_name(value.split('_')[0]) + '_' + value.split('_')[1]
if 'None' in key1 or 'None' in value1:
print('Not completed for '+key+' or '+value+'! Check exchange_name function!')
exit()
new_connections.append({key1: value1})
if not len(new_connections) == len(connections):
print('Generate connections not done! Check generate_connections function!')
exit()
new_connections.insert(0, {list(new_connections[0].values())[0]: None})
new_connections.append({'None': 'None'})
return connections, new_connections
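# Hedged illustration (node ids invented for the example): each element of
# new_connections is a one-item dict whose key and value have the form
# '<LayerType>_<graphviz node id>', e.g. {'Conv2d_140245760': 'BatchNorm2d_140245888'},
# i.e. one edge of the autograd graph after renaming through exchange_name.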
@torch.no_grad()
def get_split_connections(connections):
return_connections = []
tmp_split = []
for i in range(len(connections)):
item = connections[i]
if len(tmp_split) == 0:
tmp_split.append(item)
continue
value = list(item.values())[0]
last_key = list(tmp_split[-1].keys())[0]
if value == last_key:
tmp_split.append(item)
else:
return_connections.append(tmp_split)
tmp_split = [item]
return return_connections
@torch.no_grad()
def find_start_end(list_dic_key_value, i, j):
key1 = list(list_dic_key_value[i].values())[0]
key2 = list(list_dic_key_value[j].keys())[0]
start = 0
end = len(list_dic_key_value)-1
for index in range(len(list_dic_key_value)):
if key1 == list(list_dic_key_value[index].keys())[0]:
start = index
break
for index in range(len(list_dic_key_value)):
if key2 == list(list_dic_key_value[index].keys())[0]:
end = index
break
return start+1, end-1
@torch.no_grad()
def merge_connections(connections):
import copy
last_connections = copy.deepcopy(connections)
connections.append({'None':'None'})
num_Throwed = 0
notchoosed = []
print('\n=========================== Restore network model Start ===============================')
for i in range(len(connections)):
print('# Restore network model: processing {}/{}'.format(i, len(connections)-1))
item_key = list(connections[i].keys())[0]
if not 'None' in item_key:
if i == 0:
pass
else:
last_item_key = list(connections[i-1].keys())[0]
if not connections[i][item_key] == last_item_key:
for j in range(i+1, len(connections)):
if not list(connections[j].values())[0] == list(connections[j-1].keys())[0]:
notchoosed.append(i)
start, end = find_start_end(connections, i, j-1)
tmp = []
tmp.append(connections[start:end+1])
tmp.append(connections[i:j-1])
last_connections[start:end+1] = [tmp]
for kk in range(end-start):
last_connections.insert(start, 'Throwed')
num_Throwed += 1
break
if not notchoosed == []:
last_connections = last_connections[:notchoosed[0]]
else:
pass
for i in range(num_Throwed):
last_connections.remove('Throwed')
if last_connections[-1] == {'None': 'None'}:
last_connections.remove({'None': 'None'})
print('=========================== Restore network model End =================================\n')
return last_connections
@torch.no_grad()
def find_next_layer_by_name(layers, name, start_i):
for i in range(start_i, len(layers)):
layer = layers[i]
if name in str(layer):
return layer, i
@torch.no_grad()
def get_layers(last_connections, model):
return_layers = []
tmp_layers = []
for layer in model.modules():
if not ':' in str(layer):
tmp_layers.append(layer)
index_tmp_layers = 0
for i in range(len(last_connections)-1, -1, -1):
if not isinstance(last_connections[i], list):
            # a single layer, no branches
current_layer_name = list(last_connections[i].keys())[0].split('_')[0]
if 'ReLU' in current_layer_name:
return_layers.insert(0, torch.nn.ReLU(inplace=True))
elif 'Add' in current_layer_name:
return_layers.insert(0, 'Add')
elif 'View' in current_layer_name:
return_layers.insert(0, 'View')
else:
tmp = find_next_layer_by_name(tmp_layers, current_layer_name, index_tmp_layers)
return_layers.insert(0, tmp[0])
if isinstance(last_connections[i-1], list):
index_tmp_layers = tmp[1] + 1
elif not list(last_connections[i-1].keys())[0].split('_')[0] == 'Dropout':
index_tmp_layers = tmp[1] + 1
else:
return_layers.insert(0, [])
for j in range(len(last_connections[i])):
return_layers[0].append([])
if len(last_connections[i][j]) == 0:
continue
for k in range(len(last_connections[i][j])-1, -1, -1):
current_layer_name = list(last_connections[i][j][k].keys())[0].split('_')[0]
if 'ReLU' in current_layer_name:
return_layers[0][j].insert(0, torch.nn.ReLU(inplace=True))
elif 'Add' in current_layer_name:
return_layers[0][j].insert(0, 'Add')
elif 'View' in current_layer_name:
return_layers.insert(0, 'View')
else:
tmp = find_next_layer_by_name(tmp_layers, current_layer_name, index_tmp_layers)
return_layers[0][j].insert(0, tmp[0])
if not list(last_connections[i][j][k-1].keys())[0].split('_')[0] == 'Dropout':
index_tmp_layers = tmp[1] + 1
return return_layers
@torch.no_grad()
def get_tensors(last_connections):
tensors = get_featuremap(featuremap_dir=None)
index_tensors = 0
import copy
last_tensors = copy.deepcopy(last_connections)
for i in range(len(last_connections)-1, -1, -1):
if not isinstance(last_connections[i], list):
current_layer_name = list(last_connections[i].keys())[0].split('_')[0]
if 'Add' in current_layer_name:
last_tensors[i] = 'Add'
elif 'View' in current_layer_name:
last_tensors[i] = 'View'
else:
last_tensors[i] = tensors[index_tensors]
index_tensors += 1
else:
for j in range(len(last_connections[i])):
if len(last_connections[i][j]) == 0:
continue
for k in range(len(last_connections[i][j])-1, -1, -1):
current_layer_name = list(last_connections[i][j][k].keys())[0].split('_')[0]
if 'Add' in current_layer_name:
last_tensors[i][j][k] = 'Add'
elif 'View' in current_layer_name:
last_tensors[i][j][k] = 'View'
else:
last_tensors[i][j][k] = tensors[index_tensors]
index_tensors += 1
for i in range(len(last_tensors)-1, -1, -1):
if isinstance(last_tensors[i], str):
# Add or View
if last_tensors[i] == 'Add':
last_tensors[i] = last_tensors[i+1][0][0] + last_tensors[i+1][1][0]
if last_tensors[i] == 'View':
last_tensors[i] = last_tensors[i+1].view(last_tensors[i+1].size(0), -1)
elif isinstance(last_tensors[i], list):
for j in range(len(last_tensors[i])):
if len(last_tensors[i][j]) == 0:
last_tensors[i][j].append(last_tensors[i+1])
return last_tensors
@torch.no_grad()
def get_structure_parameters(return_layers):
import copy
parameters = copy.deepcopy(return_layers)
fc_conv_weights = copy.deepcopy(return_layers)
for i in range(len(return_layers)):
layer = return_layers[i]
if isinstance(layer, nn.Conv2d):
layer_name = 'Conv2d'
Conv2d_params = {}
Conv2d_params['layer_name'] = layer_name
# in_channel
in_channel = layer.__dict__.get('in_channels')
Conv2d_params['in_channel'] = in_channel
# out_channel
out_channel = layer.__dict__.get('out_channels')
Conv2d_params['out_channel'] = out_channel
# kernel_size
kernel_size = layer.__dict__.get('kernel_size')
if not isinstance(kernel_size, tuple):
Conv2d_params['kernel_size'] = (kernel_size, kernel_size)
else:
Conv2d_params['kernel_size'] = kernel_size
# stride
stride = layer.__dict__.get('stride')
if not isinstance(stride, tuple):
Conv2d_params['stride'] = (stride, stride)
else:
Conv2d_params['stride'] = stride
# padding
padding = layer.__dict__.get('padding')
if not isinstance(padding, tuple):
Conv2d_params['padding'] = (padding, padding)
else:
Conv2d_params['padding'] = padding
# return
fc_conv_weights[i] = layer.weight
parameters[i] = Conv2d_params
elif isinstance(layer, nn.ReLU):
layer_name = 'ReLU'
parameters[i] = {'layer_name': layer_name}
elif layer == 'Add':
layer_name = 'Add'
parameters[i] = {'layer_name': layer_name}
elif layer == 'View':
layer_name = 'View'
parameters[i] = {'layer_name': layer_name}
elif layer == 'Cat':
layer_name = 'Cat'
parameters[i] = {'layer_name': layer_name}
elif isinstance(layer, nn.MaxPool2d):
layer_name = 'MaxPool2d'
MaxPool2d_params = {}
MaxPool2d_params['layer_name'] = layer_name
# kernel_size
kernel_size = layer.__dict__.get('kernel_size')
if not isinstance(kernel_size, tuple):
MaxPool2d_params['kernel_size'] = (kernel_size, kernel_size)
else:
MaxPool2d_params['kernel_size'] = kernel_size
# stride
stride = layer.__dict__.get('stride')
if not isinstance(stride, tuple):
MaxPool2d_params['stride'] = (stride, stride)
else:
MaxPool2d_params['stride'] = stride
# padding
padding = layer.__dict__.get('padding')
if not isinstance(padding, tuple):
MaxPool2d_params['padding'] = (padding, padding)
else:
MaxPool2d_params['padding'] = padding
# return
parameters[i] = MaxPool2d_params
elif isinstance(layer, nn.AvgPool2d):
layer_name = 'AvgPool2d'
AvgPool2d_params = {}
AvgPool2d_params['layer_name'] = layer_name
# kernel_size
kernel_size = layer.__dict__.get('kernel_size')
if not isinstance(kernel_size, tuple):
AvgPool2d_params['kernel_size'] = (kernel_size, kernel_size)
else:
AvgPool2d_params['kernel_size'] = kernel_size
# stride
stride = layer.__dict__.get('stride')
if not isinstance(stride, tuple):
AvgPool2d_params['stride'] = (stride, stride)
else:
AvgPool2d_params['stride'] = stride
# padding
padding = layer.__dict__.get('padding')
if not isinstance(padding, tuple):
AvgPool2d_params['padding'] = (padding, padding)
else:
AvgPool2d_params['padding'] = padding
# return
parameters[i] = AvgPool2d_params
elif isinstance(layer, nn.Dropout):
layer_name = 'Dropout'
Dropout_params = {}
Dropout_params['layer_name'] = layer_name
# p
p = layer.__dict__.get('p')
Dropout_params['p'] = p
# return
parameters[i] = Dropout_params
elif isinstance(layer, nn.BatchNorm2d):
layer_name = 'BatchNorm2d'
BatchNorm2d_params = {}
BatchNorm2d_params['layer_name'] = layer_name
# num_features
num_features = layer.__dict__.get('num_features')
BatchNorm2d_params['num_features'] = num_features
# eps
eps = layer.__dict__.get('eps')
BatchNorm2d_params['eps'] = eps
# return
fc_conv_weights[i] = layer.weight
parameters[i] = BatchNorm2d_params
elif isinstance(layer, nn.Linear):
layer_name = 'Linear'
Linear_params = {}
Linear_params['layer_name'] = layer_name
# in_features
in_features = layer.__dict__.get('in_features')
Linear_params['in_features'] = in_features
# out_features
out_features = layer.__dict__.get('out_features')
Linear_params['out_features'] = out_features
# return
fc_conv_weights[i] = layer.weight
parameters[i] = Linear_params
elif isinstance(layer, nn.AdaptiveAvgPool2d):
layer_name = 'AdaptiveAvgPool2d'
AdaptiveAvgPool2d_params = {}
AdaptiveAvgPool2d_params['layer_name'] = layer_name
# output_size
output_size = layer.__dict__.get('output_size')
if not isinstance(output_size, tuple):
AdaptiveAvgPool2d_params['output_size'] = (output_size, output_size)
else:
AdaptiveAvgPool2d_params['output_size'] = output_size
# return
parameters[i] = AdaptiveAvgPool2d_params
elif isinstance(layer, list):
for j in range(len(layer)):
for k in range(len(layer[j])):
tmp_layer = layer[j][k]
###
if isinstance(tmp_layer, nn.Conv2d):
layer_name = 'Conv2d'
Conv2d_params = {}
Conv2d_params['layer_name'] = layer_name
# in_channel
in_channel = tmp_layer.__dict__.get('in_channels')
Conv2d_params['in_channel'] = in_channel
# out_channel
out_channel = tmp_layer.__dict__.get('out_channels')
Conv2d_params['out_channel'] = out_channel
# kernel_size
kernel_size = tmp_layer.__dict__.get('kernel_size')
if not isinstance(kernel_size, tuple):
Conv2d_params['kernel_size'] = (kernel_size, kernel_size)
else:
Conv2d_params['kernel_size'] = kernel_size
# stride
stride = tmp_layer.__dict__.get('stride')
if not isinstance(stride, tuple):
Conv2d_params['stride'] = (stride, stride)
else:
Conv2d_params['stride'] = stride
# padding
padding = tmp_layer.__dict__.get('padding')
if not isinstance(padding, tuple):
Conv2d_params['padding'] = (padding, padding)
else:
Conv2d_params['padding'] = padding
# return
fc_conv_weights[i][j][k] = tmp_layer.weight
parameters[i][j][k] = Conv2d_params
elif isinstance(tmp_layer, nn.ReLU):
layer_name = 'ReLU'
parameters[i][j][k] = {'layer_name': layer_name}
elif tmp_layer == 'Add':
layer_name = 'Add'
parameters[i][j][k] = {'layer_name': layer_name}
elif tmp_layer == 'View':
layer_name = 'View'
parameters[i][j][k] = {'layer_name': layer_name}
elif tmp_layer == 'Cat':
layer_name = 'Cat'
parameters[i][j][k] = {'layer_name': layer_name}
elif isinstance(tmp_layer, nn.MaxPool2d):
layer_name = 'MaxPool2d'
MaxPool2d_params = {}
MaxPool2d_params['layer_name'] = layer_name
# kernel_size
kernel_size = tmp_layer.__dict__.get('kernel_size')
if not isinstance(kernel_size, tuple):
MaxPool2d_params['kernel_size'] = (kernel_size, kernel_size)
else:
MaxPool2d_params['kernel_size'] = kernel_size
# stride
stride = tmp_layer.__dict__.get('stride')
if not isinstance(stride, tuple):
MaxPool2d_params['stride'] = (stride, stride)
else:
MaxPool2d_params['stride'] = stride
# padding
padding = tmp_layer.__dict__.get('padding')
if not isinstance(padding, tuple):
MaxPool2d_params['padding'] = (padding, padding)
else:
MaxPool2d_params['padding'] = padding
# return
parameters[i][j][k] = MaxPool2d_params
elif isinstance(tmp_layer, nn.AvgPool2d):
layer_name = 'AvgPool2d'
AvgPool2d_params = {}
AvgPool2d_params['layer_name'] = layer_name
# kernel_size
kernel_size = tmp_layer.__dict__.get('kernel_size')
if not isinstance(kernel_size, tuple):
AvgPool2d_params['kernel_size'] = (kernel_size, kernel_size)
else:
AvgPool2d_params['kernel_size'] = kernel_size
# stride
stride = tmp_layer.__dict__.get('stride')
if not isinstance(stride, tuple):
AvgPool2d_params['stride'] = (stride, stride)
else:
AvgPool2d_params['stride'] = stride
# padding
padding = tmp_layer.__dict__.get('padding')
if not isinstance(padding, tuple):
AvgPool2d_params['padding'] = (padding, padding)
else:
AvgPool2d_params['padding'] = padding
# return
parameters[i][j][k] = AvgPool2d_params
elif isinstance(tmp_layer, nn.Dropout):
layer_name = 'Dropout'
Dropout_params = {}
Dropout_params['layer_name'] = layer_name
# p
p = tmp_layer.__dict__.get('p')
Dropout_params['p'] = p
# return
parameters[i][j][k] = Dropout_params
elif isinstance(tmp_layer, nn.BatchNorm2d):
layer_name = 'BatchNorm2d'
BatchNorm2d_params = {}
BatchNorm2d_params['layer_name'] = layer_name
# num_features
num_features = tmp_layer.__dict__.get('num_features')
BatchNorm2d_params['num_features'] = num_features
# eps
eps = tmp_layer.__dict__.get('eps')
BatchNorm2d_params['eps'] = eps
# return
fc_conv_weights[i][j][k] = tmp_layer.weight
parameters[i][j][k] = BatchNorm2d_params
elif isinstance(tmp_layer, nn.Linear):
layer_name = 'Linear'
Linear_params = {}
Linear_params['layer_name'] = layer_name
# in_features
in_features = tmp_layer.__dict__.get('in_features')
Linear_params['in_features'] = in_features
# out_features
out_features = tmp_layer.__dict__.get('out_features')
Linear_params['out_features'] = out_features
# return
fc_conv_weights[i][j][k] = tmp_layer.weight
parameters[i][j][k] = Linear_params
elif isinstance(tmp_layer, nn.AdaptiveAvgPool2d):
layer_name = 'AdaptiveAvgPool2d'
AdaptiveAvgPool2d_params = {}
AdaptiveAvgPool2d_params['layer_name'] = layer_name
# output_size
output_size = tmp_layer.__dict__.get('output_size')
if not isinstance(output_size, tuple):
AdaptiveAvgPool2d_params['output_size'] = (output_size, output_size)
else:
AdaptiveAvgPool2d_params['output_size'] = output_size
# return
parameters[i][j][k] = AdaptiveAvgPool2d_params
###
else:
print('The layer has not been processed in get_structure_parameters!')
return parameters, fc_conv_weights
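# Hedged illustration (channel numbers invented for the example): for an nn.Conv2d
# layer, the entry built above looks like
#   parameters[i] = {'layer_name': 'Conv2d', 'in_channel': 64, 'out_channel': 128,
#                    'kernel_size': (3, 3), 'stride': (1, 1), 'padding': (1, 1)}
# while fc_conv_weights[i] holds that layer's weight tensor.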
def gradient_backward_v2(model, img, label, num_class=1000, g_view=False):
x = Variable(img)
g = generate_g(model, x)
if g_view:
g.view()
delete_allpths(pth_dir=None)
print('\n=========================== Generate Tensors Start ====================================')
result = model(img)
print('=========================== Generate Tensors End ======================================\n')
Loss = nn.CrossEntropyLoss()
if 'GoogLeNet' in str(model).split('\n')[0]:
loss_torch = Loss(result[0], label)
else:
loss_torch = Loss(result, label)
_, connections = generate_connections(g)
last_connections = merge_connections(connections)
return_layers = get_layers(last_connections, model)
return_tensors = get_tensors(last_connections)
parameters, fc_conv_weights = get_structure_parameters(return_layers)
'''
print('================')
for i in range(len(last_connections)):
print(i, last_connections[i])
print('================')
print('================')
for i in range(len(return_layers)):
print(i, return_layers[i])
print('================')
print('================')
for i in range(len(parameters)):
print(i, parameters[i])
print('================')
print('================')
for i in range(len(return_tensors)):
if not isinstance(return_tensors[i], list) and not isinstance(return_tensors[i], str):
print('=========', i, return_tensors[i].shape)
print('================')
'''
import copy
return_dz = copy.deepcopy(last_connections)
featuremap = return_tensors
featuremap.append(img)
y_true = F.one_hot(label, num_classes=num_class).float()
loss, dLoss_dz = cross_entropy_loss(featuremap[0], y_true)
featuremap.pop(0)
return_dz.append(dLoss_dz)
#####################tensors
'''
for i in range(len(last_connections)):
print(last_connections[i])
for i in range(len(featuremap)):
if not isinstance(featuremap[i], list):
print('=========', i, featuremap[i].shape)
else:
for j in range(len(featuremap[i])):
for k in range(len(featuremap[i][j])):
print(' =========', i, j, k, featuremap[i][j][k].shape)
'''
#####################
    # traverse the preceding n layers in reverse order
for i in range(len(parameters)):
layer = parameters[i]
if not isinstance(layer, list):
print('\n======================== {0:3} Layer: '.format(str(len(parameters)-1-i))+'{0:11}'.format(layer['layer_name'])+' Backward Start ========================')
if layer['layer_name'] == 'Conv2d':
z = featuremap[i]
weight_z = fc_conv_weights[i]
try:
padding = layer['padding']
except:
padding = (0, 0)
stride = layer['stride']
dLoss_dz, dLoss_dW, dLoss_dB = conv_backward(dLoss_dz, weight_z, z, padding, stride)
return_dz[i] = dLoss_dz
elif layer['layer_name'] == 'ReLU':
z = featuremap[i]
dLoss_dz = relu_backward(dLoss_dz, z)
return_dz[i] = dLoss_dz
elif layer['layer_name'] == 'MaxPool2d':
z = featuremap[i]
pooling = layer['kernel_size']
stride = layer['stride']
padding = layer['padding']
dLoss_dz = max_pooling_backward(dLoss_dz, z, pooling, stride, padding)
return_dz[i] = dLoss_dz
elif layer['layer_name'] == 'AvgPool2d':
z = featuremap[i]
pooling = layer['kernel_size']
stride = layer['stride']
padding = layer['padding']
dLoss_dz = average_pooling_backward(dLoss_dz, z, pooling, stride, padding)
return_dz[i] = dLoss_dz
elif layer['layer_name'] == 'Linear':
weight_z = fc_conv_weights[i]
z = featuremap[i]
dLoss_dz, dLoss_dW, dLoss_dB = fc_backward(dLoss_dz, z, weight_z)
return_dz[i] = dLoss_dz
elif layer['layer_name'] == 'View':
last_z = featuremap[i+1]
if 'Pool' in parameters[i+1]['layer_name']:
params = (parameters[i+1]['kernel_size'], parameters[i+1]['stride'], parameters[i+1]['padding'])
else:
params = None
dLoss_dz = view_backward(dLoss_dz, last_z, params)
return_dz[i] = dLoss_dz
elif layer['layer_name'] == 'Add':
dLoss_dz = add_backward(dLoss_dz)
return_dz[i] = dLoss_dz
elif layer['layer_name'] == 'Dropout':
if parameters[i-1]['layer_name'] == 'Dropout':
return_dz[i] = dLoss_dz
                print('# Skip this layer because the layer has been calculated!')
print('======================== {0:3} Layer: '.format(str(len(parameters)-1-i))+'{0:11}'.\
format(layer['layer_name'])+' Backward End ==========================')
continue
p = layer['p']
mask = featuremap[i]
dLoss_dz = dropback_backward(dLoss_dz, mask, p)
return_dz[i] = dLoss_dz
elif layer['layer_name'] == 'BatchNorm2d':
eps = layer['eps']
z = featuremap[i]
gamma = fc_conv_weights[i]
dLoss_dz = batchnorm2d_backward(dLoss_dz, z, eps, gamma)
return_dz[i] = dLoss_dz
print('======================== {0:3} Layer: '.format(str(len(parameters)-1-i))+'{0:11}'.format(layer['layer_name'])+' Backward End ==========================')
elif isinstance(layer, list):
import copy
tmp_dLoss_dz = []
for j in range(len(layer)):
tmp_dLoss_dz.append(copy.deepcopy(dLoss_dz))
for k in range(len(layer[j])):
tmp_layer = layer[j][k]
print('\n=========================== {0:3} Branch: '.format(str(len(parameters)-1-i))+'{0:11}'.format(tmp_layer['layer_name'])+' Backward Start ====================')
if tmp_layer['layer_name'] == 'Conv2d':
if k+1 >= len(featuremap[i-1][j]):
z = featuremap[i]
else:
z = featuremap[i-1][j][k+1]
weight_z = fc_conv_weights[i][j][k]
try:
padding = tmp_layer['padding']
except:
padding = (0, 0)
stride = tmp_layer['stride']
tmp_dLoss_dz[-1], dLoss_dW, dLoss_dB = conv_backward(tmp_dLoss_dz[-1], weight_z, z, padding, stride)
return_dz[i][j][k] = tmp_dLoss_dz[-1]
elif tmp_layer['layer_name'] == 'ReLU':
z = featuremap[i-1][j][k+1]
tmp_dLoss_dz[-1] = relu_backward(tmp_dLoss_dz[-1], z)
return_dz[i][j][k] = tmp_dLoss_dz[-1]
elif tmp_layer['layer_name'] == 'BatchNorm2d':
eps = tmp_layer['eps']
z = featuremap[i-1][j][k+1]
gamma = fc_conv_weights[i][j][k]
tmp_dLoss_dz[-1] = batchnorm2d_backward(tmp_dLoss_dz[-1], z, eps, gamma)
return_dz[i][j][k] = tmp_dLoss_dz[-1]
print('=========================== {0:3} Branch: '.format(str(len(parameters)-1-i))+'{0:11}'.format(tmp_layer['layer_name'])+' Backward End ======================')
print(tmp_dLoss_dz[0].shape, tmp_dLoss_dz[1].shape)
dLoss_dz = tmp_dLoss_dz[0] + tmp_dLoss_dz[1]
else:
print('Not completed in gradient_backward!')
print('# Torch calculated loss: ', loss_torch.detach().numpy())
loss_torch.backward()
if 'VGG' in str(model) or 'AlexNet' in str(model):
print(judge_tensors_equal(dLoss_dW, model.features[0].weight.grad))
elif 'ResNet' in str(model):
print(judge_tensors_equal(dLoss_dW, model.conv1.weight.grad))
delete_allpths(pth_dir=None)
return return_dz, dLoss_dW, dLoss_dB | [
"torch.nn.ReLU",
"torch.nn.CrossEntropyLoss",
"torch.max",
"torch.exp",
"torch.pow",
"torch.eq",
"torch.sum",
"torch.flip",
"copy.deepcopy",
"torch.nn.functional.pad",
"os.walk",
"torch.arange",
"os.listdir",
"torch.matmul",
"torch.zeros_like",
"torch.autograd.Variable",
"torch.argmax",
"torch.swapaxes",
"torch.Tensor",
"torch.is_tensor",
"torch.einsum",
"torch.nn.functional.one_hot",
"torch.log",
"torch.gt",
"os.path.join",
"torch.no_grad",
"torch.zeros"
] | [((806, 821), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (819, 821), False, 'import torch\n'), ((1697, 1712), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (1710, 1712), False, 'import torch\n'), ((2365, 2380), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (2378, 2380), False, 'import torch\n'), ((3167, 3182), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (3180, 3182), False, 'import torch\n'), ((3478, 3493), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (3491, 3493), False, 'import torch\n'), ((3864, 3879), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (3877, 3879), False, 'import torch\n'), ((4947, 4962), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (4960, 4962), False, 'import torch\n'), ((5815, 5830), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (5828, 5830), False, 'import torch\n'), ((6750, 6765), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (6763, 6765), False, 'import torch\n'), ((7066, 7081), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (7079, 7081), False, 'import torch\n'), ((8339, 8354), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (8352, 8354), False, 'import torch\n'), ((8660, 8675), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (8673, 8675), False, 'import torch\n'), ((9052, 9067), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (9065, 9067), False, 'import torch\n'), ((10590, 10605), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (10603, 10605), False, 'import torch\n'), ((11113, 11128), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (11126, 11128), False, 'import torch\n'), ((15997, 16012), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (16010, 16012), False, 'import torch\n'), ((16267, 16282), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (16280, 16282), False, 'import torch\n'), ((16407, 16422), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (16420, 16422), False, 'import torch\n'), ((20048, 20063), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (20061, 20063), False, 'import torch\n'), ((22082, 22097), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (22095, 22097), False, 'import torch\n'), ((22729, 22744), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (22742, 22744), False, 'import torch\n'), ((24482, 24497), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (24495, 24497), False, 'import torch\n'), ((24945, 24960), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (24958, 24960), False, 'import torch\n'), ((25440, 25455), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (25453, 25455), False, 'import torch\n'), ((26957, 26972), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (26970, 26972), False, 'import torch\n'), ((27137, 27152), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (27150, 27152), False, 'import torch\n'), ((29020, 29035), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (29033, 29035), False, 'import torch\n'), ((30623, 30638), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (30636, 30638), False, 'import torch\n'), ((1185, 1203), 'torch.exp', 'torch.exp', (['y_shift'], {}), '(y_shift)\n', (1194, 1203), False, 'import torch\n'), ((2033, 2062), 'torch.matmul', 'torch.matmul', (['dLoss_dnextz', 'w'], {}), '(dLoss_dnextz, w)\n', (2045, 2062), False, 'import torch\n'), ((2134, 2164), 'torch.sum', 'torch.sum', (['dLoss_dnextz'], {'dim': '(0)'}), '(dLoss_dnextz, dim=0)\n', (2143, 2164), False, 'import torch\n'), ((3320, 3345), 'torch.zeros_like', 'torch.zeros_like', (['next_dz'], {}), '(next_dz)\n', (3336, 
3345), False, 'import torch\n'), ((3683, 3705), 'torch.zeros_like', 'torch.zeros_like', (['mask'], {}), '(mask)\n', (3699, 3705), False, 'import torch\n'), ((4181, 4280), 'torch.nn.functional.pad', 'F.pad', (['z'], {'pad': '(padding[1], padding[1], padding[0], padding[0], 0, 0)', 'mode': '"""constant"""', 'value': '(0)'}), "(z, pad=(padding[1], padding[1], padding[0], padding[0], 0, 0), mode=\n 'constant', value=0)\n", (4186, 4280), True, 'import torch.nn.functional as F\n'), ((4298, 4325), 'torch.zeros_like', 'torch.zeros_like', (['padding_z'], {}), '(padding_z)\n', (4314, 4325), False, 'import torch\n'), ((4337, 4352), 'torch.arange', 'torch.arange', (['N'], {}), '(N)\n', (4349, 4352), False, 'import torch\n'), ((5012, 5041), 'torch.Tensor', 'torch.Tensor', (['[1.0, 1.0, 1.0]'], {}), '([1.0, 1.0, 1.0])\n', (5024, 5041), False, 'import torch\n'), ((5366, 5391), 'torch.zeros_like', 'torch.zeros_like', (['next_dz'], {}), '(next_dz)\n', (5382, 5391), False, 'import torch\n'), ((6136, 6235), 'torch.nn.functional.pad', 'F.pad', (['z'], {'pad': '(padding[1], padding[1], padding[0], padding[0], 0, 0)', 'mode': '"""constant"""', 'value': '(0)'}), "(z, pad=(padding[1], padding[1], padding[0], padding[0], 0, 0), mode=\n 'constant', value=0)\n", (6141, 6235), True, 'import torch.nn.functional as F\n'), ((6253, 6280), 'torch.zeros_like', 'torch.zeros_like', (['padding_z'], {}), '(padding_z)\n', (6269, 6280), False, 'import torch\n'), ((6292, 6307), 'torch.arange', 'torch.arange', (['N'], {}), '(N)\n', (6304, 6307), False, 'import torch\n'), ((7545, 7566), 'torch.flip', 'torch.flip', (['K', '(2, 3)'], {}), '(K, (2, 3))\n', (7555, 7566), False, 'import torch\n'), ((7585, 7613), 'torch.swapaxes', 'torch.swapaxes', (['flip_K', '(0)', '(1)'], {}), '(flip_K, 0, 1)\n', (7599, 7613), False, 'import torch\n'), ((7637, 7786), 'torch.nn.functional.pad', 'F.pad', (['padding_next_dz'], {'pad': '(k2 - 1 - padding[1], k2 - 1 - padding[1], k1 - 1 - padding[0], k1 - 1 -\n padding[0], 0, 0)', 'mode': '"""constant"""', 'value': '(0)'}), "(padding_next_dz, pad=(k2 - 1 - padding[1], k2 - 1 - padding[1], k1 - \n 1 - padding[0], k1 - 1 - padding[0], 0, 0), mode='constant', value=0)\n", (7642, 7786), True, 'import torch.nn.functional as F\n'), ((7840, 7863), 'torch.swapaxes', 'torch.swapaxes', (['z', '(0)', '(1)'], {}), '(z, 0, 1)\n', (7854, 7863), False, 'import torch\n'), ((8565, 8613), 'torch.einsum', 'torch.einsum', (['"""nchwkj,dckj->ndhw"""', 'x_pad', 'weight'], {}), "('nchwkj,dckj->ndhw', x_pad, weight)\n", (8577, 8613), False, 'import torch\n'), ((8812, 8845), 'torch.zeros', 'torch.zeros', (['N', 'D', 'H_last', 'W_last'], {}), '(N, D, H_last, W_last)\n', (8823, 8845), False, 'import torch\n'), ((10780, 10799), 'os.listdir', 'os.listdir', (['pth_dir'], {}), '(pth_dir)\n', (10790, 10799), False, 'import os\n'), ((16136, 16167), 'os.walk', 'os.walk', (['pth_dir'], {'topdown': '(False)'}), '(pth_dir, topdown=False)\n', (16143, 16167), False, 'import os\n'), ((25528, 25554), 'copy.deepcopy', 'copy.deepcopy', (['connections'], {}), '(connections)\n', (25541, 25554), False, 'import copy\n'), ((29171, 29202), 'copy.deepcopy', 'copy.deepcopy', (['last_connections'], {}), '(last_connections)\n', (29184, 29202), False, 'import copy\n'), ((30714, 30742), 'copy.deepcopy', 'copy.deepcopy', (['return_layers'], {}), '(return_layers)\n', (30727, 30742), False, 'import copy\n'), ((30763, 30791), 'copy.deepcopy', 'copy.deepcopy', (['return_layers'], {}), '(return_layers)\n', (30776, 30791), False, 'import copy\n'), ((41412, 41425), 
'torch.autograd.Variable', 'Variable', (['img'], {}), '(img)\n', (41420, 41425), False, 'from torch.autograd import Variable\n'), ((41746, 41767), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (41765, 41767), True, 'import torch.nn as nn\n'), ((42829, 42860), 'copy.deepcopy', 'copy.deepcopy', (['last_connections'], {}), '(last_connections)\n', (42842, 42860), False, 'import copy\n'), ((1239, 1276), 'torch.sum', 'torch.sum', (['y_exp'], {'dim': '(1)', 'keepdim': '(True)'}), '(y_exp, dim=1, keepdim=True)\n', (1248, 1276), False, 'import torch\n'), ((3371, 3385), 'torch.gt', 'torch.gt', (['z', '(0)'], {}), '(z, 0)\n', (3379, 3385), False, 'import torch\n'), ((4366, 4381), 'torch.arange', 'torch.arange', (['C'], {}), '(C)\n', (4378, 4381), False, 'import torch\n'), ((5597, 5622), 'torch.pow', 'torch.pow', (['(var + eps)', '(0.5)'], {}), '(var + eps, 0.5)\n', (5606, 5622), False, 'import torch\n'), ((6321, 6336), 'torch.arange', 'torch.arange', (['C'], {}), '(C)\n', (6333, 6336), False, 'import torch\n'), ((8012, 8049), 'torch.swapaxes', 'torch.swapaxes', (['padding_next_dz', '(0)', '(1)'], {}), '(padding_next_dz, 0, 1)\n', (8026, 8049), False, 'import torch\n'), ((1125, 1166), 'torch.max', 'torch.max', (['y_predict'], {'dim': '(1)', 'keepdim': '(True)'}), '(y_predict, dim=1, keepdim=True)\n', (1134, 1166), False, 'import torch\n'), ((3741, 3760), 'torch.eq', 'torch.eq', (['mask', '(1.0)'], {}), '(mask, 1.0)\n', (3749, 3760), False, 'import torch\n'), ((4396, 4415), 'torch.arange', 'torch.arange', (['out_h'], {}), '(out_h)\n', (4408, 4415), False, 'import torch\n'), ((6351, 6370), 'torch.arange', 'torch.arange', (['out_h'], {}), '(out_h)\n', (6363, 6370), False, 'import torch\n'), ((7903, 8002), 'torch.nn.functional.pad', 'F.pad', (['z'], {'pad': '(padding[1], padding[1], padding[0], padding[0], 0, 0)', 'mode': '"""constant"""', 'value': '(0)'}), "(z, pad=(padding[1], padding[1], padding[0], padding[0], 0, 0), mode=\n 'constant', value=0)\n", (7908, 8002), True, 'import torch.nn.functional as F\n'), ((8081, 8108), 'torch.sum', 'torch.sum', (['next_dz'], {'axis': '(-1)'}), '(next_dz, axis=-1)\n', (8090, 8108), False, 'import torch\n'), ((16663, 16702), 'torch.nn.functional.one_hot', 'F.one_hot', (['label'], {'num_classes': 'num_class'}), '(label, num_classes=num_class)\n', (16672, 16702), True, 'import torch.nn.functional as F\n'), ((20916, 20936), 'torch.is_tensor', 'torch.is_tensor', (['var'], {}), '(var)\n', (20931, 20936), False, 'import torch\n'), ((42935, 42974), 'torch.nn.functional.one_hot', 'F.one_hot', (['label'], {'num_classes': 'num_class'}), '(label, num_classes=num_class)\n', (42944, 42974), True, 'import torch.nn.functional as F\n'), ((4431, 4450), 'torch.arange', 'torch.arange', (['out_w'], {}), '(out_w)\n', (4443, 4450), False, 'import torch\n'), ((6386, 6405), 'torch.arange', 'torch.arange', (['out_w'], {}), '(out_w)\n', (6398, 6405), False, 'import torch\n'), ((1333, 1357), 'torch.log', 'torch.log', (['y_probability'], {}), '(y_probability)\n', (1342, 1357), False, 'import torch\n'), ((4469, 4591), 'torch.argmax', 'torch.argmax', (['padding_z[n, c, strides[0] * i:strides[0] * i + pooling[0], strides[1] * j:\n strides[1] * j + pooling[1]]'], {}), '(padding_z[n, c, strides[0] * i:strides[0] * i + pooling[0], \n strides[1] * j:strides[1] * j + pooling[1]])\n', (4481, 4591), False, 'import torch\n'), ((16237, 16261), 'os.path.join', 'os.path.join', (['root', 'name'], {}), '(root, name)\n', (16249, 16261), False, 'import os\n'), ((27606, 27633), 
'torch.nn.ReLU', 'torch.nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (27619, 27633), False, 'import torch\n'), ((46488, 46511), 'copy.deepcopy', 'copy.deepcopy', (['dLoss_dz'], {}), '(dLoss_dz)\n', (46501, 46511), False, 'import copy\n'), ((28526, 28553), 'torch.nn.ReLU', 'torch.nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (28539, 28553), False, 'import torch\n')] |
import cv2
import torch
import yaml
import imageio
import throttle
import numpy as np
import matplotlib.pyplot as plt
from argparse import ArgumentParser
from skimage.transform import resize
from scipy.spatial import ConvexHull
from modules.generator import OcclusionAwareGenerator
from modules.keypoint_detector import KPDetector
from sync_batchnorm import DataParallelWithCallback
#from animate import normalize_kp
# command = [ffmpeg,
# '-y',
# '-f', 'rawvideo',
# '-vcodec','rawvideo',
# '-pix_fmt', 'bgr24',
# '-s', dimension,
# '-i', '-',
# '-c:v', 'libx264',
# '-pix_fmt', 'yuv420p',
# '-preset', 'ultrafast',
# '-f', 'flv',
# 'rtmp://10.10.10.80/live/mystream']
def normalize_kp(kp_source, kp_driving, kp_driving_initial, adapt_movement_scale=False,
use_relative_movement=False, use_relative_jacobian=False):
if adapt_movement_scale:
source_area = ConvexHull(kp_source['value'][0].data.cpu().numpy()).volume
driving_area = ConvexHull(kp_driving_initial['value'][0].data.cpu().numpy()).volume
adapt_movement_scale = np.sqrt(source_area) / np.sqrt(driving_area)
else:
adapt_movement_scale = 1
kp_new = {k: v for k, v in kp_driving.items()}
if use_relative_movement:
kp_value_diff = (kp_driving['value'] - kp_driving_initial['value'])
kp_value_diff *= adapt_movement_scale
kp_new['value'] = kp_value_diff + kp_source['value']
if use_relative_jacobian:
jacobian_diff = torch.matmul(kp_driving['jacobian'], torch.inverse(kp_driving_initial['jacobian']))
kp_new['jacobian'] = torch.matmul(jacobian_diff, kp_source['jacobian'])
return kp_new
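# Hedged worked example (values invented for illustration): with
# use_relative_movement=True and adapt_movement_scale=1, a keypoint coordinate that
# moved from 0.10 in the initial driving frame to 0.15 in the current frame
# (difference +0.05) shifts the corresponding source keypoint from, say, 0.30 to 0.35
# in kp_new['value'].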
def load_checkpoints(config_path, checkpoint_path, cpu=False):
with open(config_path) as f:
        config = yaml.load(f, Loader=yaml.FullLoader)
generator = OcclusionAwareGenerator(**config['model_params']['generator_params'],
**config['model_params']['common_params'])
if not cpu:
generator.cuda()
kp_detector = KPDetector(**config['model_params']['kp_detector_params'],
**config['model_params']['common_params'])
if not cpu:
kp_detector.cuda()
if cpu:
checkpoint = torch.load(checkpoint_path, map_location=torch.device('cpu'))
else:
checkpoint = torch.load(checkpoint_path)
generator.load_state_dict(checkpoint['generator'])
kp_detector.load_state_dict(checkpoint['kp_detector'])
if not cpu:
generator = DataParallelWithCallback(generator)
kp_detector = DataParallelWithCallback(kp_detector)
generator.eval()
kp_detector.eval()
return generator, kp_detector
@throttle.wrap(1, 2)
def forward(source_image, driving_frame, kp_source, kp_driving_initial, generator, kp_detector, relative=True, adapt_scale=True, cpu=True):
kp_driving = kp_detector(driving_frame)
kp_norm = normalize_kp(
kp_source=kp_source,
kp_driving=kp_driving,
kp_driving_initial=kp_driving_initial,
use_relative_movement=relative,
use_relative_jacobian=relative,
adapt_movement_scale=adapt_scale
)
out = generator(source_image, kp_source=kp_source, kp_driving=kp_norm)
return np.transpose(out["prediction"].data.cpu().numpy(), [0, 2, 3, 1])[0]
if __name__ == "__main__":
parser = ArgumentParser()
parser.add_argument("--config", required=True, help="path to config")
parser.add_argument("--source_image", required=True, help="path to source image")
parser.add_argument("--checkpoint", default="vox-cpk.pth.tar", help="path to checkpoint")
parser.add_argument("--relative", dest="relative", action="store_true", help="use relative or absolute keypoint coordinates")
parser.add_argument("--adapt_scale", dest="adapt_scale", action="store_true", help="adapt movement scale based on convex hull of keypoints")
parser.add_argument("--cpu", dest="cpu", action="store_true", help="CPU mode")
parser.set_defaults(relative=False)
parser.set_defaults(adapt_scale=False)
opt = parser.parse_args()
generator, kp_detector = load_checkpoints(config_path=opt.config, checkpoint_path=opt.checkpoint, cpu=opt.cpu)
source_image = imageio.imread(opt.source_image)
source_image = resize(source_image, (256, 256))[..., :3]
source = torch.tensor(source_image[np.newaxis].astype(np.float32)).permute(0, 3, 1, 2)
if not opt.cpu:
source = source.cuda()
kp_source = kp_detector(source)
#out = cv2.VideoWriter('outpy.avi', cv2.VideoWriter_fourcc('M','J','P','G'), 30, (256, 256))
kp_driving_initial = None
camera = cv2.VideoCapture(0)
ret, frame = camera.read()
while True:
ret, frame = camera.read()
        resized = resize(frame, (256, 256))[..., :3]
        # y = torch.tensor(np.array(resized))
        # x = y.cpu().numpy()
        # image = cv2.cvtColor(x, cv2.COLOR_BGR2RGB)
        # # x = y.permute(1, 2, 0)
        # plt.imshow(np.array(image))
        # plt.show()
        driving_resized = torch.tensor(np.array(resized)[np.newaxis].astype(np.float32)).permute(0, 3, 1, 2)
        # resize() returns a NumPy array, which has no .cuda(); move the torch tensor to the GPU instead.
        if not opt.cpu:
            driving_resized = driving_resized.cuda()
        if kp_driving_initial is None:
            kp_driving_initial = kp_detector(driving_resized)
fake_frame = forward(
source,
driving_resized,
kp_source,
kp_driving_initial,
generator,
kp_detector,
relative=opt.relative,
adapt_scale=opt.adapt_scale,
cpu=opt.cpu
)
cv2.imshow("frame", fake_frame)
#x = np.squeeze(driving_resized, axis=(0,))
#x = driving_resized[0].permute(1, 2, 0)
# plt_driving = driving_resized #permute(2, 3, 1)
#print(plt_driving.shape)
#plt.imshow(x)
#plt.show()
if cv2.waitKey(1) & 0xFF == ord('q'):
break
camera.release()
cv2.destroyAllWindows()
| [
"sync_batchnorm.DataParallelWithCallback",
"numpy.sqrt",
"modules.generator.OcclusionAwareGenerator",
"argparse.ArgumentParser",
"torch.load",
"modules.keypoint_detector.KPDetector",
"throttle.wrap",
"yaml.load",
"cv2.imshow",
"numpy.array",
"cv2.destroyAllWindows",
"cv2.VideoCapture",
"torch.matmul",
"imageio.imread",
"skimage.transform.resize",
"cv2.waitKey",
"torch.inverse",
"torch.device"
] | [((2735, 2754), 'throttle.wrap', 'throttle.wrap', (['(1)', '(2)'], {}), '(1, 2)\n', (2748, 2754), False, 'import throttle\n'), ((1863, 1980), 'modules.generator.OcclusionAwareGenerator', 'OcclusionAwareGenerator', ([], {}), "(**config['model_params']['generator_params'], **\n config['model_params']['common_params'])\n", (1886, 1980), False, 'from modules.generator import OcclusionAwareGenerator\n'), ((2076, 2182), 'modules.keypoint_detector.KPDetector', 'KPDetector', ([], {}), "(**config['model_params']['kp_detector_params'], **config[\n 'model_params']['common_params'])\n", (2086, 2182), False, 'from modules.keypoint_detector import KPDetector\n'), ((3360, 3376), 'argparse.ArgumentParser', 'ArgumentParser', ([], {}), '()\n', (3374, 3376), False, 'from argparse import ArgumentParser\n'), ((4216, 4248), 'imageio.imread', 'imageio.imread', (['opt.source_image'], {}), '(opt.source_image)\n', (4230, 4248), False, 'import imageio\n'), ((4614, 4633), 'cv2.VideoCapture', 'cv2.VideoCapture', (['(0)'], {}), '(0)\n', (4630, 4633), False, 'import cv2\n'), ((5766, 5789), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (5787, 5789), False, 'import cv2\n'), ((1833, 1845), 'yaml.load', 'yaml.load', (['f'], {}), '(f)\n', (1842, 1845), False, 'import yaml\n'), ((2377, 2404), 'torch.load', 'torch.load', (['checkpoint_path'], {}), '(checkpoint_path)\n', (2387, 2404), False, 'import torch\n'), ((2557, 2592), 'sync_batchnorm.DataParallelWithCallback', 'DataParallelWithCallback', (['generator'], {}), '(generator)\n', (2581, 2592), False, 'from sync_batchnorm import DataParallelWithCallback\n'), ((2615, 2652), 'sync_batchnorm.DataParallelWithCallback', 'DataParallelWithCallback', (['kp_detector'], {}), '(kp_detector)\n', (2639, 2652), False, 'from sync_batchnorm import DataParallelWithCallback\n'), ((4266, 4298), 'skimage.transform.resize', 'resize', (['source_image', '(256, 256)'], {}), '(source_image, (256, 256))\n', (4272, 4298), False, 'from skimage.transform import resize\n'), ((5441, 5472), 'cv2.imshow', 'cv2.imshow', (['"""frame"""', 'fake_frame'], {}), "('frame', fake_frame)\n", (5451, 5472), False, 'import cv2\n'), ((1115, 1135), 'numpy.sqrt', 'np.sqrt', (['source_area'], {}), '(source_area)\n', (1122, 1135), True, 'import numpy as np\n'), ((1138, 1159), 'numpy.sqrt', 'np.sqrt', (['driving_area'], {}), '(driving_area)\n', (1145, 1159), True, 'import numpy as np\n'), ((1649, 1699), 'torch.matmul', 'torch.matmul', (['jacobian_diff', "kp_source['jacobian']"], {}), "(jacobian_diff, kp_source['jacobian'])\n", (1661, 1699), False, 'import torch\n'), ((4723, 4748), 'skimage.transform.resize', 'resize', (['frame', '(256, 256)'], {}), '(frame, (256, 256))\n', (4729, 4748), False, 'from skimage.transform import resize\n'), ((1569, 1614), 'torch.inverse', 'torch.inverse', (["kp_driving_initial['jacobian']"], {}), "(kp_driving_initial['jacobian'])\n", (1582, 1614), False, 'import torch\n'), ((2325, 2344), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (2337, 2344), False, 'import torch\n'), ((5694, 5708), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (5705, 5708), False, 'import cv2\n'), ((5046, 5063), 'numpy.array', 'np.array', (['resized'], {}), '(resized)\n', (5054, 5063), True, 'import numpy as np\n')] |
import numpy as np
from albumentations import (Compose, HorizontalFlip, VerticalFlip, Rotate, RandomRotate90,
ShiftScaleRotate, ElasticTransform,
GridDistortion, RandomSizedCrop, RandomCrop, CenterCrop,
RandomBrightnessContrast, HueSaturationValue, IAASharpen,
                            RandomGamma, RandomBrightness,
GaussianBlur,CLAHE,
Cutout, CoarseDropout, GaussNoise, ChannelShuffle, ToGray, OpticalDistortion,
Normalize, OneOf, NoOp)
from albumentations.pytorch import ToTensorV2 as ToTensor
from get_config import get_config
config = get_config()
MEAN = np.array([0.485, 0.456, 0.406])
STD = np.array([0.229, 0.224, 0.225])
def get_transforms_train():
transform_train = Compose([
#Basic
RandomRotate90(p=1),
HorizontalFlip(p=0.5),
#Morphology
ShiftScaleRotate(shift_limit=0, scale_limit=(-0.2,0.2), rotate_limit=(-30,30),
interpolation=1, border_mode=0, value=(0,0,0), p=0.5),
GaussNoise(var_limit=(0,50.0), mean=0, p=0.5),
GaussianBlur(blur_limit=(3,7), p=0.5),
#Color
RandomBrightnessContrast(brightness_limit=0.35, contrast_limit=0.5,
brightness_by_max=True,p=0.5),
HueSaturationValue(hue_shift_limit=30, sat_shift_limit=30,
val_shift_limit=0, p=0.5),
CoarseDropout(max_holes=2,
max_height=config['input_resolution'][0]//4, max_width=config['input_resolution'][1]//4,
min_holes=1,
min_height=config['input_resolution'][0]//16, min_width=config['input_resolution'][1]//16,
fill_value=0, mask_fill_value=0, p=0.5),
Normalize(mean=(MEAN[0], MEAN[1], MEAN[2]),
std=(STD[0], STD[1], STD[2])),
ToTensor(),
])
return transform_train
def get_transforms_valid():
transform_valid = Compose([
Normalize(mean=(MEAN[0], MEAN[1], MEAN[2]),
std=(STD[0], STD[1], STD[2])),
ToTensor(),
] )
return transform_valid
def denormalize(z, mean=MEAN.reshape(-1,1,1), std=STD.reshape(-1,1,1)):
return std*z + mean
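# Hedged usage sketch (not part of the original module): applying the training
# pipeline to one image/mask pair. The random arrays stand in for real data and the
# helper name is an assumption for illustration only.
def _demo_transforms():
    h, w = config['input_resolution']
    image = np.random.randint(0, 256, (h, w, 3), dtype=np.uint8)
    mask = np.random.randint(0, 2, (h, w), dtype=np.uint8)
    sample = get_transforms_train()(image=image, mask=mask)
    # Compose returns a dict; 'image' is a normalized CHW tensor, 'mask' an HxW tensor.
    return sample['image'], sample['mask']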
| [
"albumentations.ShiftScaleRotate",
"albumentations.pytorch.ToTensorV2",
"albumentations.RandomBrightnessContrast",
"albumentations.GaussianBlur",
"albumentations.CoarseDropout",
"albumentations.GaussNoise",
"albumentations.HueSaturationValue",
"numpy.array",
"albumentations.Normalize",
"get_config.get_config",
"albumentations.RandomRotate90",
"albumentations.HorizontalFlip"
] | [((737, 749), 'get_config.get_config', 'get_config', ([], {}), '()\n', (747, 749), False, 'from get_config import get_config\n'), ((758, 789), 'numpy.array', 'np.array', (['[0.485, 0.456, 0.406]'], {}), '([0.485, 0.456, 0.406])\n', (766, 789), True, 'import numpy as np\n'), ((797, 828), 'numpy.array', 'np.array', (['[0.229, 0.224, 0.225]'], {}), '([0.229, 0.224, 0.225])\n', (805, 828), True, 'import numpy as np\n'), ((913, 932), 'albumentations.RandomRotate90', 'RandomRotate90', ([], {'p': '(1)'}), '(p=1)\n', (927, 932), False, 'from albumentations import Compose, HorizontalFlip, VerticalFlip, Rotate, RandomRotate90, ShiftScaleRotate, ElasticTransform, GridDistortion, RandomSizedCrop, RandomCrop, CenterCrop, RandomBrightnessContrast, HueSaturationValue, IAASharpen, RandomGamma, RandomBrightness, RandomBrightnessContrast, GaussianBlur, CLAHE, Cutout, CoarseDropout, GaussNoise, ChannelShuffle, ToGray, OpticalDistortion, Normalize, OneOf, NoOp\n'), ((942, 963), 'albumentations.HorizontalFlip', 'HorizontalFlip', ([], {'p': '(0.5)'}), '(p=0.5)\n', (956, 963), False, 'from albumentations import Compose, HorizontalFlip, VerticalFlip, Rotate, RandomRotate90, ShiftScaleRotate, ElasticTransform, GridDistortion, RandomSizedCrop, RandomCrop, CenterCrop, RandomBrightnessContrast, HueSaturationValue, IAASharpen, RandomGamma, RandomBrightness, RandomBrightnessContrast, GaussianBlur, CLAHE, Cutout, CoarseDropout, GaussNoise, ChannelShuffle, ToGray, OpticalDistortion, Normalize, OneOf, NoOp\n'), ((1002, 1142), 'albumentations.ShiftScaleRotate', 'ShiftScaleRotate', ([], {'shift_limit': '(0)', 'scale_limit': '(-0.2, 0.2)', 'rotate_limit': '(-30, 30)', 'interpolation': '(1)', 'border_mode': '(0)', 'value': '(0, 0, 0)', 'p': '(0.5)'}), '(shift_limit=0, scale_limit=(-0.2, 0.2), rotate_limit=(-30,\n 30), interpolation=1, border_mode=0, value=(0, 0, 0), p=0.5)\n', (1018, 1142), False, 'from albumentations import Compose, HorizontalFlip, VerticalFlip, Rotate, RandomRotate90, ShiftScaleRotate, ElasticTransform, GridDistortion, RandomSizedCrop, RandomCrop, CenterCrop, RandomBrightnessContrast, HueSaturationValue, IAASharpen, RandomGamma, RandomBrightness, RandomBrightnessContrast, GaussianBlur, CLAHE, Cutout, CoarseDropout, GaussNoise, ChannelShuffle, ToGray, OpticalDistortion, Normalize, OneOf, NoOp\n'), ((1170, 1216), 'albumentations.GaussNoise', 'GaussNoise', ([], {'var_limit': '(0, 50.0)', 'mean': '(0)', 'p': '(0.5)'}), '(var_limit=(0, 50.0), mean=0, p=0.5)\n', (1180, 1216), False, 'from albumentations import Compose, HorizontalFlip, VerticalFlip, Rotate, RandomRotate90, ShiftScaleRotate, ElasticTransform, GridDistortion, RandomSizedCrop, RandomCrop, CenterCrop, RandomBrightnessContrast, HueSaturationValue, IAASharpen, RandomGamma, RandomBrightness, RandomBrightnessContrast, GaussianBlur, CLAHE, Cutout, CoarseDropout, GaussNoise, ChannelShuffle, ToGray, OpticalDistortion, Normalize, OneOf, NoOp\n'), ((1225, 1263), 'albumentations.GaussianBlur', 'GaussianBlur', ([], {'blur_limit': '(3, 7)', 'p': '(0.5)'}), '(blur_limit=(3, 7), p=0.5)\n', (1237, 1263), False, 'from albumentations import Compose, HorizontalFlip, VerticalFlip, Rotate, RandomRotate90, ShiftScaleRotate, ElasticTransform, GridDistortion, RandomSizedCrop, RandomCrop, CenterCrop, RandomBrightnessContrast, HueSaturationValue, IAASharpen, RandomGamma, RandomBrightness, RandomBrightnessContrast, GaussianBlur, CLAHE, Cutout, CoarseDropout, GaussNoise, ChannelShuffle, ToGray, OpticalDistortion, Normalize, OneOf, NoOp\n'), ((1296, 1398), 
'albumentations.RandomBrightnessContrast', 'RandomBrightnessContrast', ([], {'brightness_limit': '(0.35)', 'contrast_limit': '(0.5)', 'brightness_by_max': '(True)', 'p': '(0.5)'}), '(brightness_limit=0.35, contrast_limit=0.5,\n brightness_by_max=True, p=0.5)\n', (1320, 1398), False, 'from albumentations import Compose, HorizontalFlip, VerticalFlip, Rotate, RandomRotate90, ShiftScaleRotate, ElasticTransform, GridDistortion, RandomSizedCrop, RandomCrop, CenterCrop, RandomBrightnessContrast, HueSaturationValue, IAASharpen, RandomGamma, RandomBrightness, RandomBrightnessContrast, GaussianBlur, CLAHE, Cutout, CoarseDropout, GaussNoise, ChannelShuffle, ToGray, OpticalDistortion, Normalize, OneOf, NoOp\n'), ((1437, 1526), 'albumentations.HueSaturationValue', 'HueSaturationValue', ([], {'hue_shift_limit': '(30)', 'sat_shift_limit': '(30)', 'val_shift_limit': '(0)', 'p': '(0.5)'}), '(hue_shift_limit=30, sat_shift_limit=30, val_shift_limit=\n 0, p=0.5)\n', (1455, 1526), False, 'from albumentations import Compose, HorizontalFlip, VerticalFlip, Rotate, RandomRotate90, ShiftScaleRotate, ElasticTransform, GridDistortion, RandomSizedCrop, RandomCrop, CenterCrop, RandomBrightnessContrast, HueSaturationValue, IAASharpen, RandomGamma, RandomBrightness, RandomBrightnessContrast, GaussianBlur, CLAHE, Cutout, CoarseDropout, GaussNoise, ChannelShuffle, ToGray, OpticalDistortion, Normalize, OneOf, NoOp\n'), ((1568, 1849), 'albumentations.CoarseDropout', 'CoarseDropout', ([], {'max_holes': '(2)', 'max_height': "(config['input_resolution'][0] // 4)", 'max_width': "(config['input_resolution'][1] // 4)", 'min_holes': '(1)', 'min_height': "(config['input_resolution'][0] // 16)", 'min_width': "(config['input_resolution'][1] // 16)", 'fill_value': '(0)', 'mask_fill_value': '(0)', 'p': '(0.5)'}), "(max_holes=2, max_height=config['input_resolution'][0] // 4,\n max_width=config['input_resolution'][1] // 4, min_holes=1, min_height=\n config['input_resolution'][0] // 16, min_width=config[\n 'input_resolution'][1] // 16, fill_value=0, mask_fill_value=0, p=0.5)\n", (1581, 1849), False, 'from albumentations import Compose, HorizontalFlip, VerticalFlip, Rotate, RandomRotate90, ShiftScaleRotate, ElasticTransform, GridDistortion, RandomSizedCrop, RandomCrop, CenterCrop, RandomBrightnessContrast, HueSaturationValue, IAASharpen, RandomGamma, RandomBrightness, RandomBrightnessContrast, GaussianBlur, CLAHE, Cutout, CoarseDropout, GaussNoise, ChannelShuffle, ToGray, OpticalDistortion, Normalize, OneOf, NoOp\n'), ((1937, 2010), 'albumentations.Normalize', 'Normalize', ([], {'mean': '(MEAN[0], MEAN[1], MEAN[2])', 'std': '(STD[0], STD[1], STD[2])'}), '(mean=(MEAN[0], MEAN[1], MEAN[2]), std=(STD[0], STD[1], STD[2]))\n', (1946, 2010), False, 'from albumentations import Compose, HorizontalFlip, VerticalFlip, Rotate, RandomRotate90, ShiftScaleRotate, ElasticTransform, GridDistortion, RandomSizedCrop, RandomCrop, CenterCrop, RandomBrightnessContrast, HueSaturationValue, IAASharpen, RandomGamma, RandomBrightness, RandomBrightnessContrast, GaussianBlur, CLAHE, Cutout, CoarseDropout, GaussNoise, ChannelShuffle, ToGray, OpticalDistortion, Normalize, OneOf, NoOp\n'), ((2039, 2049), 'albumentations.pytorch.ToTensorV2', 'ToTensor', ([], {}), '()\n', (2047, 2049), True, 'from albumentations.pytorch import ToTensorV2 as ToTensor\n'), ((2155, 2228), 'albumentations.Normalize', 'Normalize', ([], {'mean': '(MEAN[0], MEAN[1], MEAN[2])', 'std': '(STD[0], STD[1], STD[2])'}), '(mean=(MEAN[0], MEAN[1], MEAN[2]), std=(STD[0], STD[1], STD[2]))\n', (2164, 2228), 
False, 'from albumentations import Compose, HorizontalFlip, VerticalFlip, Rotate, RandomRotate90, ShiftScaleRotate, ElasticTransform, GridDistortion, RandomSizedCrop, RandomCrop, CenterCrop, RandomBrightnessContrast, HueSaturationValue, IAASharpen, RandomGamma, RandomBrightness, RandomBrightnessContrast, GaussianBlur, CLAHE, Cutout, CoarseDropout, GaussNoise, ChannelShuffle, ToGray, OpticalDistortion, Normalize, OneOf, NoOp\n'), ((2257, 2267), 'albumentations.pytorch.ToTensorV2', 'ToTensor', ([], {}), '()\n', (2265, 2267), True, 'from albumentations.pytorch import ToTensorV2 as ToTensor\n')] |
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Defines coordinate frames and ties them to data axes.
"""
from __future__ import absolute_import, division, unicode_literals, print_function
import numpy as np
from astropy import units as u
from astropy import utils as astutil
from astropy import coordinates as coord
from astropy.extern import six
from . import utils as gwutils
__all__ = ['Frame2D', 'CelestialFrame', 'SpectralFrame', 'CompositeFrame',
'CoordinateFrame']
STANDARD_REFERENCE_FRAMES = [frame.upper() for frame in coord.builtin_frames.__all__]
STANDARD_REFERENCE_POSITION = ["GEOCENTER", "BARYCENTER", "HELIOCENTER",
"TOPOCENTER", "LSR", "LSRK", "LSRD",
"GALACTIC_CENTER", "LOCAL_GROUP_CENTER"]
class CoordinateFrame(object):
"""
Base class for CoordinateFrames.
Parameters
----------
naxes : int
Number of axes.
axes_type : str
One of ["SPATIAL", "SPECTRAL", "TIME"]
axes_order : tuple of int
A dimension in the input data that corresponds to this axis.
reference_frame : astropy.coordinates.builtin_frames
Reference frame (usually used with output_frame to convert to world coordinate objects).
reference_position : str
Reference position - one of `STANDARD_REFERENCE_POSITION`
unit : list of astropy.units.Unit
Unit for each axis.
axes_names : list
Names of the axes in this frame.
name : str
Name of this frame.
"""
def __init__(self, naxes, axes_type, axes_order, reference_frame=None,
reference_position=None, unit=None, axes_names=None,
name=None):
self._naxes = naxes
self._axes_order = tuple(axes_order)
if isinstance(axes_type, six.string_types):
self._axes_type = (axes_type,)
else:
self._axes_type = tuple(axes_type)
self._reference_frame = reference_frame
if unit is not None:
if astutil.isiterable(unit):
unit = tuple(unit)
else:
unit = (unit,)
if len(unit) != naxes:
raise ValueError("Number of units does not match number of axes.")
else:
self._unit = tuple([u.Unit(au) for au in unit])
if axes_names is not None:
if isinstance(axes_names, six.string_types):
axes_names = (axes_names,)
else:
axes_names = tuple(axes_names)
if len(axes_names) != naxes:
raise ValueError("Number of axes names does not match number of axes.")
else:
axes_names = tuple([""] * naxes)
self._axes_names = axes_names
if name is None:
self._name = self.__class__.__name__
else:
self._name = name
if reference_position is not None:
self._reference_position = reference_position
else:
self._reference_position = None
super(CoordinateFrame, self).__init__()
def __repr__(self):
fmt = '<{0}(name="{1}", unit={2}, axes_names={3}, axes_order={4}'.format(
self.__class__.__name__, self.name,
self.unit, self.axes_names, self.axes_order)
if self.reference_position is not None:
fmt += ', reference_position="{0}"'.format(self.reference_position)
if self.reference_frame is not None:
fmt += ", reference_frame={0}".format(self.reference_frame)
fmt += ")>"
return fmt
def __str__(self):
if self._name is not None:
return self._name
else:
return self.__class__.__name__
@property
def name(self):
""" A custom name of this frame."""
return self._name
@name.setter
def name(self, val):
""" A custom name of this frame."""
self._name = val
@property
def naxes(self):
""" The number of axes intheis frame."""
return self._naxes
@property
def unit(self):
"""The unit of this frame."""
return self._unit
@property
def axes_names(self):
""" Names of axes in the frame."""
return self._axes_names
@property
def axes_order(self):
""" A tuple of indices which map inputs to axes."""
return self._axes_order
@property
def reference_frame(self):
return self._reference_frame
@property
def reference_position(self):
try:
return self._reference_position
except AttributeError:
return None
def input_axes(self, start_frame=None):
"""
Computes which axes in `start_frame` contribute to each axis in the current frame.
Parameters
----------
start_frame : ~gwcs.coordinate_frames.CoordinateFrame
A frame in the WCS pipeline
The transform between start_frame and the current frame is used to compute the
mapping inputs: outputs.
"""
sep = self._separable(start_frame)
inputs = []
for ax in self.axes_order:
inputs.append(list(sep[ax].nonzero()[0]))
return inputs
@property
def axes_type(self):
""" Type of this frame : 'SPATIAL', 'SPECTRAL', 'TIME'. """
return self._axes_type
def coordinates(self, *args):
""" Create world coordinates object"""
raise NotImplementedError("Subclasses may implement this")
class CelestialFrame(CoordinateFrame):
"""
Celestial Frame Representation
Parameters
----------
axes_order : tuple of int
A dimension in the input data that corresponds to this axis.
reference_frame : astropy.coordinates.builtin_frames
A reference frame.
reference_position : str
Reference position.
unit : str or units.Unit instance or iterable of those
Units on axes.
axes_names : list
Names of the axes in this frame.
name : str
Name of this frame.
"""
def __init__(self, axes_order=None, reference_frame=None,
unit=None, axes_names=None,
name=None):
naxes = 2
if reference_frame is not None:
if reference_frame.name.upper() in STANDARD_REFERENCE_FRAMES:
_axes_names = list(reference_frame.representation_component_names.values())
if 'distance' in _axes_names:
_axes_names.remove('distance')
if axes_names is None:
axes_names = _axes_names
naxes = len(_axes_names)
_unit = list(reference_frame.representation_component_units.values())
if unit is None and _unit:
unit = _unit
if axes_order is None:
axes_order = tuple(range(naxes))
if unit is None:
unit = tuple([u.degree] * naxes)
axes_type = ['SPATIAL'] * naxes
super(CelestialFrame, self).__init__(naxes=naxes, axes_type=axes_type,
axes_order=axes_order,
reference_frame=reference_frame,
unit=unit,
axes_names=axes_names,
name=name)
def coordinates(self, *args):
"""
Create a SkyCoord object.
Parameters
----------
args : float
inputs to wcs.input_frame
"""
        # Reorder axes if necessary.
        return coord.SkyCoord(*args, unit=self.unit, frame=self._reference_frame)
class SpectralFrame(CoordinateFrame):
"""
Represents Spectral Frame
Parameters
----------
axes_order : tuple or int
A dimension in the input data that corresponds to this axis.
reference_frame : astropy.coordinates.builtin_frames
Reference frame (usually used with output_frame to convert to world coordinate objects).
unit : str or units.Unit instance
Spectral unit.
axes_names : str
Spectral axis name.
name : str
Name for this frame.
"""
def __init__(self, axes_order=(0,), reference_frame=None, unit=None,
axes_names=None, name=None, reference_position=None):
super(SpectralFrame, self).__init__(naxes=1, axes_type="SPECTRAL", axes_order=axes_order,
axes_names=axes_names, reference_frame=reference_frame,
unit=unit, name=name,
reference_position=reference_position)
def coordinates(self, *args):
if np.isscalar(args):
return args * self.unit[0]
else:
return args[0] * self.unit[0]
class CompositeFrame(CoordinateFrame):
"""
Represents one or more frames.
Parameters
----------
frames : list
List of frames (TimeFrame, CelestialFrame, SpectralFrame, CoordinateFrame).
name : str
Name for this frame.
"""
def __init__(self, frames, name=None):
self._frames = frames[:]
naxes = sum([frame._naxes for frame in self._frames])
axes_type = list(range(naxes))
unit = list(range(naxes))
axes_names = list(range(naxes))
axes_order = []
for frame in frames:
axes_order.extend(frame.axes_order)
for frame in frames:
for ind, axtype, un, n in zip(frame.axes_order, frame.axes_type,
frame.unit, frame.axes_names):
axes_type[ind] = axtype
axes_names[ind] = n
unit[ind] = un
if len(np.unique(axes_order)) != len(axes_order):
raise ValueError("Incorrect numbering of axes, "
"axes_order should contain unique numbers, "
"got {}.".format(axes_order))
super(CompositeFrame, self).__init__(naxes, axes_type=axes_type,
axes_order=axes_order,
unit=unit, axes_names=axes_names,
name=name)
@property
def frames(self):
return self._frames
def __repr__(self):
return repr(self.frames)
def coordinates(self, *args):
coo = []
for frame in self.frames:
fargs = [args[i] for i in frame.axes_order]
print(frame, fargs, frame.axes_order)
coo.append(frame.coordinates(*fargs))
return coo
class Frame2D(CoordinateFrame):
"""
A 2D coordinate frame.
Parameters
----------
axes_order : tuple of int
A dimension in the input data that corresponds to this axis.
unit : list of astropy.units.Unit
Unit for each axis.
axes_names : list
Names of the axes in this frame.
name : str
Name of this frame.
"""
def __init__(self, axes_order=(0, 1), unit=(u.pix, u.pix), axes_names=('x', 'y'),
name=None):
super(Frame2D, self).__init__(2, ["SPATIAL", "SPATIAL"], axes_order, name=name,
axes_names=axes_names, unit=unit)
def coordinates(self, *args):
args = [args[i] for i in self.axes_order]
coo = tuple([arg * un for arg, un in zip(args, self.unit)])
return coo
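# Minimal usage sketch of the frames above, assuming 'ICRS' is listed in
# STANDARD_REFERENCE_FRAMES earlier in this module and astropy is installed;
# the variable names and values below are illustrative only.
if __name__ == "__main__":
    sky = CelestialFrame(reference_frame=coord.ICRS(), axes_order=(0, 1))
    spec = SpectralFrame(axes_order=(2,), unit=u.micron, axes_names=("wavelength",))
    world = CompositeFrame([sky, spec], name="world")
    # Returns [SkyCoord(ra, dec), wavelength * u.micron] for the three world axes.
    print(world.coordinates(5.6, -72.05, 1.2))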
| [
"numpy.unique",
"numpy.isscalar",
"astropy.units.Unit",
"astropy.coordinates.SkyCoord",
"astropy.utils.isiterable"
] | [((8880, 8897), 'numpy.isscalar', 'np.isscalar', (['args'], {}), '(args)\n', (8891, 8897), True, 'import numpy as np\n'), ((2050, 2074), 'astropy.utils.isiterable', 'astutil.isiterable', (['unit'], {}), '(unit)\n', (2068, 2074), True, 'from astropy import utils as astutil\n'), ((7717, 7783), 'astropy.coordinates.SkyCoord', 'coord.SkyCoord', (['*args'], {'unit': 'self.unit', 'frame': 'self._reference_frame'}), '(*args, unit=self.unit, frame=self._reference_frame)\n', (7731, 7783), True, 'from astropy import coordinates as coord\n'), ((9919, 9940), 'numpy.unique', 'np.unique', (['axes_order'], {}), '(axes_order)\n', (9928, 9940), True, 'import numpy as np\n'), ((2332, 2342), 'astropy.units.Unit', 'u.Unit', (['au'], {}), '(au)\n', (2338, 2342), True, 'from astropy import units as u\n')] |
import lx
import modo
import select
import item
from run import run
class ChannelModifierUtils(object):
@classmethod
def attachModifierToItem(cls, modifierModoItem, hostModoItem):
"""
        Allows for attaching a modifier to a locator type item.
        The attached item will show up under the locator item in the item list
        (you can unfold it with the little plus icon next to the item name).
        Attached modifiers get deleted together with the locator they are attached to.
Parameters
----------
modifierModoItem : modo.Item
Modifier item that you want to attach.
hostModoItem : modo.Item
Locator type item you want to attach modifier to.
"""
item.ItemUtils.addForwardGraphConnections(modifierModoItem, hostModoItem, 'chanMods')
class TransformConstraintOperation(object):
POSITION = 'pos'
ROTATION = 'rot'
SCALE = 'scl'
class CMTransformConstraint(object):
"""
This class represents Transform Constraint channel modifier.
Parameters
----------
modoItem : modo.Item
The constraint modo item.
"""
Operation = TransformConstraintOperation
@classmethod
def new(cls, assemblyItem, hostItem, name='TransformConstraint'):
"""
Adds new transform constraint to the scene.
Parameters
----------
assemblyItem : modo.Item
            This is the assembly item to which the constraint will be added.
            Passing this item is mandatory. However, if you don't want to add the constraint
            to any assembly, pass an item that is not a group.
            This doesn't throw an error; the constraint simply isn't added to any group.
hostItem : modo.Item
Constraint can be attached to an item such that it'll be under this item
in item list. It'll also get deleted when the host item is deleted.
name : str
Name for new constraint item.
Returns
-------
CMTransformConstraint
"""
itemSelection = select.ItemSelection()
itemSelection.clear()
run('modifier.create "cmTransformConstraint:rot" item:{%s} insert:false' % assemblyItem.id)
cnsItem = itemSelection.getOfTypeModo("cmTransformConstraint")[0]
cnsItem.name = name
ChannelModifierUtils.attachModifierToItem(cnsItem, hostItem)
return CMTransformConstraint(cnsItem)
@property
def operation(self):
"""
Gets the type of the constraint.
Returns
-------
str
One of TransformConstraintOperation constants.
"""
return self._item.channel('operation').get()
@property
def inputChannel(self):
return self._item.channel('matrixInput')
@property
def outputChannel(self):
return self._item.channel('matrixOutput')
@property
def isRotationConstraint(self):
"""
Tests if this is rotation constraint.
Returns
-------
bool
"""
return self.operation == self.Operation.ROTATION
@property
def offset(self):
"""
Gets the constraint offset vector.
Returns
-------
modo.Vector3
"""
x = self._item.channel('offset.X').get()
y = self._item.channel('offset.Y').get()
z = self._item.channel('offset.Z').get()
return modo.Vector3(x, y, z)
@offset.setter
def offset(self, offsetVec):
"""
Sets new offset for the constraint.
Parameters
----------
offsetVec : modo.Vector3
"""
self._item.channel('offset.X').set(offsetVec[0], 0.0, key=False, action=lx.symbol.s_ACTIONLAYER_SETUP)
self._item.channel('offset.Y').set(offsetVec[1], 0.0, key=False, action=lx.symbol.s_ACTIONLAYER_SETUP)
self._item.channel('offset.Z').set(offsetVec[2], 0.0, key=False, action=lx.symbol.s_ACTIONLAYER_SETUP)
@property
def modoItem(self):
return self._item
# -------- Private methods
def __init__(self, modoItem):
if modoItem.type != 'cmTransformConstraint':
raise TypeError
self._item = modoItem | [
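# Minimal usage sketch, assuming it is executed inside MODO with an assembly
# group item and a host locator already known; the constraint name and the
# offset value are illustrative only.
def _example_add_rotation_constraint(assemblyItem, hostItem):
    cns = CMTransformConstraint.new(assemblyItem, hostItem, name='ExampleConstraint')
    # Offset the constrained transform by one unit along Y (stored on the setup action layer).
    cns.offset = modo.Vector3(0.0, 1.0, 0.0)
    return cns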
"run.run",
"modo.Vector3",
"item.ItemUtils.addForwardGraphConnections",
"select.ItemSelection"
] | [((757, 846), 'item.ItemUtils.addForwardGraphConnections', 'item.ItemUtils.addForwardGraphConnections', (['modifierModoItem', 'hostModoItem', '"""chanMods"""'], {}), "(modifierModoItem, hostModoItem,\n 'chanMods')\n", (798, 846), False, 'import item\n'), ((2133, 2155), 'select.ItemSelection', 'select.ItemSelection', ([], {}), '()\n', (2153, 2155), False, 'import select\n'), ((2195, 2290), 'run.run', 'run', (['(\'modifier.create "cmTransformConstraint:rot" item:{%s} insert:false\' %\n assemblyItem.id)'], {}), '(\'modifier.create "cmTransformConstraint:rot" item:{%s} insert:false\' %\n assemblyItem.id)\n', (2198, 2290), False, 'from run import run\n'), ((3496, 3517), 'modo.Vector3', 'modo.Vector3', (['x', 'y', 'z'], {}), '(x, y, z)\n', (3508, 3517), False, 'import modo\n')] |
from brownie import FundMe
from scripts.helpful_scripts import get_account
def fund():
fund_me = FundMe[-1]
account = get_account()
entrance_fee = fund_me.getEntranceFee()
print(f"entrance is {entrance_fee}")
print("funding..")
fund_me.fund({"from": account, "value": entrance_fee})
def withdraw():
fund_me = FundMe[-1]
account = get_account()
fund_me.withdraw({"from": account})
def main():
fund()
withdraw()
if __name__ == "__main__":
main()
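# Usage sketch: this script is normally run through Brownie rather than plain
# Python; the script path and network name below are illustrative only, and a
# FundMe contract must already be deployed on the chosen network.
#
#   brownie run scripts/fund_and_withdraw.py --network <network-name>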
| [
"scripts.helpful_scripts.get_account"
] | [((128, 141), 'scripts.helpful_scripts.get_account', 'get_account', ([], {}), '()\n', (139, 141), False, 'from scripts.helpful_scripts import get_account\n'), ((366, 379), 'scripts.helpful_scripts.get_account', 'get_account', ([], {}), '()\n', (377, 379), False, 'from scripts.helpful_scripts import get_account\n')] |
# A teacher wants to randomly pick one of his four students to erase the board. Write a program that helps him by reading the students' names and printing the chosen one on the screen.
from random import choice
nome1 = input('Digite um nome: ')
nome2 = input('Digite outro nome: ')
nome3 = input('Digite mais um nome: ')
nome4 = input('Digite o último nome: ')
nome = [nome1, nome2, nome3, nome4]
print(choice(nome))
| [
"random.choice"
] | [((395, 407), 'random.choice', 'choice', (['nome'], {}), '(nome)\n', (401, 407), False, 'from random import choice\n')] |
import datetime
import os
import shutil
import tempfile
from joblib import Parallel, delayed
from fastpic_upload import upload_file_to_fastpic
_n_jobs_for_upload = 20
_root_folders_set = (
'/path/to/folder',
)
_spoiler_for_each_file = True
def process_one_pic(result_key, pic_path, tmp_dir):
pic_url, pic_link = upload_file_to_fastpic(pic_path, tmp_dir)
print(pic_url)
return result_key, (pic_url, pic_link)
def upload_from_folder(folder_path):
pics_to_upload = {}
for root, dirs, files in os.walk(folder_path):
for file in files:
if file.split('.')[-1] not in ('jpg', 'jpeg', 'bmp', 'png'):
continue
file_path = os.path.join(root, file)
pics_to_upload[file] = file_path
print(pics_to_upload)
    print('Need to upload {} photos'.format(len(pics_to_upload)))
result = {}
tmp_dir = tempfile.mkdtemp()
try:
sub_results = Parallel(n_jobs=_n_jobs_for_upload, backend='threading')(
delayed(process_one_pic)(key, pics_to_upload[key], tmp_dir) for key in sorted(pics_to_upload))
for sub_result in sub_results:
result[sub_result[0]] = sub_result[1]
finally:
shutil.rmtree(tmp_dir)
return result
def print_result_to_file(result, result_file_path):
with open(result_file_path, 'w', encoding='utf8', newline='') as codes_file:
codes_file.write('[spoiler="Скриншоты"]')
codes_file.write(os.linesep)
codes_file.write(os.linesep)
for result_key in sorted(result):
if _spoiler_for_each_file:
codes_file.write('[spoiler="{}"]'.format(result_key))
codes_file.write(os.linesep)
url, link = result[result_key]
codes_file.write('[url={}][img]{}[/img][/url]'.format(link, url))
if _spoiler_for_each_file:
codes_file.write(os.linesep)
codes_file.write('[/spoiler]')
codes_file.write(os.linesep)
codes_file.write(os.linesep)
codes_file.write('[/spoiler]')
def main():
for root_folder in _root_folders_set:
result = upload_from_folder(root_folder)
print_result_to_file(result, os.path.join(root_folder, 'result_codes.txt'))
if __name__ == '__main__':
started = datetime.datetime.now()
print(started, 'started')
main()
finished = datetime.datetime.now()
print(finished, 'all done in', finished - started)
| [
"os.path.join",
"fastpic_upload.upload_file_to_fastpic",
"datetime.datetime.now",
"joblib.Parallel",
"tempfile.mkdtemp",
"shutil.rmtree",
"joblib.delayed",
"os.walk"
] | [((326, 367), 'fastpic_upload.upload_file_to_fastpic', 'upload_file_to_fastpic', (['pic_path', 'tmp_dir'], {}), '(pic_path, tmp_dir)\n', (348, 367), False, 'from fastpic_upload import upload_file_to_fastpic\n'), ((523, 543), 'os.walk', 'os.walk', (['folder_path'], {}), '(folder_path)\n', (530, 543), False, 'import os\n'), ((884, 902), 'tempfile.mkdtemp', 'tempfile.mkdtemp', ([], {}), '()\n', (900, 902), False, 'import tempfile\n'), ((2311, 2334), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (2332, 2334), False, 'import datetime\n'), ((2391, 2414), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (2412, 2414), False, 'import datetime\n'), ((1210, 1232), 'shutil.rmtree', 'shutil.rmtree', (['tmp_dir'], {}), '(tmp_dir)\n', (1223, 1232), False, 'import shutil\n'), ((694, 718), 'os.path.join', 'os.path.join', (['root', 'file'], {}), '(root, file)\n', (706, 718), False, 'import os\n'), ((934, 990), 'joblib.Parallel', 'Parallel', ([], {'n_jobs': '_n_jobs_for_upload', 'backend': '"""threading"""'}), "(n_jobs=_n_jobs_for_upload, backend='threading')\n", (942, 990), False, 'from joblib import Parallel, delayed\n'), ((2221, 2266), 'os.path.join', 'os.path.join', (['root_folder', '"""result_codes.txt"""'], {}), "(root_folder, 'result_codes.txt')\n", (2233, 2266), False, 'import os\n'), ((1004, 1028), 'joblib.delayed', 'delayed', (['process_one_pic'], {}), '(process_one_pic)\n', (1011, 1028), False, 'from joblib import Parallel, delayed\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Created on Dec 8, 2019
.. codeauthor: <NAME>
<<EMAIL>>
Index docs into ES
https://qbox.io/blog/building-an-elasticsearch-index-with-python
'''
from settings import *
import glob
import re
# first n characters used for the doc preview
LIMIT_START = 100
txts_path = '%s/artdatis/tagging/OCRed/typed/' % DATA_PATH
text_corpus = []
def corpus_iterator():
# filter out and collect text files
for file_path in glob.glob(txts_path+'*_text.txt'):
with open(file_path, encoding="utf-8") as file:
text = file.read()
# filter duplicates
if text not in text_corpus:
text_corpus.append(text)
text = re.sub(' +', ' ', text)
start_text = text.lstrip()[:LIMIT_START]
with open(file_path.split('_text.txt')[0]+'_path.txt') as path_file:
path = path_file.read().strip().replace(DATA_PATH, '/images')
yield {
"_index": INDEX_NAME,
"_type": TYPE_NAME,
"_source": {"file_path": path, "text": text, "start_text": start_text},
}
print("Loaded %d documents"%len(text_corpus))
from elasticsearch import Elasticsearch
from elasticsearch.helpers import bulk
# create ES client, create index
es = Elasticsearch(hosts = [ES_HOST])
if es.indices.exists(INDEX_NAME):
print("deleting '%s' index..." % (INDEX_NAME))
res = es.indices.delete(index = INDEX_NAME)
print(" response: '%s'" % (res))
request_body = {
"settings" : {
"number_of_shards": 1,
"number_of_replicas": 0
}
}
print("creating '%s' index..." % (INDEX_NAME))
res = es.indices.create(index = INDEX_NAME, body = request_body)
print(" response: '%s'" % (res))
# bulk index the data
print("bulk indexing...")
bulk(es, corpus_iterator())
# sanity check
res = es.search(index = INDEX_NAME, size=2, body={"query": {"match_all": {}}})
print("results:")
for hit in res['hits']['hits']:
print(hit["_source"])
| [
"re.sub",
"elasticsearch.Elasticsearch",
"glob.glob"
] | [((1394, 1424), 'elasticsearch.Elasticsearch', 'Elasticsearch', ([], {'hosts': '[ES_HOST]'}), '(hosts=[ES_HOST])\n', (1407, 1424), False, 'from elasticsearch import Elasticsearch\n'), ((466, 501), 'glob.glob', 'glob.glob', (["(txts_path + '*_text.txt')"], {}), "(txts_path + '*_text.txt')\n", (475, 501), False, 'import glob\n'), ((724, 747), 're.sub', 're.sub', (['""" +"""', '""" """', 'text'], {}), "(' +', ' ', text)\n", (730, 747), False, 'import re\n')] |
from django.db import models
from django.db.models import Q
from django.contrib.auth.models import User
from django.urls import reverse
class ProjectQuerySet(models.QuerySet):
def projects_per_user(self, user):
return self.filter(
Q(project_owner=user.username)
)
class Projects(models.Model):
project_name = models.CharField(max_length=60)
project_owner = models.CharField(default=User, max_length=60)
project_created = models.DateTimeField(auto_now_add=True)
project_description = models.CharField(max_length=255)
project_level = models.IntegerField(default=0)
objects = ProjectQuerySet.as_manager()
def __str__(self):
return str(self.pk)
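# Minimal usage sketch of the custom queryset, e.g. from a view; `request` is
# assumed to be an authenticated Django HttpRequest.
def projects_for_request(request):
    """Return the projects owned by the logged-in user, newest first."""
    return Projects.objects.projects_per_user(request.user).order_by('-project_created')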
| [
"django.db.models.DateTimeField",
"django.db.models.Q",
"django.db.models.CharField",
"django.db.models.IntegerField"
] | [((349, 380), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(60)'}), '(max_length=60)\n', (365, 380), False, 'from django.db import models\n'), ((401, 446), 'django.db.models.CharField', 'models.CharField', ([], {'default': 'User', 'max_length': '(60)'}), '(default=User, max_length=60)\n', (417, 446), False, 'from django.db import models\n'), ((469, 508), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)'}), '(auto_now_add=True)\n', (489, 508), False, 'from django.db import models\n'), ((535, 567), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)'}), '(max_length=255)\n', (551, 567), False, 'from django.db import models\n'), ((588, 618), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': '(0)'}), '(default=0)\n', (607, 618), False, 'from django.db import models\n'), ((257, 287), 'django.db.models.Q', 'Q', ([], {'project_owner': 'user.username'}), '(project_owner=user.username)\n', (258, 287), False, 'from django.db.models import Q\n')] |
import os
import unittest
from checkov.serverless.checks.function.aws.AdminPolicyDocument import check
from checkov.serverless.runner import Runner
from checkov.runner_filter import RunnerFilter
class TestAdminPolicyDocument(unittest.TestCase):
def test_summary(self):
runner = Runner()
current_dir = os.path.dirname(os.path.realpath(__file__))
# Used in
os.environ["sneaky_var"] = "*"
test_files_dir = current_dir + "/example_AdminPolicyDocument"
report = runner.run(root_folder=test_files_dir, runner_filter=RunnerFilter(checks=[check.id]))
summary = report.get_summary()
self.assertEqual(summary['passed'], 2,
f"Passed checks: {[fc.file_path for fc in report.passed_checks]}")
self.assertEqual(summary['failed'], 6,
f"Failed checks: {[fc.file_path for fc in report.failed_checks]}")
self.assertEqual(summary['skipped'], 0,
f"Skipped checks: {[fc.file_path for fc in report.skipped_checks]}")
self.assertEqual(summary['parsing_errors'], 0)
if __name__ == '__main__':
unittest.main()
| [
"unittest.main",
"os.path.realpath",
"checkov.serverless.runner.Runner",
"checkov.runner_filter.RunnerFilter"
] | [((1148, 1163), 'unittest.main', 'unittest.main', ([], {}), '()\n', (1161, 1163), False, 'import unittest\n'), ((293, 301), 'checkov.serverless.runner.Runner', 'Runner', ([], {}), '()\n', (299, 301), False, 'from checkov.serverless.runner import Runner\n'), ((340, 366), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (356, 366), False, 'import os\n'), ((567, 598), 'checkov.runner_filter.RunnerFilter', 'RunnerFilter', ([], {'checks': '[check.id]'}), '(checks=[check.id])\n', (579, 598), False, 'from checkov.runner_filter import RunnerFilter\n')] |
#!/usr/bin/python3
# encoding: utf-8
import os
import sys
import getopt
import logging
import shutil
import psutil
from modules.com_run import ComGenerator
from modules.web_server import ListenServer
from modules.Wlisten_server import WListenServer
from modules.payload_builder_factory import PayloadBuilderFactory
from common import utils, mp_session, help
from common.utils import MSTypes
from common.definitions import VERSION, LOGLEVEL
if sys.platform == "win32":
try:
import win32com.client #@UnresolvedImport @UnusedImport
except:
print("Error: Could not find win32com.")
sys.exit(1)
MP_TYPE="Pro"
if utils.checkModuleExist("pro_core"):
from pro_modules.utilities.dcom_run import DcomGenerator
from pro_modules.payload_builders.containers import ContainerGenerator
from pro_core.payload_builder_factory_pro import PayloadBuilderFactoryPro
from pro_core import arg_mgt_pro, mp_session_pro
else:
MP_TYPE="Community"
from colorama import init
from termcolor import colored
# {PyArmor Protection Code}
# {PyArmor Plugins}
# use Colorama to make Termcolor work on Windows too
init()
WORKING_DIR = "temp"
BANNER = help.getToolPres()
def main(argv):
global MP_TYPE
logLevel = LOGLEVEL
# initialize macro_pack session object
working_directory = os.path.join(os.getcwd(), WORKING_DIR)
if MP_TYPE == "Pro":
mpSession = mp_session_pro.MpSessionPro(working_directory, VERSION, MP_TYPE)
else:
mpSession = mp_session.MpSession(working_directory, VERSION, MP_TYPE)
try:
longOptions = ["embed=", "listen=", "port=", "webdav-listen=", "generate=", "quiet", "input-file=", "encode",
"obfuscate", "obfuscate-form", "obfuscate-names", "obfuscate-declares", "obfuscate-strings",
"obfuscate-names-charset=", "obfuscate-names-minlen=", "obfuscate-names-maxlen=",
"file=","template=","listtemplates","listformats","icon=", "start-function=","uac-bypass",
"unicode-rtlo=", "dde", "print", "force-yes", "help"]
shortOptions= "e:l:w:s:f:t:G:hqmop"
# only for Pro release
if MP_TYPE == "Pro":
longOptions.extend(arg_mgt_pro.proArgsLongOptions)
shortOptions += arg_mgt_pro.proArgsShortOptions
# Only enabled on windows
if sys.platform == "win32":
longOptions.extend(["run=", "run-visible"])
opts, args = getopt.getopt(argv, shortOptions, longOptions) # @UnusedVariable
except getopt.GetoptError:
help.printUsage(BANNER, sys.argv[0])
sys.exit(2)
for opt, arg in opts:
if opt in ("-o", "--obfuscate"):
mpSession.obfuscateForm = True
mpSession.obfuscateNames = True
mpSession.obfuscateStrings = True
mpSession.obfuscateDeclares = True
elif opt=="--obfuscate-form":
mpSession.obfuscateForm = True
elif opt=="--obfuscate-declares":
mpSession.obfuscateDeclares = True
elif opt=="--obfuscate-names":
mpSession.obfuscateNames = True
elif opt=="--obfuscate-names-charset":
try:
mpSession.obfuscatedNamesCharset = arg
except ValueError:
help.printUsage(BANNER, sys.argv[0])
sys.exit(0)
elif opt=="--obfuscate-names-minlen":
try:
mpSession.obfuscatedNamesMinLen = int(arg)
except ValueError:
help.printUsage(BANNER, sys.argv[0])
sys.exit(0)
if mpSession.obfuscatedNamesMinLen < 4 or mpSession.obfuscatedNamesMinLen > 255:
help.printUsage(BANNER, sys.argv[0])
sys.exit(0)
elif opt=="--obfuscate-names-maxlen":
try:
mpSession.obfuscatedNamesMaxLen = int(arg)
except ValueError:
help.printUsage(BANNER, sys.argv[0])
sys.exit(0)
if mpSession.obfuscatedNamesMaxLen < 4 or mpSession.obfuscatedNamesMaxLen > 255:
help.printUsage(BANNER, sys.argv[0])
sys.exit(0)
elif opt=="--obfuscate-strings":
mpSession.obfuscateStrings = True
elif opt=="-s" or opt=="--start-function":
mpSession.startFunction = arg
elif opt=="-l" or opt=="--listen":
mpSession.listen = True
mpSession.listenRoot = os.path.abspath(arg)
elif opt=="--port":
mpSession.listenPort = int(arg)
mpSession.WlistenPort = int(arg)
elif opt=="--icon":
mpSession.icon = arg
elif opt=="-w" or opt=="--webdav-listen":
mpSession.Wlisten = True
mpSession.WRoot = os.path.abspath(arg)
elif opt == "-f" or opt== "--input-file":
mpSession.fileInput = arg
elif opt == "-e" or opt== "--embed":
mpSession.embeddedFilePath = os.path.abspath(arg)
elif opt=="-t" or opt=="--template":
mpSession.template = arg
elif opt == "--listtemplates":
help.printTemplatesUsage(BANNER, sys.argv[0])
sys.exit(0)
elif opt=="-q" or opt=="--quiet":
logLevel = "WARN"
elif opt=="-p" or opt=="--print":
mpSession.printFile = True
elif opt == "--dde":
if sys.platform == "win32":
mpSession.ddeMode = True
elif opt == "--run":
if sys.platform == "win32":
mpSession.runTarget = os.path.abspath(arg)
elif opt == "--run-visible":
if sys.platform == "win32":
mpSession.runVisible = True
elif opt == "--force-yes":
mpSession.forceYes = True
elif opt=="--uac-bypass":
mpSession.uacBypass = True
elif opt == "--unicode-rtlo":
mpSession.unicodeRtlo = arg
elif opt in ("-G", "--generate"):
mpSession.outputFilePath = os.path.abspath(arg)
elif opt == "--listformats":
help.printAvailableFormats(BANNER)
sys.exit(0)
elif opt=="-h" or opt=="--help":
help.printUsage(BANNER, sys.argv[0])
sys.exit(0)
else:
if MP_TYPE == "Pro":
arg_mgt_pro.processProArg(opt, arg, mpSession, BANNER)
else:
help.printUsage(BANNER, sys.argv[0])
sys.exit(0)
if logLevel == "INFO":
os.system('cls' if os.name == 'nt' else 'clear')
# Logging
    logging.basicConfig(level=getattr(logging, logLevel), format="%(message)s", handlers=[utils.ColorLogFiler()])
logging.info(colored(BANNER, 'green'))
logging.info(" [+] Preparations...")
# check input args
if mpSession.fileInput is None:
# Argument not supplied, try to get file content from stdin
if not os.isatty(0): # check if something is being piped
logging.info(" [-] Waiting for piped input feed...")
mpSession.stdinContent = sys.stdin.readlines()
# Close Stdin pipe, so we can call input() later without triggering EOF
#sys.stdin.close()
if sys.platform == "win32":
sys.stdin = open("conIN$")
else:
sys.stdin = sys.__stdin__
else:
if not os.path.isfile(mpSession.fileInput):
logging.error(" [!] ERROR: Could not find %s!" % mpSession.fileInput)
sys.exit(2)
else:
logging.info(" [-] Input file path: %s" % mpSession.fileInput)
if MP_TYPE == "Pro":
if mpSession.communityMode:
logging.warning(" [!] Running in community mode (pro features not applied)")
MP_TYPE="Community"
else:
arg_mgt_pro.verify(mpSession)
# Check output file format
if mpSession.outputFilePath:
if not os.path.isdir(os.path.dirname(mpSession.outputFilePath)):
logging.error(" [!] Could not find output folder %s." % os.path.dirname(mpSession.outputFilePath))
sys.exit(2)
if mpSession.outputFileType == MSTypes.UNKNOWN:
logging.error(" [!] %s is not a supported extension. Use --listformats to view supported MacroPack formats." % os.path.splitext(mpSession.outputFilePath)[1])
sys.exit(2)
else:
logging.info(" [-] Target output format: %s" % mpSession.outputFileType)
elif not mpSession.listen and not mpSession.Wlisten and mpSession.runTarget is None and (MP_TYPE != "Pro" or mpSession.dcomTarget is None):
logging.error(" [!] You need to provide an output file! (get help using %s -h)" % os.path.basename(utils.getRunningApp()))
sys.exit(2)
if not mpSession.isTrojanMode:
# verify that output file does not already exist
if os.path.isfile(mpSession.outputFilePath):
logging.error(" [!] ERROR: Output file %s already exist!" % mpSession.outputFilePath)
sys.exit(2)
#Create temporary folder
logging.info(" [-] Temporary working dir: %s" % working_directory)
if not os.path.exists(working_directory):
os.makedirs(working_directory)
try:
# Create temporary work file.
if mpSession.ddeMode or mpSession.template or (mpSession.outputFileType not in MSTypes.VB_FORMATS+[MSTypes.VBA] and not mpSession.htaMacro):
inputFile = os.path.join(working_directory, "command.cmd")
else:
inputFile = os.path.join(working_directory, utils.randomAlpha(9)) + ".vba"
if mpSession.stdinContent is not None:
import time
time.sleep(0.4) # Needed to avoid some weird race condition
logging.info(" [-] Store std input in file...")
f = open(inputFile, 'w')
f.writelines(mpSession.stdinContent)
f.close()
else:
# Create temporary work file
if mpSession.fileInput is not None:
                # Check there are no binary chars in the input file
if utils.isBinaryString(open(mpSession.fileInput, 'rb').read(1024)):
logging.error(" [!] ERROR: Invalid format for %s. Input should be text format containing your VBA script." % mpSession.fileInput)
logging.info(" [+] Cleaning...")
if os.path.isdir(working_directory):
shutil.rmtree(working_directory)
sys.exit(2)
logging.info(" [-] Store input file...")
shutil.copy2(mpSession.fileInput, inputFile)
if os.path.isfile(inputFile):
logging.info(" [-] Temporary input file: %s" % inputFile)
        # Edit output file name to spoof the extension if the unicodeRtlo option is enabled
if mpSession.unicodeRtlo:
# Reminder; mpSession.unicodeRtlo contains the extension we want to spoof, such as "jpg"
logging.info(" [+] Inject %s false extension with unicode RTLO" % mpSession.unicodeRtlo)
# Separate document path and extension
(fileName, fileExtension) = os.path.splitext(mpSession.outputFilePath)
logging.info(" [-] Extension %s " % fileExtension)
# Append unicode RTLO to file name
fileName += '\u202e'
# Append extension to spoof in reverse order
fileName += '\u200b' + mpSession.unicodeRtlo[::-1] # Prepend invisible space so filename does not end with flagged extension
# Append file extension
fileName += fileExtension
mpSession.outputFilePath = fileName
logging.info(" [-] File name modified to: %s" % mpSession.outputFilePath)
# Retrieve the right payload builder
if mpSession.outputFileType != MSTypes.UNKNOWN:
if MP_TYPE == "Pro" and not mpSession.communityMode:
payloadBuilder = PayloadBuilderFactoryPro().getPayloadBuilder(mpSession)
else:
payloadBuilder = PayloadBuilderFactory().getPayloadBuilder(mpSession)
# Build payload
if payloadBuilder is not None:
payloadBuilder.run()
if MP_TYPE == "Pro":
generator = ContainerGenerator(mpSession)
generator.run()
#run com attack
if mpSession.runTarget:
generator = ComGenerator(mpSession)
generator.run()
if MP_TYPE == "Pro":
#run dcom attack
if mpSession.dcom:
generator = DcomGenerator(mpSession)
generator.run()
# Activate Web server
if mpSession.listen:
listener = ListenServer(mpSession)
listener.run()
# Activate WebDav server
if mpSession.Wlisten:
Wlistener = WListenServer(mpSession)
Wlistener.run()
except Exception:
logging.exception(" [!] Exception caught!")
except KeyboardInterrupt:
logging.error(" [!] Keyboard interrupt caught!")
logging.info(" [+] Cleaning...")
if os.path.isdir(working_directory):
shutil.rmtree(working_directory)
logging.info(" Done!\n")
sys.exit(0)
if __name__ == '__main__':
# check if running from explorer, if yes restart from cmd line
# running_from = psutil.Process(os.getpid()).parent().parent().name()
# if running_from == 'explorer.exe':
# os.system("cmd.exe /k \"%s\"" % utils.getRunningApp())
# PyArmor Plugin: checkPlug()
main(sys.argv[1:])
| [
"common.utils.randomAlpha",
"pro_modules.payload_builders.containers.ContainerGenerator",
"common.utils.checkModuleExist",
"pro_core.payload_builder_factory_pro.PayloadBuilderFactoryPro",
"time.sleep",
"logging.exception",
"pro_modules.utilities.dcom_run.DcomGenerator",
"modules.com_run.ComGenerator",
"common.help.printTemplatesUsage",
"modules.payload_builder_factory.PayloadBuilderFactory",
"pro_core.arg_mgt_pro.processProArg",
"modules.web_server.ListenServer",
"sys.exit",
"logging.info",
"logging.error",
"colorama.init",
"os.path.exists",
"common.help.getToolPres",
"os.isatty",
"shutil.copy2",
"common.mp_session.MpSession",
"pro_core.arg_mgt_pro.verify",
"os.path.isdir",
"getopt.getopt",
"common.help.printAvailableFormats",
"logging.warning",
"common.help.printUsage",
"os.path.splitext",
"common.utils.ColorLogFiler",
"os.path.isfile",
"os.path.dirname",
"pro_core.mp_session_pro.MpSessionPro",
"modules.Wlisten_server.WListenServer",
"termcolor.colored",
"os.makedirs",
"os.path.join",
"sys.stdin.readlines",
"os.getcwd",
"shutil.rmtree",
"os.path.abspath",
"os.system",
"common.utils.getRunningApp"
] | [((640, 674), 'common.utils.checkModuleExist', 'utils.checkModuleExist', (['"""pro_core"""'], {}), "('pro_core')\n", (662, 674), False, 'from common import utils, mp_session, help\n'), ((1132, 1138), 'colorama.init', 'init', ([], {}), '()\n', (1136, 1138), False, 'from colorama import init\n'), ((1173, 1191), 'common.help.getToolPres', 'help.getToolPres', ([], {}), '()\n', (1189, 1191), False, 'from common import utils, mp_session, help\n'), ((6758, 6794), 'logging.info', 'logging.info', (['""" [+] Preparations..."""'], {}), "(' [+] Preparations...')\n", (6770, 6794), False, 'import logging\n'), ((9157, 9225), 'logging.info', 'logging.info', (["(' [-] Temporary working dir: %s' % working_directory)"], {}), "(' [-] Temporary working dir: %s' % working_directory)\n", (9169, 9225), False, 'import logging\n'), ((13256, 13288), 'logging.info', 'logging.info', (['""" [+] Cleaning..."""'], {}), "(' [+] Cleaning...')\n", (13268, 13288), False, 'import logging\n'), ((13296, 13328), 'os.path.isdir', 'os.path.isdir', (['working_directory'], {}), '(working_directory)\n', (13309, 13328), False, 'import os\n'), ((13376, 13400), 'logging.info', 'logging.info', (['""" Done!\n"""'], {}), "(' Done!\\n')\n", (13388, 13400), False, 'import logging\n'), ((13407, 13418), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (13415, 13418), False, 'import sys\n'), ((1333, 1344), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (1342, 1344), False, 'import os\n'), ((1404, 1468), 'pro_core.mp_session_pro.MpSessionPro', 'mp_session_pro.MpSessionPro', (['working_directory', 'VERSION', 'MP_TYPE'], {}), '(working_directory, VERSION, MP_TYPE)\n', (1431, 1468), False, 'from pro_core import arg_mgt_pro, mp_session_pro\n'), ((1499, 1556), 'common.mp_session.MpSession', 'mp_session.MpSession', (['working_directory', 'VERSION', 'MP_TYPE'], {}), '(working_directory, VERSION, MP_TYPE)\n', (1519, 1556), False, 'from common import utils, mp_session, help\n'), ((2472, 2518), 'getopt.getopt', 'getopt.getopt', (['argv', 'shortOptions', 'longOptions'], {}), '(argv, shortOptions, longOptions)\n', (2485, 2518), False, 'import getopt\n'), ((6531, 6579), 'os.system', 'os.system', (["('cls' if os.name == 'nt' else 'clear')"], {}), "('cls' if os.name == 'nt' else 'clear')\n", (6540, 6579), False, 'import os\n'), ((6727, 6751), 'termcolor.colored', 'colored', (['BANNER', '"""green"""'], {}), "(BANNER, 'green')\n", (6734, 6751), False, 'from termcolor import colored\n'), ((8957, 8997), 'os.path.isfile', 'os.path.isfile', (['mpSession.outputFilePath'], {}), '(mpSession.outputFilePath)\n', (8971, 8997), False, 'import os\n'), ((9237, 9270), 'os.path.exists', 'os.path.exists', (['working_directory'], {}), '(working_directory)\n', (9251, 9270), False, 'import os\n'), ((9280, 9310), 'os.makedirs', 'os.makedirs', (['working_directory'], {}), '(working_directory)\n', (9291, 9310), False, 'import os\n'), ((10737, 10762), 'os.path.isfile', 'os.path.isfile', (['inputFile'], {}), '(inputFile)\n', (10751, 10762), False, 'import os\n'), ((13338, 13370), 'shutil.rmtree', 'shutil.rmtree', (['working_directory'], {}), '(working_directory)\n', (13351, 13370), False, 'import shutil\n'), ((611, 622), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (619, 622), False, 'import sys\n'), ((2576, 2612), 'common.help.printUsage', 'help.printUsage', (['BANNER', 'sys.argv[0]'], {}), '(BANNER, sys.argv[0])\n', (2591, 2612), False, 'from common import utils, mp_session, help\n'), ((2621, 2632), 'sys.exit', 'sys.exit', (['(2)'], {}), '(2)\n', (2629, 2632), False, 'import 
sys\n'), ((6938, 6950), 'os.isatty', 'os.isatty', (['(0)'], {}), '(0)\n', (6947, 6950), False, 'import os\n'), ((7000, 7054), 'logging.info', 'logging.info', (['""" [-] Waiting for piped input feed..."""'], {}), "(' [-] Waiting for piped input feed...')\n", (7012, 7054), False, 'import logging\n'), ((7092, 7113), 'sys.stdin.readlines', 'sys.stdin.readlines', ([], {}), '()\n', (7111, 7113), False, 'import sys\n'), ((7423, 7458), 'os.path.isfile', 'os.path.isfile', (['mpSession.fileInput'], {}), '(mpSession.fileInput)\n', (7437, 7458), False, 'import os\n'), ((7472, 7543), 'logging.error', 'logging.error', (["(' [!] ERROR: Could not find %s!' % mpSession.fileInput)"], {}), "(' [!] ERROR: Could not find %s!' % mpSession.fileInput)\n", (7485, 7543), False, 'import logging\n'), ((7556, 7567), 'sys.exit', 'sys.exit', (['(2)'], {}), '(2)\n', (7564, 7567), False, 'import sys\n'), ((7594, 7658), 'logging.info', 'logging.info', (["(' [-] Input file path: %s' % mpSession.fileInput)"], {}), "(' [-] Input file path: %s' % mpSession.fileInput)\n", (7606, 7658), False, 'import logging\n'), ((7733, 7811), 'logging.warning', 'logging.warning', (['""" [!] Running in community mode (pro features not applied)"""'], {}), "(' [!] Running in community mode (pro features not applied)')\n", (7748, 7811), False, 'import logging\n'), ((7870, 7899), 'pro_core.arg_mgt_pro.verify', 'arg_mgt_pro.verify', (['mpSession'], {}), '(mpSession)\n', (7888, 7899), False, 'from pro_core import arg_mgt_pro, mp_session_pro\n'), ((8180, 8191), 'sys.exit', 'sys.exit', (['(2)'], {}), '(2)\n', (8188, 8191), False, 'import sys\n'), ((8441, 8452), 'sys.exit', 'sys.exit', (['(2)'], {}), '(2)\n', (8449, 8452), False, 'import sys\n'), ((8479, 8553), 'logging.info', 'logging.info', (["(' [-] Target output format: %s' % mpSession.outputFileType)"], {}), "(' [-] Target output format: %s' % mpSession.outputFileType)\n", (8491, 8553), False, 'import logging\n'), ((8840, 8851), 'sys.exit', 'sys.exit', (['(2)'], {}), '(2)\n', (8848, 8851), False, 'import sys\n'), ((9011, 9103), 'logging.error', 'logging.error', (["(' [!] ERROR: Output file %s already exist!' % mpSession.outputFilePath)"], {}), "(' [!] ERROR: Output file %s already exist!' 
% mpSession.\n outputFilePath)\n", (9024, 9103), False, 'import logging\n'), ((9111, 9122), 'sys.exit', 'sys.exit', (['(2)'], {}), '(2)\n', (9119, 9122), False, 'import sys\n'), ((9532, 9578), 'os.path.join', 'os.path.join', (['working_directory', '"""command.cmd"""'], {}), "(working_directory, 'command.cmd')\n", (9544, 9578), False, 'import os\n'), ((9763, 9778), 'time.sleep', 'time.sleep', (['(0.4)'], {}), '(0.4)\n', (9773, 9778), False, 'import time\n'), ((9835, 9884), 'logging.info', 'logging.info', (['""" [-] Store std input in file..."""'], {}), "(' [-] Store std input in file...')\n", (9847, 9884), False, 'import logging\n'), ((10777, 10836), 'logging.info', 'logging.info', (["(' [-] Temporary input file: %s' % inputFile)"], {}), "(' [-] Temporary input file: %s' % inputFile)\n", (10789, 10836), False, 'import logging\n'), ((11094, 11187), 'logging.info', 'logging.info', (["(' [+] Inject %s false extension with unicode RTLO' % mpSession.unicodeRtlo)"], {}), "(' [+] Inject %s false extension with unicode RTLO' % mpSession\n .unicodeRtlo)\n", (11106, 11187), False, 'import logging\n'), ((11274, 11316), 'os.path.splitext', 'os.path.splitext', (['mpSession.outputFilePath'], {}), '(mpSession.outputFilePath)\n', (11290, 11316), False, 'import os\n'), ((11342, 11394), 'logging.info', 'logging.info', (["(' [-] Extension %s ' % fileExtension)"], {}), "(' [-] Extension %s ' % fileExtension)\n", (11354, 11394), False, 'import logging\n'), ((11808, 11883), 'logging.info', 'logging.info', (["(' [-] File name modified to: %s' % mpSession.outputFilePath)"], {}), "(' [-] File name modified to: %s' % mpSession.outputFilePath)\n", (11820, 11883), False, 'import logging\n'), ((12586, 12609), 'modules.com_run.ComGenerator', 'ComGenerator', (['mpSession'], {}), '(mpSession)\n', (12598, 12609), False, 'from modules.com_run import ComGenerator\n'), ((12896, 12919), 'modules.web_server.ListenServer', 'ListenServer', (['mpSession'], {}), '(mpSession)\n', (12908, 12919), False, 'from modules.web_server import ListenServer\n'), ((13035, 13059), 'modules.Wlisten_server.WListenServer', 'WListenServer', (['mpSession'], {}), '(mpSession)\n', (13048, 13059), False, 'from modules.Wlisten_server import WListenServer\n'), ((13119, 13162), 'logging.exception', 'logging.exception', (['""" [!] Exception caught!"""'], {}), "(' [!] Exception caught!')\n", (13136, 13162), False, 'import logging\n'), ((13201, 13249), 'logging.error', 'logging.error', (['""" [!] Keyboard interrupt caught!"""'], {}), "(' [!] 
Keyboard interrupt caught!')\n", (13214, 13249), False, 'import logging\n'), ((6684, 6705), 'common.utils.ColorLogFiler', 'utils.ColorLogFiler', ([], {}), '()\n', (6703, 6705), False, 'from common import utils, mp_session, help\n'), ((8011, 8052), 'os.path.dirname', 'os.path.dirname', (['mpSession.outputFilePath'], {}), '(mpSession.outputFilePath)\n', (8026, 8052), False, 'import os\n'), ((10613, 10655), 'logging.info', 'logging.info', (['""" [-] Store input file..."""'], {}), "(' [-] Store input file...')\n", (10625, 10655), False, 'import logging\n'), ((10672, 10716), 'shutil.copy2', 'shutil.copy2', (['mpSession.fileInput', 'inputFile'], {}), '(mpSession.fileInput, inputFile)\n', (10684, 10716), False, 'import shutil\n'), ((12756, 12780), 'pro_modules.utilities.dcom_run.DcomGenerator', 'DcomGenerator', (['mpSession'], {}), '(mpSession)\n', (12769, 12780), False, 'from pro_modules.utilities.dcom_run import DcomGenerator\n'), ((8125, 8166), 'os.path.dirname', 'os.path.dirname', (['mpSession.outputFilePath'], {}), '(mpSession.outputFilePath)\n', (8140, 8166), False, 'import os\n'), ((9649, 9669), 'common.utils.randomAlpha', 'utils.randomAlpha', (['(9)'], {}), '(9)\n', (9666, 9669), False, 'from common import utils, mp_session, help\n'), ((10266, 10407), 'logging.error', 'logging.error', (["(' [!] ERROR: Invalid format for %s. Input should be text format containing your VBA script.'\n % mpSession.fileInput)"], {}), "(\n ' [!] ERROR: Invalid format for %s. Input should be text format containing your VBA script.'\n % mpSession.fileInput)\n", (10279, 10407), False, 'import logging\n'), ((10418, 10450), 'logging.info', 'logging.info', (['""" [+] Cleaning..."""'], {}), "(' [+] Cleaning...')\n", (10430, 10450), False, 'import logging\n'), ((10474, 10506), 'os.path.isdir', 'os.path.isdir', (['working_directory'], {}), '(working_directory)\n', (10487, 10506), False, 'import os\n'), ((10585, 10596), 'sys.exit', 'sys.exit', (['(2)'], {}), '(2)\n', (10593, 10596), False, 'import sys\n'), ((12439, 12468), 'pro_modules.payload_builders.containers.ContainerGenerator', 'ContainerGenerator', (['mpSession'], {}), '(mpSession)\n', (12457, 12468), False, 'from pro_modules.payload_builders.containers import ContainerGenerator\n'), ((8382, 8424), 'os.path.splitext', 'os.path.splitext', (['mpSession.outputFilePath'], {}), '(mpSession.outputFilePath)\n', (8398, 8424), False, 'import os\n'), ((8808, 8829), 'common.utils.getRunningApp', 'utils.getRunningApp', ([], {}), '()\n', (8827, 8829), False, 'from common import utils, mp_session, help\n'), ((10532, 10564), 'shutil.rmtree', 'shutil.rmtree', (['working_directory'], {}), '(working_directory)\n', (10545, 10564), False, 'import shutil\n'), ((12102, 12128), 'pro_core.payload_builder_factory_pro.PayloadBuilderFactoryPro', 'PayloadBuilderFactoryPro', ([], {}), '()\n', (12126, 12128), False, 'from pro_core.payload_builder_factory_pro import PayloadBuilderFactoryPro\n'), ((12209, 12232), 'modules.payload_builder_factory.PayloadBuilderFactory', 'PayloadBuilderFactory', ([], {}), '()\n', (12230, 12232), False, 'from modules.payload_builder_factory import PayloadBuilderFactory\n'), ((3305, 3341), 'common.help.printUsage', 'help.printUsage', (['BANNER', 'sys.argv[0]'], {}), '(BANNER, sys.argv[0])\n', (3320, 3341), False, 'from common import utils, mp_session, help\n'), ((3358, 3369), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (3366, 3369), False, 'import sys\n'), ((3713, 3749), 'common.help.printUsage', 'help.printUsage', (['BANNER', 'sys.argv[0]'], {}), '(BANNER, 
sys.argv[0])\n', (3728, 3749), False, 'from common import utils, mp_session, help\n'), ((3766, 3777), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (3774, 3777), False, 'import sys\n'), ((3539, 3575), 'common.help.printUsage', 'help.printUsage', (['BANNER', 'sys.argv[0]'], {}), '(BANNER, sys.argv[0])\n', (3554, 3575), False, 'from common import utils, mp_session, help\n'), ((3592, 3603), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (3600, 3603), False, 'import sys\n'), ((4121, 4157), 'common.help.printUsage', 'help.printUsage', (['BANNER', 'sys.argv[0]'], {}), '(BANNER, sys.argv[0])\n', (4136, 4157), False, 'from common import utils, mp_session, help\n'), ((4174, 4185), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (4182, 4185), False, 'import sys\n'), ((3947, 3983), 'common.help.printUsage', 'help.printUsage', (['BANNER', 'sys.argv[0]'], {}), '(BANNER, sys.argv[0])\n', (3962, 3983), False, 'from common import utils, mp_session, help\n'), ((4000, 4011), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (4008, 4011), False, 'import sys\n'), ((4482, 4502), 'os.path.abspath', 'os.path.abspath', (['arg'], {}), '(arg)\n', (4497, 4502), False, 'import os\n'), ((4799, 4819), 'os.path.abspath', 'os.path.abspath', (['arg'], {}), '(arg)\n', (4814, 4819), False, 'import os\n'), ((4994, 5014), 'os.path.abspath', 'os.path.abspath', (['arg'], {}), '(arg)\n', (5009, 5014), False, 'import os\n'), ((5148, 5193), 'common.help.printTemplatesUsage', 'help.printTemplatesUsage', (['BANNER', 'sys.argv[0]'], {}), '(BANNER, sys.argv[0])\n', (5172, 5193), False, 'from common import utils, mp_session, help\n'), ((5206, 5217), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (5214, 5217), False, 'import sys\n'), ((5588, 5608), 'os.path.abspath', 'os.path.abspath', (['arg'], {}), '(arg)\n', (5603, 5608), False, 'import os\n'), ((6035, 6055), 'os.path.abspath', 'os.path.abspath', (['arg'], {}), '(arg)\n', (6050, 6055), False, 'import os\n'), ((6105, 6139), 'common.help.printAvailableFormats', 'help.printAvailableFormats', (['BANNER'], {}), '(BANNER)\n', (6131, 6139), False, 'from common import utils, mp_session, help\n'), ((6152, 6163), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (6160, 6163), False, 'import sys\n'), ((6217, 6253), 'common.help.printUsage', 'help.printUsage', (['BANNER', 'sys.argv[0]'], {}), '(BANNER, sys.argv[0])\n', (6232, 6253), False, 'from common import utils, mp_session, help\n'), ((6266, 6277), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (6274, 6277), False, 'import sys\n'), ((6341, 6395), 'pro_core.arg_mgt_pro.processProArg', 'arg_mgt_pro.processProArg', (['opt', 'arg', 'mpSession', 'BANNER'], {}), '(opt, arg, mpSession, BANNER)\n', (6366, 6395), False, 'from pro_core import arg_mgt_pro, mp_session_pro\n'), ((6430, 6466), 'common.help.printUsage', 'help.printUsage', (['BANNER', 'sys.argv[0]'], {}), '(BANNER, sys.argv[0])\n', (6445, 6466), False, 'from common import utils, mp_session, help\n'), ((6483, 6494), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (6491, 6494), False, 'import sys\n')] |
"""
This module defines the ``geoplot`` coordinate reference system classes, wrappers on
``cartopy.crs`` objects meant to be used as parameters to the ``projection`` parameter of all
front-end ``geoplot`` outputs. For the list of Cartopy CRS objects this module derives from,
refer to http://scitools.org.uk/cartopy/docs/latest/crs/projections.html.
"""
import cartopy.crs as ccrs
import geopandas as gpd
class Base:
# TODO: RotatedPole
"""
Generate instances of ``cartopy.crs``.*name* where *name* matches the instance's class name.
    Methods
    -------
`load` : Return a Cartopy CRS initialized with defaults from the `centerings` dictionary,
overridden by initialization parameters.
`_as_mpl_axes` : Return the result of calling cartopy's ``_as_mpl_axes`` for `self.load`
called with empty `df` and `centerings`.
"""
def __init__(self, **kwargs):
"""Save parameters that initialize Cartopy CRSs."""
self.args = kwargs
def load(self, df, centerings):
"""
A meta-method which abstracts the internals of individual projections' load procedures.
Parameters
----------
df : GeoDataFrame
The GeoDataFrame which has been passed as input to the plotter at the top level.
This data is needed to calculate reasonable centering variables in cases in which the
user does not already provide them; which is, incidentally, the reason behind all of
this funny twice-instantiation loading in the first place.
centerings: dict
A dictionary containing names and centering methods. Certain projections have certain
centering parameters whilst others lack them. For example, the geospatial projection
contains both ``central_longitude`` and ``central_latitude`` instance parameter, which
together control the center of the plot, while the North Pole Stereo projection has
only a ``central_longitude`` instance parameter, implying that latitude is fixed (as
indeed it is, as this projection is centered on the North Pole!).
A top-level centerings method is provided in each of the ``geoplot`` top-level plot
functions; each of the projection wrapper classes defined here in turn selects the
            functions from this list relevant to this particular instance and passes them to
            the ``load`` method here.
We then in turn execute these functions to get defaults for our ``df`` and pass them
off to our output ``cartopy.crs`` instance.
Returns
-------
crs : ``cartopy.crs`` object instance
Returns a ``cartopy.crs`` object instance whose appropriate instance variables have
been set to reasonable defaults wherever not already provided by the user.
"""
return getattr(ccrs, self.__class__.__name__)(**{**centerings, **self.args})
def _as_mpl_axes(self):
"""
When ``matplotlib`` is provided a projection via a ``projection`` keyword argument, it
expects to get something with a callable ``as_mpl_axes`` method. The precise details of
what this method does, exactly, are not important: it suffices to know that every
``cartopy`` coordinate reference system object has one.
When we pass a ``geoplot.crs`` crs object to a ``geoplot`` function, the loading and
centering of the data occurs automatically (using the function defined immediately above).
Since we control what ``geoplot`` does at execution, we gracefully integrate this two-step
procedure into the function body.
But there are also use cases outside of our control in which we are forced to pass a
``geoplot.crs`` object without having first called ``load``: most prominently, when
creating a plot containing subplots, the "overall" projection must be pre-loaded. It's
possible to get around this by using ``cartopy.crs`` objects instead, but this is
inelegant. This method is a better way: when a ``geoplot.crs`` object called by
``matplotlib``, it silently swaps itself out for a vanilla version of its ``cartopy.crs``
mirror, and calls that function's ``_as_mpl_axes`` instead.
Parameters
----------
proj : geoplot.crs projection instance
The instance in question (self, in the method body).
Returns
-------
Mutates into a ``cartopy.crs`` object and returns the result of executing ``_as_mpl_axes``
on that object instead.
"""
proj = self.load(gpd.GeoDataFrame(), dict())
return proj._as_mpl_axes()
class Filtering(Base):
"""CRS that `load`s with `centering` restricted to keys in `self.filter_`."""
def load(self, df, centerings):
"""Call `load` method with `centerings` filtered to keys in `self.filter_`."""
return super().load(
df,
{key: value
for key, value in centerings.items()
if key in self.filter_}
)
class LongitudeCentering(Filtering):
"""Form a CRS that centers by longitude."""
filter_ = {'central_longitude'}
class LatitudeCentering(Filtering):
"""For a CRS that centers by latitude."""
filter_ = {'central_latitude'}
PlateCarree,\
LambertCylindrical,\
Mercator,\
Miller,\
Mollweide,\
Robinson,\
Sinusoidal,\
InterruptedGoodeHomolosine,\
Geostationary,\
NorthPolarStereo,\
SouthPolarStereo = tuple(
type(name, (LongitudeCentering,), {})
for name in ('PlateCarree',
'LambertCylindrical',
'Mercator',
'Miller',
'Mollweide',
'Robinson',
'Sinusoidal',
'InterruptedGoodeHomolosine',
'Geostationary',
'NorthPolarStereo',
'SouthPolarStereo')
)
Gnomonic = type('Gnomonic', (LatitudeCentering,), {})
AlbersEqualArea,\
AzimuthalEquidistant,\
LambertConformal,\
Orthographic,\
Stereographic,\
TransverseMercator,\
LambertAzimuthalEqualArea,\
UTM,\
OSGB,\
EuroPP,\
OSNI = tuple(
type(name, (Base,), {})
for name in ('AlbersEqualArea',
'AzimuthalEquidistant',
'LambertConformal',
'Orthographic',
'Stereographic',
'TransverseMercator',
'LambertAzimuthalEqualArea',
'UTM',
'OSGB',
'EuroPP',
'OSNI')
)
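# Minimal usage sketch, assuming cartopy and geopandas are available; in normal
# use these classes are passed via the ``projection=`` keyword of geoplot's
# plotting functions, which call ``load`` with centerings computed from the data.
# The coordinates below are illustrative only.
if __name__ == "__main__":
    proj = Orthographic(central_latitude=40.7)
    # Explicit call mirroring what geoplot does internally: user kwargs win over centerings.
    cartopy_crs = proj.load(gpd.GeoDataFrame(), {'central_longitude': -74.0})
    print(cartopy_crs)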
| [
"geopandas.GeoDataFrame"
] | [((4688, 4706), 'geopandas.GeoDataFrame', 'gpd.GeoDataFrame', ([], {}), '()\n', (4704, 4706), True, 'import geopandas as gpd\n')] |
from common_fixtures import * # NOQA
import websocket as ws
import pytest
def get_logs(client):
hosts = client.list_host(kind='docker', removed_null=True)
assert len(hosts) > 0
in_log = random_str()
cmd = '/bin/bash -c "echo {}; sleep 2"'.format(in_log)
c = client.create_container(image=TEST_IMAGE_UUID, command=cmd)
c = client.wait_success(c)
logs = c.logs()
return logs, in_log, c
def test_logs_token(client):
logs, in_log, c = get_logs(client)
conn = ws.create_connection(logs.url + '?token='+logs.token)
result = conn.recv()
assert result is not None
assert in_log in result
delete_all(client, [c])
def test_logs_no_token(client):
logs, _, c = get_logs(client)
with pytest.raises(Exception) as excinfo:
ws.create_connection(logs.url)
assert 'Handshake status 401' in str(excinfo.value)
delete_all(client, [c])
def test_host_api_garbage_token(client):
logs, _, c = get_logs(client)
with pytest.raises(Exception) as excinfo:
ws.create_connection(logs.url+'?token=random.garbage.token')
assert 'Handshake status 401' in str(excinfo.value)
delete_all(client, [c])
| [
"websocket.create_connection",
"pytest.raises"
] | [((500, 555), 'websocket.create_connection', 'ws.create_connection', (["(logs.url + '?token=' + logs.token)"], {}), "(logs.url + '?token=' + logs.token)\n", (520, 555), True, 'import websocket as ws\n'), ((743, 767), 'pytest.raises', 'pytest.raises', (['Exception'], {}), '(Exception)\n', (756, 767), False, 'import pytest\n'), ((792, 822), 'websocket.create_connection', 'ws.create_connection', (['logs.url'], {}), '(logs.url)\n', (812, 822), True, 'import websocket as ws\n'), ((993, 1017), 'pytest.raises', 'pytest.raises', (['Exception'], {}), '(Exception)\n', (1006, 1017), False, 'import pytest\n'), ((1038, 1100), 'websocket.create_connection', 'ws.create_connection', (["(logs.url + '?token=random.garbage.token')"], {}), "(logs.url + '?token=random.garbage.token')\n", (1058, 1100), True, 'import websocket as ws\n')] |
'''ResNet using PSG in PyTorch.
For Pre-activation ResNet, see 'preact_resnet.py'.
Reference:
[1] <NAME>, <NAME>, <NAME>, <NAME>
Deep Residual Learning for Image Recognition. arXiv:1512.03385
'''
from numpy.lib.arraysetops import isin
import torch
import torch.nn as nn
import torch.nn.functional as F
import math
import numpy as np
from models.masked_psg_seed_conv import PredictiveSeedConv2d
from masked_layers import layers
# Fixed
NUM_BITS = 32
NUM_BITS_WEIGHT = 32
NUM_BITS_GRAD = None
BIPRECISION = False
PREDICTIVE_FORWARD = False
WRITER = None
WRITER_PREFIX_COUNTER = 0
# Tunable
PREDICTIVE_BACKWARD = True
MSB_BITS = 4
MSB_BITS_WEIGHT = 4
MSB_BITS_GRAD = 8
THRESHOLD = 0.0
SPARSIFY = False
SIGN = True
def conv1x1(in_planes, out_planes, stride=1, input_signed=True, predictive_forward=True, writer_prefix=""):
"1x1 convolution with no padding"
predictive_forward = PREDICTIVE_FORWARD and predictive_forward
return PredictiveSeedConv2d(
in_planes, out_planes, kernel_size=1, stride=stride, padding=0, bias=False,
num_bits=NUM_BITS, num_bits_weight=NUM_BITS_WEIGHT, num_bits_grad=NUM_BITS_GRAD,
biprecision=BIPRECISION, input_signed=input_signed,
predictive_forward=predictive_forward, predictive_backward=PREDICTIVE_BACKWARD,
msb_bits=MSB_BITS, msb_bits_weight=MSB_BITS_WEIGHT, msb_bits_grad=MSB_BITS_GRAD,
threshold=THRESHOLD, sparsify=SPARSIFY, sign=SIGN,
writer=WRITER, writer_prefix=writer_prefix)
def conv3x3(in_planes, out_planes, stride=1, input_signed=False, predictive_forward=True, writer_prefix=""):
"3x3 convolution with padding"
predictive_forward = PREDICTIVE_FORWARD and predictive_forward
return PredictiveSeedConv2d(
in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=False,
num_bits=NUM_BITS, num_bits_weight=NUM_BITS_WEIGHT, num_bits_grad=NUM_BITS_GRAD,
biprecision=BIPRECISION, input_signed=input_signed,
predictive_forward=predictive_forward, predictive_backward=PREDICTIVE_BACKWARD,
msb_bits=MSB_BITS, msb_bits_weight=MSB_BITS_WEIGHT, msb_bits_grad=MSB_BITS_GRAD,
threshold=THRESHOLD, sparsify=SPARSIFY, sign=SIGN,
writer=WRITER, writer_prefix=writer_prefix)
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, in_planes, planes, stride=1):
super(BasicBlock, self).__init__()
self.conv1 = conv3x3(in_planes, planes, stride=stride, input_signed=False, predictive_forward=False, writer_prefix=None)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = conv3x3(planes, planes, stride=1, input_signed=False, predictive_forward=False, writer_prefix=None)
self.bn2 = nn.BatchNorm2d(planes)
self.shortcut = nn.Sequential()
if stride != 1 or in_planes != self.expansion*planes:
self.shortcut = nn.Sequential(
# nn.Conv2d(in_planes, self.expansion*planes, kernel_size=1, stride=stride, bias=False),
conv1x1(in_planes, self.expansion*planes, stride=stride, input_signed=False, predictive_forward=False, writer_prefix=None),
nn.BatchNorm2d(self.expansion*planes)
)
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = self.bn2(self.conv2(out))
out += self.shortcut(x)
out = F.relu(out)
return out
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, in_planes, planes, stride=1):
super(Bottleneck, self).__init__()
# self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=1, bias=False)
self.conv1 = conv1x1(in_planes, planes, stride=1, input_signed=False, predictive_forward=False, writer_prefix=None)
self.bn1 = nn.BatchNorm2d(planes)
# self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
self.conv2 = conv3x3(planes, planes, stride=stride, input_signed=False, predictive_forward=False, writer_prefix=None)
self.bn2 = nn.BatchNorm2d(planes)
# self.conv3 = nn.Conv2d(planes, self.expansion*planes, kernel_size=1, bias=False)
self.conv3 = conv1x1(planes, self.expansion*planes, stride=1, input_signed=False, predictive_forward=False, writer_prefix=None)
self.bn3 = nn.BatchNorm2d(self.expansion*planes)
self.shortcut = nn.Sequential()
if stride != 1 or in_planes != self.expansion*planes:
self.shortcut = nn.Sequential(
# nn.Conv2d(in_planes, self.expansion*planes, kernel_size=1, stride=stride, bias=False),
conv1x1(in_planes, self.expansion*planes, stride=stride, input_signed=False, predictive_forward=False, writer_prefix=None),
nn.BatchNorm2d(self.expansion*planes)
)
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = F.relu(self.bn2(self.conv2(out)))
out = self.bn3(self.conv3(out))
out += self.shortcut(x)
out = F.relu(out)
return out
class ResNet(nn.Module):
def __init__(self, block, num_blocks, in_planes=64, num_classes=10, init_method='standard'):
super(ResNet, self).__init__()
self.in_planes = in_planes
self.conv1 = conv3x3(3, self.in_planes, stride=1, input_signed=True, predictive_forward=False, writer_prefix=None)
self.bn1 = nn.BatchNorm2d(self.in_planes)
if self.in_planes == 64:
# self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=False)
self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1)
self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2)
self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2)
self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2)
self.linear = nn.Linear(512*block.expansion, num_classes)
#self.linear = layers.Linear(512*block.expansion, num_classes)
elif self.in_planes == 16:
self.layer1 = self._make_layer(block, 16, num_blocks[0], stride=1)
self.layer2 = self._make_layer(block, 32, num_blocks[1], stride=2)
self.layer3 = self._make_layer(block, 64, num_blocks[2], stride=2)
self.layer4 = None
self.linear = nn.Linear(64, num_classes)
self.reset_conv_parameters(init_method)
print('conv weights reset to {}'.format(init_method))
def reset_parameters(self, module, init_method="kaiming_uniform") -> None:
if init_method == "kaiming_constant_signed":
fan = nn.init._calculate_correct_fan(module.weight, "fan_in")
gain = nn.init.calculate_gain("relu")
std = gain / math.sqrt(fan)
with torch.no_grad():
module.weight.data = module.weight.data.sign() * std
elif init_method == "kaiming_constant_unsigned":
fan = nn.init._calculate_correct_fan(module.weight, "fan_in")
gain = nn.init.calculate_gain("relu")
std = gain / math.sqrt(fan)
with torch.no_grad():
module.weight.data = torch.ones_like(module.weight.data) * std
elif init_method == "kaiming_normal":
nn.init.kaiming_normal_(module.weight, mode="fan_in", nonlinearity="relu")
elif init_method == "kaiming_uniform":
nn.init.kaiming_uniform_(module.weight, mode="fan_in", nonlinearity="relu")
elif init_method == "kaiming_laplace":
fan = nn.init._calculate_correct_fan(module.weight, "fan_in")
gain = nn.init.calculate_gain("relu")
scale = gain / math.sqrt(2.0 * fan)
with torch.no_grad():
new_weight = np.random.laplace(loc=0.0, scale=scale, size=module.weight.shape)
module.weight.data = module.weight.data.new_tensor(torch.from_numpy(new_weight).clone().detach())
elif init_method == "xavier_normal":
nn.init.xavier_normal_(module.weight)
elif init_method == "xavier_constant":
fan_in, fan_out = nn.init._calculate_fan_in_and_fan_out(module.weight)
std = math.sqrt(2.0 / float(fan_in + fan_out))
with torch.no_grad():
module.weight.data = module.weight.data.sign() * std
elif init_method == "standard":
nn.init.kaiming_uniform_(module.weight, a=math.sqrt(5))
else:
raise ValueError(f"{init_method} is not an initialization option!")
def reset_conv_parameters(self, init_method="standard") -> None:
for m in self.modules():
if isinstance(m, nn.Conv2d):
self.reset_parameters(m, init_method)
def get_bop_params(self):
bop_params = []
for m in self.modules():
if isinstance(m, nn.Conv2d):
bop_params += list(m.parameters())
return bop_params
def get_non_bop_params(self):
non_bop_params = []
for m in self.modules():
if isinstance(m, (nn.Linear, nn.BatchNorm2d,)):
non_bop_params += list(m.parameters())
return non_bop_params
def _make_layer(self, block, planes, num_blocks, stride):
strides = [stride] + [1]*(num_blocks-1)
layers = []
for stride in strides:
layers.append(block(self.in_planes, planes, stride))
self.in_planes = planes * block.expansion
return nn.Sequential(*layers)
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = self.layer1(out)
out = self.layer2(out)
out = self.layer3(out)
if self.layer4 is not None:
out = self.layer4(out)
# out = F.avg_pool2d(out, 4)
out = F.avg_pool2d(out, out.size()[3])
out = out.view(out.size(0), -1)
out = self.linear(out)
return out
def PsgSeedResNet20(
num_classes=10,
init_method='standard',
predictive_backward=True,
msb_bits=4,
msb_bits_weight=4,
msb_bits_grad=8,
threshold=0.0,
sparsify=False,
sign=True
):
global PREDICTIVE_BACKWARD, MSB_BITS, MSB_BITS_WEIGHT, MSB_BITS_GRAD, THRESHOLD, SPARSIFY, SIGN
PREDICTIVE_BACKWARD = predictive_backward
MSB_BITS = msb_bits
MSB_BITS_WEIGHT = msb_bits_weight
MSB_BITS_GRAD = msb_bits_grad
THRESHOLD = threshold
SPARSIFY = sparsify
SIGN = sign
return ResNet(BasicBlock, [3,3,3], in_planes=16, num_classes=num_classes, init_method=init_method)
def PsgSeedResNet18(
num_classes=10,
init_method='standard',
predictive_backward=True,
msb_bits=4,
msb_bits_weight=4,
msb_bits_grad=8,
threshold=0.0,
sparsify=False,
sign=True
):
global PREDICTIVE_BACKWARD, MSB_BITS, MSB_BITS_WEIGHT, MSB_BITS_GRAD, THRESHOLD, SPARSIFY, SIGN
PREDICTIVE_BACKWARD = predictive_backward
MSB_BITS = msb_bits
MSB_BITS_WEIGHT = msb_bits_weight
MSB_BITS_GRAD = msb_bits_grad
THRESHOLD = threshold
SPARSIFY = sparsify
SIGN = sign
return ResNet(BasicBlock, [2,2,2,2], num_classes=num_classes, init_method=init_method)
def PsgSeedResNet34(
num_classes=10,
init_method='standard',
predictive_backward=True,
msb_bits=4,
msb_bits_weight=4,
msb_bits_grad=8,
threshold=0.0,
sparsify=False,
sign=True
):
global PREDICTIVE_BACKWARD, MSB_BITS, MSB_BITS_WEIGHT, MSB_BITS_GRAD, THRESHOLD, SPARSIFY, SIGN
PREDICTIVE_BACKWARD = predictive_backward
MSB_BITS = msb_bits
MSB_BITS_WEIGHT = msb_bits_weight
MSB_BITS_GRAD = msb_bits_grad
THRESHOLD = threshold
SPARSIFY = sparsify
SIGN = sign
return ResNet(BasicBlock, [3,4,6,3], num_classes=num_classes, init_method=init_method)
def PsgSeedResNet50(
num_classes=10,
init_method='standard',
predictive_backward=True,
msb_bits=4,
msb_bits_weight=4,
msb_bits_grad=8,
threshold=0.0,
sparsify=False,
sign=True
):
global PREDICTIVE_BACKWARD, MSB_BITS, MSB_BITS_WEIGHT, MSB_BITS_GRAD, THRESHOLD, SPARSIFY, SIGN
PREDICTIVE_BACKWARD = predictive_backward
MSB_BITS = msb_bits
MSB_BITS_WEIGHT = msb_bits_weight
MSB_BITS_GRAD = msb_bits_grad
THRESHOLD = threshold
SPARSIFY = sparsify
SIGN = sign
return ResNet(Bottleneck, [3,4,6,3], num_classes=num_classes, init_method=init_method)
def PsgSeedResNet101(
num_classes=10,
init_method='standard',
predictive_backward=True,
msb_bits=4,
msb_bits_weight=4,
msb_bits_grad=8,
threshold=0.0,
sparsify=False,
sign=True
):
global PREDICTIVE_BACKWARD, MSB_BITS, MSB_BITS_WEIGHT, MSB_BITS_GRAD, THRESHOLD, SPARSIFY, SIGN
PREDICTIVE_BACKWARD = predictive_backward
MSB_BITS = msb_bits
MSB_BITS_WEIGHT = msb_bits_weight
MSB_BITS_GRAD = msb_bits_grad
THRESHOLD = threshold
SPARSIFY = sparsify
SIGN = sign
return ResNet(Bottleneck, [3,4,23,3], num_classes=num_classes, init_method=init_method)
def PsgSeedResNet152(
num_classes=10,
init_method='standard',
predictive_backward=True,
msb_bits=4,
msb_bits_weight=4,
msb_bits_grad=8,
threshold=0.0,
sparsify=False,
sign=True
):
global PREDICTIVE_BACKWARD, MSB_BITS, MSB_BITS_WEIGHT, MSB_BITS_GRAD, THRESHOLD, SPARSIFY, SIGN
PREDICTIVE_BACKWARD = predictive_backward
MSB_BITS = msb_bits
MSB_BITS_WEIGHT = msb_bits_weight
MSB_BITS_GRAD = msb_bits_grad
THRESHOLD = threshold
SPARSIFY = sparsify
SIGN = sign
return ResNet(Bottleneck, [3,8,36,3], num_classes=num_classes, init_method=init_method)
def test():
    net = PsgSeedResNet18()  # the original called undefined ResNet18(); use the builder defined above
y = net(torch.randn(1,3,32,32))
print(y.size())
# test()
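# Illustrative usage sketch (added; not part of the original file): build the
# PSG-seeded ResNet-20 defined above with its default MSB settings and run a
# dummy CIFAR-sized forward pass. The (1, 3, 32, 32) input shape is assumed
# purely for illustration.
def example_usage():
    model = PsgSeedResNet20(num_classes=10, init_method='standard')
    logits = model(torch.randn(1, 3, 32, 32))
    print(logits.size())  # expected: torch.Size([1, 10])
# example_usage()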
| [
"torch.nn.BatchNorm2d",
"torch.ones_like",
"torch.nn.Sequential",
"torch.nn.init._calculate_correct_fan",
"math.sqrt",
"models.masked_psg_seed_conv.PredictiveSeedConv2d",
"torch.nn.init.kaiming_normal_",
"torch.nn.init.kaiming_uniform_",
"torch.nn.init.xavier_normal_",
"torch.nn.init._calculate_fan_in_and_fan_out",
"torch.from_numpy",
"numpy.random.laplace",
"torch.nn.functional.relu",
"torch.nn.Linear",
"torch.nn.init.calculate_gain",
"torch.no_grad",
"torch.randn"
] | [((952, 1468), 'models.masked_psg_seed_conv.PredictiveSeedConv2d', 'PredictiveSeedConv2d', (['in_planes', 'out_planes'], {'kernel_size': '(1)', 'stride': 'stride', 'padding': '(0)', 'bias': '(False)', 'num_bits': 'NUM_BITS', 'num_bits_weight': 'NUM_BITS_WEIGHT', 'num_bits_grad': 'NUM_BITS_GRAD', 'biprecision': 'BIPRECISION', 'input_signed': 'input_signed', 'predictive_forward': 'predictive_forward', 'predictive_backward': 'PREDICTIVE_BACKWARD', 'msb_bits': 'MSB_BITS', 'msb_bits_weight': 'MSB_BITS_WEIGHT', 'msb_bits_grad': 'MSB_BITS_GRAD', 'threshold': 'THRESHOLD', 'sparsify': 'SPARSIFY', 'sign': 'SIGN', 'writer': 'WRITER', 'writer_prefix': 'writer_prefix'}), '(in_planes, out_planes, kernel_size=1, stride=stride,\n padding=0, bias=False, num_bits=NUM_BITS, num_bits_weight=\n NUM_BITS_WEIGHT, num_bits_grad=NUM_BITS_GRAD, biprecision=BIPRECISION,\n input_signed=input_signed, predictive_forward=predictive_forward,\n predictive_backward=PREDICTIVE_BACKWARD, msb_bits=MSB_BITS,\n msb_bits_weight=MSB_BITS_WEIGHT, msb_bits_grad=MSB_BITS_GRAD, threshold\n =THRESHOLD, sparsify=SPARSIFY, sign=SIGN, writer=WRITER, writer_prefix=\n writer_prefix)\n', (972, 1468), False, 'from models.masked_psg_seed_conv import PredictiveSeedConv2d\n'), ((1719, 2235), 'models.masked_psg_seed_conv.PredictiveSeedConv2d', 'PredictiveSeedConv2d', (['in_planes', 'out_planes'], {'kernel_size': '(3)', 'stride': 'stride', 'padding': '(1)', 'bias': '(False)', 'num_bits': 'NUM_BITS', 'num_bits_weight': 'NUM_BITS_WEIGHT', 'num_bits_grad': 'NUM_BITS_GRAD', 'biprecision': 'BIPRECISION', 'input_signed': 'input_signed', 'predictive_forward': 'predictive_forward', 'predictive_backward': 'PREDICTIVE_BACKWARD', 'msb_bits': 'MSB_BITS', 'msb_bits_weight': 'MSB_BITS_WEIGHT', 'msb_bits_grad': 'MSB_BITS_GRAD', 'threshold': 'THRESHOLD', 'sparsify': 'SPARSIFY', 'sign': 'SIGN', 'writer': 'WRITER', 'writer_prefix': 'writer_prefix'}), '(in_planes, out_planes, kernel_size=3, stride=stride,\n padding=1, bias=False, num_bits=NUM_BITS, num_bits_weight=\n NUM_BITS_WEIGHT, num_bits_grad=NUM_BITS_GRAD, biprecision=BIPRECISION,\n input_signed=input_signed, predictive_forward=predictive_forward,\n predictive_backward=PREDICTIVE_BACKWARD, msb_bits=MSB_BITS,\n msb_bits_weight=MSB_BITS_WEIGHT, msb_bits_grad=MSB_BITS_GRAD, threshold\n =THRESHOLD, sparsify=SPARSIFY, sign=SIGN, writer=WRITER, writer_prefix=\n writer_prefix)\n', (1739, 2235), False, 'from models.masked_psg_seed_conv import PredictiveSeedConv2d\n'), ((2556, 2578), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['planes'], {}), '(planes)\n', (2570, 2578), True, 'import torch.nn as nn\n'), ((2719, 2741), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['planes'], {}), '(planes)\n', (2733, 2741), True, 'import torch.nn as nn\n'), ((2767, 2782), 'torch.nn.Sequential', 'nn.Sequential', ([], {}), '()\n', (2780, 2782), True, 'import torch.nn as nn\n'), ((3360, 3371), 'torch.nn.functional.relu', 'F.relu', (['out'], {}), '(out)\n', (3366, 3371), True, 'import torch.nn.functional as F\n'), ((3759, 3781), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['planes'], {}), '(planes)\n', (3773, 3781), True, 'import torch.nn as nn\n'), ((4029, 4051), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['planes'], {}), '(planes)\n', (4043, 4051), True, 'import torch.nn as nn\n'), ((4298, 4337), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(self.expansion * planes)'], {}), '(self.expansion * planes)\n', (4312, 4337), True, 'import torch.nn as nn\n'), ((4361, 4376), 'torch.nn.Sequential', 'nn.Sequential', ([], {}), '()\n', (4374, 4376), 
True, 'import torch.nn as nn\n'), ((5002, 5013), 'torch.nn.functional.relu', 'F.relu', (['out'], {}), '(out)\n', (5008, 5013), True, 'import torch.nn.functional as F\n'), ((5374, 5404), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['self.in_planes'], {}), '(self.in_planes)\n', (5388, 5404), True, 'import torch.nn as nn\n'), ((9456, 9478), 'torch.nn.Sequential', 'nn.Sequential', (['*layers'], {}), '(*layers)\n', (9469, 9478), True, 'import torch.nn as nn\n'), ((13644, 13669), 'torch.randn', 'torch.randn', (['(1)', '(3)', '(32)', '(32)'], {}), '(1, 3, 32, 32)\n', (13655, 13669), False, 'import torch\n'), ((5876, 5921), 'torch.nn.Linear', 'nn.Linear', (['(512 * block.expansion)', 'num_classes'], {}), '(512 * block.expansion, num_classes)\n', (5885, 5921), True, 'import torch.nn as nn\n'), ((6613, 6668), 'torch.nn.init._calculate_correct_fan', 'nn.init._calculate_correct_fan', (['module.weight', '"""fan_in"""'], {}), "(module.weight, 'fan_in')\n", (6643, 6668), True, 'import torch.nn as nn\n'), ((6688, 6718), 'torch.nn.init.calculate_gain', 'nn.init.calculate_gain', (['"""relu"""'], {}), "('relu')\n", (6710, 6718), True, 'import torch.nn as nn\n'), ((3149, 3188), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(self.expansion * planes)'], {}), '(self.expansion * planes)\n', (3163, 3188), True, 'import torch.nn as nn\n'), ((4743, 4782), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(self.expansion * planes)'], {}), '(self.expansion * planes)\n', (4757, 4782), True, 'import torch.nn as nn\n'), ((6324, 6350), 'torch.nn.Linear', 'nn.Linear', (['(64)', 'num_classes'], {}), '(64, num_classes)\n', (6333, 6350), True, 'import torch.nn as nn\n'), ((6744, 6758), 'math.sqrt', 'math.sqrt', (['fan'], {}), '(fan)\n', (6753, 6758), False, 'import math\n'), ((6776, 6791), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (6789, 6791), False, 'import torch\n'), ((6937, 6992), 'torch.nn.init._calculate_correct_fan', 'nn.init._calculate_correct_fan', (['module.weight', '"""fan_in"""'], {}), "(module.weight, 'fan_in')\n", (6967, 6992), True, 'import torch.nn as nn\n'), ((7012, 7042), 'torch.nn.init.calculate_gain', 'nn.init.calculate_gain', (['"""relu"""'], {}), "('relu')\n", (7034, 7042), True, 'import torch.nn as nn\n'), ((7068, 7082), 'math.sqrt', 'math.sqrt', (['fan'], {}), '(fan)\n', (7077, 7082), False, 'import math\n'), ((7100, 7115), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (7113, 7115), False, 'import torch\n'), ((7254, 7328), 'torch.nn.init.kaiming_normal_', 'nn.init.kaiming_normal_', (['module.weight'], {'mode': '"""fan_in"""', 'nonlinearity': '"""relu"""'}), "(module.weight, mode='fan_in', nonlinearity='relu')\n", (7277, 7328), True, 'import torch.nn as nn\n'), ((7154, 7189), 'torch.ones_like', 'torch.ones_like', (['module.weight.data'], {}), '(module.weight.data)\n', (7169, 7189), False, 'import torch\n'), ((7388, 7463), 'torch.nn.init.kaiming_uniform_', 'nn.init.kaiming_uniform_', (['module.weight'], {'mode': '"""fan_in"""', 'nonlinearity': '"""relu"""'}), "(module.weight, mode='fan_in', nonlinearity='relu')\n", (7412, 7463), True, 'import torch.nn as nn\n'), ((7529, 7584), 'torch.nn.init._calculate_correct_fan', 'nn.init._calculate_correct_fan', (['module.weight', '"""fan_in"""'], {}), "(module.weight, 'fan_in')\n", (7559, 7584), True, 'import torch.nn as nn\n'), ((7604, 7634), 'torch.nn.init.calculate_gain', 'nn.init.calculate_gain', (['"""relu"""'], {}), "('relu')\n", (7626, 7634), True, 'import torch.nn as nn\n'), ((7662, 7682), 'math.sqrt', 'math.sqrt', (['(2.0 * fan)'], {}), '(2.0 * 
fan)\n', (7671, 7682), False, 'import math\n'), ((7700, 7715), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (7713, 7715), False, 'import torch\n'), ((7746, 7811), 'numpy.random.laplace', 'np.random.laplace', ([], {'loc': '(0.0)', 'scale': 'scale', 'size': 'module.weight.shape'}), '(loc=0.0, scale=scale, size=module.weight.shape)\n', (7763, 7811), True, 'import numpy as np\n'), ((7983, 8020), 'torch.nn.init.xavier_normal_', 'nn.init.xavier_normal_', (['module.weight'], {}), '(module.weight)\n', (8005, 8020), True, 'import torch.nn as nn\n'), ((8098, 8150), 'torch.nn.init._calculate_fan_in_and_fan_out', 'nn.init._calculate_fan_in_and_fan_out', (['module.weight'], {}), '(module.weight)\n', (8135, 8150), True, 'import torch.nn as nn\n'), ((8227, 8242), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (8240, 8242), False, 'import torch\n'), ((8407, 8419), 'math.sqrt', 'math.sqrt', (['(5)'], {}), '(5)\n', (8416, 8419), False, 'import math\n'), ((7879, 7907), 'torch.from_numpy', 'torch.from_numpy', (['new_weight'], {}), '(new_weight)\n', (7895, 7907), False, 'import torch\n')] |
import cv2
import time
import socket
import threading
class Response(object):
def __init__(self):
pass
def recv(self, data):
pass
def pop(self):
pass
def empty(self):
pass
class Command(Response):
def __init__(self):
super(Command, self).__init__()
self.response = None
self.lock = threading.RLock()
def recv(self, data):
with self.lock:
self.response = data.decode('utf-8')
def pop(self):
with self.lock:
response, self.response = self.response, None
return response
def empty(self):
with self.lock:
return self.response is None
class State(Response):
def __init__(self):
super(State, self).__init__()
self.response = {}
self.lock = threading.RLock()
def recv(self, data):
with self.lock:
self.response = {item.split(':')[0]:float(item.split(':')[1]) for item in data.decode('utf-8').split(';') if ':' in item}
def pop(self):
return self.response
def empty(self):
return False
class Client(object):
def __init__(self, local_port, buffer_size, daemon, response):
self.response = response
self.buffer_size = buffer_size
self.socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.socket.bind(('', local_port))
self.receive_thread = threading.Thread(target=self._receive_thread)
self.receive_thread.daemon = daemon
self.receive_thread.start()
def __del__(self):
"""Closes the local socket."""
self.socket.close()
def _receive_thread(self):
"""Listens for responses from the Tello.
Runs as a thread, sets self.response to whatever the Tello last returned.
"""
while True:
try:
self.response.recv(self.socket.recv(self.buffer_size))
except Exception as e:
print(e)
break
def empty(self):
return self.response.empty()
def pop(self):
return self.response.pop()
class Video(object):
def __init__(self, daemon=True):
self.video = cv2.VideoCapture('udp://@0.0.0.0:11111')
if not self.video.isOpened():
raise RuntimeError('Failed to connect to Tello')
self.frame = None
self.lock = threading.RLock()
self.thread = threading.Thread(target=self._update_thread)
self.thread.daemon = daemon
self.thread.start()
def __del__(self):
self.video.release()
def _update_thread(self):
while True:
ok, frame = self.video.read()
if ok:
with self.lock:
self.frame = frame
def empty(self):
with self.lock:
return self.frame is None
def pop(self):
with self.lock:
frame, self.frame = self.frame, None
return frame
class Tello(object):
def __init__(self, local_port=9999, command_timeout=0.35, state=True, video=True):
"""Connects to Tello in command mode.
Args:
local_port (int): port of local machine for receiving command response.
command_timeout (float): seconds to wait for a response of command.
state (bool): receive state from Tello?
video (bool): receive video from Tello?
Raises:
RuntimeError: If the Tello rejects the attempt to enter command mode or open the video stream.
"""
self.command_timeout = command_timeout
self.response_client = Client(local_port, 1024, True, Command())
self.state_client = Client(8890, 1024, True, State()) if state else None
self.tello_address = ('192.168.10.1', 8889)
self.enter_command_mode()
self.video_client = None
if video:
self.open_video_stream()
self.video_client = Video(True)
def send_command(self, command, with_return=True):
"""Sends a command to the Tello and waits for a response.
If self.command_timeout is exceeded before a response is received,
a RuntimeError exception is raised.
Args:
command (str): Command to send.
Returns:
str: Response from Tello.
Raises:
RuntimeError: If no response is received within self.timeout seconds.
"""
self.response_client.pop()
self.response_client.socket.sendto(command.encode('utf-8'), self.tello_address)
if not with_return:
return
st = time.time()
while self.response_client.empty():
if time.time() - st >= self.command_timeout:
raise RuntimeError('No response to command')
return self.response_client.pop()
def state(self):
return self.state_client.pop() if self.state_client else None
def read_frame(self):
if self.video_client is None:
raise RuntimeError('Video is not available')
while self.video_client.empty():
pass
return self.video_client.pop()
def enter_command_mode(self):
if self.send_command('command') != 'ok':
raise RuntimeError('Tello rejected the attempt to enter command mode')
def take_off(self):
"""
return: 'ok' or 'error'
"""
return self.send_command('takeoff')
def land(self):
"""
return: 'ok' or 'error'
"""
return self.send_command('land')
def open_video_stream(self):
if self.send_command('streamon') != 'ok':
raise RuntimeError('Tello rejected to open the video stream')
def close_video_stream(self):
"""
return: 'ok' or 'error'
"""
return self.send_command('streamoff')
def emergency_shutdown(self):
"""
return: 'ok' or 'error'
"""
return self.send_command('emergency')
def move_up(self, x, with_return=False):
"""
param x: int, [20, 500]
param with_return: bool
return: 'ok' or 'error'
"""
return self.send_command('up {}'.format(x), with_return)
def move_down(self, x, with_return=False):
"""
param x: int, [20, 500]
param with_return: bool
return: 'ok' or 'error'
"""
return self.send_command('down {}'.format(x), with_return)
def move_left(self, x, with_return=False):
"""
param x: int, [20, 500]
param with_return: bool
return: 'ok' or 'error'
"""
return self.send_command('left {}'.format(x), with_return)
def move_right(self, x, with_return=False):
"""
param x: int, [20, 500]
param with_return: bool
return: 'ok' or 'error'
"""
return self.send_command('right {}'.format(x), with_return)
def move_forward(self, x, with_return=False):
"""
param x: int, [20, 500]
param with_return: bool
return: 'ok' or 'error'
"""
return self.send_command('forward {}'.format(x), with_return)
def move_backward(self, x, with_return=False):
"""
param x: int, [20, 500]
param with_return: bool
return: 'ok' or 'error'
"""
return self.send_command('back {}'.format(x), with_return)
def rotate_clockwise(self, x, with_return=False):
"""
param x: int, [1, 3600]
param with_return: bool
return: 'ok' or 'error'
"""
return self.send_command('cw {}'.format(x), with_return)
def rotate_counter_clockwise(self, x, with_return=False):
"""
param x: int, [1, 3600]
param with_return: bool
return: 'ok' or 'error'
"""
return self.send_command('ccw {}'.format(x), with_return)
def flip_left(self, with_return=False):
"""
param with_return: bool
return: 'ok' or 'error'
"""
return self.send_command('flip l', with_return)
def flip_right(self, with_return=False):
"""
param with_return: bool
return: 'ok' or 'error'
"""
return self.send_command('flip r', with_return)
def flip_forward(self, with_return=False):
"""
param with_return: bool
return: 'ok' or 'error'
"""
return self.send_command('flip f', with_return)
def flip_backward(self, with_return=False):
"""
param with_return: bool
return: 'ok' or 'error'
"""
return self.send_command('flip b', with_return)
def goto(self, x, y, z, speed, with_return=False):
"""
param x: int, [20, 500]
param y: int, [20, 500]
param z: int, [20, 500]
param speed: int, [10-100]
param with_return: bool
return: 'ok' or 'error'
"""
return self.send_command('go {} {} {} {}'.format(x, y, z, speed), with_return)
def goto_curve(self, x1, y1, z1, x2, y2, z2, speed, with_return=False):
"""fly a curve defined by (0, 0, 0), (x1, y1, z1), (x2, y2, z2) with speed
param x1, x2: int, [-500, 500]
param y1, y2: int, [-500, 500]
param z1, z2: int, [-500, 500]
param speed: int, [10-60]
param with_return: bool
return: 'ok' or 'error'
"""
return self.send_command('curve {} {} {} {} {} {} {}'.format(x1, y1, z1, x2, y2, z2, speed), with_return)
def set_speed(self, speed, with_return=False):
"""
param speed: int, [10-100]
param with_return: bool
return: 'ok' or 'error'
"""
return self.send_command('speed {}'.format(speed), with_return)
def set_remote_controller_command(self, left_right_velocity, forward_backward_velocity, up_down_velocity, rotate_velocity, with_return=False):
"""
param left_right_velocity: int, [-100, 100]
param forward_backward_velocity: int, [-100, 100]
param up_down_velocity: int, [-100, 100]
param rotate_velocity: int, [-100, 100]
param with_return: bool
return: 'ok' or 'error'
"""
return self.send_command('rc {} {} {} {}'.format(left_right_velocity, forward_backward_velocity, up_down_velocity, rotate_velocity), with_return)
def get(self, command, split=False):
"""
param command
param split: bool, multiple values?
return: int or list(int)
"""
result = self.send_command(command)
if split:
return [int(x) for x in result.split(' ')]
else:
return int(result)
def get_speed(self):
"""
return: int, [10, 100]
"""
return self.get('speed?')
def get_battery(self):
"""
return: int, [0, 100]
"""
return self.get('battery?')
def get_flight_time(self):
"""
return: int
"""
return self.get('time?')
def get_relative_height(self):
"""
return: int, [10, 3000]
"""
return self.get('height?')
def get_temperature(self):
"""
return: int, [0, 90]
"""
return self.get('temp?')
def get_imu_pose(self):
"""[pitch, roll, yaw]
return: list(int), [[-89, 89], [-179, 179], [-179, 179]]
"""
return self.get('attitude?', split=True)
def get_absolute_height(self):
"""
return: int
"""
return self.get('baro?')
def get_imu_acceleration(self):
"""
return: list(int)
"""
return self.get('acceleration?', split=True)
def get_tof_height(self):
"""
return: int, [10, 400]; 6553: out of bounds
"""
return self.get('tof?')
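# Illustrative usage sketch (added for clarity; not part of the original module).
# It assumes a real Tello drone reachable at the SDK default address
# 192.168.10.1, so it is guarded behind __main__ and never runs on import.
if __name__ == '__main__':
    drone = Tello(local_port=9999, state=True, video=False)
    print('battery:', drone.get_battery())
    drone.take_off()
    drone.rotate_clockwise(90, with_return=True)
    drone.land()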
| [
"socket.socket",
"threading.RLock",
"cv2.VideoCapture",
"threading.Thread",
"time.time"
] | [((365, 382), 'threading.RLock', 'threading.RLock', ([], {}), '()\n', (380, 382), False, 'import threading\n'), ((830, 847), 'threading.RLock', 'threading.RLock', ([], {}), '()\n', (845, 847), False, 'import threading\n'), ((1311, 1359), 'socket.socket', 'socket.socket', (['socket.AF_INET', 'socket.SOCK_DGRAM'], {}), '(socket.AF_INET, socket.SOCK_DGRAM)\n', (1324, 1359), False, 'import socket\n'), ((1434, 1479), 'threading.Thread', 'threading.Thread', ([], {'target': 'self._receive_thread'}), '(target=self._receive_thread)\n', (1450, 1479), False, 'import threading\n'), ((2212, 2252), 'cv2.VideoCapture', 'cv2.VideoCapture', (['"""udp://@0.0.0.0:11111"""'], {}), "('udp://@0.0.0.0:11111')\n", (2228, 2252), False, 'import cv2\n'), ((2398, 2415), 'threading.RLock', 'threading.RLock', ([], {}), '()\n', (2413, 2415), False, 'import threading\n'), ((2438, 2482), 'threading.Thread', 'threading.Thread', ([], {'target': 'self._update_thread'}), '(target=self._update_thread)\n', (2454, 2482), False, 'import threading\n'), ((4630, 4641), 'time.time', 'time.time', ([], {}), '()\n', (4639, 4641), False, 'import time\n'), ((4701, 4712), 'time.time', 'time.time', ([], {}), '()\n', (4710, 4712), False, 'import time\n')] |
#!/usr/bin/env python3
import tensorflow as tf
physical_devices = tf.config.list_physical_devices('GPU')
try:
tf.config.experimental.set_memory_growth(physical_devices[0], True)
except:
# Invalid device or cannot modify virtual devices once initialized.
pass
import numpy as np
import os, time, csv
import tqdm
import umap
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import datetime
import signal
import net
from matplotlib import rcParams
rcParams['font.family'] = 'sans-serif'
rcParams['font.sans-serif'] = ['Hiragino Maru Gothic Pro', 'Yu Gothic', 'Meirio', 'Takao', 'IPAexGothic', 'IPAPGothic', 'Noto Sans CJK JP']
class SimpleEncodeDecoder:
def __init__(self):
self.save_dir = './result/step1/'
self.result_dir = './result/plot/'
os.makedirs(self.result_dir, exist_ok=True)
checkpoint_dir = self.save_dir
self.max_epoch = 300
self.steps_per_epoch = 1000
self.batch_size = 64
lr = tf.keras.optimizers.schedules.ExponentialDecay(1e-3, 1e5, 0.5)
self.optimizer = tf.keras.optimizers.Adam(lr)
self.encoder = net.FeatureBlock()
self.encoder.summary()
self.decoder = net.SimpleDecoderBlock()
self.decoder.summary()
inputs = {
'image': tf.keras.Input(shape=(128,128,3)),
}
feature_out = self.encoder(inputs)
outputs = self.decoder(feature_out)
self.model = tf.keras.Model(inputs, outputs, name='SimpleEncodeDecoder')
checkpoint = tf.train.Checkpoint(optimizer=self.optimizer,
model=self.model)
last = tf.train.latest_checkpoint(checkpoint_dir)
checkpoint.restore(last)
self.manager = tf.train.CheckpointManager(
checkpoint, directory=checkpoint_dir, max_to_keep=2)
if not last is None:
self.init_epoch = int(os.path.basename(last).split('-')[1])
print('loaded %d epoch'%self.init_epoch)
else:
self.init_epoch = 0
self.model.summary()
def eval(self):
self.data = net.FontData()
print("Plot: ", self.init_epoch + 1)
acc = self.make_plot(self.data.test_data(self.batch_size), (self.init_epoch + 1))
print('acc', acc)
@tf.function
def eval_substep(self, inputs):
input_data = {
'image': inputs['input'],
}
feature = self.encoder(input_data)
outputs = self.decoder(feature)
target_id = inputs['index']
target_id1 = inputs['idx1']
target_id2 = inputs['idx2']
pred_id1 = tf.nn.softmax(outputs['id1'], -1)
pred_id2 = tf.nn.softmax(outputs['id2'], -1)
return {
'feature': feature,
'pred_id1': pred_id1,
'pred_id2': pred_id2,
'target_id': target_id,
'target_id1': target_id1,
'target_id2': target_id2,
}
def make_plot(self, test_ds, epoch):
result = []
labels = []
with open(os.path.join(self.result_dir,'test_result-%d.txt'%epoch),'w') as txt:
correct_count = 0
failed_count = 0
with tqdm.tqdm(total=len(self.data.test_keys)) as pbar:
for inputs in test_ds:
pred = self.eval_substep(inputs)
result += [pred['feature']]
labels += [pred['target_id']]
for i in range(pred['target_id1'].shape[0]):
txt.write('---\n')
target = pred['target_id'][i].numpy()
txt.write('target: id %d = %s\n'%(target, self.data.glyphs[target-1]))
predid1 = np.argmax(pred['pred_id1'][i])
predid2 = np.argmax(pred['pred_id2'][i])
predid = predid1 * 100 + predid2
if predid == 0:
txt.write('predict: id %d nothing (p=%f)\n'%(predid, pred['pred_id1'][i][predid1] * pred['pred_id2'][i][predid2]))
elif predid > self.data.id_count + 1:
txt.write('predict: id %d nothing (p=%f)\n'%(predid, pred['pred_id1'][i][predid1] * pred['pred_id2'][i][predid2]))
else:
txt.write('predict: id %d = %s (p=%f)\n'%(predid, self.data.glyphs[predid-1], pred['pred_id1'][i][predid1] * pred['pred_id2'][i][predid2]))
if target == predid:
txt.write('Correct!\n')
correct_count += 1
else:
txt.write('Failed!\n')
failed_count += 1
pbar.update(1)
acc = correct_count / (correct_count + failed_count)
txt.write('==============\n')
txt.write('Correct = %d\n'%correct_count)
txt.write('Failed = %d\n'%failed_count)
txt.write('accuracy = %f\n'%acc)
result = np.concatenate(result)
labels = np.concatenate(labels)
print('run UMAP')
X_reduced = umap.UMAP(metric='cosine').fit_transform(result)
fig, ax = plt.subplots(figsize=(50, 50))
ax.scatter(X_reduced[:, 0], X_reduced[:, 1], c=labels, cmap=plt.get_cmap('hsv'))
print('plot UMAP')
for i, label in enumerate(labels):
ax.annotate(self.data.glyphs[label-1], (X_reduced[i,0], X_reduced[i,1]))
plt.savefig(os.path.join(self.result_dir,'test_result-%d.png'%epoch), dpi=300)
plt.close('all')
return acc
def eval():
encoder = SimpleEncodeDecoder()
encoder.eval()
if __name__ == '__main__':
eval()
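# Note (added for clarity): SimpleEncodeDecoder restores the latest checkpoint
# from ./result/step1/, so this script assumes an earlier training step has
# already run; test_result-<epoch>.txt/.png are written to ./result/plot/.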
| [
"tensorflow.train.Checkpoint",
"tensorflow.config.list_physical_devices",
"tensorflow.nn.softmax",
"umap.UMAP",
"net.FeatureBlock",
"matplotlib.pyplot.close",
"net.FontData",
"net.SimpleDecoderBlock",
"numpy.concatenate",
"tensorflow.train.CheckpointManager",
"matplotlib.use",
"numpy.argmax",
"tensorflow.keras.Input",
"tensorflow.train.latest_checkpoint",
"matplotlib.pyplot.get_cmap",
"os.makedirs",
"tensorflow.config.experimental.set_memory_growth",
"tensorflow.keras.optimizers.schedules.ExponentialDecay",
"os.path.join",
"tensorflow.keras.optimizers.Adam",
"os.path.basename",
"tensorflow.keras.Model",
"matplotlib.pyplot.subplots"
] | [((67, 105), 'tensorflow.config.list_physical_devices', 'tf.config.list_physical_devices', (['"""GPU"""'], {}), "('GPU')\n", (98, 105), True, 'import tensorflow as tf\n'), ((355, 376), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (369, 376), False, 'import matplotlib\n'), ((115, 182), 'tensorflow.config.experimental.set_memory_growth', 'tf.config.experimental.set_memory_growth', (['physical_devices[0]', '(True)'], {}), '(physical_devices[0], True)\n', (155, 182), True, 'import tensorflow as tf\n'), ((820, 863), 'os.makedirs', 'os.makedirs', (['self.result_dir'], {'exist_ok': '(True)'}), '(self.result_dir, exist_ok=True)\n', (831, 863), False, 'import os, time, csv\n'), ((1012, 1080), 'tensorflow.keras.optimizers.schedules.ExponentialDecay', 'tf.keras.optimizers.schedules.ExponentialDecay', (['(0.001)', '(100000.0)', '(0.5)'], {}), '(0.001, 100000.0, 0.5)\n', (1058, 1080), True, 'import tensorflow as tf\n'), ((1100, 1128), 'tensorflow.keras.optimizers.Adam', 'tf.keras.optimizers.Adam', (['lr'], {}), '(lr)\n', (1124, 1128), True, 'import tensorflow as tf\n'), ((1153, 1171), 'net.FeatureBlock', 'net.FeatureBlock', ([], {}), '()\n', (1169, 1171), False, 'import net\n'), ((1226, 1250), 'net.SimpleDecoderBlock', 'net.SimpleDecoderBlock', ([], {}), '()\n', (1248, 1250), False, 'import net\n'), ((1475, 1534), 'tensorflow.keras.Model', 'tf.keras.Model', (['inputs', 'outputs'], {'name': '"""SimpleEncodeDecoder"""'}), "(inputs, outputs, name='SimpleEncodeDecoder')\n", (1489, 1534), True, 'import tensorflow as tf\n'), ((1556, 1619), 'tensorflow.train.Checkpoint', 'tf.train.Checkpoint', ([], {'optimizer': 'self.optimizer', 'model': 'self.model'}), '(optimizer=self.optimizer, model=self.model)\n', (1575, 1619), True, 'import tensorflow as tf\n'), ((1664, 1706), 'tensorflow.train.latest_checkpoint', 'tf.train.latest_checkpoint', (['checkpoint_dir'], {}), '(checkpoint_dir)\n', (1690, 1706), True, 'import tensorflow as tf\n'), ((1763, 1842), 'tensorflow.train.CheckpointManager', 'tf.train.CheckpointManager', (['checkpoint'], {'directory': 'checkpoint_dir', 'max_to_keep': '(2)'}), '(checkpoint, directory=checkpoint_dir, max_to_keep=2)\n', (1789, 1842), True, 'import tensorflow as tf\n'), ((2131, 2145), 'net.FontData', 'net.FontData', ([], {}), '()\n', (2143, 2145), False, 'import net\n'), ((2642, 2675), 'tensorflow.nn.softmax', 'tf.nn.softmax', (["outputs['id1']", '(-1)'], {}), "(outputs['id1'], -1)\n", (2655, 2675), True, 'import tensorflow as tf\n'), ((2695, 2728), 'tensorflow.nn.softmax', 'tf.nn.softmax', (["outputs['id2']", '(-1)'], {}), "(outputs['id2'], -1)\n", (2708, 2728), True, 'import tensorflow as tf\n'), ((5079, 5101), 'numpy.concatenate', 'np.concatenate', (['result'], {}), '(result)\n', (5093, 5101), True, 'import numpy as np\n'), ((5119, 5141), 'numpy.concatenate', 'np.concatenate', (['labels'], {}), '(labels)\n', (5133, 5141), True, 'import numpy as np\n'), ((5256, 5286), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(50, 50)'}), '(figsize=(50, 50))\n', (5268, 5286), True, 'import matplotlib.pyplot as plt\n'), ((5636, 5652), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (5645, 5652), True, 'import matplotlib.pyplot as plt\n'), ((1322, 1357), 'tensorflow.keras.Input', 'tf.keras.Input', ([], {'shape': '(128, 128, 3)'}), '(shape=(128, 128, 3))\n', (1336, 1357), True, 'import tensorflow as tf\n'), ((5561, 5620), 'os.path.join', 'os.path.join', (['self.result_dir', "('test_result-%d.png' % epoch)"], {}), "(self.result_dir, 
'test_result-%d.png' % epoch)\n", (5573, 5620), False, 'import os, time, csv\n'), ((3068, 3127), 'os.path.join', 'os.path.join', (['self.result_dir', "('test_result-%d.txt' % epoch)"], {}), "(self.result_dir, 'test_result-%d.txt' % epoch)\n", (3080, 3127), False, 'import os, time, csv\n'), ((5189, 5215), 'umap.UMAP', 'umap.UMAP', ([], {'metric': '"""cosine"""'}), "(metric='cosine')\n", (5198, 5215), False, 'import umap\n'), ((5355, 5374), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['"""hsv"""'], {}), "('hsv')\n", (5367, 5374), True, 'import matplotlib.pyplot as plt\n'), ((3754, 3784), 'numpy.argmax', 'np.argmax', (["pred['pred_id1'][i]"], {}), "(pred['pred_id1'][i])\n", (3763, 3784), True, 'import numpy as np\n'), ((3819, 3849), 'numpy.argmax', 'np.argmax', (["pred['pred_id2'][i]"], {}), "(pred['pred_id2'][i])\n", (3828, 3849), True, 'import numpy as np\n'), ((1923, 1945), 'os.path.basename', 'os.path.basename', (['last'], {}), '(last)\n', (1939, 1945), False, 'import os, time, csv\n')] |
from __future__ import absolute_import, division, print_function
import logging
import docker
import tempfile
import requests
from requests.exceptions import RequestException
import json
import pprint
import time
import re
import os
import tarfile
import sys
from cloudpickle import CloudPickler
import pickle
import numpy as np
from google.protobuf.json_format import MessageToDict
if sys.version_info < (3, 0):
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
PY3 = False
else:
from io import BytesIO as StringIO
PY3 = True
import grpc
from .rpc import model_pb2_grpc
from .rpc import model_pb2
from .rpc import prediction_pb2_grpc
from .rpc import prediction_pb2
from .rpc import management_pb2
from .rpc import management_pb2_grpc
from .container_manager import CONTAINERLESS_MODEL_IMAGE, ClusterAdapter
from .exceptions import ClipperException, UnconnectedException
from .version import __version__, __registry__
from . import graph_parser
DEFAULT_LABEL = []
DEFAULT_PREDICTION_CACHE_SIZE_BYTES = 33554432
CLIPPER_TEMP_DIR = "/tmp/clipper" # Used Internally for Test; Not Windows Compatible
logging.basicConfig(
format='%(asctime)s %(levelname)-8s %(message)s',
datefmt='%y-%m-%d:%H:%M:%S',
level=logging.INFO)
# logging.basicConfig(
# format='%(asctime)s %(levelname)-8s [%(filename)s:%(lineno)d] %(message)s',
# datefmt='%y-%m-%d:%H:%M:%S',
# level=logging.INFO)
logger = logging.getLogger(__name__)
deploy_regex_str = r"[a-z0-9]([-a-z0-9]*[a-z0-9])?\Z"
deployment_regex = re.compile(deploy_regex_str)
def _validate_versioned_model_name(name, version):
if deployment_regex.match(name) is None:
raise ClipperException(
"Invalid value: {name}: a model name must be a valid DNS-1123 "
" subdomain. It must consist of lower case "
"alphanumeric characters, '-' or '.', and must start and end with "
"an alphanumeric character (e.g. 'example.com', regex used for "
"validation is '{reg}'".format(name=name, reg=deploy_regex_str))
if deployment_regex.match(version) is None:
raise ClipperException(
"Invalid value: {version}: a model version must be a valid DNS-1123 "
" subdomain. It must consist of lower case "
"alphanumeric characters, '-' or '.', and must start and end with "
"an alphanumeric character (e.g. 'example.com', regex used for "
"validation is '{reg}'".format(
version=version, reg=deploy_regex_str))
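# For example (illustrative, not from the original source):
#   _validate_versioned_model_name("music-recommender", "1")  # passes
#   _validate_versioned_model_name("Music_Recommender", "1")  # raises ClipperException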
class ClipperConnection(object):
def __init__(self, container_manager):
self.connected = False
self.cm = container_manager
#############TEST################
self.runtime_dag = ""
self.lock = False
#################################
self.logger = ClusterAdapter(logger, {
'cluster_name': self.cm.cluster_identifier
})
def start_clipper(self,
mgmt_frontend_image='{}/management_frontend:{}'.format(
__registry__, __version__),
cache_size=DEFAULT_PREDICTION_CACHE_SIZE_BYTES):
try:
self.cm.start_clipper(mgmt_frontend_image)
# while True:
# try:
# query_frontend_url = "http://{host}/metrics".format(
# host=self.cm.get_query_addr())
# mgmt_frontend_url = "http://{host}/admin/ping".format(
# host=self.cm.get_admin_addr())
# for name, url in [('query frontend', query_frontend_url),
# ('management frontend', mgmt_frontend_url)]:
# r = requests.get(url, timeout=5)
# if r.status_code != requests.codes.ok:
# raise RequestException(
# "{name} end point {url} health check failed".format(name=name, url=url))
# break
# except RequestException as e:
# self.logger.info("Clipper still initializing: \n {}".format(e))
# time.sleep(1)
self.logger.info("Clipper is running")
self.connected = True
except ClipperException as e:
self.logger.warning("Error starting Clipper: {}".format(e.msg))
raise e
def connect(self):
"""Connect to a running Clipper cluster."""
self.cm.connect()
self.connected = True
self.logger.info(
"Successfully connected to Clipper cluster at {}".format(
self.cm.get_query_addr()))
def build_and_deploy_DAG(self,
name,
version,
dag_description,
labels):
if not self.connected:
raise UnconnectedException()
def build_and_deploy_model(self,
name,
version,
input_type,
model_data_path,
base_image,
labels=None,
container_registry=None,
num_replicas=1,
batch_size=-1,
pkgs_to_install=None):
if not self.connected:
raise UnconnectedException()
image = self.build_model(name, version, model_data_path, base_image,
container_registry, pkgs_to_install)
self.deploy_model(name, version, input_type, image, labels,
num_replicas, batch_size)
def build_model(self,
name,
version,
model_data_path,
base_image,
container_registry=None,
pkgs_to_install=None):
version = str(version)
_validate_versioned_model_name(name, version)
run_cmd = ''
if pkgs_to_install:
run_as_lst = 'RUN apt-get -y install build-essential && pip install'.split(
' ')
run_cmd = ' '.join(run_as_lst + pkgs_to_install)
with tempfile.NamedTemporaryFile(
mode="w+b", suffix="tar") as context_file:
# Create build context tarfile
with tarfile.TarFile(
fileobj=context_file, mode="w") as context_tar:
context_tar.add(model_data_path)
# From https://stackoverflow.com/a/740854/814642
try:
df_contents = StringIO(
str.encode(
"FROM {container_name}\n{run_command}\nCOPY {data_path} /model/\n".
format(
container_name=base_image,
data_path=model_data_path,
run_command=run_cmd)))
df_tarinfo = tarfile.TarInfo('Dockerfile')
df_contents.seek(0, os.SEEK_END)
df_tarinfo.size = df_contents.tell()
df_contents.seek(0)
context_tar.addfile(df_tarinfo, df_contents)
except TypeError:
df_contents = StringIO(
"FROM {container_name}\n{run_command}\nCOPY {data_path} /model/\n".
format(
container_name=base_image,
data_path=model_data_path,
run_command=run_cmd))
df_tarinfo = tarfile.TarInfo('Dockerfile')
df_contents.seek(0, os.SEEK_END)
df_tarinfo.size = df_contents.tell()
df_contents.seek(0)
context_tar.addfile(df_tarinfo, df_contents)
# Exit Tarfile context manager to finish the tar file
# Seek back to beginning of file for reading
context_file.seek(0)
image = "{cluster}-{name}:{version}".format(
cluster=self.cm.cluster_identifier, name=name, version=version)
if container_registry is not None:
image = "{reg}/{image}".format(
reg=container_registry, image=image)
docker_client = docker.from_env()
self.logger.info(
"Building model Docker image with model data from {}".format(
model_data_path))
image_result, build_logs = docker_client.images.build(
fileobj=context_file, custom_context=True, tag=image)
for b in build_logs:
if 'stream' in b and b['stream'] != '\n': #log build steps only
self.logger.info(b['stream'].rstrip())
self.logger.info("Pushing model Docker image to {}".format(image))
for line in docker_client.images.push(repository=image, stream=True):
self.logger.debug(line)
return image
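    # For reference (illustrative sketch): with base_image="my/base:latest",
    # model_data_path="model_dir" and pkgs_to_install=["nltk"], the Dockerfile
    # written into the in-memory build context above is essentially:
    #   FROM my/base:latest
    #   RUN apt-get -y install build-essential && pip install nltk
    #   COPY model_dir /model/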
def deploy_model(self,
name,
version,
input_type,
image,
labels=None,
num_replicas=1,
batch_size=-1):
if not self.connected:
raise UnconnectedException()
version = str(version)
_validate_versioned_model_name(name, version)
self.cm.deploy_model(
name=name,
version=version,
input_type=input_type,
image=image,
num_replicas=num_replicas)
# self.register_model(
# name,
# version,
# input_type,
# image=image,
# labels=labels,
# batch_size=batch_size)
self.logger.info("Done deploying model {name}:{version}.".format(
name=name, version=version))
def connect_host(self, host_ip, host_port):
self.cm.connect_host(host_ip, "2375")
def add_model(self,
model_name,
model_version,
image,
input_type="string",
output_type="string",
stateful=False):
modelinfo = management_pb2.ModelInfo(modelname=model_name,
modelversion=model_version,
image=image,
inputtype=input_type,
outputtype=output_type,
stateful=stateful).SerializeToString()
self.cm.grpc_client("zsxhku/grpcclient", "--addmodel %s %s %s "%("localhost","33333", modelinfo))
return
def deploy_DAG(self, name, version, dag_description=None, runtime=""):
if not self.connected:
raise UnconnectedException()
# model_info = self.get_all_models()
dag_description_ = dag_description
#self.logger.info("dag_description: %s"%(dag_description_))
#if(dag_description==None):
# dag_description_=self.get_dag_description()
nodes_list = graph_parser.get_all_nodes(dag_description_)
container_info = []
proxy_info = []
backup_info = []
count = 1
for model_info in nodes_list:
model_name,model_version,model_image = graph_parser.get_name_version(model_info)
container_name, container_id, host = self.cm.add_replica(model_name, model_version, "22222", model_image, runtime=runtime)
self.logger.info("Started %s with container %s:%s (HOST:%s)"%(model_name, container_name, container_id, host))
container_ip = self.cm.get_container_ip(host, container_id)
proxy_name, proxy_id = self.cm.set_proxy("mxschen/ai-proxy:latest", container_name, container_ip, host)
## get the ip of the instances
proxy_ip = self.cm.get_container_ip(host, proxy_id)
proxy_info.append([proxy_name,proxy_id,proxy_ip])
container_info.append([container_name, container_id, container_ip])
if graph_parser.is_stateful(model_info):
backup_name, backup_id, backup_host = self.cm.add_replica(model_name, model_version, "22222", model_image)
self.logger.info("[Backup] Started %s with container %s:%s (HOST:%s)"%(model_name, backup_name, backup_id, backup_host))
backup_ip = self.cm.get_container_ip(backup_host, backup_id)
backup_proxy_name, backup_proxy_id = self.cm.set_proxy("mxschen/ai-proxy:latest", backup_name, backup_ip, backup_host)
backup_proxy_ip= self.cm.get_container_ip(backup_host, backup_proxy_id)
backup_info.append([backup_name, backup_id, backup_ip, backup_proxy_name, backup_proxy_id, backup_proxy_ip])
else:
backup_info.append([])
#self.cm.check_container_status(host, container_id, 0.3, 20)
#self.cm.check_container_status(host, proxy_id, 0.3, 20)
#time.sleep(25)
#self.logger.info("proxy_ip:%s"%(proxy_ip))
self.cm.grpc_client("zsxhku/grpcclient", "--setmodel %s %s %s %s %s %s"%(proxy_ip, "22223", container_name, count, container_ip, "22222" ))
self.logger.info('[DEPLOYMENT] Finished setting model info to proxy')
if(graph_parser.is_stateful(model_info)):
self.cm.grpc_client("zsxhku/grpcclient", "--setmodel %s %s %s %s %s %s"%(backup_info[-1][-1], "22223", backup_info[-1][0], count, backup_info[-1][2], "22222" ))
self.logger.info('[DEPLOYMENT][Backup] Finished setting model info to proxy')
count += 1
# self.cm.grpc_client("zsxhku/grpcclient", "--setproxy %s %s %s %s"%(container_ip, "22222", proxy_name, "22223"))
# self.logger.info('[DEPLOYMENT] Finished setting proxy info to model')
# if(graph_parser.is_stateful(model_info)):
# self.cm.grpc_client("zsxhku/grpcclient", "--setproxy %s %s %s %s"%(backup_info[-1][2], "22222", backup_info[-1][3], "22223"))
# self.logger.info('[DEPLOYMENT][Backup] Finished setting proxy info to model')
runtime_dag_id = name+version+str(1)
## Starting frontend
frontend_name, frontend_container_id = self.cm.add_frontend("localhost", "mxschen/frontend",runtime_dag_id, proxy_info[0][2], "22223", max_workers=2048)
frontend_ip = self.cm.get_container_ip("localhost", frontend_container_id)
frontend_info = [frontend_name, frontend_container_id, frontend_ip]
self.logger.info("[DEPLOYMENT] ################ Started Frontend #################")
#expand the dag description with the model/proxy instances info
expanded_dag = graph_parser.expand_dag(dag_description_, name, version, container_info, proxy_info, backup_info, frontend_info)
self.runtime_dag = expanded_dag
# TODO: need to modularize
self.cm.grpc_client("zsxhku/grpcclient", "--addruntimedag %s %s %s %s %s %s %s"%('1', name, version, 'old' , self.cm.admin_ip, self.cm.admin_port, expanded_dag))
self.logger.info("Added new runtime DAG to admin daemon\n%s"%(expanded_dag))
#tells the proxy runtime dag info
for tup in proxy_info:
proxy_name = tup[0]
proxy_id = tup[1]
proxy_ip = tup[2]
self.cm.grpc_client("zsxhku/grpcclient", "--setdag %s %s %s"%(proxy_ip, "22223", expanded_dag))
self.logger.info('[DEPLOYMENT] Finished setting DAG for proxy {proxy_name} '.format(proxy_name=proxy_name))
#tells the backups runtime dag info
for tup in backup_info:
if tup:
self.cm.grpc_client("zsxhku/grpcclient", "--setdag %s %s %s"%(tup[-1], "22223", expanded_dag))
self.logger.info('[DEPLOYMENT][Backup] Finished setting DAG for proxy {proxy_name} '.format(proxy_name=tup[-1]))
return
def inspect_instance(self):
"""Fetches performance metrics from the running Clipper cluster.
Returns
-------
str
The JSON string containing the current set of metrics
for this instance. On error, the string will be an error message
(not JSON formatted).
Raises
------
:py:exc:`clipper.UnconnectedException`
:py:exc:`clipper.ClipperException`
"""
def get_query_addr(self):
"""Get the IP address at which the query frontend can be reached request predictions.
Returns
-------
str
The address as an IP address or hostname.
Raises
------
:py:exc:`clipper.UnconnectedException`
"""
if not self.connected:
raise UnconnectedException()
return self.cm.get_query_addr()
def stop_models(self, model_names):
"""Stops all versions of the specified models.
This is a convenience method to avoid the need to explicitly list all versions
of a model when calling :py:meth:`clipper_admin.ClipperConnection.stop_versioned_models`.
Parameters
----------
model_names : list(str)
A list of model names. All replicas of all versions of each model specified in the list
will be stopped.
Raises
------
:py:exc:`clipper.UnconnectedException`
"""
# if not self.connected:
# raise UnconnectedException()
# model_info = self.get_all_models(verbose=True)
# model_dict = {}
# for m in model_info:
# if m["model_name"] in model_names:
# if m["model_name"] in model_dict:
# model_dict[m["model_name"]].append(m["model_version"])
# else:
# model_dict[m["model_name"]] = [m["model_version"]]
# self.cm.stop_models(model_dict)
# pp = pprint.PrettyPrinter(indent=4)
# self.logger.info(
# "Stopped all containers for these models and versions:\n{}".format(
# pp.pformat(model_dict)))
def stop_versioned_models(self, model_versions_dict):
"""Stops the specified versions of the specified models.
Parameters
----------
model_versions_dict : dict(str, list(str))
            For each entry in the dict, the key is a model name and the value is a list of model
            versions. All replicas for each version of each model will be stopped.
        Raises
        ------
        :py:exc:`clipper.UnconnectedException`
Note
----
This method will stop the currently deployed versions of models if you specify them. You
almost certainly want to use one of the other stop_* methods. Use with caution.
"""
# if not self.connected:
# raise UnconnectedException()
# self.cm.stop_models(model_versions_dict)
# pp = pprint.PrettyPrinter(indent=4)
# self.logger.info(
# "Stopped all containers for these models and versions:\n{}".format(
# pp.pformat(model_versions_dict)))
def stop_inactive_model_versions(self, model_names):
"""Stops all model containers serving stale versions of the specified models.
For example, if you have deployed versions 1, 2, and 3 of model "music_recommender"
and version 3 is the current version::
clipper_conn.stop_inactive_model_versions(["music_recommender"])
will stop any containers serving versions 1 and 2 but will leave containers serving
version 3 untouched.
Parameters
----------
model_names : list(str)
The names of the models whose old containers you want to stop.
Raises
------
:py:exc:`clipper.UnconnectedException`
"""
# if not self.connected:
# raise UnconnectedException()
# model_info = self.get_all_models(verbose=True)
# model_dict = {}
# for m in model_info:
# if m["model_name"] in model_names and not m["is_current_version"]:
# if m["model_name"] in model_dict:
# model_dict[m["model_name"]].append(m["model_version"])
# else:
# model_dict[m["model_name"]] = [m["model_version"]]
# self.cm.stop_models(model_dict)
# pp = pprint.PrettyPrinter(indent=4)
# self.logger.info(
# "Stopped all containers for these models and versions:\n{}".format(
# pp.pformat(model_dict)))
def stop_all_model_containers(self):
"""Stops all model containers started via Clipper admin commands.
This method can be used to clean up leftover Clipper model containers even if the
Clipper management frontend or Redis has crashed. It can also be called without calling
``connect`` first.
"""
self.cm.stop_all_model_containers()
self.logger.info("Stopped all Clipper model containers")
def stop_all(self, graceful=True):
"""Stops all processes that were started via Clipper admin commands.
This includes the query and management frontend Docker containers and all model containers.
If you started Redis independently, this will not affect Redis. It can also be called
without calling ``connect`` first.
If graceful=False, Clipper will issue Docker Kill if it's in the Docker Mode. This parameter
will take not effect in Kubernetes.
"""
self.cm.stop_all(graceful=graceful)
self.logger.info(
"Stopped all Clipper cluster and all model containers")
| [
"logging.basicConfig",
"logging.getLogger",
"re.compile",
"tarfile.TarFile",
"docker.from_env",
"tempfile.NamedTemporaryFile",
"tarfile.TarInfo"
] | [((1180, 1302), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': '"""%(asctime)s %(levelname)-8s %(message)s"""', 'datefmt': '"""%y-%m-%d:%H:%M:%S"""', 'level': 'logging.INFO'}), "(format='%(asctime)s %(levelname)-8s %(message)s',\n datefmt='%y-%m-%d:%H:%M:%S', level=logging.INFO)\n", (1199, 1302), False, 'import logging\n'), ((1489, 1516), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1506, 1516), False, 'import logging\n'), ((1590, 1618), 're.compile', 're.compile', (['deploy_regex_str'], {}), '(deploy_regex_str)\n', (1600, 1618), False, 'import re\n'), ((6449, 6502), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', ([], {'mode': '"""w+b"""', 'suffix': '"""tar"""'}), "(mode='w+b', suffix='tar')\n", (6476, 6502), False, 'import tempfile\n'), ((8593, 8610), 'docker.from_env', 'docker.from_env', ([], {}), '()\n', (8608, 8610), False, 'import docker\n'), ((6597, 6644), 'tarfile.TarFile', 'tarfile.TarFile', ([], {'fileobj': 'context_file', 'mode': '"""w"""'}), "(fileobj=context_file, mode='w')\n", (6612, 6644), False, 'import tarfile\n'), ((7235, 7264), 'tarfile.TarInfo', 'tarfile.TarInfo', (['"""Dockerfile"""'], {}), "('Dockerfile')\n", (7250, 7264), False, 'import tarfile\n'), ((7875, 7904), 'tarfile.TarInfo', 'tarfile.TarInfo', (['"""Dockerfile"""'], {}), "('Dockerfile')\n", (7890, 7904), False, 'import tarfile\n')] |
import keras
import numpy as np
from keras import layers
# `text`, `maxlen`, `chars`, `char_indices`, `x` and `y` are assumed to be
# produced by an earlier character-vectorization step that is not shown here.
# Single-layer LSTM model for next-character prediction
model = keras.models.Sequential()
model.add(layers.LSTM(128, input_shape=(maxlen, len(chars))))
model.add(layers.Dense(len(chars), activation='softmax'))
# Model compilation configuration
optimizer = keras.optimizers.RMSprop(lr=0.01)
model.compile(loss='categorical_crossentropy', optimizer=optimizer)
# Function to sample the next character given the model’s predictions
def sample(preds, temperature=1.0):
preds = np.asarray(preds).astype('float64')
preds = np.log(preds) / temperature
exp_preds = np.exp(preds)
preds = exp_preds / np.sum(exp_preds)
    probas = np.random.multinomial(1, preds, 1)
return np.argmax(probas)
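# Editor's note (illustrative sketch, values not from the original listing):
# dividing the log-probabilities by `temperature` before re-normalizing means
# temperatures below 1.0 sharpen the distribution toward the most likely
# character, while temperatures above 1.0 flatten it and give more surprising
# samples. For example:
#   sample(np.array([0.1, 0.7, 0.2]), temperature=0.2)  # almost always returns 1
#   sample(np.array([0.1, 0.7, 0.2]), temperature=1.2)  # returns 0 or 2 far more often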
# Text-generation loop
import sys
import random
# Trains the model for 59 epochs
for epoch in range(1, 60):
print(f'Epoch: {epoch}')
model.fit(x, y, batch_size=128, epochs=1)
# Selects a text seed at random
start_index = random.randint(0, len(text) - maxlen - 1)
generated_text = text[start_index: start_index + maxlen]
print(f'--- Generating with seed: {generated_text} ---')
# Tries a range of different sampling temperatures
for temperature in [0.2, 0.5, 1.0, 1.2]:
print(f'--- Temperature {temperature} ---')
sys.stdout.write(generated_text)
# Generates 400 characters, starting from the seed text
for i in range(400):
sampled = np.zeros((1, maxlen, len(chars)))
for t, char in enumerate(generated_text):
sampled[0, t, char_indices[char]] = 1.
# Samples the next character
preds = model.predict(sampled, verbose=0)[0]
next_index = sample(preds, temperature)
next_char = chars[next_index]
generated_text += next_char
generated_text = generated_text[1:]
sys.stdout.write(next_char)
| [
"sys.stdout.write"
] | [((1298, 1330), 'sys.stdout.write', 'sys.stdout.write', (['generated_text'], {}), '(generated_text)\n', (1314, 1330), False, 'import sys\n'), ((1884, 1911), 'sys.stdout.write', 'sys.stdout.write', (['next_char'], {}), '(next_char)\n', (1900, 1911), False, 'import sys\n')] |
"""
Example of ModECI MDF - Testing state variables
"""
from modeci_mdf.mdf import *
import sys
def main():
mod = Model(id="States")
mod_graph = Graph(id="state_example")
mod.graphs.append(mod_graph)
## Counter node
counter_node = Node(id="counter_node")
p1 = Parameter(id="increment", value=1)
counter_node.parameters.append(p1)
p2 = Parameter(id="count", value="count + increment")
counter_node.parameters.append(p2)
op1 = OutputPort(id="out_port", value=p2.id)
counter_node.output_ports.append(op1)
mod_graph.nodes.append(counter_node)
## Sine node...
sine_node = Node(id="sine_node")
sine_node.parameters.append(Parameter(id="amp", value=3))
sine_node.parameters.append(Parameter(id="period", value=0.4))
s1 = Parameter(
id="level", default_initial_value=0, time_derivative="6.283185 * rate / period"
)
sine_node.parameters.append(s1)
s2 = Parameter(
id="rate",
default_initial_value=1,
time_derivative="-1 * 6.283185 * level / period",
)
sine_node.parameters.append(s2)
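    # Editor's note (inferred from the two expressions above): level and rate
    # form a simple harmonic oscillator, d(level)/dt = 2*pi*rate/period and
    # d(rate)/dt = -2*pi*level/period, so with level(0)=0 and rate(0)=1 the
    # `level` parameter traces sin(2*pi*t/period) and the output port below
    # emits the scaled sine wave amp * level.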
op1 = OutputPort(id="out_port", value="amp * level")
sine_node.output_ports.append(op1)
mod_graph.nodes.append(sine_node)
new_file = mod.to_json_file("%s.json" % mod.id)
new_file = mod.to_yaml_file("%s.yaml" % mod.id)
if "-run" in sys.argv:
verbose = True
# verbose = False
from modeci_mdf.utils import load_mdf, print_summary
from modeci_mdf.execution_engine import EvaluableGraph
eg = EvaluableGraph(mod_graph, verbose)
dt = 0.01
duration = 2
t = 0
recorded = {}
times = []
s = []
while t <= duration:
times.append(t)
print("====== Evaluating at t = %s ======" % (t))
if t == 0:
eg.evaluate() # replace with initialize?
else:
eg.evaluate(time_increment=dt)
s.append(eg.enodes["sine_node"].evaluable_outputs["out_port"].curr_value)
t += dt
if "-nogui" not in sys.argv:
import matplotlib.pyplot as plt
plt.plot(times, s)
plt.show()
if "-graph" in sys.argv:
mod.to_graph_image(
engine="dot",
output_format="png",
view_on_render=False,
level=3,
filename_root="states",
only_warn_on_fail=True, # Makes sure test of this doesn't fail on Windows on GitHub Actions
)
return mod_graph
if __name__ == "__main__":
main()
| [
"matplotlib.pyplot.plot",
"matplotlib.pyplot.show",
"modeci_mdf.execution_engine.EvaluableGraph"
] | [((1568, 1602), 'modeci_mdf.execution_engine.EvaluableGraph', 'EvaluableGraph', (['mod_graph', 'verbose'], {}), '(mod_graph, verbose)\n', (1582, 1602), False, 'from modeci_mdf.execution_engine import EvaluableGraph\n'), ((2183, 2201), 'matplotlib.pyplot.plot', 'plt.plot', (['times', 's'], {}), '(times, s)\n', (2191, 2201), True, 'import matplotlib.pyplot as plt\n'), ((2214, 2224), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2222, 2224), True, 'import matplotlib.pyplot as plt\n')] |
# -*- coding: utf-8 -*-
'''
Tests for the file state
'''
# Import python libs
from __future__ import absolute_import
import errno
import os
import textwrap
import tempfile
# Import Salt Testing libs
from tests.support.case import ModuleCase
from tests.support.paths import TMP_STATE_TREE
from tests.support.mixins import SaltReturnAssertsMixin
# Import salt libs
import salt.utils
IS_WINDOWS = salt.utils.is_windows()
class CMDTest(ModuleCase, SaltReturnAssertsMixin):
'''
Validate the cmd state
'''
def test_run_simple(self):
'''
cmd.run
'''
cmd = 'dir' if IS_WINDOWS else 'ls'
ret = self.run_state('cmd.run', name=cmd, cwd=tempfile.gettempdir())
self.assertSaltTrueReturn(ret)
def test_test_run_simple(self):
'''
cmd.run test interface
'''
ret = self.run_state('cmd.run', name='ls',
cwd=tempfile.gettempdir(), test=True)
self.assertSaltNoneReturn(ret)
class CMDRunRedirectTest(ModuleCase, SaltReturnAssertsMixin):
'''
Validate the cmd state of run_redirect
'''
def setUp(self):
self.state_name = 'run_redirect'
state_filename = self.state_name + '.sls'
self.state_file = os.path.join(TMP_STATE_TREE, state_filename)
# Create the testfile and release the handle
fd, self.test_file = tempfile.mkstemp()
try:
os.close(fd)
except OSError as exc:
if exc.errno != errno.EBADF:
raise exc
        # Create the temporary path file and release the handle
fd, self.test_tmp_path = tempfile.mkstemp()
try:
os.close(fd)
except OSError as exc:
if exc.errno != errno.EBADF:
raise exc
super(CMDRunRedirectTest, self).setUp()
def tearDown(self):
for path in (self.state_file, self.test_tmp_path, self.test_file):
try:
os.remove(path)
except OSError:
# Not all of the tests leave files around that we want to remove
# As some of the tests create the sls files in the test itself,
# And some are using files in the integration test file state tree.
pass
super(CMDRunRedirectTest, self).tearDown()
def test_run_unless(self):
'''
test cmd.run unless
'''
state_key = 'cmd_|-{0}_|-{0}_|-run'.format(self.test_tmp_path)
with salt.utils.fopen(self.state_file, 'w') as fb_:
fb_.write(textwrap.dedent('''
{0}:
cmd.run:
- unless: echo cheese > {1}
'''.format(self.test_tmp_path, self.test_file)))
ret = self.run_function('state.sls', [self.state_name])
self.assertTrue(ret[state_key]['result'])
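        # Editor's note: the `unless` command above (echo cheese > <test_file>)
        # exits 0, so Salt skips the state's main command and the state still
        # reports success, which is what the assertion checks.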
def test_run_unless_multiple_cmds(self):
'''
test cmd.run using multiple unless options where the first cmd in the
list will pass, but the second will fail. This tests the fix for issue
#35384. (The fix is in PR #35545.)
'''
sls = self.run_function('state.sls', mods='issue-35384')
self.assertSaltTrueReturn(sls)
# We must assert against the comment here to make sure the comment reads that the
# command "echo "hello"" was run. This ensures that we made it to the last unless
# command in the state. If the comment reads "unless execution succeeded", or similar,
# then the unless state run bailed out after the first unless command succeeded,
# which is the bug we're regression testing for.
self.assertEqual(sls['cmd_|-cmd_run_unless_multiple_|-echo "hello"_|-run']['comment'],
'Command "echo "hello"" run')
def test_run_creates_exists(self):
'''
test cmd.run creates already there
'''
state_key = 'cmd_|-echo >> {0}_|-echo >> {0}_|-run'.format(self.test_file)
with salt.utils.fopen(self.state_file, 'w') as fb_:
fb_.write(textwrap.dedent('''
echo >> {0}:
cmd.run:
- creates: {0}
'''.format(self.test_file)))
ret = self.run_function('state.sls', [self.state_name])
self.assertTrue(ret[state_key]['result'])
self.assertEqual(len(ret[state_key]['changes']), 0)
def test_run_creates_new(self):
'''
test cmd.run creates not there
'''
os.remove(self.test_file)
state_key = 'cmd_|-echo >> {0}_|-echo >> {0}_|-run'.format(self.test_file)
with salt.utils.fopen(self.state_file, 'w') as fb_:
fb_.write(textwrap.dedent('''
echo >> {0}:
cmd.run:
- creates: {0}
'''.format(self.test_file)))
ret = self.run_function('state.sls', [self.state_name])
self.assertTrue(ret[state_key]['result'])
self.assertEqual(len(ret[state_key]['changes']), 4)
def test_run_redirect(self):
'''
test cmd.run with shell redirect
'''
state_key = 'cmd_|-echo test > {0}_|-echo test > {0}_|-run'.format(self.test_file)
with salt.utils.fopen(self.state_file, 'w') as fb_:
fb_.write(textwrap.dedent('''
echo test > {0}:
cmd.run
'''.format(self.test_file)))
ret = self.run_function('state.sls', [self.state_name])
self.assertTrue(ret[state_key]['result'])
class CMDRunWatchTest(ModuleCase, SaltReturnAssertsMixin):
'''
Validate the cmd state of run_watch
'''
def setUp(self):
self.state_name = 'run_watch'
state_filename = self.state_name + '.sls'
self.state_file = os.path.join(TMP_STATE_TREE, state_filename)
super(CMDRunWatchTest, self).setUp()
def tearDown(self):
os.remove(self.state_file)
super(CMDRunWatchTest, self).tearDown()
def test_run_watch(self):
'''
test cmd.run watch
'''
saltines_key = 'cmd_|-saltines_|-echo changed=true_|-run'
biscuits_key = 'cmd_|-biscuits_|-echo biscuits_|-wait'
with salt.utils.fopen(self.state_file, 'w') as fb_:
fb_.write(textwrap.dedent('''
saltines:
cmd.run:
- name: echo changed=true
- cwd: /
- stateful: True
biscuits:
cmd.wait:
- name: echo biscuits
- cwd: /
- watch:
- cmd: saltines
'''))
ret = self.run_function('state.sls', [self.state_name])
self.assertTrue(ret[saltines_key]['result'])
self.assertTrue(ret[biscuits_key]['result'])
| [
"textwrap.dedent",
"os.close",
"os.path.join",
"tempfile.gettempdir",
"tempfile.mkstemp",
"os.remove"
] | [((1258, 1302), 'os.path.join', 'os.path.join', (['TMP_STATE_TREE', 'state_filename'], {}), '(TMP_STATE_TREE, state_filename)\n', (1270, 1302), False, 'import os\n'), ((1386, 1404), 'tempfile.mkstemp', 'tempfile.mkstemp', ([], {}), '()\n', (1402, 1404), False, 'import tempfile\n'), ((1628, 1646), 'tempfile.mkstemp', 'tempfile.mkstemp', ([], {}), '()\n', (1644, 1646), False, 'import tempfile\n'), ((4515, 4540), 'os.remove', 'os.remove', (['self.test_file'], {}), '(self.test_file)\n', (4524, 4540), False, 'import os\n'), ((5800, 5844), 'os.path.join', 'os.path.join', (['TMP_STATE_TREE', 'state_filename'], {}), '(TMP_STATE_TREE, state_filename)\n', (5812, 5844), False, 'import os\n'), ((5923, 5949), 'os.remove', 'os.remove', (['self.state_file'], {}), '(self.state_file)\n', (5932, 5949), False, 'import os\n'), ((1430, 1442), 'os.close', 'os.close', (['fd'], {}), '(fd)\n', (1438, 1442), False, 'import os\n'), ((1672, 1684), 'os.close', 'os.close', (['fd'], {}), '(fd)\n', (1680, 1684), False, 'import os\n'), ((686, 707), 'tempfile.gettempdir', 'tempfile.gettempdir', ([], {}), '()\n', (705, 707), False, 'import tempfile\n'), ((924, 945), 'tempfile.gettempdir', 'tempfile.gettempdir', ([], {}), '()\n', (943, 945), False, 'import tempfile\n'), ((1965, 1980), 'os.remove', 'os.remove', (['path'], {}), '(path)\n', (1974, 1980), False, 'import os\n'), ((6292, 6702), 'textwrap.dedent', 'textwrap.dedent', (['"""\n saltines:\n cmd.run:\n - name: echo changed=true\n - cwd: /\n - stateful: True\n\n biscuits:\n cmd.wait:\n - name: echo biscuits\n - cwd: /\n - watch:\n - cmd: saltines\n """'], {}), '(\n """\n saltines:\n cmd.run:\n - name: echo changed=true\n - cwd: /\n - stateful: True\n\n biscuits:\n cmd.wait:\n - name: echo biscuits\n - cwd: /\n - watch:\n - cmd: saltines\n """\n )\n', (6307, 6702), False, 'import textwrap\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 1999-2018 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
try:
import tiledb
except ImportError: # pragma: no cover
tiledb = None
from ...lib.sparse import SparseNDArray
from ...lib.sparse.core import sps
from ..expressions import datastore
from .utils import get_tiledb_ctx
def _store_tiledb(ctx, chunk):
tiledb_ctx = get_tiledb_ctx(chunk.op.tiledb_config)
uri = chunk.op.tiledb_uri
key = chunk.op.tiledb_key
timestamp = chunk.op.tiledb_timestamp
axis_offsets = chunk.op.axis_offsets
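    # Editor's note: axis_offsets hold this chunk's starting coordinates within
    # the full tensor, so the dense branch below writes into the slice
    # [offset, offset + chunk_length) per axis, and the sparse branch shifts the
    # COO row/col indices by the same offsets before writing.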
if not chunk.issparse():
# dense
to_store = np.ascontiguousarray(ctx[chunk.op.input.key])
slcs = []
for axis in range(chunk.ndim):
axis_offset = axis_offsets[axis]
axis_length = chunk.op.input.shape[axis]
slcs.append(slice(axis_offset, axis_offset + axis_length))
with tiledb.DenseArray(tiledb_ctx, uri, mode='w',
key=key, timestamp=timestamp) as arr:
arr[tuple(slcs)] = to_store
ctx[chunk.key] = np.empty((0,) * chunk.ndim, dtype=chunk.dtype)
else:
# sparse
to_store = ctx[chunk.op.input.key].spmatrix.tocoo()
if to_store.nnz > 0:
with tiledb.SparseArray(tiledb_ctx, uri, mode='w',
key=key, timestamp=timestamp) as arr:
if chunk.ndim == 1:
vec = to_store.col if to_store.shape[0] == 1 else to_store.row
vec += axis_offsets[0]
arr[vec] = to_store.data
else:
i, j = to_store.row + axis_offsets[0], to_store.col + axis_offsets[1]
arr[i, j] = to_store.data
ctx[chunk.key] = SparseNDArray(sps.csr_matrix((0, 0), dtype=chunk.dtype),
shape=chunk.shape)
def register_data_store_handler():
from ...executor import register
register(datastore.TensorTileDBDataStore, _store_tiledb)
| [
"tiledb.SparseArray",
"tiledb.DenseArray",
"numpy.empty",
"numpy.ascontiguousarray"
] | [((1186, 1231), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['ctx[chunk.op.input.key]'], {}), '(ctx[chunk.op.input.key])\n', (1206, 1231), True, 'import numpy as np\n'), ((1650, 1696), 'numpy.empty', 'np.empty', (['((0,) * chunk.ndim)'], {'dtype': 'chunk.dtype'}), '((0,) * chunk.ndim, dtype=chunk.dtype)\n', (1658, 1696), True, 'import numpy as np\n'), ((1471, 1545), 'tiledb.DenseArray', 'tiledb.DenseArray', (['tiledb_ctx', 'uri'], {'mode': '"""w"""', 'key': 'key', 'timestamp': 'timestamp'}), "(tiledb_ctx, uri, mode='w', key=key, timestamp=timestamp)\n", (1488, 1545), False, 'import tiledb\n'), ((1830, 1905), 'tiledb.SparseArray', 'tiledb.SparseArray', (['tiledb_ctx', 'uri'], {'mode': '"""w"""', 'key': 'key', 'timestamp': 'timestamp'}), "(tiledb_ctx, uri, mode='w', key=key, timestamp=timestamp)\n", (1848, 1905), False, 'import tiledb\n')] |
# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
"""
AWS Encryption SDK Decrypt Message Generation manifest handler.
Described in AWS Crypto Tools Test Vector Framework feature #0006 AWS Encryption SDK Decrypt Message Generation.
"""
import json
import os
import uuid
from copy import copy
import attr
import six
from aws_encryption_sdk.caches.local import LocalCryptoMaterialsCache
from aws_encryption_sdk.materials_managers.base import CryptoMaterialsManager
from aws_encryption_sdk.materials_managers.caching import CachingCryptoMaterialsManager
from aws_encryption_sdk.materials_managers.default import DefaultCryptoMaterialsManager
from awses_test_vectors.internal.defaults import ENCODING
from awses_test_vectors.internal.util import (
dictionary_validator,
file_reader,
file_writer,
iterable_validator,
membership_validator,
validate_manifest_type,
)
from awses_test_vectors.manifests.full_message.decrypt import (
DecryptionMethod,
MessageDecryptionManifest,
MessageDecryptionTestResult,
MessageDecryptionTestScenario,
)
from awses_test_vectors.manifests.full_message.encrypt import MessageEncryptionTestScenario
from awses_test_vectors.manifests.keys import KeysManifest
try:
from aws_encryption_sdk.identifiers import AlgorithmSuite
except ImportError:
from aws_encryption_sdk.identifiers import Algorithm as AlgorithmSuite
from awses_test_vectors.manifests.master_key import MasterKeySpec, master_key_provider_from_master_key_specs
try: # Python 3.5.0 and 3.5.1 have incompatible typing modules
from typing import IO, Callable, Dict, Iterable, Optional # noqa pylint: disable=unused-import
from awses_test_vectors.internal.mypy_types import ( # noqa pylint: disable=unused-import
ENCRYPT_SCENARIO_SPEC,
PLAINTEXTS_SPEC,
)
except ImportError: # pragma: no cover
# We only actually need these imports when running the mypy checks
pass
SUPPORTED_VERSIONS = (2,)
class TamperingMethod:
"""Base class for all tampering methods."""
@classmethod
def from_tampering_spec(cls, spec):
"""Load from a tampering specification"""
if spec is None:
return TamperingMethod()
if spec == "truncate":
return TruncateTamperingMethod()
if spec == "mutate":
return MutateTamperingMethod()
if spec == "half-sign":
return HalfSigningTamperingMethod()
((tampering_tag, tampering_values_spec),) = spec.items()
if tampering_tag == "change-edk-provider-info":
return ChangeEDKProviderInfoTamperingMethod.from_values_spec(tampering_values_spec)
raise ValueError("Unrecognized tampering method tag: " + tampering_tag)
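    # Editor's note (illustrative spec values): a scenario's "tampering" entry is
    # expected to be one of "truncate", "mutate", "half-sign", or a mapping such
    # as {"change-edk-provider-info": ["tampered-provider-info"]}, matching the
    # branches above.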
# pylint: disable=R0201
def run_scenario_with_tampering(self, ciphertext_writer, generation_scenario, plaintext_uri):
"""
Run a given scenario, tampering with the input or the result.
return: a list of (ciphertext, result) pairs
"""
materials_manager = DefaultCryptoMaterialsManager(
generation_scenario.encryption_scenario.master_key_provider_fn()
)
ciphertext_to_decrypt = generation_scenario.encryption_scenario.run(materials_manager)
if generation_scenario.result:
expected_result = generation_scenario.result
else:
expected_result = MessageDecryptionTestResult.expect_output(
plaintext_uri=plaintext_uri, plaintext=generation_scenario.encryption_scenario.plaintext
)
return [
generation_scenario.decryption_test_scenario_pair(ciphertext_writer, ciphertext_to_decrypt, expected_result)
]
class ChangeEDKProviderInfoTamperingMethod(TamperingMethod):
"""Tampering method that changes the provider info on all EDKs."""
new_provider_infos = attr.ib(validator=iterable_validator(list, six.string_types))
def __init__(self, new_provider_infos):
"""Create a new instance for a given new provider info value."""
self.new_provider_infos = new_provider_infos
@classmethod
def from_values_spec(cls, values_spec):
"""Load from a tampering parameters specification"""
return ChangeEDKProviderInfoTamperingMethod(values_spec)
# pylint: disable=R0201
def run_scenario_with_tampering(self, ciphertext_writer, generation_scenario, _plaintext_uri):
"""
Run a given scenario, tampering with the input or the result.
return: a list of (ciphertext, result) pairs.
"""
master_key_provider = generation_scenario.encryption_scenario.master_key_provider_fn()
# Use a caching CMM to avoid generating a new data key every time.
cache = LocalCryptoMaterialsCache(10)
caching_cmm = CachingCryptoMaterialsManager(
master_key_provider=master_key_provider,
cache=cache,
max_age=60.0,
max_messages_encrypted=100,
)
return [
self.run_scenario_with_new_provider_info(
ciphertext_writer, generation_scenario, caching_cmm, new_provider_info
)
for new_provider_info in self.new_provider_infos
]
def run_scenario_with_new_provider_info(
self, ciphertext_writer, generation_scenario, materials_manager, new_provider_info
):
"""Run with tampering for a specific new provider info value"""
tampering_materials_manager = ProviderInfoChangingCryptoMaterialsManager(materials_manager, new_provider_info)
ciphertext_to_decrypt = generation_scenario.encryption_scenario.run(tampering_materials_manager)
expected_result = MessageDecryptionTestResult.expect_error(
"Incorrect encrypted data key provider info: " + new_provider_info
)
return generation_scenario.decryption_test_scenario_pair(
ciphertext_writer, ciphertext_to_decrypt, expected_result
)
class ProviderInfoChangingCryptoMaterialsManager(CryptoMaterialsManager):
"""
    Custom CMM that modifies the provider info field on EDKs.
THIS IS ONLY USED TO CREATE INVALID MESSAGES and should never be used in
production!
"""
wrapped_cmm = attr.ib(validator=attr.validators.instance_of(CryptoMaterialsManager))
new_provider_info = attr.ib(validator=attr.validators.instance_of(six.string_types))
def __init__(self, materials_manager, new_provider_info):
"""Create a new CMM that wraps a the given CMM."""
self.wrapped_cmm = materials_manager
self.new_provider_info = new_provider_info
def get_encryption_materials(self, request):
"""
Request materials from the wrapped CMM, and then change the provider info
on each EDK.
"""
result = self.wrapped_cmm.get_encryption_materials(request)
for encrypted_data_key in result.encrypted_data_keys:
encrypted_data_key.key_provider.key_info = self.new_provider_info
return result
def decrypt_materials(self, request):
"""Thunks to the wrapped CMM"""
return self.wrapped_cmm.decrypt_materials(request)
BITS_PER_BYTE = 8
class TruncateTamperingMethod(TamperingMethod):
"""Tampering method that truncates a good message at every byte (except zero)."""
# pylint: disable=R0201
def run_scenario_with_tampering(self, ciphertext_writer, generation_scenario, _plaintext_uri):
"""
Run a given scenario, tampering with the input or the result.
return: a list of (ciphertext, result) pairs.
"""
ciphertext_to_decrypt = generation_scenario.encryption_scenario.run()
        return [
            generation_scenario.decryption_test_scenario_pair(
                ciphertext_writer,
                ciphertext_to_decrypt[0:length],
                MessageDecryptionTestResult.expect_error("Truncated at byte {}".format(length)),
            )
            for length in range(1, len(ciphertext_to_decrypt))
        ]
class MutateTamperingMethod(TamperingMethod):
"""Tampering method that produces a message with a single bit flipped, for every possible bit."""
# pylint: disable=R0201
def run_scenario_with_tampering(self, ciphertext_writer, generation_scenario, _plaintext_uri):
"""
Run a given scenario, tampering with the input or the result.
return: a list of (ciphertext, result) pairs.
"""
ciphertext_to_decrypt = generation_scenario.encryption_scenario.run()
        return [
            generation_scenario.decryption_test_scenario_pair(
                ciphertext_writer,
                MutateTamperingMethod.flip_bit(ciphertext_to_decrypt, bit),
                MessageDecryptionTestResult.expect_error("Bit {} flipped".format(bit)),
            )
            for bit in range(0, len(ciphertext_to_decrypt) * BITS_PER_BYTE)
        ]
    @classmethod
    def flip_bit(cls, ciphertext, bit):
        """Flip only the given bit in the given ciphertext (bit 0 is the most significant bit of byte 0)"""
        byte_index, bit_index = divmod(bit, BITS_PER_BYTE)
        result = bytearray(ciphertext)
        result[byte_index] ^= 1 << (BITS_PER_BYTE - bit_index - 1)
        return bytes(result)
class HalfSigningTamperingMethod(TamperingMethod):
"""Tampering method that changes the provider info on all EDKs."""
# pylint: disable=R0201
def run_scenario_with_tampering(self, ciphertext_writer, generation_scenario, _plaintext_uri):
"""
Run a given scenario, tampering with the input or the result.
return: a list of (ciphertext, result) pairs.
"""
tampering_materials_manager = HalfSigningCryptoMaterialsManager(
generation_scenario.encryption_scenario.master_key_provider_fn()
)
ciphertext_to_decrypt = generation_scenario.encryption_scenario.run(tampering_materials_manager)
expected_result = MessageDecryptionTestResult.expect_error(
"Unsigned message using a data key with a public key"
)
return [
generation_scenario.decryption_test_scenario_pair(ciphertext_writer, ciphertext_to_decrypt, expected_result)
]
class HalfSigningCryptoMaterialsManager(CryptoMaterialsManager):
"""
Custom CMM that generates materials for an unsigned algorithm suite
that includes the "aws-crypto-public-key" encryption context.
THIS IS ONLY USED TO CREATE INVALID MESSAGES and should never be used in
production! It is imitating what a malicious decryptor without encryption
permissions might do, to attempt to forge an unsigned message from a decrypted
signed message, and therefore this is an important case for ESDKs to reject.
"""
wrapped_default_cmm = attr.ib(validator=attr.validators.instance_of(CryptoMaterialsManager))
def __init__(self, master_key_provider):
"""
Create a new CMM that wraps a new DefaultCryptoMaterialsManager
based on the given master key provider.
"""
self.wrapped_default_cmm = DefaultCryptoMaterialsManager(master_key_provider)
def get_encryption_materials(self, request):
"""
Generate half-signing materials by requesting signing materials
from the wrapped default CMM, and then changing the algorithm suite
        and removing the signing key from the result.
"""
if request.algorithm == AlgorithmSuite.AES_256_GCM_HKDF_SHA512_COMMIT_KEY:
signing_request = copy(request)
signing_request.algorithm = AlgorithmSuite.AES_256_GCM_HKDF_SHA512_COMMIT_KEY_ECDSA_P384
result = self.wrapped_default_cmm.get_encryption_materials(signing_request)
result.algorithm = request.algorithm
result.signing_key = None
return result
raise NotImplementedError(
"The half-sign tampering method is only supported on the "
"AES_256_GCM_HKDF_SHA512_COMMIT_KEY algorithm suite."
)
def decrypt_materials(self, request):
"""Thunks to the wrapped default CMM"""
return self.wrapped_default_cmm.decrypt_materials(request)
@attr.s
class MessageDecryptionTestScenarioGenerator(object):
# pylint: disable=too-many-instance-attributes
"""Data class for a single full message decrypt test scenario.
Handles serialization and deserialization to and from manifest specs.
:param MessageEncryptionTestScenario encryption_scenario: Encryption parameters
:param tampering_method: Optional method used to tamper with the ciphertext
:type tampering_method: :class:`TamperingMethod`
:param decryption_method:
:param decryption_master_key_specs: Iterable of master key specifications
:type decryption_master_key_specs: iterable of :class:`MasterKeySpec`
:param Callable decryption_master_key_provider_fn:
:param result:
"""
encryption_scenario = attr.ib(validator=attr.validators.instance_of(MessageEncryptionTestScenario))
tampering_method = attr.ib(validator=attr.validators.optional(attr.validators.instance_of(TamperingMethod)))
decryption_method = attr.ib(validator=attr.validators.optional(attr.validators.instance_of(DecryptionMethod)))
decryption_master_key_specs = attr.ib(validator=iterable_validator(list, MasterKeySpec))
decryption_master_key_provider_fn = attr.ib(validator=attr.validators.is_callable())
result = attr.ib(validator=attr.validators.optional(attr.validators.instance_of(MessageDecryptionTestResult)))
@classmethod
def from_scenario(cls, scenario, keys, plaintexts):
"""Load from a scenario specification.
:param dict scenario: Scenario specification JSON
:param KeysManifest keys: Loaded keys
:param dict plaintexts: Mapping of plaintext names to plaintext values
:return: Loaded test scenario
:rtype: MessageDecryptionTestScenarioGenerator
"""
encryption_scenario_spec = scenario["encryption-scenario"]
encryption_scenario = MessageEncryptionTestScenario.from_scenario(encryption_scenario_spec, keys, plaintexts)
tampering = scenario.get("tampering")
tampering_method = TamperingMethod.from_tampering_spec(tampering)
decryption_method_spec = scenario.get("decryption-method")
decryption_method = DecryptionMethod(decryption_method_spec) if decryption_method_spec else None
if "decryption-master-keys" in scenario:
decryption_master_key_specs = [
MasterKeySpec.from_scenario(spec) for spec in scenario["decryption-master-keys"]
]
def decryption_master_key_provider_fn():
return master_key_provider_from_master_key_specs(keys, decryption_master_key_specs)
else:
decryption_master_key_specs = encryption_scenario.master_key_specs
decryption_master_key_provider_fn = encryption_scenario.master_key_provider_fn
result_spec = scenario.get("result")
result = MessageDecryptionTestResult.from_result_spec(result_spec, None) if result_spec else None
return cls(
encryption_scenario=encryption_scenario,
tampering_method=tampering_method,
decryption_method=decryption_method,
decryption_master_key_specs=decryption_master_key_specs,
decryption_master_key_provider_fn=decryption_master_key_provider_fn,
result=result,
)
def run(self, ciphertext_writer, plaintext_uri):
"""Run this scenario, writing the resulting ciphertext with ``ciphertext_writer`` and returning
a :class:`MessageDecryptionTestScenario` that describes the matching decrypt scenario.
:param callable ciphertext_writer: Callable that will write the requested named ciphertext and
return a URI locating the written data
:param str plaintext_uri: URI locating the written plaintext data for this scenario
:return: Decrypt test scenario that describes the generated scenario
:rtype: MessageDecryptionTestScenario
"""
return dict(self.tampering_method.run_scenario_with_tampering(ciphertext_writer, self, plaintext_uri))
def decryption_test_scenario_pair(self, ciphertext_writer, ciphertext_to_decrypt, expected_result):
"""Create a new (name, decryption scenario) pair"""
ciphertext_name = str(uuid.uuid4())
ciphertext_uri = ciphertext_writer(ciphertext_name, ciphertext_to_decrypt)
return (
ciphertext_name,
MessageDecryptionTestScenario(
ciphertext_uri=ciphertext_uri,
ciphertext=ciphertext_to_decrypt,
master_key_specs=self.decryption_master_key_specs,
master_key_provider_fn=self.decryption_master_key_provider_fn,
decryption_method=self.decryption_method,
result=expected_result,
),
)
@attr.s
class MessageDecryptionGenerationManifest(object):
"""AWS Encryption SDK Decryption Message Generation manifest handler.
Described in AWS Crypto Tools Test Vector Framework feature #0006 AWS Encryption SDK Decrypt Message Generation.
:param int version: Version of this manifest
:param KeysManifest keys: Loaded keys
:param dict plaintexts: Mapping of plaintext names to plaintext values
:param dict tests: Mapping of test scenario names to :class:`MessageDecryptionGenerationManifest`s
"""
version = attr.ib(validator=membership_validator(SUPPORTED_VERSIONS))
keys = attr.ib(validator=attr.validators.instance_of(KeysManifest))
plaintexts = attr.ib(validator=dictionary_validator(six.string_types, six.binary_type))
tests = attr.ib(validator=dictionary_validator(six.string_types, MessageDecryptionTestScenarioGenerator))
type_name = "awses-decrypt-generate"
@staticmethod
def _generate_plaintexts(plaintexts_specs):
# type: (PLAINTEXTS_SPEC) -> Dict[str, bytes]
"""Generate required plaintext values.
:param dict plaintexts_specs: Mapping of plaintext name to size in bytes
:return: Mapping of plaintext name to randomly generated bytes
:rtype: dict
"""
return {name: os.urandom(size) for name, size in plaintexts_specs.items()}
@classmethod
def from_file(cls, input_file):
# type: (IO) -> MessageDecryptionGenerationManifest
"""Load from a file containing a full message encrypt manifest.
:param file input_file: File object for file containing JSON manifest
:return: Loaded manifest
:rtype: MessageEncryptionManifest
"""
raw_manifest = json.load(input_file)
validate_manifest_type(
type_name=cls.type_name, manifest_version=raw_manifest["manifest"], supported_versions=SUPPORTED_VERSIONS
)
parent_dir = os.path.abspath(os.path.dirname(input_file.name))
reader = file_reader(parent_dir)
raw_keys_manifest = json.loads(reader(raw_manifest["keys"]).decode(ENCODING))
keys = KeysManifest.from_manifest_spec(raw_keys_manifest)
plaintexts = cls._generate_plaintexts(raw_manifest["plaintexts"])
tests = {}
for name, scenario in raw_manifest["tests"].items():
try:
tests[name] = MessageDecryptionTestScenarioGenerator.from_scenario(
scenario=scenario, keys=keys, plaintexts=plaintexts
)
except NotImplementedError:
continue
return cls(version=raw_manifest["manifest"]["version"], keys=keys, plaintexts=plaintexts, tests=tests)
def run_and_write_to_dir(self, target_directory, json_indent=None):
# type: (str, Optional[int]) -> None
"""Process all known encrypt test scenarios and write the resulting data and manifests to disk.
:param str target_directory: Directory in which to write all output
:param int json_indent: Number of spaces to indent JSON files (optional: default is to write minified)
"""
root_dir = os.path.abspath(target_directory)
root_writer = file_writer(root_dir)
root_writer("keys.json", json.dumps(self.keys.manifest_spec, indent=json_indent).encode(ENCODING))
plaintext_writer = file_writer(os.path.join(root_dir, "plaintexts"))
plaintext_uris = {name: plaintext_writer(name, plaintext) for name, plaintext in self.plaintexts.items()}
ciphertext_writer = file_writer(os.path.join(root_dir, "ciphertexts"))
test_scenarios = {
decrypt_scenario_name: decrypt_scenario
for name, scenario in self.tests.items()
for decrypt_scenario_name, decrypt_scenario in scenario.run(
ciphertext_writer, plaintext_uris[scenario.encryption_scenario.plaintext_name]
).items()
}
decrypt_manifest = MessageDecryptionManifest(
keys_uri="file://keys.json", keys=self.keys, test_scenarios=test_scenarios
)
root_writer("manifest.json", json.dumps(decrypt_manifest.manifest_spec, indent=json_indent).encode(ENCODING))
| [
"awses_test_vectors.manifests.full_message.decrypt.MessageDecryptionManifest",
"attr.validators.instance_of",
"awses_test_vectors.manifests.full_message.encrypt.MessageEncryptionTestScenario.from_scenario",
"copy.copy",
"awses_test_vectors.manifests.master_key.MasterKeySpec.from_scenario",
"awses_test_vectors.internal.util.membership_validator",
"awses_test_vectors.internal.util.iterable_validator",
"awses_test_vectors.manifests.full_message.decrypt.DecryptionMethod",
"json.dumps",
"awses_test_vectors.manifests.full_message.decrypt.MessageDecryptionTestResult.expect_error",
"awses_test_vectors.internal.util.file_reader",
"awses_test_vectors.manifests.keys.KeysManifest.from_manifest_spec",
"awses_test_vectors.internal.util.validate_manifest_type",
"attr.validators.is_callable",
"aws_encryption_sdk.caches.local.LocalCryptoMaterialsCache",
"awses_test_vectors.internal.util.dictionary_validator",
"awses_test_vectors.manifests.full_message.decrypt.MessageDecryptionTestScenario",
"os.urandom",
"awses_test_vectors.manifests.full_message.decrypt.MessageDecryptionTestResult.from_result_spec",
"uuid.uuid4",
"os.path.dirname",
"aws_encryption_sdk.materials_managers.caching.CachingCryptoMaterialsManager",
"os.path.abspath",
"aws_encryption_sdk.materials_managers.default.DefaultCryptoMaterialsManager",
"awses_test_vectors.internal.util.file_writer",
"os.path.join",
"awses_test_vectors.manifests.full_message.decrypt.MessageDecryptionTestResult.expect_output",
"awses_test_vectors.manifests.master_key.master_key_provider_from_master_key_specs",
"json.load"
] | [((5257, 5286), 'aws_encryption_sdk.caches.local.LocalCryptoMaterialsCache', 'LocalCryptoMaterialsCache', (['(10)'], {}), '(10)\n', (5282, 5286), False, 'from aws_encryption_sdk.caches.local import LocalCryptoMaterialsCache\n'), ((5309, 5438), 'aws_encryption_sdk.materials_managers.caching.CachingCryptoMaterialsManager', 'CachingCryptoMaterialsManager', ([], {'master_key_provider': 'master_key_provider', 'cache': 'cache', 'max_age': '(60.0)', 'max_messages_encrypted': '(100)'}), '(master_key_provider=master_key_provider,\n cache=cache, max_age=60.0, max_messages_encrypted=100)\n', (5338, 5438), False, 'from aws_encryption_sdk.materials_managers.caching import CachingCryptoMaterialsManager\n'), ((6203, 6316), 'awses_test_vectors.manifests.full_message.decrypt.MessageDecryptionTestResult.expect_error', 'MessageDecryptionTestResult.expect_error', (["('Incorrect encrypted data key provider info: ' + new_provider_info)"], {}), "(\n 'Incorrect encrypted data key provider info: ' + new_provider_info)\n", (6243, 6316), False, 'from awses_test_vectors.manifests.full_message.decrypt import DecryptionMethod, MessageDecryptionManifest, MessageDecryptionTestResult, MessageDecryptionTestScenario\n'), ((10425, 10525), 'awses_test_vectors.manifests.full_message.decrypt.MessageDecryptionTestResult.expect_error', 'MessageDecryptionTestResult.expect_error', (['"""Unsigned message using a data key with a public key"""'], {}), "(\n 'Unsigned message using a data key with a public key')\n", (10465, 10525), False, 'from awses_test_vectors.manifests.full_message.decrypt import DecryptionMethod, MessageDecryptionManifest, MessageDecryptionTestResult, MessageDecryptionTestScenario\n'), ((11555, 11605), 'aws_encryption_sdk.materials_managers.default.DefaultCryptoMaterialsManager', 'DefaultCryptoMaterialsManager', (['master_key_provider'], {}), '(master_key_provider)\n', (11584, 11605), False, 'from aws_encryption_sdk.materials_managers.default import DefaultCryptoMaterialsManager\n'), ((14530, 14621), 'awses_test_vectors.manifests.full_message.encrypt.MessageEncryptionTestScenario.from_scenario', 'MessageEncryptionTestScenario.from_scenario', (['encryption_scenario_spec', 'keys', 'plaintexts'], {}), '(encryption_scenario_spec, keys,\n plaintexts)\n', (14573, 14621), False, 'from awses_test_vectors.manifests.full_message.encrypt import MessageEncryptionTestScenario\n'), ((19188, 19209), 'json.load', 'json.load', (['input_file'], {}), '(input_file)\n', (19197, 19209), False, 'import json\n'), ((19218, 19352), 'awses_test_vectors.internal.util.validate_manifest_type', 'validate_manifest_type', ([], {'type_name': 'cls.type_name', 'manifest_version': "raw_manifest['manifest']", 'supported_versions': 'SUPPORTED_VERSIONS'}), "(type_name=cls.type_name, manifest_version=\n raw_manifest['manifest'], supported_versions=SUPPORTED_VERSIONS)\n", (19240, 19352), False, 'from awses_test_vectors.internal.util import dictionary_validator, file_reader, file_writer, iterable_validator, membership_validator, validate_manifest_type\n'), ((19459, 19482), 'awses_test_vectors.internal.util.file_reader', 'file_reader', (['parent_dir'], {}), '(parent_dir)\n', (19470, 19482), False, 'from awses_test_vectors.internal.util import dictionary_validator, file_reader, file_writer, iterable_validator, membership_validator, validate_manifest_type\n'), ((19584, 19634), 'awses_test_vectors.manifests.keys.KeysManifest.from_manifest_spec', 'KeysManifest.from_manifest_spec', (['raw_keys_manifest'], {}), '(raw_keys_manifest)\n', (19615, 19634), False, 
'from awses_test_vectors.manifests.keys import KeysManifest\n'), ((20597, 20630), 'os.path.abspath', 'os.path.abspath', (['target_directory'], {}), '(target_directory)\n', (20612, 20630), False, 'import os\n'), ((20653, 20674), 'awses_test_vectors.internal.util.file_writer', 'file_writer', (['root_dir'], {}), '(root_dir)\n', (20664, 20674), False, 'from awses_test_vectors.internal.util import dictionary_validator, file_reader, file_writer, iterable_validator, membership_validator, validate_manifest_type\n'), ((21416, 21521), 'awses_test_vectors.manifests.full_message.decrypt.MessageDecryptionManifest', 'MessageDecryptionManifest', ([], {'keys_uri': '"""file://keys.json"""', 'keys': 'self.keys', 'test_scenarios': 'test_scenarios'}), "(keys_uri='file://keys.json', keys=self.keys,\n test_scenarios=test_scenarios)\n", (21441, 21521), False, 'from awses_test_vectors.manifests.full_message.decrypt import DecryptionMethod, MessageDecryptionManifest, MessageDecryptionTestResult, MessageDecryptionTestScenario\n'), ((3902, 4037), 'awses_test_vectors.manifests.full_message.decrypt.MessageDecryptionTestResult.expect_output', 'MessageDecryptionTestResult.expect_output', ([], {'plaintext_uri': 'plaintext_uri', 'plaintext': 'generation_scenario.encryption_scenario.plaintext'}), '(plaintext_uri=plaintext_uri,\n plaintext=generation_scenario.encryption_scenario.plaintext)\n', (3943, 4037), False, 'from awses_test_vectors.manifests.full_message.decrypt import DecryptionMethod, MessageDecryptionManifest, MessageDecryptionTestResult, MessageDecryptionTestScenario\n'), ((4390, 4432), 'awses_test_vectors.internal.util.iterable_validator', 'iterable_validator', (['list', 'six.string_types'], {}), '(list, six.string_types)\n', (4408, 4432), False, 'from awses_test_vectors.internal.util import dictionary_validator, file_reader, file_writer, iterable_validator, membership_validator, validate_manifest_type\n'), ((6765, 6816), 'attr.validators.instance_of', 'attr.validators.instance_of', (['CryptoMaterialsManager'], {}), '(CryptoMaterialsManager)\n', (6792, 6816), False, 'import attr\n'), ((6860, 6905), 'attr.validators.instance_of', 'attr.validators.instance_of', (['six.string_types'], {}), '(six.string_types)\n', (6887, 6905), False, 'import attr\n'), ((11277, 11328), 'attr.validators.instance_of', 'attr.validators.instance_of', (['CryptoMaterialsManager'], {}), '(CryptoMaterialsManager)\n', (11304, 11328), False, 'import attr\n'), ((11995, 12008), 'copy.copy', 'copy', (['request'], {}), '(request)\n', (11999, 12008), False, 'from copy import copy\n'), ((13438, 13496), 'attr.validators.instance_of', 'attr.validators.instance_of', (['MessageEncryptionTestScenario'], {}), '(MessageEncryptionTestScenario)\n', (13465, 13496), False, 'import attr\n'), ((13778, 13817), 'awses_test_vectors.internal.util.iterable_validator', 'iterable_validator', (['list', 'MasterKeySpec'], {}), '(list, MasterKeySpec)\n', (13796, 13817), False, 'from awses_test_vectors.internal.util import dictionary_validator, file_reader, file_writer, iterable_validator, membership_validator, validate_manifest_type\n'), ((13877, 13906), 'attr.validators.is_callable', 'attr.validators.is_callable', ([], {}), '()\n', (13904, 13906), False, 'import attr\n'), ((14833, 14873), 'awses_test_vectors.manifests.full_message.decrypt.DecryptionMethod', 'DecryptionMethod', (['decryption_method_spec'], {}), '(decryption_method_spec)\n', (14849, 14873), False, 'from awses_test_vectors.manifests.full_message.decrypt import DecryptionMethod, MessageDecryptionManifest, 
MessageDecryptionTestResult, MessageDecryptionTestScenario\n'), ((15515, 15578), 'awses_test_vectors.manifests.full_message.decrypt.MessageDecryptionTestResult.from_result_spec', 'MessageDecryptionTestResult.from_result_spec', (['result_spec', 'None'], {}), '(result_spec, None)\n', (15559, 15578), False, 'from awses_test_vectors.manifests.full_message.decrypt import DecryptionMethod, MessageDecryptionManifest, MessageDecryptionTestResult, MessageDecryptionTestScenario\n'), ((16902, 16914), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (16912, 16914), False, 'import uuid\n'), ((17058, 17352), 'awses_test_vectors.manifests.full_message.decrypt.MessageDecryptionTestScenario', 'MessageDecryptionTestScenario', ([], {'ciphertext_uri': 'ciphertext_uri', 'ciphertext': 'ciphertext_to_decrypt', 'master_key_specs': 'self.decryption_master_key_specs', 'master_key_provider_fn': 'self.decryption_master_key_provider_fn', 'decryption_method': 'self.decryption_method', 'result': 'expected_result'}), '(ciphertext_uri=ciphertext_uri, ciphertext=\n ciphertext_to_decrypt, master_key_specs=self.\n decryption_master_key_specs, master_key_provider_fn=self.\n decryption_master_key_provider_fn, decryption_method=self.\n decryption_method, result=expected_result)\n', (17087, 17352), False, 'from awses_test_vectors.manifests.full_message.decrypt import DecryptionMethod, MessageDecryptionManifest, MessageDecryptionTestResult, MessageDecryptionTestScenario\n'), ((18019, 18059), 'awses_test_vectors.internal.util.membership_validator', 'membership_validator', (['SUPPORTED_VERSIONS'], {}), '(SUPPORTED_VERSIONS)\n', (18039, 18059), False, 'from awses_test_vectors.internal.util import dictionary_validator, file_reader, file_writer, iterable_validator, membership_validator, validate_manifest_type\n'), ((18090, 18131), 'attr.validators.instance_of', 'attr.validators.instance_of', (['KeysManifest'], {}), '(KeysManifest)\n', (18117, 18131), False, 'import attr\n'), ((18168, 18223), 'awses_test_vectors.internal.util.dictionary_validator', 'dictionary_validator', (['six.string_types', 'six.binary_type'], {}), '(six.string_types, six.binary_type)\n', (18188, 18223), False, 'from awses_test_vectors.internal.util import dictionary_validator, file_reader, file_writer, iterable_validator, membership_validator, validate_manifest_type\n'), ((18255, 18333), 'awses_test_vectors.internal.util.dictionary_validator', 'dictionary_validator', (['six.string_types', 'MessageDecryptionTestScenarioGenerator'], {}), '(six.string_types, MessageDecryptionTestScenarioGenerator)\n', (18275, 18333), False, 'from awses_test_vectors.internal.util import dictionary_validator, file_reader, file_writer, iterable_validator, membership_validator, validate_manifest_type\n'), ((18752, 18768), 'os.urandom', 'os.urandom', (['size'], {}), '(size)\n', (18762, 18768), False, 'import os\n'), ((19408, 19440), 'os.path.dirname', 'os.path.dirname', (['input_file.name'], {}), '(input_file.name)\n', (19423, 19440), False, 'import os\n'), ((20823, 20859), 'os.path.join', 'os.path.join', (['root_dir', '"""plaintexts"""'], {}), "(root_dir, 'plaintexts')\n", (20835, 20859), False, 'import os\n'), ((21016, 21053), 'os.path.join', 'os.path.join', (['root_dir', '"""ciphertexts"""'], {}), "(root_dir, 'ciphertexts')\n", (21028, 21053), False, 'import os\n'), ((13564, 13608), 'attr.validators.instance_of', 'attr.validators.instance_of', (['TamperingMethod'], {}), '(TamperingMethod)\n', (13591, 13608), False, 'import attr\n'), ((13678, 13723), 'attr.validators.instance_of', 
'attr.validators.instance_of', (['DecryptionMethod'], {}), '(DecryptionMethod)\n', (13705, 13723), False, 'import attr\n'), ((13964, 14020), 'attr.validators.instance_of', 'attr.validators.instance_of', (['MessageDecryptionTestResult'], {}), '(MessageDecryptionTestResult)\n', (13991, 14020), False, 'import attr\n'), ((15019, 15052), 'awses_test_vectors.manifests.master_key.MasterKeySpec.from_scenario', 'MasterKeySpec.from_scenario', (['spec'], {}), '(spec)\n', (15046, 15052), False, 'from awses_test_vectors.manifests.master_key import MasterKeySpec, master_key_provider_from_master_key_specs\n'), ((15191, 15267), 'awses_test_vectors.manifests.master_key.master_key_provider_from_master_key_specs', 'master_key_provider_from_master_key_specs', (['keys', 'decryption_master_key_specs'], {}), '(keys, decryption_master_key_specs)\n', (15232, 15267), False, 'from awses_test_vectors.manifests.master_key import MasterKeySpec, master_key_provider_from_master_key_specs\n'), ((20709, 20764), 'json.dumps', 'json.dumps', (['self.keys.manifest_spec'], {'indent': 'json_indent'}), '(self.keys.manifest_spec, indent=json_indent)\n', (20719, 20764), False, 'import json\n'), ((21578, 21640), 'json.dumps', 'json.dumps', (['decrypt_manifest.manifest_spec'], {'indent': 'json_indent'}), '(decrypt_manifest.manifest_spec, indent=json_indent)\n', (21588, 21640), False, 'import json\n')] |
import torch
import torchtestcase
from neural_spline_flows.nde.transforms import base
class TransformTest(torchtestcase.TorchTestCase):
"""Base test for all transforms."""
def assert_tensor_is_good(self, tensor, shape=None):
self.assertIsInstance(tensor, torch.Tensor)
self.assertFalse(torch.isnan(tensor).any())
self.assertFalse(torch.isinf(tensor).any())
if shape is not None:
self.assertEqual(tensor.shape, torch.Size(shape))
def assert_forward_inverse_are_consistent(self, transform, inputs):
inverse = base.InverseTransform(transform)
identity = base.CompositeTransform([inverse, transform])
outputs, logabsdet = identity(inputs)
self.assert_tensor_is_good(outputs, shape=inputs.shape)
self.assert_tensor_is_good(logabsdet, shape=inputs.shape[:1])
self.assertEqual(outputs, inputs)
self.assertEqual(logabsdet, torch.zeros(inputs.shape[:1]))
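        # Editor's note: composing a transform with its inverse yields the
        # identity map, whose Jacobian determinant is 1, hence the expected
        # log-abs-det of exactly zero checked above.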
def assertNotEqual(self, first, second, msg=None):
if ((self._eps and (first - second).abs().max().item() < self._eps) or
(not self._eps and torch.equal(first, second))):
self._fail_with_message(msg, "The tensors are _not_ different!")
| [
"torch.isnan",
"neural_spline_flows.nde.transforms.base.InverseTransform",
"neural_spline_flows.nde.transforms.base.CompositeTransform",
"torch.equal",
"torch.isinf",
"torch.Size",
"torch.zeros"
] | [((576, 608), 'neural_spline_flows.nde.transforms.base.InverseTransform', 'base.InverseTransform', (['transform'], {}), '(transform)\n', (597, 608), False, 'from neural_spline_flows.nde.transforms import base\n'), ((628, 673), 'neural_spline_flows.nde.transforms.base.CompositeTransform', 'base.CompositeTransform', (['[inverse, transform]'], {}), '([inverse, transform])\n', (651, 673), False, 'from neural_spline_flows.nde.transforms import base\n'), ((933, 962), 'torch.zeros', 'torch.zeros', (['inputs.shape[:1]'], {}), '(inputs.shape[:1])\n', (944, 962), False, 'import torch\n'), ((466, 483), 'torch.Size', 'torch.Size', (['shape'], {}), '(shape)\n', (476, 483), False, 'import torch\n'), ((1134, 1160), 'torch.equal', 'torch.equal', (['first', 'second'], {}), '(first, second)\n', (1145, 1160), False, 'import torch\n'), ((314, 333), 'torch.isnan', 'torch.isnan', (['tensor'], {}), '(tensor)\n', (325, 333), False, 'import torch\n'), ((366, 385), 'torch.isinf', 'torch.isinf', (['tensor'], {}), '(tensor)\n', (377, 385), False, 'import torch\n')] |
import os
database_url = os.environ.get('DATABASE_URL')
| [
"os.environ.get"
] | [((26, 56), 'os.environ.get', 'os.environ.get', (['"""DATABASE_URL"""'], {}), "('DATABASE_URL')\n", (40, 56), False, 'import os\n')] |
from datetime import date
from six import BytesIO, binary_type, u
from six.moves.urllib.parse import parse_qsl, urlencode
from unittest2 import TestCase
import mock
from authorizesauce.apis.transaction import PROD_URL, TEST_URL, TransactionAPI
from authorizesauce.data import Address, CreditCard
from authorizesauce.exceptions import AuthorizeConnectionError, \
AuthorizeResponseError
class MockResponse(BytesIO):
class Headers(dict):
def getparam(self, *args, **kwargs):
"""Python 2 version"""
return None
def get_content_charset(self, failobj=None, *args, **kwargs):
"""Python 3 version"""
return failobj
def __init__(self, *args, **kwargs):
BytesIO.__init__(self, *args, **kwargs)
self.headers = self.Headers()
SUCCESS = MockResponse(
b'1;1;1;This transaction has been approved.;IKRAGJ;Y;2171062816;;;20.00;CC'
b';auth_only;;Jeffrey;Schenck;;<NAME>;Venice;CA;90291;USA;;;;;;;;;;;;'
b';;;;;375DD9293D7605E20DF0B437EE2A7B92;P;2;;;;;;;;;;;XXXX1111;Visa;;;;;;;'
b';;;;;;;;;;Y')
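# Editor's note: the bytes above mimic Authorize.Net's semicolon-delimited
# AIM-style response; the PARSED_* dicts below list the positional fields the
# API is expected to pull out of it (response/reason codes, reason text,
# authorization code, AVS/CVV results, amount, transaction type and
# transaction id).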
PARSED_SUCCESS = {
'cvv_response': 'P',
'authorization_code': 'IKRAGJ',
'response_code': '1',
'amount': '20.00',
'transaction_type': 'auth_only',
'avs_response': 'Y',
'response_reason_code': '1',
'response_reason_text': 'This transaction has been approved.',
'transaction_id': '2171062816',
}
ERROR = MockResponse(
b'2;1;2;This transaction has been declined.;000000;N;2171062816;;;20.00;CC'
b';auth_only;;Jeffrey;Schenck;;45 Rose Ave;Venice;CA;90291;USA;;;;;;;;;;;;'
b';;;;;375DD9293D7605E20DF0B437EE2A7B92;N;1;;;;;;;;;;;XXXX1111;Visa;;;;;;;'
b';;;;;;;;;;Y')
PARSED_ERROR = {
'cvv_response': 'N',
'authorization_code': '000000',
'response_code': '2',
'amount': '20.00',
'transaction_type': 'auth_only',
'avs_response': 'N',
'response_reason_code': '2',
'response_reason_text': 'This transaction has been declined.',
'transaction_id': '2171062816',
}
def _unicode_str(s):
if isinstance(s, binary_type):
return s.decode('unicode_escape')
return s
def _are_params_eq(params1, params2):
_params1, _params2 = map(_unicode_str, (params1, params2))
return frozenset(parse_qsl(_params1)) == frozenset(parse_qsl(_params2))
class TransactionAPITests(TestCase):
def setUp(self):
self.api = TransactionAPI('123', '456')
self.success = lambda *args, **kwargs: SUCCESS.seek(0) or SUCCESS
self.error = lambda *args, **kwargs: ERROR.seek(0) or ERROR
self.year = date.today().year + 10
self.credit_card = CreditCard('4111111111111111', self.year, 1, '911')
self.address = Address('45 Rose Ave', 'Venice', 'CA', '90291')
def test_basic_api(self):
api = TransactionAPI('123', '456')
self.assertEqual(api.url, TEST_URL)
api = TransactionAPI('123', '456', debug=False)
self.assertEqual(api.url, PROD_URL)
@mock.patch('authorizesauce.apis.transaction.urlopen')
def test_make_call(self, urlopen):
urlopen.side_effect = self.success
params = {'a': '1', 'b': '2'}
result = self.api._make_call(params)
self.assertEqual(urlopen.call_args[0][0], TEST_URL)
self.assertTrue(_are_params_eq(
urlopen.call_args[1]['data'], urlencode(params)
))
self.assertEqual(result, PARSED_SUCCESS)
@mock.patch('authorizesauce.apis.transaction.urlopen')
def test_make_call_with_unicode(self, urlopen):
urlopen.side_effect = self.success
result = self.api._make_call({u('\xe3'): '1', 'b': u('\xe3')})
self.assertEqual(urlopen.call_args[0][0], TEST_URL)
self.assertTrue(_are_params_eq(
urlopen.call_args[1]['data'], 'b=%C3%A3&%C3%A3=1'
))
self.assertEqual(result, PARSED_SUCCESS)
@mock.patch('authorizesauce.apis.transaction.urlopen')
def test_make_call_connection_error(self, urlopen):
urlopen.side_effect = IOError('Borked')
self.assertRaises(AuthorizeConnectionError, self.api._make_call,
{'a': '1', 'b': '2'})
@mock.patch('authorizesauce.apis.transaction.urlopen')
def test_make_call_response_error(self, urlopen):
urlopen.side_effect = self.error
try:
self.api._make_call({'a': '1', 'b': '2'})
except AuthorizeResponseError as e:
self.assertTrue(str(e).startswith(
'This transaction has been declined.'
))
self.assertEqual(e.full_response, PARSED_ERROR)
def test_add_params(self):
self.assertEqual(self.api._add_params({}), {})
params = self.api._add_params({}, credit_card=self.credit_card)
self.assertEqual(params, {
'x_card_num': '4111111111111111',
'x_exp_date': '01-{0}'.format(self.year),
'x_card_code': '911',
})
params = self.api._add_params({}, address=self.address)
self.assertEqual(params, {
'x_address': '45 Rose Ave',
'x_city': 'Venice',
'x_state': 'CA',
'x_zip': '90291',
'x_country': 'US',
})
params = self.api._add_params(
{}, credit_card=self.credit_card, address=self.address
)
self.assertEqual(params, {
'x_card_num': '4111111111111111',
'x_exp_date': '01-{0}'.format(self.year),
'x_card_code': '911',
'x_address': '45 Rose Ave',
'x_city': 'Venice',
'x_state': 'CA',
'x_zip': '90291',
'x_country': 'US',
})
@mock.patch('authorizesauce.apis.transaction.urlopen')
def test_auth(self, urlopen):
urlopen.side_effect = self.success
result = self.api.auth(20, self.credit_card, self.address)
self.assertEqual(urlopen.call_args[0][0], TEST_URL)
self.assertTrue(urlopen.call_args[1]['data'], (
'x_login=123&x_zip=90291&x_card_num=4111111111111111&'
'x_amount=20.00&x_tran_key=456&x_city=Venice&x_country=US&'
'x_version=3.1&x_state=CA&x_delim_char=%3B&'
'x_address=45+Rose+Ave&x_exp_date=01-{0}&x_test_request=FALSE'
'&x_card_code=911&x_type=AUTH_ONLY&x_delim_data=TRUE'.format(
str(self.year)
)
))
self.assertEqual(result, PARSED_SUCCESS)
@mock.patch('authorizesauce.apis.transaction.urlopen')
def test_capture(self, urlopen):
urlopen.side_effect = self.success
result = self.api.capture(20, self.credit_card, self.address)
self.assertEqual(urlopen.call_args[0][0], TEST_URL)
self.assertTrue(urlopen.call_args[1]['data'], (
'x_login=123&x_zip=90291&x_card_num=4111111111111111&'
'x_amount=20.00&x_tran_key=456&x_city=Venice&x_country=US&'
'x_version=3.1&x_state=CA&x_delim_char=%3B&'
'x_address=45+Rose+Ave&x_exp_date=01-{0}&x_test_request=FALSE'
'&x_card_code=911&x_type=AUTH_ONLY&x_delim_data=TRUE'.format(
str(self.year)
)
))
self.assertEqual(result, PARSED_SUCCESS)
@mock.patch('authorizesauce.apis.transaction.urlopen')
def test_settle(self, urlopen):
urlopen.side_effect = self.success
# Test without specified amount
result = self.api.settle('123456')
self.assertEqual(urlopen.call_args[0][0], TEST_URL)
self.assertTrue(urlopen.call_args[1]['data'], (
'https://test.authorize.net/gateway/transact.dll?x_login=123'
'&x_trans_id=123456&x_version=3.1&x_delim_char=%3B'
'&x_type=PRIOR_AUTH_CAPTURE&x_delim_data=TRUE&x_tran_key=456'
'&x_test_request=FALSE'
))
self.assertEqual(result, PARSED_SUCCESS)
# Test with specified amount
result = self.api.settle('123456', amount=10)
self.assertEqual(urlopen.call_args[0][0], TEST_URL)
self.assertTrue(urlopen.call_args[1]['data'], (
'https://test.authorize.net/gateway/transact.dll?x_login=123'
'&x_trans_id=123456&x_version=3.1&x_delim_char=%3B'
'&x_type=PRIOR_AUTH_CAPTURE&x_amount=10.00&x_delim_data=TRUE'
'&x_tran_key=456&x_test_request=FALSE'
))
self.assertEqual(result, PARSED_SUCCESS)
@mock.patch('authorizesauce.apis.transaction.urlopen')
def test_credit(self, urlopen):
urlopen.side_effect = self.success
# Test with transaction_id, amount
result = self.api.credit('1111', '123456', 10)
self.assertEqual(urlopen.call_args[0][0], TEST_URL)
self.assertTrue(urlopen.call_args[1]['data'], (
'https://test.authorize.net/gateway/transact.dll?x_login=123'
'&x_trans_id=123456&x_version=3.1&x_amount=10.00'
'&x_delim_char=%3B&x_type=CREDIT&x_card_num=1111'
'&x_delim_data=TRUE&x_tran_key=456&x_test_request=FALSE'
))
self.assertEqual(result, PARSED_SUCCESS)
@mock.patch('authorizesauce.apis.transaction.urlopen')
def test_void(self, urlopen):
urlopen.side_effect = self.success
result = self.api.void('123456')
self.assertEqual(urlopen.call_args[0][0], TEST_URL)
self.assertTrue(urlopen.call_args[1]['data'], (
'https://test.authorize.net/gateway/transact.dll?x_login=123'
'&x_trans_id=123456&x_version=3.1&x_delim_char=%3B&x_type=VOID'
'&x_delim_data=TRUE&x_tran_key=456&x_test_request=FALSE'
))
self.assertEqual(result, PARSED_SUCCESS)
| [
"mock.patch",
"authorizesauce.data.Address",
"six.moves.urllib.parse.parse_qsl",
"six.moves.urllib.parse.urlencode",
"authorizesauce.apis.transaction.TransactionAPI",
"datetime.date.today",
"six.BytesIO.__init__",
"authorizesauce.data.CreditCard",
"six.u"
] | [((2990, 3043), 'mock.patch', 'mock.patch', (['"""authorizesauce.apis.transaction.urlopen"""'], {}), "('authorizesauce.apis.transaction.urlopen')\n", (3000, 3043), False, 'import mock\n'), ((3435, 3488), 'mock.patch', 'mock.patch', (['"""authorizesauce.apis.transaction.urlopen"""'], {}), "('authorizesauce.apis.transaction.urlopen')\n", (3445, 3488), False, 'import mock\n'), ((3883, 3936), 'mock.patch', 'mock.patch', (['"""authorizesauce.apis.transaction.urlopen"""'], {}), "('authorizesauce.apis.transaction.urlopen')\n", (3893, 3936), False, 'import mock\n'), ((4168, 4221), 'mock.patch', 'mock.patch', (['"""authorizesauce.apis.transaction.urlopen"""'], {}), "('authorizesauce.apis.transaction.urlopen')\n", (4178, 4221), False, 'import mock\n'), ((5679, 5732), 'mock.patch', 'mock.patch', (['"""authorizesauce.apis.transaction.urlopen"""'], {}), "('authorizesauce.apis.transaction.urlopen')\n", (5689, 5732), False, 'import mock\n'), ((6449, 6502), 'mock.patch', 'mock.patch', (['"""authorizesauce.apis.transaction.urlopen"""'], {}), "('authorizesauce.apis.transaction.urlopen')\n", (6459, 6502), False, 'import mock\n'), ((7225, 7278), 'mock.patch', 'mock.patch', (['"""authorizesauce.apis.transaction.urlopen"""'], {}), "('authorizesauce.apis.transaction.urlopen')\n", (7235, 7278), False, 'import mock\n'), ((8403, 8456), 'mock.patch', 'mock.patch', (['"""authorizesauce.apis.transaction.urlopen"""'], {}), "('authorizesauce.apis.transaction.urlopen')\n", (8413, 8456), False, 'import mock\n'), ((9084, 9137), 'mock.patch', 'mock.patch', (['"""authorizesauce.apis.transaction.urlopen"""'], {}), "('authorizesauce.apis.transaction.urlopen')\n", (9094, 9137), False, 'import mock\n'), ((735, 774), 'six.BytesIO.__init__', 'BytesIO.__init__', (['self', '*args'], {}), '(self, *args, **kwargs)\n', (751, 774), False, 'from six import BytesIO, binary_type, u\n'), ((2402, 2430), 'authorizesauce.apis.transaction.TransactionAPI', 'TransactionAPI', (['"""123"""', '"""456"""'], {}), "('123', '456')\n", (2416, 2430), False, 'from authorizesauce.apis.transaction import PROD_URL, TEST_URL, TransactionAPI\n'), ((2643, 2694), 'authorizesauce.data.CreditCard', 'CreditCard', (['"""4111111111111111"""', 'self.year', '(1)', '"""911"""'], {}), "('4111111111111111', self.year, 1, '911')\n", (2653, 2694), False, 'from authorizesauce.data import Address, CreditCard\n'), ((2718, 2765), 'authorizesauce.data.Address', 'Address', (['"""45 Rose Ave"""', '"""Venice"""', '"""CA"""', '"""90291"""'], {}), "('45 Rose Ave', 'Venice', 'CA', '90291')\n", (2725, 2765), False, 'from authorizesauce.data import Address, CreditCard\n'), ((2811, 2839), 'authorizesauce.apis.transaction.TransactionAPI', 'TransactionAPI', (['"""123"""', '"""456"""'], {}), "('123', '456')\n", (2825, 2839), False, 'from authorizesauce.apis.transaction import PROD_URL, TEST_URL, TransactionAPI\n'), ((2898, 2939), 'authorizesauce.apis.transaction.TransactionAPI', 'TransactionAPI', (['"""123"""', '"""456"""'], {'debug': '(False)'}), "('123', '456', debug=False)\n", (2912, 2939), False, 'from authorizesauce.apis.transaction import PROD_URL, TEST_URL, TransactionAPI\n'), ((2268, 2287), 'six.moves.urllib.parse.parse_qsl', 'parse_qsl', (['_params1'], {}), '(_params1)\n', (2277, 2287), False, 'from six.moves.urllib.parse import parse_qsl, urlencode\n'), ((2302, 2321), 'six.moves.urllib.parse.parse_qsl', 'parse_qsl', (['_params2'], {}), '(_params2)\n', (2311, 2321), False, 'from six.moves.urllib.parse import parse_qsl, urlencode\n'), ((2593, 2605), 'datetime.date.today', 
'date.today', ([], {}), '()\n', (2603, 2605), False, 'from datetime import date\n'), ((3351, 3368), 'six.moves.urllib.parse.urlencode', 'urlencode', (['params'], {}), '(params)\n', (3360, 3368), False, 'from six.moves.urllib.parse import parse_qsl, urlencode\n'), ((3622, 3628), 'six.u', 'u', (['"""ã"""'], {}), "('ã')\n", (3623, 3628), False, 'from six import BytesIO, binary_type, u\n'), ((3643, 3649), 'six.u', 'u', (['"""ã"""'], {}), "('ã')\n", (3644, 3649), False, 'from six import BytesIO, binary_type, u\n')] |
import torch
from ....data.utils.boxes import centroids2corners, iou
def matching_strategy(targets, dboxes, **kwargs):
"""
    :param targets: list of Tensors, one per image, each of shape (object num, 4 + class_num)
        where the first 4 columns are the box centroids (cx, cy, w, h) and the remaining columns are the class labels
    :param dboxes: Tensor, shape is (default boxes num, 4)
        IMPORTANT: each row means (cx, cy, w, h)
    :param kwargs:
        threshold: (Optional) float, IoU threshold above which a default box is matched to an object. Default is 0.5
        batch_num: (Required) int, batch size
    :return:
        pos_indicator: Bool Tensor, shape = (batch, default box num). True where a default box is matched to an object, False where it is background.
        matched_targets: Tensor, shape = (batch, default box num, 4 + class_num) including background
"""
threshold = kwargs.pop('threshold', 0.5)
batch_num = kwargs.pop('batch_num')
device = dboxes.device
dboxes_num = dboxes.shape[0]
    # number of class columns = total columns minus the 4 localization values (cx, cy, w, h)
    class_num = targets[0].shape[1] - 4
    # convert centroid coordinates (cx, cy, w, h) to corner (xmin, ymin, xmax, ymax) coordinates
dboxes_mm = centroids2corners(dboxes)
# create returned empty Tensor
pos_indicator, matched_targets = torch.empty((batch_num, dboxes_num), device=device, dtype=torch.bool), torch.empty((batch_num, dboxes_num, 4 + class_num), device=device)
# matching for each batch
index = 0
for b, target in enumerate(targets):
targets_loc, targets_conf = target[:, :4], target[:, 4:]
# overlaps' shape = (object num, default box num)
overlaps = iou(centroids2corners(targets_loc), dboxes_mm.clone())
"""
best_overlap_per_object, best_dbox_ind_per_object = overlaps.max(dim=1)
best_overlap_per_dbox, best_object_ind_per_dbox = overlaps.max(dim=0)
for object_ind, dbox_ind in enumerate(best_dbox_ind_per_object):
best_object_ind_per_dbox[dbox_ind] = object_ind
best_overlap_per_dbox.index_fill_(0, best_dbox_ind_per_object, 999)
pos_ind = best_overlap_per_dbox > threshold
pos_indicator[b] = pos_ind
gt_loc[b], gt_conf[b] = targets[best_object_ind_per_dbox], targets_conf[best_object_ind_per_dbox]
neg_ind = torch.logical_not(pos_ind)
gt_conf[b, neg_ind] = 0
gt_conf[b, neg_ind, -1] = 1
"""
# get maximum overlap value for each default box
        # shape = (default boxes num,)
overlaps_per_dbox, object_indices = overlaps.max(dim=0)
#object_indices = object_indices.long() # for fancy indexing
# get maximum overlap values for each object
        # shape = (object num,)
overlaps_per_object, dbox_indices = overlaps.max(dim=1)
for obj_ind, dbox_ind in enumerate(dbox_indices):
object_indices[dbox_ind] = obj_ind
overlaps_per_dbox.index_fill_(0, dbox_indices, threshold + 1)# ensure N!=0
pos_ind = overlaps_per_dbox > threshold
# assign targets
matched_targets[b, :, :4], matched_targets[b, :, 4:] = targets_loc[object_indices], targets_conf[object_indices]
pos_indicator[b] = pos_ind
# set background flag
neg_ind = torch.logical_not(pos_ind)
matched_targets[b, neg_ind, 4:] = 0
matched_targets[b, neg_ind, -1] = 1
return pos_indicator, matched_targets
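# Illustrative usage sketch (hedged, not authoritative). It assumes per-image target tensors
# already contain (cx, cy, w, h) followed by class scores with the background slot last,
# e.g. with class_num=2 (object, background):
#
#   dboxes = torch.tensor([[0.50, 0.50, 1.00, 1.00],
#                          [0.25, 0.25, 0.50, 0.50]])
#   targets = [torch.tensor([[0.5, 0.5, 0.9, 0.9, 1.0, 0.0]]),   # image 0: one object
#              torch.tensor([[0.3, 0.3, 0.4, 0.4, 1.0, 0.0]])]   # image 1: one object
#   pos, matched = matching_strategy(targets, dboxes, threshold=0.5, batch_num=len(targets))
#   # pos.shape == (2, 2) and matched.shape == (2, 2, 6); unmatched boxes get matched[..., -1] == 1.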
def matching_strategy_quads(targets, dboxes, **kwargs):
"""
    :param targets: list of Tensors, one per image, each of shape (object num, 4 + 8 + class_num)
        where the columns are 4 centroid values (cx, cy, w, h), 8 quadrilateral corner values (x1, y1, ..., x4, y4) and the class labels
    :param dboxes: Tensor, shape is (default boxes num, 4)
        IMPORTANT: each row means (cx, cy, w, h)
    :param kwargs:
        threshold: (Optional) float, IoU threshold above which a default box is matched to an object. Default is 0.5
        batch_num: (Required) int, batch size
    :return:
        pos_indicator: Bool Tensor, shape = (batch, default box num). True where a default box is matched to an object, False where it is background.
        matched_targets: Tensor, shape = (batch, default box num, 4 + 8 + class_num) including background
"""
threshold = kwargs.pop('threshold', 0.5)
batch_num = kwargs.pop('batch_num')
device = dboxes.device
dboxes_num = dboxes.shape[0]
    # number of class columns = total columns minus the 4 localization and 8 quadrilateral values
    class_num = targets[0].shape[1] - 4 - 8
    # convert centroid coordinates (cx, cy, w, h) to corner (xmin, ymin, xmax, ymax) coordinates
dboxes_mm = centroids2corners(dboxes)
# create returned empty Tensor
pos_indicator, matched_targets = torch.empty((batch_num, dboxes_num), device=device, dtype=torch.bool), torch.empty(
(batch_num, dboxes_num, 4 + 8 + class_num), device=device)
# matching for each batch
index = 0
for b, target in enumerate(targets):
targets_loc, targets_quad, targets_conf = target[:, :4], target[:, 4:12], target[:, 12:]
# overlaps' shape = (object num, default box num)
overlaps = iou(centroids2corners(targets_loc), dboxes_mm.clone())
"""
best_overlap_per_object, best_dbox_ind_per_object = overlaps.max(dim=1)
best_overlap_per_dbox, best_object_ind_per_dbox = overlaps.max(dim=0)
for object_ind, dbox_ind in enumerate(best_dbox_ind_per_object):
best_object_ind_per_dbox[dbox_ind] = object_ind
best_overlap_per_dbox.index_fill_(0, best_dbox_ind_per_object, 999)
pos_ind = best_overlap_per_dbox > threshold
pos_indicator[b] = pos_ind
gt_loc[b], gt_conf[b] = targets[best_object_ind_per_dbox], targets_conf[best_object_ind_per_dbox]
neg_ind = torch.logical_not(pos_ind)
gt_conf[b, neg_ind] = 0
gt_conf[b, neg_ind, -1] = 1
"""
# get maximum overlap value for each default box
        # shape = (default boxes num,)
overlaps_per_dbox, object_indices = overlaps.max(dim=0)
# object_indices = object_indices.long() # for fancy indexing
# get maximum overlap values for each object
        # shape = (object num,)
overlaps_per_object, dbox_indices = overlaps.max(dim=1)
for obj_ind, dbox_ind in enumerate(dbox_indices):
object_indices[dbox_ind] = obj_ind
overlaps_per_dbox.index_fill_(0, dbox_indices, threshold + 1) # ensure N!=0
pos_ind = overlaps_per_dbox > threshold
# assign targets
matched_targets[b, :, :4], matched_targets[b, :, 4:12], matched_targets[b, :, 12:] = \
targets_loc[object_indices], targets_quad[object_indices], targets_conf[object_indices]
pos_indicator[b] = pos_ind
# set background flag
neg_ind = torch.logical_not(pos_ind)
matched_targets[b, neg_ind, 12:] = 0
matched_targets[b, neg_ind, -1] = 1
return pos_indicator, matched_targets
| [
"torch.logical_not",
"torch.empty"
] | [((1129, 1198), 'torch.empty', 'torch.empty', (['(batch_num, dboxes_num)'], {'device': 'device', 'dtype': 'torch.bool'}), '((batch_num, dboxes_num), device=device, dtype=torch.bool)\n', (1140, 1198), False, 'import torch\n'), ((1200, 1266), 'torch.empty', 'torch.empty', (['(batch_num, dboxes_num, 4 + class_num)'], {'device': 'device'}), '((batch_num, dboxes_num, 4 + class_num), device=device)\n', (1211, 1266), False, 'import torch\n'), ((3110, 3136), 'torch.logical_not', 'torch.logical_not', (['pos_ind'], {}), '(pos_ind)\n', (3127, 3136), False, 'import torch\n'), ((4369, 4438), 'torch.empty', 'torch.empty', (['(batch_num, dboxes_num)'], {'device': 'device', 'dtype': 'torch.bool'}), '((batch_num, dboxes_num), device=device, dtype=torch.bool)\n', (4380, 4438), False, 'import torch\n'), ((4440, 4510), 'torch.empty', 'torch.empty', (['(batch_num, dboxes_num, 4 + 8 + class_num)'], {'device': 'device'}), '((batch_num, dboxes_num, 4 + 8 + class_num), device=device)\n', (4451, 4510), False, 'import torch\n'), ((6472, 6498), 'torch.logical_not', 'torch.logical_not', (['pos_ind'], {}), '(pos_ind)\n', (6489, 6498), False, 'import torch\n')] |
"""Routines for numerical differentiation."""
from __future__ import division
import numpy as np
from numpy.linalg import norm
from scipy.sparse.linalg import LinearOperator
from ..sparse import issparse, csc_matrix, csr_matrix, coo_matrix, find
from ._group_columns import group_dense, group_sparse
EPS = np.finfo(np.float64).eps
def _adjust_scheme_to_bounds(x0, h, num_steps, scheme, lb, ub):
"""Adjust final difference scheme to the presence of bounds.
Parameters
----------
x0 : ndarray, shape (n,)
Point at which we wish to estimate derivative.
h : ndarray, shape (n,)
Desired finite difference steps.
num_steps : int
Number of `h` steps in one direction required to implement finite
difference scheme. For example, 2 means that we need to evaluate
f(x0 + 2 * h) or f(x0 - 2 * h)
scheme : {'1-sided', '2-sided'}
Whether steps in one or both directions are required. In other
words '1-sided' applies to forward and backward schemes, '2-sided'
applies to center schemes.
lb : ndarray, shape (n,)
Lower bounds on independent variables.
ub : ndarray, shape (n,)
Upper bounds on independent variables.
Returns
-------
h_adjusted : ndarray, shape (n,)
Adjusted step sizes. Step size decreases only if a sign flip or
switching to one-sided scheme doesn't allow to take a full step.
use_one_sided : ndarray of bool, shape (n,)
Whether to switch to one-sided scheme. Informative only for
``scheme='2-sided'``.
"""
if scheme == '1-sided':
use_one_sided = np.ones_like(h, dtype=bool)
elif scheme == '2-sided':
h = np.abs(h)
use_one_sided = np.zeros_like(h, dtype=bool)
else:
raise ValueError("`scheme` must be '1-sided' or '2-sided'.")
if np.all((lb == -np.inf) & (ub == np.inf)):
return h, use_one_sided
h_total = h * num_steps
h_adjusted = h.copy()
lower_dist = x0 - lb
upper_dist = ub - x0
if scheme == '1-sided':
x = x0 + h_total
violated = (x < lb) | (x > ub)
fitting = np.abs(h_total) <= np.maximum(lower_dist, upper_dist)
h_adjusted[violated & fitting] *= -1
forward = (upper_dist >= lower_dist) & ~fitting
h_adjusted[forward] = upper_dist[forward] / num_steps
backward = (upper_dist < lower_dist) & ~fitting
h_adjusted[backward] = -lower_dist[backward] / num_steps
elif scheme == '2-sided':
central = (lower_dist >= h_total) & (upper_dist >= h_total)
forward = (upper_dist >= lower_dist) & ~central
h_adjusted[forward] = np.minimum(
h[forward], 0.5 * upper_dist[forward] / num_steps)
use_one_sided[forward] = True
backward = (upper_dist < lower_dist) & ~central
h_adjusted[backward] = -np.minimum(
h[backward], 0.5 * lower_dist[backward] / num_steps)
use_one_sided[backward] = True
min_dist = np.minimum(upper_dist, lower_dist) / num_steps
adjusted_central = (~central & (np.abs(h_adjusted) <= min_dist))
h_adjusted[adjusted_central] = min_dist[adjusted_central]
use_one_sided[adjusted_central] = False
return h_adjusted, use_one_sided
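# Hedged numeric illustration of the adjustment above (not part of the algorithm itself):
# with x0 = 0.9, h = 0.2, num_steps = 1, bounds [0, 1] and scheme='1-sided', the forward
# step x0 + h = 1.1 violates the upper bound; because |h| <= max(lower_dist, upper_dist)
# = max(0.9, 0.1), the step sign is simply flipped to h = -0.2 (a backward difference)
# rather than being shrunk.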
relative_step = {"2-point": EPS**0.5,
"3-point": EPS**(1/3),
"cs": EPS**0.5}
def _compute_absolute_step(rel_step, x0, method):
if rel_step is None:
rel_step = relative_step[method]
sign_x0 = (x0 >= 0).astype(float) * 2 - 1
return rel_step * sign_x0 * np.maximum(1.0, np.abs(x0))
def _prepare_bounds(bounds, x0):
lb, ub = [np.asarray(b, dtype=float) for b in bounds]
if lb.ndim == 0:
lb = np.resize(lb, x0.shape)
if ub.ndim == 0:
ub = np.resize(ub, x0.shape)
return lb, ub
def group_columns(A, order=0):
"""Group columns of a 2-D matrix for sparse finite differencing [1]_.
Two columns are in the same group if in each row at least one of them
has zero. A greedy sequential algorithm is used to construct groups.
Parameters
----------
A : array_like or sparse matrix, shape (m, n)
Matrix of which to group columns.
order : int, iterable of int with shape (n,) or None
Permutation array which defines the order of columns enumeration.
If int or None, a random permutation is used with `order` used as
a random seed. Default is 0, that is use a random permutation but
guarantee repeatability.
Returns
-------
groups : ndarray of int, shape (n,)
Contains values from 0 to n_groups-1, where n_groups is the number
of found groups. Each value ``groups[i]`` is an index of a group to
which ith column assigned. The procedure was helpful only if
n_groups is significantly less than n.
References
----------
.. [1] <NAME>, <NAME>, and <NAME>, "On the estimation of
sparse Jacobian matrices", Journal of the Institute of Mathematics
and its Applications, 13 (1974), pp. 117-120.
"""
if issparse(A):
A = csc_matrix(A)
else:
A = np.atleast_2d(A)
A = (A != 0).astype(np.int32)
if A.ndim != 2:
raise ValueError("`A` must be 2-dimensional.")
m, n = A.shape
if order is None or np.isscalar(order):
rng = np.random.RandomState(order)
order = rng.permutation(n)
else:
order = np.asarray(order)
if order.shape != (n,):
raise ValueError("`order` has incorrect shape.")
A = A[:, order]
if issparse(A):
groups = group_sparse(m, n, A.indices, A.indptr)
else:
groups = group_dense(m, n, A)
groups[order] = groups.copy()
return groups
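# Hedged illustration of the grouping above (exact labels depend on the random permutation):
# for the sparsity pattern
#   A = [[1, 0, 1],
#        [0, 1, 1]]
# columns 0 and 1 never have non-zeros in the same row, so they can share a group and be
# perturbed simultaneously, while column 2 overlaps both and gets its own group, e.g.
#   group_columns(A) -> array([0, 0, 1])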
def approx_derivative(fun, x0, method='3-point', rel_step=None, f0=None,
bounds=(-np.inf, np.inf), sparsity=None,
as_linear_operator=False, args=(), kwargs={}):
"""Compute finite difference approximation of the derivatives of a
vector-valued function.
If a function maps from R^n to R^m, its derivatives form m-by-n matrix
called the Jacobian, where an element (i, j) is a partial derivative of
f[i] with respect to x[j].
Parameters
----------
fun : callable
Function of which to estimate the derivatives. The argument x
passed to this function is ndarray of shape (n,) (never a scalar
even if n=1). It must return 1-D array_like of shape (m,) or a scalar.
x0 : array_like of shape (n,) or float
Point at which to estimate the derivatives. Float will be converted
to a 1-D array.
method : {'3-point', '2-point', 'cs'}, optional
Finite difference method to use:
- '2-point' - use the first order accuracy forward or backward
difference.
- '3-point' - use central difference in interior points and the
second order accuracy forward or backward difference
near the boundary.
- 'cs' - use a complex-step finite difference scheme. This assumes
that the user function is real-valued and can be
analytically continued to the complex plane. Otherwise,
produces bogus results.
rel_step : None or array_like, optional
Relative step size to use. The absolute step size is computed as
``h = rel_step * sign(x0) * max(1, abs(x0))``, possibly adjusted to
fit into the bounds. For ``method='3-point'`` the sign of `h` is
ignored. If None (default) then step is selected automatically,
see Notes.
f0 : None or array_like, optional
If not None it is assumed to be equal to ``fun(x0)``, in this case
the ``fun(x0)`` is not called. Default is None.
bounds : tuple of array_like, optional
Lower and upper bounds on independent variables. Defaults to no bounds.
Each bound must match the size of `x0` or be a scalar, in the latter
case the bound will be the same for all variables. Use it to limit the
range of function evaluation. Bounds checking is not implemented
when `as_linear_operator` is True.
sparsity : {None, array_like, sparse matrix, 2-tuple}, optional
Defines a sparsity structure of the Jacobian matrix. If the Jacobian
matrix is known to have only few non-zero elements in each row, then
it's possible to estimate its several columns by a single function
evaluation [3]_. To perform such economic computations two ingredients
are required:
* structure : array_like or sparse matrix of shape (m, n). A zero
element means that a corresponding element of the Jacobian
identically equals to zero.
* groups : array_like of shape (n,). A column grouping for a given
sparsity structure, use `group_columns` to obtain it.
A single array or a sparse matrix is interpreted as a sparsity
structure, and groups are computed inside the function. A tuple is
interpreted as (structure, groups). If None (default), a standard
dense differencing will be used.
Note, that sparse differencing makes sense only for large Jacobian
matrices where each row contains few non-zero elements.
as_linear_operator : bool, optional
When True the function returns an `scipy.sparse.linalg.LinearOperator`.
Otherwise it returns a dense array or a sparse matrix depending on
`sparsity`. The linear operator provides an efficient way of computing
``J.dot(p)`` for any vector ``p`` of shape (n,), but does not allow
direct access to individual elements of the matrix. By default
`as_linear_operator` is False.
args, kwargs : tuple and dict, optional
Additional arguments passed to `fun`. Both empty by default.
The calling signature is ``fun(x, *args, **kwargs)``.
Returns
-------
J : {ndarray, sparse matrix, LinearOperator}
Finite difference approximation of the Jacobian matrix.
If `as_linear_operator` is True returns a LinearOperator
with shape (m, n). Otherwise it returns a dense array or sparse
matrix depending on how `sparsity` is defined. If `sparsity`
is None then a ndarray with shape (m, n) is returned. If
`sparsity` is not None returns a csr_matrix with shape (m, n).
For sparse matrices and linear operators it is always returned as
a 2-D structure, for ndarrays, if m=1 it is returned
as a 1-D gradient array with shape (n,).
See Also
--------
check_derivative : Check correctness of a function computing derivatives.
Notes
-----
If `rel_step` is not provided, it assigned to ``EPS**(1/s)``, where EPS is
machine epsilon for float64 numbers, s=2 for '2-point' method and s=3 for
'3-point' method. Such relative step approximately minimizes a sum of
truncation and round-off errors, see [1]_.
A finite difference scheme for '3-point' method is selected automatically.
The well-known central difference scheme is used for points sufficiently
far from the boundary, and 3-point forward or backward scheme is used for
points near the boundary. Both schemes have the second-order accuracy in
terms of Taylor expansion. Refer to [2]_ for the formulas of 3-point
forward and backward difference schemes.
For dense differencing when m=1 Jacobian is returned with a shape (n,),
on the other hand when n=1 Jacobian is returned with a shape (m, 1).
Our motivation is the following: a) It handles a case of gradient
computation (m=1) in a conventional way. b) It clearly separates these two
    different cases. c) In all cases np.atleast_2d can be called to get 2-D
Jacobian with correct dimensions.
References
----------
    .. [1] W. H. Press et al. "Numerical Recipes. The Art of Scientific
Computing. 3rd edition", sec. 5.7.
.. [2] <NAME>, <NAME>, and <NAME>, "On the estimation of
sparse Jacobian matrices", Journal of the Institute of Mathematics
and its Applications, 13 (1974), pp. 117-120.
.. [3] <NAME>, "Generation of Finite Difference Formulas on
Arbitrarily Spaced Grids", Mathematics of Computation 51, 1988.
Examples
--------
>>> import numpy as np
>>> from scipy.optimize import approx_derivative
>>>
>>> def f(x, c1, c2):
... return np.array([x[0] * np.sin(c1 * x[1]),
... x[0] * np.cos(c2 * x[1])])
...
>>> x0 = np.array([1.0, 0.5 * np.pi])
>>> approx_derivative(f, x0, args=(1, 2))
array([[ 1., 0.],
[-1., 0.]])
Bounds can be used to limit the region of function evaluation.
In the example below we compute left and right derivative at point 1.0.
>>> def g(x):
... return x**2 if x >= 1 else x
...
>>> x0 = 1.0
>>> approx_derivative(g, x0, bounds=(-np.inf, 1.0))
array([ 1.])
>>> approx_derivative(g, x0, bounds=(1.0, np.inf))
array([ 2.])
"""
if method not in ['2-point', '3-point', 'cs']:
raise ValueError("Unknown method '%s'. " % method)
x0 = np.atleast_1d(x0)
if x0.ndim > 1:
raise ValueError("`x0` must have at most 1 dimension.")
lb, ub = _prepare_bounds(bounds, x0)
if lb.shape != x0.shape or ub.shape != x0.shape:
raise ValueError("Inconsistent shapes between bounds and `x0`.")
if as_linear_operator and not (np.all(np.isinf(lb))
and np.all(np.isinf(ub))):
raise ValueError("Bounds not supported when "
"`as_linear_operator` is True.")
def fun_wrapped(x):
f = np.atleast_1d(fun(x, *args, **kwargs))
if f.ndim > 1:
raise RuntimeError("`fun` return value has "
"more than 1 dimension.")
return f
if f0 is None:
f0 = fun_wrapped(x0)
else:
f0 = np.atleast_1d(f0)
if f0.ndim > 1:
raise ValueError("`f0` passed has more than 1 dimension.")
if np.any((x0 < lb) | (x0 > ub)):
raise ValueError("`x0` violates bound constraints.")
if as_linear_operator:
if rel_step is None:
rel_step = relative_step[method]
return _linear_operator_difference(fun_wrapped, x0,
f0, rel_step, method)
else:
h = _compute_absolute_step(rel_step, x0, method)
if method == '2-point':
h, use_one_sided = _adjust_scheme_to_bounds(
x0, h, 1, '1-sided', lb, ub)
elif method == '3-point':
h, use_one_sided = _adjust_scheme_to_bounds(
x0, h, 1, '2-sided', lb, ub)
elif method == 'cs':
use_one_sided = False
if sparsity is None:
return _dense_difference(fun_wrapped, x0, f0, h,
use_one_sided, method)
else:
if not issparse(sparsity) and len(sparsity) == 2:
structure, groups = sparsity
else:
structure = sparsity
groups = group_columns(sparsity)
if issparse(structure):
structure = csc_matrix(structure)
else:
structure = np.atleast_2d(structure)
groups = np.atleast_1d(groups)
return _sparse_difference(fun_wrapped, x0, f0, h,
use_one_sided, structure,
groups, method)
def _linear_operator_difference(fun, x0, f0, h, method):
m = f0.size
n = x0.size
if method == '2-point':
def matvec(p):
if np.array_equal(p, np.zeros_like(p)):
return np.zeros(m)
dx = h / norm(p)
x = x0 + dx*p
df = fun(x) - f0
return df / dx
elif method == '3-point':
def matvec(p):
if np.array_equal(p, np.zeros_like(p)):
return np.zeros(m)
dx = 2*h / norm(p)
x1 = x0 - (dx/2)*p
x2 = x0 + (dx/2)*p
f1 = fun(x1)
f2 = fun(x2)
df = f2 - f1
return df / dx
elif method == 'cs':
def matvec(p):
if np.array_equal(p, np.zeros_like(p)):
return np.zeros(m)
dx = h / norm(p)
x = x0 + dx*p*1.j
f1 = fun(x)
df = f1.imag
return df / dx
else:
raise RuntimeError("Never be here.")
return LinearOperator((m, n), matvec)
def _dense_difference(fun, x0, f0, h, use_one_sided, method):
m = f0.size
n = x0.size
J_transposed = np.empty((n, m))
h_vecs = np.diag(h)
for i in range(h.size):
if method == '2-point':
x = x0 + h_vecs[i]
dx = x[i] - x0[i] # Recompute dx as exactly representable number.
df = fun(x) - f0
elif method == '3-point' and use_one_sided[i]:
x1 = x0 + h_vecs[i]
x2 = x0 + 2 * h_vecs[i]
dx = x2[i] - x0[i]
f1 = fun(x1)
f2 = fun(x2)
df = -3.0 * f0 + 4 * f1 - f2
elif method == '3-point' and not use_one_sided[i]:
x1 = x0 - h_vecs[i]
x2 = x0 + h_vecs[i]
dx = x2[i] - x1[i]
f1 = fun(x1)
f2 = fun(x2)
df = f2 - f1
elif method == 'cs':
f1 = fun(x0 + h_vecs[i]*1.j)
df = f1.imag
dx = h_vecs[i, i]
else:
raise RuntimeError("Never be here.")
J_transposed[i] = df / dx
if m == 1:
J_transposed = np.ravel(J_transposed)
return J_transposed.T
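# Hedged note on the finite difference formulas used above: in the one-sided 3-point branch
# dx = x2[i] - x0[i] = 2*h, so df / dx implements the second-order forward difference
#   f'(x0) ~ (-3*f(x0) + 4*f(x0 + h) - f(x0 + 2*h)) / (2*h),
# while the two-sided branch reduces to the central difference (f(x0 + h) - f(x0 - h)) / (2*h).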
def _sparse_difference(fun, x0, f0, h, use_one_sided,
structure, groups, method):
m = f0.size
n = x0.size
row_indices = []
col_indices = []
fractions = []
n_groups = np.max(groups) + 1
for group in range(n_groups):
# Perturb variables which are in the same group simultaneously.
e = np.equal(group, groups)
h_vec = h * e
if method == '2-point':
x = x0 + h_vec
dx = x - x0
df = fun(x) - f0
# The result is written to columns which correspond to perturbed
# variables.
cols, = np.nonzero(e)
# Find all non-zero elements in selected columns of Jacobian.
i, j, _ = find(structure[:, cols])
# Restore column indices in the full array.
j = cols[j]
elif method == '3-point':
# Here we do conceptually the same but separate one-sided
# and two-sided schemes.
x1 = x0.copy()
x2 = x0.copy()
mask_1 = use_one_sided & e
x1[mask_1] += h_vec[mask_1]
x2[mask_1] += 2 * h_vec[mask_1]
mask_2 = ~use_one_sided & e
x1[mask_2] -= h_vec[mask_2]
x2[mask_2] += h_vec[mask_2]
dx = np.zeros(n)
dx[mask_1] = x2[mask_1] - x0[mask_1]
dx[mask_2] = x2[mask_2] - x1[mask_2]
f1 = fun(x1)
f2 = fun(x2)
cols, = np.nonzero(e)
i, j, _ = find(structure[:, cols])
j = cols[j]
mask = use_one_sided[j]
df = np.empty(m)
rows = i[mask]
df[rows] = -3 * f0[rows] + 4 * f1[rows] - f2[rows]
rows = i[~mask]
df[rows] = f2[rows] - f1[rows]
elif method == 'cs':
f1 = fun(x0 + h_vec*1.j)
df = f1.imag
dx = h_vec
cols, = np.nonzero(e)
i, j, _ = find(structure[:, cols])
j = cols[j]
else:
raise ValueError("Never be here.")
# All that's left is to compute the fraction. We store i, j and
# fractions as separate arrays and later construct coo_matrix.
row_indices.append(i)
col_indices.append(j)
fractions.append(df[i] / dx[j])
row_indices = np.hstack(row_indices)
col_indices = np.hstack(col_indices)
fractions = np.hstack(fractions)
J = coo_matrix((fractions, (row_indices, col_indices)), shape=(m, n))
return csr_matrix(J)
def check_derivative(fun, jac, x0, bounds=(-np.inf, np.inf), args=(),
kwargs={}):
"""Check correctness of a function computing derivatives (Jacobian or
gradient) by comparison with a finite difference approximation.
Parameters
----------
fun : callable
Function of which to estimate the derivatives. The argument x
passed to this function is ndarray of shape (n,) (never a scalar
even if n=1). It must return 1-D array_like of shape (m,) or a scalar.
jac : callable
Function which computes Jacobian matrix of `fun`. It must work with
argument x the same way as `fun`. The return value must be array_like
or sparse matrix with an appropriate shape.
x0 : array_like of shape (n,) or float
Point at which to estimate the derivatives. Float will be converted
to 1-D array.
bounds : 2-tuple of array_like, optional
Lower and upper bounds on independent variables. Defaults to no bounds.
Each bound must match the size of `x0` or be a scalar, in the latter
case the bound will be the same for all variables. Use it to limit the
range of function evaluation.
args, kwargs : tuple and dict, optional
Additional arguments passed to `fun` and `jac`. Both empty by default.
The calling signature is ``fun(x, *args, **kwargs)`` and the same
for `jac`.
Returns
-------
accuracy : float
The maximum among all relative errors for elements with absolute values
higher than 1 and absolute errors for elements with absolute values
less or equal than 1. If `accuracy` is on the order of 1e-6 or lower,
then it is likely that your `jac` implementation is correct.
See Also
--------
approx_derivative : Compute finite difference approximation of derivative.
Examples
--------
>>> import numpy as np
>>> from scipy.optimize import check_derivative
>>>
>>>
>>> def f(x, c1, c2):
... return np.array([x[0] * np.sin(c1 * x[1]),
... x[0] * np.cos(c2 * x[1])])
...
>>> def jac(x, c1, c2):
... return np.array([
... [np.sin(c1 * x[1]), c1 * x[0] * np.cos(c1 * x[1])],
... [np.cos(c2 * x[1]), -c2 * x[0] * np.sin(c2 * x[1])]
... ])
...
>>>
>>> x0 = np.array([1.0, 0.5 * np.pi])
>>> check_derivative(f, jac, x0, args=(1, 2))
2.4492935982947064e-16
"""
J_to_test = jac(x0, *args, **kwargs)
if issparse(J_to_test):
J_diff = approx_derivative(fun, x0, bounds=bounds, sparsity=J_to_test,
args=args, kwargs=kwargs)
J_to_test = csr_matrix(J_to_test)
abs_err = J_to_test - J_diff
i, j, abs_err_data = find(abs_err)
J_diff_data = np.asarray(J_diff[i, j]).ravel()
return np.max(np.abs(abs_err_data) /
np.maximum(1, np.abs(J_diff_data)))
else:
J_diff = approx_derivative(fun, x0, bounds=bounds,
args=args, kwargs=kwargs)
abs_err = np.abs(J_to_test - J_diff)
return np.max(abs_err / np.maximum(1, np.abs(J_diff)))
| [
"scipy.sparse.linalg.LinearOperator",
"numpy.hstack",
"numpy.equal",
"numpy.linalg.norm",
"numpy.random.RandomState",
"numpy.atleast_2d",
"numpy.isscalar",
"numpy.asarray",
"numpy.max",
"numpy.resize",
"numpy.empty",
"numpy.maximum",
"numpy.isinf",
"numpy.abs",
"numpy.any",
"numpy.nonzero",
"numpy.finfo",
"numpy.atleast_1d",
"numpy.ones_like",
"numpy.minimum",
"numpy.diag",
"numpy.zeros",
"numpy.ravel",
"numpy.all",
"numpy.zeros_like"
] | [((310, 330), 'numpy.finfo', 'np.finfo', (['np.float64'], {}), '(np.float64)\n', (318, 330), True, 'import numpy as np\n'), ((1858, 1898), 'numpy.all', 'np.all', (['((lb == -np.inf) & (ub == np.inf))'], {}), '((lb == -np.inf) & (ub == np.inf))\n', (1864, 1898), True, 'import numpy as np\n'), ((13340, 13357), 'numpy.atleast_1d', 'np.atleast_1d', (['x0'], {}), '(x0)\n', (13353, 13357), True, 'import numpy as np\n'), ((14265, 14294), 'numpy.any', 'np.any', (['((x0 < lb) | (x0 > ub))'], {}), '((x0 < lb) | (x0 > ub))\n', (14271, 14294), True, 'import numpy as np\n'), ((16760, 16790), 'scipy.sparse.linalg.LinearOperator', 'LinearOperator', (['(m, n)', 'matvec'], {}), '((m, n), matvec)\n', (16774, 16790), False, 'from scipy.sparse.linalg import LinearOperator\n'), ((16906, 16922), 'numpy.empty', 'np.empty', (['(n, m)'], {}), '((n, m))\n', (16914, 16922), True, 'import numpy as np\n'), ((16936, 16946), 'numpy.diag', 'np.diag', (['h'], {}), '(h)\n', (16943, 16946), True, 'import numpy as np\n'), ((20279, 20301), 'numpy.hstack', 'np.hstack', (['row_indices'], {}), '(row_indices)\n', (20288, 20301), True, 'import numpy as np\n'), ((20320, 20342), 'numpy.hstack', 'np.hstack', (['col_indices'], {}), '(col_indices)\n', (20329, 20342), True, 'import numpy as np\n'), ((20359, 20379), 'numpy.hstack', 'np.hstack', (['fractions'], {}), '(fractions)\n', (20368, 20379), True, 'import numpy as np\n'), ((1638, 1665), 'numpy.ones_like', 'np.ones_like', (['h'], {'dtype': 'bool'}), '(h, dtype=bool)\n', (1650, 1665), True, 'import numpy as np\n'), ((3669, 3695), 'numpy.asarray', 'np.asarray', (['b'], {'dtype': 'float'}), '(b, dtype=float)\n', (3679, 3695), True, 'import numpy as np\n'), ((3747, 3770), 'numpy.resize', 'np.resize', (['lb', 'x0.shape'], {}), '(lb, x0.shape)\n', (3756, 3770), True, 'import numpy as np\n'), ((3806, 3829), 'numpy.resize', 'np.resize', (['ub', 'x0.shape'], {}), '(ub, x0.shape)\n', (3815, 3829), True, 'import numpy as np\n'), ((5174, 5190), 'numpy.atleast_2d', 'np.atleast_2d', (['A'], {}), '(A)\n', (5187, 5190), True, 'import numpy as np\n'), ((5350, 5368), 'numpy.isscalar', 'np.isscalar', (['order'], {}), '(order)\n', (5361, 5368), True, 'import numpy as np\n'), ((5384, 5412), 'numpy.random.RandomState', 'np.random.RandomState', (['order'], {}), '(order)\n', (5405, 5412), True, 'import numpy as np\n'), ((5474, 5491), 'numpy.asarray', 'np.asarray', (['order'], {}), '(order)\n', (5484, 5491), True, 'import numpy as np\n'), ((14144, 14161), 'numpy.atleast_1d', 'np.atleast_1d', (['f0'], {}), '(f0)\n', (14157, 14161), True, 'import numpy as np\n'), ((17883, 17905), 'numpy.ravel', 'np.ravel', (['J_transposed'], {}), '(J_transposed)\n', (17891, 17905), True, 'import numpy as np\n'), ((18149, 18163), 'numpy.max', 'np.max', (['groups'], {}), '(groups)\n', (18155, 18163), True, 'import numpy as np\n'), ((18286, 18309), 'numpy.equal', 'np.equal', (['group', 'groups'], {}), '(group, groups)\n', (18294, 18309), True, 'import numpy as np\n'), ((23611, 23637), 'numpy.abs', 'np.abs', (['(J_to_test - J_diff)'], {}), '(J_to_test - J_diff)\n', (23617, 23637), True, 'import numpy as np\n'), ((1708, 1717), 'numpy.abs', 'np.abs', (['h'], {}), '(h)\n', (1714, 1717), True, 'import numpy as np\n'), ((1742, 1770), 'numpy.zeros_like', 'np.zeros_like', (['h'], {'dtype': 'bool'}), '(h, dtype=bool)\n', (1755, 1770), True, 'import numpy as np\n'), ((2149, 2164), 'numpy.abs', 'np.abs', (['h_total'], {}), '(h_total)\n', (2155, 2164), True, 'import numpy as np\n'), ((2168, 2202), 'numpy.maximum', 'np.maximum', 
(['lower_dist', 'upper_dist'], {}), '(lower_dist, upper_dist)\n', (2178, 2202), True, 'import numpy as np\n'), ((2673, 2734), 'numpy.minimum', 'np.minimum', (['h[forward]', '(0.5 * upper_dist[forward] / num_steps)'], {}), '(h[forward], 0.5 * upper_dist[forward] / num_steps)\n', (2683, 2734), True, 'import numpy as np\n'), ((3608, 3618), 'numpy.abs', 'np.abs', (['x0'], {}), '(x0)\n', (3614, 3618), True, 'import numpy as np\n'), ((15542, 15563), 'numpy.atleast_1d', 'np.atleast_1d', (['groups'], {}), '(groups)\n', (15555, 15563), True, 'import numpy as np\n'), ((18567, 18580), 'numpy.nonzero', 'np.nonzero', (['e'], {}), '(e)\n', (18577, 18580), True, 'import numpy as np\n'), ((2875, 2938), 'numpy.minimum', 'np.minimum', (['h[backward]', '(0.5 * lower_dist[backward] / num_steps)'], {}), '(h[backward], 0.5 * lower_dist[backward] / num_steps)\n', (2885, 2938), True, 'import numpy as np\n'), ((3011, 3045), 'numpy.minimum', 'np.minimum', (['upper_dist', 'lower_dist'], {}), '(upper_dist, lower_dist)\n', (3021, 3045), True, 'import numpy as np\n'), ((15495, 15519), 'numpy.atleast_2d', 'np.atleast_2d', (['structure'], {}), '(structure)\n', (15508, 15519), True, 'import numpy as np\n'), ((15920, 15936), 'numpy.zeros_like', 'np.zeros_like', (['p'], {}), '(p)\n', (15933, 15936), True, 'import numpy as np\n'), ((15962, 15973), 'numpy.zeros', 'np.zeros', (['m'], {}), '(m)\n', (15970, 15973), True, 'import numpy as np\n'), ((15995, 16002), 'numpy.linalg.norm', 'norm', (['p'], {}), '(p)\n', (15999, 16002), False, 'from numpy.linalg import norm\n'), ((19240, 19251), 'numpy.zeros', 'np.zeros', (['n'], {}), '(n)\n', (19248, 19251), True, 'import numpy as np\n'), ((19422, 19435), 'numpy.nonzero', 'np.nonzero', (['e'], {}), '(e)\n', (19432, 19435), True, 'import numpy as np\n'), ((19561, 19572), 'numpy.empty', 'np.empty', (['m'], {}), '(m)\n', (19569, 19572), True, 'import numpy as np\n'), ((23327, 23351), 'numpy.asarray', 'np.asarray', (['J_diff[i, j]'], {}), '(J_diff[i, j])\n', (23337, 23351), True, 'import numpy as np\n'), ((23382, 23402), 'numpy.abs', 'np.abs', (['abs_err_data'], {}), '(abs_err_data)\n', (23388, 23402), True, 'import numpy as np\n'), ((3098, 3116), 'numpy.abs', 'np.abs', (['h_adjusted'], {}), '(h_adjusted)\n', (3104, 3116), True, 'import numpy as np\n'), ((13654, 13666), 'numpy.isinf', 'np.isinf', (['lb'], {}), '(lb)\n', (13662, 13666), True, 'import numpy as np\n'), ((13714, 13726), 'numpy.isinf', 'np.isinf', (['ub'], {}), '(ub)\n', (13722, 13726), True, 'import numpy as np\n'), ((16172, 16188), 'numpy.zeros_like', 'np.zeros_like', (['p'], {}), '(p)\n', (16185, 16188), True, 'import numpy as np\n'), ((16214, 16225), 'numpy.zeros', 'np.zeros', (['m'], {}), '(m)\n', (16222, 16225), True, 'import numpy as np\n'), ((16249, 16256), 'numpy.linalg.norm', 'norm', (['p'], {}), '(p)\n', (16253, 16256), False, 'from numpy.linalg import norm\n'), ((19870, 19883), 'numpy.nonzero', 'np.nonzero', (['e'], {}), '(e)\n', (19880, 19883), True, 'import numpy as np\n'), ((23441, 23460), 'numpy.abs', 'np.abs', (['J_diff_data'], {}), '(J_diff_data)\n', (23447, 23460), True, 'import numpy as np\n'), ((23684, 23698), 'numpy.abs', 'np.abs', (['J_diff'], {}), '(J_diff)\n', (23690, 23698), True, 'import numpy as np\n'), ((16503, 16519), 'numpy.zeros_like', 'np.zeros_like', (['p'], {}), '(p)\n', (16516, 16519), True, 'import numpy as np\n'), ((16545, 16556), 'numpy.zeros', 'np.zeros', (['m'], {}), '(m)\n', (16553, 16556), True, 'import numpy as np\n'), ((16578, 16585), 'numpy.linalg.norm', 'norm', (['p'], {}), '(p)\n', 
(16582, 16585), False, 'from numpy.linalg import norm\n')] |
# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pickle
from argparse import Namespace
import cloudpickle
import pytest
import torch
from fsspec.implementations.local import LocalFileSystem
from omegaconf import OmegaConf, Container
from torch.nn import functional as F
from torch.utils.data import DataLoader
from pytorch_lightning import Trainer, LightningModule
from pytorch_lightning.core.saving import save_hparams_to_yaml, load_hparams_from_yaml
from pytorch_lightning.utilities import AttributeDict, is_picklable
from tests.base import EvalModelTemplate, TrialMNIST, BoringModel
class SaveHparamsModel(EvalModelTemplate):
""" Tests that a model can take an object """
def __init__(self, hparams):
super().__init__()
self.save_hyperparameters(hparams)
class AssignHparamsModel(EvalModelTemplate):
""" Tests that a model can take an object with explicit setter """
def __init__(self, hparams):
super().__init__()
self.hparams = hparams
# -------------------------
# STANDARD TESTS
# -------------------------
def _run_standard_hparams_test(tmpdir, model, cls, try_overwrite=False):
"""
Tests for the existence of an arg 'test_arg=14'
"""
hparam_type = type(model.hparams)
# test proper property assignments
assert model.hparams.test_arg == 14
# verify we can train
trainer = Trainer(default_root_dir=tmpdir, max_epochs=1, overfit_batches=2)
trainer.fit(model)
# make sure the raw checkpoint saved the properties
raw_checkpoint_path = _raw_checkpoint_path(trainer)
raw_checkpoint = torch.load(raw_checkpoint_path)
assert LightningModule.CHECKPOINT_HYPER_PARAMS_KEY in raw_checkpoint
assert raw_checkpoint[LightningModule.CHECKPOINT_HYPER_PARAMS_KEY]['test_arg'] == 14
# verify that model loads correctly
model2 = cls.load_from_checkpoint(raw_checkpoint_path)
assert model2.hparams.test_arg == 14
assert isinstance(model2.hparams, hparam_type)
if try_overwrite:
# verify that we can overwrite the property
model3 = cls.load_from_checkpoint(raw_checkpoint_path, test_arg=78)
assert model3.hparams.test_arg == 78
return raw_checkpoint_path
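# Hedged note: the helper above assumes that `save_hyperparameters()` (or the explicit
# `self.hparams = hparams` assignment) copies the constructor arguments into `model.hparams`,
# that the Trainer persists them in the checkpoint under
# `LightningModule.CHECKPOINT_HYPER_PARAMS_KEY`, and that `load_from_checkpoint` restores
# them, optionally overridden by keyword arguments such as `test_arg=78`.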
@pytest.mark.parametrize("cls", [SaveHparamsModel, AssignHparamsModel])
def test_namespace_hparams(tmpdir, cls):
# init model
model = cls(hparams=Namespace(test_arg=14))
# run standard test suite
_run_standard_hparams_test(tmpdir, model, cls)
@pytest.mark.parametrize("cls", [SaveHparamsModel, AssignHparamsModel])
def test_dict_hparams(tmpdir, cls):
# init model
model = cls(hparams={'test_arg': 14})
# run standard test suite
_run_standard_hparams_test(tmpdir, model, cls)
@pytest.mark.parametrize("cls", [SaveHparamsModel, AssignHparamsModel])
def test_omega_conf_hparams(tmpdir, cls):
# init model
conf = OmegaConf.create(dict(test_arg=14, mylist=[15.4, dict(a=1, b=2)]))
model = cls(hparams=conf)
assert isinstance(model.hparams, Container)
# run standard test suite
raw_checkpoint_path = _run_standard_hparams_test(tmpdir, model, cls)
model2 = cls.load_from_checkpoint(raw_checkpoint_path)
assert isinstance(model2.hparams, Container)
# config specific tests
assert model2.hparams.test_arg == 14
assert model2.hparams.mylist[0] == 15.4
def test_explicit_args_hparams(tmpdir):
"""
Tests that a model can take implicit args and assign
"""
# define model
class LocalModel(EvalModelTemplate):
def __init__(self, test_arg, test_arg2):
super().__init__()
self.save_hyperparameters('test_arg', 'test_arg2')
model = LocalModel(test_arg=14, test_arg2=90)
# run standard test suite
raw_checkpoint_path = _run_standard_hparams_test(tmpdir, model, LocalModel)
model = LocalModel.load_from_checkpoint(raw_checkpoint_path, test_arg2=120)
# config specific tests
assert model.hparams.test_arg2 == 120
def test_implicit_args_hparams(tmpdir):
"""
Tests that a model can take regular args and assign
"""
# define model
class LocalModel(EvalModelTemplate):
def __init__(self, test_arg, test_arg2):
super().__init__()
self.save_hyperparameters()
model = LocalModel(test_arg=14, test_arg2=90)
# run standard test suite
raw_checkpoint_path = _run_standard_hparams_test(tmpdir, model, LocalModel)
model = LocalModel.load_from_checkpoint(raw_checkpoint_path, test_arg2=120)
# config specific tests
assert model.hparams.test_arg2 == 120
def test_explicit_missing_args_hparams(tmpdir):
"""
Tests that a model can take regular args and assign
"""
# define model
class LocalModel(EvalModelTemplate):
def __init__(self, test_arg, test_arg2):
super().__init__()
self.save_hyperparameters('test_arg')
model = LocalModel(test_arg=14, test_arg2=90)
# test proper property assignments
assert model.hparams.test_arg == 14
# verify we can train
trainer = Trainer(default_root_dir=tmpdir, max_epochs=2, overfit_batches=0.5)
trainer.fit(model)
# make sure the raw checkpoint saved the properties
raw_checkpoint_path = _raw_checkpoint_path(trainer)
raw_checkpoint = torch.load(raw_checkpoint_path)
assert LightningModule.CHECKPOINT_HYPER_PARAMS_KEY in raw_checkpoint
assert raw_checkpoint[LightningModule.CHECKPOINT_HYPER_PARAMS_KEY]['test_arg'] == 14
# verify that model loads correctly
model = LocalModel.load_from_checkpoint(raw_checkpoint_path, test_arg2=123)
assert model.hparams.test_arg == 14
assert 'test_arg2' not in model.hparams # test_arg2 is not registered in class init
return raw_checkpoint_path
# -------------------------
# SPECIFIC TESTS
# -------------------------
def test_class_nesting():
class MyModule(LightningModule):
def forward(self):
...
# make sure PL modules are always nn.Module
a = MyModule()
assert isinstance(a, torch.nn.Module)
def test_outside():
a = MyModule()
_ = a.hparams
class A:
def test(self):
a = MyModule()
_ = a.hparams
def test2(self):
test_outside()
test_outside()
A().test2()
A().test()
class SubClassEvalModel(EvalModelTemplate):
any_other_loss = torch.nn.CrossEntropyLoss()
def __init__(self, *args, subclass_arg=1200, **kwargs):
super().__init__(*args, **kwargs)
self.save_hyperparameters()
class SubSubClassEvalModel(SubClassEvalModel):
pass
class AggSubClassEvalModel(SubClassEvalModel):
def __init__(self, *args, my_loss=torch.nn.CrossEntropyLoss(), **kwargs):
super().__init__(*args, **kwargs)
self.save_hyperparameters()
class UnconventionalArgsEvalModel(EvalModelTemplate):
""" A model that has unconventional names for "self", "*args" and "**kwargs". """
def __init__(obj, *more_args, other_arg=300, **more_kwargs):
# intentionally named obj
super().__init__(*more_args, **more_kwargs)
obj.save_hyperparameters()
class DictConfSubClassEvalModel(SubClassEvalModel):
def __init__(self, *args, dict_conf=OmegaConf.create(dict(my_param='something')), **kwargs):
super().__init__(*args, **kwargs)
self.save_hyperparameters()
@pytest.mark.parametrize("cls", [
EvalModelTemplate,
SubClassEvalModel,
SubSubClassEvalModel,
AggSubClassEvalModel,
UnconventionalArgsEvalModel,
DictConfSubClassEvalModel,
])
def test_collect_init_arguments(tmpdir, cls):
""" Test that the model automatically saves the arguments passed into the constructor """
extra_args = {}
if cls is AggSubClassEvalModel:
extra_args.update(my_loss=torch.nn.CosineEmbeddingLoss())
elif cls is DictConfSubClassEvalModel:
extra_args.update(dict_conf=OmegaConf.create(dict(my_param='anything')))
model = cls(**extra_args)
assert model.hparams.batch_size == 32
model = cls(batch_size=179, **extra_args)
assert model.hparams.batch_size == 179
if isinstance(model, SubClassEvalModel):
assert model.hparams.subclass_arg == 1200
if isinstance(model, AggSubClassEvalModel):
assert isinstance(model.hparams.my_loss, torch.nn.CosineEmbeddingLoss)
# verify that the checkpoint saved the correct values
trainer = Trainer(default_root_dir=tmpdir, max_epochs=2, overfit_batches=0.5)
trainer.fit(model)
raw_checkpoint_path = _raw_checkpoint_path(trainer)
raw_checkpoint = torch.load(raw_checkpoint_path)
assert LightningModule.CHECKPOINT_HYPER_PARAMS_KEY in raw_checkpoint
assert raw_checkpoint[LightningModule.CHECKPOINT_HYPER_PARAMS_KEY]['batch_size'] == 179
# verify that model loads correctly
model = cls.load_from_checkpoint(raw_checkpoint_path)
assert model.hparams.batch_size == 179
if isinstance(model, AggSubClassEvalModel):
assert isinstance(model.hparams.my_loss, torch.nn.CosineEmbeddingLoss)
if isinstance(model, DictConfSubClassEvalModel):
assert isinstance(model.hparams.dict_conf, Container)
assert model.hparams.dict_conf['my_param'] == 'anything'
# verify that we can overwrite whatever we want
model = cls.load_from_checkpoint(raw_checkpoint_path, batch_size=99)
assert model.hparams.batch_size == 99
def _raw_checkpoint_path(trainer) -> str:
raw_checkpoint_paths = os.listdir(trainer.checkpoint_callback.dirpath)
raw_checkpoint_paths = [x for x in raw_checkpoint_paths if '.ckpt' in x]
assert raw_checkpoint_paths
raw_checkpoint_path = raw_checkpoint_paths[0]
raw_checkpoint_path = os.path.join(trainer.checkpoint_callback.dirpath, raw_checkpoint_path)
return raw_checkpoint_path
class LocalVariableModelSuperLast(EvalModelTemplate):
""" This model has the super().__init__() call at the end. """
def __init__(self, arg1, arg2, *args, **kwargs):
self.argument1 = arg1 # arg2 intentionally not set
arg1 = 'overwritten'
local_var = 1234
super().__init__(*args, **kwargs) # this is intentionally here at the end
class LocalVariableModelSuperFirst(EvalModelTemplate):
""" This model has the _auto_collect_arguments() call at the end. """
def __init__(self, arg1, arg2, *args, **kwargs):
super().__init__(*args, **kwargs)
self.argument1 = arg1 # arg2 intentionally not set
arg1 = 'overwritten'
local_var = 1234
self.save_hyperparameters() # this is intentionally here at the end
@pytest.mark.parametrize("cls", [
LocalVariableModelSuperFirst,
# LocalVariableModelSuperLast,
])
def test_collect_init_arguments_with_local_vars(cls):
""" Tests that only the arguments are collected and not local variables. """
model = cls(arg1=1, arg2=2)
assert 'local_var' not in model.hparams
assert model.hparams['arg1'] == 'overwritten'
assert model.hparams['arg2'] == 2
# @pytest.mark.parametrize("cls,config", [
# (SaveHparamsModel, Namespace(my_arg=42)),
# (SaveHparamsModel, dict(my_arg=42)),
# (SaveHparamsModel, OmegaConf.create(dict(my_arg=42))),
# (AssignHparamsModel, Namespace(my_arg=42)),
# (AssignHparamsModel, dict(my_arg=42)),
# (AssignHparamsModel, OmegaConf.create(dict(my_arg=42))),
# ])
# def test_single_config_models(tmpdir, cls, config):
# """ Test that the model automatically saves the arguments passed into the constructor """
# model = cls(config)
#
# # no matter how you do it, it should be assigned
# assert model.hparams.my_arg == 42
#
# # verify that the checkpoint saved the correct values
# trainer = Trainer(default_root_dir=tmpdir, max_epochs=2, overfit_batches=0.5)
# trainer.fit(model)
#
# # verify that model loads correctly
# raw_checkpoint_path = _raw_checkpoint_path(trainer)
# model = cls.load_from_checkpoint(raw_checkpoint_path)
# assert model.hparams.my_arg == 42
class AnotherArgModel(EvalModelTemplate):
def __init__(self, arg1):
super().__init__()
self.save_hyperparameters(arg1)
class OtherArgsModel(EvalModelTemplate):
def __init__(self, arg1, arg2):
super().__init__()
self.save_hyperparameters(arg1, arg2)
@pytest.mark.parametrize("cls,config", [
(AnotherArgModel, dict(arg1=42)),
(OtherArgsModel, dict(arg1=3.14, arg2='abc')),
])
def test_single_config_models_fail(tmpdir, cls, config):
""" Test fail on passing unsupported config type. """
with pytest.raises(ValueError):
_ = cls(**config)
@pytest.mark.parametrize("past_key", ['module_arguments'])
def test_load_past_checkpoint(tmpdir, past_key):
model = EvalModelTemplate()
# verify we can train
trainer = Trainer(default_root_dir=tmpdir, max_epochs=1)
trainer.fit(model)
# make sure the raw checkpoint saved the properties
raw_checkpoint_path = _raw_checkpoint_path(trainer)
raw_checkpoint = torch.load(raw_checkpoint_path)
raw_checkpoint[past_key] = raw_checkpoint[LightningModule.CHECKPOINT_HYPER_PARAMS_KEY]
raw_checkpoint['hparams_type'] = 'Namespace'
raw_checkpoint[past_key]['batch_size'] = -17
del raw_checkpoint[LightningModule.CHECKPOINT_HYPER_PARAMS_KEY]
# save back the checkpoint
torch.save(raw_checkpoint, raw_checkpoint_path)
# verify that model loads correctly
model2 = EvalModelTemplate.load_from_checkpoint(raw_checkpoint_path)
assert model2.hparams.batch_size == -17
def test_hparams_pickle(tmpdir):
ad = AttributeDict({'key1': 1, 'key2': 'abc'})
pkl = pickle.dumps(ad)
assert ad == pickle.loads(pkl)
pkl = cloudpickle.dumps(ad)
assert ad == pickle.loads(pkl)
class UnpickleableArgsEvalModel(EvalModelTemplate):
""" A model that has an attribute that cannot be pickled. """
def __init__(self, foo='bar', pickle_me=(lambda x: x + 1), **kwargs):
super().__init__(**kwargs)
assert not is_picklable(pickle_me)
self.save_hyperparameters()
def test_hparams_pickle_warning(tmpdir):
model = UnpickleableArgsEvalModel()
trainer = Trainer(default_root_dir=tmpdir, max_steps=1)
with pytest.warns(UserWarning, match="attribute 'pickle_me' removed from hparams because it cannot be pickled"):
trainer.fit(model)
assert 'pickle_me' not in model.hparams
def test_hparams_save_yaml(tmpdir):
hparams = dict(batch_size=32, learning_rate=0.001, data_root='./any/path/here',
                   nested=dict(any_num=123, anystr='abcd'))
path_yaml = os.path.join(tmpdir, 'testing-hparams.yaml')
save_hparams_to_yaml(path_yaml, hparams)
assert load_hparams_from_yaml(path_yaml) == hparams
save_hparams_to_yaml(path_yaml, Namespace(**hparams))
assert load_hparams_from_yaml(path_yaml) == hparams
save_hparams_to_yaml(path_yaml, AttributeDict(hparams))
assert load_hparams_from_yaml(path_yaml) == hparams
save_hparams_to_yaml(path_yaml, OmegaConf.create(hparams))
assert load_hparams_from_yaml(path_yaml) == hparams
class NoArgsSubClassEvalModel(EvalModelTemplate):
def __init__(self):
super().__init__()
class SimpleNoArgsModel(LightningModule):
def __init__(self):
super().__init__()
self.l1 = torch.nn.Linear(28 * 28, 10)
def forward(self, x):
return torch.relu(self.l1(x.view(x.size(0), -1)))
def training_step(self, batch, batch_nb):
x, y = batch
loss = F.cross_entropy(self(x), y)
return {'loss': loss, 'log': {'train_loss': loss}}
def test_step(self, batch, batch_nb):
x, y = batch
loss = F.cross_entropy(self(x), y)
return {'loss': loss, 'log': {'train_loss': loss}}
def configure_optimizers(self):
return torch.optim.Adam(self.parameters(), lr=0.02)
@pytest.mark.parametrize("cls", [
SimpleNoArgsModel,
NoArgsSubClassEvalModel,
])
def test_model_nohparams_train_test(tmpdir, cls):
"""Test models that do not tae any argument in init."""
model = cls()
trainer = Trainer(
max_epochs=1,
default_root_dir=tmpdir,
)
train_loader = DataLoader(TrialMNIST(os.getcwd(), train=True, download=True), batch_size=32)
trainer.fit(model, train_loader)
test_loader = DataLoader(TrialMNIST(os.getcwd(), train=False, download=True), batch_size=32)
trainer.test(test_dataloaders=test_loader)
def test_model_ignores_non_exist_kwargument(tmpdir):
"""Test that the model takes only valid class arguments."""
class LocalModel(EvalModelTemplate):
def __init__(self, batch_size=15):
super().__init__(batch_size=batch_size)
self.save_hyperparameters()
model = LocalModel()
assert model.hparams.batch_size == 15
# verify that the checkpoint saved the correct values
trainer = Trainer(default_root_dir=tmpdir, max_epochs=1)
trainer.fit(model)
# verify that we can overwrite whatever we want
raw_checkpoint_path = _raw_checkpoint_path(trainer)
model = LocalModel.load_from_checkpoint(raw_checkpoint_path, non_exist_kwarg=99)
assert 'non_exist_kwarg' not in model.hparams
class SuperClassPositionalArgs(EvalModelTemplate):
def __init__(self, hparams):
super().__init__()
self._hparams = None # pretend EvalModelTemplate did not call self.save_hyperparameters()
self.hparams = hparams
class SubClassVarArgs(SuperClassPositionalArgs):
""" Loading this model should accept hparams and init in the super class """
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def test_args(tmpdir):
""" Test for inheritance: super class takes positional arg, subclass takes varargs. """
hparams = dict(test=1)
model = SubClassVarArgs(hparams)
trainer = Trainer(default_root_dir=tmpdir, max_epochs=1)
trainer.fit(model)
raw_checkpoint_path = _raw_checkpoint_path(trainer)
with pytest.raises(TypeError, match="__init__\(\) got an unexpected keyword argument 'test'"):
SubClassVarArgs.load_from_checkpoint(raw_checkpoint_path)
class RuntimeParamChangeModelSaving(BoringModel):
def __init__(self, **kwargs):
super().__init__()
self.save_hyperparameters()
class RuntimeParamChangeModelAssign(BoringModel):
def __init__(self, **kwargs):
super().__init__()
self.hparams = kwargs
@pytest.mark.parametrize("cls", [RuntimeParamChangeModelSaving, RuntimeParamChangeModelAssign])
def test_init_arg_with_runtime_change(tmpdir, cls):
"""Test that we save/export only the initial hparams, no other runtime change allowed"""
model = cls(running_arg=123)
assert model.hparams.running_arg == 123
model.hparams.running_arg = -1
assert model.hparams.running_arg == -1
model.hparams = Namespace(abc=42)
assert model.hparams.abc == 42
trainer = Trainer(
default_root_dir=tmpdir,
limit_train_batches=2,
limit_val_batches=2,
limit_test_batches=2,
max_epochs=1,
)
trainer.fit(model)
path_yaml = os.path.join(trainer.logger.log_dir, trainer.logger.NAME_HPARAMS_FILE)
hparams = load_hparams_from_yaml(path_yaml)
assert hparams.get('running_arg') == 123
class UnsafeParamModel(BoringModel):
def __init__(self, my_path, any_param=123):
super().__init__()
self.save_hyperparameters()
def test_model_with_fsspec_as_parameter(tmpdir):
model = UnsafeParamModel(LocalFileSystem(tmpdir))
trainer = Trainer(
default_root_dir=tmpdir,
limit_train_batches=2,
limit_val_batches=2,
limit_test_batches=2,
max_epochs=1,
)
trainer.fit(model)
trainer.test()
| [
"tests.base.EvalModelTemplate.load_from_checkpoint",
"torch.nn.CrossEntropyLoss",
"fsspec.implementations.local.LocalFileSystem",
"pickle.dumps",
"pytorch_lightning.utilities.is_picklable",
"pytorch_lightning.Trainer",
"argparse.Namespace",
"pickle.loads",
"os.listdir",
"omegaconf.OmegaConf.create",
"tests.base.EvalModelTemplate",
"torch.nn.CosineEmbeddingLoss",
"pytorch_lightning.core.saving.load_hparams_from_yaml",
"pytorch_lightning.utilities.AttributeDict",
"pytest.raises",
"torch.save",
"torch.load",
"os.path.join",
"os.getcwd",
"cloudpickle.dumps",
"pytest.mark.parametrize",
"pytorch_lightning.core.saving.save_hparams_to_yaml",
"torch.nn.Linear",
"pytest.warns"
] | [((2764, 2834), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""cls"""', '[SaveHparamsModel, AssignHparamsModel]'], {}), "('cls', [SaveHparamsModel, AssignHparamsModel])\n", (2787, 2834), False, 'import pytest\n'), ((3026, 3096), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""cls"""', '[SaveHparamsModel, AssignHparamsModel]'], {}), "('cls', [SaveHparamsModel, AssignHparamsModel])\n", (3049, 3096), False, 'import pytest\n'), ((3277, 3347), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""cls"""', '[SaveHparamsModel, AssignHparamsModel]'], {}), "('cls', [SaveHparamsModel, AssignHparamsModel])\n", (3300, 3347), False, 'import pytest\n'), ((7932, 8110), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""cls"""', '[EvalModelTemplate, SubClassEvalModel, SubSubClassEvalModel,\n AggSubClassEvalModel, UnconventionalArgsEvalModel,\n DictConfSubClassEvalModel]'], {}), "('cls', [EvalModelTemplate, SubClassEvalModel,\n SubSubClassEvalModel, AggSubClassEvalModel, UnconventionalArgsEvalModel,\n DictConfSubClassEvalModel])\n", (7955, 8110), False, 'import pytest\n'), ((11162, 11224), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""cls"""', '[LocalVariableModelSuperFirst]'], {}), "('cls', [LocalVariableModelSuperFirst])\n", (11185, 11224), False, 'import pytest\n'), ((13180, 13237), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""past_key"""', "['module_arguments']"], {}), "('past_key', ['module_arguments'])\n", (13203, 13237), False, 'import pytest\n'), ((16412, 16488), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""cls"""', '[SimpleNoArgsModel, NoArgsSubClassEvalModel]'], {}), "('cls', [SimpleNoArgsModel, NoArgsSubClassEvalModel])\n", (16435, 16488), False, 'import pytest\n'), ((18985, 19083), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""cls"""', '[RuntimeParamChangeModelSaving, RuntimeParamChangeModelAssign]'], {}), "('cls', [RuntimeParamChangeModelSaving,\n RuntimeParamChangeModelAssign])\n", (19008, 19083), False, 'import pytest\n'), ((1923, 1988), 'pytorch_lightning.Trainer', 'Trainer', ([], {'default_root_dir': 'tmpdir', 'max_epochs': '(1)', 'overfit_batches': '(2)'}), '(default_root_dir=tmpdir, max_epochs=1, overfit_batches=2)\n', (1930, 1988), False, 'from pytorch_lightning import Trainer, LightningModule\n'), ((2146, 2177), 'torch.load', 'torch.load', (['raw_checkpoint_path'], {}), '(raw_checkpoint_path)\n', (2156, 2177), False, 'import torch\n'), ((5614, 5681), 'pytorch_lightning.Trainer', 'Trainer', ([], {'default_root_dir': 'tmpdir', 'max_epochs': '(2)', 'overfit_batches': '(0.5)'}), '(default_root_dir=tmpdir, max_epochs=2, overfit_batches=0.5)\n', (5621, 5681), False, 'from pytorch_lightning import Trainer, LightningModule\n'), ((5839, 5870), 'torch.load', 'torch.load', (['raw_checkpoint_path'], {}), '(raw_checkpoint_path)\n', (5849, 5870), False, 'import torch\n'), ((6940, 6967), 'torch.nn.CrossEntropyLoss', 'torch.nn.CrossEntropyLoss', ([], {}), '()\n', (6965, 6967), False, 'import torch\n'), ((8975, 9042), 'pytorch_lightning.Trainer', 'Trainer', ([], {'default_root_dir': 'tmpdir', 'max_epochs': '(2)', 'overfit_batches': '(0.5)'}), '(default_root_dir=tmpdir, max_epochs=2, overfit_batches=0.5)\n', (8982, 9042), False, 'from pytorch_lightning import Trainer, LightningModule\n'), ((9145, 9176), 'torch.load', 'torch.load', (['raw_checkpoint_path'], {}), '(raw_checkpoint_path)\n', (9155, 9176), False, 'import torch\n'), ((10032, 10079), 'os.listdir', 'os.listdir', 
(['trainer.checkpoint_callback.dirpath'], {}), '(trainer.checkpoint_callback.dirpath)\n', (10042, 10079), False, 'import os\n'), ((10265, 10335), 'os.path.join', 'os.path.join', (['trainer.checkpoint_callback.dirpath', 'raw_checkpoint_path'], {}), '(trainer.checkpoint_callback.dirpath, raw_checkpoint_path)\n', (10277, 10335), False, 'import os\n'), ((13299, 13318), 'tests.base.EvalModelTemplate', 'EvalModelTemplate', ([], {}), '()\n', (13316, 13318), False, 'from tests.base import EvalModelTemplate, TrialMNIST, BoringModel\n'), ((13360, 13406), 'pytorch_lightning.Trainer', 'Trainer', ([], {'default_root_dir': 'tmpdir', 'max_epochs': '(1)'}), '(default_root_dir=tmpdir, max_epochs=1)\n', (13367, 13406), False, 'from pytorch_lightning import Trainer, LightningModule\n'), ((13564, 13595), 'torch.load', 'torch.load', (['raw_checkpoint_path'], {}), '(raw_checkpoint_path)\n', (13574, 13595), False, 'import torch\n'), ((13888, 13935), 'torch.save', 'torch.save', (['raw_checkpoint', 'raw_checkpoint_path'], {}), '(raw_checkpoint, raw_checkpoint_path)\n', (13898, 13935), False, 'import torch\n'), ((13990, 14049), 'tests.base.EvalModelTemplate.load_from_checkpoint', 'EvalModelTemplate.load_from_checkpoint', (['raw_checkpoint_path'], {}), '(raw_checkpoint_path)\n', (14028, 14049), False, 'from tests.base import EvalModelTemplate, TrialMNIST, BoringModel\n'), ((14138, 14179), 'pytorch_lightning.utilities.AttributeDict', 'AttributeDict', (["{'key1': 1, 'key2': 'abc'}"], {}), "({'key1': 1, 'key2': 'abc'})\n", (14151, 14179), False, 'from pytorch_lightning.utilities import AttributeDict, is_picklable\n'), ((14190, 14206), 'pickle.dumps', 'pickle.dumps', (['ad'], {}), '(ad)\n', (14202, 14206), False, 'import pickle\n'), ((14252, 14273), 'cloudpickle.dumps', 'cloudpickle.dumps', (['ad'], {}), '(ad)\n', (14269, 14273), False, 'import cloudpickle\n'), ((14715, 14760), 'pytorch_lightning.Trainer', 'Trainer', ([], {'default_root_dir': 'tmpdir', 'max_steps': '(1)'}), '(default_root_dir=tmpdir, max_steps=1)\n', (14722, 14760), False, 'from pytorch_lightning import Trainer, LightningModule\n'), ((15147, 15191), 'os.path.join', 'os.path.join', (['tmpdir', '"""testing-hparams.yaml"""'], {}), "(tmpdir, 'testing-hparams.yaml')\n", (15159, 15191), False, 'import os\n'), ((15197, 15237), 'pytorch_lightning.core.saving.save_hparams_to_yaml', 'save_hparams_to_yaml', (['path_yaml', 'hparams'], {}), '(path_yaml, hparams)\n', (15217, 15237), False, 'from pytorch_lightning.core.saving import save_hparams_to_yaml, load_hparams_from_yaml\n'), ((16643, 16689), 'pytorch_lightning.Trainer', 'Trainer', ([], {'max_epochs': '(1)', 'default_root_dir': 'tmpdir'}), '(max_epochs=1, default_root_dir=tmpdir)\n', (16650, 16689), False, 'from pytorch_lightning import Trainer, LightningModule\n'), ((17430, 17476), 'pytorch_lightning.Trainer', 'Trainer', ([], {'default_root_dir': 'tmpdir', 'max_epochs': '(1)'}), '(default_root_dir=tmpdir, max_epochs=1)\n', (17437, 17476), False, 'from pytorch_lightning import Trainer, LightningModule\n'), ((18398, 18444), 'pytorch_lightning.Trainer', 'Trainer', ([], {'default_root_dir': 'tmpdir', 'max_epochs': '(1)'}), '(default_root_dir=tmpdir, max_epochs=1)\n', (18405, 18444), False, 'from pytorch_lightning import Trainer, LightningModule\n'), ((19400, 19417), 'argparse.Namespace', 'Namespace', ([], {'abc': '(42)'}), '(abc=42)\n', (19409, 19417), False, 'from argparse import Namespace\n'), ((19468, 19584), 'pytorch_lightning.Trainer', 'Trainer', ([], {'default_root_dir': 'tmpdir', 'limit_train_batches': 
'(2)', 'limit_val_batches': '(2)', 'limit_test_batches': '(2)', 'max_epochs': '(1)'}), '(default_root_dir=tmpdir, limit_train_batches=2, limit_val_batches=2,\n limit_test_batches=2, max_epochs=1)\n', (19475, 19584), False, 'from pytorch_lightning import Trainer, LightningModule\n'), ((19668, 19738), 'os.path.join', 'os.path.join', (['trainer.logger.log_dir', 'trainer.logger.NAME_HPARAMS_FILE'], {}), '(trainer.logger.log_dir, trainer.logger.NAME_HPARAMS_FILE)\n', (19680, 19738), False, 'import os\n'), ((19753, 19786), 'pytorch_lightning.core.saving.load_hparams_from_yaml', 'load_hparams_from_yaml', (['path_yaml'], {}), '(path_yaml)\n', (19775, 19786), False, 'from pytorch_lightning.core.saving import save_hparams_to_yaml, load_hparams_from_yaml\n'), ((20101, 20217), 'pytorch_lightning.Trainer', 'Trainer', ([], {'default_root_dir': 'tmpdir', 'limit_train_batches': '(2)', 'limit_val_batches': '(2)', 'limit_test_batches': '(2)', 'max_epochs': '(1)'}), '(default_root_dir=tmpdir, limit_train_batches=2, limit_val_batches=2,\n limit_test_batches=2, max_epochs=1)\n', (20108, 20217), False, 'from pytorch_lightning import Trainer, LightningModule\n'), ((7253, 7280), 'torch.nn.CrossEntropyLoss', 'torch.nn.CrossEntropyLoss', ([], {}), '()\n', (7278, 7280), False, 'import torch\n'), ((13124, 13149), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (13137, 13149), False, 'import pytest\n'), ((14224, 14241), 'pickle.loads', 'pickle.loads', (['pkl'], {}), '(pkl)\n', (14236, 14241), False, 'import pickle\n'), ((14291, 14308), 'pickle.loads', 'pickle.loads', (['pkl'], {}), '(pkl)\n', (14303, 14308), False, 'import pickle\n'), ((14770, 14881), 'pytest.warns', 'pytest.warns', (['UserWarning'], {'match': '"""attribute \'pickle_me\' removed from hparams because it cannot be pickled"""'}), '(UserWarning, match=\n "attribute \'pickle_me\' removed from hparams because it cannot be pickled")\n', (14782, 14881), False, 'import pytest\n'), ((15249, 15282), 'pytorch_lightning.core.saving.load_hparams_from_yaml', 'load_hparams_from_yaml', (['path_yaml'], {}), '(path_yaml)\n', (15271, 15282), False, 'from pytorch_lightning.core.saving import save_hparams_to_yaml, load_hparams_from_yaml\n'), ((15331, 15351), 'argparse.Namespace', 'Namespace', ([], {}), '(**hparams)\n', (15340, 15351), False, 'from argparse import Namespace\n'), ((15364, 15397), 'pytorch_lightning.core.saving.load_hparams_from_yaml', 'load_hparams_from_yaml', (['path_yaml'], {}), '(path_yaml)\n', (15386, 15397), False, 'from pytorch_lightning.core.saving import save_hparams_to_yaml, load_hparams_from_yaml\n'), ((15446, 15468), 'pytorch_lightning.utilities.AttributeDict', 'AttributeDict', (['hparams'], {}), '(hparams)\n', (15459, 15468), False, 'from pytorch_lightning.utilities import AttributeDict, is_picklable\n'), ((15481, 15514), 'pytorch_lightning.core.saving.load_hparams_from_yaml', 'load_hparams_from_yaml', (['path_yaml'], {}), '(path_yaml)\n', (15503, 15514), False, 'from pytorch_lightning.core.saving import save_hparams_to_yaml, load_hparams_from_yaml\n'), ((15563, 15588), 'omegaconf.OmegaConf.create', 'OmegaConf.create', (['hparams'], {}), '(hparams)\n', (15579, 15588), False, 'from omegaconf import OmegaConf, Container\n'), ((15601, 15634), 'pytorch_lightning.core.saving.load_hparams_from_yaml', 'load_hparams_from_yaml', (['path_yaml'], {}), '(path_yaml)\n', (15623, 15634), False, 'from pytorch_lightning.core.saving import save_hparams_to_yaml, load_hparams_from_yaml\n'), ((15862, 15890), 'torch.nn.Linear', 
'torch.nn.Linear', (['(28 * 28)', '(10)'], {}), '(28 * 28, 10)\n', (15877, 15890), False, 'import torch\n'), ((18534, 18629), 'pytest.raises', 'pytest.raises', (['TypeError'], {'match': '"""__init__\\\\(\\\\) got an unexpected keyword argument \'test\'"""'}), '(TypeError, match=\n "__init__\\\\(\\\\) got an unexpected keyword argument \'test\'")\n', (18547, 18629), False, 'import pytest\n'), ((20062, 20085), 'fsspec.implementations.local.LocalFileSystem', 'LocalFileSystem', (['tmpdir'], {}), '(tmpdir)\n', (20077, 20085), False, 'from fsspec.implementations.local import LocalFileSystem\n'), ((2917, 2939), 'argparse.Namespace', 'Namespace', ([], {'test_arg': '(14)'}), '(test_arg=14)\n', (2926, 2939), False, 'from argparse import Namespace\n'), ((14558, 14581), 'pytorch_lightning.utilities.is_picklable', 'is_picklable', (['pickle_me'], {}), '(pickle_me)\n', (14570, 14581), False, 'from pytorch_lightning.utilities import AttributeDict, is_picklable\n'), ((16755, 16766), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (16764, 16766), False, 'import os\n'), ((16889, 16900), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (16898, 16900), False, 'import os\n'), ((8360, 8390), 'torch.nn.CosineEmbeddingLoss', 'torch.nn.CosineEmbeddingLoss', ([], {}), '()\n', (8388, 8390), False, 'import torch\n')] |
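# --- Editorial usage sketch (not part of the test module above) ---------------
# The tests above exercise the hparams round-trip: save_hyperparameters() records
# the __init__ arguments into self.hparams, and load_from_checkpoint() rebuilds
# the module from those stored values (optionally overridden by keyword
# arguments). The class name, sizes and checkpoint path below are illustrative.
import torch
from pytorch_lightning import LightningModule
class TinyHparamsModel(LightningModule):
    def __init__(self, hidden_dim=16, lr=0.02):
        super().__init__()
        self.save_hyperparameters()  # -> self.hparams.hidden_dim, self.hparams.lr
        self.layer = torch.nn.Linear(28 * 28, self.hparams.hidden_dim)
    def forward(self, x):
        return self.layer(x)
    def configure_optimizers(self):
        return torch.optim.Adam(self.parameters(), lr=self.hparams.lr)
# After training, a checkpoint restores (or overrides) the recorded hparams:
#   model = TinyHparamsModel.load_from_checkpoint("path/to.ckpt", lr=0.1)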
import unittest
from coinplus_solo_redeem.common import wif_export_bitcoin, compute_public_key_sec256k1, address_from_publickey_ethereum
class TestEthereum(unittest.TestCase):
"""test of the bitcoin conversion from private key to wif"""
def setUp(self):
self.test_add_vector = [("03cb3e5f30245658e1e3615f1620e5b40f7d9016c0edb3611dd786327dd5e40caa", "<KEY>"),
("<KEY>", "0xDB1F8a8B668F15B9e696dDfF30Ce233703f9eC97"),
("<KEY>", "<KEY>"),
("<KEY>", "<KEY>"),
("037049004c5ad576beb518dcc74506df3faf520109a489886b7d1435a63b9b0b88", "0x0af4DbEf58063AEd75e6fF57610348E55954E8FB"),
("0260bbacc03555af21f062ff04e9fbde36bcf0ae7396812d336e7f2e5292306f2b", "<KEY>"),
("0343710601de0710dd81a0b7102bf1b794809a330caf4e1b4ae6567923c00df6a5", "<KEY>"),
("028c48ff458287f34cc1ad5c58a441500f8f315e9cabe34ff1601a5a0f791e4d0a", "0x98447B7aC721BDeb197a7e72780f6f41BECA2919"),
("0258cdabe1dad468dda6a7d62bee9e0cddadfe87d664e62df9143e769c017dd651", "0xaA5EacE5be0D09B09BAf66df62b0D85EA20b4ee4"),
("<KEY>", "<KEY>")]
def test_address_testvector(self):
for publickey_hex, address_expected in self.test_add_vector:
publickey = bytearray.fromhex(publickey_hex)
address = address_from_publickey_ethereum(publickey)
self.assertEqual(address, address_expected)
| [
"coinplus_solo_redeem.common.address_from_publickey_ethereum"
] | [((1486, 1528), 'coinplus_solo_redeem.common.address_from_publickey_ethereum', 'address_from_publickey_ethereum', (['publickey'], {}), '(publickey)\n', (1517, 1528), False, 'from coinplus_solo_redeem.common import wif_export_bitcoin, compute_public_key_sec256k1, address_from_publickey_ethereum\n')] |
import ast
import operator
import re
from collections import OrderedDict
from functools import partial
from ..cache import Cache
from ..exceptions import PluginError, NoStreamsError
from ..options import Options
# FIXME: This is a crude attempt at making a bitrate's
# weight end up similar to the weight of a resolution.
# Someone who knows math, please fix.
BIT_RATE_WEIGHT_RATIO = 2.8
ALT_WEIGHT_MOD = 0.01
QUALITY_WEIGTHS_EXTRA = {
"other": {
"live": 1080,
},
"tv": {
"hd": 1080,
"sd": 576,
},
"quality": {
"ehq": 720,
"hq": 576,
"sq": 360,
},
}
FILTER_OPERATORS = {
"<": operator.lt,
"<=": operator.le,
">": operator.gt,
">=": operator.ge,
}
PARAMS_REGEX = r"(\w+)=({.+?}|\[.+?\]|\(.+?\)|'(?:[^'\\]|\\')*'|\"(?:[^\"\\]|\\\")*\"|\S+)"
HIGH_PRIORITY = 30
NORMAL_PRIORITY = 20
LOW_PRIORITY = 10
NO_PRIORITY = 0
def stream_weight(stream):
for group, weights in QUALITY_WEIGTHS_EXTRA.items():
if stream in weights:
return weights[stream], group
match = re.match(r"^(\d+)(k|p)?(\d+)?(\+)?(?:_(\d+)k)?(?:_(alt)(\d)?)?$", stream)
if match:
weight = 0
if match.group(6):
if match.group(7):
weight -= ALT_WEIGHT_MOD * int(match.group(7))
else:
weight -= ALT_WEIGHT_MOD
name_type = match.group(2)
if name_type == "k": # bit rate
bitrate = int(match.group(1))
weight += bitrate / BIT_RATE_WEIGHT_RATIO
return weight, "bitrate"
elif name_type == "p": # resolution
weight += int(match.group(1))
if match.group(3): # fps eg. 60p or 50p
weight += int(match.group(3))
if match.group(4) == "+":
weight += 1
if match.group(5): # bit rate classifier for resolution
weight += int(match.group(5)) / BIT_RATE_WEIGHT_RATIO
return weight, "pixels"
return 0, "none"
def iterate_streams(streams):
for name, stream in streams:
if isinstance(stream, list):
for sub_stream in stream:
yield (name, sub_stream)
else:
yield (name, stream)
def stream_type_priority(stream_types, stream):
stream_type = type(stream[1]).shortname()
try:
prio = stream_types.index(stream_type)
except ValueError:
try:
prio = stream_types.index("*")
except ValueError:
prio = 99
return prio
def stream_sorting_filter(expr, stream_weight):
match = re.match(r"(?P<op><=|>=|<|>)?(?P<value>[\w+]+)", expr)
if not match:
raise PluginError("Invalid filter expression: {0}".format(expr))
op, value = match.group("op", "value")
op = FILTER_OPERATORS.get(op, operator.eq)
filter_weight, filter_group = stream_weight(value)
def func(quality):
weight, group = stream_weight(quality)
if group == filter_group:
return not op(weight, filter_weight)
return True
return func
def parse_url_params(url):
split = url.split(" ", 1)
url = split[0]
params = split[1] if len(split) > 1 else ''
return url, parse_params(params)
def parse_params(params):
rval = {}
matches = re.findall(PARAMS_REGEX, params)
for key, value in matches:
try:
value = ast.literal_eval(value)
except Exception:
pass
rval[key] = value
return rval
class Plugin(object):
"""A plugin can retrieve stream information from the URL specified.
:param url: URL that the plugin will operate on
"""
cache = None
logger = None
module = "unknown"
options = Options()
session = None
@classmethod
def bind(cls, session, module):
cls.cache = Cache(filename="plugin-cache.json",
key_prefix=module)
cls.logger = session.logger.new_module("plugin." + module)
cls.module = module
cls.session = session
def __init__(self, url):
self.url = url
@classmethod
def can_handle_url(cls, url):
raise NotImplementedError
@classmethod
def set_option(cls, key, value):
cls.options.set(key, value)
@classmethod
def get_option(cls, key):
return cls.options.get(key)
@classmethod
def stream_weight(cls, stream):
return stream_weight(stream)
@classmethod
def default_stream_types(cls, streams):
stream_types = ["rtmp", "hls", "hds", "http"]
for name, stream in iterate_streams(streams):
stream_type = type(stream).shortname()
if stream_type not in stream_types:
stream_types.append(stream_type)
return stream_types
@classmethod
def broken(cls, issue=None):
def func(*args, **kwargs):
msg = (
"This plugin has been marked as broken. This is likely due to "
"changes to the service preventing a working implementation. "
)
if issue:
msg += "More info: https://github.com/streamlink/streamlink/issues/{0}".format(issue)
raise PluginError(msg)
def decorator(*args, **kwargs):
return func
return decorator
@classmethod
def priority(cls, url):
"""
Return the plugin priority for a given URL, by default it returns
NORMAL priority.
:return: priority level
"""
return NORMAL_PRIORITY
def streams(self, stream_types=None, sorting_excludes=None):
"""Attempts to extract available streams.
Returns a :class:`dict` containing the streams, where the key is
the name of the stream, most commonly the quality and the value
is a :class:`Stream` object.
The result can contain the synonyms **best** and **worst** which
points to the streams which are likely to be of highest and
lowest quality respectively.
If multiple streams with the same name are found, the order of
streams specified in *stream_types* will determine which stream
gets to keep the name while the rest will be renamed to
"<name>_<stream type>".
        The synonyms can be fine-tuned with the *sorting_excludes*
parameter. This can be either of these types:
- A list of filter expressions in the format
*[operator]<value>*. For example the filter ">480p" will
exclude streams ranked higher than "480p" from the list
used in the synonyms ranking. Valid operators are >, >=, <
and <=. If no operator is specified then equality will be
tested.
- A function that is passed to filter() with a list of
stream names as input.
:param stream_types: A list of stream types to return.
:param sorting_excludes: Specify which streams to exclude from
the best/worst synonyms.
.. versionchanged:: 1.4.2
Added *priority* parameter.
.. versionchanged:: 1.5.0
Renamed *priority* to *stream_types* and changed behaviour
slightly.
.. versionchanged:: 1.5.0
Added *sorting_excludes* parameter.
.. versionchanged:: 1.6.0
*sorting_excludes* can now be a list of filter expressions
or a function that is passed to filter().
"""
try:
ostreams = self._get_streams()
if isinstance(ostreams, dict):
ostreams = ostreams.items()
# Flatten the iterator to a list so we can reuse it.
if ostreams:
ostreams = list(ostreams)
except NoStreamsError:
return {}
except (IOError, OSError, ValueError) as err:
raise PluginError(err)
if not ostreams:
return {}
if stream_types is None:
stream_types = self.default_stream_types(ostreams)
# Add streams depending on stream type and priorities
sorted_streams = sorted(iterate_streams(ostreams),
key=partial(stream_type_priority,
stream_types))
streams = {}
for name, stream in sorted_streams:
stream_type = type(stream).shortname()
# Use * as wildcard to match other stream types
if "*" not in stream_types and stream_type not in stream_types:
continue
# drop _alt from any stream names
if name.endswith("_alt"):
name = name[:-len("_alt")]
existing = streams.get(name)
if existing:
existing_stream_type = type(existing).shortname()
if existing_stream_type != stream_type:
name = "{0}_{1}".format(name, stream_type)
if name in streams:
name = "{0}_alt".format(name)
num_alts = len(list(filter(lambda n: n.startswith(name), streams.keys())))
# We shouldn't need more than 2 alt streams
if num_alts >= 2:
continue
elif num_alts > 0:
name = "{0}{1}".format(name, num_alts + 1)
# Validate stream name and discard the stream if it's bad.
match = re.match("([A-z0-9_+]+)", name)
if match:
name = match.group(1)
else:
self.logger.debug("The stream '{0}' has been ignored "
"since it is badly named.", name)
continue
# Force lowercase name and replace space with underscore.
streams[name.lower()] = stream
        # Create the best/worst synonyms
def stream_weight_only(s):
return (self.stream_weight(s)[0] or
(len(streams) == 1 and 1))
stream_names = filter(stream_weight_only, streams.keys())
sorted_streams = sorted(stream_names, key=stream_weight_only)
if isinstance(sorting_excludes, list):
for expr in sorting_excludes:
filter_func = stream_sorting_filter(expr, self.stream_weight)
sorted_streams = list(filter(filter_func, sorted_streams))
elif callable(sorting_excludes):
sorted_streams = list(filter(sorting_excludes, sorted_streams))
final_sorted_streams = OrderedDict()
for stream_name in sorted(streams, key=stream_weight_only):
final_sorted_streams[stream_name] = streams[stream_name]
if len(sorted_streams) > 0:
best = sorted_streams[-1]
worst = sorted_streams[0]
final_sorted_streams["worst"] = streams[worst]
final_sorted_streams["best"] = streams[best]
return final_sorted_streams
def get_streams(self, *args, **kwargs):
"""Deprecated since version 1.9.0.
Has been renamed to :func:`Plugin.streams`, this is an alias
for backwards compatibility.
"""
return self.streams(*args, **kwargs)
def _get_streams(self):
raise NotImplementedError
__all__ = ["Plugin"]
| [
"collections.OrderedDict",
"re.match",
"ast.literal_eval",
"functools.partial",
"re.findall"
] | [((1082, 1159), 're.match', 're.match', (['"""^(\\\\d+)(k|p)?(\\\\d+)?(\\\\+)?(?:_(\\\\d+)k)?(?:_(alt)(\\\\d)?)?$"""', 'stream'], {}), "('^(\\\\d+)(k|p)?(\\\\d+)?(\\\\+)?(?:_(\\\\d+)k)?(?:_(alt)(\\\\d)?)?$', stream)\n", (1090, 1159), False, 'import re\n'), ((2624, 2678), 're.match', 're.match', (['"""(?P<op><=|>=|<|>)?(?P<value>[\\\\w+]+)"""', 'expr'], {}), "('(?P<op><=|>=|<|>)?(?P<value>[\\\\w+]+)', expr)\n", (2632, 2678), False, 'import re\n'), ((3329, 3361), 're.findall', 're.findall', (['PARAMS_REGEX', 'params'], {}), '(PARAMS_REGEX, params)\n', (3339, 3361), False, 'import re\n'), ((10628, 10641), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (10639, 10641), False, 'from collections import OrderedDict\n'), ((3427, 3450), 'ast.literal_eval', 'ast.literal_eval', (['value'], {}), '(value)\n', (3443, 3450), False, 'import ast\n'), ((9540, 9571), 're.match', 're.match', (['"""([A-z0-9_+]+)"""', 'name'], {}), "('([A-z0-9_+]+)', name)\n", (9548, 9571), False, 'import re\n'), ((8276, 8319), 'functools.partial', 'partial', (['stream_type_priority', 'stream_types'], {}), '(stream_type_priority, stream_types)\n', (8283, 8319), False, 'from functools import partial\n')] |
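# --- Editorial usage sketch (not part of the module above) --------------------
# Shows how stream_weight() ranks quality names and how parse_url_params() splits
# a URL with trailing key=value parameters. `plugin` is a placeholder import path
# for the module above -- adjust it to the real package layout.
from plugin import stream_weight, parse_url_params
assert stream_weight("720p") > stream_weight("480p")   # higher resolution wins
assert stream_weight("1080p60") == (1140, "pixels")    # a fps suffix adds to the pixel weight
assert stream_weight("3000k")[1] == "bitrate"         # bit rates scaled by BIT_RATE_WEIGHT_RATIO
url, params = parse_url_params("http://example.com/live best=720p verify=False")
assert url == "http://example.com/live"
assert params == {"best": "720p", "verify": False}      # literal_eval turns "False" into a bool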
# This file is part of the Reference Data Repository (refdata).
#
# Copyright (C) 2021 New York University.
#
# refdata is free software; you can redistribute it and/or modify it under the
# terms of the MIT License; see LICENSE file for more details.
"""Fixtures for testing the command-line interface."""
import os
import pytest
from click.testing import CliRunner
from refdata.db import DB
import refdata.config as config
@pytest.fixture
def refdata_cli(tmpdir):
"""Initialize the environment and the database for the local store."""
basedir = os.path.abspath(str(tmpdir))
connect_url = 'sqlite:///{}'.format(os.path.join(basedir, 'test.db'))
DB(connect_url=connect_url).init()
os.environ[config.ENV_BASEDIR] = basedir
os.environ[config.ENV_URL] = connect_url
# Make sure to reset the database.
yield CliRunner()
# Clear environment variables that were set for the test runner.
del os.environ[config.ENV_BASEDIR]
del os.environ[config.ENV_URL]
| [
"refdata.db.DB",
"os.path.join",
"click.testing.CliRunner"
] | [((631, 663), 'os.path.join', 'os.path.join', (['basedir', '"""test.db"""'], {}), "(basedir, 'test.db')\n", (643, 663), False, 'import os\n'), ((843, 854), 'click.testing.CliRunner', 'CliRunner', ([], {}), '()\n', (852, 854), False, 'from click.testing import CliRunner\n'), ((669, 696), 'refdata.db.DB', 'DB', ([], {'connect_url': 'connect_url'}), '(connect_url=connect_url)\n', (671, 696), False, 'from refdata.db import DB\n')] |
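# --- Editorial usage sketch (not part of the conftest above) -------------------
# A test that consumes the refdata_cli fixture. The fixture has already created a
# temporary base directory and SQLite database and exported them through the two
# environment variables, so the test only checks that wiring; the commented
# invoke() call is hypothetical -- substitute the package's real click group.
import os
import refdata.config as config
def test_cli_runs_against_temporary_store(refdata_cli):
    assert os.environ[config.ENV_URL].startswith('sqlite:///')
    assert os.path.isdir(os.environ[config.ENV_BASEDIR])
    # result = refdata_cli.invoke(cli, ['list'])  # `cli` import is package-specific
    # assert result.exit_code == 0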
#!/usr/bin/python3
# ***************************************************************
# Copyright (c) 2022 Jittor. All Rights Reserved.
# Maintainers:
# <NAME> <<EMAIL>>.
#
# This file is subject to the terms and conditions defined in
# file 'LICENSE.txt', which is part of this source code package.
# ***************************************************************
# Publish steps:
# 1. build,push,upload docker image[jittor/jittor]
# 2. build,push,upload docker image[jittor/jittor-cuda]
# upload to pip:
# rm -rf dist && python3.7 ./setup.py sdist && python3.7 -m twine upload dist/*
import os
def run_cmd(cmd):
print("[run cmd]", cmd)
assert os.system(cmd) == 0
def upload_file(path):
run_cmd(f"rsync -avPu {path} jittor-web:Documents/jittor-blog/assets/build/")
def docker_task(name, build_cmd):
run_cmd(build_cmd)
run_cmd(f"sudo docker push {name}")
bname = os.path.basename(name)
run_cmd(f"sudo docker save {name}:latest -o /tmp/{bname}.tgz && sudo chmod 666 /tmp/{bname}.tgz")
upload_file(f"/tmp/{bname}.tgz")
docker_task(
"jittor/jittor-cuda-11-1",
"sudo docker build --tag jittor/jittor-cuda-11-1:latest -f script/Dockerfile_cuda11 . --network host"
)
docker_task(
"jittor/jittor",
"sudo docker build --tag jittor/jittor:latest . --network host"
)
docker_task(
"jittor/jittor-cuda",
"sudo docker build --tag jittor/jittor-cuda:latest --build-arg FROM_IMAGE='nvidia/cuda:10.2-cudnn7-devel-ubuntu18.04' . --network host"
)
docker_task(
"jittor/jittor-cuda-10-1",
"sudo docker build --tag jittor/jittor-cuda-10-1:latest --build-arg FROM_IMAGE='nvidia/cuda:10.1-cudnn7-devel-ubuntu18.04' . --network host"
)
run_cmd("ssh jittor-web Documents/jittor-blog.git/hooks/post-update") | [
"os.system",
"os.path.basename"
] | [((899, 921), 'os.path.basename', 'os.path.basename', (['name'], {}), '(name)\n', (915, 921), False, 'import os\n'), ((663, 677), 'os.system', 'os.system', (['cmd'], {}), '(cmd)\n', (672, 677), False, 'import os\n')] |
from setuptools import setup
PLUGIN_NAME = "papermill"
microlib_name = f"flytekitplugins-{PLUGIN_NAME}"
plugin_requires = [
"flytekit>=0.16.0b0,<1.0.0",
"flytekitplugins-spark>=0.16.0b0,<1.0.0,!=0.24.0b0",
"papermill>=1.2.0",
"nbconvert>=6.0.7",
"ipykernel>=5.0.0",
]
__version__ = "0.0.0+develop"
setup(
name=microlib_name,
version=__version__,
author="flyteorg",
author_email="<EMAIL>",
description="This is the flytekit papermill plugin",
namespace_packages=["flytekitplugins"],
packages=[f"flytekitplugins.{PLUGIN_NAME}"],
install_requires=plugin_requires,
license="apache2",
python_requires=">=3.7",
classifiers=[
"Intended Audience :: Science/Research",
"Intended Audience :: Developers",
"License :: OSI Approved :: Apache Software License",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Topic :: Scientific/Engineering",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
"Topic :: Software Development",
"Topic :: Software Development :: Libraries",
"Topic :: Software Development :: Libraries :: Python Modules",
],
)
| [
"setuptools.setup"
] | [((323, 1153), 'setuptools.setup', 'setup', ([], {'name': 'microlib_name', 'version': '__version__', 'author': '"""flyteorg"""', 'author_email': '"""<EMAIL>"""', 'description': '"""This is the flytekit papermill plugin"""', 'namespace_packages': "['flytekitplugins']", 'packages': "[f'flytekitplugins.{PLUGIN_NAME}']", 'install_requires': 'plugin_requires', 'license': '"""apache2"""', 'python_requires': '""">=3.7"""', 'classifiers': "['Intended Audience :: Science/Research', 'Intended Audience :: Developers',\n 'License :: OSI Approved :: Apache Software License',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n 'Topic :: Scientific/Engineering',\n 'Topic :: Scientific/Engineering :: Artificial Intelligence',\n 'Topic :: Software Development',\n 'Topic :: Software Development :: Libraries',\n 'Topic :: Software Development :: Libraries :: Python Modules']"}), "(name=microlib_name, version=__version__, author='flyteorg',\n author_email='<EMAIL>', description=\n 'This is the flytekit papermill plugin', namespace_packages=[\n 'flytekitplugins'], packages=[f'flytekitplugins.{PLUGIN_NAME}'],\n install_requires=plugin_requires, license='apache2', python_requires=\n '>=3.7', classifiers=['Intended Audience :: Science/Research',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: Apache Software License',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n 'Topic :: Scientific/Engineering',\n 'Topic :: Scientific/Engineering :: Artificial Intelligence',\n 'Topic :: Software Development',\n 'Topic :: Software Development :: Libraries',\n 'Topic :: Software Development :: Libraries :: Python Modules'])\n", (328, 1153), False, 'from setuptools import setup\n')] |
import pyttsx3
import datetime
import speech_recognition as sr
import wikipedia
import webbrowser
import os
import smtplib
engine = pyttsx3.init('sapi5')
voices = engine.getProperty('voices')
engine.setProperty('voice', voices[0].id)
# To change the voice to female change 0 to 1.
def speak(audio):
engine.say(audio)
engine.runAndWait()
pass
def take_command():
"""
It takes microphone input from the user and returns a string
:return:
"""
r = sr.Recognizer()
with sr.Microphone() as source:
print("Listening...")
r.pause_threshold = 1.5 # It will wait 1.5 seconds to complete a sentence
audio = r.listen(source)
#Do read details
try:
print("Recognizing")
query = r.recognize_google(audio,language='en-in')
print(f'user said : {query}\n')
except Exception as e:
#print(e)
print("Say that again please")
return "None"
return query
def sendEmail(to,content):
    server = smtplib.SMTP('smtp.gmail.com', 587)
    server.ehlo()
    server.starttls()  # Gmail's SMTP server requires TLS on port 587 before login
server.login('<EMAIL>','########')
server.sendmail('<EMAIL>',to,content)
server.close()
def wish_me():
hour = int(datetime.datetime.now().hour)
if hour >= 0 and hour < 12:
speak("Good morning")
elif hour >= 12 and hour < 18:
speak("Good afternoon")
else:
speak("Good night")
speak("I am JARVIS how can i help you")
if __name__ == '__main__':
wish_me()
while True:
query =take_command().lower()
if 'wikipedia' in query:
speak("Searching wikipedia")
query = query.replace('wikipedia','')
            results = wikipedia.summary(query, sentences=2)  # increase `sentences` for a longer summary, decrease it for a shorter one
speak("According to wikipedia")
#print(results)
speak(results)
elif 'open youtube' in query:
# webbrowser.Chrome.open_new("youtube.com")
webbrowser.open("youtube.com")
elif "open google" in query:
webbrowser.open("google.com")
elif "play music" in query:
music_dir = "D:\\vijayesh\\music"
songs = os.listdir(music_dir)
print(songs)
os.startfile(os.path.join(music_dir,songs[1]))
elif "the time" in query:
strtime = datetime.datetime.now().strftime("%H:%M:%S")
speak(f"The time is {strtime}")
elif " open pycharm" in query:
pycharmpath ="C:\\Program Files\\JetBrains\\PyCharm Community Edition 2021"
os.startfile(pycharmpath)
#elif "open command" in query:
# filelocation = "path of the particular file like above"
# os.startfile(filelocation)
elif " email to vijayesh" or "email to vijesh" in query:
try:
speak("What should i say")#error present
content = take_command()
to = "<EMAIL>"
sendEmail(to,content)
speak("Email has been sent")
exit()
except Exception as e:
print(e)
speak("Sorry,I am not able to send this email")
exit()
| [
"smtplib.SMTP",
"os.listdir",
"pyttsx3.init",
"os.path.join",
"webbrowser.open",
"speech_recognition.Recognizer",
"datetime.datetime.now",
"speech_recognition.Microphone",
"os.startfile",
"wikipedia.summary"
] | [((145, 166), 'pyttsx3.init', 'pyttsx3.init', (['"""sapi5"""'], {}), "('sapi5')\n", (157, 166), False, 'import pyttsx3\n'), ((495, 510), 'speech_recognition.Recognizer', 'sr.Recognizer', ([], {}), '()\n', (508, 510), True, 'import speech_recognition as sr\n'), ((961, 995), 'smtplib.SMTP', 'smtplib.SMTP', (['"""smtp.gmail.com"""', '(28)'], {}), "('smtp.gmail.com', 28)\n", (973, 995), False, 'import smtplib\n'), ((518, 533), 'speech_recognition.Microphone', 'sr.Microphone', ([], {}), '()\n', (531, 533), True, 'import speech_recognition as sr\n'), ((1177, 1200), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (1198, 1200), False, 'import datetime\n'), ((1606, 1643), 'wikipedia.summary', 'wikipedia.summary', (['query'], {'sentences': '(2)'}), '(query, sentences=2)\n', (1623, 1643), False, 'import wikipedia\n'), ((1876, 1906), 'webbrowser.open', 'webbrowser.open', (['"""youtube.com"""'], {}), "('youtube.com')\n", (1891, 1906), False, 'import webbrowser\n'), ((1943, 1972), 'webbrowser.open', 'webbrowser.open', (['"""google.com"""'], {}), "('google.com')\n", (1958, 1972), False, 'import webbrowser\n'), ((2058, 2079), 'os.listdir', 'os.listdir', (['music_dir'], {}), '(music_dir)\n', (2068, 2079), False, 'import os\n'), ((2114, 2147), 'os.path.join', 'os.path.join', (['music_dir', 'songs[1]'], {}), '(music_dir, songs[1])\n', (2126, 2147), False, 'import os\n'), ((2392, 2417), 'os.startfile', 'os.startfile', (['pycharmpath'], {}), '(pycharmpath)\n', (2404, 2417), False, 'import os\n'), ((2193, 2216), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (2214, 2216), False, 'import datetime\n')] |
"""
Ory Kratos API
    Documentation for all public and administrative Ory Kratos APIs. Public and administrative APIs are exposed on different ports. Public APIs can face the public internet without any protection while administrative APIs should never be exposed without prior authorization. To protect the administrative API port you should use something like Nginx, Ory Oathkeeper, or any other technology capable of authorizing incoming requests. # noqa: E501
The version of the OpenAPI document: v0.7.0-alpha.1
Contact: <EMAIL>
Generated by: https://openapi-generator.tech
"""
import unittest
import ory_kratos_client
from ory_kratos_client.api.v0alpha1_api import V0alpha1Api # noqa: E501
class TestV0alpha1Api(unittest.TestCase):
"""V0alpha1Api unit test stubs"""
def setUp(self):
self.api = V0alpha1Api() # noqa: E501
def tearDown(self):
pass
def test_admin_create_identity(self):
"""Test case for admin_create_identity
Create an Identity # noqa: E501
"""
pass
def test_admin_create_self_service_recovery_link(self):
"""Test case for admin_create_self_service_recovery_link
Create a Recovery Link # noqa: E501
"""
pass
def test_admin_delete_identity(self):
"""Test case for admin_delete_identity
Delete an Identity # noqa: E501
"""
pass
def test_admin_get_identity(self):
"""Test case for admin_get_identity
Get an Identity # noqa: E501
"""
pass
def test_admin_list_identities(self):
"""Test case for admin_list_identities
List Identities # noqa: E501
"""
pass
def test_admin_update_identity(self):
"""Test case for admin_update_identity
Update an Identity # noqa: E501
"""
pass
def test_create_self_service_logout_flow_url_for_browsers(self):
"""Test case for create_self_service_logout_flow_url_for_browsers
Create a Logout URL for Browsers # noqa: E501
"""
pass
def test_get_json_schema(self):
"""Test case for get_json_schema
"""
pass
def test_get_self_service_error(self):
"""Test case for get_self_service_error
Get Self-Service Errors # noqa: E501
"""
pass
def test_get_self_service_login_flow(self):
"""Test case for get_self_service_login_flow
Get Login Flow # noqa: E501
"""
pass
def test_get_self_service_recovery_flow(self):
"""Test case for get_self_service_recovery_flow
Get Recovery Flow # noqa: E501
"""
pass
def test_get_self_service_registration_flow(self):
"""Test case for get_self_service_registration_flow
Get Registration Flow # noqa: E501
"""
pass
def test_get_self_service_settings_flow(self):
"""Test case for get_self_service_settings_flow
Get Settings Flow # noqa: E501
"""
pass
def test_get_self_service_verification_flow(self):
"""Test case for get_self_service_verification_flow
Get Verification Flow # noqa: E501
"""
pass
def test_initialize_self_service_login_flow_for_browsers(self):
"""Test case for initialize_self_service_login_flow_for_browsers
Initialize Login Flow for Browsers # noqa: E501
"""
pass
def test_initialize_self_service_login_flow_without_browser(self):
"""Test case for initialize_self_service_login_flow_without_browser
Initialize Login Flow for APIs, Services, Apps, ... # noqa: E501
"""
pass
def test_initialize_self_service_recovery_flow_for_browsers(self):
"""Test case for initialize_self_service_recovery_flow_for_browsers
Initialize Recovery Flow for Browsers # noqa: E501
"""
pass
def test_initialize_self_service_recovery_flow_without_browser(self):
"""Test case for initialize_self_service_recovery_flow_without_browser
Initialize Recovery Flow for APIs, Services, Apps, ... # noqa: E501
"""
pass
def test_initialize_self_service_registration_flow_for_browsers(self):
"""Test case for initialize_self_service_registration_flow_for_browsers
Initialize Registration Flow for Browsers # noqa: E501
"""
pass
def test_initialize_self_service_registration_flow_without_browser(self):
"""Test case for initialize_self_service_registration_flow_without_browser
Initialize Registration Flow for APIs, Services, Apps, ... # noqa: E501
"""
pass
def test_initialize_self_service_settings_flow_for_browsers(self):
"""Test case for initialize_self_service_settings_flow_for_browsers
Initialize Settings Flow for Browsers # noqa: E501
"""
pass
def test_initialize_self_service_settings_flow_without_browser(self):
"""Test case for initialize_self_service_settings_flow_without_browser
Initialize Settings Flow for APIs, Services, Apps, ... # noqa: E501
"""
pass
def test_initialize_self_service_verification_flow_for_browsers(self):
"""Test case for initialize_self_service_verification_flow_for_browsers
Initialize Verification Flow for Browser Clients # noqa: E501
"""
pass
def test_initialize_self_service_verification_flow_without_browser(self):
"""Test case for initialize_self_service_verification_flow_without_browser
Initialize Verification Flow for APIs, Services, Apps, ... # noqa: E501
"""
pass
def test_submit_self_service_login_flow(self):
"""Test case for submit_self_service_login_flow
Submit a Login Flow # noqa: E501
"""
pass
def test_submit_self_service_logout_flow(self):
"""Test case for submit_self_service_logout_flow
Complete Self-Service Logout # noqa: E501
"""
pass
def test_submit_self_service_logout_flow_without_browser(self):
"""Test case for submit_self_service_logout_flow_without_browser
Perform Logout for APIs, Services, Apps, ... # noqa: E501
"""
pass
def test_submit_self_service_recovery_flow(self):
"""Test case for submit_self_service_recovery_flow
Complete Recovery Flow # noqa: E501
"""
pass
def test_submit_self_service_registration_flow(self):
"""Test case for submit_self_service_registration_flow
Submit a Registration Flow # noqa: E501
"""
pass
def test_submit_self_service_settings_flow(self):
"""Test case for submit_self_service_settings_flow
Complete Settings Flow # noqa: E501
"""
pass
def test_submit_self_service_verification_flow(self):
"""Test case for submit_self_service_verification_flow
Complete Verification Flow # noqa: E501
"""
pass
def test_to_session(self):
"""Test case for to_session
Check Who the Current HTTP Session Belongs To # noqa: E501
"""
pass
if __name__ == '__main__':
unittest.main()
| [
"unittest.main",
"ory_kratos_client.api.v0alpha1_api.V0alpha1Api"
] | [((7305, 7320), 'unittest.main', 'unittest.main', ([], {}), '()\n', (7318, 7320), False, 'import unittest\n'), ((842, 855), 'ory_kratos_client.api.v0alpha1_api.V0alpha1Api', 'V0alpha1Api', ([], {}), '()\n', (853, 855), False, 'from ory_kratos_client.api.v0alpha1_api import V0alpha1Api\n')] |
"""The present code is the Version 1.0 of the RCNN approach to perform MPS
in 3D for categorical variables. It has been developed by <NAME> and <NAME> in the
Geometallurygical Group at Queen's University as part of a PhD program.
The code is not free of bugs but running end-to-end.
Any comments and further improvements are well recevied to: <EMAIL>
April 16, 2019.
Geomet Group - Queen's University - Canada"""
# Do not display the AVX message about using GPU
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
#from tensorflow.python.client import device_lib
#print(device_lib.list_local_devices())
#os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID" # see issue #152
#os.environ["CUDA_VISIBLE_DEVICES"]="0"
## #########################
import numpy as np
import tensorflow as tf
import time
import External_Functions_3D as fns_nested
import gc
for ind0 in range(1):
start_time_AllTrain = time.time()
HyperPar = []
HyperPar.append(50) # SGsizex - Num 0
HyperPar.append(50) # SGsizey - Num 1
HyperPar.append(50) # SGsizez - Num 2
HyperPar.append(int(7)) # Search_x - Num 3
HyperPar.append(int(7)) # Search_y - Num 4
HyperPar.append(int(7)) # Search_z - Num 5
HyperPar.append(int(7)) # IPsizex - Num 6
HyperPar.append(int(7)) # IPsizey - Num 7
HyperPar.append(int(7)) # IPsizez - Num 8
HyperPar.append(50) # Percentage of Data Conditioning - Num 9 .. divided by 3 so 1% is 10 represents 1%
HyperPar.append(1) # MinDC - Num 10
HyperPar.append(1500) # Num Fully Connected - Num 11
HyperPar.append(3) # wdnh - Num 12
HyperPar.append(16) # convdepth - Num 13
HyperPar.append(2) # num of categories - Num 14
print("SG: ", int(HyperPar[3]),"x",int(HyperPar[4]),"x",int(HyperPar[5]), "IP: ", int(HyperPar[6]),"x",int(HyperPar[7]),"x",int(HyperPar[8]))
Ncicles = 500
Nepoch = 1
#Nbatch = 250
Nsamples = 512
TrainingImage = "TI_Collaboration_1of4_50x50x50_newRepresentation.dat"
LocModel = 'Models/3D_NewRepresentation/Allperc/%sx%sx%s_%sx%sx%s_4ConvNets_4HL_BN_3FC%s_ws%sx%sx%s_%sconvdepth/FeatMaps'%(int(HyperPar[3]),int(HyperPar[4]),int(HyperPar[5]), int(HyperPar[6]),int(HyperPar[7]),int(HyperPar[8]), int(HyperPar[11]), int(HyperPar[12]),int(HyperPar[12]),int(HyperPar[12]), int(HyperPar[13]))
#LocModel = 'Models/3D_NewRepresentation/New%sperc/%sx%sx%s_%sx%sx%s_4ConvNets_4HL_BN_3FC%s_ws%sx%sx%s_%sconvdepth/FeatMaps'%(int(HyperPar[9]), int(HyperPar[3]),int(HyperPar[4]),int(HyperPar[5]), int(HyperPar[6]),int(HyperPar[7]),int(HyperPar[8]), int(HyperPar[11]), int(HyperPar[12]),int(HyperPar[12]),int(HyperPar[12]), int(HyperPar[13]))
LocFile = 'Models/3D_NewRepresentation/Allperc/%sx%sx%s_%sx%sx%s_4ConvNets_4HL_BN_3FC%s_ws%sx%sx%s_%sconvdepth'%(int(HyperPar[3]),int(HyperPar[4]),int(HyperPar[5]), int(HyperPar[6]),int(HyperPar[7]),int(HyperPar[8]), int(HyperPar[11]), int(HyperPar[12]),int(HyperPar[12]),int(HyperPar[12]), int(HyperPar[13]))
#LocFile = 'Models/3D_NewRepresentation/New%sperc/%sx%sx%s_%sx%sx%s_4ConvNets_4HL_BN_3FC%s_ws%sx%sx%s_%sconvdepth'%(int(HyperPar[9]), int(HyperPar[3]),int(HyperPar[4]),int(HyperPar[5]), int(HyperPar[6]),int(HyperPar[7]),int(HyperPar[8]), int(HyperPar[11]), int(HyperPar[12]),int(HyperPar[12]),int(HyperPar[12]), int(HyperPar[13]))
print("[Graph]")
#fns_nested.CreateGraph_4ConvNets_4HL_NFeaConv_wdnhxwdnh_BN_3D_NoBN(HyperPar=HyperPar, LocModel=LocModel)
fns_nested.CreateGraph_4ConvNets_4HL_NFeaConv_wdnhxwdnh_BN_3D(HyperPar=HyperPar, LocModel=LocModel)
# To save the TI
TempSimGrid = fns_nested.Grid(HyperPar=HyperPar, DBname=TrainingImage, Lvl=3,Training=False, Padding=True)
TempSimGrid.SavePlot(name=LocModel+'_TI.png', Level=1)
MaxLR, MinLR = 0.01, 0.001
StepLR = 10
PointStart = 1
for indTrain in range(Ncicles):
#HyperPar[9] = np.random.randint(41)+10
cuos = indTrain%(2*StepLR)
if cuos < StepLR:
LearningRate = np.around(((MaxLR - MinLR)/StepLR)*cuos + MinLR, decimals=7)
else:
LearningRate = np.around(((MaxLR - MinLR)/StepLR)*(StepLR - cuos) + MaxLR, decimals=7)
start_time_1 = time.time()
print ("Cicle: {}".format(indTrain+PointStart), "Learning Rate: ", LearningRate)
TempSimGrid = fns_nested.Grid(HyperPar=HyperPar, DBname=TrainingImage, Lvl=5, Training=True, Padding=True)
print("[Sim]")
TempSimGrid.Simulate_4ConvNets_BN_3D(LocModel=LocModel, Cicle=(indTrain+PointStart), Plot=True)
print("[Saving Grid]")
TempSimGrid.SaveGrid(file="{}/TrainReas_{}.txt".format(LocFile, indTrain+PointStart))
print("[Train]")
TempSimGrid.Train_4ConvNets_BN_3D(Epochs=Nepoch, Num_samples=Nsamples, LocModel=LocModel, LR=LearningRate)
print("--%s seconds of whole training process-" % (np.around((time.time() - start_time_1), decimals=2)))
gc.collect()
print(" ")
print("--%s minutes of ALL training-" % ((time.time() - start_time_AllTrain)/60)) | [
"External_Functions_3D.Grid",
"numpy.around",
"gc.collect",
"External_Functions_3D.CreateGraph_4ConvNets_4HL_NFeaConv_wdnhxwdnh_BN_3D",
"time.time"
] | [((926, 937), 'time.time', 'time.time', ([], {}), '()\n', (935, 937), False, 'import time\n'), ((3406, 3510), 'External_Functions_3D.CreateGraph_4ConvNets_4HL_NFeaConv_wdnhxwdnh_BN_3D', 'fns_nested.CreateGraph_4ConvNets_4HL_NFeaConv_wdnhxwdnh_BN_3D', ([], {'HyperPar': 'HyperPar', 'LocModel': 'LocModel'}), '(HyperPar=\n HyperPar, LocModel=LocModel)\n', (3467, 3510), True, 'import External_Functions_3D as fns_nested\n'), ((3547, 3645), 'External_Functions_3D.Grid', 'fns_nested.Grid', ([], {'HyperPar': 'HyperPar', 'DBname': 'TrainingImage', 'Lvl': '(3)', 'Training': '(False)', 'Padding': '(True)'}), '(HyperPar=HyperPar, DBname=TrainingImage, Lvl=3, Training=\n False, Padding=True)\n', (3562, 3645), True, 'import External_Functions_3D as fns_nested\n'), ((4086, 4097), 'time.time', 'time.time', ([], {}), '()\n', (4095, 4097), False, 'import time\n'), ((4199, 4296), 'External_Functions_3D.Grid', 'fns_nested.Grid', ([], {'HyperPar': 'HyperPar', 'DBname': 'TrainingImage', 'Lvl': '(5)', 'Training': '(True)', 'Padding': '(True)'}), '(HyperPar=HyperPar, DBname=TrainingImage, Lvl=5, Training=\n True, Padding=True)\n', (4214, 4296), True, 'import External_Functions_3D as fns_nested\n'), ((4771, 4783), 'gc.collect', 'gc.collect', ([], {}), '()\n', (4781, 4783), False, 'import gc\n'), ((3905, 3967), 'numpy.around', 'np.around', (['((MaxLR - MinLR) / StepLR * cuos + MinLR)'], {'decimals': '(7)'}), '((MaxLR - MinLR) / StepLR * cuos + MinLR, decimals=7)\n', (3914, 3967), True, 'import numpy as np\n'), ((3995, 4068), 'numpy.around', 'np.around', (['((MaxLR - MinLR) / StepLR * (StepLR - cuos) + MaxLR)'], {'decimals': '(7)'}), '((MaxLR - MinLR) / StepLR * (StepLR - cuos) + MaxLR, decimals=7)\n', (4004, 4068), True, 'import numpy as np\n'), ((4848, 4859), 'time.time', 'time.time', ([], {}), '()\n', (4857, 4859), False, 'import time\n'), ((4722, 4733), 'time.time', 'time.time', ([], {}), '()\n', (4731, 4733), False, 'import time\n')] |
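# --- Editorial sketch (not part of the training script above) ------------------
# The learning-rate logic inside the training loop is a triangular (cyclical)
# schedule: it ramps linearly from MinLR up to MaxLR over StepLR cycles and back
# down again. The helper below reproduces exactly that arithmetic in isolation.
import numpy as np
def triangular_lr(cycle_index, min_lr=0.001, max_lr=0.01, step=10):
    """Return the learning rate used at a given training cycle."""
    cuos = cycle_index % (2 * step)
    if cuos < step:  # rising half of the cycle
        return np.around((max_lr - min_lr) / step * cuos + min_lr, decimals=7)
    return np.around((max_lr - min_lr) / step * (step - cuos) + max_lr, decimals=7)
# e.g. triangular_lr(0) == 0.001, triangular_lr(10) == 0.01, triangular_lr(15) == 0.0055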
import logging
from typing import Dict
from django.http import HttpRequest
logger = logging.getLogger(__name__)
class FeatureFlagProvider:
def is_feature_enabled(self, feature_name: str, user_id: str = None, attributes: Dict = None):
raise NotImplementedError("You must override FeatureFlagProvider.is_feature_enabled()")
def _attributes_from_request(request: HttpRequest) -> Dict:
if not request:
return dict()
attributes = dict()
try:
attributes["is_staff"] = request.user.is_staff
return attributes
except Exception:
logger.exception(
"Unexpected exception while trying to parse http-request for feature-attributes."
)
return dict()
def is_feature_enabled(feature_name: str, request: HttpRequest) -> bool:
from django.conf import settings
is_enabled = False
attributes = _attributes_from_request(request)
try:
is_enabled = settings.FEATURE_FLAG_PROVIDER.is_feature_enabled(
feature_name=feature_name, user_id="dontcare", attributes=attributes
)
logger.info(f"Feature '{feature_name}' is enabled={is_enabled}")
except Exception:
logger.exception(f"Exception while trying to check feature-flag state for '{feature_name}'")
return is_enabled
| [
"logging.getLogger",
"django.conf.settings.FEATURE_FLAG_PROVIDER.is_feature_enabled"
] | [((86, 113), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (103, 113), False, 'import logging\n'), ((946, 1069), 'django.conf.settings.FEATURE_FLAG_PROVIDER.is_feature_enabled', 'settings.FEATURE_FLAG_PROVIDER.is_feature_enabled', ([], {'feature_name': 'feature_name', 'user_id': '"""dontcare"""', 'attributes': 'attributes'}), "(feature_name=feature_name,\n user_id='dontcare', attributes=attributes)\n", (995, 1069), False, 'from django.conf import settings\n')] |
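# --- Editorial sketch (not part of the module above) ---------------------------
# A minimal concrete provider plugged into the helpers above. The class name, the
# in-memory flag table and the settings wiring are illustrative only; a real
# deployment would delegate to LaunchDarkly, Unleash, a database table, etc.
class StaticFeatureFlagProvider(FeatureFlagProvider):
    def __init__(self, enabled_features=None):
        self._enabled = set(enabled_features or [])
    def is_feature_enabled(self, feature_name, user_id=None, attributes=None):
        if attributes and attributes.get("is_staff"):
            return True  # e.g. staff accounts see every flag early
        return feature_name in self._enabled
# settings.py (assumed):
#   FEATURE_FLAG_PROVIDER = StaticFeatureFlagProvider(enabled_features=["new_dashboard"])
# View code then simply calls is_feature_enabled("new_dashboard", request).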
from collections import OrderedDict
import torch
import torch.nn as nn
from torch_geometric.data.batch import Batch
class GNN(nn.Module):
def __init__(self, mp_steps, **config):
super().__init__()
self.mp_steps = mp_steps
self.update_fns = self.assign_update_fns()
self.readout_fns = self.assign_readout_fns()
def assign_update_fns(self) -> OrderedDict:
raise NotImplementedError
def assign_readout_fns(self) -> dict:
raise NotImplementedError
def forward(self, batch: Batch, output_all_steps=True):
edge_index = batch.edge_index
sections = (
torch.bincount(batch.batch).tolist() if hasattr(batch, "batch") else None
)
hiddens = self.initialize(batch)
del batch
# update attributes with update and aggregation step
outputs = {element: [] for element in self.readout_fns.keys()}
for step in range(self.mp_steps):
hiddens = self.step(edge_index=edge_index, sections=sections, **hiddens)
if not output_all_steps and (step + 1) != self.mp_steps:
continue
for element, readout_fn in self.readout_fns.items():
outputs[element].append(readout_fn(**hiddens))
return outputs
def initialize(self, batch):
hiddens = {}
        # initialize attributes through embeddings and initialize LSTM states to None
for element in self.embeddings.keys():
embedding = self.embeddings[element](batch[f"{element}_input"])
hiddens.update(
{
f"{element}_input": embedding,
f"{element}_embedding": embedding.clone(),
f"{element}_lstm": None,
}
)
return hiddens
def step(self, edge_index, sections, **hiddens):
"""
Perform a message passing step by propagating information and updating each element
"""
for element, update_fn in self.update_fns.items():
hiddens[f"{element}_embedding"], hiddens[f"{element}_lstm"] = update_fn(
edge_index=edge_index, sections=sections, element=element, **hiddens
)
return hiddens
| [
"torch.bincount"
] | [((642, 669), 'torch.bincount', 'torch.bincount', (['batch.batch'], {}), '(batch.batch)\n', (656, 669), False, 'import torch\n')] |
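# --- Editorial sketch (not part of the module above) ---------------------------
# What a concrete subclass of the abstract GNN has to supply: an `embeddings`
# ModuleDict keyed by element name, plus update/readout functions that honour the
# (embedding, lstm) contract used in step(). The single "node" element, the layer
# sizes and the sum aggregation are illustrative choices, not the original model.
from collections import OrderedDict
import torch
import torch.nn as nn
class NodeOnlyGNN(GNN):  # assumes the GNN base class above is importable
    def __init__(self, vocab_size, hidden_dim, mp_steps=3):
        super().__init__(mp_steps)
        self.embeddings = nn.ModuleDict({"node": nn.Embedding(vocab_size, hidden_dim)})
        self.node_mlp = nn.Sequential(nn.Linear(2 * hidden_dim, hidden_dim), nn.ReLU())
        self.out = nn.Linear(hidden_dim, 1)
    def assign_update_fns(self):
        return OrderedDict(node=self.update_nodes)
    def assign_readout_fns(self):
        return {"node": self.readout_nodes}
    def update_nodes(self, edge_index, sections, element, **hiddens):
        h = hiddens["node_embedding"]
        src, dst = edge_index
        agg = torch.zeros_like(h).index_add_(0, dst, h[src])  # sum of neighbour states
        new_h = self.node_mlp(torch.cat([h, agg], dim=-1))
        return new_h, hiddens["node_lstm"]  # keep the (embedding, lstm) contract
    def readout_nodes(self, **hiddens):
        return self.out(hiddens["node_embedding"])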
"""
SVG export test
"""
import test
import pyqtgraph as pg
app = pg.mkQApp()
class SVGTest(test.TestCase):
#def test_plotscene(self):
#pg.setConfigOption('foreground', (0,0,0))
#w = pg.GraphicsWindow()
#w.show()
#p1 = w.addPlot()
#p2 = w.addPlot()
#p1.plot([1,3,2,3,1,6,9,8,4,2,3,5,3], pen={'color':'k'})
#p1.setXRange(0,5)
#p2.plot([1,5,2,3,4,6,1,2,4,2,3,5,3], pen={'color':'k', 'cosmetic':False, 'width': 0.3})
#app.processEvents()
#app.processEvents()
#ex = pg.exporters.SVGExporter.SVGExporter(w.scene())
#ex.export(fileName='test.svg')
def test_simple(self):
scene = pg.QtGui.QGraphicsScene()
#rect = pg.QtGui.QGraphicsRectItem(0, 0, 100, 100)
#scene.addItem(rect)
#rect.setPos(20,20)
#rect.translate(50, 50)
#rect.rotate(30)
#rect.scale(0.5, 0.5)
#rect1 = pg.QtGui.QGraphicsRectItem(0, 0, 100, 100)
#rect1.setParentItem(rect)
#rect1.setFlag(rect1.ItemIgnoresTransformations)
#rect1.setPos(20, 20)
#rect1.scale(2,2)
#el1 = pg.QtGui.QGraphicsEllipseItem(0, 0, 100, 100)
#el1.setParentItem(rect1)
##grp = pg.ItemGroup()
#grp.setParentItem(rect)
#grp.translate(200,0)
##grp.rotate(30)
#rect2 = pg.QtGui.QGraphicsRectItem(0, 0, 100, 25)
#rect2.setFlag(rect2.ItemClipsChildrenToShape)
#rect2.setParentItem(grp)
#rect2.setPos(0,25)
#rect2.rotate(30)
#el = pg.QtGui.QGraphicsEllipseItem(0, 0, 100, 50)
#el.translate(10,-5)
#el.scale(0.5,2)
#el.setParentItem(rect2)
grp2 = pg.ItemGroup()
scene.addItem(grp2)
grp2.scale(100,100)
rect3 = pg.QtGui.QGraphicsRectItem(0,0,2,2)
rect3.setPen(pg.mkPen(width=1, cosmetic=False))
grp2.addItem(rect3)
ex = pg.exporters.SVGExporter.SVGExporter(scene)
ex.export(fileName='test.svg')
if __name__ == '__main__':
test.unittest.main() | [
"pyqtgraph.QtGui.QGraphicsRectItem",
"pyqtgraph.ItemGroup",
"pyqtgraph.mkQApp",
"test.unittest.main",
"pyqtgraph.exporters.SVGExporter.SVGExporter",
"pyqtgraph.mkPen",
"pyqtgraph.QtGui.QGraphicsScene"
] | [((65, 76), 'pyqtgraph.mkQApp', 'pg.mkQApp', ([], {}), '()\n', (74, 76), True, 'import pyqtgraph as pg\n'), ((2116, 2136), 'test.unittest.main', 'test.unittest.main', ([], {}), '()\n', (2134, 2136), False, 'import test\n'), ((704, 729), 'pyqtgraph.QtGui.QGraphicsScene', 'pg.QtGui.QGraphicsScene', ([], {}), '()\n', (727, 729), True, 'import pyqtgraph as pg\n'), ((1754, 1768), 'pyqtgraph.ItemGroup', 'pg.ItemGroup', ([], {}), '()\n', (1766, 1768), True, 'import pyqtgraph as pg\n'), ((1850, 1888), 'pyqtgraph.QtGui.QGraphicsRectItem', 'pg.QtGui.QGraphicsRectItem', (['(0)', '(0)', '(2)', '(2)'], {}), '(0, 0, 2, 2)\n', (1876, 1888), True, 'import pyqtgraph as pg\n'), ((1992, 2035), 'pyqtgraph.exporters.SVGExporter.SVGExporter', 'pg.exporters.SVGExporter.SVGExporter', (['scene'], {}), '(scene)\n', (2028, 2035), True, 'import pyqtgraph as pg\n'), ((1907, 1940), 'pyqtgraph.mkPen', 'pg.mkPen', ([], {'width': '(1)', 'cosmetic': '(False)'}), '(width=1, cosmetic=False)\n', (1915, 1940), True, 'import pyqtgraph as pg\n')] |
"""This file contains functions for loading and preprocessing pianoroll data.
"""
import logging
import numpy as np
import tensorflow.compat.v1 as tf
from musegan.config import SHUFFLE_BUFFER_SIZE, PREFETCH_SIZE
LOGGER = logging.getLogger(__name__)
# --- Data loader --------------------------------------------------------------
def load_data_from_npy(filename):
"""Load and return the training data from a npy file."""
return np.load(filename)
def load_data_from_npz(filename):
"""Load and return the training data from a npz file (sparse format)."""
with np.load(filename) as f:
data = np.zeros(f['shape'], np.bool_)
data[[x for x in f['nonzero']]] = True
return data
def load_data(data_source, data_filename):
"""Load and return the training data."""
if data_source == 'sa':
import SharedArray as sa
return sa.attach(data_filename)
if data_source == 'npy':
return load_data_from_npy(data_filename)
if data_source == 'npz':
return load_data_from_npz(data_filename)
raise ValueError("Expect `data_source` to be one of 'sa', 'npy', 'npz'. "
"But get " + str(data_source))
# --- Dataset Utilities -------------------------------------------------------
def random_transpose(pianoroll):
"""Randomly transpose a pianoroll with [-5, 6] semitones."""
semitone = np.random.randint(-5, 6)
if semitone > 0:
pianoroll[:, semitone:, 1:] = pianoroll[:, :-semitone, 1:]
pianoroll[:, :semitone, 1:] = 0
elif semitone < 0:
pianoroll[:, :semitone, 1:] = pianoroll[:, -semitone:, 1:]
pianoroll[:, semitone:, 1:] = 0
return pianoroll
def set_pianoroll_shape(pianoroll, data_shape):
"""Set the pianoroll shape and return the pianoroll."""
pianoroll.set_shape(data_shape)
return pianoroll
def set_label_shape(label):
"""Set the label shape and return the label."""
label.set_shape([1])
return label
# --- Sampler ------------------------------------------------------------------
def get_samples(n_samples, data, labels=None, use_random_transpose=False):
"""Return some random samples of the training data."""
indices = np.random.choice(len(data), n_samples, False)
if np.issubdtype(data.dtype, np.bool_):
sample_data = data[indices] * 2. - 1.
else:
sample_data = data[indices]
if use_random_transpose:
sample_data = np.array([random_transpose(x) for x in sample_data])
if labels is None:
return sample_data
return sample_data, labels[indices]
# --- Tensorflow Dataset -------------------------------------------------------
def _gen_data(data, labels=None):
"""Data Generator."""
if labels is None:
for item in data:
if np.issubdtype(data.dtype, np.bool_):
yield item * 2. - 1.
else:
yield item
else:
for i, item in enumerate(data):
if np.issubdtype(data.dtype, np.bool_):
yield (item * 2. - 1., labels[i])
else:
yield (item, labels[i])
def get_dataset(data, labels=None, batch_size=None, data_shape=None,
use_random_transpose=False, num_threads=1):
"""Create and return a tensorflow dataset from an array."""
if labels is None:
dataset = tf.data.Dataset.from_generator(
lambda: _gen_data(data), tf.float32)
if use_random_transpose:
dataset = dataset.map(
lambda pianoroll: tf.py_func(
random_transpose, [pianoroll], tf.float32),
num_parallel_calls=num_threads)
dataset = dataset.map(lambda pianoroll: set_pianoroll_shape(
pianoroll, data_shape), num_parallel_calls=num_threads)
else:
        assert len(data) == len(labels), (
            "Lengths of `data` and `labels` do not match.")
dataset = tf.data.Dataset.from_generator(
lambda: _gen_data(data, labels), [tf.float32, tf.int32])
if use_random_transpose:
dataset = dataset.map(
lambda pianoroll, label: (
tf.py_func(random_transpose, [pianoroll], tf.float32),
label),
num_parallel_calls=num_threads)
dataset = dataset.map(
lambda pianoroll, label: (set_pianoroll_shape(
pianoroll, data_shape), set_label_shape(label)),
num_parallel_calls=num_threads)
dataset = dataset.shuffle(SHUFFLE_BUFFER_SIZE).repeat().batch(batch_size)
return dataset.prefetch(PREFETCH_SIZE)
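if __name__ == "__main__":
    # Example usage sketch (added for illustration; not part of the original
    # module). The npz filename and the hyperparameter values below are
    # assumptions - substitute your own training data and settings.
    training_data = load_data('npz', 'train_x.npz')
    sample_batch = get_samples(4, training_data, use_random_transpose=True)
    LOGGER.info("Sampled pianorolls with shape %s", sample_batch.shape)
    dataset = get_dataset(training_data, batch_size=16,
                          data_shape=training_data.shape[1:],
                          use_random_transpose=True, num_threads=1)
    LOGGER.info("Built tf.data pipeline: %s", dataset)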
| [
"logging.getLogger",
"numpy.issubdtype",
"numpy.random.randint",
"numpy.zeros",
"tensorflow.compat.v1.py_func",
"SharedArray.attach",
"numpy.load"
] | [((221, 248), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (238, 248), False, 'import logging\n'), ((437, 454), 'numpy.load', 'np.load', (['filename'], {}), '(filename)\n', (444, 454), True, 'import numpy as np\n'), ((1379, 1403), 'numpy.random.randint', 'np.random.randint', (['(-5)', '(6)'], {}), '(-5, 6)\n', (1396, 1403), True, 'import numpy as np\n'), ((2255, 2290), 'numpy.issubdtype', 'np.issubdtype', (['data.dtype', 'np.bool_'], {}), '(data.dtype, np.bool_)\n', (2268, 2290), True, 'import numpy as np\n'), ((576, 593), 'numpy.load', 'np.load', (['filename'], {}), '(filename)\n', (583, 593), True, 'import numpy as np\n'), ((615, 645), 'numpy.zeros', 'np.zeros', (["f['shape']", 'np.bool_'], {}), "(f['shape'], np.bool_)\n", (623, 645), True, 'import numpy as np\n'), ((874, 898), 'SharedArray.attach', 'sa.attach', (['data_filename'], {}), '(data_filename)\n', (883, 898), True, 'import SharedArray as sa\n'), ((2784, 2819), 'numpy.issubdtype', 'np.issubdtype', (['data.dtype', 'np.bool_'], {}), '(data.dtype, np.bool_)\n', (2797, 2819), True, 'import numpy as np\n'), ((2968, 3003), 'numpy.issubdtype', 'np.issubdtype', (['data.dtype', 'np.bool_'], {}), '(data.dtype, np.bool_)\n', (2981, 3003), True, 'import numpy as np\n'), ((3532, 3585), 'tensorflow.compat.v1.py_func', 'tf.py_func', (['random_transpose', '[pianoroll]', 'tf.float32'], {}), '(random_transpose, [pianoroll], tf.float32)\n', (3542, 3585), True, 'import tensorflow.compat.v1 as tf\n'), ((4156, 4209), 'tensorflow.compat.v1.py_func', 'tf.py_func', (['random_transpose', '[pianoroll]', 'tf.float32'], {}), '(random_transpose, [pianoroll], tf.float32)\n', (4166, 4209), True, 'import tensorflow.compat.v1 as tf\n')] |
import torch
import torch.nn as nn
import torch.nn.functional as F
from .kernels import (
get_spatial_gradient_kernel2d,
get_spatial_gradient_kernel3d,
normalize_kernel2d
)
def spatial_gradient(input, mode='sobel', order=1, normalized=True):
"""
Computes the first order image derivative in both x and y using a Sobel operator.
"""
if not len(input.shape) == 4:
raise ValueError("Invalid input shape, we expect BxCxHxW. Got: {}"
.format(input.shape))
# allocate kernel
kernel = get_spatial_gradient_kernel2d(mode, order)
if normalized:
kernel = normalize_kernel2d(kernel)
# prepare kernel
b, c, h, w = input.shape
tmp_kernel = kernel.to(input).detach()
tmp_kernel = tmp_kernel.unsqueeze(1).unsqueeze(1)
# convolve input tensor with sobel kernel
kernel_flip = tmp_kernel.flip(-3)
    # Pad with "replicate" for spatial dims, but with zeros for channel
spatial_pad = [
kernel.size(1) // 2,
kernel.size(1) // 2,
kernel.size(2) // 2,
kernel.size(2) // 2
]
out_channels = 3 if order == 2 else 2
padded_inp = F.pad(input.reshape(b * c, 1, h, w), spatial_pad, 'replicate')[:, :, None]
return F.conv3d(padded_inp, kernel_flip, padding=0).view(b, c, out_channels, h, w)
def spatial_gradient3d(input, mode='diff', order=1):
"""
Computes the first and second order volume derivative in x, y and d using a diff operator.
"""
if not len(input.shape) == 5:
raise ValueError("Invalid input shape, we expect BxCxDxHxW. Got: {}"
.format(input.shape))
# allocate kernel
kernel = get_spatial_gradient_kernel3d(mode, order)
# prepare kernel
b, c, d, h, w = input.shape
tmp_kernel = kernel.to(input).detach()
tmp_kernel = tmp_kernel.repeat(c, 1, 1, 1, 1)
# convolve input tensor with grad kernel
kernel_flip = tmp_kernel.flip(-3)
    # Pad with "replicate" for spatial dims, but with zeros for channel
spatial_pad = [
kernel.size(2) // 2,
kernel.size(2) // 2,
kernel.size(3) // 2,
kernel.size(3) // 2,
kernel.size(4) // 2,
kernel.size(4) // 2
]
out_ch = 6 if order == 2 else 3
return F.conv3d(F.pad(
input, spatial_pad, 'replicate'), kernel_flip, padding=0, groups=c).view(b, c, out_ch, d, h, w)
def sobel(input, normalized=True, eps=1e-6):
"""
Computes the Sobel operator and returns the magnitude per channel.
"""
if not len(input.shape) == 4:
raise ValueError("Invalid input shape, we expect BxCxHxW. Got: {}"
.format(input.shape))
    # compute the x/y gradients
edges = spatial_gradient(input, normalized=normalized)
# unpack the edges
gx = edges[:, :, 0]
gy = edges[:, :, 1]
    # compute the gradient magnitude
magnitude = torch.sqrt(gx * gx + gy * gy + eps)
return magnitude
class SpatialGradient(nn.Module):
"""
Computes the first order image derivative in both x and y using a Sobel operator.
"""
def __init__(self, mode='sobel', order=1, normalized=True):
super(SpatialGradient, self).__init__()
self.normalized = normalized
self.order = order
self.mode = mode
def forward(self, input):
return spatial_gradient(input, self.mode, self.order, self.normalized)
class SpatialGradient3d(nn.Module):
"""
Computes the first and second order volume derivative in x, y and d using a diff operator.
"""
def __init__(self, mode='diff', order=1):
super(SpatialGradient3d, self).__init__()
self.order = order
self.mode = mode
self.kernel = get_spatial_gradient_kernel3d(mode, order)
def forward(self, input):
return spatial_gradient3d(input, self.mode, self.order)
class Sobel(nn.Module):
"""
Computes the Sobel operator and returns the magnitude per channel.
"""
def __init__(self, normalized=True, eps=1e-6):
super(Sobel, self).__init__()
self.normalized = normalized
self.eps = eps
def forward(self, input):
return sobel(input, self.normalized, self.eps)
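if __name__ == "__main__":
    # Example usage sketch (added for illustration; not part of the original
    # module). Because of the relative import above, run it through its
    # package, e.g. ``python -m <package>.<this_module>``; sizes are arbitrary.
    img = torch.rand(1, 3, 32, 32)                # BxCxHxW input batch
    grads = spatial_gradient(img)                 # BxCx2xHxW: d/dx and d/dy
    edges = Sobel()(img)                          # BxCxHxW gradient magnitude
    vol = torch.rand(1, 1, 8, 32, 32)              # BxCxDxHxW volume
    vol_grads = SpatialGradient3d()(vol)          # BxCx3xDxHxW first derivatives
    print(grads.shape, edges.shape, vol_grads.shape)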
| [
"torch.nn.functional.pad",
"torch.sqrt",
"torch.nn.functional.conv3d"
] | [((2916, 2951), 'torch.sqrt', 'torch.sqrt', (['(gx * gx + gy * gy + eps)'], {}), '(gx * gx + gy * gy + eps)\n', (2926, 2951), False, 'import torch\n'), ((1255, 1299), 'torch.nn.functional.conv3d', 'F.conv3d', (['padded_inp', 'kernel_flip'], {'padding': '(0)'}), '(padded_inp, kernel_flip, padding=0)\n', (1263, 1299), True, 'import torch.nn.functional as F\n'), ((2296, 2334), 'torch.nn.functional.pad', 'F.pad', (['input', 'spatial_pad', '"""replicate"""'], {}), "(input, spatial_pad, 'replicate')\n", (2301, 2334), True, 'import torch.nn.functional as F\n')] |
# ******************************************************************
# |docname| - Provide `docker_tools.py` as the script `docker-tools`
# ******************************************************************
from setuptools import setup
setup(
name="runestone-docker-tools",
version="0.1",
install_requires=["click"],
entry_points={
"console_scripts": ["docker-tools = docker_tools:cli"]
},
)
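# Usage sketch (added for illustration): after `pip install .`, the entry point
# above exposes the CLI as `docker-tools`, e.g. `docker-tools --help`.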
| [
"setuptools.setup"
] | [((237, 397), 'setuptools.setup', 'setup', ([], {'name': '"""runestone-docker-tools"""', 'version': '"""0.1"""', 'install_requires': "['click']", 'entry_points': "{'console_scripts': ['docker-tools = docker_tools:cli']}"}), "(name='runestone-docker-tools', version='0.1', install_requires=[\n 'click'], entry_points={'console_scripts': [\n 'docker-tools = docker_tools:cli']})\n", (242, 397), False, 'from setuptools import setup\n')] |
import os
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import numpy as np
import matplotlib.pyplot as plt
# local model
import sys
sys.path.append("../network")
import Coral
from lstm import LSTMHardSigmoid
from AdaBN import AdaBN
sys.path.append("../network/AutoEncoder")
import AutoEncoder
class cnnblstm_with_adabn(nn.Module):
PARAMS_FILE = "params.pkl"
PARAMS_AE = "params_ae.pkl"
NET1_ADABN = "net1_adabn"
NET2_ADABN = "net2_adabn"
NET3_ADABN = "net3_adabn"
def __init__(self, time_steps = 800, n_features = 3, n_outputs = 10, use_cuda = False, params_dir = "./params", enable_CORAL = False):
super(cnnblstm_with_adabn, self).__init__()
self.time_steps = time_steps
self.n_features = n_features
self.n_outputs = n_outputs
self.use_cuda = use_cuda
self.params_dir = params_dir
if not os.path.exists(self.params_dir):
os.mkdir(self.params_dir)
self.enable_CORAL = enable_CORAL
self.n_filters = 128
self.kernel_size = 15
self.n_hidden = 150 # 150
self.n_layers = 1
self.bidirectional = True
# self.ae = AutoEncoder.load_AE(type = "ConvAE", time_steps = self.time_steps, n_features = self.n_features, use_cuda = self.use_cuda, params_pkl = os.path.join(self.params_dir, cnnblstm_with_adabn.PARAMS_AE))
# build net1 cnn
self.net1 = nn.Sequential(
nn.Conv1d(in_channels = self.n_features, out_channels = self.n_filters, kernel_size = self.kernel_size),
# nn.Conv1d(in_channels = self.ae.n_filters3, out_channels = self.n_filters, kernel_size = self.kernel_size),
nn.ReLU(),
# nn.Sigmoid(),
nn.Dropout(p = 0.5),
nn.MaxPool1d(kernel_size = 2)
)
# build net1_adabn
self.net1_adabn = AdaBN(self.n_filters, variables_dir = os.path.join(self.params_dir, cnnblstm_with_adabn.NET1_ADABN), use_cuda = self.use_cuda)
# build net2 blstm
# self.net2 = nn.LSTM(input_size = self.n_filters, hidden_size = self.n_hidden, num_layers = self.n_layers, dropout = 0.2, batch_first = True, bidirectional = self.bidirectional, bias = True)
self.net2 = LSTMHardSigmoid(input_size = self.n_filters, hidden_size = self.n_hidden, num_layers = self.n_layers, dropout = 0.2, batch_first = True, bidirectional = self.bidirectional, bias = True)
# build net2_adabn
if self.bidirectional:
n_blstm_output = self.n_hidden * 2
else:
n_blstm_output = self.n_hidden
self.net2_adabn = AdaBN(n_blstm_output, variables_dir = os.path.join(self.params_dir, cnnblstm_with_adabn.NET2_ADABN), use_cuda = self.use_cuda)
# build net3 fc
self.net3 = nn.Sequential(
nn.Linear(n_blstm_output, 50, bias = True),
nn.ReLU(),
# nn.Sigmoid(),
)
# build net3_adabn
self.net3_adabn = AdaBN(50, variables_dir = os.path.join(self.params_dir, cnnblstm_with_adabn.NET3_ADABN), use_cuda = self.use_cuda)
# build net4 fc
self.net4 = nn.Sequential(
nn.Dropout(p = 0.2),
nn.Linear(50, self.n_outputs, bias = True),
nn.Softmax(dim = 1)
)
def init_hidden(self, batch_size):
"""
init blstm's hidden states
"""
if self.bidirectional:
n_layers = self.n_layers * 2
else:
n_layers = self.n_layers
if self.use_cuda:
hidden_state = torch.zeros(n_layers, batch_size, self.n_hidden).cuda()
cell_state = torch.zeros(n_layers, batch_size, self.n_hidden).cuda()
else:
hidden_state = torch.zeros(n_layers, batch_size, self.n_hidden)
cell_state = torch.zeros(n_layers, batch_size, self.n_hidden)
self.hidden = (hidden_state, cell_state)
def reset_parameters(self):
"""
temp useless
Here we reproduce Keras default initialization weights for consistency with Keras version
"""
# get weights & bias set
net1_weights = ((name, param.data) for name, param in self.named_parameters() if (("weight" in name) and (("net1" in name) and ("net1_adabn" not in name))))
net1_biases = ((name, param.data) for name, param in self.named_parameters() if (("bias" in name) and (("net1" in name) and ("net1_adabn" not in name))))
# net2_weights = ((name, param.data) for name, param in self.named_parameters() if (("weight" in name) and (("net2" in name) and ("net2_adabn" not in name))))
# net2_biases = ((name, param.data) for name, param in self.named_parameters() if (("bias" in name) and (("net2" in name) and ("net2_adabn" not in name))))
net3_weights = ((name, param.data) for name, param in self.named_parameters() if (("weight" in name) and (("net3" in name) and ("net3_adabn" not in name))))
net3_biases = ((name, param.data) for name, param in self.named_parameters() if (("bias" in name) and (("net3" in name) and ("net3_adabn" not in name))))
net4_weights = ((name, param.data) for name, param in self.named_parameters() if (("weight" in name) and (("net4" in name) and ("net4_adabn" not in name))))
net4_biases = ((name, param.data) for name, param in self.named_parameters() if (("bias" in name) and (("net4" in name) and ("net4_adabn" not in name))))
# init weights & bias
# self.ae.reset_parameters()
for name, params_data in net1_weights:
# print(name)
nn.init.xavier_uniform_(params_data)
for name, params_data in net1_biases:
nn.init.constant_(params_data, 0)
self.net1_adabn.reset_parameters()
self.net2.reset_parameters() # lstm reset parameters
self.net2_adabn.reset_parameters()
for name, params_data in net3_weights:
nn.init.xavier_uniform_(params_data)
for name, params_data in net3_biases:
nn.init.constant_(params_data, 0)
self.net3_adabn.reset_parameters()
for name, params_data in net4_weights:
nn.init.xavier_uniform_(params_data)
for name, params_data in net4_biases:
nn.init.constant_(params_data, 0)
def forward(self, input):
"""
compute the output of input according to the entire network model
"""
# print(input.shape)
# AutoEncoder
# input = self.ae.encoder(input)
# input = self.ae(input)
# MaxPool1d
maxPool1d_output = self.net1(input)
# maxPool1d_adabn_output = maxPool1d_output
maxPool1d_adabn_output, maxPool1d_output = self.net1_adabn(maxPool1d_output), None
maxPool1d_adabn_t_output = maxPool1d_adabn_output.permute(0, 2, 1).contiguous()
# BiLSTM
(bilstm_output, _), maxPool1d_adabn_t_output = self.net2(maxPool1d_adabn_t_output, None), None
# MaxPooling1D time_steps
bilstm_output = bilstm_output.permute(0, 2, 1)
maxPooling_output, bilstm_output = F.max_pool1d(bilstm_output, kernel_size = bilstm_output.size(2)).squeeze(2), None
# maxPooling_adabn_output = maxPooling_output
maxPooling_adabn_output, maxPooling_output = self.net2_adabn(maxPooling_output), None
# get classifier
net3_output, maxPooling_adabn_output = self.net3(maxPooling_adabn_output), None
net3_adabn_output, net3_output = self.net3_adabn(net3_output), None
linear2_softmax_output, net3_adabn_output = self.net4(net3_adabn_output), None
return linear2_softmax_output
def update_adabn_running_stats(self):
"""
update adabn running states, update mu_j with mu_j_next to start next round
"""
self.net1_adabn.update_running_stats()
self.net2_adabn.update_running_stats()
self.net3_adabn.update_running_stats()
def trainAllLayers(self, train_x, train_y, test_x = None, learning_rate = 0.001, n_epoches = 20, batch_size = 20, shuffle = True):
"""
train all layers of network model
"""
# print(os.environ["CUDA_VISIBLE_DEVICES"])
# CORAL
if self.enable_CORAL:
if test_x == None:
print("ERROR: (in cnnblstm_with_adabn.trainAllLayers) test_x == None!")
return
# review train_x & test_x
train_x = train_x.view(-1, self.time_steps * self.n_features)
test_x = test_x.view(-1, self.time_steps * self.n_features)
# get CORAL(train_x, test_x)
train_x = Coral.CORAL_torch(train_x, test_x)
# review train_x
train_x = train_x.view(-1, self.n_features, self.time_steps)
# optimize all cnn parameters
params = [{"params": model.parameters()} for model in self.children() if model not in [self.ae]]
optimizer = torch.optim.Adam(params, lr = learning_rate)
# the target label is not one-hotted
loss_func = nn.CrossEntropyLoss()
# init params
self.reset_parameters()
# load params
self.load_params()
# set train mode True
self.train()
# get parallel model
parallel_cba = self
if self.use_cuda:
# print("we use cuda!")
parallel_cba = torch.nn.DataParallel(self, device_ids = range(torch.cuda.device_count()))
# parallel_cba = parallel_cba.cuda()
# if use_cuda
if self.use_cuda:
train_x = train_x.cuda()
train_y = train_y.cuda()
"""
# get autoencoder
self.ae = AutoEncoder.train_AE(self.ae, train_x, train_x, n_epoches = 20)
self.ae.save_params()
"""
# get train_data
train_data = torch.utils.data.TensorDataset(train_x, train_y)
# Data Loader for easy mini-batch return in training
train_loader = torch.utils.data.DataLoader(dataset = train_data, batch_size = batch_size, shuffle = shuffle)
# training and testing
for epoch in range(n_epoches):
# init loss & acc
train_loss = 0
train_acc = 0
for step, (b_x, b_y) in enumerate(train_loader): # gives batch data
b_x = b_x.view(-1, self.n_features, self.time_steps) # reshape x to (batch, n_features, time_step)
if self.use_cuda:
b_x, b_y = Variable(b_x).cuda(), Variable(b_y).cuda()
else:
b_x, b_y = Variable(b_x), Variable(b_y)
"""
# get hidden
if self.use_cuda:
self.init_hidden(b_x.size(0) // torch.cuda.device_count())
else:
self.init_hidden(b_x.size(0))
"""
# update adabn running stats
self.update_adabn_running_stats()
# get output
output = parallel_cba(b_x) # CNN_BLSTM output
# get loss
loss = loss_func(output, b_y) # cross entropy loss
train_loss += loss.item() * len(b_y)
_, pre = torch.max(output, 1)
num_acc = (pre == b_y).sum()
train_acc += num_acc.item()
# backward
optimizer.zero_grad() # clear gradients for this training step
loss.backward() # backpropagation, compute gradients
optimizer.step() # apply gradients
# print loss
# if (step + 1) % 5 == 0:
# print("[{}/{}], train loss is: {:.6f}, train acc is: {:.6f}".format(step, len(train_loader), train_loss / ((step + 1) * batch_size), train_acc / ((step + 1) * batch_size)))
print("[{}/{}], train loss is: {:.6f}, train acc is: {:.6f}".format(len(train_loader), len(train_loader), train_loss / (len(train_loader) * batch_size), train_acc / (len(train_loader) * batch_size)))
# save params
self.save_params()
# print("train finish!")
def getTestAccuracy(self, test_x, test_y):
"""
test network model with test set
"""
# init params
self.reset_parameters()
# load params
self.load_params()
# set eval
self.eval()
# get parallel model
parallel_cba = self
if self.use_cuda:
# print("we use cuda!")
parallel_cba = torch.nn.DataParallel(self, device_ids = range(torch.cuda.device_count()))
# parallel_cba = parallel_cba.cuda()
# cuda test_data
with torch.no_grad():
if self.use_cuda:
test_x, test_y = Variable(test_x).cuda(), Variable(test_y).cuda()
else:
test_x, test_y = Variable(test_x), Variable(test_y)
"""
# get hidden
if self.use_cuda:
self.init_hidden(test_x.size(0) // torch.cuda.device_count())
else:
self.init_hidden(test_x.size(0))
"""
# update adabn running stats
self.update_adabn_running_stats()
# get output
with torch.no_grad():
output = parallel_cba(test_x)
# print(output)
prediction = torch.max(output, 1)[1]
pred_y = prediction.cpu().data.numpy()
# print(pred_y)
target_y = test_y.cpu().data.numpy()
# print(test_y)
accuracy = float((pred_y == target_y).astype(int).sum()) / float(target_y.size)
# print("Accuracy: ", str(accuracy))
return accuracy
def save_params(self):
"""
save params & adabn's inner stats
"""
self.save_adabn_variables()
torch.save(self.state_dict(), os.path.join(self.params_dir, cnnblstm_with_adabn.PARAMS_FILE))
# self.ae.save_params()
# print("save_params success!")
def save_adabn_variables(self):
"""
save adabn's inner stats
"""
self.net1_adabn.save_attrs()
self.net2_adabn.save_attrs()
self.net3_adabn.save_attrs()
def load_params(self):
"""
load params & adabn's inner stats
"""
self.load_adabn_variables()
if os.path.exists(os.path.join(self.params_dir, cnnblstm_with_adabn.PARAMS_FILE)):
if self.use_cuda:
self.load_state_dict(torch.load(os.path.join(self.params_dir, cnnblstm_with_adabn.PARAMS_FILE), map_location = torch.device('cuda')))
else:
self.load_state_dict(torch.load(os.path.join(self.params_dir, cnnblstm_with_adabn.PARAMS_FILE), map_location = torch.device('cpu')))
# print("load_params success!")
# self.ae.load_params()
def load_adabn_variables(self):
"""
load adabn's inner stats
"""
self.net1_adabn.load_attrs()
self.net2_adabn.load_attrs()
self.net3_adabn.load_attrs()
def get_model(self, pre_trained = False):
"""
get pretrained model
"""
if pre_trained:
self.load_params()
return self
if __name__ == '__main__':
use_cuda = torch.cuda.is_available()
if use_cuda:
cnnblstm = cnnblstm_with_adabn(use_cuda = use_cuda).cuda()
else:
cnnblstm = cnnblstm_with_adabn(use_cuda = use_cuda)
print(cnnblstm)
# get train_x, train_y
train_x = torch.rand(20, 3, 800, dtype = torch.float32)
train_y = torch.randint(10, (20, ), dtype = torch.int64)
# train_y = torch.LongTensor(20, 1).random_() % 10
print(train_x.type())
# train_y = torch.zeros(20, 10).scatter_(1, train_y, 1)
print(train_y)
	train_data = torch.utils.data.TensorDataset(train_x, train_y)  # kept for reference; trainAllLayers builds its own TensorDataset internally
	cnnblstm.trainAllLayers(train_x, train_y)
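	# Example evaluation sketch (added for illustration; random tensors stand in
	# for a real test set here).
	test_x = torch.rand(8, 3, 800, dtype = torch.float32)
	test_y = torch.randint(10, (8, ), dtype = torch.int64)
	print("test accuracy:", cnnblstm.getTestAccuracy(test_x, test_y))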
| [
"torch.nn.Conv1d",
"torch.nn.ReLU",
"torch.nn.Dropout",
"torch.nn.CrossEntropyLoss",
"torch.nn.init.constant_",
"torch.max",
"torch.cuda.device_count",
"torch.cuda.is_available",
"sys.path.append",
"torch.nn.MaxPool1d",
"os.path.exists",
"Coral.CORAL_torch",
"torch.nn.init.xavier_uniform_",
"torch.randint",
"os.mkdir",
"torch.autograd.Variable",
"torch.utils.data.TensorDataset",
"lstm.LSTMHardSigmoid",
"torch.device",
"torch.optim.Adam",
"torch.nn.Softmax",
"os.path.join",
"torch.nn.Linear",
"torch.utils.data.DataLoader",
"torch.no_grad",
"torch.zeros",
"torch.rand"
] | [((189, 218), 'sys.path.append', 'sys.path.append', (['"""../network"""'], {}), "('../network')\n", (204, 218), False, 'import sys\n'), ((289, 330), 'sys.path.append', 'sys.path.append', (['"""../network/AutoEncoder"""'], {}), "('../network/AutoEncoder')\n", (304, 330), False, 'import sys\n'), ((13068, 13093), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (13091, 13093), False, 'import torch\n'), ((13282, 13325), 'torch.rand', 'torch.rand', (['(20)', '(3)', '(800)'], {'dtype': 'torch.float32'}), '(20, 3, 800, dtype=torch.float32)\n', (13292, 13325), False, 'import torch\n'), ((13339, 13382), 'torch.randint', 'torch.randint', (['(10)', '(20,)'], {'dtype': 'torch.int64'}), '(10, (20,), dtype=torch.int64)\n', (13352, 13382), False, 'import torch\n'), ((13548, 13596), 'torch.utils.data.TensorDataset', 'torch.utils.data.TensorDataset', (['train_x', 'train_y'], {}), '(train_x, train_y)\n', (13578, 13596), False, 'import torch\n'), ((2066, 2246), 'lstm.LSTMHardSigmoid', 'LSTMHardSigmoid', ([], {'input_size': 'self.n_filters', 'hidden_size': 'self.n_hidden', 'num_layers': 'self.n_layers', 'dropout': '(0.2)', 'batch_first': '(True)', 'bidirectional': 'self.bidirectional', 'bias': '(True)'}), '(input_size=self.n_filters, hidden_size=self.n_hidden,\n num_layers=self.n_layers, dropout=0.2, batch_first=True, bidirectional=\n self.bidirectional, bias=True)\n', (2081, 2246), False, 'from lstm import LSTMHardSigmoid\n'), ((7921, 7963), 'torch.optim.Adam', 'torch.optim.Adam', (['params'], {'lr': 'learning_rate'}), '(params, lr=learning_rate)\n', (7937, 7963), False, 'import torch\n'), ((8019, 8040), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (8038, 8040), True, 'import torch.nn as nn\n'), ((8650, 8698), 'torch.utils.data.TensorDataset', 'torch.utils.data.TensorDataset', (['train_x', 'train_y'], {}), '(train_x, train_y)\n', (8680, 8698), False, 'import torch\n'), ((8771, 8862), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', ([], {'dataset': 'train_data', 'batch_size': 'batch_size', 'shuffle': 'shuffle'}), '(dataset=train_data, batch_size=batch_size,\n shuffle=shuffle)\n', (8798, 8862), False, 'import torch\n'), ((869, 900), 'os.path.exists', 'os.path.exists', (['self.params_dir'], {}), '(self.params_dir)\n', (883, 900), False, 'import os\n'), ((905, 930), 'os.mkdir', 'os.mkdir', (['self.params_dir'], {}), '(self.params_dir)\n', (913, 930), False, 'import os\n'), ((1355, 1456), 'torch.nn.Conv1d', 'nn.Conv1d', ([], {'in_channels': 'self.n_features', 'out_channels': 'self.n_filters', 'kernel_size': 'self.kernel_size'}), '(in_channels=self.n_features, out_channels=self.n_filters,\n kernel_size=self.kernel_size)\n', (1364, 1456), True, 'import torch.nn as nn\n'), ((1576, 1585), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (1583, 1585), True, 'import torch.nn as nn\n'), ((1609, 1626), 'torch.nn.Dropout', 'nn.Dropout', ([], {'p': '(0.5)'}), '(p=0.5)\n', (1619, 1626), True, 'import torch.nn as nn\n'), ((1633, 1660), 'torch.nn.MaxPool1d', 'nn.MaxPool1d', ([], {'kernel_size': '(2)'}), '(kernel_size=2)\n', (1645, 1660), True, 'import torch.nn as nn\n'), ((2577, 2617), 'torch.nn.Linear', 'nn.Linear', (['n_blstm_output', '(50)'], {'bias': '(True)'}), '(n_blstm_output, 50, bias=True)\n', (2586, 2617), True, 'import torch.nn as nn\n'), ((2624, 2633), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (2631, 2633), True, 'import torch.nn as nn\n'), ((2866, 2883), 'torch.nn.Dropout', 'nn.Dropout', ([], {'p': '(0.2)'}), '(p=0.2)\n', (2876, 2883), True, 
'import torch.nn as nn\n'), ((2890, 2930), 'torch.nn.Linear', 'nn.Linear', (['(50)', 'self.n_outputs'], {'bias': '(True)'}), '(50, self.n_outputs, bias=True)\n', (2899, 2930), True, 'import torch.nn as nn\n'), ((2937, 2954), 'torch.nn.Softmax', 'nn.Softmax', ([], {'dim': '(1)'}), '(dim=1)\n', (2947, 2954), True, 'import torch.nn as nn\n'), ((3324, 3372), 'torch.zeros', 'torch.zeros', (['n_layers', 'batch_size', 'self.n_hidden'], {}), '(n_layers, batch_size, self.n_hidden)\n', (3335, 3372), False, 'import torch\n'), ((3389, 3437), 'torch.zeros', 'torch.zeros', (['n_layers', 'batch_size', 'self.n_hidden'], {}), '(n_layers, batch_size, self.n_hidden)\n', (3400, 3437), False, 'import torch\n'), ((5037, 5073), 'torch.nn.init.xavier_uniform_', 'nn.init.xavier_uniform_', (['params_data'], {}), '(params_data)\n', (5060, 5073), True, 'import torch.nn as nn\n'), ((5117, 5150), 'torch.nn.init.constant_', 'nn.init.constant_', (['params_data', '(0)'], {}), '(params_data, 0)\n', (5134, 5150), True, 'import torch.nn as nn\n'), ((5325, 5361), 'torch.nn.init.xavier_uniform_', 'nn.init.xavier_uniform_', (['params_data'], {}), '(params_data)\n', (5348, 5361), True, 'import torch.nn as nn\n'), ((5405, 5438), 'torch.nn.init.constant_', 'nn.init.constant_', (['params_data', '(0)'], {}), '(params_data, 0)\n', (5422, 5438), True, 'import torch.nn as nn\n'), ((5520, 5556), 'torch.nn.init.xavier_uniform_', 'nn.init.xavier_uniform_', (['params_data'], {}), '(params_data)\n', (5543, 5556), True, 'import torch.nn as nn\n'), ((5600, 5633), 'torch.nn.init.constant_', 'nn.init.constant_', (['params_data', '(0)'], {}), '(params_data, 0)\n', (5617, 5633), True, 'import torch.nn as nn\n'), ((7658, 7692), 'Coral.CORAL_torch', 'Coral.CORAL_torch', (['train_x', 'test_x'], {}), '(train_x, test_x)\n', (7675, 7692), False, 'import Coral\n'), ((10969, 10984), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (10982, 10984), False, 'import torch\n'), ((11387, 11402), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (11400, 11402), False, 'import torch\n'), ((11470, 11490), 'torch.max', 'torch.max', (['output', '(1)'], {}), '(output, 1)\n', (11479, 11490), False, 'import torch\n'), ((11885, 11947), 'os.path.join', 'os.path.join', (['self.params_dir', 'cnnblstm_with_adabn.PARAMS_FILE'], {}), '(self.params_dir, cnnblstm_with_adabn.PARAMS_FILE)\n', (11897, 11947), False, 'import os\n'), ((12298, 12360), 'os.path.join', 'os.path.join', (['self.params_dir', 'cnnblstm_with_adabn.PARAMS_FILE'], {}), '(self.params_dir, cnnblstm_with_adabn.PARAMS_FILE)\n', (12310, 12360), False, 'import os\n'), ((1747, 1808), 'os.path.join', 'os.path.join', (['self.params_dir', 'cnnblstm_with_adabn.NET1_ADABN'], {}), '(self.params_dir, cnnblstm_with_adabn.NET1_ADABN)\n', (1759, 1808), False, 'import os\n'), ((2437, 2498), 'os.path.join', 'os.path.join', (['self.params_dir', 'cnnblstm_with_adabn.NET2_ADABN'], {}), '(self.params_dir, cnnblstm_with_adabn.NET2_ADABN)\n', (2449, 2498), False, 'import os\n'), ((2726, 2787), 'os.path.join', 'os.path.join', (['self.params_dir', 'cnnblstm_with_adabn.NET3_ADABN'], {}), '(self.params_dir, cnnblstm_with_adabn.NET3_ADABN)\n', (2738, 2787), False, 'import os\n'), ((9730, 9750), 'torch.max', 'torch.max', (['output', '(1)'], {}), '(output, 1)\n', (9739, 9750), False, 'import torch\n'), ((3170, 3218), 'torch.zeros', 'torch.zeros', (['n_layers', 'batch_size', 'self.n_hidden'], {}), '(n_layers, batch_size, self.n_hidden)\n', (3181, 3218), False, 'import torch\n'), ((3242, 3290), 'torch.zeros', 'torch.zeros', (['n_layers', 
'batch_size', 'self.n_hidden'], {}), '(n_layers, batch_size, self.n_hidden)\n', (3253, 3290), False, 'import torch\n'), ((11107, 11123), 'torch.autograd.Variable', 'Variable', (['test_x'], {}), '(test_x)\n', (11115, 11123), False, 'from torch.autograd import Variable\n'), ((11125, 11141), 'torch.autograd.Variable', 'Variable', (['test_y'], {}), '(test_y)\n', (11133, 11141), False, 'from torch.autograd import Variable\n'), ((8320, 8345), 'torch.cuda.device_count', 'torch.cuda.device_count', ([], {}), '()\n', (8343, 8345), False, 'import torch\n'), ((9262, 9275), 'torch.autograd.Variable', 'Variable', (['b_x'], {}), '(b_x)\n', (9270, 9275), False, 'from torch.autograd import Variable\n'), ((9277, 9290), 'torch.autograd.Variable', 'Variable', (['b_y'], {}), '(b_y)\n', (9285, 9290), False, 'from torch.autograd import Variable\n'), ((10875, 10900), 'torch.cuda.device_count', 'torch.cuda.device_count', ([], {}), '()\n', (10898, 10900), False, 'import torch\n'), ((12420, 12482), 'os.path.join', 'os.path.join', (['self.params_dir', 'cnnblstm_with_adabn.PARAMS_FILE'], {}), '(self.params_dir, cnnblstm_with_adabn.PARAMS_FILE)\n', (12432, 12482), False, 'import os\n'), ((12567, 12629), 'os.path.join', 'os.path.join', (['self.params_dir', 'cnnblstm_with_adabn.PARAMS_FILE'], {}), '(self.params_dir, cnnblstm_with_adabn.PARAMS_FILE)\n', (12579, 12629), False, 'import os\n'), ((11028, 11044), 'torch.autograd.Variable', 'Variable', (['test_x'], {}), '(test_x)\n', (11036, 11044), False, 'from torch.autograd import Variable\n'), ((11053, 11069), 'torch.autograd.Variable', 'Variable', (['test_y'], {}), '(test_y)\n', (11061, 11069), False, 'from torch.autograd import Variable\n'), ((12499, 12519), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (12511, 12519), False, 'import torch\n'), ((12646, 12665), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (12658, 12665), False, 'import torch\n'), ((9193, 9206), 'torch.autograd.Variable', 'Variable', (['b_x'], {}), '(b_x)\n', (9201, 9206), False, 'from torch.autograd import Variable\n'), ((9215, 9228), 'torch.autograd.Variable', 'Variable', (['b_y'], {}), '(b_y)\n', (9223, 9228), False, 'from torch.autograd import Variable\n')] |
import asyncio, json
from config import Config
from soundpad_manager import SoundpadManager
from version import BRIDGE_VERSION
import websockets
from sanic.log import logger
# Yes, I know it's lazy to run a separate WS and HTTP server when both could be served on the same port.
# I don't like Sanic's WS implementation, though, and this is just a quick-and-dirty project anyway, so there is no reason to get fancy.
class WebsocketServer:
def __init__(self, config: Config, sp_manager: SoundpadManager):
self._server = None
self._config = config
self._soundpad = sp_manager
# ephemeral state
self._state = {
"edit_mode": False,
"soundpad_connected": False,
"version": BRIDGE_VERSION,
}
self._index_sockets = set()
self._control_sockets = set()
def start(self):
port = self._config.get(["server", "ws_port"])
logger.info(f"Websocket server is running on port {port}")
self._server = asyncio.get_event_loop().run_until_complete(websockets.serve(self.connHandler, "localhost", port))
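	# Example usage sketch (added for illustration; not part of the original
	# project). How Config and SoundpadManager get constructed is an assumption:
	#   server = WebsocketServer(config, soundpad_manager)
	#   server.start()
	#   asyncio.get_event_loop().run_forever()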
async def stop(self):
self._server.close()
await self._server.wait_closed()
async def changeState(self, key, value):
self._state[key] = value
await self.emitEvent("state-update", self._state)
async def commandHandler(self, socket, command, params):
if command == "register":
if params["as"] == "index":
self._index_sockets.add(socket)
elif params["as"] == "control":
self._control_sockets.add(socket)
await self.emitEvent("settings-change", self._config.getExternalSerialized(), socket=socket, index_sockets=False, control_sockets=False)
await self.emitEvent("state-update", self._state, socket=socket, index_sockets=False, control_sockets=False)
elif command == "change-settings":
if params["setting"] == [ "board", "rows" ] or params["setting"] == [ "board", "columns" ]:
if not 1 <= params["value"] <= 10:
return # invalid values are not allowed
self._config.set(params["setting"], params["value"])
await self.emitEvent("settings-change", self._config.getExternalSerialized())
elif command == "set-edit-mode":
self._state["edit_mode"] = params["value"]
await self.emitEvent("state-update", self._state)
elif command == "select-sound":
if not 0 <= params['page'] <= 9 or not 0 <= params['row'] <= 9 or not 0 <= params['col'] <= 9:
return # out of bounds
if params['page'] == 0 and self._config.exists([ "sounds", f"{params['row']},{params['col']}" ]):
self._config.delete([ "sounds", f"{params['row']},{params['col']}" ])
sound_index = f"{params['page']}:{params['row']},{params['col']}"
self._config.set([ "sounds", sound_index ], params["sound"])
await self.emitEvent("settings-change", self._config.getExternalSerialized(), index_sockets=False)
elif command == "play-sound":
sound_id = params["sound"]
self._soundpad.playSound(sound_id)
elif command == "stop-sound":
self._soundpad.stopSound()
elif command == "pause-sound":
self._soundpad.pauseSound()
elif command == "log":
if "message" in params:
logger.info("Log: " + params["message"])
else:
logger.info("Log: " + json.dumps(params))
async def emitEvent(self, event, data, socket=None, index_sockets=True, control_sockets=True):
msg = json.dumps({ "type": "event", "event": event, "data": data })
if socket is not None:
await socket.send(msg)
if index_sockets:
for socket in self._index_sockets:
await socket.send(msg)
if control_sockets:
for socket in self._control_sockets:
await socket.send(msg)
async def connHandler(self, socket, path):
print("Client connected")
try:
async for raw_msg in socket:
try:
msg = json.loads(raw_msg)
except Exception as err:
logger.error(f"Could not parse JSON: {repr(err)}")
continue
if not "type" in msg:
continue
if msg["type"] == "command":
if not "command" in msg or not "params" in msg:
continue
try:
await self.commandHandler(socket, msg["command"], msg["params"])
except Exception as e: # if we get garbage data just ignore
print(f"Error in commandHandler: {msg['command']}({msg['params']}): {repr(e)}")
pass
except websockets.ConnectionClosedError:
pass
finally:
if socket in self._index_sockets:
self._index_sockets.discard(socket)
if socket in self._control_sockets:
self._control_sockets.discard(socket)
print("Client disconnected") | [
"json.loads",
"json.dumps",
"sanic.log.logger.info",
"websockets.serve",
"asyncio.get_event_loop"
] | [((886, 944), 'sanic.log.logger.info', 'logger.info', (['f"""Websocket server is running on port {port}"""'], {}), "(f'Websocket server is running on port {port}')\n", (897, 944), False, 'from sanic.log import logger\n'), ((3359, 3418), 'json.dumps', 'json.dumps', (["{'type': 'event', 'event': event, 'data': data}"], {}), "({'type': 'event', 'event': event, 'data': data})\n", (3369, 3418), False, 'import asyncio, json\n'), ((1007, 1060), 'websockets.serve', 'websockets.serve', (['self.connHandler', '"""localhost"""', 'port'], {}), "(self.connHandler, 'localhost', port)\n", (1023, 1060), False, 'import websockets\n'), ((963, 987), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (985, 987), False, 'import asyncio, json\n'), ((3797, 3816), 'json.loads', 'json.loads', (['raw_msg'], {}), '(raw_msg)\n', (3807, 3816), False, 'import asyncio, json\n'), ((3153, 3193), 'sanic.log.logger.info', 'logger.info', (["('Log: ' + params['message'])"], {}), "('Log: ' + params['message'])\n", (3164, 3193), False, 'from sanic.log import logger\n'), ((3231, 3249), 'json.dumps', 'json.dumps', (['params'], {}), '(params)\n', (3241, 3249), False, 'import asyncio, json\n')] |
from django.urls import path
from what_can_i_cook.views import WCICFilterView, WCICResultView
app_name = "wcic"
urlpatterns = [
path("", WCICFilterView.as_view(), name="wcic-start"),
path("results/", WCICResultView.as_view(), name="wcic-results"),
]
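# Inclusion sketch (added for illustration; the project-level urls module and the
# URL prefix below are assumptions):
#
#   # project/urls.py
#   from django.urls import include, path
#   urlpatterns = [
#       path("what-can-i-cook/", include("what_can_i_cook.urls")),
#   ]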
| [
"what_can_i_cook.views.WCICResultView.as_view",
"what_can_i_cook.views.WCICFilterView.as_view"
] | [((145, 169), 'what_can_i_cook.views.WCICFilterView.as_view', 'WCICFilterView.as_view', ([], {}), '()\n', (167, 169), False, 'from what_can_i_cook.views import WCICFilterView, WCICResultView\n'), ((212, 236), 'what_can_i_cook.views.WCICResultView.as_view', 'WCICResultView.as_view', ([], {}), '()\n', (234, 236), False, 'from what_can_i_cook.views import WCICFilterView, WCICResultView\n')] |
from setuptools import setup, find_packages
from distutils.extension import Extension
from distutils.command.sdist import sdist
try:
from Cython.Build import cythonize
USE_CYTHON = True
except ImportError:
USE_CYTHON = False
ext = 'pyx' if USE_CYTHON else 'c'
extensions = [Extension(
'dsigma.precompute_engine', ['dsigma/precompute_engine.{}'.format(ext)],
extra_compile_args=['-Ofast', '-march=native'])]
if USE_CYTHON:
extensions = cythonize(extensions)
class sdist_with_cythonize(sdist):
def run(self):
cythonize(['dsigma/precompute_engine.pyx'])
sdist.run(self)
with open('README.md', 'r') as fstream:
long_description = fstream.read()
setup(
name='dsigma',
version='0.5.0',
description=('A Galaxy-Galaxy Lensing Pipeline'),
long_description=long_description,
long_description_content_type='text/markdown',
classifiers=[
'Intended Audience :: Science/Research',
'Topic :: Scientific/Engineering :: Astronomy',
'Operating System :: OS Independent',
'Programming Language :: Python :: 3 :: Only',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
],
keywords='astronomy, weak-lensing',
url='https://github.com/johannesulf/dsigma',
author='<NAME>, <NAME>',
author_email='<EMAIL>',
packages=find_packages(),
install_requires=['numpy', 'astropy', 'scipy', 'scikit-learn',
'healpy'],
python_requires='>=3.4',
ext_modules=extensions,
cmdclass={'sdist': sdist_with_cythonize}
)
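# Build/usage sketch (added for illustration):
#   pip install .           # compiles dsigma/precompute_engine (from the .pyx if Cython is available, else the shipped .c)
#   python setup.py sdist   # the custom sdist command above runs cythonize before packaging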
| [
"Cython.Build.cythonize",
"distutils.command.sdist.sdist.run",
"setuptools.find_packages"
] | [((462, 483), 'Cython.Build.cythonize', 'cythonize', (['extensions'], {}), '(extensions)\n', (471, 483), False, 'from Cython.Build import cythonize\n'), ((548, 591), 'Cython.Build.cythonize', 'cythonize', (["['dsigma/precompute_engine.pyx']"], {}), "(['dsigma/precompute_engine.pyx'])\n", (557, 591), False, 'from Cython.Build import cythonize\n'), ((600, 615), 'distutils.command.sdist.sdist.run', 'sdist.run', (['self'], {}), '(self)\n', (609, 615), False, 'from distutils.command.sdist import sdist\n'), ((1423, 1438), 'setuptools.find_packages', 'find_packages', ([], {}), '()\n', (1436, 1438), False, 'from setuptools import setup, find_packages\n')] |