| code | apis | extract_api |
| --- | --- | --- |
| stringlengths 22–1.05M | sequencelengths 1–3.31k | stringlengths 75–3.25M |
# Lint as: python2, python3
# Copyright 2019 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A client for the chicago_taxi demo."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import base64
import json
import os
import subprocess
import tempfile
import requests
from tensorflow_transform import coders as tft_coders
from tensorflow_transform.tf_metadata import dataset_schema
from tensorflow_transform.tf_metadata import schema_utils
from google.protobuf import text_format
from tensorflow.python.lib.io import file_io # pylint: disable=g-direct-tensorflow-import
from tensorflow.python.platform import app # pylint: disable=g-direct-tensorflow-import
from tensorflow_metadata.proto.v0 import schema_pb2
from tfx.utils import io_utils
_LOCAL_INFERENCE_TIMEOUT_SECONDS = 5.0
_LABEL_KEY = 'tips'
# Tf.Transform considers these features as "raw"
def _get_raw_feature_spec(schema):
return schema_utils.schema_as_feature_spec(schema).feature_spec
def _make_proto_coder(schema):
raw_feature_spec = _get_raw_feature_spec(schema)
raw_schema = dataset_schema.from_feature_spec(raw_feature_spec)
return tft_coders.ExampleProtoCoder(raw_schema)
def _make_csv_coder(schema, column_names):
"""Return a coder for tf.transform to read csv files."""
raw_feature_spec = _get_raw_feature_spec(schema)
parsing_schema = dataset_schema.from_feature_spec(raw_feature_spec)
return tft_coders.CsvCoder(column_names, parsing_schema)
def _read_schema(path):
"""Reads a schema from the provided location.
Args:
path: The location of the file holding a serialized Schema proto.
Returns:
    An instance of Schema parsed from the serialized proto at that location.
"""
result = schema_pb2.Schema()
contents = file_io.read_file_to_string(path)
text_format.Parse(contents, result)
return result
def _do_local_inference(host, port, serialized_examples):
"""Performs inference on a model hosted by the host:port server."""
json_examples = []
for serialized_example in serialized_examples:
# The encoding follows the guidelines in:
# https://www.tensorflow.org/tfx/serving/api_rest
example_bytes = base64.b64encode(serialized_example).decode('utf-8')
predict_request = '{ "b64": "%s" }' % example_bytes
json_examples.append(predict_request)
json_request = '{ "instances": [' + ','.join(map(str, json_examples)) + ']}'
server_url = 'http://' + host + ':' + port + '/v1/models/chicago_taxi:predict'
response = requests.post(
server_url, data=json_request, timeout=_LOCAL_INFERENCE_TIMEOUT_SECONDS)
response.raise_for_status()
prediction = response.json()
print(json.dumps(prediction, indent=4))
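# For reference, with two examples the request body assembled above looks like
# (base64 payloads shortened for illustration):
#   { "instances": [{ "b64": "CiEKHw..." }, { "b64": "CiEKHx..." }] }
# i.e. the row format described in the TF Serving REST API guide linked above.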
def _do_aiplatform_inference(model, version, serialized_examples):
"""Performs inference on the model:version in AI Platform."""
working_dir = tempfile.mkdtemp()
instances_file = os.path.join(working_dir, 'test.json')
json_examples = []
for serialized_example in serialized_examples:
# The encoding follows the example in:
# https://github.com/GoogleCloudPlatform/training-data-analyst/blob/master/quests/tpu/invoke_model.py
json_examples.append('{ "inputs": { "b64": "%s" } }' %
base64.b64encode(serialized_example).decode('utf-8'))
file_io.write_string_to_file(instances_file, '\n'.join(json_examples))
gcloud_command = [
'gcloud', 'ai-platform', 'predict', '--model', model, '--version',
version, '--json-instances', instances_file
]
print(subprocess.check_output(gcloud_command))
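# The call above is equivalent to running, e.g. (model/version illustrative):
#   gcloud ai-platform predict --model my_model --version v1 \
#       --json-instances /tmp/tmpXXXXXX/test.json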
def _do_inference(model_handle, examples_file, num_examples, schema):
"""Sends requests to the model and prints the results.
Args:
model_handle: handle to the model. This can be either
"aiplatform:model:version" or "host:port"
examples_file: path to csv file containing examples, with the first line
assumed to have the column headers
num_examples: number of requests to send to the server
schema: a Schema describing the input data
Returns:
Response from model server
"""
filtered_features = [
feature for feature in schema.feature if feature.name != _LABEL_KEY
]
del schema.feature[:]
schema.feature.extend(filtered_features)
column_names = io_utils.load_csv_column_names(examples_file)
csv_coder = _make_csv_coder(schema, column_names)
proto_coder = _make_proto_coder(schema)
  with open(examples_file, 'r') as input_file:
    input_file.readline()  # skip header line
    serialized_examples = []
    for _ in range(num_examples):
      one_line = input_file.readline()
      if not one_line:
        print('End of example file reached')
        break
      one_example = csv_coder.decode(one_line)
      serialized_example = proto_coder.encode(one_example)
      serialized_examples.append(serialized_example)
parsed_model_handle = model_handle.split(':')
if parsed_model_handle[0] == 'aiplatform':
_do_aiplatform_inference(
model=parsed_model_handle[1],
version=parsed_model_handle[2],
serialized_examples=serialized_examples)
else:
_do_local_inference(
host=parsed_model_handle[0],
port=parsed_model_handle[1],
serialized_examples=serialized_examples)
def main(_):
parser = argparse.ArgumentParser()
parser.add_argument(
'--num_examples',
help=('Number of examples to send to the server.'),
default=1,
type=int)
parser.add_argument(
'--server',
help=('Prediction service host:port or aiplatform:model:version'),
required=True)
parser.add_argument(
'--examples_file',
help=('Path to csv file containing examples.'),
required=True)
parser.add_argument(
'--schema_file', help='File holding the schema for the input data')
known_args, _ = parser.parse_known_args()
_do_inference(known_args.server, known_args.examples_file,
known_args.num_examples, _read_schema(known_args.schema_file))
if __name__ == '__main__':
app.run(main)
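# Example invocation (script name, host/port and file paths are illustrative):
#   python chicago_taxi_client.py --server=localhost:8501 \
#       --examples_file=data/eval.csv --schema_file=schema.pbtxt
# or, to query an AI Platform deployment instead:
#   python chicago_taxi_client.py --server=aiplatform:chicago_taxi:v1 \
#       --examples_file=data/eval.csv --schema_file=schema.pbtxt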
| [
"subprocess.check_output",
"tensorflow.python.platform.app.run",
"google.protobuf.text_format.Parse",
"requests.post",
"argparse.ArgumentParser",
"tensorflow_transform.tf_metadata.schema_utils.schema_as_feature_spec",
"json.dumps",
"tensorflow.python.lib.io.file_io.read_file_to_string",
"tensorflow_transform.coders.CsvCoder",
"os.path.join",
"base64.b64encode",
"tempfile.mkdtemp",
"tensorflow_transform.coders.ExampleProtoCoder",
"tensorflow_transform.tf_metadata.dataset_schema.from_feature_spec",
"tfx.utils.io_utils.load_csv_column_names",
"tensorflow_metadata.proto.v0.schema_pb2.Schema"
] | [((1670, 1720), 'tensorflow_transform.tf_metadata.dataset_schema.from_feature_spec', 'dataset_schema.from_feature_spec', (['raw_feature_spec'], {}), '(raw_feature_spec)\n', (1702, 1720), False, 'from tensorflow_transform.tf_metadata import dataset_schema\n'), ((1730, 1770), 'tensorflow_transform.coders.ExampleProtoCoder', 'tft_coders.ExampleProtoCoder', (['raw_schema'], {}), '(raw_schema)\n', (1758, 1770), True, 'from tensorflow_transform import coders as tft_coders\n'), ((1945, 1995), 'tensorflow_transform.tf_metadata.dataset_schema.from_feature_spec', 'dataset_schema.from_feature_spec', (['raw_feature_spec'], {}), '(raw_feature_spec)\n', (1977, 1995), False, 'from tensorflow_transform.tf_metadata import dataset_schema\n'), ((2005, 2054), 'tensorflow_transform.coders.CsvCoder', 'tft_coders.CsvCoder', (['column_names', 'parsing_schema'], {}), '(column_names, parsing_schema)\n', (2024, 2054), True, 'from tensorflow_transform import coders as tft_coders\n'), ((2301, 2320), 'tensorflow_metadata.proto.v0.schema_pb2.Schema', 'schema_pb2.Schema', ([], {}), '()\n', (2318, 2320), False, 'from tensorflow_metadata.proto.v0 import schema_pb2\n'), ((2334, 2367), 'tensorflow.python.lib.io.file_io.read_file_to_string', 'file_io.read_file_to_string', (['path'], {}), '(path)\n', (2361, 2367), False, 'from tensorflow.python.lib.io import file_io\n'), ((2370, 2405), 'google.protobuf.text_format.Parse', 'text_format.Parse', (['contents', 'result'], {}), '(contents, result)\n', (2387, 2405), False, 'from google.protobuf import text_format\n'), ((3069, 3160), 'requests.post', 'requests.post', (['server_url'], {'data': 'json_request', 'timeout': '_LOCAL_INFERENCE_TIMEOUT_SECONDS'}), '(server_url, data=json_request, timeout=\n _LOCAL_INFERENCE_TIMEOUT_SECONDS)\n', (3082, 3160), False, 'import requests\n'), ((3415, 3433), 'tempfile.mkdtemp', 'tempfile.mkdtemp', ([], {}), '()\n', (3431, 3433), False, 'import tempfile\n'), ((3453, 3491), 'os.path.join', 'os.path.join', (['working_dir', '"""test.json"""'], {}), "(working_dir, 'test.json')\n", (3465, 3491), False, 'import os\n'), ((4822, 4867), 'tfx.utils.io_utils.load_csv_column_names', 'io_utils.load_csv_column_names', (['examples_file'], {}), '(examples_file)\n', (4852, 4867), False, 'from tfx.utils import io_utils\n'), ((5807, 5832), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (5830, 5832), False, 'import argparse\n'), ((6544, 6557), 'tensorflow.python.platform.app.run', 'app.run', (['main'], {}), '(main)\n', (6551, 6557), False, 'from tensorflow.python.platform import app\n'), ((1514, 1557), 'tensorflow_transform.tf_metadata.schema_utils.schema_as_feature_spec', 'schema_utils.schema_as_feature_spec', (['schema'], {}), '(schema)\n', (1549, 1557), False, 'from tensorflow_transform.tf_metadata import schema_utils\n'), ((3232, 3264), 'json.dumps', 'json.dumps', (['prediction'], {'indent': '(4)'}), '(prediction, indent=4)\n', (3242, 3264), False, 'import json\n'), ((4078, 4117), 'subprocess.check_output', 'subprocess.check_output', (['gcloud_command'], {}), '(gcloud_command)\n', (4101, 4117), False, 'import subprocess\n'), ((2743, 2779), 'base64.b64encode', 'base64.b64encode', (['serialized_example'], {}), '(serialized_example)\n', (2759, 2779), False, 'import base64\n'), ((3795, 3831), 'base64.b64encode', 'base64.b64encode', (['serialized_example'], {}), '(serialized_example)\n', (3811, 3831), False, 'import base64\n')] |
#!/usr/bin/env python
"""Test checkpoint-like periodic snapshots.
We test that the expected number of snapshot folders is created and that the stored currentStep changes between snapshots.
"""
import mirheo as mir
u = mir.Mirheo(nranks=(1, 1, 1), domain=(4, 6, 8), debug_level=3,
log_filename='log', no_splash=True,
checkpoint_every=10, checkpoint_mode='Incremental',
checkpoint_folder='periodic_snapshots/snapshot_', checkpoint_mechanism='Snapshot')
pv = mir.ParticleVectors.ParticleVector('pv', mass=1)
ic = mir.InitialConditions.Uniform(number_density=2)
u.registerParticleVector(pv, ic)
dpd = mir.Interactions.Pairwise('dpd', rc=1.0, kind='DPD', a=10.0, gamma=10.0, kBT=1.0, power=0.5)
lj = mir.Interactions.Pairwise('lj', rc=1.0, kind='LJ', epsilon=1.25, sigma=0.75)
u.registerInteraction(dpd)
u.registerInteraction(lj)
u.setInteraction(dpd, pv, pv)
minimize = mir.Integrators.Minimize('minimize', max_displacement=1. / 1024)
u.registerIntegrator(minimize)
u.run(45, dt=0.125)
# TEST: snapshot.periodic
# cd snapshot
# rm -rf periodic_snapshots/
# mir.run --runargs "-n 2" ./periodic.py
# ls periodic_snapshots | cat > snapshot.out.txt
# grep -rH --include=*.json currentStep periodic_snapshots/ | sort >> snapshot.out.txt
| [
"mirheo.Interactions.Pairwise",
"mirheo.Integrators.Minimize",
"mirheo.Mirheo",
"mirheo.ParticleVectors.ParticleVector",
"mirheo.InitialConditions.Uniform"
] | [((174, 420), 'mirheo.Mirheo', 'mir.Mirheo', ([], {'nranks': '(1, 1, 1)', 'domain': '(4, 6, 8)', 'debug_level': '(3)', 'log_filename': '"""log"""', 'no_splash': '(True)', 'checkpoint_every': '(10)', 'checkpoint_mode': '"""Incremental"""', 'checkpoint_folder': '"""periodic_snapshots/snapshot_"""', 'checkpoint_mechanism': '"""Snapshot"""'}), "(nranks=(1, 1, 1), domain=(4, 6, 8), debug_level=3, log_filename=\n 'log', no_splash=True, checkpoint_every=10, checkpoint_mode=\n 'Incremental', checkpoint_folder='periodic_snapshots/snapshot_',\n checkpoint_mechanism='Snapshot')\n", (184, 420), True, 'import mirheo as mir\n'), ((458, 506), 'mirheo.ParticleVectors.ParticleVector', 'mir.ParticleVectors.ParticleVector', (['"""pv"""'], {'mass': '(1)'}), "('pv', mass=1)\n", (492, 506), True, 'import mirheo as mir\n'), ((512, 559), 'mirheo.InitialConditions.Uniform', 'mir.InitialConditions.Uniform', ([], {'number_density': '(2)'}), '(number_density=2)\n', (541, 559), True, 'import mirheo as mir\n'), ((600, 696), 'mirheo.Interactions.Pairwise', 'mir.Interactions.Pairwise', (['"""dpd"""'], {'rc': '(1.0)', 'kind': '"""DPD"""', 'a': '(10.0)', 'gamma': '(10.0)', 'kBT': '(1.0)', 'power': '(0.5)'}), "('dpd', rc=1.0, kind='DPD', a=10.0, gamma=10.0,\n kBT=1.0, power=0.5)\n", (625, 696), True, 'import mirheo as mir\n'), ((698, 774), 'mirheo.Interactions.Pairwise', 'mir.Interactions.Pairwise', (['"""lj"""'], {'rc': '(1.0)', 'kind': '"""LJ"""', 'epsilon': '(1.25)', 'sigma': '(0.75)'}), "('lj', rc=1.0, kind='LJ', epsilon=1.25, sigma=0.75)\n", (723, 774), True, 'import mirheo as mir\n'), ((871, 936), 'mirheo.Integrators.Minimize', 'mir.Integrators.Minimize', (['"""minimize"""'], {'max_displacement': '(1.0 / 1024)'}), "('minimize', max_displacement=1.0 / 1024)\n", (895, 936), True, 'import mirheo as mir\n')] |
from abc import ABC, abstractmethod
from typing import Optional, Tuple
import numpy as np
import pandas as pd
from .utils import get_factors_rev
def calc_plot_size(domain_x, domain_y, plot_goal, house_goal):
f1 = sorted(get_factors_rev(domain_x))
f2 = sorted(get_factors_rev(domain_y))
plot_x, plot_y = None, None
for x in f1:
for y in f2:
if x * y - house_goal >= 0 and plot_goal - x * y >= 0:
if not plot_x and not plot_y:
plot_x, plot_y = x, y
if (plot_goal - x * y) < (plot_goal - plot_x * plot_y):
plot_x, plot_y = x, y
elif ((plot_goal - x * y) == (plot_goal - plot_x * plot_y)) and ((x - y) < (plot_x - plot_y)):
plot_x, plot_y = x, y
return plot_x, plot_y
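# Worked example, assuming utils.get_factors_rev returns the integer factors of
# its argument (the helper is not shown here): for domain_x = domain_y = 10,
# plot_goal = 25 and house_goal = 4, both factor lists are [1, 2, 5, 10] and
# the search settles on (5, 5): the pair with area closest to (but not over)
# the goal, ties going to the pair with the smaller x - y difference.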
def calc_plot_sizes(
domain_x, domain_y, plot_footprint, house_footprint, plot_ratio, dx, dy, full_domain, x_spread=None, y_spread=None
):
x_spread = x_spread if x_spread is not None else (-round(domain_x / 15), 0)
y_spread = (
y_spread if y_spread is not None else (-round(domain_y / 20), min(full_domain - domain_y, round(domain_y / 10)))
)
goal = plot_footprint / (dx * dy)
house_goal = house_footprint / (dx * dy)
dom_x = range(domain_x + x_spread[0], domain_x + x_spread[1] + 1)
dom_y = range(domain_y + y_spread[0], domain_y + y_spread[1] + 1)
plots = []
for d_x in dom_x:
for d_y in dom_y:
trimmed_d_y = int(d_y * plot_ratio)
plot_x, plot_y = calc_plot_size(d_x, trimmed_d_y, goal, house_goal)
if plot_x is not None and plot_y is not None:
plots.append((plot_x, plot_y, d_x, d_y, trimmed_d_y))
return plots
def get_best_plot_size(plots, plot_footprint, plot_ratio, dx, dy):
goal = plot_footprint / (dx * dy)
tmp = pd.DataFrame(plots, columns=["px", "py", "domx", "domy", "trimmed_dy"])
tmp["plt_area"] = tmp["px"] * tmp["py"]
tmp["goal_diff"] = goal - tmp.plt_area
tmp["domain_y_diff"] = tmp.domy * plot_ratio - tmp.trimmed_dy
tmp["trimmed_area"] = tmp["domx"] * tmp["trimmed_dy"]
tmp["full_domain"] = tmp["domx"] * tmp["domy"]
tmp["ratio_diff"] = abs((((tmp.trimmed_area + round(tmp.domain_y_diff * tmp.domx))) / tmp.full_domain - plot_ratio))
normalized_ratio_diff = (tmp.ratio_diff + plot_ratio) / plot_ratio
normalized_goal_diff = (tmp.goal_diff + goal) / goal
tmp["weighted_sorter"] = (tmp.px + tmp.py) ** (normalized_ratio_diff * normalized_goal_diff)
# tmp["ratio_diff"] = abs(((tmp.trimmed_area) / tmp.full_domain - plot_ratio))
tmp = tmp.sort_values(
by=["weighted_sorter", "goal_diff", "ratio_diff", "domain_y_diff", "trimmed_area"],
ascending=[True, True, True, True, False],
)
# tmp = tmp.sort_values(by=["goal_diff", "domain_y_diff", "trimmed_area"], ascending=[True, True, False])
tplot_x, tplot_y, tdomain_x, tdomain_y, trimmed_y = tmp[["px", "py", "domx", "domy", "trimmed_dy"]].iloc[0]
return tplot_x, tplot_y, tdomain_x, tdomain_y, trimmed_y
def calc_house_size(plot_x, plot_y, house_footprint, dx, dy):
goal = house_footprint / (dx * dy)
f1 = range(1, plot_x + 1)
f2 = range(1, plot_y + 1)
true_x, true_y = f1[0], f2[0]
for x in f1:
for y in f2:
padded_x, padded_y = x - 0, y - 0
nums = sorted([padded_x, padded_y])
if nums[0] * 2 < nums[1]:
continue
if abs(goal - padded_x * padded_y) < abs(goal - true_x * true_y):
true_x, true_y = padded_x, padded_y
elif (abs(goal - padded_x * padded_y) == abs(goal - true_x * true_y)) and (
abs(padded_x - padded_y) < abs(true_x - true_y)
):
true_x, true_y = padded_x, padded_y
return true_x, true_y
class BaseDomainArea(ABC):
subplot: Optional["BaseDomainArea"]
x: int
y: int
z: Optional[int]
matrix: np.ndarray
def __str__(self) -> str:
string = ""
for row in self.matrix:
string += f'{" ".join(str(int(pixel)) for pixel in row)}\n'
return string
@abstractmethod
def get_matrix(self) -> np.ndarray:
"""Get the numpy matrix representation of the domain area"""
def _validate_matrix_size(self, subplot):
for value in ["x", "y"]:
cell_val = getattr(self, value)
subplot_val = getattr(subplot, value)
if subplot_val and cell_val < subplot_val:
raise ValueError(
f"The {value} ({cell_val}) value of {self.__class__.__name__}"
f" must be larger than the house ({subplot_val}) going on it!"
)
def save_matrix(self, filename: str, matrix_name: str = None) -> None:
matrix = self.matrix if matrix_name is None else getattr(self, matrix_name)
np.savetxt(filename, matrix, delimiter=",")
class House(BaseDomainArea):
def __init__(self, x: int, y: int, z: int) -> None:
self.x = x
self.y = y
self.z = z
self.matrix = self.get_matrix()
def get_matrix(self) -> np.ndarray:
house = np.full((self.x, self.y), self.z)
return house
class Cell(BaseDomainArea):
def __init__(self, subplot: House, x: int, y: int) -> None:
self.subplot = subplot
self.x = x
self.y = y
self._validate_matrix_size(subplot=self.subplot)
self.matrix = self.get_matrix()
def get_matrix(self) -> np.ndarray:
left = (self.x - self.subplot.x) // 2
top = (self.y - self.subplot.y) // 2
plot = np.zeros((self.x, self.y), dtype=int)
plot[left : left + self.subplot.x, top : top + self.subplot.y] = self.subplot.matrix
return plot
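# For illustration, a 2 x 2 house of height 10 centred on a 4 x 4 plot:
#   Cell(House(2, 2, 10), 4, 4).matrix ->
#       [[ 0  0  0  0]
#        [ 0 10 10  0]
#        [ 0 10 10  0]
#        [ 0  0  0  0]]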
class Domain(BaseDomainArea):
def __init__(self, subplot: Cell, tdomain_x, tdomain_y, full_x, full_y, trimmed_y, plot_ratio, stack_height) -> None:
self.subplot = subplot
self.temp_x = tdomain_x
self.temp_y = tdomain_y
self.full_x = full_x
self.full_y = full_y
self.trimmed_y = trimmed_y
self.plot_ratio = plot_ratio
self.stack_height = stack_height
# self._validate_matrix_size(subplot=self.subplot)
self.matrix, self.trees_matrix = self.get_matrix()
def print_tree_matrix(self) -> str:
string = ""
for row in self.trees_matrix:
string += f'{" ".join(str(int(pixel)) for pixel in row)}\n'
return string
    def get_matrix(self) -> Tuple[np.ndarray, np.ndarray]:
houses_row = np.tile(
self.subplot.matrix,
(
self.temp_x // self.subplot.x,
1,
),
)
number_of_house_rows = self.trimmed_y // self.subplot.y
number_of_full_tree_rows = self.temp_y - self.trimmed_y - 1
mixed_row_ratio = self.temp_y * self.plot_ratio - self.trimmed_y
tree_row = np.full((self.temp_x, 1), -1)
mixed_row = np.array(
[-1 if i <= mixed_row_ratio * self.temp_x else 0 for i in range(1, self.temp_x + 1)]
).reshape(self.temp_x, 1)
rows = [[houses_row.copy()] for _ in range(number_of_house_rows)]
trees = [tree_row.copy() for _ in range(number_of_full_tree_rows)]
trees.insert(number_of_house_rows // 2, mixed_row)
while trees:
for row in rows:
if not trees:
break
row.append(trees.pop())
domain_with_trees = np.concatenate([np.concatenate(row, axis=1) for row in rows], axis=1)
dwtx = domain_with_trees.shape[0]
dwty = domain_with_trees.shape[1]
xs = int(np.floor((self.full_x - dwtx) / 2)), int(np.ceil((self.full_x - dwtx) / 2))
full_domain = np.pad(domain_with_trees, (xs, (self.full_y - dwty, 0)))
mid_x = self.full_x // 2
full_domain[mid_x - 2:mid_x + 2, :1] = self.stack_height # stack for surface scalar to come out of
domain = np.where(full_domain != -1, full_domain, 0)
trees = np.where(full_domain == -1, full_domain, 0)
return domain.T, trees.T
@classmethod
def from_domain_config(cls, house, config):
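        # NOTE: this constructor predates the current Cell/Domain signatures
        # (Cell no longer accepts tree_domain_fraction), so it is kept for
        # reference only; setup_domain below uses from_plot_size instead.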
cell = Cell(house, tree_domain_fraction=config["trees"]["domain_fraction"], **config["plot_size"])
x = config["domain"]["x"]
y = config["domain"]["y"]
return cls(subplot=cell, x=x, y=y)
@classmethod
def from_plot_size(cls, house, config, tplot_x, tplot_y, tdomain_x, tdomain_y, trimmed_y, plot_ratio, stack_height):
cell = Cell(house, x=tplot_x, y=tplot_y)
# x = config["domain"]["x"]
# y = config["domain"]["y"]
return cls(cell, tdomain_x, tdomain_y, config["domain"]["x"], config["domain"]["y"], trimmed_y, plot_ratio, stack_height)
def setup_domain(cfg):
domain_x, domain_y = cfg["domain"]["x"], (round(cfg["domain"]["y"] * cfg["domain"]["urban_ratio"]))
plot_footprint, plot_ratio, dx, dy = (
cfg["plot"]["plot_footprint"],
cfg["plot"]["plot_ratio"],
cfg["domain"]["dx"],
cfg["domain"]["dy"],
)
plots = calc_plot_sizes(
domain_x,
domain_y,
plot_footprint,
cfg["house"]["footprint"],
plot_ratio,
dx,
dy,
cfg["domain"]["y"],
)
tplot_x, tplot_y, tdomain_x, tdomain_y, trimmed_y = get_best_plot_size(plots, plot_footprint, plot_ratio, dx, dy)
house_x, house_y = calc_house_size(tplot_x, tplot_y, cfg["house"]["footprint"], dx, dy)
house = House(house_x, house_y, cfg["house"]["height"])
return Domain.from_plot_size(house, cfg, tplot_x, tplot_y, tdomain_x, tdomain_y, trimmed_y, plot_ratio, cfg["domain"]["stack_height"])
if __name__ == "__main__":
from .load_wrapper_config import get_wrapper_config
config = get_wrapper_config()
domain = setup_domain(config)
domain
| [
"numpy.tile",
"numpy.ceil",
"numpy.where",
"numpy.floor",
"numpy.zeros",
"numpy.savetxt",
"numpy.concatenate",
"pandas.DataFrame",
"numpy.full",
"numpy.pad"
] | [((1874, 1945), 'pandas.DataFrame', 'pd.DataFrame', (['plots'], {'columns': "['px', 'py', 'domx', 'domy', 'trimmed_dy']"}), "(plots, columns=['px', 'py', 'domx', 'domy', 'trimmed_dy'])\n", (1886, 1945), True, 'import pandas as pd\n'), ((4922, 4965), 'numpy.savetxt', 'np.savetxt', (['filename', 'matrix'], {'delimiter': '""","""'}), "(filename, matrix, delimiter=',')\n", (4932, 4965), True, 'import numpy as np\n'), ((5207, 5240), 'numpy.full', 'np.full', (['(self.x, self.y)', 'self.z'], {}), '((self.x, self.y), self.z)\n', (5214, 5240), True, 'import numpy as np\n'), ((5669, 5706), 'numpy.zeros', 'np.zeros', (['(self.x, self.y)'], {'dtype': 'int'}), '((self.x, self.y), dtype=int)\n', (5677, 5706), True, 'import numpy as np\n'), ((6614, 6678), 'numpy.tile', 'np.tile', (['self.subplot.matrix', '(self.temp_x // self.subplot.x, 1)'], {}), '(self.subplot.matrix, (self.temp_x // self.subplot.x, 1))\n', (6621, 6678), True, 'import numpy as np\n'), ((6986, 7015), 'numpy.full', 'np.full', (['(self.temp_x, 1)', '(-1)'], {}), '((self.temp_x, 1), -1)\n', (6993, 7015), True, 'import numpy as np\n'), ((7831, 7887), 'numpy.pad', 'np.pad', (['domain_with_trees', '(xs, (self.full_y - dwty, 0))'], {}), '(domain_with_trees, (xs, (self.full_y - dwty, 0)))\n', (7837, 7887), True, 'import numpy as np\n'), ((8047, 8090), 'numpy.where', 'np.where', (['(full_domain != -1)', 'full_domain', '(0)'], {}), '(full_domain != -1, full_domain, 0)\n', (8055, 8090), True, 'import numpy as np\n'), ((8107, 8150), 'numpy.where', 'np.where', (['(full_domain == -1)', 'full_domain', '(0)'], {}), '(full_domain == -1, full_domain, 0)\n', (8115, 8150), True, 'import numpy as np\n'), ((7577, 7604), 'numpy.concatenate', 'np.concatenate', (['row'], {'axis': '(1)'}), '(row, axis=1)\n', (7591, 7604), True, 'import numpy as np\n'), ((7733, 7767), 'numpy.floor', 'np.floor', (['((self.full_x - dwtx) / 2)'], {}), '((self.full_x - dwtx) / 2)\n', (7741, 7767), True, 'import numpy as np\n'), ((7774, 7807), 'numpy.ceil', 'np.ceil', (['((self.full_x - dwtx) / 2)'], {}), '((self.full_x - dwtx) / 2)\n', (7781, 7807), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from tkinter import *
from random import random
def on_click():
x = random()
y = random()
bt1.place(relx=x, rely=y)
root = Tk()
root['bg'] = 'white'
root.title('crown')
img = PhotoImage(file='crown.png')
bt1 = Button(image=img, command=on_click)
bt1.place(relx=0.5, rely=0.5, anchor=CENTER)
root.mainloop()
| [
"random.random"
] | [((122, 130), 'random.random', 'random', ([], {}), '()\n', (128, 130), False, 'from random import random\n'), ((139, 147), 'random.random', 'random', ([], {}), '()\n', (145, 147), False, 'from random import random\n')] |
from django.core.exceptions import ValidationError
from django.core.validators import validate_email
from django.template import Template, TemplateSyntaxError, TemplateDoesNotExist
from django.utils.encoding import force_str
def validate_email_with_name(value):
"""
Validate email address.
Both "<NAME> <<EMAIL>>" and "<EMAIL>" are valid.
"""
value = force_str(value)
recipient = value
if '<' in value and '>' in value:
start = value.find('<') + 1
end = value.find('>')
if start < end:
recipient = value[start:end]
validate_email(recipient)
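# Minimal usage sketch (addresses are illustrative):
#   validate_email_with_name('Jane Doe <jane@example.com>')  # validates jane@example.com
#   validate_email_with_name('jane@example.com')             # validated as-is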
def validate_comma_separated_emails(value):
"""
Validate every email address in a comma separated list of emails.
"""
if not isinstance(value, (tuple, list)):
raise ValidationError('Email list must be a list/tuple.')
for email in value:
try:
validate_email_with_name(email)
except ValidationError:
raise ValidationError('Invalid email: %s' % email, code='invalid')
def validate_template_syntax(source):
"""
Basic Django Template syntax validation. This allows for robuster template
authoring.
"""
try:
Template(source)
except (TemplateSyntaxError, TemplateDoesNotExist) as err:
raise ValidationError(str(err))
| [
"django.utils.encoding.force_str",
"django.template.Template",
"django.core.validators.validate_email",
"django.core.exceptions.ValidationError"
] | [((374, 390), 'django.utils.encoding.force_str', 'force_str', (['value'], {}), '(value)\n', (383, 390), False, 'from django.utils.encoding import force_str\n'), ((588, 613), 'django.core.validators.validate_email', 'validate_email', (['recipient'], {}), '(recipient)\n', (602, 613), False, 'from django.core.validators import validate_email\n'), ((805, 856), 'django.core.exceptions.ValidationError', 'ValidationError', (['"""Email list must be a list/tuple."""'], {}), "('Email list must be a list/tuple.')\n", (820, 856), False, 'from django.core.exceptions import ValidationError\n'), ((1217, 1233), 'django.template.Template', 'Template', (['source'], {}), '(source)\n', (1225, 1233), False, 'from django.template import Template, TemplateSyntaxError, TemplateDoesNotExist\n'), ((989, 1049), 'django.core.exceptions.ValidationError', 'ValidationError', (["('Invalid email: %s' % email)"], {'code': '"""invalid"""'}), "('Invalid email: %s' % email, code='invalid')\n", (1004, 1049), False, 'from django.core.exceptions import ValidationError\n')] |
# Input DOI / URL
import re
import sys
# Pyperclip is not built-in, check and download if needed
try:
import pyperclip
except (ImportError, ModuleNotFoundError):
print('Pyperclip module not found. Please download it.')
sys.exit(0)
# Regex for links
link_regex = re.compile(r'''(
http[s]?://
(?:[a-zA-Z]|
[0-9]|
[$-_@.&+]|
[!*\(\),]|
(?:%[0-9a-fA-F][0-9a-fA-F]))+
)''', re.IGNORECASE | re.VERBOSE)
# Get DOI / URL using different methods
# Method 1: argument
try:
input_link = sys.argv[1]
# Method 2: clipboard
except IndexError:
input_link = pyperclip.paste()
# Method 3: manual input
def regex_check(regex, link):
"""
Check using regex. If DOI/URL are not in the right format,
require manual input until correct or Enter to quit.
"""
while True:
match = re.match(regex, link)
        if match is None:
link = str(input('''Enter valid DOI / URL or press Enter to quit: > '''))
if link == '':
exit()
else:
continue
else:
return link
url = regex_check(link_regex, input_link) | [
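# e.g. a well-formed link passes straight through (DOI value illustrative):
#   regex_check(link_regex, 'https://doi.org/10.1000/182')
#   -> 'https://doi.org/10.1000/182'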
"pyperclip.paste",
"sys.exit",
"re.match",
"re.compile"
] | [((277, 450), 're.compile', 're.compile', (['"""(\n http[s]?://\n (?:[a-zA-Z]|\n [0-9]|\n [$-_@.&+]|\n [!*\\\\(\\\\),]|\n (?:%[0-9a-fA-F][0-9a-fA-F]))+\n )"""', '(re.IGNORECASE | re.VERBOSE)'], {}), '(\n """(\n http[s]?://\n (?:[a-zA-Z]|\n [0-9]|\n [$-_@.&+]|\n [!*\\\\(\\\\),]|\n (?:%[0-9a-fA-F][0-9a-fA-F]))+\n )"""\n , re.IGNORECASE | re.VERBOSE)\n', (287, 450), False, 'import re\n'), ((233, 244), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (241, 244), False, 'import sys\n'), ((595, 612), 'pyperclip.paste', 'pyperclip.paste', ([], {}), '()\n', (610, 612), False, 'import pyperclip\n'), ((837, 858), 're.match', 're.match', (['regex', 'link'], {}), '(regex, link)\n', (845, 858), False, 'import re\n')] |
import os
import sys
import logging
import time
import argparse
import numpy as np
from collections import OrderedDict
import scripts.options as option
import utils.util as util
from data.util import bgr2ycbcr
from data import create_dataset, create_dataloader
from models import create_model
# options
parser = argparse.ArgumentParser()
parser.add_argument('-opt', type=str, required=True, help='Path to options JSON file.')
opt = option.parse(parser.parse_args().opt, is_train=False)
util.mkdirs((path for key, path in opt['path'].items() if not key == 'pretrain_model_G'))
opt = option.dict_to_nonedict(opt)
util.setup_logger(None, opt['path']['log'], 'test.log', level=logging.INFO, screen=True)
logger = logging.getLogger('base')
logger.info(option.dict2str(opt))
# Create test dataset and dataloader
test_loaders = []
for phase, dataset_opt in sorted(opt['datasets'].items()):
test_set = create_dataset(dataset_opt)
test_loader = create_dataloader(test_set, dataset_opt)
logger.info('Number of test images in [{:s}]: {:d}'.format(dataset_opt['name'], len(test_set)))
test_loaders.append(test_loader)
# Create model
model = create_model(opt)
for test_loader in test_loaders:
test_set_name = test_loader.dataset.opt['name']
logger.info('\nTesting [{:s}]...'.format(test_set_name))
test_start_time = time.time()
dataset_dir = os.path.join(opt['path']['results_root'], test_set_name)
util.mkdir(dataset_dir)
test_results = OrderedDict()
test_results['psnr'] = []
test_results['ssim'] = []
test_results['psnr_y'] = []
test_results['ssim_y'] = []
for data in test_loader:
need_GT = False if test_loader.dataset.opt['dataroot_GT'] is None else True
# need_GT = True
model.feed_data_specular(data, need_GT=need_GT)
if opt["image_type"] == "exr":
y = data["x_offset"]
x = data["y_offset"]
img_path = data['NOISY_path'][0]
img_name = os.path.splitext(os.path.basename(img_path))[0]
start = time.time()
model.test() # test
end = time.time()
print("Time elapsed... %f "%(end - start))
visuals = model.get_current_visuals(need_GT=need_GT)
denoised_img = util.tensor2img(visuals['DENOISED']) # uint8
noisy_img = util.tensor2img(visuals['NOISY'])
gt_img = util.tensor2img(visuals['GT']) # uint8
# save images
suffix = opt['suffix']
        if suffix is None:
suffix = ""
save_DENOISED_img_path = os.path.join(dataset_dir, img_name + suffix + '_1denoised.png')
save_NOISY_img_path = os.path.join(dataset_dir, img_name + suffix + '_0noisy.png')
save_GT_img_path = os.path.join(dataset_dir, img_name + suffix + '_2gt.png')
# calculate PSNR and SSIM
if need_GT:
# gt_img = util.tensor2img(visuals['GT'])
gt_img = gt_img / 255.
denoised_img = denoised_img / 255.
crop_border = test_loader.dataset.opt['scale']
cropped_denoised_img = denoised_img#[crop_border:-crop_border, crop_border:-crop_border, :]
cropped_gt_img = gt_img#[crop_border:-crop_border, crop_border:-crop_border, :]
psnr = util.calculate_psnr(cropped_denoised_img * 255, cropped_gt_img * 255)
ssim = util.calculate_ssim(cropped_denoised_img * 255, cropped_gt_img * 255)
test_results['psnr'].append(psnr)
test_results['ssim'].append(ssim)
if gt_img.shape[2] == 3: # RGB image
denoised_img_y = bgr2ycbcr(denoised_img, only_y=True)
gt_img_y = bgr2ycbcr(gt_img, only_y=True)
cropped_denoised_img_y = denoised_img_y[crop_border:-crop_border, crop_border:-crop_border]
cropped_gt_img_y = gt_img_y[crop_border:-crop_border, crop_border:-crop_border]
psnr_y = util.calculate_psnr(cropped_denoised_img_y * 255, cropped_gt_img_y * 255)
ssim_y = util.calculate_ssim(cropped_denoised_img_y * 255, cropped_gt_img_y * 255)
test_results['psnr_y'].append(psnr_y)
test_results['ssim_y'].append(ssim_y)
logger.info('{:20s} - PSNR: {:.6f} dB; SSIM: {:.6f}; PSNR_Y: {:.6f} dB; SSIM_Y: {:.6f}.'\
.format(img_name, psnr, ssim, psnr_y, ssim_y))
else:
logger.info('{:20s} - PSNR: {:.6f} dB; SSIM: {:.6f}.'.format(img_name, psnr, ssim))
else:
logger.info(img_name)
if opt["image_type"] == "exr":
denoised_exr = util.tensor2exr(visuals['DENOISED']) # uint8
noisy_exr = util.tensor2exr(visuals['NOISY'])
gt_exr = util.tensor2exr(visuals['GT']) # uint8
save_DENOISED_img_path = os.path.join(dataset_dir, img_name + suffix + '_1denoised.exr')
save_NOISY_img_path = os.path.join(dataset_dir, img_name + suffix + '_0noisy.exr')
save_GT_img_path = os.path.join(dataset_dir, img_name + suffix + '_2gt.exr')
util.saveEXRfromMatrix(save_DENOISED_img_path, denoised_exr, (x, y))
util.saveEXRfromMatrix(save_NOISY_img_path, noisy_exr, (x, y))
util.saveEXRfromMatrix(save_GT_img_path, gt_exr, (x, y))
if need_GT: # metrics
# Average PSNR/SSIM results
ave_psnr = sum(test_results['psnr']) / len(test_results['psnr'])
ave_ssim = sum(test_results['ssim']) / len(test_results['ssim'])
logger.info('----Average PSNR/SSIM results for {}----\n\tPSNR: {:.6f} dB; SSIM: {:.6f}\n'\
.format(test_set_name, ave_psnr, ave_ssim))
# if test_results['psnr_y'] and test_results['ssim_y']:
# ave_psnr_y = sum(test_results['psnr_y']) / len(test_results['psnr_y'])
# ave_ssim_y = sum(test_results['ssim_y']) / len(test_results['ssim_y'])
# logger.info('----Y channel, average PSNR/SSIM----\n\tPSNR_Y: {:.6f} dB; SSIM_Y: {:.6f}\n'\
# .format(ave_psnr_y, ave_ssim_y))
| [
"logging.getLogger",
"utils.util.calculate_ssim",
"utils.util.tensor2img",
"argparse.ArgumentParser",
"scripts.options.dict2str",
"collections.OrderedDict",
"scripts.options.dict_to_nonedict",
"utils.util.saveEXRfromMatrix",
"utils.util.mkdir",
"utils.util.tensor2exr",
"utils.util.calculate_psnr",
"data.create_dataloader",
"time.time",
"data.util.bgr2ycbcr",
"models.create_model",
"utils.util.setup_logger",
"data.create_dataset",
"os.path.join",
"os.path.basename"
] | [((314, 339), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (337, 339), False, 'import argparse\n'), ((584, 612), 'scripts.options.dict_to_nonedict', 'option.dict_to_nonedict', (['opt'], {}), '(opt)\n', (607, 612), True, 'import scripts.options as option\n'), ((614, 706), 'utils.util.setup_logger', 'util.setup_logger', (['None', "opt['path']['log']", '"""test.log"""'], {'level': 'logging.INFO', 'screen': '(True)'}), "(None, opt['path']['log'], 'test.log', level=logging.INFO,\n screen=True)\n", (631, 706), True, 'import utils.util as util\n'), ((712, 737), 'logging.getLogger', 'logging.getLogger', (['"""base"""'], {}), "('base')\n", (729, 737), False, 'import logging\n'), ((1149, 1166), 'models.create_model', 'create_model', (['opt'], {}), '(opt)\n', (1161, 1166), False, 'from models import create_model\n'), ((750, 770), 'scripts.options.dict2str', 'option.dict2str', (['opt'], {}), '(opt)\n', (765, 770), True, 'import scripts.options as option\n'), ((901, 928), 'data.create_dataset', 'create_dataset', (['dataset_opt'], {}), '(dataset_opt)\n', (915, 928), False, 'from data import create_dataset, create_dataloader\n'), ((947, 987), 'data.create_dataloader', 'create_dataloader', (['test_set', 'dataset_opt'], {}), '(test_set, dataset_opt)\n', (964, 987), False, 'from data import create_dataset, create_dataloader\n'), ((1336, 1347), 'time.time', 'time.time', ([], {}), '()\n', (1345, 1347), False, 'import time\n'), ((1366, 1422), 'os.path.join', 'os.path.join', (["opt['path']['results_root']", 'test_set_name'], {}), "(opt['path']['results_root'], test_set_name)\n", (1378, 1422), False, 'import os\n'), ((1427, 1450), 'utils.util.mkdir', 'util.mkdir', (['dataset_dir'], {}), '(dataset_dir)\n', (1437, 1450), True, 'import utils.util as util\n'), ((1471, 1484), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (1482, 1484), False, 'from collections import OrderedDict\n'), ((2034, 2045), 'time.time', 'time.time', ([], {}), '()\n', (2043, 2045), False, 'import time\n'), ((2089, 2100), 'time.time', 'time.time', ([], {}), '()\n', (2098, 2100), False, 'import time\n'), ((2237, 2273), 'utils.util.tensor2img', 'util.tensor2img', (["visuals['DENOISED']"], {}), "(visuals['DENOISED'])\n", (2252, 2273), True, 'import utils.util as util\n'), ((2303, 2336), 'utils.util.tensor2img', 'util.tensor2img', (["visuals['NOISY']"], {}), "(visuals['NOISY'])\n", (2318, 2336), True, 'import utils.util as util\n'), ((2354, 2384), 'utils.util.tensor2img', 'util.tensor2img', (["visuals['GT']"], {}), "(visuals['GT'])\n", (2369, 2384), True, 'import utils.util as util\n'), ((2544, 2607), 'os.path.join', 'os.path.join', (['dataset_dir', "(img_name + suffix + '_1denoised.png')"], {}), "(dataset_dir, img_name + suffix + '_1denoised.png')\n", (2556, 2607), False, 'import os\n'), ((2638, 2698), 'os.path.join', 'os.path.join', (['dataset_dir', "(img_name + suffix + '_0noisy.png')"], {}), "(dataset_dir, img_name + suffix + '_0noisy.png')\n", (2650, 2698), False, 'import os\n'), ((2726, 2783), 'os.path.join', 'os.path.join', (['dataset_dir', "(img_name + suffix + '_2gt.png')"], {}), "(dataset_dir, img_name + suffix + '_2gt.png')\n", (2738, 2783), False, 'import os\n'), ((3262, 3331), 'utils.util.calculate_psnr', 'util.calculate_psnr', (['(cropped_denoised_img * 255)', '(cropped_gt_img * 255)'], {}), '(cropped_denoised_img * 255, cropped_gt_img * 255)\n', (3281, 3331), True, 'import utils.util as util\n'), ((3351, 3420), 'utils.util.calculate_ssim', 'util.calculate_ssim', (['(cropped_denoised_img * 255)', '(cropped_gt_img * 255)'], {}), '(cropped_denoised_img * 255, cropped_gt_img * 255)\n', (3370, 3420), True, 'import utils.util as util\n'), ((4609, 4645), 'utils.util.tensor2exr', 'util.tensor2exr', (["visuals['DENOISED']"], {}), "(visuals['DENOISED'])\n", (4624, 4645), True, 'import utils.util as util\n'), ((4679, 4712), 'utils.util.tensor2exr', 'util.tensor2exr', (["visuals['NOISY']"], {}), "(visuals['NOISY'])\n", (4694, 4712), True, 'import utils.util as util\n'), ((4734, 4764), 'utils.util.tensor2exr', 'util.tensor2exr', (["visuals['GT']"], {}), "(visuals['GT'])\n", (4749, 4764), True, 'import utils.util as util\n'), ((4812, 4875), 'os.path.join', 'os.path.join', (['dataset_dir', "(img_name + suffix + '_1denoised.exr')"], {}), "(dataset_dir, img_name + suffix + '_1denoised.exr')\n", (4824, 4875), False, 'import os\n'), ((4910, 4970), 'os.path.join', 'os.path.join', (['dataset_dir', "(img_name + suffix + '_0noisy.exr')"], {}), "(dataset_dir, img_name + suffix + '_0noisy.exr')\n", (4922, 4970), False, 'import os\n'), ((5002, 5059), 'os.path.join', 'os.path.join', (['dataset_dir', "(img_name + suffix + '_2gt.exr')"], {}), "(dataset_dir, img_name + suffix + '_2gt.exr')\n", (5014, 5059), False, 'import os\n'), ((5076, 5144), 'utils.util.saveEXRfromMatrix', 'util.saveEXRfromMatrix', (['save_DENOISED_img_path', 'denoised_exr', '(x, y)'], {}), '(save_DENOISED_img_path, denoised_exr, (x, y))\n', (5098, 5144), True, 'import utils.util as util\n'), ((5158, 5220), 'utils.util.saveEXRfromMatrix', 'util.saveEXRfromMatrix', (['save_NOISY_img_path', 'noisy_exr', '(x, y)'], {}), '(save_NOISY_img_path, noisy_exr, (x, y))\n', (5180, 5220), True, 'import utils.util as util\n'), ((5235, 5291), 'utils.util.saveEXRfromMatrix', 'util.saveEXRfromMatrix', (['save_GT_img_path', 'gt_exr', '(x, y)'], {}), '(save_GT_img_path, gt_exr, (x, y))\n', (5257, 5291), True, 'import utils.util as util\n'), ((1987, 2013), 'os.path.basename', 'os.path.basename', (['img_path'], {}), '(img_path)\n', (2003, 2013), False, 'import os\n'), ((3597, 3633), 'data.util.bgr2ycbcr', 'bgr2ycbcr', (['denoised_img'], {'only_y': '(True)'}), '(denoised_img, only_y=True)\n', (3606, 3633), False, 'from data.util import bgr2ycbcr\n'), ((3661, 3691), 'data.util.bgr2ycbcr', 'bgr2ycbcr', (['gt_img'], {'only_y': '(True)'}), '(gt_img, only_y=True)\n', (3670, 3691), False, 'from data.util import bgr2ycbcr\n'), ((3921, 3994), 'utils.util.calculate_psnr', 'util.calculate_psnr', (['(cropped_denoised_img_y * 255)', '(cropped_gt_img_y * 255)'], {}), '(cropped_denoised_img_y * 255, cropped_gt_img_y * 255)\n', (3940, 3994), True, 'import utils.util as util\n'), ((4020, 4093), 'utils.util.calculate_ssim', 'util.calculate_ssim', (['(cropped_denoised_img_y * 255)', '(cropped_gt_img_y * 255)'], {}), '(cropped_denoised_img_y * 255, cropped_gt_img_y * 255)\n', (4039, 4093), True, 'import utils.util as util\n')] |
from api import db
from uuid import uuid4
from ariadne import MutationType
from api.models import Post
from api.store import queues
mutation = MutationType()
@mutation.field("createPost")
async def create_post_resolver(obj, info, input):
try:
post = Post(postId=uuid4(), caption=input["caption"])
db.session.add(post)
db.session.commit()
for queue in queues:
queue.put(post)
        return {
"error": None,
"post": post
}
except Exception as e:
        return {
            "error": {"message": str(e), "field": "unknown"},
"post": None
} | [
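# For reference, a schema sketch this resolver could back (assumed for
# illustration; the real SDL lives elsewhere in the project):
#   type Mutation {
#       createPost(input: CreatePostInput!): CreatePostResponse!
#   }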
"api.db.session.commit",
"api.db.session.add",
"uuid.uuid4",
"ariadne.MutationType"
] | [((145, 159), 'ariadne.MutationType', 'MutationType', ([], {}), '()\n', (157, 159), False, 'from ariadne import MutationType\n'), ((320, 340), 'api.db.session.add', 'db.session.add', (['post'], {}), '(post)\n', (334, 340), False, 'from api import db\n'), ((349, 368), 'api.db.session.commit', 'db.session.commit', ([], {}), '()\n', (366, 368), False, 'from api import db\n'), ((277, 284), 'uuid.uuid4', 'uuid4', ([], {}), '()\n', (282, 284), False, 'from uuid import uuid4\n')] |
import abc
from typing import TypeVar, Generic, List, Dict
T = TypeVar('T')
class CRUDInterface(Generic[T], metaclass=abc.ABCMeta):
@abc.abstractmethod
def all(self) -> List[T]:
pass
@abc.abstractmethod
def one_by_id(self, entity_id: int) -> T:
pass
@abc.abstractmethod
def append_one(self, entity: Dict) -> T:
pass
@abc.abstractmethod
def replace_one(self, entity: Dict) -> None:
pass
@abc.abstractmethod
def remove_one(self, entity_id: int) -> None:
pass
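# A minimal in-memory implementation sketch of the interface above (class name
# and storage strategy are illustrative, not part of the original):
class InMemoryCRUD(CRUDInterface[Dict]):
    def __init__(self) -> None:
        self._items: Dict[int, Dict] = {}  # entity_id -> entity
        self._next_id = 1
    def all(self) -> List[Dict]:
        return list(self._items.values())
    def one_by_id(self, entity_id: int) -> Dict:
        return self._items[entity_id]
    def append_one(self, entity: Dict) -> Dict:
        stored = {**entity, 'id': self._next_id}
        self._items[self._next_id] = stored
        self._next_id += 1
        return stored
    def replace_one(self, entity: Dict) -> None:
        self._items[entity['id']] = entity
    def remove_one(self, entity_id: int) -> None:
        del self._items[entity_id]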
| [
"typing.TypeVar"
] | [((64, 76), 'typing.TypeVar', 'TypeVar', (['"""T"""'], {}), "('T')\n", (71, 76), False, 'from typing import TypeVar, Generic, List, Dict\n')] |
# Copyright 2015 Huawei Technologies Co., Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from oslo_config import cfg
from oslo_log import log as logging
from pecan import rest
import six
import tooz.coordination
import wsmeext.pecan as wsme_pecan
from mistral.api import access_control as acl
from mistral.api.controllers.v2 import resources
# TODO(rakhmerov): invalid dependency, a REST controller must not depend on
# a launch script.
from mistral.cmd import launch
from mistral import context
from mistral import exceptions as exc
from mistral.service import coordination
from mistral.utils import rest_utils
LOG = logging.getLogger(__name__)
class ServicesController(rest.RestController):
@rest_utils.wrap_wsme_controller_exception
@wsme_pecan.wsexpose(resources.Services)
def get_all(self):
"""Return all services."""
acl.enforce('services:list', context.ctx())
LOG.info("Fetch services.")
if not cfg.CONF.coordination.backend_url:
raise exc.CoordinationException("Service API is not supported.")
service_coordinator = coordination.get_service_coordinator()
if not service_coordinator.is_active():
raise exc.CoordinationException(
"Failed to connect to coordination backend."
)
services_list = []
service_group = ['%s_group' % i for i in launch.LAUNCH_OPTIONS]
try:
for group in service_group:
members = service_coordinator.get_members(group)
services_list.extend(
[resources.Service.from_dict(
{'type': group, 'name': member}) for member in members]
)
except tooz.coordination.ToozError as e:
# In the scenario of network interruption or manually shutdown
# connection shutdown, ToozError will be raised.
raise exc.CoordinationException(
"Failed to get service members from coordination backend. %s"
% six.text_type(e)
)
return resources.Services(services=services_list)
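    # A successful GET serializes the Services resource roughly as follows
    # (member names depend on how each service registered itself; values are
    # illustrative):
    #   {"services": [{"type": "api_group", "name": "host_1234"}, ...]}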
| [
"mistral.context.ctx",
"six.text_type",
"mistral.api.controllers.v2.resources.Services",
"mistral.service.coordination.get_service_coordinator",
"wsmeext.pecan.wsexpose",
"mistral.exceptions.CoordinationException",
"mistral.api.controllers.v2.resources.Service.from_dict",
"oslo_log.log.getLogger"
] | [((1153, 1180), 'oslo_log.log.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1170, 1180), True, 'from oslo_log import log as logging\n'), ((1282, 1321), 'wsmeext.pecan.wsexpose', 'wsme_pecan.wsexpose', (['resources.Services'], {}), '(resources.Services)\n', (1301, 1321), True, 'import wsmeext.pecan as wsme_pecan\n'), ((1628, 1666), 'mistral.service.coordination.get_service_coordinator', 'coordination.get_service_coordinator', ([], {}), '()\n', (1664, 1666), False, 'from mistral.service import coordination\n'), ((2614, 2656), 'mistral.api.controllers.v2.resources.Services', 'resources.Services', ([], {'services': 'services_list'}), '(services=services_list)\n', (2632, 2656), False, 'from mistral.api.controllers.v2 import resources\n'), ((1417, 1430), 'mistral.context.ctx', 'context.ctx', ([], {}), '()\n', (1428, 1430), False, 'from mistral import context\n'), ((1538, 1596), 'mistral.exceptions.CoordinationException', 'exc.CoordinationException', (['"""Service API is not supported."""'], {}), "('Service API is not supported.')\n", (1563, 1596), True, 'from mistral import exceptions as exc\n'), ((1734, 1805), 'mistral.exceptions.CoordinationException', 'exc.CoordinationException', (['"""Failed to connect to coordination backend."""'], {}), "('Failed to connect to coordination backend.')\n", (1759, 1805), True, 'from mistral import exceptions as exc\n'), ((2114, 2174), 'mistral.api.controllers.v2.resources.Service.from_dict', 'resources.Service.from_dict', (["{'type': group, 'name': member}"], {}), "({'type': group, 'name': member})\n", (2141, 2174), False, 'from mistral.api.controllers.v2 import resources\n'), ((2567, 2583), 'six.text_type', 'six.text_type', (['e'], {}), '(e)\n', (2580, 2583), False, 'import six\n')] |
from setuptools import setup
setup(
name="greek-utils",
version="0.2",
description="various utilities for processing Ancient Greek",
license="MIT",
url="http://github.com/jtauber/greek-utils",
author="<NAME>",
author_email="<EMAIL>",
packages=["greekutils"],
classifiers=[
"Development Status :: 3 - Alpha",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 3.3",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Topic :: Text Processing",
"Topic :: Text Processing :: Linguistic",
"Topic :: Utilities",
],
)
| [
"setuptools.setup"
] | [((30, 649), 'setuptools.setup', 'setup', ([], {'name': '"""greek-utils"""', 'version': '"""0.2"""', 'description': '"""various utilities for processing Ancient Greek"""', 'license': '"""MIT"""', 'url': '"""http://github.com/jtauber/greek-utils"""', 'author': '"""<NAME>"""', 'author_email': '"""<EMAIL>"""', 'packages': "['greekutils']", 'classifiers': "['Development Status :: 3 - Alpha',\n 'License :: OSI Approved :: MIT License',\n 'Programming Language :: Python :: 3.3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6', 'Topic :: Text Processing',\n 'Topic :: Text Processing :: Linguistic', 'Topic :: Utilities']"}), "(name='greek-utils', version='0.2', description=\n 'various utilities for processing Ancient Greek', license='MIT', url=\n 'http://github.com/jtauber/greek-utils', author='<NAME>', author_email=\n '<EMAIL>', packages=['greekutils'], classifiers=[\n 'Development Status :: 3 - Alpha',\n 'License :: OSI Approved :: MIT License',\n 'Programming Language :: Python :: 3.3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6', 'Topic :: Text Processing',\n 'Topic :: Text Processing :: Linguistic', 'Topic :: Utilities'])\n", (35, 649), False, 'from setuptools import setup\n')] |
"""
tweet stuff in intervals
"""
import time
import datetime
import twitter
from markov_chains import german_text
from config import config_no, config_yes
MAX_TWEET_LENGTH = 280
greeting = ' Sehr geehrte/r Antragsteller/in.'
ending = ' MfG'
num_tweets = 3
class FoiaBot:
def __init__(self, config):
self.api = twitter.Api(consumer_key=config["consumer_key"],
consumer_secret=config["consumer_secret"],
access_token_key=config["access_token"],
access_token_secret=config["access_token_secret"], sleep_on_rate_limit=True)
self.screen_name = config["screen_name"]
self.model = german_text.setup_model(config["model_path"])
self.hour_to_tweet = config["hour_to_tweet"]
def get_favorites(self):
favorites = self.api.GetFavorites(
screen_name=self.screen_name, count=200)
print(favorites)
fav_set = set([f.id for f in favorites])
return fav_set
def get_status_to_work_on(self):
favorites = self.get_favorites()
status_list = self.api.GetMentions(count=200, trim_user=True,
contributor_details=False, include_entities=False)
for status in status_list:
print(status)
if status.id in favorites:
continue
if status.in_reply_to_status_id is not None:
continue
if not status.text.startswith('@' + self.screen_name):
continue
self.post_replies(status)
def post_replies(self, status):
tweets = self.create_tweets()
print(tweets)
success = True
reply_to_status_id = status.id
for tweet in tweets:
response = self.api.PostUpdate(tweet, in_reply_to_status_id=reply_to_status_id, auto_populate_reply_metadata=True,
exclude_reply_user_ids=False, trim_user=True, verify_status_length=False)
if response is None:
success = False
break
else:
reply_to_status_id = response.id
if success:
self.api.CreateFavorite(status=status)
def generate_sentence(self, tweet_text, chars_left, set_limit=False):
max_length = 150
if set_limit:
max_length = chars_left
new_sent = self.model.make_short_sentence(max_length, tries=100)
if new_sent is not None and len(new_sent) < chars_left:
tweet_text += ' ' + new_sent
return tweet_text
# https://stackoverflow.com/questions/7703865/going-from-twitter-date-to-python-datetime-date
def get_date_from_twitter_string(self, created_at):
x = time.strptime(created_at, '%a %b %d %H:%M:%S +0000 %Y')
return datetime.datetime.fromtimestamp(time.mktime(x))
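    # e.g. get_date_from_twitter_string('Wed Aug 27 13:08:45 +0000 2008')
    # -> datetime.datetime(2008, 8, 27, 13, 8, 45); the literal '+0000' in the
    # format string means the offset is not applied, so the result is naive.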
def tweet_once_a_day(self):
now = datetime.datetime.now()
print(now.hour)
if now.hour == self.hour_to_tweet:
last_status_list = self.api.GetUserTimeline(screen_name=self.screen_name, count=1,
include_rts=False, trim_user=True, exclude_replies=True)
print(last_status_list)
if last_status_list is None:
return
if len(last_status_list) == 0:
self.post_single_tweet()
if len(last_status_list) == 1:
last_status = last_status_list[0]
created_at_date = self.get_date_from_twitter_string(
last_status.created_at)
time_diff = now - created_at_date
print('time_diff', time_diff)
time_diff_hours = time_diff.seconds / 3600 + time_diff.days * 24
print(time_diff_hours)
if time_diff_hours > 20: # something is broken with the date but whatever
self.post_single_tweet()
def post_single_tweet(self):
tweet_text = self.generate_single_tweet_text()
response = self.api.PostUpdate(tweet_text, verify_status_length=False)
def generate_single_tweet_text(self):
tweet_text = ""
while True:
chars_left = MAX_TWEET_LENGTH - len(tweet_text)
chars_left -= 1 # for the space
if chars_left < 20:
break
if chars_left < 70:
tweet_text = self.generate_sentence(
tweet_text, chars_left, True)
else:
tweet_text = self.generate_sentence(
tweet_text, chars_left)
return tweet_text
def create_tweets(self):
tweets = []
for i in range(num_tweets):
tweet_text = f'{i + 1}/{num_tweets}'
if i == 0:
tweet_text += greeting
while True:
chars_left = MAX_TWEET_LENGTH - \
len(tweet_text) - 1 # because of space
# ensure space for the ending
if i + 1 == num_tweets:
chars_left -= len(ending)
if chars_left < 20:
# at ending
if i + 1 == num_tweets:
tweet_text += ending
break
if chars_left < 70:
tweet_text = self.generate_sentence(
tweet_text, chars_left, True)
else:
tweet_text = self.generate_sentence(
tweet_text, chars_left)
tweets.append(tweet_text)
return tweets
def run(self):
self.get_status_to_work_on()
def main():
print('main called')
no_bot = FoiaBot(config_no)
print('after setting up no bot')
yes_bot = FoiaBot(config_yes)
print('after setting up yes bot')
no_bot.run()
print('after running no bot')
yes_bot.run()
print('after running yes bot')
no_bot.tweet_once_a_day()
yes_bot.tweet_once_a_day()
print('after tweet once a day')
def lambda_handler(event, context):
print('handler called')
main()
print('handler about to finish')
# if __name__ == '__main__':
# main()
| [
"time.strptime",
"time.mktime",
"datetime.datetime.now",
"twitter.Api",
"markov_chains.german_text.setup_model"
] | [((330, 553), 'twitter.Api', 'twitter.Api', ([], {'consumer_key': "config['consumer_key']", 'consumer_secret': "config['consumer_secret']", 'access_token_key': "config['access_token']", 'access_token_secret': "config['access_token_secret']", 'sleep_on_rate_limit': '(True)'}), "(consumer_key=config['consumer_key'], consumer_secret=config[\n 'consumer_secret'], access_token_key=config['access_token'],\n access_token_secret=config['access_token_secret'], sleep_on_rate_limit=True\n )\n", (341, 553), False, 'import twitter\n'), ((703, 748), 'markov_chains.german_text.setup_model', 'german_text.setup_model', (["config['model_path']"], {}), "(config['model_path'])\n", (726, 748), False, 'from markov_chains import german_text\n'), ((2794, 2849), 'time.strptime', 'time.strptime', (['created_at', '"""%a %b %d %H:%M:%S +0000 %Y"""'], {}), "(created_at, '%a %b %d %H:%M:%S +0000 %Y')\n", (2807, 2849), False, 'import time\n'), ((2960, 2983), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (2981, 2983), False, 'import datetime\n'), ((2897, 2911), 'time.mktime', 'time.mktime', (['x'], {}), '(x)\n', (2908, 2911), False, 'import time\n')] |
from django.conf import settings
from netaddr import mac_unix, mac_eui48
import importlib
import warnings
class mac_linux(mac_unix):
"""MAC format with zero-padded all upper-case hex and colon separated"""
word_fmt = '%.2X'
def default_dialect(eui_obj=None):
# Check to see if a default dialect class has been specified in settings,
# using 'module.dialect_cls' string and use importlib and getattr to retrieve dialect class. 'module' is the module and
# 'dialect_cls' is the class name of the custom dialect. The dialect must either be defined or imported by the module's
# __init__.py if the module is a package.
from .fields import MACAddressField # Remove import at v1.4
if hasattr(settings, 'MACADDRESS_DEFAULT_DIALECT') and not MACAddressField.dialect:
module, dialect_cls = settings.MACADDRESS_DEFAULT_DIALECT.split('.')
dialect = getattr(importlib.import_module(module), dialect_cls, mac_linux)
return dialect
else:
if MACAddressField.dialect: # Remove this "if" statement at v1.4
warnings.warn(
"The set_dialect class method on MACAddressField has been deprecated, in favor of the default_dialect "
"utility function and settings.MACADDRESS_DEFAULT_DIALECT. See macaddress.__init__.py source or the "
"project README for more information.",
DeprecationWarning,
)
return MACAddressField.dialect
if eui_obj:
return eui_obj.dialect
else:
return mac_linux
def format_mac(eui_obj, dialect):
# Format a EUI instance as a string using the supplied dialect class, allowing custom string classes by
# passing directly or as a string, a la 'module.dialect_cls', where 'module' is the module and 'dialect_cls'
# is the class name of the custom dialect. The dialect must either be defined or imported by the module's __init__.py if
# the module is a package.
if not isinstance(dialect, mac_eui48):
if isinstance(dialect, str):
module, dialect_cls = dialect.split('.')
dialect = getattr(importlib.import_module(module), dialect_cls)
eui_obj.dialect = dialect
return str(eui_obj)
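# Usage sketch (MAC value illustrative):
#   from netaddr import EUI
#   format_mac(EUI('00:1b:77:49:54:fd'), mac_linux)           # -> '00:1B:77:49:54:FD'
#   format_mac(EUI('00:1b:77:49:54:fd'), 'netaddr.mac_unix')  # 'module.class' form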
from pkg_resources import get_distribution, DistributionNotFound
import os.path
try:
_dist = get_distribution('django-macaddress')
except DistributionNotFound:
__version__ = 'Please install this project with setup.py'
else:
__version__ = _dist.version
VERSION = __version__ # synonym
| [
"warnings.warn",
"django.conf.settings.MACADDRESS_DEFAULT_DIALECT.split",
"pkg_resources.get_distribution",
"importlib.import_module"
] | [((2348, 2385), 'pkg_resources.get_distribution', 'get_distribution', (['"""django-macaddress"""'], {}), "('django-macaddress')\n", (2364, 2385), False, 'from pkg_resources import get_distribution, DistributionNotFound\n'), ((825, 871), 'django.conf.settings.MACADDRESS_DEFAULT_DIALECT.split', 'settings.MACADDRESS_DEFAULT_DIALECT.split', (['"""."""'], {}), "('.')\n", (866, 871), False, 'from django.conf import settings\n'), ((898, 929), 'importlib.import_module', 'importlib.import_module', (['module'], {}), '(module)\n', (921, 929), False, 'import importlib\n'), ((1073, 1356), 'warnings.warn', 'warnings.warn', (['"""The set_dialect class method on MACAddressField has been deprecated, in favor of the default_dialect utility function and settings.MACADDRESS_DEFAULT_DIALECT. See macaddress.__init__.py source or the project README for more information."""', 'DeprecationWarning'], {}), "(\n 'The set_dialect class method on MACAddressField has been deprecated, in favor of the default_dialect utility function and settings.MACADDRESS_DEFAULT_DIALECT. See macaddress.__init__.py source or the project README for more information.'\n , DeprecationWarning)\n", (1086, 1356), False, 'import warnings\n'), ((2148, 2179), 'importlib.import_module', 'importlib.import_module', (['module'], {}), '(module)\n', (2171, 2179), False, 'import importlib\n')] |
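# Illustrative usage of the helpers above (assumes netaddr is installed; the MAC
# value is an arbitrary example):
from netaddr import EUI
eui = EUI("00-1b-44-11-3a-b7")
print(format_mac(eui, mac_linux))           # -> 00:1B:44:11:3A:B7
print(format_mac(eui, "netaddr.mac_unix"))  # dialect given as a "module.dialect_cls" string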
from django.db import models
from utils.models import BaseModel
from users.models import User, Address
from goods.models import GoodsSKU
# Create your models here.
class OrderInfo(BaseModel):
"""订单信息"""
PAY_METHOD = ['1', '2']
PAY_METHOD_CHOICES = (
(1, "货到付款"),
(2, "支付宝"),
)
ORDER_STATUS_CHOICES = (
(1, "待支付"),
(2, "待发货"),
(3, "待收货"),
(4, "待评价"),
(5, "已完成"),
)
"""---------订单信息------------------------"""
PAY_METHODS = {
1: "货到付款",
2: "支付宝",
}
ORDER_STATUS = {
1: "待支付",
2: "待发货",
3: "待收货",
4: "待评价",
5: "已完成",
}
PAY_METHODS_ENUM = {
"CASH": 1,
"ALIPAY": 2
}
ORDER_STATUS_ENUM = {
"UNPAID": 1,
"UNSEND": 2,
"UNRECEIVED": 3,
"UNCOMMENT": 4,
"FINISHED": 5
}
order_id = models.CharField(max_length=64, primary_key=True, verbose_name="订单号")
user = models.ForeignKey(User, on_delete=models.CASCADE, verbose_name="下单用户")
address = models.ForeignKey(Address, on_delete=models.CASCADE, verbose_name="收获地址")
total_count = models.IntegerField(default=1, verbose_name="商品总数")
total_amount = models.DecimalField(max_digits=10, decimal_places=2, verbose_name="商品总金额")
trans_cost = models.DecimalField(max_digits=10, decimal_places=2, verbose_name="运费")
pay_method = models.SmallIntegerField(choices=PAY_METHOD_CHOICES, default=1, verbose_name="支付方式")
status = models.SmallIntegerField(choices=ORDER_STATUS_CHOICES, default=1, verbose_name="订单状态")
trade_id = models.CharField(max_length=100, unique=True, null=True, blank=True, verbose_name="支付编号")
class Meta:
db_table = "df_order_info"
class OrderGoods(BaseModel):
"""订单商品"""
order = models.ForeignKey(OrderInfo, on_delete=models.CASCADE, verbose_name="订单")
sku = models.ForeignKey(GoodsSKU, on_delete=models.CASCADE, verbose_name="订单商品")
count = models.IntegerField(default=1, verbose_name="数量")
price = models.DecimalField(max_digits=10, decimal_places=2, verbose_name="单价")
comment = models.TextField(default="", verbose_name="评价信息")
class Meta:
db_table = "df_order_goods"
| [
"django.db.models.TextField",
"django.db.models.IntegerField",
"django.db.models.ForeignKey",
"django.db.models.SmallIntegerField",
"django.db.models.DecimalField",
"django.db.models.CharField"
] | [((911, 980), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(64)', 'primary_key': '(True)', 'verbose_name': '"""订单号"""'}), "(max_length=64, primary_key=True, verbose_name='订单号')\n", (927, 980), False, 'from django.db import models\n'), ((992, 1062), 'django.db.models.ForeignKey', 'models.ForeignKey', (['User'], {'on_delete': 'models.CASCADE', 'verbose_name': '"""下单用户"""'}), "(User, on_delete=models.CASCADE, verbose_name='下单用户')\n", (1009, 1062), False, 'from django.db import models\n'), ((1077, 1150), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Address'], {'on_delete': 'models.CASCADE', 'verbose_name': '"""收获地址"""'}), "(Address, on_delete=models.CASCADE, verbose_name='收获地址')\n", (1094, 1150), False, 'from django.db import models\n'), ((1169, 1220), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': '(1)', 'verbose_name': '"""商品总数"""'}), "(default=1, verbose_name='商品总数')\n", (1188, 1220), False, 'from django.db import models\n'), ((1240, 1314), 'django.db.models.DecimalField', 'models.DecimalField', ([], {'max_digits': '(10)', 'decimal_places': '(2)', 'verbose_name': '"""商品总金额"""'}), "(max_digits=10, decimal_places=2, verbose_name='商品总金额')\n", (1259, 1314), False, 'from django.db import models\n'), ((1332, 1403), 'django.db.models.DecimalField', 'models.DecimalField', ([], {'max_digits': '(10)', 'decimal_places': '(2)', 'verbose_name': '"""运费"""'}), "(max_digits=10, decimal_places=2, verbose_name='运费')\n", (1351, 1403), False, 'from django.db import models\n'), ((1421, 1509), 'django.db.models.SmallIntegerField', 'models.SmallIntegerField', ([], {'choices': 'PAY_METHOD_CHOICES', 'default': '(1)', 'verbose_name': '"""支付方式"""'}), "(choices=PAY_METHOD_CHOICES, default=1,\n verbose_name='支付方式')\n", (1445, 1509), False, 'from django.db import models\n'), ((1519, 1609), 'django.db.models.SmallIntegerField', 'models.SmallIntegerField', ([], {'choices': 'ORDER_STATUS_CHOICES', 'default': '(1)', 'verbose_name': '"""订单状态"""'}), "(choices=ORDER_STATUS_CHOICES, default=1,\n verbose_name='订单状态')\n", (1543, 1609), False, 'from django.db import models\n'), ((1621, 1714), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)', 'unique': '(True)', 'null': '(True)', 'blank': '(True)', 'verbose_name': '"""支付编号"""'}), "(max_length=100, unique=True, null=True, blank=True,\n verbose_name='支付编号')\n", (1637, 1714), False, 'from django.db import models\n'), ((1821, 1894), 'django.db.models.ForeignKey', 'models.ForeignKey', (['OrderInfo'], {'on_delete': 'models.CASCADE', 'verbose_name': '"""订单"""'}), "(OrderInfo, on_delete=models.CASCADE, verbose_name='订单')\n", (1838, 1894), False, 'from django.db import models\n'), ((1905, 1979), 'django.db.models.ForeignKey', 'models.ForeignKey', (['GoodsSKU'], {'on_delete': 'models.CASCADE', 'verbose_name': '"""订单商品"""'}), "(GoodsSKU, on_delete=models.CASCADE, verbose_name='订单商品')\n", (1922, 1979), False, 'from django.db import models\n'), ((1992, 2041), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': '(1)', 'verbose_name': '"""数量"""'}), "(default=1, verbose_name='数量')\n", (2011, 2041), False, 'from django.db import models\n'), ((2054, 2125), 'django.db.models.DecimalField', 'models.DecimalField', ([], {'max_digits': '(10)', 'decimal_places': '(2)', 'verbose_name': '"""单价"""'}), "(max_digits=10, decimal_places=2, verbose_name='单价')\n", (2073, 2125), False, 'from django.db import models\n'), ((2140, 2189), 'django.db.models.TextField', 'models.TextField', ([], {'default': 
'""""""', 'verbose_name': '"""评价信息"""'}), "(default='', verbose_name='评价信息')\n", (2156, 2189), False, 'from django.db import models\n')] |
import os
from pathlib import Path
from typing import Any, Dict
from determined.common import util
MASTER_SCHEME = "http"
MASTER_IP = "localhost"
MASTER_PORT = "8080"
DET_VERSION = None
DEFAULT_MAX_WAIT_SECS = 1800
MAX_TASK_SCHEDULED_SECS = 30
MAX_TRIAL_BUILD_SECS = 90
DEFAULT_TF1_CPU_IMAGE = "determinedai/environments:py-3.7-pytorch-1.7-tf-1.15-cpu-08f9c9b"
DEFAULT_TF2_CPU_IMAGE = (
"determinedai/environments:py-3.8-pytorch-1.9-lightning-1.3-tf-2.4-cpu-08f9c9b"
)
DEFAULT_TF1_GPU_IMAGE = "determinedai/environments:cuda-10.2-pytorch-1.7-tf-1.15-gpu-08f9c9b"
DEFAULT_TF2_GPU_IMAGE = (
"determinedai/environments:cuda-11.1-pytorch-1.9-lightning-1.3-tf-2.4-gpu-08f9c9b"
)
TF1_CPU_IMAGE = os.environ.get("TF1_CPU_IMAGE") or DEFAULT_TF1_CPU_IMAGE
TF2_CPU_IMAGE = os.environ.get("TF2_CPU_IMAGE") or DEFAULT_TF2_CPU_IMAGE
TF1_GPU_IMAGE = os.environ.get("TF1_GPU_IMAGE") or DEFAULT_TF1_GPU_IMAGE
TF2_GPU_IMAGE = os.environ.get("TF2_GPU_IMAGE") or DEFAULT_TF2_GPU_IMAGE
GPU_ENABLED = os.environ.get("DET_TEST_GPU_ENABLED", "1") not in ("0", "false")
PROJECT_ROOT_PATH = Path(__file__).resolve().parents[2]
def fixtures_path(path: str) -> str:
return os.path.join(os.path.dirname(__file__), "fixtures", path)
def tutorials_path(path: str) -> str:
return os.path.join(os.path.dirname(__file__), "../../examples/tutorials", path)
def cv_examples_path(path: str) -> str:
return os.path.join(os.path.dirname(__file__), "../../examples/computer_vision", path)
def nlp_examples_path(path: str) -> str:
return os.path.join(os.path.dirname(__file__), "../../examples/nlp", path)
def nas_examples_path(path: str) -> str:
return os.path.join(os.path.dirname(__file__), "../../examples/nas", path)
def meta_learning_examples_path(path: str) -> str:
return os.path.join(os.path.dirname(__file__), "../../examples/meta_learning", path)
def gan_examples_path(path: str) -> str:
return os.path.join(os.path.dirname(__file__), "../../examples/gan", path)
def decision_trees_examples_path(path: str) -> str:
return os.path.join(os.path.dirname(__file__), "../../examples/decision_trees", path)
def features_examples_path(path: str) -> str:
return os.path.join(os.path.dirname(__file__), "../../examples/features", path)
def model_hub_examples_path(path: str) -> str:
return os.path.join(os.path.dirname(__file__), "../../model_hub/examples", path)
def graphs_examples_path(path: str) -> str:
return os.path.join(os.path.dirname(__file__), "../../examples/graphs", path)
def load_config(config_path: str) -> Any:
with open(config_path) as f:
config = util.safe_load_yaml_with_exceptions(f)
return config
def make_master_url(suffix: str = "") -> str:
return "{}://{}:{}/{}".format(MASTER_SCHEME, MASTER_IP, MASTER_PORT, suffix)
def set_global_batch_size(config: Dict[Any, Any], batch_size: int) -> Dict[Any, Any]:
config = config.copy()
config["hyperparameters"]["global_batch_size"] = batch_size
return config
def set_slots_per_trial(config: Dict[Any, Any], slots: int) -> Dict[Any, Any]:
config = config.copy()
config.setdefault("resources", {})
config["resources"]["slots_per_trial"] = slots
return config
def set_max_length(config: Dict[Any, Any], max_length: Dict[str, int]) -> Dict[Any, Any]:
config = config.copy()
config["searcher"]["max_length"] = max_length
return config
def set_min_validation_period(
config: Dict[Any, Any], min_validation_period: Dict[str, int]
) -> Dict[Any, Any]:
config = config.copy()
config["min_validation_period"] = min_validation_period
return config
def set_min_checkpoint_period(
config: Dict[Any, Any], min_checkpoint_period: Dict[str, int]
) -> Dict[Any, Any]:
config = config.copy()
config["min_checkpoint_period"] = min_checkpoint_period
return config
def set_aggregation_frequency(config: Dict[Any, Any], aggregation_frequency: int) -> Dict[Any, Any]:
config = config.copy()
config.setdefault("optimizations", {})
config["optimizations"]["aggregation_frequency"] = aggregation_frequency
return config
def set_tensor_auto_tuning(config: Dict[Any, Any], auto_tune: bool) -> Dict[Any, Any]:
config = config.copy()
config.setdefault("optimizations", {})
config["optimizations"]["auto_tune_tensor_fusion"] = auto_tune
return config
def set_image(config: Dict[Any, Any], cpu_image: str, gpu_image: str) -> Dict[Any, Any]:
config = config.copy()
config.setdefault("environment", {})
config["environment"]["image"] = {"cpu": cpu_image, "gpu": gpu_image}
return config
def set_tf1_image(config: Dict[Any, Any]) -> Dict[Any, Any]:
return set_image(config, TF1_CPU_IMAGE, TF1_GPU_IMAGE)
def set_tf2_image(config: Dict[Any, Any]) -> Dict[Any, Any]:
return set_image(config, TF2_CPU_IMAGE, TF2_GPU_IMAGE)
def set_shared_fs_data_layer(config: Dict[Any, Any]) -> Dict[Any, Any]:
config = config.copy()
config["data_layer"] = {}
config["data_layer"]["type"] = "shared_fs"
return config
def set_s3_data_layer(config: Dict[Any, Any]) -> Dict[Any, Any]:
config = config.copy()
config["data_layer"] = {}
config["data_layer"]["type"] = "s3"
config["data_layer"]["bucket"] = "yogadl-test"
config["data_layer"]["bucket_directory_path"] = "determined_integration_tests"
return config
def set_random_seed(config: Dict[Any, Any], seed: int) -> Dict[Any, Any]:
config = config.copy()
config.setdefault("reproducibility", {})
config["reproducibility"]["experiment_seed"] = seed
return config
def set_hparam(config: Dict[Any, Any], name: str, value: Any) -> Dict[Any, Any]:
config = config.copy()
config["hyperparameters"][name] = {"type": "const", "val": value}
return config
def set_perform_initial_validation(config: Dict[Any, Any], init_val: bool) -> Dict[Any, Any]:
config = config.copy()
config["perform_initial_validation"] = init_val
return config
def set_pod_spec(config: Dict[Any, Any], pod_spec: Dict[Any, Any]) -> Dict[Any, Any]:
config = config.copy()
config.setdefault("environment", {})
config["environment"]["pod_spec"] = pod_spec
return config
def set_profiling_enabled(config: Dict[Any, Any]) -> Dict[Any, Any]:
config = config.copy()
config.setdefault("profiling", {})
config["profiling"]["enabled"] = True
return config
def set_entrypoint(config: Dict[Any, Any], entrypoint: str) -> Dict[Any, Any]:
config = config.copy()
config["entrypoint"] = entrypoint
return config
| [
"os.path.dirname",
"determined.common.util.safe_load_yaml_with_exceptions",
"os.environ.get",
"pathlib.Path"
] | [((703, 734), 'os.environ.get', 'os.environ.get', (['"""TF1_CPU_IMAGE"""'], {}), "('TF1_CPU_IMAGE')\n", (717, 734), False, 'import os\n'), ((776, 807), 'os.environ.get', 'os.environ.get', (['"""TF2_CPU_IMAGE"""'], {}), "('TF2_CPU_IMAGE')\n", (790, 807), False, 'import os\n'), ((849, 880), 'os.environ.get', 'os.environ.get', (['"""TF1_GPU_IMAGE"""'], {}), "('TF1_GPU_IMAGE')\n", (863, 880), False, 'import os\n'), ((922, 953), 'os.environ.get', 'os.environ.get', (['"""TF2_GPU_IMAGE"""'], {}), "('TF2_GPU_IMAGE')\n", (936, 953), False, 'import os\n'), ((993, 1036), 'os.environ.get', 'os.environ.get', (['"""DET_TEST_GPU_ENABLED"""', '"""1"""'], {}), "('DET_TEST_GPU_ENABLED', '1')\n", (1007, 1036), False, 'import os\n'), ((1179, 1204), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (1194, 1204), False, 'import os\n'), ((1288, 1313), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (1303, 1313), False, 'import os\n'), ((1415, 1440), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (1430, 1440), False, 'import os\n'), ((1549, 1574), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (1564, 1574), False, 'import os\n'), ((1671, 1696), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (1686, 1696), False, 'import os\n'), ((1803, 1828), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (1818, 1828), False, 'import os\n'), ((1935, 1960), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (1950, 1960), False, 'import os\n'), ((2068, 2093), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (2083, 2093), False, 'import os\n'), ((2206, 2231), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (2221, 2231), False, 'import os\n'), ((2339, 2364), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (2354, 2364), False, 'import os\n'), ((2470, 2495), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (2485, 2495), False, 'import os\n'), ((2622, 2660), 'determined.common.util.safe_load_yaml_with_exceptions', 'util.safe_load_yaml_with_exceptions', (['f'], {}), '(f)\n', (2657, 2660), False, 'from determined.common import util\n'), ((1080, 1094), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (1084, 1094), False, 'from pathlib import Path\n')] |
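# Illustrative composition of the helpers above on a minimal in-memory config
# (the keys shown are just enough for these three helpers to run):
sample = {"hyperparameters": {}, "searcher": {}}
sample = set_global_batch_size(sample, 64)
sample = set_slots_per_trial(sample, 2)
sample = set_random_seed(sample, 1)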
from django.contrib.contenttypes.models import ContentType
from rest_framework import serializers, exceptions
from greenbudget.lib.rest_framework_utils.fields import ModelChoiceField
from greenbudget.lib.rest_framework_utils.serializers import (
EnhancedModelSerializer)
from greenbudget.app.budget.models import BaseBudget
from greenbudget.app.common.serializers import (
EntitySerializer,
AbstractBulkUpdateSerializer,
create_bulk_create_serializer
)
from greenbudget.app.fringe.models import Fringe
from greenbudget.app.group.models import (
BudgetSubAccountGroup,
TemplateSubAccountGroup
)
from .models import SubAccount, BudgetSubAccount, TemplateSubAccount
class SubAccountSimpleSerializer(EnhancedModelSerializer):
id = serializers.IntegerField(read_only=True)
type = serializers.CharField(read_only=True)
identifier = serializers.CharField(
required=False,
allow_blank=False,
allow_null=True,
trim_whitespace=False
)
description = serializers.CharField(
required=False,
allow_blank=False,
allow_null=True,
trim_whitespace=False
)
name = serializers.CharField(
required=False,
allow_blank=True,
allow_null=False,
trim_whitespace=False
)
class Meta:
model = SubAccount
fields = ('id', 'name', 'identifier', 'type', 'description')
class SubAccountSerializer(SubAccountSimpleSerializer):
created_by = serializers.PrimaryKeyRelatedField(read_only=True)
updated_by = serializers.PrimaryKeyRelatedField(read_only=True)
created_at = serializers.DateTimeField(read_only=True)
updated_at = serializers.DateTimeField(read_only=True)
quantity = serializers.IntegerField(
required=False,
allow_null=True
)
rate = serializers.FloatField(required=False, allow_null=True)
multiplier = serializers.FloatField(required=False, allow_null=True)
estimated = serializers.FloatField(read_only=True)
unit = ModelChoiceField(
required=False,
choices=SubAccount.UNITS,
allow_null=True
)
budget = serializers.PrimaryKeyRelatedField(read_only=True)
subaccounts = serializers.PrimaryKeyRelatedField(many=True, read_only=True)
ancestors = EntitySerializer(many=True, read_only=True)
siblings = EntitySerializer(many=True, read_only=True)
account = serializers.IntegerField(read_only=True, source='account.pk')
object_id = serializers.IntegerField(read_only=True)
parent_type = serializers.ChoiceField(
choices=["account", "subaccount"],
read_only=True
)
fringes = serializers.PrimaryKeyRelatedField(
many=True,
required=False,
queryset=Fringe.objects.filter(budget__trash=False)
)
class Meta:
model = SubAccount
fields = SubAccountSimpleSerializer.Meta.fields + (
'identifier', 'name', 'created_by', 'updated_by', 'created_at',
'updated_at', 'quantity', 'rate', 'multiplier', 'unit', 'account',
'object_id', 'parent_type', 'ancestors', 'estimated', 'subaccounts',
'budget', 'siblings', 'fringes')
def validate(self, attrs):
if self.instance is not None and self.instance.subaccounts.count() != 0:
if any([field in attrs for field in self.instance.DERIVING_FIELDS]):
raise exceptions.ValidationError(
"Field can only be updated when the sub account is not "
"derived."
)
return super().validate(attrs)
class BudgetSubAccountSerializer(SubAccountSerializer):
actual = serializers.FloatField(read_only=True)
variance = serializers.FloatField(read_only=True)
group = serializers.PrimaryKeyRelatedField(
required=False,
allow_null=True,
queryset=BudgetSubAccountGroup.objects.all()
)
class Meta:
model = BudgetSubAccount
fields = SubAccountSerializer.Meta.fields + (
'actual', 'variance', 'group')
class TemplateSubAccountSerializer(SubAccountSerializer):
group = serializers.PrimaryKeyRelatedField(
required=False,
allow_null=True,
queryset=TemplateSubAccountGroup.objects.all()
)
class Meta:
model = TemplateSubAccount
fields = SubAccountSerializer.Meta.fields + ('group', )
def create_bulk_create_subaccounts_serializer(model_cls):
data_serializer = BudgetSubAccountSerializer
if model_cls is TemplateSubAccount:
data_serializer = TemplateSubAccountSerializer
base_serializer = create_bulk_create_serializer(data_serializer)
class BulkCreateSubAccountsSerializer(base_serializer):
class Meta(base_serializer.Meta):
model = BaseBudget
def get_serializer_context(self, instance):
return {'parent': instance}
def perform_save(self, serializer, instance, validated_data):
# Note that the updated_by argument is the user updating the
# Account by adding new SubAccount(s), so the SubAccount(s)
# should be denoted as having been created by this user.
return serializer.save(
updated_by=validated_data['updated_by'],
created_by=validated_data['updated_by'],
object_id=instance.pk,
content_type=ContentType.objects.get_for_model(model_cls),
parent=instance,
budget=instance.budget
)
return BulkCreateSubAccountsSerializer
def create_subaccount_bulk_change_serializer(model_cls):
base_serializer = BudgetSubAccountSerializer
if model_cls is TemplateSubAccount:
base_serializer = TemplateSubAccountSerializer
class SubAccountBulkChangeSerializer(base_serializer):
id = serializers.PrimaryKeyRelatedField(
required=True,
queryset=model_cls.objects.all()
)
def validate_id(self, instance):
account = self.parent.parent.instance
if account != instance.parent:
raise exceptions.ValidationError(
"The sub-account %s does not belong to account %s."
% (instance.pk, account.pk)
)
return instance
return SubAccountBulkChangeSerializer
def create_bulk_update_subaccounts_serializer(model_cls):
class BulkUpdateSubAccountsSerializer(AbstractBulkUpdateSerializer):
data = create_subaccount_bulk_change_serializer(model_cls)(
many=True, nested=True)
class Meta:
model = BaseBudget
fields = ('data', )
def update(self, instance, validated_data):
for subaccount, change in validated_data['data']:
serializer = SubAccountSerializer(
instance=subaccount,
data=change,
partial=True
)
serializer.is_valid(raise_exception=True)
serializer.save(
updated_by=validated_data['updated_by'],
suppress_budget_update=validated_data.get(
'suppress_budget_update', False)
)
return instance
return BulkUpdateSubAccountsSerializer
| [
"rest_framework.serializers.DateTimeField",
"rest_framework.serializers.IntegerField",
"rest_framework.serializers.PrimaryKeyRelatedField",
"django.contrib.contenttypes.models.ContentType.objects.get_for_model",
"greenbudget.lib.rest_framework_utils.fields.ModelChoiceField",
"greenbudget.app.common.serializers.create_bulk_create_serializer",
"greenbudget.app.group.models.BudgetSubAccountGroup.objects.all",
"greenbudget.app.group.models.TemplateSubAccountGroup.objects.all",
"rest_framework.serializers.CharField",
"rest_framework.exceptions.ValidationError",
"greenbudget.app.fringe.models.Fringe.objects.filter",
"rest_framework.serializers.FloatField",
"greenbudget.app.common.serializers.EntitySerializer",
"rest_framework.serializers.ChoiceField"
] | [((760, 800), 'rest_framework.serializers.IntegerField', 'serializers.IntegerField', ([], {'read_only': '(True)'}), '(read_only=True)\n', (784, 800), False, 'from rest_framework import serializers, exceptions\n'), ((812, 849), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {'read_only': '(True)'}), '(read_only=True)\n', (833, 849), False, 'from rest_framework import serializers, exceptions\n'), ((867, 967), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {'required': '(False)', 'allow_blank': '(False)', 'allow_null': '(True)', 'trim_whitespace': '(False)'}), '(required=False, allow_blank=False, allow_null=True,\n trim_whitespace=False)\n', (888, 967), False, 'from rest_framework import serializers, exceptions\n'), ((1020, 1120), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {'required': '(False)', 'allow_blank': '(False)', 'allow_null': '(True)', 'trim_whitespace': '(False)'}), '(required=False, allow_blank=False, allow_null=True,\n trim_whitespace=False)\n', (1041, 1120), False, 'from rest_framework import serializers, exceptions\n'), ((1166, 1266), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {'required': '(False)', 'allow_blank': '(True)', 'allow_null': '(False)', 'trim_whitespace': '(False)'}), '(required=False, allow_blank=True, allow_null=False,\n trim_whitespace=False)\n', (1187, 1266), False, 'from rest_framework import serializers, exceptions\n'), ((1489, 1539), 'rest_framework.serializers.PrimaryKeyRelatedField', 'serializers.PrimaryKeyRelatedField', ([], {'read_only': '(True)'}), '(read_only=True)\n', (1523, 1539), False, 'from rest_framework import serializers, exceptions\n'), ((1557, 1607), 'rest_framework.serializers.PrimaryKeyRelatedField', 'serializers.PrimaryKeyRelatedField', ([], {'read_only': '(True)'}), '(read_only=True)\n', (1591, 1607), False, 'from rest_framework import serializers, exceptions\n'), ((1625, 1666), 'rest_framework.serializers.DateTimeField', 'serializers.DateTimeField', ([], {'read_only': '(True)'}), '(read_only=True)\n', (1650, 1666), False, 'from rest_framework import serializers, exceptions\n'), ((1684, 1725), 'rest_framework.serializers.DateTimeField', 'serializers.DateTimeField', ([], {'read_only': '(True)'}), '(read_only=True)\n', (1709, 1725), False, 'from rest_framework import serializers, exceptions\n'), ((1741, 1798), 'rest_framework.serializers.IntegerField', 'serializers.IntegerField', ([], {'required': '(False)', 'allow_null': '(True)'}), '(required=False, allow_null=True)\n', (1765, 1798), False, 'from rest_framework import serializers, exceptions\n'), ((1832, 1887), 'rest_framework.serializers.FloatField', 'serializers.FloatField', ([], {'required': '(False)', 'allow_null': '(True)'}), '(required=False, allow_null=True)\n', (1854, 1887), False, 'from rest_framework import serializers, exceptions\n'), ((1905, 1960), 'rest_framework.serializers.FloatField', 'serializers.FloatField', ([], {'required': '(False)', 'allow_null': '(True)'}), '(required=False, allow_null=True)\n', (1927, 1960), False, 'from rest_framework import serializers, exceptions\n'), ((1977, 2015), 'rest_framework.serializers.FloatField', 'serializers.FloatField', ([], {'read_only': '(True)'}), '(read_only=True)\n', (1999, 2015), False, 'from rest_framework import serializers, exceptions\n'), ((2027, 2102), 'greenbudget.lib.rest_framework_utils.fields.ModelChoiceField', 'ModelChoiceField', ([], {'required': '(False)', 'choices': 'SubAccount.UNITS', 'allow_null': '(True)'}), 
'(required=False, choices=SubAccount.UNITS, allow_null=True)\n', (2043, 2102), False, 'from greenbudget.lib.rest_framework_utils.fields import ModelChoiceField\n'), ((2146, 2196), 'rest_framework.serializers.PrimaryKeyRelatedField', 'serializers.PrimaryKeyRelatedField', ([], {'read_only': '(True)'}), '(read_only=True)\n', (2180, 2196), False, 'from rest_framework import serializers, exceptions\n'), ((2215, 2276), 'rest_framework.serializers.PrimaryKeyRelatedField', 'serializers.PrimaryKeyRelatedField', ([], {'many': '(True)', 'read_only': '(True)'}), '(many=True, read_only=True)\n', (2249, 2276), False, 'from rest_framework import serializers, exceptions\n'), ((2293, 2336), 'greenbudget.app.common.serializers.EntitySerializer', 'EntitySerializer', ([], {'many': '(True)', 'read_only': '(True)'}), '(many=True, read_only=True)\n', (2309, 2336), False, 'from greenbudget.app.common.serializers import EntitySerializer, AbstractBulkUpdateSerializer, create_bulk_create_serializer\n'), ((2352, 2395), 'greenbudget.app.common.serializers.EntitySerializer', 'EntitySerializer', ([], {'many': '(True)', 'read_only': '(True)'}), '(many=True, read_only=True)\n', (2368, 2395), False, 'from greenbudget.app.common.serializers import EntitySerializer, AbstractBulkUpdateSerializer, create_bulk_create_serializer\n'), ((2410, 2471), 'rest_framework.serializers.IntegerField', 'serializers.IntegerField', ([], {'read_only': '(True)', 'source': '"""account.pk"""'}), "(read_only=True, source='account.pk')\n", (2434, 2471), False, 'from rest_framework import serializers, exceptions\n'), ((2488, 2528), 'rest_framework.serializers.IntegerField', 'serializers.IntegerField', ([], {'read_only': '(True)'}), '(read_only=True)\n', (2512, 2528), False, 'from rest_framework import serializers, exceptions\n'), ((2547, 2621), 'rest_framework.serializers.ChoiceField', 'serializers.ChoiceField', ([], {'choices': "['account', 'subaccount']", 'read_only': '(True)'}), "(choices=['account', 'subaccount'], read_only=True)\n", (2570, 2621), False, 'from rest_framework import serializers, exceptions\n'), ((3668, 3706), 'rest_framework.serializers.FloatField', 'serializers.FloatField', ([], {'read_only': '(True)'}), '(read_only=True)\n', (3690, 3706), False, 'from rest_framework import serializers, exceptions\n'), ((3722, 3760), 'rest_framework.serializers.FloatField', 'serializers.FloatField', ([], {'read_only': '(True)'}), '(read_only=True)\n', (3744, 3760), False, 'from rest_framework import serializers, exceptions\n'), ((4625, 4671), 'greenbudget.app.common.serializers.create_bulk_create_serializer', 'create_bulk_create_serializer', (['data_serializer'], {}), '(data_serializer)\n', (4654, 4671), False, 'from greenbudget.app.common.serializers import EntitySerializer, AbstractBulkUpdateSerializer, create_bulk_create_serializer\n'), ((2754, 2796), 'greenbudget.app.fringe.models.Fringe.objects.filter', 'Fringe.objects.filter', ([], {'budget__trash': '(False)'}), '(budget__trash=False)\n', (2775, 2796), False, 'from greenbudget.app.fringe.models import Fringe\n'), ((3875, 3910), 'greenbudget.app.group.models.BudgetSubAccountGroup.objects.all', 'BudgetSubAccountGroup.objects.all', ([], {}), '()\n', (3908, 3910), False, 'from greenbudget.app.group.models import BudgetSubAccountGroup, TemplateSubAccountGroup\n'), ((4238, 4275), 'greenbudget.app.group.models.TemplateSubAccountGroup.objects.all', 'TemplateSubAccountGroup.objects.all', ([], {}), '()\n', (4273, 4275), False, 'from greenbudget.app.group.models import BudgetSubAccountGroup, 
TemplateSubAccountGroup\n'), ((3404, 3501), 'rest_framework.exceptions.ValidationError', 'exceptions.ValidationError', (['"""Field can only be updated when the sub account is not derived."""'], {}), "(\n 'Field can only be updated when the sub account is not derived.')\n", (3430, 3501), False, 'from rest_framework import serializers, exceptions\n'), ((6130, 6246), 'rest_framework.exceptions.ValidationError', 'exceptions.ValidationError', (["('The sub-account %s does not belong to account %s.' % (instance.pk,\n account.pk))"], {}), "(\n 'The sub-account %s does not belong to account %s.' % (instance.pk,\n account.pk))\n", (6156, 6246), False, 'from rest_framework import serializers, exceptions\n'), ((5403, 5447), 'django.contrib.contenttypes.models.ContentType.objects.get_for_model', 'ContentType.objects.get_for_model', (['model_cls'], {}), '(model_cls)\n', (5436, 5447), False, 'from django.contrib.contenttypes.models import ContentType\n')] |
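# Illustrative: each factory above specializes its serializer on a concrete model
# class, so the classes are meant to be built once at import time rather than per
# request (hypothetical names; requires the Django app registry, hence commented out):
# BulkCreateBudgetSubAccounts = create_bulk_create_subaccounts_serializer(BudgetSubAccount)
# BulkUpdateTemplateSubAccounts = create_bulk_update_subaccounts_serializer(TemplateSubAccount)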
import contextlib
import logging
import typing
from typing import Any, Dict, Tuple
import attr
from dbnd._core.configuration import get_dbnd_project_config
from dbnd._core.constants import (
RESULT_PARAM,
DbndTargetOperationStatus,
DbndTargetOperationType,
TaskRunState,
)
from dbnd._core.current import (
current_task_run,
get_databand_run,
is_verbose,
try_get_current_task,
)
from dbnd._core.errors.errors_utils import log_exception
from dbnd._core.log.external_exception_logging import log_exception_to_server
from dbnd._core.parameter.parameter_definition import ParameterDefinition
from dbnd._core.parameter.parameter_value import ParameterFilters
from dbnd._core.settings import TrackingConfig
from dbnd._core.task.tracking_task import TrackingTask
from dbnd._core.task_build.task_context import try_get_current_task
from dbnd._core.task_build.task_definition import TaskDefinition
from dbnd._core.task_build.task_results import FuncResultParameter
from dbnd._core.task_run.task_run import TaskRun
from dbnd._core.task_run.task_run_error import TaskRunError
from dbnd._core.utils.callable_spec import args_to_kwargs
from dbnd._core.utils.timezone import utcnow
from targets import InMemoryTarget, Target
from targets.value_meta import ValueMetaConf
from targets.values import get_value_type_of_obj
if typing.TYPE_CHECKING:
from dbnd._core.task_build.task_decorator import TaskDecorator
logger = logging.getLogger(__name__)
@attr.s
class TrackedFuncCallWithResult(object):
call_args = attr.ib() # type: Tuple[Any]
call_kwargs = attr.ib() # type: Dict[str,Any]
callable = attr.ib()
result = attr.ib(default=None)
def set_result(self, value):
self.result = value
return value
def invoke(self):
func = self.callable
return func(*self.call_args, **self.call_kwargs)
class CallableTrackingManager(object):
def __init__(self, task_decorator):
# type: (CallableTrackingManager, TaskDecorator) -> None
self.task_decorator = task_decorator
self._tracking_task_definition = None
self._call_count = 0
self._call_as_func = False
self._max_call_count = get_dbnd_project_config().max_calls_per_run
@property
def callable(self):
return self.task_decorator.class_or_func
def get_tracking_task_definition(self):
if not self._tracking_task_definition:
self._tracking_task_definition = self._build_tracking_task_definition()
return self._tracking_task_definition
def _build_tracking_task_definition(self):
return TaskDefinition.from_task_decorator(task_decorator=self.task_decorator)
def _call_count_limit_exceeded(self):
if not self._call_as_func:
self._call_count += 1
if self._call_count > self._max_call_count:
logger.info(
"Reached maximum tracking limit of {} tasks. Running function regularly.".format(
self._max_call_count
)
)
self._call_as_func = True
return self._call_as_func
@contextlib.contextmanager
def tracking_context(self, call_args, call_kwargs):
        user_code_called = False  # whether we reached the execution of the user code
        user_code_finished = False  # whether the user code finished executing
func_call = None
try:
# 1. check that we don't have too many calls
if self._call_count_limit_exceeded():
yield _do_nothing_decorator
return
# 2. Start or reuse existing "main tracking task" that is root for tracked tasks
if not try_get_current_task():
"""
try to get existing task, and if not exists - try to get/create inplace_task_run
"""
from dbnd._core.tracking.script_tracking_manager import (
try_get_inplace_tracking_task_run,
)
                inplace_tracking_task = try_get_inplace_tracking_task_run()
                if not inplace_tracking_task:
# we didn't manage to start inplace tracking task run, we will not be able to track
yield _do_nothing_decorator
return
tracking_task_definition = self.get_tracking_task_definition()
callable_spec = tracking_task_definition.task_decorator.get_callable_spec()
func_call = TrackedFuncCallWithResult(
callable=self.callable,
call_args=tuple(call_args), # prevent original call_args modification
call_kwargs=dict(call_kwargs), # prevent original kwargs modification
)
# replace any position argument with kwarg if it possible
args, kwargs = args_to_kwargs(
callable_spec.args, func_call.call_args, func_call.call_kwargs,
)
# instantiate inline task
task = TrackingTask.for_func(tracking_task_definition, args, kwargs)
# update upstream/downstream relations - needed for correct tracking
# we can have the task as upstream , as it was executed already
parent_task = current_task_run().task
if not parent_task.task_dag.has_upstream(task):
parent_task.set_upstream(task)
# checking if any of the inputs are the outputs of previous task.
# we can add that task as upstream.
dbnd_run = get_databand_run()
call_kwargs_as_targets = dbnd_run.target_origin.get_for_map(kwargs)
for value_origin in call_kwargs_as_targets.values():
up_task = value_origin.origin_target.task
task.set_upstream(up_task)
# creating task_run as a task we found mid-run
task_run = dbnd_run.create_task_run_at_execution_time(
task, task_engine=current_task_run().task_engine
)
should_capture_log = TrackingConfig.current().capture_tracking_log
with task_run.runner.task_run_execution_context(
handle_sigterm=True, capture_log=should_capture_log
):
task_run.set_task_run_state(state=TaskRunState.RUNNING)
_log_inputs(task_run)
# if we reached this line, then all tracking initialization is
# finished successfully, and we're going to execute user code
user_code_called = True
try:
# tracking_context is context manager - user code will run on yield
yield func_call.set_result
# if we reached this line, this means that user code finished
# successfully without any exceptions
user_code_finished = True
except Exception as ex:
task_run.finished_time = utcnow()
error = TaskRunError.build_from_ex(ex, task_run)
task_run.set_task_run_state(TaskRunState.FAILED, error=error)
raise
else:
task_run.finished_time = utcnow()
# func_call.result should contain result, log it
_log_result(task_run, func_call.result)
task_run.set_task_run_state(TaskRunState.SUCCESS)
except Exception:
if user_code_called and not user_code_finished:
# if we started to call the user code and not got to user_code_finished
# line - it means there was user code exception - so just re-raise it
raise
# else it's either we didn't reached calling user code, or already passed it
# then it's some dbnd tracking error - just log it
if func_call:
_handle_tracking_error("tracking-init", func_call)
else:
log_exception_to_server()
# if we didn't reached user_code_called=True line - there was an error during
# dbnd tracking initialization, so nothing is done - user function wasn't called yet
if not user_code_called:
# tracking_context is context manager - user code will run on yield
yield _do_nothing_decorator
return
def _handle_tracking_error(msg, func_call=None):
log_exception_to_server()
location = " for %s" % func_call.callable if func_call else ""
msg = "Failed during dbnd %s for %s, ignoring, and continue without tracking" % (
msg,
location,
)
if is_verbose():
logger.warning(
msg, exc_info=True,
)
else:
logger.info(msg)
def _do_nothing_decorator(f):
return f
def _log_inputs(task_run):
"""
For tracking mode. Logs InMemoryTarget inputs.
"""
try:
params = task_run.task._params
for param_value in params.get_param_values(ParameterFilters.INPUTS):
param, value = param_value.parameter, param_value.value
            if isinstance(value, InMemoryTarget):  # the value, not the ParameterValue wrapper, is the target
try:
param = param.modify(
value_meta_conf=ValueMetaConf(
log_preview=True, log_schema=True,
)
)
task_run.tracker.log_parameter_data(
parameter=param,
                        target=value,
value=value,
operation_type=DbndTargetOperationType.read,
operation_status=DbndTargetOperationStatus.OK,
)
except Exception as ex:
log_exception(
"Failed to log input param to tracking store.",
ex=ex,
non_critical=True,
)
except Exception as ex:
log_exception(
"Failed to log input params to tracking store.", ex=ex, non_critical=True
)
def _log_result(task_run, result):
# type: (TaskRun, Any) -> None
"""
For tracking mode. Logs the task result and adds it to the target_origin map to support relationships between
dynamic tasks.
"""
try:
result_param = task_run.task.task_params.get_param_value(RESULT_PARAM)
if not result_param:
logger.debug(
"No result params to log for task {}".format(task_run.task_af_id)
)
return
# we now the parameter value is a target because this is an output param
# the target is created in the task creation
result_param_def, result_target = result_param.parameter, result_param.value
# spread result into relevant fields.
if isinstance(result_param_def, FuncResultParameter):
# assign all returned values to relevant band Outputs
if result is None:
return
for result_name, value in result_param_def.named_results(result):
# we now the parameter value is a target because this is an output param
# the target is created in the task creation
parameter_value = task_run.task.task_params.get_param_value(result_name)
_log_parameter_value(
task_run,
parameter_definition=parameter_value.parameter,
target=parameter_value.value,
value=value,
)
else:
_log_parameter_value(
task_run,
parameter_definition=result_param_def,
target=result_target,
value=result,
)
except Exception as ex:
log_exception(
"Failed to log result to tracking store.", ex=ex, non_critical=True
)
def _log_parameter_value(task_run, parameter_definition, target, value):
# type: (TaskRun, ParameterDefinition, Target, Any) -> None
# make sure it will be logged correctly
parameter_definition = parameter_definition.modify(
value_meta_conf=ValueMetaConf(log_preview=True, log_schema=True)
)
try:
# case what if result is Proxy
value_type = get_value_type_of_obj(value, parameter_definition.value_type)
task_run.run.target_origin.add(target, value, value_type)
except Exception as ex:
log_exception(
"Failed to register result to target tracking.", ex=ex, non_critical=True
)
try:
task_run.tracker.log_parameter_data(
parameter=parameter_definition, # was: task_run.task.task_definition.task_class.result,
target=target,
value=value,
operation_type=DbndTargetOperationType.write, # is it write? (or log?)
operation_status=DbndTargetOperationStatus.OK,
)
except Exception as ex:
log_exception(
"Failed to log result to tracking store.", ex=ex, non_critical=True
)
| [
"logging.getLogger",
"dbnd._core.tracking.script_tracking_manager.try_get_inplace_tracking_task_run",
"dbnd._core.current.current_task_run",
"dbnd._core.utils.callable_spec.args_to_kwargs",
"targets.values.get_value_type_of_obj",
"dbnd._core.current.is_verbose",
"dbnd._core.task_build.task_definition.TaskDefinition.from_task_decorator",
"dbnd._core.errors.errors_utils.log_exception",
"dbnd._core.log.external_exception_logging.log_exception_to_server",
"dbnd._core.utils.timezone.utcnow",
"dbnd._core.settings.TrackingConfig.current",
"dbnd._core.task.tracking_task.TrackingTask.for_func",
"targets.value_meta.ValueMetaConf",
"dbnd._core.task_run.task_run_error.TaskRunError.build_from_ex",
"dbnd._core.current.get_databand_run",
"dbnd._core.configuration.get_dbnd_project_config",
"dbnd._core.task_build.task_context.try_get_current_task",
"attr.ib"
] | [((1443, 1470), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1460, 1470), False, 'import logging\n'), ((1538, 1547), 'attr.ib', 'attr.ib', ([], {}), '()\n', (1545, 1547), False, 'import attr\n'), ((1587, 1596), 'attr.ib', 'attr.ib', ([], {}), '()\n', (1594, 1596), False, 'import attr\n'), ((1636, 1645), 'attr.ib', 'attr.ib', ([], {}), '()\n', (1643, 1645), False, 'import attr\n'), ((1659, 1680), 'attr.ib', 'attr.ib', ([], {'default': 'None'}), '(default=None)\n', (1666, 1680), False, 'import attr\n'), ((8459, 8484), 'dbnd._core.log.external_exception_logging.log_exception_to_server', 'log_exception_to_server', ([], {}), '()\n', (8482, 8484), False, 'from dbnd._core.log.external_exception_logging import log_exception_to_server\n'), ((8682, 8694), 'dbnd._core.current.is_verbose', 'is_verbose', ([], {}), '()\n', (8692, 8694), False, 'from dbnd._core.current import current_task_run, get_databand_run, is_verbose, try_get_current_task\n'), ((2623, 2693), 'dbnd._core.task_build.task_definition.TaskDefinition.from_task_decorator', 'TaskDefinition.from_task_decorator', ([], {'task_decorator': 'self.task_decorator'}), '(task_decorator=self.task_decorator)\n', (2657, 2693), False, 'from dbnd._core.task_build.task_definition import TaskDefinition\n'), ((12375, 12436), 'targets.values.get_value_type_of_obj', 'get_value_type_of_obj', (['value', 'parameter_definition.value_type'], {}), '(value, parameter_definition.value_type)\n', (12396, 12436), False, 'from targets.values import get_value_type_of_obj\n'), ((2206, 2231), 'dbnd._core.configuration.get_dbnd_project_config', 'get_dbnd_project_config', ([], {}), '()\n', (2229, 2231), False, 'from dbnd._core.configuration import get_dbnd_project_config\n'), ((4874, 4952), 'dbnd._core.utils.callable_spec.args_to_kwargs', 'args_to_kwargs', (['callable_spec.args', 'func_call.call_args', 'func_call.call_kwargs'], {}), '(callable_spec.args, func_call.call_args, func_call.call_kwargs)\n', (4888, 4952), False, 'from dbnd._core.utils.callable_spec import args_to_kwargs\n'), ((5042, 5103), 'dbnd._core.task.tracking_task.TrackingTask.for_func', 'TrackingTask.for_func', (['tracking_task_definition', 'args', 'kwargs'], {}), '(tracking_task_definition, args, kwargs)\n', (5063, 5103), False, 'from dbnd._core.task.tracking_task import TrackingTask\n'), ((5569, 5587), 'dbnd._core.current.get_databand_run', 'get_databand_run', ([], {}), '()\n', (5585, 5587), False, 'from dbnd._core.current import current_task_run, get_databand_run, is_verbose, try_get_current_task\n'), ((10038, 10130), 'dbnd._core.errors.errors_utils.log_exception', 'log_exception', (['"""Failed to log input params to tracking store."""'], {'ex': 'ex', 'non_critical': '(True)'}), "('Failed to log input params to tracking store.', ex=ex,\n non_critical=True)\n", (10051, 10130), False, 'from dbnd._core.errors.errors_utils import log_exception\n'), ((11882, 11968), 'dbnd._core.errors.errors_utils.log_exception', 'log_exception', (['"""Failed to log result to tracking store."""'], {'ex': 'ex', 'non_critical': '(True)'}), "('Failed to log result to tracking store.', ex=ex,\n non_critical=True)\n", (11895, 11968), False, 'from dbnd._core.errors.errors_utils import log_exception\n'), ((12250, 12298), 'targets.value_meta.ValueMetaConf', 'ValueMetaConf', ([], {'log_preview': '(True)', 'log_schema': '(True)'}), '(log_preview=True, log_schema=True)\n', (12263, 12298), False, 'from targets.value_meta import ValueMetaConf\n'), ((12539, 12631), 
'dbnd._core.errors.errors_utils.log_exception', 'log_exception', (['"""Failed to register result to target tracking."""'], {'ex': 'ex', 'non_critical': '(True)'}), "('Failed to register result to target tracking.', ex=ex,\n non_critical=True)\n", (12552, 12631), False, 'from dbnd._core.errors.errors_utils import log_exception\n'), ((13047, 13133), 'dbnd._core.errors.errors_utils.log_exception', 'log_exception', (['"""Failed to log result to tracking store."""'], {'ex': 'ex', 'non_critical': '(True)'}), "('Failed to log result to tracking store.', ex=ex,\n non_critical=True)\n", (13060, 13133), False, 'from dbnd._core.errors.errors_utils import log_exception\n'), ((3723, 3745), 'dbnd._core.task_build.task_context.try_get_current_task', 'try_get_current_task', ([], {}), '()\n', (3743, 3745), False, 'from dbnd._core.task_build.task_context import try_get_current_task\n'), ((4073, 4108), 'dbnd._core.tracking.script_tracking_manager.try_get_inplace_tracking_task_run', 'try_get_inplace_tracking_task_run', ([], {}), '()\n', (4106, 4108), False, 'from dbnd._core.tracking.script_tracking_manager import try_get_inplace_tracking_task_run\n'), ((5288, 5306), 'dbnd._core.current.current_task_run', 'current_task_run', ([], {}), '()\n', (5304, 5306), False, 'from dbnd._core.current import current_task_run, get_databand_run, is_verbose, try_get_current_task\n'), ((6074, 6098), 'dbnd._core.settings.TrackingConfig.current', 'TrackingConfig.current', ([], {}), '()\n', (6096, 6098), False, 'from dbnd._core.settings import TrackingConfig\n'), ((7256, 7264), 'dbnd._core.utils.timezone.utcnow', 'utcnow', ([], {}), '()\n', (7262, 7264), False, 'from dbnd._core.utils.timezone import utcnow\n'), ((8027, 8052), 'dbnd._core.log.external_exception_logging.log_exception_to_server', 'log_exception_to_server', ([], {}), '()\n', (8050, 8052), False, 'from dbnd._core.log.external_exception_logging import log_exception_to_server\n'), ((5995, 6013), 'dbnd._core.current.current_task_run', 'current_task_run', ([], {}), '()\n', (6011, 6013), False, 'from dbnd._core.current import current_task_run, get_databand_run, is_verbose, try_get_current_task\n'), ((7002, 7010), 'dbnd._core.utils.timezone.utcnow', 'utcnow', ([], {}), '()\n', (7008, 7010), False, 'from dbnd._core.utils.timezone import utcnow\n'), ((7040, 7080), 'dbnd._core.task_run.task_run_error.TaskRunError.build_from_ex', 'TaskRunError.build_from_ex', (['ex', 'task_run'], {}), '(ex, task_run)\n', (7066, 7080), False, 'from dbnd._core.task_run.task_run_error import TaskRunError\n'), ((9819, 9910), 'dbnd._core.errors.errors_utils.log_exception', 'log_exception', (['"""Failed to log input param to tracking store."""'], {'ex': 'ex', 'non_critical': '(True)'}), "('Failed to log input param to tracking store.', ex=ex,\n non_critical=True)\n", (9832, 9910), False, 'from dbnd._core.errors.errors_utils import log_exception\n'), ((9291, 9339), 'targets.value_meta.ValueMetaConf', 'ValueMetaConf', ([], {'log_preview': '(True)', 'log_schema': '(True)'}), '(log_preview=True, log_schema=True)\n', (9304, 9339), False, 'from targets.value_meta import ValueMetaConf\n')] |
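# Illustrative stand-alone sketch (not dbnd API) of the pattern tracking_context
# uses above: yield a setter callback to the caller, then inspect the captured
# result after the user code block returns.
import contextlib

@contextlib.contextmanager
def capture_result():
    box = {}
    def set_result(value):
        box["result"] = value
        return value
    yield set_result
    print("user code returned:", box.get("result"))

with capture_result() as set_result:
    set_result(21 * 2)  # stands in for the tracked user function's return value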
import os
import logging
from json import loads, dumps
from datetime import timedelta
from argparse import ArgumentParser
from redis import Redis
from flask import Response, Flask, request
app = Flask(__name__)
log = logging.getLogger(__name__)
parser = ArgumentParser()
parser.add_argument("-a", "--address",
action="store", dest="address",
type=str, required=True,
help="Address for api")
parser.add_argument("-p", "--port",
action="store", dest="port",
type=str, required=True,
help="Port for api")
parser.add_argument("-c", "--crt",
action="store", dest="cert",
type=str, required=False,
help="Path to certificate for this API")
parser.add_argument("-k", "--key",
action="store", dest="key",
type=str, required=False,
help="Path to key of certificate used by this API")
parser.add_argument("-rp", "--redis-port",
action="store", dest="redis-port",
type=str, required=True,
help="Port for Redis client")
args = vars(parser.parse_args())
api_address = args["address"]
api_port = args["port"]
api_cert = args["cert"]
api_key = args["key"]
redis_port = args["redis-port"]
r = Redis(port=redis_port, charset="utf-8", decode_responses=True)
@app.route("/hash", methods=['POST'])
def create_redis_hash():
data = loads(request.data)
success = r.hmset(data["key"], data["pairs"])
if data.get("expire") is not None:
expiration = timedelta(**data.get("expire"))
r.expire(data["key"], expiration)
response_body = {"success": success}
response_body[data["key"]] = r.hgetall(data["key"])
return Response(dumps(response_body), status=200, mimetype="application/json")
@app.route("/hash", methods=['PUT'])
def update_redis_hash():
data = loads(request.data)
success = r.hmset(data["key"], data["pairs"])
if data.get("expire") is not None:
expiration = timedelta(**data.get("expire"))
r.expire(data["key"], expiration)
if data.get("newkey") is not None:
r.rename(data["key"], data["newkey"])
response_body = {"success": success}
if data.get("newkey") is not None:
response_body[data["newkey"]] = r.hgetall(data["newkey"])
else:
response_body[data["key"]] = r.hgetall(data["key"])
return Response(dumps(response_body), status=200, mimetype="application/json")
@app.route("/hash", methods=['GET'])
def get_redis_hash():
response_body = {"success": True}
key = request.headers.get("key")
response_body[key] = r.hgetall(key)
return Response(dumps(response_body), status=200, mimetype="application/json")
@app.route("/key", methods=['DELETE'])
def delete_redis_key():
status = 200
key = request.headers.get("key")
success = r.delete(key)
if not success:
status = 404
response_body = {"success": bool(success)}
return Response(dumps(response_body), status=status, mimetype="application/json")
@app.route("/list", methods=['POST'])
def create_redis_list():
data = loads(request.data)
strat = data.get("strategy")
if strat is not None and strat == "left":
length = r.lpush(data["key"], *data["values"])
else:
length = r.rpush(data["key"], *data["values"])
response_body = {"length": length}
response_body[data["key"]] = r.lrange(data["key"], 0, -1)
return Response(dumps(response_body), status=200, mimetype="application/json")
@app.route("/list", methods=['GET'])
def get_entire_list():
response_body = {"success": True}
key = request.headers.get("key")
response_body[key] = r.lrange(key, 0, -1)
return Response(dumps(response_body), status=200, mimetype="application/json")
@app.route("/list/<idx>", methods=['GET'])
def get_list_at_idx(idx):
response_body = {"success": True}
key = request.headers.get("key")
response_body[key] = {}
response_body[key][str(idx)] = r.lindex(key, idx)
return Response(dumps(response_body), status=200, mimetype="application/json")
@app.route("/set", methods=['POST'])
def create_add_set():
data = loads(request.data)
length = r.sadd(data["key"], *data["values"])
response_body = {"length": length}
response_body[data["key"]] = list(r.smembers(data["key"]))
return Response(dumps(response_body), status=200, mimetype="application/json")
@app.route("/set/<n_items>", methods=['GET'])
def get_n_items_set(n_items):
response_body = {"success": True}
key = request.headers.get("key")
    response_body[key] = list(r.srandmember(key, int(n_items)))  # cast the path segment to int
return Response(dumps(response_body), status=200, mimetype="application/json")
@app.route("/set", methods=['GET'])
def get_set():
response_body = {"success": True}
key = request.headers.get("key")
    response_body[key] = list(r.smembers(key))
return Response(dumps(response_body), status=200, mimetype="application/json")
def start_api(address, port, clnt_cert=None, clnt_key=None):
if clnt_cert is None or clnt_key is None:
app.run(host=address, port=port, debug=False)
else:
app.run(host=address, port=port,
ssl_context=(clnt_cert, clnt_key), debug=False)
if api_cert is None or api_key is None:
start_api(api_address, api_port)
else:
start_api(api_address, api_port, api_cert, api_key)
| [
"logging.getLogger",
"json.loads",
"argparse.ArgumentParser",
"flask.Flask",
"json.dumps",
"redis.Redis",
"flask.request.headers.get"
] | [((197, 212), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (202, 212), False, 'from flask import Response, Flask, request\n'), ((219, 246), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (236, 246), False, 'import logging\n'), ((257, 273), 'argparse.ArgumentParser', 'ArgumentParser', ([], {}), '()\n', (271, 273), False, 'from argparse import ArgumentParser\n'), ((1383, 1445), 'redis.Redis', 'Redis', ([], {'port': 'redis_port', 'charset': '"""utf-8"""', 'decode_responses': '(True)'}), "(port=redis_port, charset='utf-8', decode_responses=True)\n", (1388, 1445), False, 'from redis import Redis\n'), ((1522, 1541), 'json.loads', 'loads', (['request.data'], {}), '(request.data)\n', (1527, 1541), False, 'from json import loads, dumps\n'), ((1985, 2004), 'json.loads', 'loads', (['request.data'], {}), '(request.data)\n', (1990, 2004), False, 'from json import loads, dumps\n'), ((2681, 2707), 'flask.request.headers.get', 'request.headers.get', (['"""key"""'], {}), "('key')\n", (2700, 2707), False, 'from flask import Response, Flask, request\n'), ((2924, 2950), 'flask.request.headers.get', 'request.headers.get', (['"""key"""'], {}), "('key')\n", (2943, 2950), False, 'from flask import Response, Flask, request\n'), ((3231, 3250), 'json.loads', 'loads', (['request.data'], {}), '(request.data)\n', (3236, 3250), False, 'from json import loads, dumps\n'), ((3747, 3773), 'flask.request.headers.get', 'request.headers.get', (['"""key"""'], {}), "('key')\n", (3766, 3773), False, 'from flask import Response, Flask, request\n'), ((4024, 4050), 'flask.request.headers.get', 'request.headers.get', (['"""key"""'], {}), "('key')\n", (4043, 4050), False, 'from flask import Response, Flask, request\n'), ((4290, 4309), 'json.loads', 'loads', (['request.data'], {}), '(request.data)\n', (4295, 4309), False, 'from json import loads, dumps\n'), ((4678, 4704), 'flask.request.headers.get', 'request.headers.get', (['"""key"""'], {}), "('key')\n", (4697, 4704), False, 'from flask import Response, Flask, request\n'), ((4957, 4983), 'flask.request.headers.get', 'request.headers.get', (['"""key"""'], {}), "('key')\n", (4976, 4983), False, 'from flask import Response, Flask, request\n'), ((1847, 1867), 'json.dumps', 'dumps', (['response_body'], {}), '(response_body)\n', (1852, 1867), False, 'from json import loads, dumps\n'), ((2509, 2529), 'json.dumps', 'dumps', (['response_body'], {}), '(response_body)\n', (2514, 2529), False, 'from json import loads, dumps\n'), ((2769, 2789), 'json.dumps', 'dumps', (['response_body'], {}), '(response_body)\n', (2774, 2789), False, 'from json import loads, dumps\n'), ((3089, 3109), 'json.dumps', 'dumps', (['response_body'], {}), '(response_body)\n', (3094, 3109), False, 'from json import loads, dumps\n'), ((3574, 3594), 'json.dumps', 'dumps', (['response_body'], {}), '(response_body)\n', (3579, 3594), False, 'from json import loads, dumps\n'), ((3842, 3862), 'json.dumps', 'dumps', (['response_body'], {}), '(response_body)\n', (3847, 3862), False, 'from json import loads, dumps\n'), ((4155, 4175), 'json.dumps', 'dumps', (['response_body'], {}), '(response_body)\n', (4160, 4175), False, 'from json import loads, dumps\n'), ((4489, 4509), 'json.dumps', 'dumps', (['response_body'], {}), '(response_body)\n', (4494, 4509), False, 'from json import loads, dumps\n'), ((4793, 4813), 'json.dumps', 'dumps', (['response_body'], {}), '(response_body)\n', (4798, 4813), False, 'from json import loads, dumps\n'), ((5060, 5080), 'json.dumps', 'dumps', 
(['response_body'], {}), '(response_body)\n', (5065, 5080), False, 'from json import loads, dumps\n')] |
"""
启动此 spider 前需要手动启动 Chrome,cmd 命令如下:
cd 进入 Chrome 可执行文件 所在的目录
执行:chrome.exe --remote-debugging-port=9222
此时在浏览器窗口地址栏访问:http://127.0.0.1:9222/json,如果页面出现 json 数据,则表明手动启动成功
启动此 spider 后,注意与命令行交互!
在 settings 当中要做的:
# ROBOTSTXT_OBEY = False # 如果不关闭,parse 方法无法执行
# COOKIES_ENABLED = True # 以便 Request 值在传递时自动传递 cookies
# USER_AGENT = 一个合适的值
# DOWNLOADER_MIDDLEWARES 配置好以备 user agent 的自动变换
"""
import re
import json
import datetime
import scrapy
from scrapy.loader import ItemLoader
from urllib import parse
from ZhihuSpider.utils.browsezhihu import get_cookies
from ZhihuSpider import settings
from ZhihuSpider.items import ZhihuQuestionItem, ZhihuAnswerItem
class ZhihuSpider(scrapy.Spider):
name = 'zhihu'
allowed_domains = ['zhihu.com']
start_urls = ['http://zhihu.com/']
    # Generic request url template for the first page of answers to a question
# 0: question id, 1: offset, 2: limit
start_answer_urls = 'https://www.zhihu.com/api/v4/questions/{0}/answers?include=data%5B*%5D.is_normal%2Cadmin_closed_comment%2Creward_info%2Cis_collapsed%2Cannotation_action%2Cannotation_detail%2Ccollapse_reason%2Cis_sticky%2Ccollapsed_by%2Csuggest_edit%2Ccomment_count%2Ccan_comment%2Ccontent%2Ceditable_content%2Cattachment%2Cvoteup_count%2Creshipment_settings%2Ccomment_permission%2Ccreated_time%2Cupdated_time%2Creview_info%2Crelevant_info%2Cquestion%2Cexcerpt%2Cis_labeled%2Cpaid_info%2Cpaid_info_content%2Crelationship.is_authorized%2Cis_author%2Cvoting%2Cis_thanked%2Cis_nothelp%2Cis_recognized%3Bdata%5B*%5D.mark_infos%5B*%5D.url%3Bdata%5B*%5D.author.follower_count%2Cvip_info%2Cbadge%5B*%5D.topics%3Bdata%5B*%5D.settings.table_of_content.enabled&offset={1}&limit={2}&sort_by=default&platform=desktop'
headers = {
"HOST": "www.zhihu.com",
"Referer": "https://www.zhihu.com",
"User-Agent": settings.USER_AGENT
}
    # Extract every url on the home page that points to a question
def parse(self, response, **kwargs):
        # .extract() is a parsel Selector method; it pulls the data value out of each element in the selection
all_urls = response.css("a::attr(href)").extract()
        # urllib.parse.urljoin can merge a base url with a partial url into an absolute one
all_urls = [parse.urljoin(response.url, url) for url in all_urls]
        all_urls = filter(lambda x: x.startswith("https"), all_urls)
for url in all_urls:
            # (/|$) matches either a "/" or the end of the string
match_obj = re.match("(.*zhihu.com/question/(\d+))(/|$).*", url)
            if match_obj:  # the url points to a question page
question_url = match_obj.group(1)
question_id = match_obj.group(2)
yield scrapy.Request(question_url, callback=self.parse_question, headers=self.headers
, meta={"question_id": question_id, "url": question_url}) # meta 可以向下传递
def parse_question(self, response):
"""
        Extract the question item from a question page.
"""
        # With ItemLoader, every field value is collected as a list
item_loader = ItemLoader(item=ZhihuQuestionItem(), response=response)
item_loader.add_value("question_id", response.meta.get("question_id", 0)) # 使用 meta 来加载
item_loader.add_css("topics", "head > meta[name=keywords]::attr(content)")
item_loader.add_value("url", response.meta.get("url", ''))
item_loader.add_css("title", "h1.QuestionHeader-title::text")
item_loader.add_css("content", ".QuestionRichText span:nth-child(1)::text")
item_loader.add_css("answer_num", ".List-headerText > span::text, .ViewAll:nth-child(1) > a::text")
item_loader.add_css("comments_num", ".QuestionHeader-Comment button::text")
item_loader.add_css("watch_user_num", ".NumberBoard-itemValue::attr(title)")
item_loader.add_css("click_num", ".NumberBoard-itemValue::attr(title)")
        # On fetching create_time and update_time:
        # request the question's log url, then pass the item_loader contents above down as a meta dict,
        # and finally let get_create_update_of_question assemble and yield the question_item.
        # The unfinished partial implementation is sketched below:
# tmp = response.css(".QuestionHeader-menu > a").extract()[0]
# log_url = parse.urljoin(self.start_urls[0], tmp)
# yield scrapy.Request(log_url, callback=self.get_create_update_of_question, headers=self.headers, meta=......)
question_item = item_loader.load_item()
yield question_item
yield scrapy.Request(self.start_answer_urls.format(response.meta.get("question_id", ''), 0, 20)
, callback=self.parse_answer, headers=self.headers)
# def get_create_update_of_question(self, response):
# pass
def parse_answer(self, response):
"""
        Extract answer items from the answers API response.
"""
answer_json = json.loads(response.text)
is_end = answer_json["paging"]["is_end"]
next_url = answer_json["paging"]["next"]
for answer in answer_json["data"]:
answer_item = ZhihuAnswerItem()
answer_item["answer_id"] = answer["id"]
answer_item["url"] = answer["url"]
answer_item["question_id"] = answer["question"]["id"]
answer_item["author_id"] = answer["author"]["id"]
answer_item["content"] = answer["content"] if "content" in answer else None
answer_item["praise_num"] = answer["voteup_count"]
answer_item["comments_num"] = answer["comment_count"]
answer_item["create_time"] = answer["created_time"]
answer_item["update_time"] = answer["updated_time"]
answer_item["crawl_time"] = datetime.datetime.now()
yield answer_item
if not is_end:
yield scrapy.Request(next_url, callback=self.parse_answer, headers=self.headers)
def start_requests(self):
        # Before using selenium, start Chrome with the following cmd commands:
# cd "C:\Program Files\Google\Chrome\Application"
# chrome.exe --remote-debugging-port=9222
        # The python call below cannot be used because the command blocks waiting for a return value, unless a separate thread is used
# os.system('"C:\\Program Files\\Google\\Chrome\\Application\\chrome.exe" --remote-debugging-port=9222')
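        # A non-blocking alternative (illustrative sketch only, untested here):
        # subprocess.Popen returns immediately instead of waiting for Chrome to
        # exit, so it avoids the blocking problem described above:
        #     import subprocess
        #     subprocess.Popen([r'C:\Program Files\Google\Chrome\Application\chrome.exe',
        #                       '--remote-debugging-port=9222'])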
cookies = get_cookies()
yield scrapy.Request(url=self.start_urls[0], dont_filter=True, cookies=cookies)
| [
"ZhihuSpider.items.ZhihuAnswerItem",
"json.loads",
"ZhihuSpider.items.ZhihuQuestionItem",
"re.match",
"datetime.datetime.now",
"scrapy.Request",
"ZhihuSpider.utils.browsezhihu.get_cookies",
"urllib.parse.urljoin"
] | [((4606, 4631), 'json.loads', 'json.loads', (['response.text'], {}), '(response.text)\n', (4616, 4631), False, 'import json\n'), ((5969, 5982), 'ZhihuSpider.utils.browsezhihu.get_cookies', 'get_cookies', ([], {}), '()\n', (5980, 5982), False, 'from ZhihuSpider.utils.browsezhihu import get_cookies\n'), ((2082, 2114), 'urllib.parse.urljoin', 'parse.urljoin', (['response.url', 'url'], {}), '(response.url, url)\n', (2095, 2114), False, 'from urllib import parse\n'), ((2310, 2363), 're.match', 're.match', (['"""(.*zhihu.com/question/(\\\\d+))(/|$).*"""', 'url'], {}), "('(.*zhihu.com/question/(\\\\d+))(/|$).*', url)\n", (2318, 2363), False, 'import re\n'), ((4800, 4817), 'ZhihuSpider.items.ZhihuAnswerItem', 'ZhihuAnswerItem', ([], {}), '()\n', (4815, 4817), False, 'from ZhihuSpider.items import ZhihuQuestionItem, ZhihuAnswerItem\n'), ((5431, 5454), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (5452, 5454), False, 'import datetime\n'), ((5997, 6070), 'scrapy.Request', 'scrapy.Request', ([], {'url': 'self.start_urls[0]', 'dont_filter': '(True)', 'cookies': 'cookies'}), '(url=self.start_urls[0], dont_filter=True, cookies=cookies)\n', (6011, 6070), False, 'import scrapy\n'), ((2901, 2920), 'ZhihuSpider.items.ZhihuQuestionItem', 'ZhihuQuestionItem', ([], {}), '()\n', (2918, 2920), False, 'from ZhihuSpider.items import ZhihuQuestionItem, ZhihuAnswerItem\n'), ((5528, 5602), 'scrapy.Request', 'scrapy.Request', (['next_url'], {'callback': 'self.parse_answer', 'headers': 'self.headers'}), '(next_url, callback=self.parse_answer, headers=self.headers)\n', (5542, 5602), False, 'import scrapy\n'), ((2539, 2680), 'scrapy.Request', 'scrapy.Request', (['question_url'], {'callback': 'self.parse_question', 'headers': 'self.headers', 'meta': "{'question_id': question_id, 'url': question_url}"}), "(question_url, callback=self.parse_question, headers=self.\n headers, meta={'question_id': question_id, 'url': question_url})\n", (2553, 2680), False, 'import scrapy\n')] |
import nose
import angr
import logging
l = logging.getLogger("angr.tests.test_bindiff")
import os
test_location = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..', '..', 'binaries', 'tests')
# TODO: make a better test
def test_bindiff_x86_64():
binary_path_1 = os.path.join(test_location, 'x86_64', 'bindiff_a')
binary_path_2 = os.path.join(test_location, 'x86_64', 'bindiff_b')
b = angr.Project(binary_path_1, load_options={"auto_load_libs": False})
b2 = angr.Project(binary_path_2, load_options={"auto_load_libs": False})
bindiff = b.analyses.BinDiff(b2)
identical_functions = bindiff.identical_functions
differing_functions = bindiff.differing_functions
unmatched_functions = bindiff.unmatched_functions
# check identical functions
nose.tools.assert_in((0x40064c, 0x40066a), identical_functions)
# check differing functions
nose.tools.assert_in((0x400616, 0x400616), differing_functions)
# check unmatched functions
nose.tools.assert_less_equal(len(unmatched_functions[0]), 1)
nose.tools.assert_less_equal(len(unmatched_functions[1]), 2)
# check for no major regressions
nose.tools.assert_greater(len(identical_functions), len(differing_functions))
nose.tools.assert_less(len(differing_functions), 4)
# check a function diff
fdiff = bindiff.get_function_diff(0x400616, 0x400616)
block_matches = { (a.addr, b.addr) for a, b in fdiff.block_matches }
nose.tools.assert_in((0x40064a, 0x400668), block_matches)
nose.tools.assert_in((0x400616, 0x400616), block_matches)
nose.tools.assert_in((0x40061e, 0x40061e), block_matches)
def run_all():
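    # Collect every module-level callable named test_* and invoke each one in sorted order.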
functions = globals()
all_functions = dict(filter((lambda kv: kv[0].startswith('test_')), functions.items()))
for f in sorted(all_functions.keys()):
if hasattr(all_functions[f], '__call__'):
all_functions[f]()
if __name__ == "__main__":
logging.getLogger("angr.analyses.bindiff").setLevel(logging.DEBUG)
import sys
if len(sys.argv) > 1:
globals()['test_' + sys.argv[1]]()
else:
run_all()
| [
"logging.getLogger",
"angr.Project",
"os.path.join",
"os.path.realpath",
"nose.tools.assert_in"
] | [((44, 88), 'logging.getLogger', 'logging.getLogger', (['"""angr.tests.test_bindiff"""'], {}), "('angr.tests.test_bindiff')\n", (61, 88), False, 'import logging\n'), ((281, 331), 'os.path.join', 'os.path.join', (['test_location', '"""x86_64"""', '"""bindiff_a"""'], {}), "(test_location, 'x86_64', 'bindiff_a')\n", (293, 331), False, 'import os\n'), ((352, 402), 'os.path.join', 'os.path.join', (['test_location', '"""x86_64"""', '"""bindiff_b"""'], {}), "(test_location, 'x86_64', 'bindiff_b')\n", (364, 402), False, 'import os\n'), ((411, 478), 'angr.Project', 'angr.Project', (['binary_path_1'], {'load_options': "{'auto_load_libs': False}"}), "(binary_path_1, load_options={'auto_load_libs': False})\n", (423, 478), False, 'import angr\n'), ((488, 555), 'angr.Project', 'angr.Project', (['binary_path_2'], {'load_options': "{'auto_load_libs': False}"}), "(binary_path_2, load_options={'auto_load_libs': False})\n", (500, 555), False, 'import angr\n'), ((792, 853), 'nose.tools.assert_in', 'nose.tools.assert_in', (['(4195916, 4195946)', 'identical_functions'], {}), '((4195916, 4195946), identical_functions)\n', (812, 853), False, 'import nose\n'), ((892, 953), 'nose.tools.assert_in', 'nose.tools.assert_in', (['(4195862, 4195862)', 'differing_functions'], {}), '((4195862, 4195862), differing_functions)\n', (912, 953), False, 'import nose\n'), ((1457, 1512), 'nose.tools.assert_in', 'nose.tools.assert_in', (['(4195914, 4195944)', 'block_matches'], {}), '((4195914, 4195944), block_matches)\n', (1477, 1512), False, 'import nose\n'), ((1519, 1574), 'nose.tools.assert_in', 'nose.tools.assert_in', (['(4195862, 4195862)', 'block_matches'], {}), '((4195862, 4195862), block_matches)\n', (1539, 1574), False, 'import nose\n'), ((1581, 1636), 'nose.tools.assert_in', 'nose.tools.assert_in', (['(4195870, 4195870)', 'block_matches'], {}), '((4195870, 4195870), block_matches)\n', (1601, 1636), False, 'import nose\n'), ((145, 171), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (161, 171), False, 'import os\n'), ((1929, 1971), 'logging.getLogger', 'logging.getLogger', (['"""angr.analyses.bindiff"""'], {}), "('angr.analyses.bindiff')\n", (1946, 1971), False, 'import logging\n')] |
from pathlib import Path
BASE_DIR = Path(__file__).resolve().parent.parent
# def handle_uploaded_file(f):
# with open('screenshot.png', 'wb') as destination:
# # for chunk in f.chunks():
# # destination.write(chunk)
# destination.write(f)
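# Read the image as raw bytes so its type and length can be printed below.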
with open(
BASE_DIR/'media'/'Greater_coat_of_arms_of_the_United_States.png', 'rb'
) as file:
flag = file.read()
# handle_uploaded_file(flag)
print(type(flag))
print(len(flag))
# print(flag)
# for place in sys.path:
# print(place) | [
"pathlib.Path"
] | [((38, 52), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (42, 52), False, 'from pathlib import Path\n')] |
#!/usr/bin/env python3
#
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from aiohttp import web
import capstone
import functools
from gdbproc import GDBProcess
import socketio
import asyncio
import codecs
import os
enable_logging = False
premium = 'PREMIUM' in os.environ
if premium:
access_key = os.getenv('PREMIUM_KEY')
runnable = ['/home/user/printwebflag']
else:
access_key = os.getenv('TRIAL_KEY')
runnable = ['/bin/sleep', '20']
MAX_INSN_LEN = 15
capstone_md = capstone.Cs(capstone.CS_ARCH_X86, capstone.CS_MODE_64)
sio = socketio.AsyncServer()
app = web.Application()
sio.attach(app)
with open('index.html') as f:
index_html = f.read()
async def index(request):
  if 'key' not in request.cookies:
return web.Response(status=401, text='permission denied (missing key)', content_type='text/html')
if request.cookies['key'] != access_key:
return web.Response(status=401, text='permission denied (invalid key)', content_type='text/html')
return web.Response(text=index_html, content_type='text/html')
app.add_routes([web.get('/', index),
web.get('/{name}', index)])
gdb_sessions = {}
stop_queue_readers = {}
async def on_shutdown(app):
  # gather() needs the coroutines unpacked as positional args; snapshot the
  # keys first because delete_gdb_process mutates gdb_sessions.
  await asyncio.gather(*[delete_gdb_process(sid) for sid in list(gdb_sessions)])
app.on_shutdown.append(on_shutdown)
def log(msg):
if enable_logging:
print('[*] {}'.format(msg))
@sio.on('connect')
def connect(sid, environ):
log('connected {}'.format(sid))
  if 'key={}'.format(access_key) not in environ.get('HTTP_COOKIE', ''):
log('access_key not found {}'.format(environ['HTTP_COOKIE']))
return False
@sio.on('disconnect')
async def disconnect(sid):
log('disconnected {}'.format(sid))
await delete_gdb_process(sid)
async def stop_queue_reader(sid, queue):
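  # Each time GDB reports a stop event, push a fresh register/maps/disassembly
  # snapshot to the connected client.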
while True:
pkt = await queue.get()
await update_all(sid)
async def create_gdb_process(sid):
stop_queue = asyncio.Queue()
gdb_sessions[sid] = await GDBProcess.create(runnable, stop_queue, env={'KEY': access_key}, log_fn=log)
loop = asyncio.get_event_loop()
stop_queue_readers[sid] = loop.create_task(stop_queue_reader(sid, stop_queue))
async def delete_gdb_process(sid):
if sid in gdb_sessions:
stop_queue_readers[sid].cancel()
del stop_queue_readers[sid]
await gdb_sessions[sid].release()
del gdb_sessions[sid]
@sio.on('start')
async def start(sid):
await delete_gdb_process(sid)
await create_gdb_process(sid)
# Reading registers doesn't work on ubuntu 18.04 for some reason.
# Step once as a work around
step(sid)
async def update_all(sid):
log('updating sid {}'.format(sid))
regs_task = getregs(sid)
maps_task = getmaps(sid)
asm_task = getasm(sid, {'addr': await gdb_sessions[sid].get_reg('rip'), 'count': 100})
await asyncio.gather(regs_task, maps_task, asm_task)
log('update done')
@sio.on('step')
def step(sid):
gdb_sessions[sid].step()
@sio.on('cont')
def cont(sid):
gdb_sessions[sid].cont()
@sio.on('stop')
def stop(sid):
gdb_sessions[sid].interrupt()
async def getregs(sid):
regs = await gdb_sessions[sid].get_regs()
await sio.emit('regs', regs, room=sid)
@sio.on('mem')
async def getmem(sid, msg):
addr = msg['addr']
count = msg['count']
data = gdb_sessions[sid].read_mem(addr, count)
await sio.emit('mem', {'addr': addr, 'data': data}, room=sid)
async def getmaps(sid):
maps = gdb_sessions[sid].maps()
await sio.emit('maps', maps, room=sid)
@sio.on('break')
async def setbreakpoint(sid, data):
addr = data['addr']
await gdb_sessions[sid].set_breakpoint(addr)
await sio.emit('breakpoints', gdb_sessions[sid].breakpoints(), room=sid)
@sio.on('unbreak')
async def rmbreakpoint(sid, data):
addr = data['addr']
await gdb_sessions[sid].remove_breakpoint(addr)
await sio.emit('breakpoints', gdb_sessions[sid].breakpoints(), room=sid)
@sio.on('search')
async def search(sid, data):
q = data['q']
qtype = data['type']
await sio.emit('search_result', gdb_sessions[sid].search(q.encode(), qtype), room=sid)
async def getasm(sid, data):
addr = data['addr']
count = data['count']
result = []
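    # Read MAX_INSN_LEN (15) bytes, the longest possible x86-64 instruction,
    # decode just the first instruction in that buffer with capstone, then
    # advance by that instruction's size (disasm[1]).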
for _ in range(count):
data = gdb_sessions[sid].read_mem(addr, MAX_INSN_LEN)
try:
disasm = next(capstone_md.disasm_lite(data, addr))
except StopIteration:
break
result.append(disasm)
addr += disasm[1]
await sio.emit('asm', result, room=sid)
if __name__ == '__main__':
web.run_app(app)
| [
"aiohttp.web.run_app",
"os.getenv",
"aiohttp.web.Response",
"capstone.Cs",
"aiohttp.web.Application",
"asyncio.Queue",
"gdbproc.GDBProcess.create",
"aiohttp.web.get",
"asyncio.gather",
"socketio.AsyncServer",
"asyncio.get_event_loop"
] | [((1008, 1062), 'capstone.Cs', 'capstone.Cs', (['capstone.CS_ARCH_X86', 'capstone.CS_MODE_64'], {}), '(capstone.CS_ARCH_X86, capstone.CS_MODE_64)\n', (1019, 1062), False, 'import capstone\n'), ((1070, 1092), 'socketio.AsyncServer', 'socketio.AsyncServer', ([], {}), '()\n', (1090, 1092), False, 'import socketio\n'), ((1099, 1116), 'aiohttp.web.Application', 'web.Application', ([], {}), '()\n', (1114, 1116), False, 'from aiohttp import web\n'), ((830, 854), 'os.getenv', 'os.getenv', (['"""PREMIUM_KEY"""'], {}), "('PREMIUM_KEY')\n", (839, 854), False, 'import os\n'), ((917, 939), 'os.getenv', 'os.getenv', (['"""TRIAL_KEY"""'], {}), "('TRIAL_KEY')\n", (926, 939), False, 'import os\n'), ((1506, 1561), 'aiohttp.web.Response', 'web.Response', ([], {'text': 'index_html', 'content_type': '"""text/html"""'}), "(text=index_html, content_type='text/html')\n", (1518, 1561), False, 'from aiohttp import web\n'), ((2408, 2423), 'asyncio.Queue', 'asyncio.Queue', ([], {}), '()\n', (2421, 2423), False, 'import asyncio\n'), ((2538, 2562), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (2560, 2562), False, 'import asyncio\n'), ((4904, 4920), 'aiohttp.web.run_app', 'web.run_app', (['app'], {}), '(app)\n', (4915, 4920), False, 'from aiohttp import web\n'), ((1261, 1355), 'aiohttp.web.Response', 'web.Response', ([], {'status': '(401)', 'text': '"""permission denied (missing key)"""', 'content_type': '"""text/html"""'}), "(status=401, text='permission denied (missing key)',\n content_type='text/html')\n", (1273, 1355), False, 'from aiohttp import web\n'), ((1406, 1500), 'aiohttp.web.Response', 'web.Response', ([], {'status': '(401)', 'text': '"""permission denied (invalid key)"""', 'content_type': '"""text/html"""'}), "(status=401, text='permission denied (invalid key)',\n content_type='text/html')\n", (1418, 1500), False, 'from aiohttp import web\n'), ((1579, 1598), 'aiohttp.web.get', 'web.get', (['"""/"""', 'index'], {}), "('/', index)\n", (1586, 1598), False, 'from aiohttp import web\n'), ((1616, 1641), 'aiohttp.web.get', 'web.get', (['"""/{name}"""', 'index'], {}), "('/{name}', index)\n", (1623, 1641), False, 'from aiohttp import web\n'), ((2452, 2528), 'gdbproc.GDBProcess.create', 'GDBProcess.create', (['runnable', 'stop_queue'], {'env': "{'KEY': access_key}", 'log_fn': 'log'}), "(runnable, stop_queue, env={'KEY': access_key}, log_fn=log)\n", (2469, 2528), False, 'from gdbproc import GDBProcess\n'), ((3270, 3316), 'asyncio.gather', 'asyncio.gather', (['regs_task', 'maps_task', 'asm_task'], {}), '(regs_task, maps_task, asm_task)\n', (3284, 3316), False, 'import asyncio\n')] |
from django.core.urlresolvers import reverse
from sentry.models import Project
from sentry.testutils import APITestCase
class ProjectDetailsTest(APITestCase):
def test_simple(self):
project = self.project # force creation
self.login_as(user=self.user)
url = reverse('sentry-api-0-project-details', kwargs={'project_id': project.id})
response = self.client.get(url)
assert response.status_code == 200
assert response.data['id'] == str(project.id)
class ProjectUpdateTest(APITestCase):
def test_simple(self):
project = self.project # force creation
self.login_as(user=self.user)
url = reverse('sentry-api-0-project-details', kwargs={'project_id': project.id})
resp = self.client.put(url, data={
'name': 'hello world',
'slug': 'foobar',
})
assert resp.status_code == 200, resp.content
project = Project.objects.get(id=project.id)
assert project.name == 'hello world'
assert project.slug == 'foobar'
class ProjectDeleteTest(APITestCase):
def test_simple(self):
project = self.create_project()
self.login_as(user=self.user)
url = reverse('sentry-api-0-project-details', kwargs={'project_id': project.id})
with self.settings(SENTRY_PROJECT=0):
response = self.client.delete(url)
assert response.status_code == 204
assert not Project.objects.filter(id=project.id).exists()
def test_internal_project(self):
project = self.create_project()
self.login_as(user=self.user)
url = reverse('sentry-api-0-project-details', kwargs={'project_id': project.id})
with self.settings(SENTRY_PROJECT=project.id):
response = self.client.delete(url)
assert response.status_code == 403
| [
"sentry.models.Project.objects.filter",
"sentry.models.Project.objects.get",
"django.core.urlresolvers.reverse"
] | [((289, 363), 'django.core.urlresolvers.reverse', 'reverse', (['"""sentry-api-0-project-details"""'], {'kwargs': "{'project_id': project.id}"}), "('sentry-api-0-project-details', kwargs={'project_id': project.id})\n", (296, 363), False, 'from django.core.urlresolvers import reverse\n'), ((669, 743), 'django.core.urlresolvers.reverse', 'reverse', (['"""sentry-api-0-project-details"""'], {'kwargs': "{'project_id': project.id}"}), "('sentry-api-0-project-details', kwargs={'project_id': project.id})\n", (676, 743), False, 'from django.core.urlresolvers import reverse\n'), ((934, 968), 'sentry.models.Project.objects.get', 'Project.objects.get', ([], {'id': 'project.id'}), '(id=project.id)\n', (953, 968), False, 'from sentry.models import Project\n'), ((1215, 1289), 'django.core.urlresolvers.reverse', 'reverse', (['"""sentry-api-0-project-details"""'], {'kwargs': "{'project_id': project.id}"}), "('sentry-api-0-project-details', kwargs={'project_id': project.id})\n", (1222, 1289), False, 'from django.core.urlresolvers import reverse\n'), ((1626, 1700), 'django.core.urlresolvers.reverse', 'reverse', (['"""sentry-api-0-project-details"""'], {'kwargs': "{'project_id': project.id}"}), "('sentry-api-0-project-details', kwargs={'project_id': project.id})\n", (1633, 1700), False, 'from django.core.urlresolvers import reverse\n'), ((1447, 1484), 'sentry.models.Project.objects.filter', 'Project.objects.filter', ([], {'id': 'project.id'}), '(id=project.id)\n', (1469, 1484), False, 'from sentry.models import Project\n')] |
# encoding: utf-8
"""Unit-test suite for `pptx.table` module."""
import pytest
from pptx.dml.fill import FillFormat
from pptx.dml.border import BorderFormat
from pptx.enum.text import MSO_ANCHOR
from pptx.oxml.ns import qn
from pptx.oxml.table import CT_Table, CT_TableCell, TcRange
from pptx.shapes.graphfrm import GraphicFrame
from pptx.table import (
_Cell,
_CellCollection,
_Column,
_ColumnCollection,
_Row,
_RowCollection,
Table,
)
from pptx.text.text import TextFrame
from pptx.util import Inches, Length, Pt
from .unitutil.cxml import element, xml
from .unitutil.mock import call, class_mock, instance_mock, property_mock
class DescribeTable(object):
"""Unit-test suite for `pptx.table.Table` objects."""
def it_provides_access_to_its_cells(self, tbl_, tc_, _Cell_, cell_):
row_idx, col_idx = 4, 2
tbl_.tc.return_value = tc_
_Cell_.return_value = cell_
table = Table(tbl_, None)
cell = table.cell(row_idx, col_idx)
tbl_.tc.assert_called_once_with(row_idx, col_idx)
_Cell_.assert_called_once_with(tc_, table)
assert cell is cell_
def it_provides_access_to_its_columns(self, request):
columns_ = instance_mock(request, _ColumnCollection)
_ColumnCollection_ = class_mock(
request, "pptx.table._ColumnCollection", return_value=columns_
)
tbl = element("a:tbl")
table = Table(tbl, None)
columns = table.columns
_ColumnCollection_.assert_called_once_with(tbl, table)
assert columns is columns_
def it_can_iterate_its_grid_cells(self, request, _Cell_):
tbl = element("a:tbl/(a:tr/(a:tc,a:tc),a:tr/(a:tc,a:tc))")
expected_tcs = tbl.xpath(".//a:tc")
expected_cells = _Cell_.side_effect = [
instance_mock(request, _Cell, name="cell%d" % idx) for idx in range(4)
]
table = Table(tbl, None)
cells = list(table.iter_cells())
assert cells == expected_cells
assert _Cell_.call_args_list == [call(tc, table) for tc in expected_tcs]
def it_provides_access_to_its_rows(self, request):
rows_ = instance_mock(request, _RowCollection)
_RowCollection_ = class_mock(
request, "pptx.table._RowCollection", return_value=rows_
)
tbl = element("a:tbl")
table = Table(tbl, None)
rows = table.rows
_RowCollection_.assert_called_once_with(tbl, table)
assert rows is rows_
def it_updates_graphic_frame_width_on_width_change(self, dx_fixture):
table, expected_width = dx_fixture
table.notify_width_changed()
assert table._graphic_frame.width == expected_width
def it_updates_graphic_frame_height_on_height_change(self, dy_fixture):
table, expected_height = dy_fixture
table.notify_height_changed()
assert table._graphic_frame.height == expected_height
# fixtures -------------------------------------------------------
@pytest.fixture
def dx_fixture(self, graphic_frame_):
tbl_cxml = "a:tbl/a:tblGrid/(a:gridCol{w=111},a:gridCol{w=222})"
table = Table(element(tbl_cxml), graphic_frame_)
expected_width = 333
return table, expected_width
@pytest.fixture
def dy_fixture(self, graphic_frame_):
tbl_cxml = "a:tbl/(a:tr{h=100},a:tr{h=200})"
table = Table(element(tbl_cxml), graphic_frame_)
expected_height = 300
return table, expected_height
# fixture components ---------------------------------------------
@pytest.fixture
def _Cell_(self, request):
return class_mock(request, "pptx.table._Cell")
@pytest.fixture
def cell_(self, request):
return instance_mock(request, _Cell)
@pytest.fixture
def graphic_frame_(self, request):
return instance_mock(request, GraphicFrame)
@pytest.fixture
def tbl_(self, request):
return instance_mock(request, CT_Table)
@pytest.fixture
def tc_(self, request):
return instance_mock(request, CT_TableCell)
class DescribeTableBooleanProperties(object):
def it_knows_its_boolean_property_settings(self, boolprop_get_fixture):
table, boolprop_name, expected_value = boolprop_get_fixture
boolprop_value = getattr(table, boolprop_name)
assert boolprop_value is expected_value
def it_can_change_its_boolean_property_settings(self, boolprop_set_fixture):
table, boolprop_name, new_value, expected_xml = boolprop_set_fixture
setattr(table, boolprop_name, new_value)
assert table._tbl.xml == expected_xml
# fixtures -------------------------------------------------------
@pytest.fixture(
params=[
("a:tbl", "first_row", False),
("a:tbl/a:tblPr", "first_row", False),
("a:tbl/a:tblPr{firstRow=1}", "first_row", True),
("a:tbl/a:tblPr{firstRow=0}", "first_row", False),
("a:tbl/a:tblPr{firstRow=true}", "first_row", True),
("a:tbl/a:tblPr{firstRow=false}", "first_row", False),
("a:tbl/a:tblPr{firstCol=1}", "first_col", True),
("a:tbl/a:tblPr{lastRow=0}", "last_row", False),
("a:tbl/a:tblPr{lastCol=true}", "last_col", True),
("a:tbl/a:tblPr{bandRow=false}", "horz_banding", False),
("a:tbl/a:tblPr", "vert_banding", False),
]
)
def boolprop_get_fixture(self, request):
tbl_cxml, boolprop_name, expected_value = request.param
table = Table(element(tbl_cxml), None)
return table, boolprop_name, expected_value
@pytest.fixture(
params=[
("a:tbl", "first_row", True, "a:tbl/a:tblPr{firstRow=1}"),
("a:tbl", "first_row", False, "a:tbl/a:tblPr"),
("a:tbl/a:tblPr", "first_row", True, "a:tbl/a:tblPr{firstRow=1}"),
("a:tbl/a:tblPr", "first_row", False, "a:tbl/a:tblPr"),
(
"a:tbl/a:tblPr{firstRow=true}",
"first_row",
True,
"a:tbl/a:tblPr{firstRow=1}",
),
("a:tbl/a:tblPr{firstRow=false}", "first_row", False, "a:tbl/a:tblPr"),
(
"a:tbl/a:tblPr{bandRow=1}",
"first_row",
True,
"a:tbl/a:tblPr{bandRow=1,firstRow=1}",
),
("a:tbl", "first_col", True, "a:tbl/a:tblPr{firstCol=1}"),
("a:tbl", "last_row", True, "a:tbl/a:tblPr{lastRow=1}"),
("a:tbl", "last_col", True, "a:tbl/a:tblPr{lastCol=1}"),
("a:tbl", "horz_banding", True, "a:tbl/a:tblPr{bandRow=1}"),
("a:tbl", "vert_banding", True, "a:tbl/a:tblPr{bandCol=1}"),
]
)
def boolprop_set_fixture(self, request):
tbl_cxml, boolprop_name, new_value, expected_tbl_cxml = request.param
table = Table(element(tbl_cxml), None)
expected_xml = xml(expected_tbl_cxml)
return table, boolprop_name, new_value, expected_xml
class Describe_Cell(object):
"""Unit-test suite for `pptx.table._Cell` object."""
def it_is_equal_to_other_instance_having_same_tc(self):
tc = element("a:tc")
other_tc = element("a:tc")
cell = _Cell(tc, None)
cell_with_same_tc = _Cell(tc, None)
cell_with_other_tc = _Cell(other_tc, None)
assert cell == cell_with_same_tc
assert cell != cell_with_other_tc
def it_has_a_fill(self, fill_fixture):
cell = fill_fixture
assert isinstance(cell.fill, FillFormat)
def it_knows_whether_it_is_merge_origin_cell(self, origin_fixture):
tc, expected_value = origin_fixture
cell = _Cell(tc, None)
is_merge_origin = cell.is_merge_origin
assert is_merge_origin is expected_value
def it_knows_whether_it_is_spanned(self, spanned_fixture):
tc, expected_value = spanned_fixture
cell = _Cell(tc, None)
is_spanned = cell.is_spanned
assert is_spanned is expected_value
def it_knows_its_margin_settings(self, margin_get_fixture):
cell, margin_prop_name, expected_value = margin_get_fixture
margin_value = getattr(cell, margin_prop_name)
assert margin_value == expected_value
def it_can_change_its_margin_settings(self, margin_set_fixture):
cell, margin_prop_name, new_value, expected_xml = margin_set_fixture
setattr(cell, margin_prop_name, new_value)
assert cell._tc.xml == expected_xml
def it_raises_on_margin_assigned_other_than_int_or_None(
self, margin_raises_fixture
):
cell, margin_attr_name, val_of_invalid_type = margin_raises_fixture
with pytest.raises(TypeError):
setattr(cell, margin_attr_name, val_of_invalid_type)
def it_can_merge_a_range_of_cells(self, TcRange_, tc_range_):
tbl = element("a:tbl/(a:tr/(a:tc,a:tc),a:tr/(a:tc,a:tc))")
tc, other_tc = tbl.tc(0, 0), tbl.tc(1, 1)
TcRange_.return_value = tc_range_
tc_range_.contains_merged_cell = False
tc_range_.dimensions = 2, 2
def tcs(*rowcols):
return (tbl.tc(*rowcol) for rowcol in rowcols)
tc_range_.iter_top_row_tcs.return_value = tcs((0, 0), (0, 1))
tc_range_.iter_left_col_tcs.return_value = tcs((0, 0), (1, 0))
tc_range_.iter_except_left_col_tcs.return_value = tcs((0, 1), (1, 1))
tc_range_.iter_except_top_row_tcs.return_value = tcs((1, 0), (1, 1))
expected_xml = xml(
"a:tbl/(a:tr/(a:tc{gridSpan=2,rowSpan=2},a:tc{rowSpan=2,hMerge=1"
"}),a:tr/(a:tc{gridSpan=2,vMerge=1},a:tc{hMerge=1,vMerge=1}))"
)
cell, other_cell = _Cell(tc, None), _Cell(other_tc, None)
cell.merge(other_cell)
TcRange_.assert_called_once_with(tc, other_tc)
tc_range_.move_content_to_origin.assert_called_once_with()
assert tbl.xml == expected_xml
def but_it_raises_when_cells_are_from_different_tables(self, TcRange_, tc_range_):
TcRange_.return_value = tc_range_
tc_range_.in_same_table = False
cell, other_cell = _Cell(None, None), _Cell(None, None)
with pytest.raises(ValueError) as e:
cell.merge(other_cell)
assert "different table" in str(e.value)
def and_it_raises_when_range_contains_merged_cell(self, TcRange_, tc_range_):
TcRange_.return_value = tc_range_
tc_range_.contains_merged_cell = True
cell, other_cell = _Cell(None, None), _Cell(None, None)
with pytest.raises(ValueError) as e:
cell.merge(other_cell)
assert "contains one or more merged cells" in str(e.value)
def it_knows_how_many_rows_the_merge_spans(self, height_fixture):
tc, expected_value = height_fixture
cell = _Cell(tc, None)
span_height = cell.span_height
assert span_height == expected_value
def it_knows_how_many_columns_the_merge_spans(self, width_fixture):
tc, expected_value = width_fixture
cell = _Cell(tc, None)
span_width = cell.span_width
assert span_width == expected_value
def it_can_split_a_merged_cell(self, split_fixture):
origin_tc, range_tcs = split_fixture
cell = _Cell(origin_tc, None)
cell.split()
assert all(tc.gridSpan == 1 for tc in range_tcs)
assert all(tc.rowSpan == 1 for tc in range_tcs)
assert all(not tc.hMerge for tc in range_tcs)
assert all(not tc.vMerge for tc in range_tcs)
def but_it_raises_when_cell_to_be_split_is_not_merge_origin(self):
tc = element("a:tbl/a:tr/a:tc").xpath("//a:tc")[0]
cell = _Cell(tc, None)
with pytest.raises(ValueError) as e:
cell.split()
assert "not a merge-origin cell" in str(e.value)
def it_knows_what_text_it_contains(self, text_frame_prop_, text_frame_):
text_frame_prop_.return_value = text_frame_
text_frame_.text = "foobar"
cell = _Cell(None, None)
text = cell.text
assert text == "foobar"
def it_can_change_its_text(self, text_frame_prop_, text_frame_):
text_frame_prop_.return_value = text_frame_
cell = _Cell(None, None)
cell.text = "føøbår"
assert text_frame_.text == "føøbår"
def it_knows_its_vertical_anchor_setting(self, anchor_get_fixture):
cell, expected_value = anchor_get_fixture
assert cell.vertical_anchor == expected_value
def it_can_change_its_vertical_anchor(self, anchor_set_fixture):
cell, new_value, expected_xml = anchor_set_fixture
cell.vertical_anchor = new_value
assert cell._tc.xml == expected_xml
def it_knows_it_has_border_settings(self, border_fixture):
cell = border_fixture
assert isinstance(cell.border_left, BorderFormat)
assert isinstance(cell.border_right, BorderFormat)
assert isinstance(cell.border_top, BorderFormat)
assert isinstance(cell.border_bottom, BorderFormat)
assert isinstance(cell.border_tl_br, BorderFormat)
assert isinstance(cell.border_bl_tr, BorderFormat)
# fixtures -------------------------------------------------------
@pytest.fixture(
params=[
("a:tc", None),
("a:tc/a:tcPr", None),
("a:tc/a:tcPr{anchor=t}", MSO_ANCHOR.TOP),
("a:tc/a:tcPr{anchor=ctr}", MSO_ANCHOR.MIDDLE),
("a:tc/a:tcPr{anchor=b}", MSO_ANCHOR.BOTTOM),
]
)
def anchor_get_fixture(self, request):
tc_cxml, expected_value = request.param
cell = _Cell(element(tc_cxml), None)
return cell, expected_value
@pytest.fixture(
params=[
("a:tc", None, "a:tc"),
("a:tc", MSO_ANCHOR.TOP, "a:tc/a:tcPr{anchor=t}"),
("a:tc", MSO_ANCHOR.MIDDLE, "a:tc/a:tcPr{anchor=ctr}"),
("a:tc", MSO_ANCHOR.BOTTOM, "a:tc/a:tcPr{anchor=b}"),
("a:tc/a:tcPr{anchor=t}", MSO_ANCHOR.MIDDLE, "a:tc/a:tcPr{anchor=ctr}"),
("a:tc/a:tcPr{anchor=ctr}", None, "a:tc/a:tcPr"),
]
)
def anchor_set_fixture(self, request):
tc_cxml, new_value, expected_tc_cxml = request.param
cell = _Cell(element(tc_cxml), None)
expected_xml = xml(expected_tc_cxml)
return cell, new_value, expected_xml
@pytest.fixture
def fill_fixture(self, cell):
return cell
@pytest.fixture
def border_fixture(self, cell):
return cell
@pytest.fixture(
params=[("a:tc", 1), ("a:tc{gridSpan=2}", 1), ("a:tc{rowSpan=42}", 42)]
)
def height_fixture(self, request):
tc_cxml, expected_value = request.param
tc = element(tc_cxml)
return tc, expected_value
@pytest.fixture(
params=[
("a:tc/a:tcPr{marL=82296}", "margin_left", Inches(0.09)),
("a:tc/a:tcPr{marR=73152}", "margin_right", Inches(0.08)),
("a:tc/a:tcPr{marT=64008}", "margin_top", Inches(0.07)),
("a:tc/a:tcPr{marB=54864}", "margin_bottom", Inches(0.06)),
("a:tc", "margin_left", Inches(0.1)),
("a:tc/a:tcPr", "margin_right", Inches(0.1)),
("a:tc", "margin_top", Inches(0.05)),
("a:tc/a:tcPr", "margin_bottom", Inches(0.05)),
]
)
def margin_get_fixture(self, request):
tc_cxml, margin_prop_name, expected_value = request.param
cell = _Cell(element(tc_cxml), None)
return cell, margin_prop_name, expected_value
@pytest.fixture(
params=[
("a:tc", "margin_left", Inches(0.08), "a:tc/a:tcPr{marL=73152}"),
("a:tc", "margin_right", Inches(0.08), "a:tc/a:tcPr{marR=73152}"),
("a:tc", "margin_top", Inches(0.08), "a:tc/a:tcPr{marT=73152}"),
("a:tc", "margin_bottom", Inches(0.08), "a:tc/a:tcPr{marB=73152}"),
("a:tc", "margin_left", None, "a:tc"),
("a:tc/a:tcPr{marL=42}", "margin_left", None, "a:tc/a:tcPr"),
]
)
def margin_set_fixture(self, request):
tc_cxml, margin_prop_name, new_value, expected_tc_cxml = request.param
cell = _Cell(element(tc_cxml), None)
expected_xml = xml(expected_tc_cxml)
return cell, margin_prop_name, new_value, expected_xml
@pytest.fixture(
params=["margin_left", "margin_right", "margin_top", "margin_bottom"]
)
def margin_raises_fixture(self, request):
margin_prop_name = request.param
cell = _Cell(element("a:tc"), None)
val_of_invalid_type = "foobar"
return cell, margin_prop_name, val_of_invalid_type
@pytest.fixture(
params=[
("a:tc", False),
("a:tc{gridSpan=1}", False),
("a:tc{hMerge=1}", False),
("a:tc{gridSpan=2,vMerge=1}", False),
("a:tc{gridSpan=2}", True),
("a:tc{rowSpan=2}", True),
("a:tc{gridSpan=2,rowSpan=3}", True),
]
)
def origin_fixture(self, request):
tc_cxml, expected_value = request.param
tc = element(tc_cxml)
return tc, expected_value
@pytest.fixture(
params=[
("a:tc", False),
("a:tc{gridSpan=2}", False),
("a:tc{hMerge=1}", True),
("a:tc{gridSpan=2,vMerge=1}", True),
("a:tc{rowSpan=2,hMerge=true}", True),
("a:tc{gridSpan=2,rowSpan=3}", False),
]
)
def spanned_fixture(self, request):
tc_cxml, expected_value = request.param
tc = element(tc_cxml)
return tc, expected_value
@pytest.fixture(
params=[
(
"a:tbl/(a:tr/(a:tc{gridSpan=2},a:tc{hMerge=1}),a:tr/(a:tc,a:tc))",
0,
[0, 1],
),
(
"a:tbl/(a:tr/(a:tc{rowSpan=2},a:tc),a:tr/(a:tc{vMerge=1},a:tc))",
0,
[0, 2],
),
(
"a:tbl/(a:tr/(a:tc{gridSpan=2,rowSpan=2},a:tc{hMerge=1,rowSpan=2}),"
"a:tr/(a:tc{gridSpan=2,vMerge=1},a:tc{hMerge=1,vMerge=1}))",
0,
[0, 1, 2, 3],
),
]
)
def split_fixture(self, request):
tbl_cxml, origin_tc_idx, range_tc_idxs = request.param
tcs = element(tbl_cxml).xpath("//a:tc")
origin_tc = tcs[origin_tc_idx]
range_tcs = tuple(tcs[idx] for idx in range_tc_idxs)
return origin_tc, range_tcs
@pytest.fixture(
params=[("a:tc", 1), ("a:tc{rowSpan=2}", 1), ("a:tc{gridSpan=24}", 24)]
)
def width_fixture(self, request):
tc_cxml, expected_value = request.param
tc = element(tc_cxml)
return tc, expected_value
# fixture components ---------------------------------------------
@pytest.fixture
def cell(self):
return _Cell(element("a:tc"), None)
@pytest.fixture
def TcRange_(self, request):
return class_mock(request, "pptx.table.TcRange")
@pytest.fixture
def tc_range_(self, request):
return instance_mock(request, TcRange)
@pytest.fixture
def text_frame_(self, request):
return instance_mock(request, TextFrame)
@pytest.fixture
def text_frame_prop_(self, request):
return property_mock(request, _Cell, "text_frame")
class Describe_CellCollection(object):
def it_knows_how_many_cells_it_contains(self, len_fixture):
cells, expected_count = len_fixture
assert len(cells) == expected_count
def it_can_iterate_over_the_cells_it_contains(self, iter_fixture):
cell_collection, _Cell_, calls, expected_cells = iter_fixture
cells = list(cell_collection)
assert _Cell_.call_args_list == calls
assert cells == expected_cells
def it_supports_indexed_access(self, _Cell_, cell_):
tr = element("a:tr/(a:tc, a:tc, a:tc)")
tcs = tr.xpath("//a:tc")
_Cell_.return_value = cell_
cell_collection = _CellCollection(tr, None)
cell = cell_collection[1]
_Cell_.assert_called_once_with(tcs[1], cell_collection)
assert cell is cell_
def it_raises_on_indexed_access_out_of_range(self):
cells = _CellCollection(element("a:tr/a:tc"), None)
with pytest.raises(IndexError):
cells[-1]
with pytest.raises(IndexError):
cells[9]
# fixtures -------------------------------------------------------
@pytest.fixture(params=["a:tr", "a:tr/a:tc", "a:tr/(a:tc, a:tc, a:tc)"])
def iter_fixture(self, request, _Cell_):
tr_cxml = request.param
tr = element(tr_cxml)
tcs = tr.xpath("//a:tc")
cell_collection = _CellCollection(tr, None)
expected_cells = [
instance_mock(request, _Cell, name="cell%d" % idx)
for idx in range(len(tcs))
]
_Cell_.side_effect = expected_cells
calls = [call(tc, cell_collection) for tc in tcs]
return cell_collection, _Cell_, calls, expected_cells
@pytest.fixture(params=[("a:tr", 0), ("a:tr/a:tc", 1), ("a:tr/(a:tc, a:tc)", 2)])
def len_fixture(self, request):
tr_cxml, expected_len = request.param
cells = _CellCollection(element(tr_cxml), None)
return cells, expected_len
# fixture components ---------------------------------------------
@pytest.fixture
def _Cell_(self, request):
return class_mock(request, "pptx.table._Cell")
@pytest.fixture
def cell_(self, request):
return instance_mock(request, _Cell)
class Describe_Column(object):
def it_knows_its_width(self, width_get_fixture):
column, expected_value = width_get_fixture
width = column.width
assert width == expected_value
assert isinstance(width, Length)
def it_can_change_its_width(self, width_set_fixture):
column, new_width, expected_xml, parent_ = width_set_fixture
column.width = new_width
assert column._gridCol.xml == expected_xml
parent_.notify_width_changed.assert_called_once_with()
# fixtures -------------------------------------------------------
@pytest.fixture(
params=[("a:gridCol{w=914400}", Inches(1)), ("a:gridCol{w=10pt}", Pt(10))]
)
def width_get_fixture(self, request):
gridCol_cxml, expected_value = request.param
column = _Column(element(gridCol_cxml), None)
return column, expected_value
@pytest.fixture(
params=[
("a:gridCol{w=12pt}", Inches(1), "a:gridCol{w=914400}"),
("a:gridCol{w=1234}", Inches(1), "a:gridCol{w=914400}"),
]
)
def width_set_fixture(self, request, parent_):
gridCol_cxml, new_width, expected_gridCol_cxml = request.param
column = _Column(element(gridCol_cxml), parent_)
expected_xml = xml(expected_gridCol_cxml)
return column, new_width, expected_xml, parent_
# fixture components ---------------------------------------------
@pytest.fixture
def parent_(self, request):
return instance_mock(request, _ColumnCollection)
class Describe_ColumnCollection(object):
def it_knows_how_many_columns_it_contains(self, len_fixture):
columns, expected_count = len_fixture
assert len(columns) == expected_count
def it_can_iterate_over_the_columns_it_contains(self, iter_fixture):
columns, expected_gridCol_lst = iter_fixture
count = 0
for idx, column in enumerate(columns):
assert isinstance(column, _Column)
assert column._gridCol is expected_gridCol_lst[idx]
count += 1
assert count == len(expected_gridCol_lst)
def it_supports_indexed_access(self, getitem_fixture):
columns, expected_gridCol_lst = getitem_fixture
for idx, gridCol in enumerate(expected_gridCol_lst):
column = columns[idx]
assert isinstance(column, _Column)
assert column._gridCol is gridCol
def it_raises_on_indexed_access_out_of_range(self):
columns = _ColumnCollection(element("a:tbl/a:tblGrid/a:gridCol"), None)
with pytest.raises(IndexError):
columns[-1]
with pytest.raises(IndexError):
columns[9]
# fixtures -------------------------------------------------------
@pytest.fixture(
params=[
"a:tbl/a:tblGrid",
"a:tbl/a:tblGrid/a:gridCol",
"a:tbl/a:tblGrid/(a:gridCol, a:gridCol, a:gridCol)",
]
)
def getitem_fixture(self, request):
tbl_cxml = request.param
tbl = element(tbl_cxml)
columns = _ColumnCollection(tbl, None)
expected_column_lst = tbl.xpath("//a:gridCol")
return columns, expected_column_lst
@pytest.fixture(
params=[
"a:tbl/a:tblGrid",
"a:tbl/a:tblGrid/a:gridCol",
"a:tbl/a:tblGrid/(a:gridCol, a:gridCol, a:gridCol)",
]
)
def iter_fixture(self, request):
tbl_cxml = request.param
tbl = element(tbl_cxml)
columns = _ColumnCollection(tbl, None)
expected_column_lst = tbl.xpath("//a:gridCol")
return columns, expected_column_lst
@pytest.fixture(
params=[
("a:tbl/a:tblGrid", 0),
("a:tbl/a:tblGrid/a:gridCol", 1),
("a:tbl/a:tblGrid/(a:gridCol,a:gridCol)", 2),
]
)
def len_fixture(self, request):
tbl_cxml, expected_len = request.param
columns = _ColumnCollection(element(tbl_cxml), None)
return columns, expected_len
class Describe_Row(object):
def it_knows_its_height(self, height_get_fixture):
row, expected_value = height_get_fixture
height = row.height
assert height == expected_value
assert isinstance(height, Length)
def it_can_change_its_height(self, height_set_fixture):
row, new_height, expected_xml, parent_ = height_set_fixture
row.height = new_height
assert row._tr.xml == expected_xml
parent_.notify_height_changed.assert_called_once_with()
def it_provides_access_to_its_cells(self, cells_fixture):
row, _CellCollection_, cells_ = cells_fixture
cells = row.cells
_CellCollection_.assert_called_once_with(row._tr, row)
assert cells is cells_
# fixtures -------------------------------------------------------
@pytest.fixture
def cells_fixture(self, _CellCollection_, cells_):
row = _Row(element("a:tr"), None)
return row, _CellCollection_, cells_
@pytest.fixture(params=[("a:tr{h=914400}", Inches(1)), ("a:tr{h=10pt}", Pt(10))])
def height_get_fixture(self, request):
tr_cxml, expected_value = request.param
row = _Row(element(tr_cxml), None)
return row, expected_value
@pytest.fixture(
params=[
("a:tr{h=12pt}", Inches(1), "a:tr{h=914400}"),
("a:tr{h=1234}", Inches(1), "a:tr{h=914400}"),
]
)
def height_set_fixture(self, request, parent_):
tr_cxml, new_height, expected_tr_cxml = request.param
row = _Row(element(tr_cxml), parent_)
expected_xml = xml(expected_tr_cxml)
return row, new_height, expected_xml, parent_
# fixture components ---------------------------------------------
@pytest.fixture
def _CellCollection_(self, request, cells_):
return class_mock(request, "pptx.table._CellCollection", return_value=cells_)
@pytest.fixture
def cells_(self, request):
return instance_mock(request, _CellCollection)
@pytest.fixture
def parent_(self, request):
return instance_mock(request, _RowCollection)
class Describe_RowCollection(object):
def it_knows_how_many_rows_it_contains(self, len_fixture):
rows, expected_count = len_fixture
assert len(rows) == expected_count
def it_can_iterate_over_the_rows_it_contains(self, iter_fixture):
rows, expected_tr_lst = iter_fixture
count = 0
for idx, row in enumerate(rows):
assert isinstance(row, _Row)
assert row._tr is expected_tr_lst[idx]
count += 1
assert count == len(expected_tr_lst)
def it_supports_indexed_access(self, getitem_fixture):
rows, expected_tr_lst = getitem_fixture
for idx, tr in enumerate(expected_tr_lst):
row = rows[idx]
assert isinstance(row, _Row)
assert row._tr is tr
def it_raises_on_indexed_access_out_of_range(self):
rows = _RowCollection(element("a:tbl/a:tr"), None)
with pytest.raises(IndexError):
rows[-1]
with pytest.raises(IndexError):
rows[9]
# fixtures -------------------------------------------------------
@pytest.fixture(params=["a:tbl", "a:tbl/a:tr", "a:tbl/(a:tr, a:tr, a:tr)"])
def getitem_fixture(self, request):
tbl_cxml = request.param
tbl = element(tbl_cxml)
rows = _RowCollection(tbl, None)
expected_row_lst = tbl.findall(qn("a:tr"))
return rows, expected_row_lst
@pytest.fixture(params=["a:tbl", "a:tbl/a:tr", "a:tbl/(a:tr, a:tr, a:tr)"])
def iter_fixture(self, request):
tbl_cxml = request.param
tbl = element(tbl_cxml)
rows = _RowCollection(tbl, None)
expected_row_lst = tbl.findall(qn("a:tr"))
return rows, expected_row_lst
@pytest.fixture(params=[("a:tbl", 0), ("a:tbl/a:tr", 1), ("a:tbl/(a:tr, a:tr)", 2)])
def len_fixture(self, request):
tbl_cxml, expected_len = request.param
rows = _RowCollection(element(tbl_cxml), None)
return rows, expected_len
| [
"pptx.table._Cell",
"pptx.table._CellCollection",
"pptx.table._RowCollection",
"pptx.table._ColumnCollection",
"pptx.oxml.ns.qn",
"pytest.raises",
"pytest.fixture",
"pptx.util.Inches",
"pptx.table.Table",
"pptx.util.Pt"
] | [((4728, 5322), 'pytest.fixture', 'pytest.fixture', ([], {'params': "[('a:tbl', 'first_row', False), ('a:tbl/a:tblPr', 'first_row', False), (\n 'a:tbl/a:tblPr{firstRow=1}', 'first_row', True), (\n 'a:tbl/a:tblPr{firstRow=0}', 'first_row', False), (\n 'a:tbl/a:tblPr{firstRow=true}', 'first_row', True), (\n 'a:tbl/a:tblPr{firstRow=false}', 'first_row', False), (\n 'a:tbl/a:tblPr{firstCol=1}', 'first_col', True), (\n 'a:tbl/a:tblPr{lastRow=0}', 'last_row', False), (\n 'a:tbl/a:tblPr{lastCol=true}', 'last_col', True), (\n 'a:tbl/a:tblPr{bandRow=false}', 'horz_banding', False), (\n 'a:tbl/a:tblPr', 'vert_banding', False)]"}), "(params=[('a:tbl', 'first_row', False), ('a:tbl/a:tblPr',\n 'first_row', False), ('a:tbl/a:tblPr{firstRow=1}', 'first_row', True),\n ('a:tbl/a:tblPr{firstRow=0}', 'first_row', False), (\n 'a:tbl/a:tblPr{firstRow=true}', 'first_row', True), (\n 'a:tbl/a:tblPr{firstRow=false}', 'first_row', False), (\n 'a:tbl/a:tblPr{firstCol=1}', 'first_col', True), (\n 'a:tbl/a:tblPr{lastRow=0}', 'last_row', False), (\n 'a:tbl/a:tblPr{lastCol=true}', 'last_col', True), (\n 'a:tbl/a:tblPr{bandRow=false}', 'horz_banding', False), (\n 'a:tbl/a:tblPr', 'vert_banding', False)])\n", (4742, 5322), False, 'import pytest\n'), ((5651, 6490), 'pytest.fixture', 'pytest.fixture', ([], {'params': "[('a:tbl', 'first_row', True, 'a:tbl/a:tblPr{firstRow=1}'), ('a:tbl',\n 'first_row', False, 'a:tbl/a:tblPr'), ('a:tbl/a:tblPr', 'first_row', \n True, 'a:tbl/a:tblPr{firstRow=1}'), ('a:tbl/a:tblPr', 'first_row', \n False, 'a:tbl/a:tblPr'), ('a:tbl/a:tblPr{firstRow=true}', 'first_row', \n True, 'a:tbl/a:tblPr{firstRow=1}'), ('a:tbl/a:tblPr{firstRow=false}',\n 'first_row', False, 'a:tbl/a:tblPr'), ('a:tbl/a:tblPr{bandRow=1}',\n 'first_row', True, 'a:tbl/a:tblPr{bandRow=1,firstRow=1}'), ('a:tbl',\n 'first_col', True, 'a:tbl/a:tblPr{firstCol=1}'), ('a:tbl', 'last_row', \n True, 'a:tbl/a:tblPr{lastRow=1}'), ('a:tbl', 'last_col', True,\n 'a:tbl/a:tblPr{lastCol=1}'), ('a:tbl', 'horz_banding', True,\n 'a:tbl/a:tblPr{bandRow=1}'), ('a:tbl', 'vert_banding', True,\n 'a:tbl/a:tblPr{bandCol=1}')]"}), "(params=[('a:tbl', 'first_row', True,\n 'a:tbl/a:tblPr{firstRow=1}'), ('a:tbl', 'first_row', False,\n 'a:tbl/a:tblPr'), ('a:tbl/a:tblPr', 'first_row', True,\n 'a:tbl/a:tblPr{firstRow=1}'), ('a:tbl/a:tblPr', 'first_row', False,\n 'a:tbl/a:tblPr'), ('a:tbl/a:tblPr{firstRow=true}', 'first_row', True,\n 'a:tbl/a:tblPr{firstRow=1}'), ('a:tbl/a:tblPr{firstRow=false}',\n 'first_row', False, 'a:tbl/a:tblPr'), ('a:tbl/a:tblPr{bandRow=1}',\n 'first_row', True, 'a:tbl/a:tblPr{bandRow=1,firstRow=1}'), ('a:tbl',\n 'first_col', True, 'a:tbl/a:tblPr{firstCol=1}'), ('a:tbl', 'last_row', \n True, 'a:tbl/a:tblPr{lastRow=1}'), ('a:tbl', 'last_col', True,\n 'a:tbl/a:tblPr{lastCol=1}'), ('a:tbl', 'horz_banding', True,\n 'a:tbl/a:tblPr{bandRow=1}'), ('a:tbl', 'vert_banding', True,\n 'a:tbl/a:tblPr{bandCol=1}')])\n", (5665, 6490), False, 'import pytest\n'), ((13255, 13463), 'pytest.fixture', 'pytest.fixture', ([], {'params': "[('a:tc', None), ('a:tc/a:tcPr', None), ('a:tc/a:tcPr{anchor=t}',\n MSO_ANCHOR.TOP), ('a:tc/a:tcPr{anchor=ctr}', MSO_ANCHOR.MIDDLE), (\n 'a:tc/a:tcPr{anchor=b}', MSO_ANCHOR.BOTTOM)]"}), "(params=[('a:tc', None), ('a:tc/a:tcPr', None), (\n 'a:tc/a:tcPr{anchor=t}', MSO_ANCHOR.TOP), ('a:tc/a:tcPr{anchor=ctr}',\n MSO_ANCHOR.MIDDLE), ('a:tc/a:tcPr{anchor=b}', MSO_ANCHOR.BOTTOM)])\n", (13269, 13463), False, 'import pytest\n'), ((13718, 14069), 'pytest.fixture', 'pytest.fixture', ([], {'params': "[('a:tc', None, 'a:tc'), ('a:tc', 
MSO_ANCHOR.TOP, 'a:tc/a:tcPr{anchor=t}'),\n ('a:tc', MSO_ANCHOR.MIDDLE, 'a:tc/a:tcPr{anchor=ctr}'), ('a:tc',\n MSO_ANCHOR.BOTTOM, 'a:tc/a:tcPr{anchor=b}'), ('a:tc/a:tcPr{anchor=t}',\n MSO_ANCHOR.MIDDLE, 'a:tc/a:tcPr{anchor=ctr}'), (\n 'a:tc/a:tcPr{anchor=ctr}', None, 'a:tc/a:tcPr')]"}), "(params=[('a:tc', None, 'a:tc'), ('a:tc', MSO_ANCHOR.TOP,\n 'a:tc/a:tcPr{anchor=t}'), ('a:tc', MSO_ANCHOR.MIDDLE,\n 'a:tc/a:tcPr{anchor=ctr}'), ('a:tc', MSO_ANCHOR.BOTTOM,\n 'a:tc/a:tcPr{anchor=b}'), ('a:tc/a:tcPr{anchor=t}', MSO_ANCHOR.MIDDLE,\n 'a:tc/a:tcPr{anchor=ctr}'), ('a:tc/a:tcPr{anchor=ctr}', None,\n 'a:tc/a:tcPr')])\n", (13732, 14069), False, 'import pytest\n'), ((14544, 14636), 'pytest.fixture', 'pytest.fixture', ([], {'params': "[('a:tc', 1), ('a:tc{gridSpan=2}', 1), ('a:tc{rowSpan=42}', 42)]"}), "(params=[('a:tc', 1), ('a:tc{gridSpan=2}', 1), (\n 'a:tc{rowSpan=42}', 42)])\n", (14558, 14636), False, 'import pytest\n'), ((16335, 16424), 'pytest.fixture', 'pytest.fixture', ([], {'params': "['margin_left', 'margin_right', 'margin_top', 'margin_bottom']"}), "(params=['margin_left', 'margin_right', 'margin_top',\n 'margin_bottom'])\n", (16349, 16424), False, 'import pytest\n'), ((16670, 16912), 'pytest.fixture', 'pytest.fixture', ([], {'params': "[('a:tc', False), ('a:tc{gridSpan=1}', False), ('a:tc{hMerge=1}', False), (\n 'a:tc{gridSpan=2,vMerge=1}', False), ('a:tc{gridSpan=2}', True), (\n 'a:tc{rowSpan=2}', True), ('a:tc{gridSpan=2,rowSpan=3}', True)]"}), "(params=[('a:tc', False), ('a:tc{gridSpan=1}', False), (\n 'a:tc{hMerge=1}', False), ('a:tc{gridSpan=2,vMerge=1}', False), (\n 'a:tc{gridSpan=2}', True), ('a:tc{rowSpan=2}', True), (\n 'a:tc{gridSpan=2,rowSpan=3}', True)])\n", (16684, 16912), False, 'import pytest\n'), ((17164, 17389), 'pytest.fixture', 'pytest.fixture', ([], {'params': "[('a:tc', False), ('a:tc{gridSpan=2}', False), ('a:tc{hMerge=1}', True), (\n 'a:tc{gridSpan=2,vMerge=1}', True), ('a:tc{rowSpan=2,hMerge=true}', \n True), ('a:tc{gridSpan=2,rowSpan=3}', False)]"}), "(params=[('a:tc', False), ('a:tc{gridSpan=2}', False), (\n 'a:tc{hMerge=1}', True), ('a:tc{gridSpan=2,vMerge=1}', True), (\n 'a:tc{rowSpan=2,hMerge=true}', True), ('a:tc{gridSpan=2,rowSpan=3}', \n False)])\n", (17178, 17389), False, 'import pytest\n'), ((17630, 17987), 'pytest.fixture', 'pytest.fixture', ([], {'params': "[('a:tbl/(a:tr/(a:tc{gridSpan=2},a:tc{hMerge=1}),a:tr/(a:tc,a:tc))', 0, [0,\n 1]), ('a:tbl/(a:tr/(a:tc{rowSpan=2},a:tc),a:tr/(a:tc{vMerge=1},a:tc))',\n 0, [0, 2]), (\n 'a:tbl/(a:tr/(a:tc{gridSpan=2,rowSpan=2},a:tc{hMerge=1,rowSpan=2}),a:tr/(a:tc{gridSpan=2,vMerge=1},a:tc{hMerge=1,vMerge=1}))'\n , 0, [0, 1, 2, 3])]"}), "(params=[(\n 'a:tbl/(a:tr/(a:tc{gridSpan=2},a:tc{hMerge=1}),a:tr/(a:tc,a:tc))', 0, [\n 0, 1]), (\n 'a:tbl/(a:tr/(a:tc{rowSpan=2},a:tc),a:tr/(a:tc{vMerge=1},a:tc))', 0, [0,\n 2]), (\n 'a:tbl/(a:tr/(a:tc{gridSpan=2,rowSpan=2},a:tc{hMerge=1,rowSpan=2}),a:tr/(a:tc{gridSpan=2,vMerge=1},a:tc{hMerge=1,vMerge=1}))'\n , 0, [0, 1, 2, 3])])\n", (17644, 17987), False, 'import pytest\n'), ((18519, 18611), 'pytest.fixture', 'pytest.fixture', ([], {'params': "[('a:tc', 1), ('a:tc{rowSpan=2}', 1), ('a:tc{gridSpan=24}', 24)]"}), "(params=[('a:tc', 1), ('a:tc{rowSpan=2}', 1), (\n 'a:tc{gridSpan=24}', 24)])\n", (18533, 18611), False, 'import pytest\n'), ((20502, 20573), 'pytest.fixture', 'pytest.fixture', ([], {'params': "['a:tr', 'a:tr/a:tc', 'a:tr/(a:tc, a:tc, a:tc)']"}), "(params=['a:tr', 'a:tr/a:tc', 'a:tr/(a:tc, a:tc, a:tc)'])\n", (20516, 20573), False, 'import pytest\n'), ((21077, 21162), 
'pytest.fixture', 'pytest.fixture', ([], {'params': "[('a:tr', 0), ('a:tr/a:tc', 1), ('a:tr/(a:tc, a:tc)', 2)]"}), "(params=[('a:tr', 0), ('a:tr/a:tc', 1), ('a:tr/(a:tc, a:tc)', 2)]\n )\n", (21091, 21162), False, 'import pytest\n'), ((24380, 24508), 'pytest.fixture', 'pytest.fixture', ([], {'params': "['a:tbl/a:tblGrid', 'a:tbl/a:tblGrid/a:gridCol',\n 'a:tbl/a:tblGrid/(a:gridCol, a:gridCol, a:gridCol)']"}), "(params=['a:tbl/a:tblGrid', 'a:tbl/a:tblGrid/a:gridCol',\n 'a:tbl/a:tblGrid/(a:gridCol, a:gridCol, a:gridCol)'])\n", (24394, 24508), False, 'import pytest\n'), ((24823, 24951), 'pytest.fixture', 'pytest.fixture', ([], {'params': "['a:tbl/a:tblGrid', 'a:tbl/a:tblGrid/a:gridCol',\n 'a:tbl/a:tblGrid/(a:gridCol, a:gridCol, a:gridCol)']"}), "(params=['a:tbl/a:tblGrid', 'a:tbl/a:tblGrid/a:gridCol',\n 'a:tbl/a:tblGrid/(a:gridCol, a:gridCol, a:gridCol)'])\n", (24837, 24951), False, 'import pytest\n'), ((25263, 25394), 'pytest.fixture', 'pytest.fixture', ([], {'params': "[('a:tbl/a:tblGrid', 0), ('a:tbl/a:tblGrid/a:gridCol', 1), (\n 'a:tbl/a:tblGrid/(a:gridCol,a:gridCol)', 2)]"}), "(params=[('a:tbl/a:tblGrid', 0), ('a:tbl/a:tblGrid/a:gridCol',\n 1), ('a:tbl/a:tblGrid/(a:gridCol,a:gridCol)', 2)])\n", (25277, 25394), False, 'import pytest\n'), ((28847, 28921), 'pytest.fixture', 'pytest.fixture', ([], {'params': "['a:tbl', 'a:tbl/a:tr', 'a:tbl/(a:tr, a:tr, a:tr)']"}), "(params=['a:tbl', 'a:tbl/a:tr', 'a:tbl/(a:tr, a:tr, a:tr)'])\n", (28861, 28921), False, 'import pytest\n'), ((29163, 29237), 'pytest.fixture', 'pytest.fixture', ([], {'params': "['a:tbl', 'a:tbl/a:tr', 'a:tbl/(a:tr, a:tr, a:tr)']"}), "(params=['a:tbl', 'a:tbl/a:tr', 'a:tbl/(a:tr, a:tr, a:tr)'])\n", (29177, 29237), False, 'import pytest\n'), ((29476, 29564), 'pytest.fixture', 'pytest.fixture', ([], {'params': "[('a:tbl', 0), ('a:tbl/a:tr', 1), ('a:tbl/(a:tr, a:tr)', 2)]"}), "(params=[('a:tbl', 0), ('a:tbl/a:tr', 1), (\n 'a:tbl/(a:tr, a:tr)', 2)])\n", (29490, 29564), False, 'import pytest\n'), ((943, 960), 'pptx.table.Table', 'Table', (['tbl_', 'None'], {}), '(tbl_, None)\n', (948, 960), False, 'from pptx.table import _Cell, _CellCollection, _Column, _ColumnCollection, _Row, _RowCollection, Table\n'), ((1438, 1454), 'pptx.table.Table', 'Table', (['tbl', 'None'], {}), '(tbl, None)\n', (1443, 1454), False, 'from pptx.table import _Cell, _CellCollection, _Column, _ColumnCollection, _Row, _RowCollection, Table\n'), ((1918, 1934), 'pptx.table.Table', 'Table', (['tbl', 'None'], {}), '(tbl, None)\n', (1923, 1934), False, 'from pptx.table import _Cell, _CellCollection, _Column, _ColumnCollection, _Row, _RowCollection, Table\n'), ((2373, 2389), 'pptx.table.Table', 'Table', (['tbl', 'None'], {}), '(tbl, None)\n', (2378, 2389), False, 'from pptx.table import _Cell, _CellCollection, _Column, _ColumnCollection, _Row, _RowCollection, Table\n'), ((7274, 7289), 'pptx.table._Cell', '_Cell', (['tc', 'None'], {}), '(tc, None)\n', (7279, 7289), False, 'from pptx.table import _Cell, _CellCollection, _Column, _ColumnCollection, _Row, _RowCollection, Table\n'), ((7318, 7333), 'pptx.table._Cell', '_Cell', (['tc', 'None'], {}), '(tc, None)\n', (7323, 7333), False, 'from pptx.table import _Cell, _CellCollection, _Column, _ColumnCollection, _Row, _RowCollection, Table\n'), ((7363, 7384), 'pptx.table._Cell', '_Cell', (['other_tc', 'None'], {}), '(other_tc, None)\n', (7368, 7384), False, 'from pptx.table import _Cell, _CellCollection, _Column, _ColumnCollection, _Row, _RowCollection, Table\n'), ((7722, 7737), 'pptx.table._Cell', '_Cell', (['tc', 'None'], {}), 
'(tc, None)\n', (7727, 7737), False, 'from pptx.table import _Cell, _CellCollection, _Column, _ColumnCollection, _Row, _RowCollection, Table\n'), ((7960, 7975), 'pptx.table._Cell', '_Cell', (['tc', 'None'], {}), '(tc, None)\n', (7965, 7975), False, 'from pptx.table import _Cell, _CellCollection, _Column, _ColumnCollection, _Row, _RowCollection, Table\n'), ((10841, 10856), 'pptx.table._Cell', '_Cell', (['tc', 'None'], {}), '(tc, None)\n', (10846, 10856), False, 'from pptx.table import _Cell, _CellCollection, _Column, _ColumnCollection, _Row, _RowCollection, Table\n'), ((11072, 11087), 'pptx.table._Cell', '_Cell', (['tc', 'None'], {}), '(tc, None)\n', (11077, 11087), False, 'from pptx.table import _Cell, _CellCollection, _Column, _ColumnCollection, _Row, _RowCollection, Table\n'), ((11287, 11309), 'pptx.table._Cell', '_Cell', (['origin_tc', 'None'], {}), '(origin_tc, None)\n', (11292, 11309), False, 'from pptx.table import _Cell, _CellCollection, _Column, _ColumnCollection, _Row, _RowCollection, Table\n'), ((11700, 11715), 'pptx.table._Cell', '_Cell', (['tc', 'None'], {}), '(tc, None)\n', (11705, 11715), False, 'from pptx.table import _Cell, _CellCollection, _Column, _ColumnCollection, _Row, _RowCollection, Table\n'), ((12025, 12042), 'pptx.table._Cell', '_Cell', (['None', 'None'], {}), '(None, None)\n', (12030, 12042), False, 'from pptx.table import _Cell, _CellCollection, _Column, _ColumnCollection, _Row, _RowCollection, Table\n'), ((12239, 12256), 'pptx.table._Cell', '_Cell', (['None', 'None'], {}), '(None, None)\n', (12244, 12256), False, 'from pptx.table import _Cell, _CellCollection, _Column, _ColumnCollection, _Row, _RowCollection, Table\n'), ((20029, 20054), 'pptx.table._CellCollection', '_CellCollection', (['tr', 'None'], {}), '(tr, None)\n', (20044, 20054), False, 'from pptx.table import _Cell, _CellCollection, _Column, _ColumnCollection, _Row, _RowCollection, Table\n'), ((20740, 20765), 'pptx.table._CellCollection', '_CellCollection', (['tr', 'None'], {}), '(tr, None)\n', (20755, 20765), False, 'from pptx.table import _Cell, _CellCollection, _Column, _ColumnCollection, _Row, _RowCollection, Table\n'), ((24689, 24717), 'pptx.table._ColumnCollection', '_ColumnCollection', (['tbl', 'None'], {}), '(tbl, None)\n', (24706, 24717), False, 'from pptx.table import _Cell, _CellCollection, _Column, _ColumnCollection, _Row, _RowCollection, Table\n'), ((25129, 25157), 'pptx.table._ColumnCollection', '_ColumnCollection', (['tbl', 'None'], {}), '(tbl, None)\n', (25146, 25157), False, 'from pptx.table import _Cell, _CellCollection, _Column, _ColumnCollection, _Row, _RowCollection, Table\n'), ((29042, 29067), 'pptx.table._RowCollection', '_RowCollection', (['tbl', 'None'], {}), '(tbl, None)\n', (29056, 29067), False, 'from pptx.table import _Cell, _CellCollection, _Column, _ColumnCollection, _Row, _RowCollection, Table\n'), ((29355, 29380), 'pptx.table._RowCollection', '_RowCollection', (['tbl', 'None'], {}), '(tbl, None)\n', (29369, 29380), False, 'from pptx.table import _Cell, _CellCollection, _Column, _ColumnCollection, _Row, _RowCollection, Table\n'), ((8729, 8753), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (8742, 8753), False, 'import pytest\n'), ((9731, 9746), 'pptx.table._Cell', '_Cell', (['tc', 'None'], {}), '(tc, None)\n', (9736, 9746), False, 'from pptx.table import _Cell, _CellCollection, _Column, _ColumnCollection, _Row, _RowCollection, Table\n'), ((9748, 9769), 'pptx.table._Cell', '_Cell', (['other_tc', 'None'], {}), '(other_tc, None)\n', (9753, 9769), 
False, 'from pptx.table import _Cell, _CellCollection, _Column, _ColumnCollection, _Row, _RowCollection, Table\n'), ((10161, 10178), 'pptx.table._Cell', '_Cell', (['None', 'None'], {}), '(None, None)\n', (10166, 10178), False, 'from pptx.table import _Cell, _CellCollection, _Column, _ColumnCollection, _Row, _RowCollection, Table\n'), ((10180, 10197), 'pptx.table._Cell', '_Cell', (['None', 'None'], {}), '(None, None)\n', (10185, 10197), False, 'from pptx.table import _Cell, _CellCollection, _Column, _ColumnCollection, _Row, _RowCollection, Table\n'), ((10212, 10237), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (10225, 10237), False, 'import pytest\n'), ((10526, 10543), 'pptx.table._Cell', '_Cell', (['None', 'None'], {}), '(None, None)\n', (10531, 10543), False, 'from pptx.table import _Cell, _CellCollection, _Column, _ColumnCollection, _Row, _RowCollection, Table\n'), ((10545, 10562), 'pptx.table._Cell', '_Cell', (['None', 'None'], {}), '(None, None)\n', (10550, 10562), False, 'from pptx.table import _Cell, _CellCollection, _Column, _ColumnCollection, _Row, _RowCollection, Table\n'), ((10577, 10602), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (10590, 10602), False, 'import pytest\n'), ((11730, 11755), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (11743, 11755), False, 'import pytest\n'), ((20314, 20339), 'pytest.raises', 'pytest.raises', (['IndexError'], {}), '(IndexError)\n', (20327, 20339), False, 'import pytest\n'), ((20376, 20401), 'pytest.raises', 'pytest.raises', (['IndexError'], {}), '(IndexError)\n', (20389, 20401), False, 'import pytest\n'), ((24188, 24213), 'pytest.raises', 'pytest.raises', (['IndexError'], {}), '(IndexError)\n', (24201, 24213), False, 'import pytest\n'), ((24252, 24277), 'pytest.raises', 'pytest.raises', (['IndexError'], {}), '(IndexError)\n', (24265, 24277), False, 'import pytest\n'), ((28661, 28686), 'pytest.raises', 'pytest.raises', (['IndexError'], {}), '(IndexError)\n', (28674, 28686), False, 'import pytest\n'), ((28722, 28747), 'pytest.raises', 'pytest.raises', (['IndexError'], {}), '(IndexError)\n', (28735, 28747), False, 'import pytest\n'), ((29107, 29117), 'pptx.oxml.ns.qn', 'qn', (['"""a:tr"""'], {}), "('a:tr')\n", (29109, 29117), False, 'from pptx.oxml.ns import qn\n'), ((29420, 29430), 'pptx.oxml.ns.qn', 'qn', (['"""a:tr"""'], {}), "('a:tr')\n", (29422, 29430), False, 'from pptx.oxml.ns import qn\n'), ((14891, 14903), 'pptx.util.Inches', 'Inches', (['(0.09)'], {}), '(0.09)\n', (14897, 14903), False, 'from pptx.util import Inches, Length, Pt\n'), ((14962, 14974), 'pptx.util.Inches', 'Inches', (['(0.08)'], {}), '(0.08)\n', (14968, 14974), False, 'from pptx.util import Inches, Length, Pt\n'), ((15031, 15043), 'pptx.util.Inches', 'Inches', (['(0.07)'], {}), '(0.07)\n', (15037, 15043), False, 'from pptx.util import Inches, Length, Pt\n'), ((15103, 15115), 'pptx.util.Inches', 'Inches', (['(0.06)'], {}), '(0.06)\n', (15109, 15115), False, 'from pptx.util import Inches, Length, Pt\n'), ((15154, 15165), 'pptx.util.Inches', 'Inches', (['(0.1)'], {}), '(0.1)\n', (15160, 15165), False, 'from pptx.util import Inches, Length, Pt\n'), ((15212, 15223), 'pptx.util.Inches', 'Inches', (['(0.1)'], {}), '(0.1)\n', (15218, 15223), False, 'from pptx.util import Inches, Length, Pt\n'), ((15261, 15273), 'pptx.util.Inches', 'Inches', (['(0.05)'], {}), '(0.05)\n', (15267, 15273), False, 'from pptx.util import Inches, Length, Pt\n'), ((15321, 15333), 'pptx.util.Inches', 'Inches', 
(['(0.05)'], {}), '(0.05)\n', (15327, 15333), False, 'from pptx.util import Inches, Length, Pt\n'), ((15635, 15647), 'pptx.util.Inches', 'Inches', (['(0.08)'], {}), '(0.08)\n', (15641, 15647), False, 'from pptx.util import Inches, Length, Pt\n'), ((15714, 15726), 'pptx.util.Inches', 'Inches', (['(0.08)'], {}), '(0.08)\n', (15720, 15726), False, 'from pptx.util import Inches, Length, Pt\n'), ((15791, 15803), 'pptx.util.Inches', 'Inches', (['(0.08)'], {}), '(0.08)\n', (15797, 15803), False, 'from pptx.util import Inches, Length, Pt\n'), ((15871, 15883), 'pptx.util.Inches', 'Inches', (['(0.08)'], {}), '(0.08)\n', (15877, 15883), False, 'from pptx.util import Inches, Length, Pt\n'), ((22261, 22270), 'pptx.util.Inches', 'Inches', (['(1)'], {}), '(1)\n', (22267, 22270), False, 'from pptx.util import Inches, Length, Pt\n'), ((22295, 22301), 'pptx.util.Pt', 'Pt', (['(10)'], {}), '(10)\n', (22297, 22301), False, 'from pptx.util import Inches, Length, Pt\n'), ((22570, 22579), 'pptx.util.Inches', 'Inches', (['(1)'], {}), '(1)\n', (22576, 22579), False, 'from pptx.util import Inches, Length, Pt\n'), ((22639, 22648), 'pptx.util.Inches', 'Inches', (['(1)'], {}), '(1)\n', (22645, 22648), False, 'from pptx.util import Inches, Length, Pt\n'), ((26665, 26674), 'pptx.util.Inches', 'Inches', (['(1)'], {}), '(1)\n', (26671, 26674), False, 'from pptx.util import Inches, Length, Pt\n'), ((26694, 26700), 'pptx.util.Pt', 'Pt', (['(10)'], {}), '(10)\n', (26696, 26700), False, 'from pptx.util import Inches, Length, Pt\n'), ((26941, 26950), 'pptx.util.Inches', 'Inches', (['(1)'], {}), '(1)\n', (26947, 26950), False, 'from pptx.util import Inches, Length, Pt\n'), ((27000, 27009), 'pptx.util.Inches', 'Inches', (['(1)'], {}), '(1)\n', (27006, 27009), False, 'from pptx.util import Inches, Length, Pt\n')] |
from serial import Serial
from tqdm import tqdm
import binascii
import hashlib
import struct
import time
import sys
import os
def if_read(ser, data_len):
data = bytearray(0)
received = 0
while received < data_len:
tmp = ser.read(data_len - received)
if len(tmp) == 0:
break
else:
data += tmp
received += len(tmp)
if len(data) != data_len:
return (0, data)
return (1, data)
def reset(ser):
ser.setRTS(0)
time.sleep(0.2)
reset_cnt = 2
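    # Pulse RTS twice; the wiring is assumed to route RTS to the chip's reset line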
while reset_cnt > 0:
ser.setRTS(1)
time.sleep(0.005)
ser.setRTS(0)
time.sleep(0.1)
ser.setRTS(1)
time.sleep(0.005)
ser.setRTS(0)
time.sleep(0.005)
reset_cnt -= 1
def handshake(ser):
ser.setRTS(1)
time.sleep(0.2)
ser.setRTS(0)
time.sleep(0.05)
ser.setRTS(1)
ser.setDTR(1)
time.sleep(0.1)
ser.setDTR(0)
time.sleep(0.1)
def expect_ok(ser):
data = ser.read(2)
    if len(data) < 2 or data[0] != 0x4f or data[1] != 0x4b:
err = ser.read(2)
raise ValueError(binascii.hexlify(err))
def expect_data(ser):
expect_ok(ser)
    length = struct.unpack('<h', ser.read(2))[0]
    data = ser.read(length)
return data
def cmd_load_seg_header(ser, file):
header = file.read(0x10)
ser.write(b'\x17\x00\x10\x00' + header)
data = expect_data(ser)
seg_addr, seg_len = struct.unpack('<II', data[0:8])
print(f'{seg_len} bytes @ {hex(seg_addr)}')
return seg_len
def cmd_load_seg_data(ser, data):
ser.write(b'\x18\x00' + struct.pack('<H', len(data)) + data)
expect_ok(ser)
def cmd_load_boot_header(ser, file):
header = file.read(0xb0)
ser.write(b'\x11\x00\xb0\x00' + header)
expect_ok(ser)
def cmd_check_image(ser):
ser.write(b'\x19\x00\x00\x00')
expect_ok(ser)
def cmd_run_image(ser):
ser.write(b'\x1a\x00\x00\x00')
expect_ok(ser)
def load_image(ser, file):
image = open(file, 'rb')
cmd_load_boot_header(ser, image)
total = cmd_load_seg_header(ser, image)
sent = 0
with tqdm(total=total, unit='byte', unit_scale=True) as pbar:
while sent != total:
chunk = image.read(min(total-sent, 4080))
cmd_load_seg_data(ser, chunk)
sent = sent + len(chunk)
pbar.update(len(chunk))
cmd_check_image(ser)
cmd_run_image(ser)
def empty_buffer(ser):
timeout = ser.timeout
ser.timeout = 0.1
if_read(ser, 10000)
ser.timeout = timeout
def send_sync(ser):
empty_buffer(ser)
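    # ~6 ms worth of 0x55 sync bytes (10 wire bits per byte at the current baudrate)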
ser.write(b'\x55' * int(0.006 * ser.baudrate / 10))
expect_ok(ser)
def efl_write_cmd(ser, id, payload = b''):
plen = len(payload)
plen_data = struct.pack('<h', plen)
checksum = struct.pack('<h', sum(plen_data + payload) & 0xff)[0:1]
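    # Frame layout: [command id][1-byte checksum][2-byte little-endian length][payload]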
data = bytes([id]) + checksum + plen_data + payload
ser.write(data)
def efl_cmd_read_memory(ser, addr):
# there is a length parameter here but it doesn't seem to work correctly
efl_write_cmd(ser, 0x51, struct.pack('<II', addr, 0x4))
return expect_data(ser)
def efl_cmd_write_memory(ser, addr, data):
efl_write_cmd(ser, 0x50, struct.pack('<I', len(data)) + data)
expect_ok(ser)
def efl_cmd_read_jid(ser):
efl_write_cmd(ser, 0x36)
return expect_data(ser)
def efl_cmd_flash_erase(ser, addr, length):
    end_addr = addr + length - 1
    efl_write_cmd(ser, 0x30, struct.pack('<II', addr, end_addr))
    timeout = ser.timeout
    ser.timeout = 10.0
    expect_ok(ser)
    ser.timeout = timeout
    print(f'Erased {length} bytes @ {hex(addr)}')
def efl_cmd_flash_write(ser, addr, data):
efl_write_cmd(ser, 0x31, struct.pack('<I', addr) + data)
expect_ok(ser)
def efl_cmd_flash_write_check(ser):
efl_write_cmd(ser, 0x3a)
expect_ok(ser)
def efl_cmd_flash_xip_read_start(ser):
efl_write_cmd(ser, 0x60)
expect_ok(ser)
def efl_cmd_flash_xip_read_sha(ser, addr, length):
    efl_write_cmd(ser, 0x3e, struct.pack('<II', addr, length))
    return expect_data(ser)
def efl_cmd_flash_xip_read_finish(ser):
efl_write_cmd(ser, 0x61)
expect_ok(ser)
def efl_cmd_reset(ser):
efl_write_cmd(ser, 0x21)
expect_ok(ser)
def efl_program_img(ser, addr, data):
data_len = len(data)
efl_cmd_flash_erase(ser, addr, data_len)
print(f'Programming {data_len} bytes @ {hex(addr)}')
sent = 0
with tqdm(total=data_len, unit='byte', unit_scale=True) as pbar:
while sent != data_len:
buf_len = min(2048, data_len - sent)
buf = data[sent:sent + buf_len]
efl_cmd_flash_write(ser, addr + sent, buf)
sent = sent + buf_len
pbar.update(buf_len)
efl_cmd_flash_write_check(ser)
sha256sum = hashlib.sha256(data).digest()
efl_cmd_flash_xip_read_start(ser)
device_sum = efl_cmd_flash_xip_read_sha(ser, addr, data_len)
efl_cmd_flash_xip_read_finish(ser)
if device_sum != sha256sum:
print('Verification failed')
print('Host SHA256:', binascii.hexlify(sha256sum))
print('BL SHA256:', binascii.hexlify(device_sum))
return False
print('Verified by XIP SHA256 hash')
return True
def prepend_fw_header(img, header_file):
if img[0:4] == b'BFNP':
print('Image already has FW header')
return img
with open(header_file, 'rb') as f:
header = f.read()
img = header + (b'\xFF' * (4096-len(header))) + img
return img
def get_contrib_path(name):
    return os.path.join(os.path.dirname(os.path.realpath(__file__)), 'contrib', name)
def main():
if len(sys.argv) < 3:
print(f'Usage: {sys.argv[0]} <serial port> <firmware bin>')
sys.exit(1)
ser = Serial(sys.argv[1], baudrate=500000, timeout=2)
handshake(ser)
reset(ser)
send_sync(ser)
time.sleep(0.1)
print('Loading helper binary')
load_image(ser, get_contrib_path('eflash_loader_40m.bin'))
time.sleep(0.2)
print()
# at this point, the eflash loader binary is running with efl_ commands
# (which seems to work with a higher baudrate)
ser.baudrate = 2000000
send_sync(ser)
with open(sys.argv[2], 'rb') as f:
data = f.read()
data = prepend_fw_header(data, get_contrib_path('bootheader.bin'))
efl_program_img(ser, 0x10000, data)
efl_cmd_reset(ser)
if __name__ == "__main__":
main()
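# Example invocation (script name and device path are illustrative):
#   python3 flash.py /dev/ttyUSB0 firmware.bin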
| [
"hashlib.sha256",
"binascii.hexlify",
"tqdm.tqdm",
"struct.pack",
"time.sleep",
"os.path.realpath",
"struct.unpack",
"serial.Serial",
"sys.exit"
] | [((528, 543), 'time.sleep', 'time.sleep', (['(0.2)'], {}), '(0.2)\n', (538, 543), False, 'import time\n'), ((858, 873), 'time.sleep', 'time.sleep', (['(0.2)'], {}), '(0.2)\n', (868, 873), False, 'import time\n'), ((898, 914), 'time.sleep', 'time.sleep', (['(0.05)'], {}), '(0.05)\n', (908, 914), False, 'import time\n'), ((958, 973), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (968, 973), False, 'import time\n'), ((998, 1013), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (1008, 1013), False, 'import time\n'), ((1499, 1530), 'struct.unpack', 'struct.unpack', (['"""<II"""', 'data[0:8]'], {}), "('<II', data[0:8])\n", (1512, 1530), False, 'import struct\n'), ((2842, 2865), 'struct.pack', 'struct.pack', (['"""<h"""', 'plen'], {}), "('<h', plen)\n", (2853, 2865), False, 'import struct\n'), ((5929, 5976), 'serial.Serial', 'Serial', (['sys.argv[1]'], {'baudrate': '(500000)', 'timeout': '(2)'}), '(sys.argv[1], baudrate=500000, timeout=2)\n', (5935, 5976), False, 'from serial import Serial\n'), ((6038, 6053), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (6048, 6053), False, 'import time\n'), ((6159, 6174), 'time.sleep', 'time.sleep', (['(0.2)'], {}), '(0.2)\n', (6169, 6174), False, 'import time\n'), ((621, 638), 'time.sleep', 'time.sleep', (['(0.005)'], {}), '(0.005)\n', (631, 638), False, 'import time\n'), ((671, 686), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (681, 686), False, 'import time\n'), ((719, 736), 'time.sleep', 'time.sleep', (['(0.005)'], {}), '(0.005)\n', (729, 736), False, 'import time\n'), ((769, 786), 'time.sleep', 'time.sleep', (['(0.005)'], {}), '(0.005)\n', (779, 786), False, 'import time\n'), ((1260, 1284), 'struct.unpack', 'struct.unpack', (['"""<h"""', 'len'], {}), "('<h', len)\n", (1273, 1284), False, 'import struct\n'), ((2193, 2240), 'tqdm.tqdm', 'tqdm', ([], {'total': 'total', 'unit': '"""byte"""', 'unit_scale': '(True)'}), "(total=total, unit='byte', unit_scale=True)\n", (2197, 2240), False, 'from tqdm import tqdm\n'), ((3163, 3190), 'struct.pack', 'struct.pack', (['"""<II"""', 'addr', '(4)'], {}), "('<II', addr, 4)\n", (3174, 3190), False, 'import struct\n'), ((3550, 3584), 'struct.pack', 'struct.pack', (['"""<II"""', 'addr', 'end_addr'], {}), "('<II', addr, end_addr)\n", (3561, 3584), False, 'import struct\n'), ((4121, 4150), 'struct.pack', 'struct.pack', (['"""<II"""', 'addr', 'len'], {}), "('<II', addr, len)\n", (4132, 4150), False, 'import struct\n'), ((4548, 4598), 'tqdm.tqdm', 'tqdm', ([], {'total': 'data_len', 'unit': '"""byte"""', 'unit_scale': '(True)'}), "(total=data_len, unit='byte', unit_scale=True)\n", (4552, 4598), False, 'from tqdm import tqdm\n'), ((5904, 5915), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (5912, 5915), False, 'import sys\n'), ((1158, 1179), 'binascii.hexlify', 'binascii.hexlify', (['err'], {}), '(err)\n', (1174, 1179), False, 'import binascii\n'), ((3807, 3830), 'struct.pack', 'struct.pack', (['"""<I"""', 'addr'], {}), "('<I', addr)\n", (3818, 3830), False, 'import struct\n'), ((4916, 4936), 'hashlib.sha256', 'hashlib.sha256', (['data'], {}), '(data)\n', (4930, 4936), False, 'import hashlib\n'), ((5193, 5220), 'binascii.hexlify', 'binascii.hexlify', (['sha256sum'], {}), '(sha256sum)\n', (5209, 5220), False, 'import binascii\n'), ((5253, 5281), 'binascii.hexlify', 'binascii.hexlify', (['device_sum'], {}), '(device_sum)\n', (5269, 5281), False, 'import binascii\n'), ((5725, 5751), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (5741, 5751), False, 'import 
os\n')] |
import random
def estimate_pi(sims, needles):
trials = []
    for _ in range(sims):
trials.append(simulate_pi(needles))
mean = sum(trials) / sims
return mean
# sample the square [-1, 1] x [-1, 1]; the inscribed unit circle covers
# pi / 4 of the square's area
def simulate_pi(needles):
    hits = 0  # how many points land inside the unit circle
    for _ in range(needles):
        x = random.uniform(-1., 1.)
        y = random.uniform(-1., 1.)
if x*x + y*y <= 1.0:
hits += 1
return 4. * (hits / float(needles))
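# Minimal usage sketch (the sims/needles counts below are illustrative, not
# part of the original script):
if __name__ == '__main__':
    print(estimate_pi(10, 100000))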
| [
"random.uniform"
] | [((317, 342), 'random.uniform', 'random.uniform', (['(-1.0)', '(1.0)'], {}), '(-1.0, 1.0)\n', (331, 342), False, 'import random\n'), ((353, 376), 'random.uniform', 'random.uniform', (['(-1)', '(1.0)'], {}), '(-1, 1.0)\n', (367, 376), False, 'import random\n')] |
#!/usr/bin/env python3
#
# This file is part of the MicroPython project, http://micropython.org/
#
# The MIT License (MIT)
#
# Copyright (c) 2019 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""
Link .o files to .mpy
"""
import sys, os, struct, re
from elftools.elf import elffile
sys.path.append(os.path.dirname(__file__) + "/../py")
import makeqstrdata as qstrutil
# MicroPython constants
MPY_VERSION = 5
MP_NATIVE_ARCH_X86 = 1
MP_NATIVE_ARCH_X64 = 2
MP_NATIVE_ARCH_ARMV7M = 5
MP_NATIVE_ARCH_ARMV7EMSP = 7
MP_NATIVE_ARCH_ARMV7EMDP = 8
MP_NATIVE_ARCH_XTENSA = 9
MP_NATIVE_ARCH_XTENSAWIN = 10
MP_CODE_BYTECODE = 2
MP_CODE_NATIVE_VIPER = 4
MP_SCOPE_FLAG_VIPERRELOC = 0x20
MP_SCOPE_FLAG_VIPERRODATA = 0x40
MP_SCOPE_FLAG_VIPERBSS = 0x80
MICROPY_OPT_CACHE_MAP_LOOKUP_IN_BYTECODE = 1
MICROPY_PY_BUILTINS_STR_UNICODE = 2
MP_SMALL_INT_BITS = 31
QSTR_WINDOW_SIZE = 32
# ELF constants
R_386_32 = 1
R_X86_64_64 = 1
R_XTENSA_32 = 1
R_386_PC32 = 2
R_X86_64_PC32 = 2
R_ARM_ABS32 = 2
R_386_GOT32 = 3
R_ARM_REL32 = 3
R_386_PLT32 = 4
R_X86_64_PLT32 = 4
R_XTENSA_PLT = 6
R_386_GOTOFF = 9
R_386_GOTPC = 10
R_ARM_THM_CALL = 10
R_XTENSA_DIFF32 = 19
R_XTENSA_SLOT0_OP = 20
R_ARM_BASE_PREL = 25 # aka R_ARM_GOTPC
R_ARM_GOT_BREL = 26 # aka R_ARM_GOT32
R_ARM_THM_JUMP24 = 30
R_X86_64_REX_GOTPCRELX = 42
R_386_GOT32X = 43
################################################################################
# Architecture configuration
def asm_jump_x86(entry):
return struct.pack("<BI", 0xE9, entry - 5)
def asm_jump_arm(entry):
b_off = entry - 4
if b_off >> 11 == 0 or b_off >> 11 == -1:
# Signed value fits in 12 bits
b0 = 0xE000 | (b_off >> 1 & 0x07FF)
b1 = 0
else:
# Use large jump
b0 = 0xF000 | (b_off >> 12 & 0x07FF)
b1 = 0xB800 | (b_off >> 1 & 0x7FF)
return struct.pack("<HH", b0, b1)
def asm_jump_xtensa(entry):
jump_offset = entry - 4
jump_op = jump_offset << 6 | 6
return struct.pack("<BH", jump_op & 0xFF, jump_op >> 8)
class ArchData:
def __init__(self, name, mpy_feature, qstr_entry_size, word_size, arch_got, asm_jump):
self.name = name
self.mpy_feature = mpy_feature
self.qstr_entry_size = qstr_entry_size
self.word_size = word_size
self.arch_got = arch_got
self.asm_jump = asm_jump
self.separate_rodata = name == "EM_XTENSA" and qstr_entry_size == 4
ARCH_DATA = {
"x86": ArchData(
"EM_386",
MP_NATIVE_ARCH_X86 << 2
| MICROPY_PY_BUILTINS_STR_UNICODE
| MICROPY_OPT_CACHE_MAP_LOOKUP_IN_BYTECODE,
2,
4,
(R_386_PC32, R_386_GOT32, R_386_GOT32X),
asm_jump_x86,
),
"x64": ArchData(
"EM_X86_64",
MP_NATIVE_ARCH_X64 << 2
| MICROPY_PY_BUILTINS_STR_UNICODE
| MICROPY_OPT_CACHE_MAP_LOOKUP_IN_BYTECODE,
2,
8,
(R_X86_64_REX_GOTPCRELX,),
asm_jump_x86,
),
"armv7m": ArchData(
"EM_ARM",
MP_NATIVE_ARCH_ARMV7M << 2 | MICROPY_PY_BUILTINS_STR_UNICODE,
2,
4,
(R_ARM_GOT_BREL,),
asm_jump_arm,
),
"armv7emsp": ArchData(
"EM_ARM",
MP_NATIVE_ARCH_ARMV7EMSP << 2 | MICROPY_PY_BUILTINS_STR_UNICODE,
2,
4,
(R_ARM_GOT_BREL,),
asm_jump_arm,
),
"armv7emdp": ArchData(
"EM_ARM",
MP_NATIVE_ARCH_ARMV7EMDP << 2 | MICROPY_PY_BUILTINS_STR_UNICODE,
2,
4,
(R_ARM_GOT_BREL,),
asm_jump_arm,
),
"xtensa": ArchData(
"EM_XTENSA",
MP_NATIVE_ARCH_XTENSA << 2 | MICROPY_PY_BUILTINS_STR_UNICODE,
2,
4,
(R_XTENSA_32, R_XTENSA_PLT),
asm_jump_xtensa,
),
"xtensawin": ArchData(
"EM_XTENSA",
MP_NATIVE_ARCH_XTENSAWIN << 2 | MICROPY_PY_BUILTINS_STR_UNICODE,
4,
4,
(R_XTENSA_32, R_XTENSA_PLT),
asm_jump_xtensa,
),
}
################################################################################
# Helper functions
def align_to(value, align):
return (value + align - 1) & ~(align - 1)
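# e.g. align_to(5, 4) == 8; align must be a power of two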
def unpack_u24le(data, offset):
return data[offset] | data[offset + 1] << 8 | data[offset + 2] << 16
def pack_u24le(data, offset, value):
data[offset] = value & 0xFF
data[offset + 1] = value >> 8 & 0xFF
data[offset + 2] = value >> 16 & 0xFF
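# unpack_u24le/pack_u24le patch 24-bit Xtensa l32r opcodes in place
# (see the xtensa_l32r branch of do_relocation_text below)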
def xxd(text):
for i in range(0, len(text), 16):
print("{:08x}:".format(i), end="")
for j in range(4):
off = i + j * 4
if off < len(text):
d = int.from_bytes(text[off : off + 4], "little")
print(" {:08x}".format(d), end="")
print()
# Lower-numbered levels are enabled first; LOG_LEVEL_1 messages print by default
LOG_LEVEL_1 = 1
LOG_LEVEL_2 = 2
LOG_LEVEL_3 = 3
log_level = LOG_LEVEL_1
def log(level, msg):
if level <= log_level:
print(msg)
################################################################################
# Qstr extraction
def extract_qstrs(source_files):
def read_qstrs(f):
with open(f) as f:
vals = set()
objs = set()
for line in f:
while line:
m = re.search(r"MP_OBJ_NEW_QSTR\((MP_QSTR_[A-Za-z0-9_]*)\)", line)
if m:
objs.add(m.group(1))
else:
m = re.search(r"MP_QSTR_[A-Za-z0-9_]*", line)
if m:
vals.add(m.group())
if m:
s = m.span()
line = line[: s[0]] + line[s[1] :]
else:
line = ""
return vals, objs
static_qstrs = ["MP_QSTR_" + qstrutil.qstr_escape(q) for q in qstrutil.static_qstr_list]
qstr_vals = set()
qstr_objs = set()
for f in source_files:
vals, objs = read_qstrs(f)
qstr_vals.update(vals)
qstr_objs.update(objs)
qstr_vals.difference_update(static_qstrs)
return static_qstrs, qstr_vals, qstr_objs
################################################################################
# Linker
class LinkError(Exception):
pass
class Section:
def __init__(self, name, data, alignment, filename=None):
self.filename = filename
self.name = name
self.data = data
self.alignment = alignment
self.addr = 0
self.reloc = []
@staticmethod
def from_elfsec(elfsec, filename):
assert elfsec.header.sh_addr == 0
return Section(elfsec.name, elfsec.data(), elfsec.data_alignment, filename)
class GOTEntry:
def __init__(self, name, sym, link_addr=0):
self.name = name
self.sym = sym
self.offset = None
self.link_addr = link_addr
def isexternal(self):
return self.sec_name.startswith(".external")
def istext(self):
return self.sec_name.startswith(".text")
def isrodata(self):
return self.sec_name.startswith((".rodata", ".data.rel.ro"))
def isbss(self):
return self.sec_name.startswith(".bss")
class LiteralEntry:
def __init__(self, value, offset):
self.value = value
self.offset = offset
class LinkEnv:
def __init__(self, arch):
self.arch = ARCH_DATA[arch]
self.sections = [] # list of sections in order of output
self.literal_sections = [] # list of literal sections (xtensa only)
self.known_syms = {} # dict of symbols that are defined
self.unresolved_syms = [] # list of unresolved symbols
self.mpy_relocs = [] # list of relocations needed in the output .mpy file
def check_arch(self, arch_name):
if arch_name != self.arch.name:
raise LinkError("incompatible arch")
def print_sections(self):
log(LOG_LEVEL_2, "sections:")
for sec in self.sections:
log(LOG_LEVEL_2, " {:08x} {} size={}".format(sec.addr, sec.name, len(sec.data)))
def find_addr(self, name):
if name in self.known_syms:
s = self.known_syms[name]
return s.section.addr + s["st_value"]
raise LinkError("unknown symbol: {}".format(name))
def build_got_generic(env):
env.got_entries = {}
for sec in env.sections:
for r in sec.reloc:
s = r.sym
if not (
s.entry["st_info"]["bind"] == "STB_GLOBAL"
and r["r_info_type"] in env.arch.arch_got
):
continue
s_type = s.entry["st_info"]["type"]
assert s_type in ("STT_NOTYPE", "STT_FUNC", "STT_OBJECT"), s_type
assert s.name
if s.name in env.got_entries:
continue
env.got_entries[s.name] = GOTEntry(s.name, s)
def build_got_xtensa(env):
env.got_entries = {}
env.lit_entries = {}
env.xt_literals = {}
# Extract the values from the literal table
for sec in env.literal_sections:
assert len(sec.data) % env.arch.word_size == 0
# Look through literal relocations to find any global pointers that should be GOT entries
for r in sec.reloc:
s = r.sym
s_type = s.entry["st_info"]["type"]
assert s_type in ("STT_NOTYPE", "STT_FUNC", "STT_OBJECT", "STT_SECTION"), s_type
assert r["r_info_type"] in env.arch.arch_got
assert r["r_offset"] % env.arch.word_size == 0
# This entry is a global pointer
existing = struct.unpack_from("<I", sec.data, r["r_offset"])[0]
if s_type == "STT_SECTION":
assert r["r_addend"] == 0
name = "{}+0x{:x}".format(s.section.name, existing)
else:
assert existing == 0
name = s.name
if r["r_addend"] != 0:
name = "{}+0x{:x}".format(name, r["r_addend"])
idx = "{}+0x{:x}".format(sec.filename, r["r_offset"])
env.xt_literals[idx] = name
if name in env.got_entries:
# Deduplicate GOT entries
continue
env.got_entries[name] = GOTEntry(name, s, existing)
# Go through all literal entries finding those that aren't global pointers so must be actual literals
for i in range(0, len(sec.data), env.arch.word_size):
idx = "{}+0x{:x}".format(sec.filename, i)
if idx not in env.xt_literals:
# This entry is an actual literal
value = struct.unpack_from("<I", sec.data, i)[0]
env.xt_literals[idx] = value
if value in env.lit_entries:
# Deduplicate literals
continue
env.lit_entries[value] = LiteralEntry(
value, len(env.lit_entries) * env.arch.word_size
)
def populate_got(env):
# Compute GOT destination addresses
for got_entry in env.got_entries.values():
sym = got_entry.sym
if hasattr(sym, "resolved"):
sym = sym.resolved
sec = sym.section
addr = sym["st_value"]
got_entry.sec_name = sec.name
got_entry.link_addr += sec.addr + addr
# Get sorted GOT, sorted by external, text, rodata, bss so relocations can be combined
got_list = sorted(
env.got_entries.values(),
key=lambda g: g.isexternal() + 2 * g.istext() + 3 * g.isrodata() + 4 * g.isbss(),
)
# Layout and populate the GOT
offset = 0
for got_entry in got_list:
got_entry.offset = offset
offset += env.arch.word_size
o = env.got_section.addr + got_entry.offset
env.full_text[o : o + env.arch.word_size] = got_entry.link_addr.to_bytes(
env.arch.word_size, "little"
)
# Create a relocation for each GOT entry
for got_entry in got_list:
if got_entry.name == "mp_fun_table":
dest = "mp_fun_table"
elif got_entry.name.startswith("mp_fun_table+0x"):
dest = int(got_entry.name.split("+")[1], 16) // env.arch.word_size
elif got_entry.sec_name.startswith(".text"):
dest = ".text"
elif got_entry.sec_name.startswith(".rodata"):
dest = ".rodata"
elif got_entry.sec_name.startswith(".data.rel.ro"):
dest = ".data.rel.ro"
elif got_entry.sec_name.startswith(".bss"):
dest = ".bss"
else:
assert 0, (got_entry.name, got_entry.sec_name)
env.mpy_relocs.append((".text", env.got_section.addr + got_entry.offset, dest))
# Print out the final GOT
log(LOG_LEVEL_2, "GOT: {:08x}".format(env.got_section.addr))
for g in got_list:
log(
LOG_LEVEL_2,
" {:08x} {} -> {}+{:08x}".format(g.offset, g.name, g.sec_name, g.link_addr),
)
def populate_lit(env):
log(LOG_LEVEL_2, "LIT: {:08x}".format(env.lit_section.addr))
for lit_entry in env.lit_entries.values():
value = lit_entry.value
log(LOG_LEVEL_2, " {:08x} = {:08x}".format(lit_entry.offset, value))
o = env.lit_section.addr + lit_entry.offset
env.full_text[o : o + env.arch.word_size] = value.to_bytes(env.arch.word_size, "little")
def do_relocation_text(env, text_addr, r):
# Extract relevant info about symbol that's being relocated
s = r.sym
s_bind = s.entry["st_info"]["bind"]
s_shndx = s.entry["st_shndx"]
s_type = s.entry["st_info"]["type"]
r_offset = r["r_offset"] + text_addr
r_info_type = r["r_info_type"]
try:
# only for RELA sections
r_addend = r["r_addend"]
except KeyError:
r_addend = 0
# Default relocation type and name for logging
reloc_type = "le32"
log_name = None
if (
env.arch.name == "EM_386"
and r_info_type in (R_386_PC32, R_386_PLT32)
or env.arch.name == "EM_X86_64"
and r_info_type in (R_X86_64_PC32, R_X86_64_PLT32)
or env.arch.name == "EM_ARM"
and r_info_type in (R_ARM_REL32, R_ARM_THM_CALL, R_ARM_THM_JUMP24)
or s_bind == "STB_LOCAL"
and env.arch.name == "EM_XTENSA"
and r_info_type == R_XTENSA_32 # not GOT
):
# Standard relocation to fixed location within text/rodata
if hasattr(s, "resolved"):
s = s.resolved
sec = s.section
if env.arch.separate_rodata and sec.name.startswith(".rodata"):
raise LinkError("fixed relocation to rodata with rodata referenced via GOT")
if sec.name.startswith(".bss"):
raise LinkError(
"{}: fixed relocation to bss (bss variables can't be static)".format(s.filename)
)
if sec.name.startswith(".external"):
raise LinkError(
"{}: fixed relocation to external symbol: {}".format(s.filename, s.name)
)
addr = sec.addr + s["st_value"]
reloc = addr - r_offset + r_addend
if r_info_type in (R_ARM_THM_CALL, R_ARM_THM_JUMP24):
# Both relocations have the same bit pattern to rewrite:
# R_ARM_THM_CALL: bl
# R_ARM_THM_JUMP24: b.w
reloc_type = "thumb_b"
elif (
env.arch.name == "EM_386"
and r_info_type == R_386_GOTPC
or env.arch.name == "EM_ARM"
and r_info_type == R_ARM_BASE_PREL
):
# Relocation to GOT address itself
assert s.name == "_GLOBAL_OFFSET_TABLE_"
addr = env.got_section.addr
reloc = addr - r_offset + r_addend
elif (
env.arch.name == "EM_386"
and r_info_type in (R_386_GOT32, R_386_GOT32X)
or env.arch.name == "EM_ARM"
and r_info_type == R_ARM_GOT_BREL
):
        # Relocation pointing to GOT
reloc = addr = env.got_entries[s.name].offset
elif env.arch.name == "EM_X86_64" and r_info_type == R_X86_64_REX_GOTPCRELX:
        # Relocation pointing to GOT
got_entry = env.got_entries[s.name]
addr = env.got_section.addr + got_entry.offset
reloc = addr - r_offset + r_addend
elif env.arch.name == "EM_386" and r_info_type == R_386_GOTOFF:
# Relocation relative to GOT
addr = s.section.addr + s["st_value"]
reloc = addr - env.got_section.addr + r_addend
elif env.arch.name == "EM_XTENSA" and r_info_type == R_XTENSA_SLOT0_OP:
# Relocation pointing to GOT, xtensa specific
sec = s.section
if sec.name.startswith(".text"):
# it looks like R_XTENSA_SLOT0_OP into .text is already correctly relocated
return
assert sec.name.startswith(".literal"), sec.name
lit_idx = "{}+0x{:x}".format(sec.filename, r_addend)
lit_ptr = env.xt_literals[lit_idx]
if isinstance(lit_ptr, str):
addr = env.got_section.addr + env.got_entries[lit_ptr].offset
log_name = "GOT {}".format(lit_ptr)
else:
addr = env.lit_section.addr + env.lit_entries[lit_ptr].offset
log_name = "LIT"
reloc = addr - r_offset
reloc_type = "xtensa_l32r"
elif env.arch.name == "EM_XTENSA" and r_info_type == R_XTENSA_DIFF32:
if s.section.name.startswith(".text"):
# it looks like R_XTENSA_DIFF32 into .text is already correctly relocated
return
assert 0
else:
# Unknown/unsupported relocation
assert 0, r_info_type
# Write relocation
if reloc_type == "le32":
(existing,) = struct.unpack_from("<I", env.full_text, r_offset)
struct.pack_into("<I", env.full_text, r_offset, (existing + reloc) & 0xFFFFFFFF)
elif reloc_type == "thumb_b":
b_h, b_l = struct.unpack_from("<HH", env.full_text, r_offset)
existing = (b_h & 0x7FF) << 12 | (b_l & 0x7FF) << 1
if existing >= 0x400000: # 2's complement
existing -= 0x800000
new = existing + reloc
b_h = (b_h & 0xF800) | (new >> 12) & 0x7FF
b_l = (b_l & 0xF800) | (new >> 1) & 0x7FF
struct.pack_into("<HH", env.full_text, r_offset, b_h, b_l)
elif reloc_type == "xtensa_l32r":
l32r = unpack_u24le(env.full_text, r_offset)
assert l32r & 0xF == 1 # RI16 encoded l32r
l32r_imm16 = l32r >> 8
l32r_imm16 = (l32r_imm16 + reloc >> 2) & 0xFFFF
l32r = l32r & 0xFF | l32r_imm16 << 8
pack_u24le(env.full_text, r_offset, l32r)
else:
assert 0, reloc_type
# Log information about relocation
if log_name is None:
if s_type == "STT_SECTION":
log_name = s.section.name
else:
log_name = s.name
log(LOG_LEVEL_3, " {:08x} {} -> {:08x}".format(r_offset, log_name, addr))
def do_relocation_data(env, text_addr, r):
s = r.sym
s_type = s.entry["st_info"]["type"]
r_offset = r["r_offset"] + text_addr
r_info_type = r["r_info_type"]
try:
# only for RELA sections
r_addend = r["r_addend"]
except KeyError:
r_addend = 0
if (
env.arch.name == "EM_386"
and r_info_type == R_386_32
or env.arch.name == "EM_X86_64"
and r_info_type == R_X86_64_64
or env.arch.name == "EM_ARM"
and r_info_type == R_ARM_ABS32
or env.arch.name == "EM_XTENSA"
and r_info_type == R_XTENSA_32
):
# Relocation in data.rel.ro to internal/external symbol
if env.arch.word_size == 4:
struct_type = "<I"
elif env.arch.word_size == 8:
struct_type = "<Q"
sec = s.section
assert r_offset % env.arch.word_size == 0
addr = sec.addr + s["st_value"] + r_addend
if s_type == "STT_SECTION":
log_name = sec.name
else:
log_name = s.name
log(LOG_LEVEL_3, " {:08x} -> {} {:08x}".format(r_offset, log_name, addr))
if env.arch.separate_rodata:
data = env.full_rodata
else:
data = env.full_text
(existing,) = struct.unpack_from(struct_type, data, r_offset)
if sec.name.startswith((".text", ".rodata", ".data.rel.ro", ".bss")):
struct.pack_into(struct_type, data, r_offset, existing + addr)
kind = sec.name
elif sec.name == ".external.mp_fun_table":
assert addr == 0
kind = s.mp_fun_table_offset
else:
assert 0, sec.name
if env.arch.separate_rodata:
base = ".rodata"
else:
base = ".text"
env.mpy_relocs.append((base, r_offset, kind))
else:
# Unknown/unsupported relocation
assert 0, r_info_type
def load_object_file(env, felf):
with open(felf, "rb") as f:
elf = elffile.ELFFile(f)
env.check_arch(elf["e_machine"])
# Get symbol table
symtab = list(elf.get_section_by_name(".symtab").iter_symbols())
# Load needed sections from ELF file
sections_shndx = {} # maps elf shndx to Section object
for idx, s in enumerate(elf.iter_sections()):
if s.header.sh_type in ("SHT_PROGBITS", "SHT_NOBITS"):
if s.data_size == 0:
# Ignore empty sections
pass
elif s.name.startswith((".literal", ".text", ".rodata", ".data.rel.ro", ".bss")):
sec = Section.from_elfsec(s, felf)
sections_shndx[idx] = sec
if s.name.startswith(".literal"):
env.literal_sections.append(sec)
else:
env.sections.append(sec)
elif s.name.startswith(".data"):
raise LinkError("{}: {} non-empty".format(felf, s.name))
else:
# Ignore section
pass
elif s.header.sh_type in ("SHT_REL", "SHT_RELA"):
shndx = s.header.sh_info
if shndx in sections_shndx:
sec = sections_shndx[shndx]
sec.reloc_name = s.name
sec.reloc = list(s.iter_relocations())
for r in sec.reloc:
r.sym = symtab[r["r_info_sym"]]
# Link symbols to their sections, and update known and unresolved symbols
for sym in symtab:
sym.filename = felf
shndx = sym.entry["st_shndx"]
if shndx in sections_shndx:
# Symbol with associated section
sym.section = sections_shndx[shndx]
if sym["st_info"]["bind"] == "STB_GLOBAL":
# Defined global symbol
if sym.name in env.known_syms and not sym.name.startswith(
"__x86.get_pc_thunk."
):
raise LinkError("duplicate symbol: {}".format(sym.name))
env.known_syms[sym.name] = sym
elif sym.entry["st_shndx"] == "SHN_UNDEF" and sym["st_info"]["bind"] == "STB_GLOBAL":
# Undefined global symbol, needs resolving
env.unresolved_syms.append(sym)
def link_objects(env, native_qstr_vals_len, native_qstr_objs_len):
# Build GOT information
if env.arch.name == "EM_XTENSA":
build_got_xtensa(env)
else:
build_got_generic(env)
    # Create GOT section
got_size = len(env.got_entries) * env.arch.word_size
env.got_section = Section("GOT", bytearray(got_size), env.arch.word_size)
if env.arch.name == "EM_XTENSA":
env.sections.insert(0, env.got_section)
else:
env.sections.append(env.got_section)
# Create optional literal section
if env.arch.name == "EM_XTENSA":
lit_size = len(env.lit_entries) * env.arch.word_size
env.lit_section = Section("LIT", bytearray(lit_size), env.arch.word_size)
env.sections.insert(1, env.lit_section)
# Create section to contain mp_native_qstr_val_table
env.qstr_val_section = Section(
".text.QSTR_VAL",
bytearray(native_qstr_vals_len * env.arch.qstr_entry_size),
env.arch.qstr_entry_size,
)
env.sections.append(env.qstr_val_section)
# Create section to contain mp_native_qstr_obj_table
env.qstr_obj_section = Section(
".text.QSTR_OBJ", bytearray(native_qstr_objs_len * env.arch.word_size), env.arch.word_size
)
env.sections.append(env.qstr_obj_section)
# Resolve unknown symbols
mp_fun_table_sec = Section(".external.mp_fun_table", b"", 0)
fun_table = {
key: 68 + idx
for idx, key in enumerate(
[
"mp_type_type",
"mp_type_str",
"mp_type_list",
"mp_type_dict",
"mp_type_fun_builtin_0",
"mp_type_fun_builtin_1",
"mp_type_fun_builtin_2",
"mp_type_fun_builtin_3",
"mp_type_fun_builtin_var",
"mp_stream_read_obj",
"mp_stream_readinto_obj",
"mp_stream_unbuffered_readline_obj",
"mp_stream_write_obj",
]
)
}
for sym in env.unresolved_syms:
assert sym["st_value"] == 0
if sym.name == "_GLOBAL_OFFSET_TABLE_":
pass
elif sym.name == "mp_fun_table":
sym.section = Section(".external", b"", 0)
elif sym.name == "mp_native_qstr_val_table":
sym.section = env.qstr_val_section
elif sym.name == "mp_native_qstr_obj_table":
sym.section = env.qstr_obj_section
elif sym.name in env.known_syms:
sym.resolved = env.known_syms[sym.name]
else:
if sym.name in fun_table:
sym.section = mp_fun_table_sec
sym.mp_fun_table_offset = fun_table[sym.name]
else:
raise LinkError("{}: undefined symbol: {}".format(sym.filename, sym.name))
# Align sections, assign their addresses, and create full_text
env.full_text = bytearray(env.arch.asm_jump(8)) # dummy, to be filled in later
env.full_rodata = bytearray(0)
env.full_bss = bytearray(0)
for sec in env.sections:
if env.arch.separate_rodata and sec.name.startswith((".rodata", ".data.rel.ro")):
data = env.full_rodata
elif sec.name.startswith(".bss"):
data = env.full_bss
else:
data = env.full_text
sec.addr = align_to(len(data), sec.alignment)
data.extend(b"\x00" * (sec.addr - len(data)))
data.extend(sec.data)
env.print_sections()
populate_got(env)
if env.arch.name == "EM_XTENSA":
populate_lit(env)
# Fill in relocations
for sec in env.sections:
if not sec.reloc:
continue
log(
LOG_LEVEL_3,
"{}: {} relocations via {}:".format(sec.filename, sec.name, sec.reloc_name),
)
for r in sec.reloc:
if sec.name.startswith((".text", ".rodata")):
do_relocation_text(env, sec.addr, r)
elif sec.name.startswith(".data.rel.ro"):
do_relocation_data(env, sec.addr, r)
else:
assert 0, sec.name
################################################################################
# .mpy output
class MPYOutput:
def open(self, fname):
self.f = open(fname, "wb")
self.prev_base = -1
self.prev_offset = -1
def close(self):
self.f.close()
def write_bytes(self, buf):
self.f.write(buf)
def write_uint(self, val):
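        # Big-endian base-128 varint: the high bit marks every byte except the
        # last, e.g. write_uint(0x1234) emits b'\xa4\x34'.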
b = bytearray()
b.insert(0, val & 0x7F)
val >>= 7
while val:
b.insert(0, 0x80 | (val & 0x7F))
val >>= 7
self.write_bytes(b)
def write_qstr(self, s):
if s in qstrutil.static_qstr_list:
self.write_bytes(bytes([0, qstrutil.static_qstr_list.index(s) + 1]))
else:
s = bytes(s, "ascii")
self.write_uint(len(s) << 1)
self.write_bytes(s)
def write_reloc(self, base, offset, dest, n):
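        # Relocations are delta-encoded: the explicit offset is omitted when
        # this entry immediately follows the previous one in the same base
        # section.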
need_offset = not (base == self.prev_base and offset == self.prev_offset + 1)
self.prev_offset = offset + n - 1
if dest <= 2:
dest = (dest << 1) | (n > 1)
else:
assert 6 <= dest <= 127
assert n == 1
dest = dest << 1 | need_offset
assert 0 <= dest <= 0xFE, dest
self.write_bytes(bytes([dest]))
if need_offset:
if base == ".text":
base = 0
elif base == ".rodata":
base = 1
self.write_uint(offset << 1 | base)
if n > 1:
self.write_uint(n)
def build_mpy(env, entry_offset, fmpy, native_qstr_vals, native_qstr_objs):
# Write jump instruction to start of text
jump = env.arch.asm_jump(entry_offset)
env.full_text[: len(jump)] = jump
log(LOG_LEVEL_1, "arch: {}".format(env.arch.name))
log(LOG_LEVEL_1, "text size: {}".format(len(env.full_text)))
if len(env.full_rodata):
log(LOG_LEVEL_1, "rodata size: {}".format(len(env.full_rodata)))
log(LOG_LEVEL_1, "bss size: {}".format(len(env.full_bss)))
log(LOG_LEVEL_1, "GOT entries: {}".format(len(env.got_entries)))
# xxd(env.full_text)
out = MPYOutput()
out.open(fmpy)
# MPY: header
out.write_bytes(
bytearray(
[
ord("C"),
MPY_VERSION,
env.arch.mpy_feature,
MP_SMALL_INT_BITS,
QSTR_WINDOW_SIZE,
]
)
)
# MPY: kind/len
out.write_uint(len(env.full_text) << 2 | (MP_CODE_NATIVE_VIPER - MP_CODE_BYTECODE))
# MPY: machine code
out.write_bytes(env.full_text)
# MPY: n_qstr_link (assumes little endian)
out.write_uint(len(native_qstr_vals) + len(native_qstr_objs))
for q in range(len(native_qstr_vals)):
off = env.qstr_val_section.addr + q * env.arch.qstr_entry_size
out.write_uint(off << 2)
out.write_qstr(native_qstr_vals[q])
for q in range(len(native_qstr_objs)):
off = env.qstr_obj_section.addr + q * env.arch.word_size
out.write_uint(off << 2 | 3)
out.write_qstr(native_qstr_objs[q])
# MPY: scope_flags
scope_flags = MP_SCOPE_FLAG_VIPERRELOC
if len(env.full_rodata):
scope_flags |= MP_SCOPE_FLAG_VIPERRODATA
if len(env.full_bss):
scope_flags |= MP_SCOPE_FLAG_VIPERBSS
out.write_uint(scope_flags)
# MPY: n_obj
out.write_uint(0)
# MPY: n_raw_code
out.write_uint(0)
# MPY: rodata and/or bss
if len(env.full_rodata):
rodata_const_table_idx = 1
out.write_uint(len(env.full_rodata))
out.write_bytes(env.full_rodata)
if len(env.full_bss):
bss_const_table_idx = bool(env.full_rodata) + 1
out.write_uint(len(env.full_bss))
# MPY: relocation information
prev_kind = None
for base, addr, kind in env.mpy_relocs:
if isinstance(kind, str) and kind.startswith(".text"):
kind = 0
elif kind in (".rodata", ".data.rel.ro"):
if env.arch.separate_rodata:
kind = rodata_const_table_idx
else:
kind = 0
elif isinstance(kind, str) and kind.startswith(".bss"):
kind = bss_const_table_idx
elif kind == "mp_fun_table":
kind = 6
else:
kind = 7 + kind
assert addr % env.arch.word_size == 0, addr
offset = addr // env.arch.word_size
if kind == prev_kind and base == prev_base and offset == prev_offset + 1:
prev_n += 1
prev_offset += 1
else:
if prev_kind is not None:
out.write_reloc(prev_base, prev_offset - prev_n + 1, prev_kind, prev_n)
prev_kind = kind
prev_base = base
prev_offset = offset
prev_n = 1
if prev_kind is not None:
out.write_reloc(prev_base, prev_offset - prev_n + 1, prev_kind, prev_n)
# MPY: sentinel for end of relocations
out.write_bytes(b"\xff")
out.close()
################################################################################
# main
def do_preprocess(args):
if args.output is None:
assert args.files[0].endswith(".c")
args.output = args.files[0][:-1] + "config.h"
static_qstrs, qstr_vals, qstr_objs = extract_qstrs(args.files)
with open(args.output, "w") as f:
print(
"#include <stdint.h>\n"
"typedef uintptr_t mp_uint_t;\n"
"typedef intptr_t mp_int_t;\n"
"typedef uintptr_t mp_off_t;",
file=f,
)
for i, q in enumerate(static_qstrs):
print("#define %s (%u)" % (q, i + 1), file=f)
for i, q in enumerate(sorted(qstr_vals)):
print("#define %s (mp_native_qstr_val_table[%d])" % (q, i), file=f)
for i, q in enumerate(sorted(qstr_objs)):
print(
"#define MP_OBJ_NEW_QSTR_%s ((mp_obj_t)mp_native_qstr_obj_table[%d])" % (q, i),
file=f,
)
if args.arch == "xtensawin":
qstr_type = "uint32_t" # esp32 can only read 32-bit values from IRAM
else:
qstr_type = "uint16_t"
print("extern const {} mp_native_qstr_val_table[];".format(qstr_type), file=f)
print("extern const mp_uint_t mp_native_qstr_obj_table[];", file=f)
def do_link(args):
if args.output is None:
assert args.files[0].endswith(".o")
args.output = args.files[0][:-1] + "mpy"
native_qstr_vals = []
native_qstr_objs = []
if args.qstrs is not None:
with open(args.qstrs) as f:
for l in f:
m = re.match(r"#define MP_QSTR_([A-Za-z0-9_]*) \(mp_native_", l)
if m:
native_qstr_vals.append(m.group(1))
else:
m = re.match(r"#define MP_OBJ_NEW_QSTR_MP_QSTR_([A-Za-z0-9_]*)", l)
if m:
native_qstr_objs.append(m.group(1))
log(LOG_LEVEL_2, "qstr vals: " + ", ".join(native_qstr_vals))
log(LOG_LEVEL_2, "qstr objs: " + ", ".join(native_qstr_objs))
env = LinkEnv(args.arch)
try:
for file in args.files:
load_object_file(env, file)
link_objects(env, len(native_qstr_vals), len(native_qstr_objs))
build_mpy(env, env.find_addr("mpy_init"), args.output, native_qstr_vals, native_qstr_objs)
except LinkError as er:
print("LinkError:", er.args[0])
sys.exit(1)
def main():
import argparse
cmd_parser = argparse.ArgumentParser(description="Run scripts on the pyboard.")
cmd_parser.add_argument(
"--verbose", "-v", action="count", default=1, help="increase verbosity"
)
cmd_parser.add_argument("--arch", default="x64", help="architecture")
cmd_parser.add_argument("--preprocess", action="store_true", help="preprocess source files")
cmd_parser.add_argument("--qstrs", default=None, help="file defining additional qstrs")
cmd_parser.add_argument(
"--output", "-o", default=None, help="output .mpy file (default to input with .o->.mpy)"
)
cmd_parser.add_argument("files", nargs="+", help="input files")
args = cmd_parser.parse_args()
global log_level
log_level = args.verbose
if args.preprocess:
do_preprocess(args)
else:
do_link(args)
if __name__ == "__main__":
main()
| [
"makeqstrdata.qstr_escape",
"re.search",
"argparse.ArgumentParser",
"makeqstrdata.static_qstr_list.index",
"re.match",
"elftools.elf.elffile.ELFFile",
"struct.pack",
"os.path.dirname",
"struct.pack_into",
"sys.exit",
"struct.unpack_from"
] | [((2472, 2506), 'struct.pack', 'struct.pack', (['"""<BI"""', '(233)', '(entry - 5)'], {}), "('<BI', 233, entry - 5)\n", (2483, 2506), False, 'import sys, os, struct, re\n'), ((2835, 2861), 'struct.pack', 'struct.pack', (['"""<HH"""', 'b0', 'b1'], {}), "('<HH', b0, b1)\n", (2846, 2861), False, 'import sys, os, struct, re\n'), ((2966, 3013), 'struct.pack', 'struct.pack', (['"""<BH"""', '(jump_op & 255)', '(jump_op >> 8)'], {}), "('<BH', jump_op & 255, jump_op >> 8)\n", (2977, 3013), False, 'import sys, os, struct, re\n'), ((35711, 35777), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Run scripts on the pyboard."""'}), "(description='Run scripts on the pyboard.')\n", (35734, 35777), False, 'import argparse\n'), ((1319, 1344), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (1334, 1344), False, 'import sys, os, struct, re\n'), ((18522, 18571), 'struct.unpack_from', 'struct.unpack_from', (['"""<I"""', 'env.full_text', 'r_offset'], {}), "('<I', env.full_text, r_offset)\n", (18540, 18571), False, 'import sys, os, struct, re\n'), ((18580, 18658), 'struct.pack_into', 'struct.pack_into', (['"""<I"""', 'env.full_text', 'r_offset', '(existing + reloc & 4294967295)'], {}), "('<I', env.full_text, r_offset, existing + reloc & 4294967295)\n", (18596, 18658), False, 'import sys, os, struct, re\n'), ((21008, 21055), 'struct.unpack_from', 'struct.unpack_from', (['struct_type', 'data', 'r_offset'], {}), '(struct_type, data, r_offset)\n', (21026, 21055), False, 'import sys, os, struct, re\n'), ((21727, 21745), 'elftools.elf.elffile.ELFFile', 'elffile.ELFFile', (['f'], {}), '(f)\n', (21742, 21745), False, 'from elftools.elf import elffile\n'), ((6753, 6776), 'makeqstrdata.qstr_escape', 'qstrutil.qstr_escape', (['q'], {}), '(q)\n', (6773, 6776), True, 'import makeqstrdata as qstrutil\n'), ((18714, 18764), 'struct.unpack_from', 'struct.unpack_from', (['"""<HH"""', 'env.full_text', 'r_offset'], {}), "('<HH', env.full_text, r_offset)\n", (18732, 18764), False, 'import sys, os, struct, re\n'), ((19049, 19107), 'struct.pack_into', 'struct.pack_into', (['"""<HH"""', 'env.full_text', 'r_offset', 'b_h', 'b_l'], {}), "('<HH', env.full_text, r_offset, b_h, b_l)\n", (19065, 19107), False, 'import sys, os, struct, re\n'), ((21146, 21208), 'struct.pack_into', 'struct.pack_into', (['struct_type', 'data', 'r_offset', '(existing + addr)'], {}), '(struct_type, data, r_offset, existing + addr)\n', (21162, 21208), False, 'import sys, os, struct, re\n'), ((35647, 35658), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (35655, 35658), False, 'import sys, os, struct, re\n'), ((10527, 10576), 'struct.unpack_from', 'struct.unpack_from', (['"""<I"""', 'sec.data', "r['r_offset']"], {}), "('<I', sec.data, r['r_offset'])\n", (10545, 10576), False, 'import sys, os, struct, re\n'), ((34823, 34883), 're.match', 're.match', (['"""#define MP_QSTR_([A-Za-z0-9_]*) \\\\(mp_native_"""', 'l'], {}), "('#define MP_QSTR_([A-Za-z0-9_]*) \\\\(mp_native_', l)\n", (34831, 34883), False, 'import sys, os, struct, re\n'), ((6199, 6262), 're.search', 're.search', (['"""MP_OBJ_NEW_QSTR\\\\((MP_QSTR_[A-Za-z0-9_]*)\\\\)"""', 'line'], {}), "('MP_OBJ_NEW_QSTR\\\\((MP_QSTR_[A-Za-z0-9_]*)\\\\)', line)\n", (6208, 6262), False, 'import sys, os, struct, re\n'), ((11542, 11579), 'struct.unpack_from', 'struct.unpack_from', (['"""<I"""', 'sec.data', 'i'], {}), "('<I', sec.data, i)\n", (11560, 11579), False, 'import sys, os, struct, re\n'), ((35008, 35070), 're.match', 're.match', (['"""#define 
MP_OBJ_NEW_QSTR_MP_QSTR_([A-Za-z0-9_]*)"""', 'l'], {}), "('#define MP_OBJ_NEW_QSTR_MP_QSTR_([A-Za-z0-9_]*)', l)\n", (35016, 35070), False, 'import sys, os, struct, re\n'), ((6387, 6427), 're.search', 're.search', (['"""MP_QSTR_[A-Za-z0-9_]*"""', 'line'], {}), "('MP_QSTR_[A-Za-z0-9_]*', line)\n", (6396, 6427), False, 'import sys, os, struct, re\n'), ((28890, 28924), 'makeqstrdata.static_qstr_list.index', 'qstrutil.static_qstr_list.index', (['s'], {}), '(s)\n', (28921, 28924), True, 'import makeqstrdata as qstrutil\n')] |
from PyQt4 import QtGui
from ui_mant_libros_new import NewLibrosWindow
from ui_mant_libros_edit import EditLibrosWindow
from ui_mant_libros_id_edit import GetIdEditWindow
# Debug only
import inspect
class MenuLibros(QtGui.QWidget):
"""
    Menu window for editing Libros (books)
"""
def __init__(self):
super(MenuLibros, self).__init__()
self.createButtons()
self.setWindowTitle('Mantenimiento Libros')
self.setWindowIcon(QtGui.QIcon('images/user-plus.png'))
self.setWindowTitle("Mantenimiento Libros")
self.setGeometry(650, 300, 150, 100)
def createButtons(self):
btn_new_libros = QtGui.QPushButton('Nuevo')
btn_new_libros.clicked.connect(self.open_new_libros_window)
btn_edit_libros = QtGui.QPushButton('Editar')
btn_edit_libros.clicked.connect(self.open_edit_libros_window)
btn_list_libros = QtGui.QPushButton('Listar')
btn_list_libros.clicked.connect(self.close)
btn_delete_libros = QtGui.QPushButton('Eliminar')
btn_delete_libros.clicked.connect(self.close)
hbox = QtGui.QHBoxLayout()
hbox.addWidget(btn_new_libros)
hbox.addWidget(btn_edit_libros)
hbox.addWidget(btn_list_libros)
hbox.addWidget(btn_delete_libros)
vbox = QtGui.QVBoxLayout()
vbox.addLayout(hbox)
self.setLayout(vbox)
def open_new_libros_window(self):
self.new_libros_view = NewLibrosWindow()
self.new_libros_view.show()
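        # inspect.stack()[0][3] is the name of the current method (debug trace)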
print(inspect.stack()[0][3])
self.close()
def open_edit_libros_window(self):
self.edit_libros_view = GetIdEditWindow()
self.edit_libros_view.show()
print(inspect.stack()[0][3])
self.close()
def open_list_reserva_window(self):
# self.new_reserva_view = NewReserva()
# self.new_reserva_view.show()
print(inspect.stack()[0][3])
self.close()
def open_delete_reserva_window(self):
# self.new_reserva_view = NewReserva()
# self.new_reserva_view.show()
print(inspect.stack()[0][3])
self.close()
if __name__ == '__main__':
import sys
app = QtGui.QApplication(sys.argv)
mainWin = MenuLibros()
mainWin.show()
sys.exit(app.exec_())
| [
"ui_mant_libros_new.NewLibrosWindow",
"PyQt4.QtGui.QApplication",
"inspect.stack",
"PyQt4.QtGui.QPushButton",
"ui_mant_libros_id_edit.GetIdEditWindow",
"PyQt4.QtGui.QIcon",
"PyQt4.QtGui.QVBoxLayout",
"PyQt4.QtGui.QHBoxLayout"
] | [((2188, 2216), 'PyQt4.QtGui.QApplication', 'QtGui.QApplication', (['sys.argv'], {}), '(sys.argv)\n', (2206, 2216), False, 'from PyQt4 import QtGui\n'), ((658, 684), 'PyQt4.QtGui.QPushButton', 'QtGui.QPushButton', (['"""Nuevo"""'], {}), "('Nuevo')\n", (675, 684), False, 'from PyQt4 import QtGui\n'), ((780, 807), 'PyQt4.QtGui.QPushButton', 'QtGui.QPushButton', (['"""Editar"""'], {}), "('Editar')\n", (797, 807), False, 'from PyQt4 import QtGui\n'), ((905, 932), 'PyQt4.QtGui.QPushButton', 'QtGui.QPushButton', (['"""Listar"""'], {}), "('Listar')\n", (922, 932), False, 'from PyQt4 import QtGui\n'), ((1014, 1043), 'PyQt4.QtGui.QPushButton', 'QtGui.QPushButton', (['"""Eliminar"""'], {}), "('Eliminar')\n", (1031, 1043), False, 'from PyQt4 import QtGui\n'), ((1115, 1134), 'PyQt4.QtGui.QHBoxLayout', 'QtGui.QHBoxLayout', ([], {}), '()\n', (1132, 1134), False, 'from PyQt4 import QtGui\n'), ((1311, 1330), 'PyQt4.QtGui.QVBoxLayout', 'QtGui.QVBoxLayout', ([], {}), '()\n', (1328, 1330), False, 'from PyQt4 import QtGui\n'), ((1460, 1477), 'ui_mant_libros_new.NewLibrosWindow', 'NewLibrosWindow', ([], {}), '()\n', (1475, 1477), False, 'from ui_mant_libros_new import NewLibrosWindow\n'), ((1645, 1662), 'ui_mant_libros_id_edit.GetIdEditWindow', 'GetIdEditWindow', ([], {}), '()\n', (1660, 1662), False, 'from ui_mant_libros_id_edit import GetIdEditWindow\n'), ((469, 504), 'PyQt4.QtGui.QIcon', 'QtGui.QIcon', (['"""images/user-plus.png"""'], {}), "('images/user-plus.png')\n", (480, 504), False, 'from PyQt4 import QtGui\n'), ((1528, 1543), 'inspect.stack', 'inspect.stack', ([], {}), '()\n', (1541, 1543), False, 'import inspect\n'), ((1714, 1729), 'inspect.stack', 'inspect.stack', ([], {}), '()\n', (1727, 1729), False, 'import inspect\n'), ((1900, 1915), 'inspect.stack', 'inspect.stack', ([], {}), '()\n', (1913, 1915), False, 'import inspect\n'), ((2088, 2103), 'inspect.stack', 'inspect.stack', ([], {}), '()\n', (2101, 2103), False, 'import inspect\n')] |
import logging
import re
from anime_downloader.extractors.base_extractor import BaseExtractor
from anime_downloader.sites import helpers
logger = logging.getLogger(__name__)
class VidStream(BaseExtractor):
def _get_data(self):
QUALITIES = {
"360":[],
"480":[],
"720":[],
"1080":[],
}
url = self.url.replace('https:////','https://')
soup = helpers.get(url).text
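        # The embed page links to a vidstreaming.io download page that lists
        # per-quality mirror links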
regex = r'https://vidstreaming\.io/download\?[^"]*'
        download = re.search(regex, soup).group()
soup = helpers.soupify(helpers.get(download))
links = soup.select('div.mirror_link')[0].select('div.dowload > a')
for a in QUALITIES:
for b in links:
if a in b.text:
QUALITIES[a].append(b.get('href'))
stream_url = QUALITIES[self.quality[:-1]][0] if QUALITIES != {"360":[],"480":[],"720":[],"1080":[],} else links[0].get('href') #In case nothing is found
return {
'stream_url': stream_url,
'referer': download
}
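# --- Editor's sketch (not part of the extractor): the quality-selection
# fallback above, isolated with plain data. self.quality is assumed to be a
# string like '720p', hence the [:-1] strip. Note the original indexes [0]
# and would raise IndexError if some other bucket is filled while the
# requested one is empty; the fallback only covers the all-empty case.
QUALITIES = {"360": [], "480": [], "720": ["http://cdn.example/720.mp4"], "1080": []}
EMPTY = {"360": [], "480": [], "720": [], "1080": []}
quality = "720p"
fallback = "http://cdn.example/first-mirror.mp4"
stream_url = QUALITIES[quality[:-1]][0] if QUALITIES != EMPTY else fallback
print(stream_url)  # -> http://cdn.example/720.mp4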
| [
"logging.getLogger",
"anime_downloader.sites.helpers.get",
"re.search"
] | [((147, 174), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (164, 174), False, 'import logging\n'), ((410, 426), 'anime_downloader.sites.helpers.get', 'helpers.get', (['url'], {}), '(url)\n', (421, 426), False, 'from anime_downloader.sites import helpers\n'), ((585, 606), 'anime_downloader.sites.helpers.get', 'helpers.get', (['download'], {}), '(download)\n', (596, 606), False, 'from anime_downloader.sites import helpers\n'), ((511, 533), 're.search', 're.search', (['regex', 'soup'], {}), '(regex, soup)\n', (520, 533), False, 'import re\n')] |
# Copyright © 2020 Province of British Columbia
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test Suite to ensure the PPR Search Query schema is valid.
"""
import copy
from registry_schemas import validate
from registry_schemas.example_data.ppr import SEARCH_QUERY
def test_valid_search_query_ind_debtor():
"""Assert that the schema is performing as expected for a search by individual debtor."""
query = copy.deepcopy(SEARCH_QUERY)
query['type'] = 'INDIVIDUAL_DEBTOR'
del query['criteria']['debtorName']['business']
del query['criteria']['value']
del query['clientReferenceId']
del query['startDateTime']
del query['endDateTime']
is_valid, errors = validate(query, 'searchQuery', 'ppr')
if errors:
for err in errors:
print(err.message)
print(errors)
assert is_valid
def test_valid_search_query_bus_debtor():
"""Assert that the schema is performing as expected for a search by business debtor."""
query = copy.deepcopy(SEARCH_QUERY)
query['type'] = 'BUSINESS_DEBTOR'
del query['criteria']['debtorName']['first']
del query['criteria']['debtorName']['second']
del query['criteria']['debtorName']['last']
del query['criteria']['value']
is_valid, errors = validate(query, 'searchQuery', 'ppr')
if errors:
for err in errors:
print(err.message)
print(errors)
assert is_valid
def test_valid_search_query_airdot():
"""Assert that the schema is performing as expected for a search by aircraft DOT."""
query = copy.deepcopy(SEARCH_QUERY)
query['type'] = 'AIRCRAFT_DOT'
del query['criteria']['debtorName']
query['criteria']['value'] = 'CFYXW'
is_valid, errors = validate(query, 'searchQuery', 'ppr')
if errors:
for err in errors:
print(err.message)
print(errors)
assert is_valid
def test_valid_search_query_regnum():
"""Assert that the schema is performing as expected for a search by registration number."""
query = copy.deepcopy(SEARCH_QUERY)
query['type'] = 'REGISTRATION_NUMBER'
del query['criteria']['debtorName']
query['criteria']['value'] = '023001B'
is_valid, errors = validate(query, 'searchQuery', 'ppr')
if errors:
for err in errors:
print(err.message)
print(errors)
assert is_valid
def test_valid_search_query_mhrnum():
"""Assert that the schema is performing as expected for a search by MHR number."""
query = copy.deepcopy(SEARCH_QUERY)
query['type'] = 'MHR_NUMBER'
del query['criteria']['debtorName']
query['criteria']['value'] = '21324'
is_valid, errors = validate(query, 'searchQuery', 'ppr')
if errors:
for err in errors:
print(err.message)
print(errors)
assert is_valid
def test_valid_search_query_serialnum():
"""Assert that the schema is performing as expected for a search by serial number."""
query = copy.deepcopy(SEARCH_QUERY)
query['type'] = 'SERIAL_NUMBER'
del query['criteria']['debtorName']
query['criteria']['value'] = 'KM8J3CA46JU622994'
is_valid, errors = validate(query, 'searchQuery', 'ppr')
if errors:
for err in errors:
print(err.message)
print(errors)
assert is_valid
def test_invalid_search_query_missing_type():
"""Assert that an invalid search query fails - type is missing."""
query = copy.deepcopy(SEARCH_QUERY)
del query['type']
del query['criteria']['debtorName']['business']
del query['criteria']['value']
is_valid, errors = validate(query, 'searchQuery', 'ppr')
if errors:
for err in errors:
print(err.message)
print(errors)
assert not is_valid
def test_invalid_search_query_missing_criteria():
"""Assert that an invalid search query fails - criteria is missing."""
query = copy.deepcopy(SEARCH_QUERY)
del query['criteria']
is_valid, errors = validate(query, 'searchQuery', 'ppr')
if errors:
for err in errors:
print(err.message)
print(errors)
assert not is_valid
def test_invalid_search_query_type():
"""Assert that an invalid search query fails - type is invalid."""
query = copy.deepcopy(SEARCH_QUERY)
query['type'] = 'XXXXXXXX'
del query['criteria']['debtorName']['business']
del query['criteria']['value']
is_valid, errors = validate(query, 'searchQuery', 'ppr')
if errors:
for err in errors:
print(err.message)
print(errors)
assert not is_valid
def test_invalid_search_query_criteria():
"""Assert that an invalid search query fails - criteria is invalid."""
query = copy.deepcopy(SEARCH_QUERY)
del query['criteria']['debtorName']['business']
is_valid, errors = validate(query, 'searchQuery', 'ppr')
if errors:
for err in errors:
print(err.message)
print(errors)
assert not is_valid
def test_invalid_search_query_busname():
"""Assert that an invalid search query fails - business name is too short."""
query = copy.deepcopy(SEARCH_QUERY)
del query['criteria']['debtorName']['first']
del query['criteria']['debtorName']['second']
del query['criteria']['debtorName']['last']
del query['criteria']['value']
query['criteria']['debtorName']['business'] = 'XXXX'
is_valid, errors = validate(query, 'searchQuery', 'ppr')
if errors:
for err in errors:
print(err.message)
print(errors)
assert not is_valid
def test_invalid_search_query_value():
"""Assert that an invalid search query fails - value is too long."""
query = copy.deepcopy(SEARCH_QUERY)
del query['criteria']['debtorName']
query['criteria']['value'] = 'XxxxxxxxxxxxxxxxxxxxXxxxxxxxxxxxxxxxxxxxXxxxxxxxxxx'
is_valid, errors = validate(query, 'searchQuery', 'ppr')
if errors:
for err in errors:
print(err.message)
print(errors)
assert not is_valid
def test_invalid_search_query_debtor():
"""Assert that an invalid search query fails - debtor name is invalid."""
query = copy.deepcopy(SEARCH_QUERY)
del query['criteria']['value']
is_valid, errors = validate(query, 'searchQuery', 'ppr')
if errors:
for err in errors:
print(err.message)
print(errors)
assert not is_valid
def test_invalid_search_query_firstname():
"""Assert that an invalid search query fails - debtor first name is too long."""
query = copy.deepcopy(SEARCH_QUERY)
del query['criteria']['value']
del query['criteria']['debtorName']['business']
query['criteria']['debtorName']['first'] = 'XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX'
is_valid, errors = validate(query, 'searchQuery', 'ppr')
if errors:
for err in errors:
print(err.message)
print(errors)
assert not is_valid
def test_invalid_search_query_secondname():
"""Assert that an invalid search query fails - debtor second name is too long."""
query = copy.deepcopy(SEARCH_QUERY)
del query['criteria']['value']
del query['criteria']['debtorName']['business']
query['criteria']['debtorName']['second'] = 'XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX'
is_valid, errors = validate(query, 'searchQuery', 'ppr')
if errors:
for err in errors:
print(err.message)
print(errors)
assert not is_valid
def test_invalid_search_query_lastname():
"""Assert that an invalid search query fails - debtor last name is too long."""
query = copy.deepcopy(SEARCH_QUERY)
del query['criteria']['value']
del query['criteria']['debtorName']['business']
query['criteria']['debtorName']['last'] = 'XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX'
is_valid, errors = validate(query, 'searchQuery', 'ppr')
if errors:
for err in errors:
print(err.message)
print(errors)
assert not is_valid
def test_invalid_search_query_clientref():
"""Assert that an invalid search query fails - client reference id is too long."""
query = copy.deepcopy(SEARCH_QUERY)
del query['criteria']['value']
del query['criteria']['debtorName']['business']
query['clientReferenceId'] = 'XxxxxxxxxxXxxxxxxxxxX'
is_valid, errors = validate(query, 'searchQuery', 'ppr')
if errors:
for err in errors:
print(err.message)
print(errors)
assert not is_valid
def test_invalid_search_query_startts():
"""Assert that an invalid search query fails - start date time format is invalid."""
query = copy.deepcopy(SEARCH_QUERY)
del query['criteria']['value']
del query['criteria']['debtorName']['business']
query['startDateTime'] = 'Xxxxxxxxxx'
is_valid, errors = validate(query, 'searchQuery', 'ppr')
if errors:
for err in errors:
print(err.message)
print(errors)
assert not is_valid
def test_invalid_search_query_endts():
"""Assert that an invalid search query fails - end date time format is invalid."""
query = copy.deepcopy(SEARCH_QUERY)
del query['criteria']['value']
del query['criteria']['debtorName']['business']
query['endDateTime'] = 'Xxxxxxxxxx'
is_valid, errors = validate(query, 'searchQuery', 'ppr')
if errors:
for err in errors:
print(err.message)
print(errors)
assert not is_valid
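# --- Editor's sketch (reuses this module's imports; illustrative only, not
# part of the shipped suite): the invalid-query tests above all share the same
# deepcopy -> mutate -> validate shape, so the single-field failure cases
# could be collapsed with pytest.mark.parametrize:
import pytest

@pytest.mark.parametrize('field,value', [
    ('clientReferenceId', 'XxxxxxxxxxXxxxxxxxxxX'),
    ('startDateTime', 'Xxxxxxxxxx'),
    ('endDateTime', 'Xxxxxxxxxx'),
])
def test_invalid_search_query_single_field(field, value):
    """Assert that a query with one malformed top-level field fails."""
    query = copy.deepcopy(SEARCH_QUERY)
    del query['criteria']['value']
    del query['criteria']['debtorName']['business']
    query[field] = value
    is_valid, _ = validate(query, 'searchQuery', 'ppr')
    assert not is_valid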
| [
"registry_schemas.validate",
"copy.deepcopy"
] | [((921, 948), 'copy.deepcopy', 'copy.deepcopy', (['SEARCH_QUERY'], {}), '(SEARCH_QUERY)\n', (934, 948), False, 'import copy\n'), ((1195, 1232), 'registry_schemas.validate', 'validate', (['query', '"""searchQuery"""', '"""ppr"""'], {}), "(query, 'searchQuery', 'ppr')\n", (1203, 1232), False, 'from registry_schemas import validate\n'), ((1495, 1522), 'copy.deepcopy', 'copy.deepcopy', (['SEARCH_QUERY'], {}), '(SEARCH_QUERY)\n', (1508, 1522), False, 'import copy\n'), ((1767, 1804), 'registry_schemas.validate', 'validate', (['query', '"""searchQuery"""', '"""ppr"""'], {}), "(query, 'searchQuery', 'ppr')\n", (1775, 1804), False, 'from registry_schemas import validate\n'), ((2060, 2087), 'copy.deepcopy', 'copy.deepcopy', (['SEARCH_QUERY'], {}), '(SEARCH_QUERY)\n', (2073, 2087), False, 'import copy\n'), ((2228, 2265), 'registry_schemas.validate', 'validate', (['query', '"""searchQuery"""', '"""ppr"""'], {}), "(query, 'searchQuery', 'ppr')\n", (2236, 2265), False, 'from registry_schemas import validate\n'), ((2528, 2555), 'copy.deepcopy', 'copy.deepcopy', (['SEARCH_QUERY'], {}), '(SEARCH_QUERY)\n', (2541, 2555), False, 'import copy\n'), ((2705, 2742), 'registry_schemas.validate', 'validate', (['query', '"""searchQuery"""', '"""ppr"""'], {}), "(query, 'searchQuery', 'ppr')\n", (2713, 2742), False, 'from registry_schemas import validate\n'), ((2996, 3023), 'copy.deepcopy', 'copy.deepcopy', (['SEARCH_QUERY'], {}), '(SEARCH_QUERY)\n', (3009, 3023), False, 'import copy\n'), ((3162, 3199), 'registry_schemas.validate', 'validate', (['query', '"""searchQuery"""', '"""ppr"""'], {}), "(query, 'searchQuery', 'ppr')\n", (3170, 3199), False, 'from registry_schemas import validate\n'), ((3459, 3486), 'copy.deepcopy', 'copy.deepcopy', (['SEARCH_QUERY'], {}), '(SEARCH_QUERY)\n', (3472, 3486), False, 'import copy\n'), ((3640, 3677), 'registry_schemas.validate', 'validate', (['query', '"""searchQuery"""', '"""ppr"""'], {}), "(query, 'searchQuery', 'ppr')\n", (3648, 3677), False, 'from registry_schemas import validate\n'), ((3923, 3950), 'copy.deepcopy', 'copy.deepcopy', (['SEARCH_QUERY'], {}), '(SEARCH_QUERY)\n', (3936, 3950), False, 'import copy\n'), ((4084, 4121), 'registry_schemas.validate', 'validate', (['query', '"""searchQuery"""', '"""ppr"""'], {}), "(query, 'searchQuery', 'ppr')\n", (4092, 4121), False, 'from registry_schemas import validate\n'), ((4378, 4405), 'copy.deepcopy', 'copy.deepcopy', (['SEARCH_QUERY'], {}), '(SEARCH_QUERY)\n', (4391, 4405), False, 'import copy\n'), ((4456, 4493), 'registry_schemas.validate', 'validate', (['query', '"""searchQuery"""', '"""ppr"""'], {}), "(query, 'searchQuery', 'ppr')\n", (4464, 4493), False, 'from registry_schemas import validate\n'), ((4734, 4761), 'copy.deepcopy', 'copy.deepcopy', (['SEARCH_QUERY'], {}), '(SEARCH_QUERY)\n', (4747, 4761), False, 'import copy\n'), ((4904, 4941), 'registry_schemas.validate', 'validate', (['query', '"""searchQuery"""', '"""ppr"""'], {}), "(query, 'searchQuery', 'ppr')\n", (4912, 4941), False, 'from registry_schemas import validate\n'), ((5190, 5217), 'copy.deepcopy', 'copy.deepcopy', (['SEARCH_QUERY'], {}), '(SEARCH_QUERY)\n', (5203, 5217), False, 'import copy\n'), ((5294, 5331), 'registry_schemas.validate', 'validate', (['query', '"""searchQuery"""', '"""ppr"""'], {}), "(query, 'searchQuery', 'ppr')\n", (5302, 5331), False, 'from registry_schemas import validate\n'), ((5586, 5613), 'copy.deepcopy', 'copy.deepcopy', (['SEARCH_QUERY'], {}), '(SEARCH_QUERY)\n', (5599, 5613), False, 'import copy\n'), ((5877, 5914), 
'registry_schemas.validate', 'validate', (['query', '"""searchQuery"""', '"""ppr"""'], {}), "(query, 'searchQuery', 'ppr')\n", (5885, 5914), False, 'from registry_schemas import validate\n'), ((6158, 6185), 'copy.deepcopy', 'copy.deepcopy', (['SEARCH_QUERY'], {}), '(SEARCH_QUERY)\n', (6171, 6185), False, 'import copy\n'), ((6337, 6374), 'registry_schemas.validate', 'validate', (['query', '"""searchQuery"""', '"""ppr"""'], {}), "(query, 'searchQuery', 'ppr')\n", (6345, 6374), False, 'from registry_schemas import validate\n'), ((6624, 6651), 'copy.deepcopy', 'copy.deepcopy', (['SEARCH_QUERY'], {}), '(SEARCH_QUERY)\n', (6637, 6651), False, 'import copy\n'), ((6711, 6748), 'registry_schemas.validate', 'validate', (['query', '"""searchQuery"""', '"""ppr"""'], {}), "(query, 'searchQuery', 'ppr')\n", (6719, 6748), False, 'from registry_schemas import validate\n'), ((7008, 7035), 'copy.deepcopy', 'copy.deepcopy', (['SEARCH_QUERY'], {}), '(SEARCH_QUERY)\n', (7021, 7035), False, 'import copy\n'), ((7229, 7266), 'registry_schemas.validate', 'validate', (['query', '"""searchQuery"""', '"""ppr"""'], {}), "(query, 'searchQuery', 'ppr')\n", (7237, 7266), False, 'from registry_schemas import validate\n'), ((7528, 7555), 'copy.deepcopy', 'copy.deepcopy', (['SEARCH_QUERY'], {}), '(SEARCH_QUERY)\n', (7541, 7555), False, 'import copy\n'), ((7750, 7787), 'registry_schemas.validate', 'validate', (['query', '"""searchQuery"""', '"""ppr"""'], {}), "(query, 'searchQuery', 'ppr')\n", (7758, 7787), False, 'from registry_schemas import validate\n'), ((8046, 8073), 'copy.deepcopy', 'copy.deepcopy', (['SEARCH_QUERY'], {}), '(SEARCH_QUERY)\n', (8059, 8073), False, 'import copy\n'), ((8266, 8303), 'registry_schemas.validate', 'validate', (['query', '"""searchQuery"""', '"""ppr"""'], {}), "(query, 'searchQuery', 'ppr')\n", (8274, 8303), False, 'from registry_schemas import validate\n'), ((8565, 8592), 'copy.deepcopy', 'copy.deepcopy', (['SEARCH_QUERY'], {}), '(SEARCH_QUERY)\n', (8578, 8592), False, 'import copy\n'), ((8761, 8798), 'registry_schemas.validate', 'validate', (['query', '"""searchQuery"""', '"""ppr"""'], {}), "(query, 'searchQuery', 'ppr')\n", (8769, 8798), False, 'from registry_schemas import validate\n'), ((9060, 9087), 'copy.deepcopy', 'copy.deepcopy', (['SEARCH_QUERY'], {}), '(SEARCH_QUERY)\n', (9073, 9087), False, 'import copy\n'), ((9241, 9278), 'registry_schemas.validate', 'validate', (['query', '"""searchQuery"""', '"""ppr"""'], {}), "(query, 'searchQuery', 'ppr')\n", (9249, 9278), False, 'from registry_schemas import validate\n'), ((9536, 9563), 'copy.deepcopy', 'copy.deepcopy', (['SEARCH_QUERY'], {}), '(SEARCH_QUERY)\n', (9549, 9563), False, 'import copy\n'), ((9715, 9752), 'registry_schemas.validate', 'validate', (['query', '"""searchQuery"""', '"""ppr"""'], {}), "(query, 'searchQuery', 'ppr')\n", (9723, 9752), False, 'from registry_schemas import validate\n')] |
"""
Module to contain Pywork decorators
"""
__author__ = '<NAME>'
import re
import time
import itertools
import logging
log = logging.getLogger(__name__)
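# --- Editor's illustration only: the module body is truncated in this dump
# and the decorator below is NOT the author's code. It merely shows the kind
# of decorator the imports (time) and the module-level `log` above set up:
import functools

def timed(fn):
    """Log how long `fn` takes, at DEBUG level, via the module logger."""
    @functools.wraps(fn)
    def wrapper(*args, **kwargs):
        start = time.time()
        try:
            return fn(*args, **kwargs)
        finally:
            log.debug('%s took %.3fs', fn.__name__, time.time() - start)
    return wrapper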
| [
"logging.getLogger"
] | [((129, 156), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (146, 156), False, 'import logging\n')] |
from __future__ import unicode_literals
import pytest
from django.test import TestCase
from rest_framework import status
from rest_framework.authentication import BasicAuthentication
from rest_framework.decorators import (
action, api_view, authentication_classes, detail_route, list_route,
parser_classes, permission_classes, renderer_classes, schema,
throttle_classes
)
from rest_framework.parsers import JSONParser
from rest_framework.permissions import IsAuthenticated
from rest_framework.renderers import JSONRenderer
from rest_framework.response import Response
from rest_framework.schemas import AutoSchema
from rest_framework.test import APIRequestFactory
from rest_framework.throttling import UserRateThrottle
from rest_framework.views import APIView
class DecoratorTestCase(TestCase):
def setUp(self):
self.factory = APIRequestFactory()
def _finalize_response(self, request, response, *args, **kwargs):
response.request = request
return APIView.finalize_response(self, request, response, *args, **kwargs)
def test_api_view_incorrect(self):
"""
If @api_view is not applied correct, we should raise an assertion.
"""
@api_view
def view(request):
return Response()
request = self.factory.get('/')
self.assertRaises(AssertionError, view, request)
def test_api_view_incorrect_arguments(self):
"""
If @api_view is missing arguments, we should raise an assertion.
"""
with self.assertRaises(AssertionError):
@api_view('GET')
def view(request):
return Response()
def test_calling_method(self):
@api_view(['GET'])
def view(request):
return Response({})
request = self.factory.get('/')
response = view(request)
assert response.status_code == status.HTTP_200_OK
request = self.factory.post('/')
response = view(request)
assert response.status_code == status.HTTP_405_METHOD_NOT_ALLOWED
def test_calling_put_method(self):
@api_view(['GET', 'PUT'])
def view(request):
return Response({})
request = self.factory.put('/')
response = view(request)
assert response.status_code == status.HTTP_200_OK
request = self.factory.post('/')
response = view(request)
assert response.status_code == status.HTTP_405_METHOD_NOT_ALLOWED
def test_calling_patch_method(self):
@api_view(['GET', 'PATCH'])
def view(request):
return Response({})
request = self.factory.patch('/')
response = view(request)
assert response.status_code == status.HTTP_200_OK
request = self.factory.post('/')
response = view(request)
assert response.status_code == status.HTTP_405_METHOD_NOT_ALLOWED
def test_renderer_classes(self):
@api_view(['GET'])
@renderer_classes([JSONRenderer])
def view(request):
return Response({})
request = self.factory.get('/')
response = view(request)
assert isinstance(response.accepted_renderer, JSONRenderer)
def test_parser_classes(self):
@api_view(['GET'])
@parser_classes([JSONParser])
def view(request):
assert len(request.parsers) == 1
assert isinstance(request.parsers[0], JSONParser)
return Response({})
request = self.factory.get('/')
view(request)
def test_authentication_classes(self):
@api_view(['GET'])
@authentication_classes([BasicAuthentication])
def view(request):
assert len(request.authenticators) == 1
assert isinstance(request.authenticators[0], BasicAuthentication)
return Response({})
request = self.factory.get('/')
view(request)
def test_permission_classes(self):
@api_view(['GET'])
@permission_classes([IsAuthenticated])
def view(request):
return Response({})
request = self.factory.get('/')
response = view(request)
assert response.status_code == status.HTTP_403_FORBIDDEN
def test_throttle_classes(self):
class OncePerDayUserThrottle(UserRateThrottle):
rate = '1/day'
@api_view(['GET'])
@throttle_classes([OncePerDayUserThrottle])
def view(request):
return Response({})
request = self.factory.get('/')
response = view(request)
assert response.status_code == status.HTTP_200_OK
response = view(request)
assert response.status_code == status.HTTP_429_TOO_MANY_REQUESTS
def test_schema(self):
"""
Checks CustomSchema class is set on view
"""
class CustomSchema(AutoSchema):
pass
@api_view(['GET'])
@schema(CustomSchema())
def view(request):
return Response({})
assert isinstance(view.cls.schema, CustomSchema)
class ActionDecoratorTestCase(TestCase):
def test_defaults(self):
@action(detail=True)
def test_action(request):
"""Description"""
assert test_action.mapping == {'get': 'test_action'}
assert test_action.detail is True
assert test_action.url_path == 'test_action'
assert test_action.url_name == 'test-action'
assert test_action.kwargs == {
'name': 'Test action',
'description': 'Description',
}
def test_detail_required(self):
with pytest.raises(AssertionError) as excinfo:
@action()
def test_action(request):
raise NotImplementedError
assert str(excinfo.value) == "@action() missing required argument: 'detail'"
def test_method_mapping_http_methods(self):
# All HTTP methods should be mappable
@action(detail=False, methods=[])
def test_action():
raise NotImplementedError
for name in APIView.http_method_names:
def method():
raise NotImplementedError
# Python 2.x compatibility - cast __name__ to str
method.__name__ = str(name)
getattr(test_action.mapping, name)(method)
# ensure the mapping returns the correct method name
for name in APIView.http_method_names:
assert test_action.mapping[name] == name
def test_view_name_kwargs(self):
"""
'name' and 'suffix' are mutually exclusive kwargs used for generating
a view's display name.
"""
# by default, generate name from method
@action(detail=True)
def test_action(request):
raise NotImplementedError
assert test_action.kwargs == {
'description': None,
'name': '<NAME>',
}
# name kwarg supersedes name generation
@action(detail=True, name='<NAME>')
def test_action(request):
raise NotImplementedError
assert test_action.kwargs == {
'description': None,
'name': '<NAME>',
}
# suffix kwarg supersedes name generation
@action(detail=True, suffix='Suffix')
def test_action(request):
raise NotImplementedError
assert test_action.kwargs == {
'description': None,
'suffix': 'Suffix',
}
# name + suffix is a conflict.
with pytest.raises(TypeError) as excinfo:
action(detail=True, name='test name', suffix='Suffix')
assert str(excinfo.value) == "`name` and `suffix` are mutually exclusive arguments."
def test_method_mapping(self):
@action(detail=False)
def test_action(request):
raise NotImplementedError
@test_action.mapping.post
def test_action_post(request):
raise NotImplementedError
# The secondary handler methods should not have the action attributes
for name in ['mapping', 'detail', 'url_path', 'url_name', 'kwargs']:
assert hasattr(test_action, name) and not hasattr(test_action_post, name)
def test_method_mapping_already_mapped(self):
@action(detail=True)
def test_action(request):
raise NotImplementedError
msg = "Method 'get' has already been mapped to '.test_action'."
with self.assertRaisesMessage(AssertionError, msg):
@test_action.mapping.get
def test_action_get(request):
raise NotImplementedError
def test_method_mapping_overwrite(self):
@action(detail=True)
def test_action():
raise NotImplementedError
msg = ("Method mapping does not behave like the property decorator. You "
"cannot use the same method name for each mapping declaration.")
with self.assertRaisesMessage(AssertionError, msg):
@test_action.mapping.post
def test_action():
raise NotImplementedError
def test_detail_route_deprecation(self):
with pytest.warns(DeprecationWarning) as record:
@detail_route()
def view(request):
raise NotImplementedError
assert len(record) == 1
assert str(record[0].message) == (
"`detail_route` is deprecated and will be removed in "
"3.10 in favor of `action`, which accepts a `detail` bool. Use "
"`@action(detail=True)` instead."
)
def test_list_route_deprecation(self):
with pytest.warns(DeprecationWarning) as record:
@list_route()
def view(request):
raise NotImplementedError
assert len(record) == 1
assert str(record[0].message) == (
"`list_route` is deprecated and will be removed in "
"3.10 in favor of `action`, which accepts a `detail` bool. Use "
"`@action(detail=False)` instead."
)
def test_route_url_name_from_path(self):
# pre-3.8 behavior was to base the `url_name` off of the `url_path`
with pytest.warns(DeprecationWarning):
@list_route(url_path='foo_bar')
def view(request):
raise NotImplementedError
assert view.url_path == 'foo_bar'
assert view.url_name == 'foo-bar'
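# --- Editor's sketch: the @action method mapping exercised above, as it would
# appear on a real ViewSet (class, route and payload names are illustrative):
from rest_framework import viewsets

class PasswordViewSet(viewsets.ViewSet):
    @action(detail=True)
    def password(self, request, pk=None):
        return Response({'status': 'read'})

    @password.mapping.post
    def set_password(self, request, pk=None):
        # Shares the 'password' url_path/url_name but answers POST; per the
        # tests above, the secondary handler carries no action attributes.
        return Response({'status': 'set'})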
| [
"rest_framework.decorators.renderer_classes",
"rest_framework.decorators.permission_classes",
"rest_framework.decorators.list_route",
"rest_framework.decorators.authentication_classes",
"rest_framework.decorators.api_view",
"rest_framework.response.Response",
"rest_framework.decorators.throttle_classes",
"rest_framework.decorators.parser_classes",
"pytest.raises",
"rest_framework.decorators.detail_route",
"rest_framework.test.APIRequestFactory",
"rest_framework.decorators.action",
"rest_framework.views.APIView.finalize_response",
"pytest.warns"
] | [((856, 875), 'rest_framework.test.APIRequestFactory', 'APIRequestFactory', ([], {}), '()\n', (873, 875), False, 'from rest_framework.test import APIRequestFactory\n'), ((997, 1064), 'rest_framework.views.APIView.finalize_response', 'APIView.finalize_response', (['self', 'request', 'response', '*args'], {}), '(self, request, response, *args, **kwargs)\n', (1022, 1064), False, 'from rest_framework.views import APIView\n'), ((1714, 1731), 'rest_framework.decorators.api_view', 'api_view', (["['GET']"], {}), "(['GET'])\n", (1722, 1731), False, 'from rest_framework.decorators import action, api_view, authentication_classes, detail_route, list_route, parser_classes, permission_classes, renderer_classes, schema, throttle_classes\n'), ((2122, 2146), 'rest_framework.decorators.api_view', 'api_view', (["['GET', 'PUT']"], {}), "(['GET', 'PUT'])\n", (2130, 2146), False, 'from rest_framework.decorators import action, api_view, authentication_classes, detail_route, list_route, parser_classes, permission_classes, renderer_classes, schema, throttle_classes\n'), ((2539, 2565), 'rest_framework.decorators.api_view', 'api_view', (["['GET', 'PATCH']"], {}), "(['GET', 'PATCH'])\n", (2547, 2565), False, 'from rest_framework.decorators import action, api_view, authentication_classes, detail_route, list_route, parser_classes, permission_classes, renderer_classes, schema, throttle_classes\n'), ((2956, 2973), 'rest_framework.decorators.api_view', 'api_view', (["['GET']"], {}), "(['GET'])\n", (2964, 2973), False, 'from rest_framework.decorators import action, api_view, authentication_classes, detail_route, list_route, parser_classes, permission_classes, renderer_classes, schema, throttle_classes\n'), ((2983, 3015), 'rest_framework.decorators.renderer_classes', 'renderer_classes', (['[JSONRenderer]'], {}), '([JSONRenderer])\n', (2999, 3015), False, 'from rest_framework.decorators import action, api_view, authentication_classes, detail_route, list_route, parser_classes, permission_classes, renderer_classes, schema, throttle_classes\n'), ((3263, 3280), 'rest_framework.decorators.api_view', 'api_view', (["['GET']"], {}), "(['GET'])\n", (3271, 3280), False, 'from rest_framework.decorators import action, api_view, authentication_classes, detail_route, list_route, parser_classes, permission_classes, renderer_classes, schema, throttle_classes\n'), ((3290, 3318), 'rest_framework.decorators.parser_classes', 'parser_classes', (['[JSONParser]'], {}), '([JSONParser])\n', (3304, 3318), False, 'from rest_framework.decorators import action, api_view, authentication_classes, detail_route, list_route, parser_classes, permission_classes, renderer_classes, schema, throttle_classes\n'), ((3602, 3619), 'rest_framework.decorators.api_view', 'api_view', (["['GET']"], {}), "(['GET'])\n", (3610, 3619), False, 'from rest_framework.decorators import action, api_view, authentication_classes, detail_route, list_route, parser_classes, permission_classes, renderer_classes, schema, throttle_classes\n'), ((3629, 3674), 'rest_framework.decorators.authentication_classes', 'authentication_classes', (['[BasicAuthentication]'], {}), '([BasicAuthentication])\n', (3651, 3674), False, 'from rest_framework.decorators import action, api_view, authentication_classes, detail_route, list_route, parser_classes, permission_classes, renderer_classes, schema, throttle_classes\n'), ((3977, 3994), 'rest_framework.decorators.api_view', 'api_view', (["['GET']"], {}), "(['GET'])\n", (3985, 3994), False, 'from rest_framework.decorators import action, api_view, 
authentication_classes, detail_route, list_route, parser_classes, permission_classes, renderer_classes, schema, throttle_classes\n'), ((4004, 4041), 'rest_framework.decorators.permission_classes', 'permission_classes', (['[IsAuthenticated]'], {}), '([IsAuthenticated])\n', (4022, 4041), False, 'from rest_framework.decorators import action, api_view, authentication_classes, detail_route, list_route, parser_classes, permission_classes, renderer_classes, schema, throttle_classes\n'), ((4371, 4388), 'rest_framework.decorators.api_view', 'api_view', (["['GET']"], {}), "(['GET'])\n", (4379, 4388), False, 'from rest_framework.decorators import action, api_view, authentication_classes, detail_route, list_route, parser_classes, permission_classes, renderer_classes, schema, throttle_classes\n'), ((4398, 4440), 'rest_framework.decorators.throttle_classes', 'throttle_classes', (['[OncePerDayUserThrottle]'], {}), '([OncePerDayUserThrottle])\n', (4414, 4440), False, 'from rest_framework.decorators import action, api_view, authentication_classes, detail_route, list_route, parser_classes, permission_classes, renderer_classes, schema, throttle_classes\n'), ((4907, 4924), 'rest_framework.decorators.api_view', 'api_view', (["['GET']"], {}), "(['GET'])\n", (4915, 4924), False, 'from rest_framework.decorators import action, api_view, authentication_classes, detail_route, list_route, parser_classes, permission_classes, renderer_classes, schema, throttle_classes\n'), ((5156, 5175), 'rest_framework.decorators.action', 'action', ([], {'detail': '(True)'}), '(detail=True)\n', (5162, 5175), False, 'from rest_framework.decorators import action, api_view, authentication_classes, detail_route, list_route, parser_classes, permission_classes, renderer_classes, schema, throttle_classes\n'), ((5960, 5992), 'rest_framework.decorators.action', 'action', ([], {'detail': '(False)', 'methods': '[]'}), '(detail=False, methods=[])\n', (5966, 5992), False, 'from rest_framework.decorators import action, api_view, authentication_classes, detail_route, list_route, parser_classes, permission_classes, renderer_classes, schema, throttle_classes\n'), ((6722, 6741), 'rest_framework.decorators.action', 'action', ([], {'detail': '(True)'}), '(detail=True)\n', (6728, 6741), False, 'from rest_framework.decorators import action, api_view, authentication_classes, detail_route, list_route, parser_classes, permission_classes, renderer_classes, schema, throttle_classes\n'), ((6985, 7019), 'rest_framework.decorators.action', 'action', ([], {'detail': '(True)', 'name': '"""<NAME>"""'}), "(detail=True, name='<NAME>')\n", (6991, 7019), False, 'from rest_framework.decorators import action, api_view, authentication_classes, detail_route, list_route, parser_classes, permission_classes, renderer_classes, schema, throttle_classes\n'), ((7265, 7301), 'rest_framework.decorators.action', 'action', ([], {'detail': '(True)', 'suffix': '"""Suffix"""'}), "(detail=True, suffix='Suffix')\n", (7271, 7301), False, 'from rest_framework.decorators import action, api_view, authentication_classes, detail_route, list_route, parser_classes, permission_classes, renderer_classes, schema, throttle_classes\n'), ((7785, 7805), 'rest_framework.decorators.action', 'action', ([], {'detail': '(False)'}), '(detail=False)\n', (7791, 7805), False, 'from rest_framework.decorators import action, api_view, authentication_classes, detail_route, list_route, parser_classes, permission_classes, renderer_classes, schema, throttle_classes\n'), ((8292, 8311), 'rest_framework.decorators.action', 
'action', ([], {'detail': '(True)'}), '(detail=True)\n', (8298, 8311), False, 'from rest_framework.decorators import action, api_view, authentication_classes, detail_route, list_route, parser_classes, permission_classes, renderer_classes, schema, throttle_classes\n'), ((8693, 8712), 'rest_framework.decorators.action', 'action', ([], {'detail': '(True)'}), '(detail=True)\n', (8699, 8712), False, 'from rest_framework.decorators import action, api_view, authentication_classes, detail_route, list_route, parser_classes, permission_classes, renderer_classes, schema, throttle_classes\n'), ((1269, 1279), 'rest_framework.response.Response', 'Response', ([], {}), '()\n', (1277, 1279), False, 'from rest_framework.response import Response\n'), ((1587, 1602), 'rest_framework.decorators.api_view', 'api_view', (['"""GET"""'], {}), "('GET')\n", (1595, 1602), False, 'from rest_framework.decorators import action, api_view, authentication_classes, detail_route, list_route, parser_classes, permission_classes, renderer_classes, schema, throttle_classes\n'), ((1778, 1790), 'rest_framework.response.Response', 'Response', (['{}'], {}), '({})\n', (1786, 1790), False, 'from rest_framework.response import Response\n'), ((2193, 2205), 'rest_framework.response.Response', 'Response', (['{}'], {}), '({})\n', (2201, 2205), False, 'from rest_framework.response import Response\n'), ((2612, 2624), 'rest_framework.response.Response', 'Response', (['{}'], {}), '({})\n', (2620, 2624), False, 'from rest_framework.response import Response\n'), ((3062, 3074), 'rest_framework.response.Response', 'Response', (['{}'], {}), '({})\n', (3070, 3074), False, 'from rest_framework.response import Response\n'), ((3472, 3484), 'rest_framework.response.Response', 'Response', (['{}'], {}), '({})\n', (3480, 3484), False, 'from rest_framework.response import Response\n'), ((3851, 3863), 'rest_framework.response.Response', 'Response', (['{}'], {}), '({})\n', (3859, 3863), False, 'from rest_framework.response import Response\n'), ((4088, 4100), 'rest_framework.response.Response', 'Response', (['{}'], {}), '({})\n', (4096, 4100), False, 'from rest_framework.response import Response\n'), ((4487, 4499), 'rest_framework.response.Response', 'Response', (['{}'], {}), '({})\n', (4495, 4499), False, 'from rest_framework.response import Response\n'), ((5003, 5015), 'rest_framework.response.Response', 'Response', (['{}'], {}), '({})\n', (5011, 5015), False, 'from rest_framework.response import Response\n'), ((5626, 5655), 'pytest.raises', 'pytest.raises', (['AssertionError'], {}), '(AssertionError)\n', (5639, 5655), False, 'import pytest\n'), ((5681, 5689), 'rest_framework.decorators.action', 'action', ([], {}), '()\n', (5687, 5689), False, 'from rest_framework.decorators import action, api_view, authentication_classes, detail_route, list_route, parser_classes, permission_classes, renderer_classes, schema, throttle_classes\n'), ((7542, 7566), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (7555, 7566), False, 'import pytest\n'), ((7591, 7645), 'rest_framework.decorators.action', 'action', ([], {'detail': '(True)', 'name': '"""test name"""', 'suffix': '"""Suffix"""'}), "(detail=True, name='test name', suffix='Suffix')\n", (7597, 7645), False, 'from rest_framework.decorators import action, api_view, authentication_classes, detail_route, list_route, parser_classes, permission_classes, renderer_classes, schema, throttle_classes\n'), ((9171, 9203), 'pytest.warns', 'pytest.warns', (['DeprecationWarning'], {}), '(DeprecationWarning)\n', 
(9183, 9203), False, 'import pytest\n'), ((9228, 9242), 'rest_framework.decorators.detail_route', 'detail_route', ([], {}), '()\n', (9240, 9242), False, 'from rest_framework.decorators import action, api_view, authentication_classes, detail_route, list_route, parser_classes, permission_classes, renderer_classes, schema, throttle_classes\n'), ((9649, 9681), 'pytest.warns', 'pytest.warns', (['DeprecationWarning'], {}), '(DeprecationWarning)\n', (9661, 9681), False, 'import pytest\n'), ((9706, 9718), 'rest_framework.decorators.list_route', 'list_route', ([], {}), '()\n', (9716, 9718), False, 'from rest_framework.decorators import action, api_view, authentication_classes, detail_route, list_route, parser_classes, permission_classes, renderer_classes, schema, throttle_classes\n'), ((10202, 10234), 'pytest.warns', 'pytest.warns', (['DeprecationWarning'], {}), '(DeprecationWarning)\n', (10214, 10234), False, 'import pytest\n'), ((10249, 10279), 'rest_framework.decorators.list_route', 'list_route', ([], {'url_path': '"""foo_bar"""'}), "(url_path='foo_bar')\n", (10259, 10279), False, 'from rest_framework.decorators import action, api_view, authentication_classes, detail_route, list_route, parser_classes, permission_classes, renderer_classes, schema, throttle_classes\n'), ((1657, 1667), 'rest_framework.response.Response', 'Response', ([], {}), '()\n', (1665, 1667), False, 'from rest_framework.response import Response\n')] |
# Copyright (c) 2020, <NAME>, Honda Research Institute Europe GmbH, and
# Technical University of Darmstadt.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of <NAME>, Honda Research Institute Europe GmbH,
# or Technical University of Darmstadt, nor the names of its contributors may
# be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL <NAME>, HONDA RESEARCH INSTITUTE EUROPE GMBH,
# OR TECHNICAL UNIVERSITY OF DARMSTADT BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
# IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import mujoco_py
import numpy as np
import os.path as osp
from init_args_serializer import Serializable
from typing import Optional
import pyrado
from pyrado.environments.barrett_wam import (
goal_pos_init_sim_4dof,
goal_pos_init_sim_7dof,
init_qpos_des_4dof,
init_qpos_des_7dof,
act_space_bic_4dof,
act_space_bic_7dof,
wam_q_limits_up_7dof,
wam_q_limits_lo_7dof,
torque_space_wam_4dof,
torque_space_wam_7dof,
wam_pgains_7dof,
wam_dgains_7dof,
wam_pgains_4dof,
wam_dgains_4dof,
)
from pyrado.environments.mujoco.base import MujocoSimEnv
from pyrado.spaces.base import Space
from pyrado.spaces.box import BoxSpace
from pyrado.spaces.singular import SingularStateSpace
from pyrado.tasks.base import Task
from pyrado.tasks.condition_only import ConditionOnlyTask
from pyrado.tasks.desired_state import DesStateTask
from pyrado.tasks.final_reward import BestStateFinalRewTask, FinalRewTask, FinalRewMode
from pyrado.tasks.goalless import GoallessTask
from pyrado.tasks.masked import MaskedTask
from pyrado.tasks.parallel import ParallelTasks
from pyrado.tasks.reward_functions import ZeroPerStepRewFcn, ExpQuadrErrRewFcn, QuadrErrRewFcn
from pyrado.tasks.sequential import SequentialTasks
from pyrado.utils.data_types import EnvSpec
from pyrado.utils.input_output import print_cbt
class WAMBallInCupSim(MujocoSimEnv, Serializable):
"""
    WAM robotic arm from Barrett Technologies for the ball-in-the-cup task, controlled by a PD controller.
.. note::
When using the `reset()` function, always pass a meaningful `init_state`
.. seealso::
[1] https://github.com/psclklnk/self-paced-rl/tree/master/sprl/envs/ball_in_a_cup.py
"""
name: str = "wam-bic"
def __init__(
self,
num_dof: int,
frame_skip: int = 4,
dt: Optional[float] = None,
max_steps: int = pyrado.inf,
fixed_init_state: bool = True,
stop_on_collision: bool = True,
observe_ball: bool = False,
observe_cup: bool = False,
task_args: Optional[dict] = None,
):
"""
Constructor
        :param num_dof: number of degrees of freedom (4 or 7), depending on which Barrett WAM setup is being used
:param frame_skip: number of simulation frames for which the same action is held, results in a multiplier of
the time step size `dt`
:param dt: by default the time step size is the one from the mujoco config file multiplied by the number of
frame skips (legacy from OpenAI environments). By passing an explicit `dt` value, this can be
                   overwritten. A possible use case is when you know that you recorded a trajectory with a specific `dt`.
:param max_steps: max number of simulation time steps
:param fixed_init_state: enables/disables deterministic, fixed initial state
:param stop_on_collision: set the `failed` flag in the `dict` returned by `_mujoco_step()` to true, if the ball
                                  collides with something other than the desired parts of the cup. This causes the
episode to end. Keep in mind that in case of a negative step reward and no final
cost on failing, this might result in undesired behavior.
:param observe_ball: if `True`, include the 2-dim (x-z plane) cartesian ball position into the observation
:param observe_cup: if `True`, include the 2-dim (x-z plane) cartesian cup position into the observation
:param task_args: arguments for the task construction
"""
Serializable._init(self, locals())
self.fixed_init_state = fixed_init_state
self.observe_ball = observe_ball
self.observe_cup = observe_cup
# Initialize num DoF specific variables
self._num_dof = num_dof
if num_dof == 4:
graph_file_name = "wam_4dof_bic.xml"
self.qpos_des_init = init_qpos_des_4dof
self.p_gains = wam_pgains_4dof
self.d_gains = wam_dgains_4dof
init_ball_pos = np.array([0.723, 0.0, 1.168])
init_cup_goal = goal_pos_init_sim_4dof
elif num_dof == 7:
graph_file_name = "wam_7dof_bic.xml"
self.qpos_des_init = init_qpos_des_7dof
self.p_gains = wam_pgains_7dof
self.d_gains = wam_dgains_7dof
init_ball_pos = np.array([0.828, 0.0, 1.131])
init_cup_goal = goal_pos_init_sim_7dof
else:
raise pyrado.ValueErr(given=num_dof, eq_constraint="4 or 7")
model_path = osp.join(pyrado.MUJOCO_ASSETS_DIR, graph_file_name)
super().__init__(model_path, frame_skip, dt, max_steps, task_args)
# Actual initial joint position (when the WAM moved to the home position)
if num_dof == 4:
self.init_qpos[:4] = np.array([0.0, 0.63, 0.0, 1.27])
self.init_qpos[4] = -0.34 # angle of the first rope segment relative to the cup bottom plate
else:
self.init_qpos[:7] = np.array([0.0, 0.65, 0.0, 1.41, 0.0, -0.28, -1.57])
self.init_qpos[7] = -0.21 # angle of the first rope segment relative to the cup bottom plate
# Set the actual stable initial position. This position would be reached after some time using the internal
# PD controller to stabilize at self._qpos_des_init.
# The initial position of the ball in cartesian coordinates
self._init_state = np.concatenate([self.init_qpos, self.init_qvel, init_ball_pos, init_cup_goal])
if self.fixed_init_state:
self._init_space = SingularStateSpace(self._init_state)
else:
            # Add plus/minus a small margin (0.1 to 1 deg, per joint) to the motor joints
init_state_up = self._init_state.copy()
init_state_up[: self._num_dof] += np.pi / 180 * np.array([0.1, 1, 0.5, 1.0, 0.1, 1.0, 1.0])[: self._num_dof]
init_state_lo = self._init_state.copy()
init_state_lo[: self._num_dof] -= np.pi / 180 * np.array([0.1, 1, 0.5, 1.0, 0.1, 1.0, 1.0])[: self._num_dof]
self._init_space = BoxSpace(init_state_lo, init_state_up)
# Bodies to check fo collision
self._collision_bodies = [
"wam/base_link",
"wam/shoulder_yaw_link",
"wam/shoulder_pitch_link",
"wam/upper_arm_link",
"wam/forearm_link",
"wrist_palm_link",
"wam/wrist_pitch_link",
"wam/wrist_yaw_link",
]
if self._num_dof == 4:
self._collision_bodies = self._collision_bodies[:6]
# We access a private attribute since a method like 'model.geom_names[geom_id]' cannot be used because
# not every geom has a name
self._collision_geom_ids = [self.model._geom_name2id[name] for name in ["cup_geom1", "cup_geom2"]]
self.stop_on_collision = stop_on_collision
self.camera_config = dict(
distance=2.7,
trackbodyid=0, # id of the body to track
elevation=-30, # camera rotation around the axis in the plane
azimuth=-90, # camera rotation around the camera's vertical axis
)
@property
def num_dof(self) -> int:
""" Get the number of degrees of freedom. """
return self._num_dof
@property
def torque_space(self) -> Space:
""" Get the space of joint torques. """
return torque_space_wam_7dof if self._num_dof == 7 else torque_space_wam_4dof
@property
def state_space(self) -> Space:
# The state space has the same shape as the init space (including ball and cup)
state_shape = np.concatenate([self.init_qpos, self.init_qvel, np.empty(3), np.empty(3)]).shape
state_lo, state_up = np.full(state_shape, -pyrado.inf), np.full(state_shape, pyrado.inf)
# Ensure that joint limits of the arm are not reached (5 deg safety margin)
state_lo[: self._num_dof] = wam_q_limits_lo_7dof[: self._num_dof]
state_up[: self._num_dof] = wam_q_limits_up_7dof[: self._num_dof]
return BoxSpace(state_lo, state_up)
@property
def obs_space(self) -> Space:
# Observing the normalized time and optionally the cup and ball position
obs_lo, obs_up, labels = [0.0], [1.0], ["t"]
if self.observe_ball:
obs_lo.extend([-3.0, -3.0])
obs_up.extend([3.0, 3.0])
labels.extend(["ball_x", "ball_z"])
if self.observe_cup:
obs_lo.extend([-3.0, -3.0])
obs_up.extend([3.0, 3.0])
labels.extend(["cup_x", "cup_z"])
return BoxSpace(obs_lo, obs_up, labels=labels)
@property
def act_space(self) -> Space:
# Running a PD controller on joint positions and velocities
return act_space_bic_7dof if self._num_dof == 7 else act_space_bic_4dof
@classmethod
def get_nominal_domain_param(cls, num_dof: int = 7) -> dict:
if num_dof == 7:
return dict(
cup_scale=1.0, # scaling factor for the radius of the cup [-] (should be >0.65)
rope_length=0.41, # length of the rope [m]
ball_mass=0.024, # mass of the ball [kg]
joint_1_damping=0.05, # damping of motor joints [N/s] (default value is small)
joint_2_damping=0.05, # damping of motor joints [N/s] (default value is small)
joint_3_damping=0.05, # damping of motor joints [N/s] (default value is small)
joint_4_damping=0.05, # damping of motor joints [N/s] (default value is small)
joint_5_damping=0.05, # damping of motor joints [N/s] (default value is small)
joint_6_damping=0.05, # damping of motor joints [N/s] (default value is small)
joint_7_damping=0.05, # damping of motor joints [N/s] (default value is small)
joint_1_dryfriction=0.4, # dry friction coefficient of motor joint 1 [-]
joint_2_dryfriction=0.4, # dry friction coefficient of motor joint 2 [-]
joint_3_dryfriction=0.4, # dry friction coefficient of motor joint 3 [-]
joint_4_dryfriction=0.4, # dry friction coefficient of motor joint 4 [-]
joint_5_dryfriction=0.4, # dry friction coefficient of motor joint 5 [-]
joint_6_dryfriction=0.4, # dry friction coefficient of motor joint 6 [-]
joint_7_dryfriction=0.4, # dry friction coefficient of motor joint 7 [-]
rope_damping=1e-4, # damping of rope joints [N/s] (reasonable values are 6e-4 to 1e-6)
)
elif num_dof == 4:
return dict(
cup_scale=1.0, # scaling factor for the radius of the cup [-] (should be >0.65)
rope_length=0.41, # length of the rope [m]
ball_mass=0.024, # mass of the ball [kg]
joint_1_damping=0.05, # damping of motor joints [N/s] (default value is small)
joint_2_damping=0.05, # damping of motor joints [N/s] (default value is small)
joint_3_damping=0.05, # damping of motor joints [N/s] (default value is small)
joint_4_damping=0.05, # damping of motor joints [N/s] (default value is small)
joint_1_dryfriction=0.4, # dry friction coefficient of motor joint 1 [-]
joint_2_dryfriction=0.4, # dry friction coefficient of motor joint 2 [-]
joint_3_dryfriction=0.4, # dry friction coefficient of motor joint 3 [-]
joint_4_dryfriction=0.4, # dry friction coefficient of motor joint 4 [-]
rope_damping=1e-4, # damping of rope joints [N/s] (reasonable values are 6e-4 to 1e-6)
)
else:
raise pyrado.ValueErr(given=num_dof, eq_constraint="4 or 7")
def _create_task(self, task_args: dict) -> Task:
if task_args.get("sparse_rew_fcn", False):
# Create a task with binary reward
return self._create_main_task(task_args)
else:
# Create two (or three) parallel running task.
# 1.) Main task: Desired state task for the cartesian ball distance
# 2.) Deviation task: Desired state task for the cartesian- and joint deviation from the init position
            # 3.) Binary Bonus: Adds a binary bonus when the ball is caught [inactive by default]
return ParallelTasks(
[
self._create_main_task(task_args),
self._create_deviation_task(task_args),
self._create_main_task(
dict(
sparse_rew_fcn=True,
success_bonus=task_args.get("success_bonus", 0),
)
),
]
)
def _create_main_task(self, task_args: dict) -> Task:
# Create a DesStateTask that masks everything but the ball position
idcs = list(range(self.state_space.flat_dim - 6, self.state_space.flat_dim - 3)) # Cartesian ball position
spec = EnvSpec(
self.spec.obs_space,
self.spec.act_space,
self.spec.state_space.subspace(self.spec.state_space.create_mask(idcs)),
)
# If we do not use copy(), state_des coming from MuJoCo is a reference and updates automatically at each step.
# Note: sim.forward() + get_body_xpos() results in wrong output for state_des, as sim has not been updated to
# init_space.sample(), which is first called in reset()
if task_args.get("sparse_rew_fcn", False):
factor = task_args.get("success_bonus", 1)
# Binary final reward task
main_task = FinalRewTask(
ConditionOnlyTask(
spec,
condition_fcn=self.check_ball_in_cup,
is_success_condition=True,
),
mode=FinalRewMode(always_positive=True),
factor=factor,
)
            # Yield -1 on fail after the main task is done (successfully or not)
dont_fail_after_succ_task = FinalRewTask(
GoallessTask(spec, ZeroPerStepRewFcn()),
mode=FinalRewMode(always_negative=True),
factor=factor,
)
# Augment the binary task with an endless dummy task, to avoid early stopping
task = SequentialTasks((main_task, dont_fail_after_succ_task))
return MaskedTask(self.spec, task, idcs)
else:
state_des = self.sim.data.get_site_xpos("cup_goal") # this is a reference
# state_des_ball = self.sim.data.get_site_xpos("cup_goal") # this is a reference
# state_des_cup = np.array([0.82521, 0, 1.4469]) if self._num_dof == 7 else np.array([0.758, 0, 1.5])
# state_des = np.concatenate([state_des_ball, state_des_cup])
R_default = np.diag([0, 0, 1, 1e-2, 1e-2, 1e-1]) if self._num_dof == 7 else np.diag([0, 0, 1e-2, 1e-2])
rew_fcn = ExpQuadrErrRewFcn(
Q=task_args.get("Q", np.diag([2e1, 1e-4, 2e1])), # distance ball - cup; shouldn't move in y-direction
R=task_args.get("R", R_default), # last joint is really unreliable for 7 dof, thus punish more
)
task = DesStateTask(spec, state_des, rew_fcn)
# Wrap the masked DesStateTask to add a bonus for the best state in the rollout
return BestStateFinalRewTask(
MaskedTask(self.spec, task, idcs),
factor=task_args.get("final_factor", 0.05 * self.max_steps),
)
def _create_deviation_task(self, task_args: dict) -> Task:
idcs = list(range(self.state_space.flat_dim - 3, self.state_space.flat_dim)) # Cartesian cup goal position
spec = EnvSpec(
self.spec.obs_space,
self.spec.act_space,
self.spec.state_space.subspace(self.spec.state_space.create_mask(idcs)),
)
# init cup goal position
state_des = goal_pos_init_sim_7dof if self._num_dof == 7 else goal_pos_init_sim_4dof
rew_fcn = QuadrErrRewFcn(
Q=task_args.get("Q_dev", np.diag([2e-1, 1e-6, 5e0])), # Cartesian distance from init cup position
R=task_args.get(
"R_dev", np.zeros((self.act_space.shape[0], self.act_space.shape[0]))
), # joint space distance from init pose, interferes with R_default from _create_main_task
)
task = DesStateTask(spec, state_des, rew_fcn)
return MaskedTask(self.spec, task, idcs)
def _adapt_model_file(self, xml_model: str, domain_param: dict) -> str:
# First replace special domain parameters
cup_scale = domain_param.pop("cup_scale", None)
rope_length = domain_param.pop("rope_length", None)
if cup_scale is not None:
# See [1, l.93-96]
xml_model = xml_model.replace("[scale_mesh]", str(cup_scale * 0.001))
xml_model = xml_model.replace("[pos_mesh]", str(0.055 - (cup_scale - 1.0) * 0.023))
xml_model = xml_model.replace("[pos_goal]", str(0.1165 + (cup_scale - 1.0) * 0.0385))
xml_model = xml_model.replace("[size_cup]", str(cup_scale * 0.038))
xml_model = xml_model.replace("[size_cup_inner]", str(cup_scale * 0.03))
if rope_length is not None:
# The rope consists of 30 capsules
xml_model = xml_model.replace("[pos_capsule]", str(rope_length / 30))
# Each joint is at the top of each capsule (therefore negative direction from center)
xml_model = xml_model.replace("[pos_capsule_joint]", str(-rope_length / 60))
# Pure visualization component
xml_model = xml_model.replace("[size_capsule_geom]", str(rope_length / 72))
# Resolve mesh directory and replace the remaining domain parameters
return super()._adapt_model_file(xml_model, domain_param)
def _mujoco_step(self, act: np.ndarray) -> dict:
assert self.act_space.contains(act, verbose=True)
# Get the desired positions and velocities for the selected joints
qpos_des = self.qpos_des_init.copy() # the desired trajectory is relative to self._qpos_des_init
qvel_des = np.zeros_like(qpos_des)
if self._num_dof == 4:
np.add.at(qpos_des, [1, 3], act[:2])
np.add.at(qvel_des, [1, 3], act[2:])
elif self._num_dof == 7:
np.add.at(qpos_des, [1, 3, 5], act[:3])
np.add.at(qvel_des, [1, 3, 5], act[3:])
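        # (Editor's note) The action thus offsets only the joints at indices
        # [1, 3] (4 DoF) or [1, 3, 5] (7 DoF): the first half of `act` shifts
        # the desired positions, the second half the desired velocities.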
# Compute the position and velocity errors
err_pos = qpos_des - self.state[: self._num_dof]
err_vel = qvel_des - self.state[self.model.nq : self.model.nq + self._num_dof]
# Compute the torques for the PD controller and clip them to their max values
torque = self.p_gains * err_pos + self.d_gains * err_vel
torque = self.torque_space.project_to(torque)
# Apply the torques to the robot
self.sim.data.qfrc_applied[: self._num_dof] = torque
# Call MuJoCo
try:
self.sim.step()
mjsim_crashed = False
except mujoco_py.builder.MujocoException:
# When MuJoCo recognized instabilities in the simulation, it simply kills it.
# Instead, we want the episode to end with a failure.
mjsim_crashed = True
qpos, qvel = self.sim.data.qpos.copy(), self.sim.data.qvel.copy()
ball_pos = self.sim.data.get_body_xpos("ball").copy()
cup_goal = self.sim.data.get_site_xpos("cup_goal").copy()
self.state = np.concatenate([qpos, qvel, ball_pos, cup_goal])
# If desired, check for collisions of the ball with the robot
ball_collided = self.check_ball_collisions() if self.stop_on_collision else False
# If state is out of bounds (this is normally checked by the task, but does not work because of the mask)
state_oob = False if self.state_space.contains(self.state) else True
return dict(
qpos_des=qpos_des,
qvel_des=qvel_des,
qpos=qpos[: self._num_dof],
qvel=qvel[: self._num_dof],
ball_pos=ball_pos,
cup_pos=cup_goal,
failed=mjsim_crashed or ball_collided or state_oob,
)
def check_ball_collisions(self, verbose: bool = False) -> bool:
"""
Check if an undesired collision with the ball occurs.
:param verbose: print messages on collision
        :return: `True` if the ball collides with something other than the central parts of the cup
"""
for i in range(self.sim.data.ncon):
# Get current contact object
contact = self.sim.data.contact[i]
# Extract body-id and body-name of both contact geoms
body1 = self.model.geom_bodyid[contact.geom1]
body1_name = self.model.body_names[body1]
body2 = self.model.geom_bodyid[contact.geom2]
body2_name = self.model.body_names[body2]
# Evaluate if the ball collides with part of the WAM (collision bodies)
# or the connection of WAM and cup (geom_ids)
c1 = body1_name == "ball" and (
body2_name in self._collision_bodies or contact.geom2 in self._collision_geom_ids
)
c2 = body2_name == "ball" and (
body1_name in self._collision_bodies or contact.geom1 in self._collision_geom_ids
)
if c1 or c2:
if verbose:
print_cbt(
f"Undesired collision of {body1_name} and {body2_name} detected!",
"y",
)
return True
return False
def check_ball_in_cup(self, *args, verbose: bool = False):
"""
Check if the ball is in the cup.
:param verbose: print messages when ball is in the cup
:return: `True` if the ball is in the cup
"""
for i in range(self.sim.data.ncon):
# Get current contact object
contact = self.sim.data.contact[i]
# Extract body-id and body-name of both contact geoms
body1 = self.model.geom_bodyid[contact.geom1]
body1_name = self.model.body_names[body1]
body2 = self.model.geom_bodyid[contact.geom2]
body2_name = self.model.body_names[body2]
            # Evaluate if the ball is in contact with the inner geom of the cup
cup_inner_id = self.model._geom_name2id["cup_inner"]
c1 = body1_name == "ball" and contact.geom2 == cup_inner_id
c2 = body2_name == "ball" and contact.geom1 == cup_inner_id
if c1 or c2:
if verbose:
print_cbt(f"The ball is in the cup at time step {self.curr_step}.", "y")
return True
return False
def observe(self, state: np.ndarray) -> np.ndarray:
# TODO: Debug print-outs, should be removed in future...
# if self._curr_step == 0:
# print_cbt(f'cup xpos: {self.sim.data.get_body_xpos("cup").copy()}', 'b') # center of frame
# print_cbt(f'cup xipos: {self.sim.data.get_body_xipos("cup").copy()}', 'b') # center of mass
# Observe the normalized time
obs = [self._curr_step / self.max_steps]
# Extract the (x, z) cartesian position of cup and ball (the robot operates in the x-z plane).
# Note: the cup_goal is the mujoco site object marking the goal position for the ball. It is not identical
# to the coordinate system origin of the rigid body object 'cup'
if self.observe_ball:
obs.extend([state[-3], state[-1]])
if self.observe_cup:
obs.extend([state[-6], state[-4]])
return np.array(obs)
| [
"pyrado.tasks.sequential.SequentialTasks",
"numpy.array",
"pyrado.tasks.final_reward.FinalRewMode",
"numpy.add.at",
"pyrado.tasks.condition_only.ConditionOnlyTask",
"numpy.empty",
"numpy.concatenate",
"pyrado.ValueErr",
"pyrado.tasks.masked.MaskedTask",
"pyrado.spaces.box.BoxSpace",
"pyrado.spaces.singular.SingularStateSpace",
"pyrado.utils.input_output.print_cbt",
"os.path.join",
"pyrado.tasks.desired_state.DesStateTask",
"numpy.diag",
"numpy.zeros",
"pyrado.tasks.reward_functions.ZeroPerStepRewFcn",
"numpy.full",
"numpy.zeros_like"
] | [((6388, 6439), 'os.path.join', 'osp.join', (['pyrado.MUJOCO_ASSETS_DIR', 'graph_file_name'], {}), '(pyrado.MUJOCO_ASSETS_DIR, graph_file_name)\n', (6396, 6439), True, 'import os.path as osp\n'), ((7273, 7351), 'numpy.concatenate', 'np.concatenate', (['[self.init_qpos, self.init_qvel, init_ball_pos, init_cup_goal]'], {}), '([self.init_qpos, self.init_qvel, init_ball_pos, init_cup_goal])\n', (7287, 7351), True, 'import numpy as np\n'), ((9917, 9945), 'pyrado.spaces.box.BoxSpace', 'BoxSpace', (['state_lo', 'state_up'], {}), '(state_lo, state_up)\n', (9925, 9945), False, 'from pyrado.spaces.box import BoxSpace\n'), ((10453, 10492), 'pyrado.spaces.box.BoxSpace', 'BoxSpace', (['obs_lo', 'obs_up'], {'labels': 'labels'}), '(obs_lo, obs_up, labels=labels)\n', (10461, 10492), False, 'from pyrado.spaces.box import BoxSpace\n'), ((18421, 18459), 'pyrado.tasks.desired_state.DesStateTask', 'DesStateTask', (['spec', 'state_des', 'rew_fcn'], {}), '(spec, state_des, rew_fcn)\n', (18433, 18459), False, 'from pyrado.tasks.desired_state import DesStateTask\n'), ((18476, 18509), 'pyrado.tasks.masked.MaskedTask', 'MaskedTask', (['self.spec', 'task', 'idcs'], {}), '(self.spec, task, idcs)\n', (18486, 18509), False, 'from pyrado.tasks.masked import MaskedTask\n'), ((20201, 20224), 'numpy.zeros_like', 'np.zeros_like', (['qpos_des'], {}), '(qpos_des)\n', (20214, 20224), True, 'import numpy as np\n'), ((21557, 21605), 'numpy.concatenate', 'np.concatenate', (['[qpos, qvel, ball_pos, cup_goal]'], {}), '([qpos, qvel, ball_pos, cup_goal])\n', (21571, 21605), True, 'import numpy as np\n'), ((25854, 25867), 'numpy.array', 'np.array', (['obs'], {}), '(obs)\n', (25862, 25867), True, 'import numpy as np\n'), ((5875, 5904), 'numpy.array', 'np.array', (['[0.723, 0.0, 1.168]'], {}), '([0.723, 0.0, 1.168])\n', (5883, 5904), True, 'import numpy as np\n'), ((6656, 6688), 'numpy.array', 'np.array', (['[0.0, 0.63, 0.0, 1.27]'], {}), '([0.0, 0.63, 0.0, 1.27])\n', (6664, 6688), True, 'import numpy as np\n'), ((6842, 6893), 'numpy.array', 'np.array', (['[0.0, 0.65, 0.0, 1.41, 0.0, -0.28, -1.57]'], {}), '([0.0, 0.65, 0.0, 1.41, 0.0, -0.28, -1.57])\n', (6850, 6893), True, 'import numpy as np\n'), ((7417, 7453), 'pyrado.spaces.singular.SingularStateSpace', 'SingularStateSpace', (['self._init_state'], {}), '(self._init_state)\n', (7435, 7453), False, 'from pyrado.spaces.singular import SingularStateSpace\n'), ((7938, 7976), 'pyrado.spaces.box.BoxSpace', 'BoxSpace', (['init_state_lo', 'init_state_up'], {}), '(init_state_lo, init_state_up)\n', (7946, 7976), False, 'from pyrado.spaces.box import BoxSpace\n'), ((9600, 9633), 'numpy.full', 'np.full', (['state_shape', '(-pyrado.inf)'], {}), '(state_shape, -pyrado.inf)\n', (9607, 9633), True, 'import numpy as np\n'), ((9635, 9667), 'numpy.full', 'np.full', (['state_shape', 'pyrado.inf'], {}), '(state_shape, pyrado.inf)\n', (9642, 9667), True, 'import numpy as np\n'), ((16310, 16365), 'pyrado.tasks.sequential.SequentialTasks', 'SequentialTasks', (['(main_task, dont_fail_after_succ_task)'], {}), '((main_task, dont_fail_after_succ_task))\n', (16325, 16365), False, 'from pyrado.tasks.sequential import SequentialTasks\n'), ((16386, 16419), 'pyrado.tasks.masked.MaskedTask', 'MaskedTask', (['self.spec', 'task', 'idcs'], {}), '(self.spec, task, idcs)\n', (16396, 16419), False, 'from pyrado.tasks.masked import MaskedTask\n'), ((17225, 17263), 'pyrado.tasks.desired_state.DesStateTask', 'DesStateTask', (['spec', 'state_des', 'rew_fcn'], {}), '(spec, state_des, rew_fcn)\n', (17237, 17263), False, 'from 
pyrado.tasks.desired_state import DesStateTask\n'), ((20268, 20304), 'numpy.add.at', 'np.add.at', (['qpos_des', '[1, 3]', 'act[:2]'], {}), '(qpos_des, [1, 3], act[:2])\n', (20277, 20304), True, 'import numpy as np\n'), ((20317, 20353), 'numpy.add.at', 'np.add.at', (['qvel_des', '[1, 3]', 'act[2:]'], {}), '(qvel_des, [1, 3], act[2:])\n', (20326, 20353), True, 'import numpy as np\n'), ((6198, 6227), 'numpy.array', 'np.array', (['[0.828, 0.0, 1.131]'], {}), '([0.828, 0.0, 1.131])\n', (6206, 6227), True, 'import numpy as np\n'), ((6311, 6365), 'pyrado.ValueErr', 'pyrado.ValueErr', ([], {'given': 'num_dof', 'eq_constraint': '"""4 or 7"""'}), "(given=num_dof, eq_constraint='4 or 7')\n", (6326, 6365), False, 'import pyrado\n'), ((13619, 13673), 'pyrado.ValueErr', 'pyrado.ValueErr', ([], {'given': 'num_dof', 'eq_constraint': '"""4 or 7"""'}), "(given=num_dof, eq_constraint='4 or 7')\n", (13634, 13673), False, 'import pyrado\n'), ((15634, 15726), 'pyrado.tasks.condition_only.ConditionOnlyTask', 'ConditionOnlyTask', (['spec'], {'condition_fcn': 'self.check_ball_in_cup', 'is_success_condition': '(True)'}), '(spec, condition_fcn=self.check_ball_in_cup,\n is_success_condition=True)\n', (15651, 15726), False, 'from pyrado.tasks.condition_only import ConditionOnlyTask\n'), ((16828, 16863), 'numpy.diag', 'np.diag', (['[0, 0, 1, 0.01, 0.01, 0.1]'], {}), '([0, 0, 1, 0.01, 0.01, 0.1])\n', (16835, 16863), True, 'import numpy as np\n'), ((16892, 16919), 'numpy.diag', 'np.diag', (['[0, 0, 0.01, 0.01]'], {}), '([0, 0, 0.01, 0.01])\n', (16899, 16919), True, 'import numpy as np\n'), ((17415, 17448), 'pyrado.tasks.masked.MaskedTask', 'MaskedTask', (['self.spec', 'task', 'idcs'], {}), '(self.spec, task, idcs)\n', (17425, 17448), False, 'from pyrado.tasks.masked import MaskedTask\n'), ((20399, 20438), 'numpy.add.at', 'np.add.at', (['qpos_des', '[1, 3, 5]', 'act[:3]'], {}), '(qpos_des, [1, 3, 5], act[:3])\n', (20408, 20438), True, 'import numpy as np\n'), ((20451, 20490), 'numpy.add.at', 'np.add.at', (['qvel_des', '[1, 3, 5]', 'act[3:]'], {}), '(qvel_des, [1, 3, 5], act[3:])\n', (20460, 20490), True, 'import numpy as np\n'), ((7673, 7716), 'numpy.array', 'np.array', (['[0.1, 1, 0.5, 1.0, 0.1, 1.0, 1.0]'], {}), '([0.1, 1, 0.5, 1.0, 0.1, 1.0, 1.0])\n', (7681, 7716), True, 'import numpy as np\n'), ((7846, 7889), 'numpy.array', 'np.array', (['[0.1, 1, 0.5, 1.0, 0.1, 1.0, 1.0]'], {}), '([0.1, 1, 0.5, 1.0, 0.1, 1.0, 1.0])\n', (7854, 7889), True, 'import numpy as np\n'), ((9538, 9549), 'numpy.empty', 'np.empty', (['(3)'], {}), '(3)\n', (9546, 9549), True, 'import numpy as np\n'), ((9551, 9562), 'numpy.empty', 'np.empty', (['(3)'], {}), '(3)\n', (9559, 9562), True, 'import numpy as np\n'), ((15824, 15858), 'pyrado.tasks.final_reward.FinalRewMode', 'FinalRewMode', ([], {'always_positive': '(True)'}), '(always_positive=True)\n', (15836, 15858), False, 'from pyrado.tasks.final_reward import BestStateFinalRewTask, FinalRewTask, FinalRewMode\n'), ((16076, 16095), 'pyrado.tasks.reward_functions.ZeroPerStepRewFcn', 'ZeroPerStepRewFcn', ([], {}), '()\n', (16093, 16095), False, 'from pyrado.tasks.reward_functions import ZeroPerStepRewFcn, ExpQuadrErrRewFcn, QuadrErrRewFcn\n'), ((16119, 16153), 'pyrado.tasks.final_reward.FinalRewMode', 'FinalRewMode', ([], {'always_negative': '(True)'}), '(always_negative=True)\n', (16131, 16153), False, 'from pyrado.tasks.final_reward import BestStateFinalRewTask, FinalRewTask, FinalRewMode\n'), ((18103, 18129), 'numpy.diag', 'np.diag', (['[0.2, 1e-06, 5.0]'], {}), '([0.2, 1e-06, 5.0])\n', (18110, 
18129), True, 'import numpy as np\n'), ((18231, 18291), 'numpy.zeros', 'np.zeros', (['(self.act_space.shape[0], self.act_space.shape[0])'], {}), '((self.act_space.shape[0], self.act_space.shape[0]))\n', (18239, 18291), True, 'import numpy as np\n'), ((23516, 23601), 'pyrado.utils.input_output.print_cbt', 'print_cbt', (['f"""Undesired collision of {body1_name} and {body2_name} detected!"""', '"""y"""'], {}), "(f'Undesired collision of {body1_name} and {body2_name} detected!',\n 'y')\n", (23525, 23601), False, 'from pyrado.utils.input_output import print_cbt\n'), ((24810, 24882), 'pyrado.utils.input_output.print_cbt', 'print_cbt', (['f"""The ball is in the cup at time step {self.curr_step}."""', '"""y"""'], {}), "(f'The ball is in the cup at time step {self.curr_step}.', 'y')\n", (24819, 24882), False, 'from pyrado.utils.input_output import print_cbt\n'), ((16998, 17027), 'numpy.diag', 'np.diag', (['[20.0, 0.0001, 20.0]'], {}), '([20.0, 0.0001, 20.0])\n', (17005, 17027), True, 'import numpy as np\n')] |
# pyRasp
# Copyright (c) <NAME> 2020. Licensed under MIT.
# requirements:
# Python 3
# pip install pyyaml
# pip install requests
# pip install f90nml
from downloadGFSA import downloadGFSA
from prepare_wps import prepare_wps
from ungrib import ungrib
from metgrid import metgrid
from prepare_wrf import prepare_wrf
from real import real
from wrf import wrf
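# Pipeline: download GFS input data, prepare and run the WPS stage
# (ungrib + metgrid), then prepare and run the WRF stage (real + wrf).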
result = downloadGFSA(True)
prepare_wps(result)
ungrib()
metgrid()
prepare_wrf(result)
real()
wrf()
| [
"real.real",
"prepare_wrf.prepare_wrf",
"ungrib.ungrib",
"prepare_wps.prepare_wps",
"downloadGFSA.downloadGFSA",
"metgrid.metgrid",
"wrf.wrf"
] | [((369, 387), 'downloadGFSA.downloadGFSA', 'downloadGFSA', (['(True)'], {}), '(True)\n', (381, 387), False, 'from downloadGFSA import downloadGFSA\n'), ((388, 407), 'prepare_wps.prepare_wps', 'prepare_wps', (['result'], {}), '(result)\n', (399, 407), False, 'from prepare_wps import prepare_wps\n'), ((408, 416), 'ungrib.ungrib', 'ungrib', ([], {}), '()\n', (414, 416), False, 'from ungrib import ungrib\n'), ((417, 426), 'metgrid.metgrid', 'metgrid', ([], {}), '()\n', (424, 426), False, 'from metgrid import metgrid\n'), ((427, 446), 'prepare_wrf.prepare_wrf', 'prepare_wrf', (['result'], {}), '(result)\n', (438, 446), False, 'from prepare_wrf import prepare_wrf\n'), ((447, 453), 'real.real', 'real', ([], {}), '()\n', (451, 453), False, 'from real import real\n'), ((454, 459), 'wrf.wrf', 'wrf', ([], {}), '()\n', (457, 459), False, 'from wrf import wrf\n')] |
import pandas as pd
import ta
from app.common import reshape_data
from app.strategies.base_strategy import BaseStrategy
pd.set_option("display.max_columns", None)
pd.set_option("display.width", None)
class EMABBAlligatorStrategy(BaseStrategy):
BUY_SIGNAL = "buy_signal"
SELL_SIGNAL = "sell_signal"
def calculate_indicators(self):
df = self.load_df(limit=1000)
_ = df["close_3_ema"]
_ = df["boll"]
ao = ta.momentum.AwesomeOscillatorIndicator(high=df["high"], low=df["low"])
df["AO"] = ao.ao()
return df
def can_sell(self, df):
prev_candle = self.candle(df)
last_ema = prev_candle["close_3_ema"]
last_bb = prev_candle["boll"]
return [
last_ema < last_bb,
(self.candle(df, rewind=-2)["AO"] > 0)
& (self.candle(df, rewind=-1)["AO"] < 0),
prev_candle["volume"] > 0,
]
def can_buy(self, df):
prev_candle = self.candle(df)
last_ema = prev_candle["close_3_ema"]
last_bb = prev_candle["boll"]
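        # Entry requires the 3-period close EMA above the Bollinger middle band
        # (`boll`) plus an upward zero-line crossover of the Awesome Oscillator
        # between the two most recent closed candles.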
return [
last_ema > last_bb,
(self.candle(df, rewind=-2)["AO"] < 0)
& (self.candle(df, rewind=-1)["AO"] > 0),
prev_candle["volume"] > 0,
]
def alert_message(self, df):
prev_candle = self.candle(df)
last_close = prev_candle["close"]
last_ao = prev_candle["AO"]
        return "Close: {:.2f}, Awesome Oscillator value: {:.2f}".format(
            last_close, last_ao
        )
| [
"ta.momentum.AwesomeOscillatorIndicator",
"pandas.set_option"
] | [((122, 164), 'pandas.set_option', 'pd.set_option', (['"""display.max_columns"""', 'None'], {}), "('display.max_columns', None)\n", (135, 164), True, 'import pandas as pd\n'), ((165, 201), 'pandas.set_option', 'pd.set_option', (['"""display.width"""', 'None'], {}), "('display.width', None)\n", (178, 201), True, 'import pandas as pd\n'), ((451, 521), 'ta.momentum.AwesomeOscillatorIndicator', 'ta.momentum.AwesomeOscillatorIndicator', ([], {'high': "df['high']", 'low': "df['low']"}), "(high=df['high'], low=df['low'])\n", (489, 521), False, 'import ta\n')] |
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
from sklearn.linear_model import ARDRegression, LinearRegression
# Parameters of the example
np.random.seed(0)
n_samples, n_features = 100, 100
# Create Gaussian data
X = np.random.randn(n_samples, n_features)
# Create weights with a precision lambda_ of 4.
lambda_ = 4.
w = np.zeros(n_features)
# Only keep 10 weights of interest
relevant_features = np.random.randint(0, n_features, 10)
for i in relevant_features:
w[i] = stats.norm.rvs(loc=0, scale=1. / np.sqrt(lambda_))
# Create noise with a precision alpha of 50.
alpha_ = 50.
noise = stats.norm.rvs(loc=0, scale=1. / np.sqrt(alpha_), size=n_samples)
# Create the target
y = np.dot(X, w) + noise
clf = ARDRegression(fit_intercept=False, n_iter=1000)
clf.fit(X, y)
ols = LinearRegression(fit_intercept=False)
ols.fit(X, y)
from copy import deepcopy
from sds.distributions.lingauss import SingleOutputLinearGaussianWithKnownPrecision
from sds.distributions.lingauss import SingleOutputLinearGaussianWithKnownMean
from sds.distributions.gaussian import GaussianWithPrecision
from sds.distributions.gaussian import GaussianWithKnownMeanAndDiagonalPrecision
from sds.distributions.gamma import Gamma
likelihood_precision_prior = Gamma(dim=1, alphas=np.ones((1, )),
betas=1e-6 * np.ones((1, )))
parameter_precision_prior = Gamma(dim=n_features, alphas=np.ones((n_features, )),
betas=1e-6 * np.ones((n_features, )))
likelihood_precision_posterior = deepcopy(likelihood_precision_prior)
parameter_precision_posterior = deepcopy(parameter_precision_prior)
parameter_posterior = None
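# Coordinate-ascent updates (a sketch of the scheme implemented below):
# 1) weight posterior given the current precision means,
# 2) noise-precision (alpha) posterior given the weight mean,
# 3) per-weight precision (lambda) posterior given the weight mean.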
for i in range(100):
# parameter posterior
alphas = parameter_precision_posterior.mean()
parameter_prior = GaussianWithPrecision(dim=n_features,
mu=np.zeros((n_features, )),
lmbda=np.diag(alphas))
parameter_posterior = deepcopy(parameter_prior)
beta = likelihood_precision_posterior.mean()
likelihood_known_precision = SingleOutputLinearGaussianWithKnownPrecision(column_dim=n_features,
lmbda=beta,
affine=False)
stats = likelihood_known_precision.statistics(X, y)
parameter_posterior.nat_param = parameter_prior.nat_param + stats
# likelihood precision posterior
param = parameter_posterior.mean()
likelihood_known_mean = SingleOutputLinearGaussianWithKnownMean(column_dim=n_features,
W=param, affine=False)
stats = likelihood_known_mean.statistics(X, y)
likelihood_precision_posterior.nat_param = likelihood_precision_prior.nat_param + stats
# parameter precision posterior
parameter_likelihood = GaussianWithKnownMeanAndDiagonalPrecision(dim=n_features)
param = parameter_posterior.mean()
stats = parameter_likelihood.statistics(param)
parameter_precision_posterior.nat_param = parameter_precision_prior.nat_param + stats
our_ard = parameter_posterior.mode()
from sds.distributions.composite import MatrixNormalGamma
from sds.distributions.lingauss import LinearGaussianWithDiagonalPrecision
M = np.zeros((1, n_features))
K = 1e-16 * np.eye(n_features)
alphas = 1e-16 * np.ones((1, ))
betas = 1e-16 * np.ones((1, ))
prior = MatrixNormalGamma(column_dim=n_features, row_dim=1,
M=M, K=K, alphas=alphas, betas=betas)
posterior = deepcopy(prior)
likelihood = LinearGaussianWithDiagonalPrecision(column_dim=n_features,
row_dim=1,
affine=False)
stats = likelihood.statistics(X, np.atleast_2d(y).T)
posterior.nat_param = prior.nat_param + stats
our_ols = posterior.mode()[0]
plt.figure(figsize=(6, 5))
plt.title("Weights of the model")
plt.plot(w, color='orange', linestyle='-', linewidth=2, label="Ground truth")
plt.plot(clf.coef_, color='darkblue', linestyle='-', linewidth=2, label="Sklearn ARD")
plt.plot(our_ard, color='red', linestyle='-', linewidth=2, label="Our ARD")
# plt.plot(ols.coef_, color='yellowgreen', linestyle=':', linewidth=2, label="Sklearn OLS")
# plt.plot(our_ols.flatten(), color='cyan', linestyle='-', linewidth=2, label="Our OLS")
plt.xlabel("Features")
plt.ylabel("Values of the weights")
plt.legend(loc=1)
plt.show()
| [
"numpy.sqrt",
"sds.distributions.lingauss.SingleOutputLinearGaussianWithKnownPrecision",
"matplotlib.pyplot.ylabel",
"sklearn.linear_model.ARDRegression",
"copy.deepcopy",
"numpy.atleast_2d",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"sds.distributions.gaussian.GaussianWithKnownMeanAndDiagonalPrecision",
"numpy.dot",
"numpy.random.seed",
"numpy.eye",
"numpy.ones",
"sds.distributions.composite.MatrixNormalGamma",
"sds.distributions.lingauss.SingleOutputLinearGaussianWithKnownMean",
"matplotlib.pyplot.title",
"numpy.random.randn",
"sklearn.linear_model.LinearRegression",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.show",
"numpy.diag",
"numpy.zeros",
"numpy.random.randint",
"sds.distributions.lingauss.LinearGaussianWithDiagonalPrecision",
"matplotlib.pyplot.figure"
] | [((170, 187), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (184, 187), True, 'import numpy as np\n'), ((248, 286), 'numpy.random.randn', 'np.random.randn', (['n_samples', 'n_features'], {}), '(n_samples, n_features)\n', (263, 286), True, 'import numpy as np\n'), ((352, 372), 'numpy.zeros', 'np.zeros', (['n_features'], {}), '(n_features)\n', (360, 372), True, 'import numpy as np\n'), ((428, 464), 'numpy.random.randint', 'np.random.randint', (['(0)', 'n_features', '(10)'], {}), '(0, n_features, 10)\n', (445, 464), True, 'import numpy as np\n'), ((740, 787), 'sklearn.linear_model.ARDRegression', 'ARDRegression', ([], {'fit_intercept': '(False)', 'n_iter': '(1000)'}), '(fit_intercept=False, n_iter=1000)\n', (753, 787), False, 'from sklearn.linear_model import ARDRegression, LinearRegression\n'), ((809, 846), 'sklearn.linear_model.LinearRegression', 'LinearRegression', ([], {'fit_intercept': '(False)'}), '(fit_intercept=False)\n', (825, 846), False, 'from sklearn.linear_model import ARDRegression, LinearRegression\n'), ((1555, 1591), 'copy.deepcopy', 'deepcopy', (['likelihood_precision_prior'], {}), '(likelihood_precision_prior)\n', (1563, 1591), False, 'from copy import deepcopy\n'), ((1624, 1659), 'copy.deepcopy', 'deepcopy', (['parameter_precision_prior'], {}), '(parameter_precision_prior)\n', (1632, 1659), False, 'from copy import deepcopy\n'), ((3380, 3405), 'numpy.zeros', 'np.zeros', (['(1, n_features)'], {}), '((1, n_features))\n', (3388, 3405), True, 'import numpy as np\n'), ((3509, 3602), 'sds.distributions.composite.MatrixNormalGamma', 'MatrixNormalGamma', ([], {'column_dim': 'n_features', 'row_dim': '(1)', 'M': 'M', 'K': 'K', 'alphas': 'alphas', 'betas': 'betas'}), '(column_dim=n_features, row_dim=1, M=M, K=K, alphas=alphas,\n betas=betas)\n', (3526, 3602), False, 'from sds.distributions.composite import MatrixNormalGamma\n'), ((3638, 3653), 'copy.deepcopy', 'deepcopy', (['prior'], {}), '(prior)\n', (3646, 3653), False, 'from copy import deepcopy\n'), ((3667, 3754), 'sds.distributions.lingauss.LinearGaussianWithDiagonalPrecision', 'LinearGaussianWithDiagonalPrecision', ([], {'column_dim': 'n_features', 'row_dim': '(1)', 'affine': '(False)'}), '(column_dim=n_features, row_dim=1,\n affine=False)\n', (3702, 3754), False, 'from sds.distributions.lingauss import LinearGaussianWithDiagonalPrecision\n'), ((3980, 4006), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(6, 5)'}), '(figsize=(6, 5))\n', (3990, 4006), True, 'import matplotlib.pyplot as plt\n'), ((4007, 4040), 'matplotlib.pyplot.title', 'plt.title', (['"""Weights of the model"""'], {}), "('Weights of the model')\n", (4016, 4040), True, 'import matplotlib.pyplot as plt\n'), ((4041, 4118), 'matplotlib.pyplot.plot', 'plt.plot', (['w'], {'color': '"""orange"""', 'linestyle': '"""-"""', 'linewidth': '(2)', 'label': '"""Ground truth"""'}), "(w, color='orange', linestyle='-', linewidth=2, label='Ground truth')\n", (4049, 4118), True, 'import matplotlib.pyplot as plt\n'), ((4119, 4210), 'matplotlib.pyplot.plot', 'plt.plot', (['clf.coef_'], {'color': '"""darkblue"""', 'linestyle': '"""-"""', 'linewidth': '(2)', 'label': '"""Sklearn ARD"""'}), "(clf.coef_, color='darkblue', linestyle='-', linewidth=2, label=\n 'Sklearn ARD')\n", (4127, 4210), True, 'import matplotlib.pyplot as plt\n'), ((4206, 4281), 'matplotlib.pyplot.plot', 'plt.plot', (['our_ard'], {'color': '"""red"""', 'linestyle': '"""-"""', 'linewidth': '(2)', 'label': '"""Our ARD"""'}), "(our_ard, color='red', linestyle='-', linewidth=2, label='Our 
ARD')\n", (4214, 4281), True, 'import matplotlib.pyplot as plt\n'), ((4464, 4486), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Features"""'], {}), "('Features')\n", (4474, 4486), True, 'import matplotlib.pyplot as plt\n'), ((4487, 4522), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Values of the weights"""'], {}), "('Values of the weights')\n", (4497, 4522), True, 'import matplotlib.pyplot as plt\n'), ((4523, 4540), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '(1)'}), '(loc=1)\n', (4533, 4540), True, 'import matplotlib.pyplot as plt\n'), ((4542, 4552), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4550, 4552), True, 'import matplotlib.pyplot as plt\n'), ((712, 724), 'numpy.dot', 'np.dot', (['X', 'w'], {}), '(X, w)\n', (718, 724), True, 'import numpy as np\n'), ((2011, 2036), 'copy.deepcopy', 'deepcopy', (['parameter_prior'], {}), '(parameter_prior)\n', (2019, 2036), False, 'from copy import deepcopy\n'), ((2120, 2218), 'sds.distributions.lingauss.SingleOutputLinearGaussianWithKnownPrecision', 'SingleOutputLinearGaussianWithKnownPrecision', ([], {'column_dim': 'n_features', 'lmbda': 'beta', 'affine': '(False)'}), '(column_dim=n_features, lmbda=\n beta, affine=False)\n', (2164, 2218), False, 'from sds.distributions.lingauss import SingleOutputLinearGaussianWithKnownPrecision\n'), ((2602, 2691), 'sds.distributions.lingauss.SingleOutputLinearGaussianWithKnownMean', 'SingleOutputLinearGaussianWithKnownMean', ([], {'column_dim': 'n_features', 'W': 'param', 'affine': '(False)'}), '(column_dim=n_features, W=param,\n affine=False)\n', (2641, 2691), False, 'from sds.distributions.lingauss import SingleOutputLinearGaussianWithKnownMean\n'), ((2964, 3021), 'sds.distributions.gaussian.GaussianWithKnownMeanAndDiagonalPrecision', 'GaussianWithKnownMeanAndDiagonalPrecision', ([], {'dim': 'n_features'}), '(dim=n_features)\n', (3005, 3021), False, 'from sds.distributions.gaussian import GaussianWithKnownMeanAndDiagonalPrecision\n'), ((3418, 3436), 'numpy.eye', 'np.eye', (['n_features'], {}), '(n_features)\n', (3424, 3436), True, 'import numpy as np\n'), ((3454, 3467), 'numpy.ones', 'np.ones', (['(1,)'], {}), '((1,))\n', (3461, 3467), True, 'import numpy as np\n'), ((3485, 3498), 'numpy.ones', 'np.ones', (['(1,)'], {}), '((1,))\n', (3492, 3498), True, 'import numpy as np\n'), ((1286, 1299), 'numpy.ones', 'np.ones', (['(1,)'], {}), '((1,))\n', (1293, 1299), True, 'import numpy as np\n'), ((1424, 1446), 'numpy.ones', 'np.ones', (['(n_features,)'], {}), '((n_features,))\n', (1431, 1446), True, 'import numpy as np\n'), ((3883, 3899), 'numpy.atleast_2d', 'np.atleast_2d', (['y'], {}), '(y)\n', (3896, 3899), True, 'import numpy as np\n'), ((654, 669), 'numpy.sqrt', 'np.sqrt', (['alpha_'], {}), '(alpha_)\n', (661, 669), True, 'import numpy as np\n'), ((1350, 1363), 'numpy.ones', 'np.ones', (['(1,)'], {}), '((1,))\n', (1357, 1363), True, 'import numpy as np\n'), ((1496, 1518), 'numpy.ones', 'np.ones', (['(n_features,)'], {}), '((n_features,))\n', (1503, 1518), True, 'import numpy as np\n'), ((1892, 1915), 'numpy.zeros', 'np.zeros', (['(n_features,)'], {}), '((n_features,))\n', (1900, 1915), True, 'import numpy as np\n'), ((1968, 1983), 'numpy.diag', 'np.diag', (['alphas'], {}), '(alphas)\n', (1975, 1983), True, 'import numpy as np\n'), ((537, 553), 'numpy.sqrt', 'np.sqrt', (['lambda_'], {}), '(lambda_)\n', (544, 553), True, 'import numpy as np\n')] |
#!/usr/bin/python3
import os
import json
import re
import ast
from graphviz import Digraph
import pandas as pd
# color the graph
import graph_tool.all as gt
import copy
import matplotlib.colors as mcolors
import sys
import utils
from tompkins.ilp import schedule, jobs_when_where
from collections import defaultdict
from pulp import value
import seaborn as sns
def get_benchmarks():
benchmarks = {}
for _file in os.listdir(stats_dir):
try:
bnch = _file.rsplit('.', 1)[0]
assert os.path.isfile(os.path.join(stats_dir, f'{bnch}.iopt'))
app = bnch #, scheduler = bnch.rsplit(':', 1)
scheduler = 'vanilla'
benchmarks[bnch] = {'app': app, 'scheduler': scheduler, 'benchmark': bnch}
except AssertionError:
pass
return benchmarks
def build_graph(benchmark):
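    # Parse the `.iopt` graph dump: lines starting with 'v' declare vertices
    # (id, name, runtime, output size); lines starting with 'e' declare edges.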
css_colors = list(mcolors.CSS4_COLORS.keys())
gfile = os.path.join(stats_dir, f'{benchmark}.iopt')
with open(gfile, 'r') as fd:
raw = fd.read().split('\n')
g = gt.Graph(directed=True)
vid_to_vx = {}
name_to_vid = {}
g.vertex_properties['name'] = g.new_vertex_property("string")
g.vertex_properties['worker'] = g.new_vertex_property("string")
g.vertex_properties['color'] = g.new_vertex_property("string", '#e0e0e0')
g.vertex_properties['icolor'] = g.new_vertex_property("int")
g.vertex_properties['output_size'] = g.new_vertex_property("int")
g.vertex_properties['runtime'] = g.new_vertex_property("float")
for ln in raw:
if ln.startswith('v'):
_, vid, name, runtime, output_size = ln.split(',', 4)
v = g.add_vertex()
vid_to_vx[vid] = v
name_to_vid[name] = vid
g.vp.name[v] = name
g.vp.runtime[v] = float(runtime) # 1 second
g.vp.output_size[v] = float(output_size) # 1GB
g.vp.color[v] = '#e0e0e0'
for ln in raw:
if ln.startswith('e'):
_, vsrc, vdst = ln.split(',')
g.add_edge(vid_to_vx[vsrc], vid_to_vx[vdst])
return g
def get_runtime_statistics(benchmark):
tasks = []
statistics = {}
jfile = os.path.join(stats_dir, f'{benchmark}.json')
with open(jfile, 'r') as fd:
stats = ast.literal_eval(fd.read())
for ts in stats:
        ops = 'ts'  # op-name parsing disabled; was: ts.replace("(", '').replace(')', '').split("'")[1].split('-')[0]
statistics[ts] = {'key': ts, 'op': ops,
'output_size': stats[ts]['msg']['nbytes'], 'worker': stats[ts]['worker'].split(':')[1].replace('/', '')}
startsstops = stats[ts]['msg']['startstops']
for ss in startsstops:
if ss['action'] == 'compute':
statistics[ts]['compute_end'] = ss['stop']
statistics[ts]['compute_start'] = ss['start']
statistics[ts]['runtime'] = ss['stop'] - ss['start']
cfile = os.path.join(stats_dir, f'{benchmark}.colors')
with open(cfile, 'r') as cfd:
raw = cfd.read().split('\n')
for ln in raw:
if not ln:
continue
ts, color = ln.split(',')
#ts += ')'
statistics[ts]['color'] = int(color)
return statistics
def plot_graph(g, benchmark, optimal=False):
print(benchmark["benchmark"])
post = ".optimal" if optimal else ""
dg = Digraph('G', filename=f'{benchmark["benchmark"]}{post}.gv', format='png')
for v in g.vertices():
dg.attr('node', shape='ellipse', style="filled,solid",
penwidth="3",
fillcolor=g.vp.color[v],
color=worker_color[g.vp.statistics[v]['worker']])
#if benchmark['scheduler'] == "vanilla":
# dg.node(f'{v}')
#else:
dg.node(f'{v}, color({g.vp.icolor[v]})')
for e in g.edges():
#if benchmark['scheduler'] == "vanilla":
# dg.edge(f'{e.source()}', f'{e.target()}')
#else:
dg.edge(f'{e.source()}, color({g.vp.icolor[e.source()]})',
f'{e.target()}, color({g.vp.icolor[e.target()]})')
dg.view(os.path.join(f'{results_dir}',f'{benchmark["benchmark"]}{post}'), quiet=False)
import pulp as pl
import time
def find_optimal(g, bw):
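    # Build a Tompkins-style MILP that minimizes the makespan of the task DAG
    # across workers, given runtimes (D), transfer costs (C) and precedence (P).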
n_workers = 4
workers = [f'w{i}' for i in range(n_workers)]
    # Job Release Times - Additional constraints on availability of Jobs
# R = np.zeros(n)
R = defaultdict(lambda:0)
# Maximum makespan
M = 100
B = defaultdict(lambda:1)
agents = workers
jobs = []
for v in g.vertices():
jobs.append(f't{v}')
n = len(jobs)
m = len(agents)
P = defaultdict(lambda:0)
for e in g.edges():
P[f't{e.source()}',f't{e.target()}'] = 1
# computation
D = defaultdict(lambda:0)
for v in g.vertices():
for a in agents:
D[f't{v}', a] = g.vp.runtime[v] # statistics[g.vp.name[v]]['runtime']
# Communication Delay matrix - Cost of sending results of job from
# agent to agent
#bw = 10*(1<<30)/(1<<3)
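    # Convert bandwidth from Mbit/s to bytes/s (1 Mbit = 2^20 bits, 1 byte = 2^3 bits).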
bw = bw*(1<<20)/(1<<3)
C = defaultdict(lambda:0)
for v in g.vertices():
for a in agents:
for b in agents:
C[f't{v}', a, b] = 0 if a == b else g.vp.output_size[v]/bw # 0 --> cost_serialization
start = time.time()
# Set up the Mixed Integer Linear Program
prob, X, S, Cmax = schedule(jobs, agents, D, C, R, B, P, M)
solver = pl.GUROBI_CMD()
prob.solve(solver)
latency = time.time() - start
print('-----------------------------------------------> constraints', len(prob.constraints.keys()))
print('----------------------------------------------> # of variables', prob.numVariables())
print('---------------------------------------------->', latency)
print("Makespan: ", value(Cmax))
sched = jobs_when_where(prob, X, S, Cmax)
print("Schedule: ", sched)
sched2 = []
for j in sched:
new = j + (j[1] + D[j[0], j[2]], g.vp.name[int(j[0].replace('t', ''))])
sched2.append(new)
print("Schedule: ", sched2)
return sched2, {'makespan': value(Cmax),
'constraints': len(prob.constraints.keys()),
'variables': prob.numVariables(),
'time': float(latency)}
results_dir = './benchmarks'
stats_dir='./benchmarks'
benchmarks = get_benchmarks()
#benchmarks = ['dom4x61GB1B', 'dom2x41GB1B', 'tree4x61GB1B']
for bnch in benchmarks:
for bw in [1*1024, 16*1024, 512, 32*1024, 8*1024, 4*1024, 2*1024, 256, 128, 64, 32]:
print(f'process {bnch}')
g = build_graph(bnch)
sched2, stats = find_optimal(g, bw)
with open(f'{results_dir}/optimal_compuation_stats.csv', 'a') as fd:
fd.write(f'{bnch},{stats["makespan"]},{stats["constraints"]},{stats["variables"]},{stats["time"]},no,{bw}\n')
with open(f'{results_dir}/{bnch}.nonetworkcontention.{bw}mbps.optimal', 'w') as fd:
for s in sched2:
fd.write(f'v,{s[0]},{s[1]},{s[2]}\n')
#fd.write(f'{s[4]},{s[3]},{s[0]},{s[1]},{s[2]}\n')
#v = int(s[0].replace('t', ''))
#g.vp.worker[v] = s[2]
break
#break
| [
"os.listdir",
"os.path.join",
"graph_tool.all.Graph",
"tompkins.ilp.jobs_when_where",
"tompkins.ilp.schedule",
"matplotlib.colors.CSS4_COLORS.keys",
"pulp.value",
"collections.defaultdict",
"graphviz.Digraph",
"pulp.GUROBI_CMD",
"time.time"
] | [((626, 647), 'os.listdir', 'os.listdir', (['stats_dir'], {}), '(stats_dir)\n', (636, 647), False, 'import os\n'), ((1123, 1167), 'os.path.join', 'os.path.join', (['stats_dir', 'f"""{benchmark}.iopt"""'], {}), "(stats_dir, f'{benchmark}.iopt')\n", (1135, 1167), False, 'import os\n'), ((2469, 2513), 'os.path.join', 'os.path.join', (['stats_dir', 'f"""{benchmark}.json"""'], {}), "(stats_dir, f'{benchmark}.json')\n", (2481, 2513), False, 'import os\n'), ((3238, 3284), 'os.path.join', 'os.path.join', (['stats_dir', 'f"""{benchmark}.colors"""'], {}), "(stats_dir, f'{benchmark}.colors')\n", (3250, 3284), False, 'import os\n'), ((3700, 3773), 'graphviz.Digraph', 'Digraph', (['"""G"""'], {'filename': 'f"""{benchmark[\'benchmark\']}{post}.gv"""', 'format': '"""png"""'}), '(\'G\', filename=f"{benchmark[\'benchmark\']}{post}.gv", format=\'png\')\n', (3707, 3773), False, 'from graphviz import Digraph\n'), ((4745, 4768), 'collections.defaultdict', 'defaultdict', (['(lambda : 0)'], {}), '(lambda : 0)\n', (4756, 4768), False, 'from collections import defaultdict\n'), ((4811, 4834), 'collections.defaultdict', 'defaultdict', (['(lambda : 1)'], {}), '(lambda : 1)\n', (4822, 4834), False, 'from collections import defaultdict\n'), ((4979, 5002), 'collections.defaultdict', 'defaultdict', (['(lambda : 0)'], {}), '(lambda : 0)\n', (4990, 5002), False, 'from collections import defaultdict\n'), ((5105, 5128), 'collections.defaultdict', 'defaultdict', (['(lambda : 0)'], {}), '(lambda : 0)\n', (5116, 5128), False, 'from collections import defaultdict\n'), ((5417, 5440), 'collections.defaultdict', 'defaultdict', (['(lambda : 0)'], {}), '(lambda : 0)\n', (5428, 5440), False, 'from collections import defaultdict\n'), ((5635, 5646), 'time.time', 'time.time', ([], {}), '()\n', (5644, 5646), False, 'import time\n'), ((5716, 5756), 'tompkins.ilp.schedule', 'schedule', (['jobs', 'agents', 'D', 'C', 'R', 'B', 'P', 'M'], {}), '(jobs, agents, D, C, R, B, P, M)\n', (5724, 5756), False, 'from tompkins.ilp import schedule, jobs_when_where\n'), ((5770, 5785), 'pulp.GUROBI_CMD', 'pl.GUROBI_CMD', ([], {}), '()\n', (5783, 5785), True, 'import pulp as pl\n'), ((6164, 6197), 'tompkins.ilp.jobs_when_where', 'jobs_when_where', (['prob', 'X', 'S', 'Cmax'], {}), '(prob, X, S, Cmax)\n', (6179, 6197), False, 'from tompkins.ilp import schedule, jobs_when_where\n'), ((1083, 1109), 'matplotlib.colors.CSS4_COLORS.keys', 'mcolors.CSS4_COLORS.keys', ([], {}), '()\n', (1107, 1109), True, 'import matplotlib.colors as mcolors\n'), ((1250, 1273), 'graph_tool.all.Graph', 'gt.Graph', ([], {'directed': '(True)'}), '(directed=True)\n', (1258, 1273), True, 'import graph_tool.all as gt\n'), ((4434, 4499), 'os.path.join', 'os.path.join', (['f"""{results_dir}"""', 'f"""{benchmark[\'benchmark\']}{post}"""'], {}), '(f\'{results_dir}\', f"{benchmark[\'benchmark\']}{post}")\n', (4446, 4499), False, 'import os\n'), ((5823, 5834), 'time.time', 'time.time', ([], {}), '()\n', (5832, 5834), False, 'import time\n'), ((6139, 6150), 'pulp.value', 'value', (['Cmax'], {}), '(Cmax)\n', (6144, 6150), False, 'from pulp import value\n'), ((6437, 6448), 'pulp.value', 'value', (['Cmax'], {}), '(Cmax)\n', (6442, 6448), False, 'from pulp import value\n'), ((739, 778), 'os.path.join', 'os.path.join', (['stats_dir', 'f"""{bnch}.iopt"""'], {}), "(stats_dir, f'{bnch}.iopt')\n", (751, 778), False, 'import os\n')] |
import datetime as dt
import logging
from babel import Locale, UnknownLocaleError
from babel.dates import format_datetime, format_time, format_date
import pytz
from tzlocal import get_localzone
from . import settings
logger = logging.getLogger(__name__)
class LocaleHelper:
"""Helpers for converting date & time according to current locale and timezone"""
def __init__(
self,
my_locale: Locale = None,
my_tz: pytz.BaseTzInfo = None,
author_info: dict = None,
) -> None:
"""
Args:
- my_locale: Primary locale to use
- my_tz: Primary timezone to use
- author_info: locale and timezone to use from this Slack response
if my_locale and/or my_tz are not given
"""
self._locale = self._determine_locale(my_locale, author_info)
self._timezone = self._determine_timezone(my_tz, author_info)
@staticmethod
def _determine_locale(my_locale: Locale = None, author_info: dict = None) -> Locale:
if my_locale:
if not isinstance(my_locale, Locale):
raise TypeError("my_locale must be a babel Locale object")
else:
if author_info:
try:
my_locale = Locale.parse(author_info["locale"], sep="-")
except UnknownLocaleError:
logger.warning("Could not use locale info from Slack")
my_locale = Locale.default()
else:
my_locale = Locale.default()
if not my_locale:
my_locale = Locale.parse(settings.FALLBACK_LOCALE)
return my_locale
@staticmethod
def _determine_timezone(
my_tz: pytz.BaseTzInfo = None, author_info: dict = None
) -> pytz.BaseTzInfo:
if my_tz:
if not isinstance(my_tz, pytz.BaseTzInfo):
raise TypeError("my_tz must be of type pytz")
else:
if author_info:
try:
my_tz = pytz.timezone(author_info["tz"])
except pytz.exceptions.UnknownTimeZoneError:
logger.warning("Could not use timezone info from Slack")
my_tz = get_localzone()
else:
my_tz = get_localzone()
if not my_tz:
my_tz = pytz.UTC
return my_tz
@property
def locale(self) -> Locale:
return self._locale
@property
def timezone(self) -> pytz.BaseTzInfo:
return self._timezone
def format_date_full_str(self, my_datetime: dt.datetime) -> str:
return format_date(my_datetime, format="full", locale=self.locale)
def format_datetime_str(self, my_datetime: dt.datetime) -> str:
"""returns formated datetime string for given dt using locale"""
return format_datetime(my_datetime, format="short", locale=self.locale)
def get_datetime_formatted_str(self, ts: int) -> str:
"""return given timestamp as formated datetime string using locale"""
my_datetime = self.get_datetime_from_ts(ts)
return format_datetime(my_datetime, format="short", locale=self.locale)
def get_time_formatted_str(self, ts: int) -> str:
"""return given timestamp as formated datetime string using locale"""
my_datetime = self.get_datetime_from_ts(ts)
return format_time(my_datetime, format="short", locale=self.locale)
def get_datetime_from_ts(self, ts: int) -> dt.datetime:
"""returns datetime object of a unix timestamp with local timezone"""
my_datetime = dt.datetime.fromtimestamp(float(ts), pytz.UTC)
return my_datetime.astimezone(self.timezone)
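# A minimal usage sketch (the author_info shape mirrors the Slack fields used
# above; the values here are made up for illustration):
#   helper = LocaleHelper(author_info={"locale": "en-US", "tz": "Europe/Berlin"})
#   print(helper.get_datetime_formatted_str(1609459200))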
| [
"logging.getLogger",
"pytz.timezone",
"babel.dates.format_time",
"tzlocal.get_localzone",
"babel.dates.format_date",
"babel.Locale.parse",
"babel.dates.format_datetime",
"babel.Locale.default"
] | [((230, 257), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (247, 257), False, 'import logging\n'), ((2602, 2661), 'babel.dates.format_date', 'format_date', (['my_datetime'], {'format': '"""full"""', 'locale': 'self.locale'}), "(my_datetime, format='full', locale=self.locale)\n", (2613, 2661), False, 'from babel.dates import format_datetime, format_time, format_date\n'), ((2819, 2883), 'babel.dates.format_datetime', 'format_datetime', (['my_datetime'], {'format': '"""short"""', 'locale': 'self.locale'}), "(my_datetime, format='short', locale=self.locale)\n", (2834, 2883), False, 'from babel.dates import format_datetime, format_time, format_date\n'), ((3088, 3152), 'babel.dates.format_datetime', 'format_datetime', (['my_datetime'], {'format': '"""short"""', 'locale': 'self.locale'}), "(my_datetime, format='short', locale=self.locale)\n", (3103, 3152), False, 'from babel.dates import format_datetime, format_time, format_date\n'), ((3353, 3413), 'babel.dates.format_time', 'format_time', (['my_datetime'], {'format': '"""short"""', 'locale': 'self.locale'}), "(my_datetime, format='short', locale=self.locale)\n", (3364, 3413), False, 'from babel.dates import format_datetime, format_time, format_date\n'), ((1581, 1619), 'babel.Locale.parse', 'Locale.parse', (['settings.FALLBACK_LOCALE'], {}), '(settings.FALLBACK_LOCALE)\n', (1593, 1619), False, 'from babel import Locale, UnknownLocaleError\n'), ((1514, 1530), 'babel.Locale.default', 'Locale.default', ([], {}), '()\n', (1528, 1530), False, 'from babel import Locale, UnknownLocaleError\n'), ((2266, 2281), 'tzlocal.get_localzone', 'get_localzone', ([], {}), '()\n', (2279, 2281), False, 'from tzlocal import get_localzone\n'), ((1256, 1300), 'babel.Locale.parse', 'Locale.parse', (["author_info['locale']"], {'sep': '"""-"""'}), "(author_info['locale'], sep='-')\n", (1268, 1300), False, 'from babel import Locale, UnknownLocaleError\n'), ((2009, 2041), 'pytz.timezone', 'pytz.timezone', (["author_info['tz']"], {}), "(author_info['tz'])\n", (2022, 2041), False, 'import pytz\n'), ((1451, 1467), 'babel.Locale.default', 'Locale.default', ([], {}), '()\n', (1465, 1467), False, 'from babel import Locale, UnknownLocaleError\n'), ((2208, 2223), 'tzlocal.get_localzone', 'get_localzone', ([], {}), '()\n', (2221, 2223), False, 'from tzlocal import get_localzone\n')] |
import signal
class KillableProcess(object):
def __init__(self):
self.interrupt = False
signal.signal(signal.SIGTERM, self._signal_handler)
signal.signal(signal.SIGINT, self._signal_handler)
def _signal_handler(self, sign, frame):
self.interrupt = True | [
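# A minimal usage sketch (hypothetical work loop, not part of the class above):
#   proc = KillableProcess()
#   while not proc.interrupt:
#       pass  # do one unit of work, then re-check the flag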
"signal.signal"
] | [((110, 161), 'signal.signal', 'signal.signal', (['signal.SIGTERM', 'self._signal_handler'], {}), '(signal.SIGTERM, self._signal_handler)\n', (123, 161), False, 'import signal\n'), ((170, 220), 'signal.signal', 'signal.signal', (['signal.SIGINT', 'self._signal_handler'], {}), '(signal.SIGINT, self._signal_handler)\n', (183, 220), False, 'import signal\n')] |
#
# Copyright (c) 2016 <NAME> <<EMAIL>>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
import os
import json
import struct
import threading
import socket
import queue
import tempfile
import base64
import select
from behem0th import utils, log
BLOCK_SIZE = 4096
class Route:
def handle(self, data, request):
raise NotImplementedError
def send(self, data):
self.handler.send(self.route_name, data)
class FilelistRoute(Route):
def handle(self, data, request):
if request.is_client:
request.client._filelist = data
request.client._rlock.release()
else:
files, events = request.client._merge_filelist(data)
with request.client._rlock:
self.send(request.client._filelist)
for e in events:
request.queue_event(e)
for f in files:
request.queue_file(f[0], f[1])
"""
{
"action": "<action>",
"path": "<relpath-to-file>"
}
<action> can be either 'receive' or 'send'
Payload are base64 encoded chunks (BLOCK_SIZE bytes)
"""
class FileRoute(Route):
def handle(self, data, request):
action = data['action']
path = data['path']
if action == 'receive':
tmpf = tempfile.NamedTemporaryFile(delete=False)
buffer = b''
for chunk in request.recv():
buffer += chunk
if len(buffer) >= BLOCK_SIZE:
tmpf.write(base64.b64decode(buffer[:BLOCK_SIZE]))
					# Keep the not-yet-decoded remainder for the next pass through the loop.
					buffer = buffer[BLOCK_SIZE:]
tmpf.write(base64.b64decode(buffer))
tmpf.close()
# watchdog reports a file-deleted and a file-created event, so ignore both.
request.client._ignore_next_fsevent(path)
request.client._ignore_next_fsevent(path)
os.rename(tmpf.name, request.client._abspath(path))
request.client._update_metadata(path)
request.client._event_handler._dispatch(
'received', request.client, path, 'file'
)
elif action == 'send':
request.queue_file('send', path)
else:
			log.warn('FileRoute: Unknown action \'{0}\', ignoring.', action)
# If we are the 'server', we also need to distribute all file request
# to all other clients.
if not request.is_client:
action = 'send' if action == 'receive' else 'request'
request.client._run_on_peers('queue_file', request, action, path)
"""
{
"type": "<type>",
"path": "<relpath-to-file>"
}
<type> can be one of 'file-created', 'file-deleted', 'file-moved'
"""
class EventRoute(Route):
def handle(self, data, request):
f_type, event = data['type'].split('-')
path = data['path']
abspath = request.client._abspath(path)
request.client._ignore_next_fsevent(path)
# TODO: factor out common code with Client._handle_fsevent() and Client._merge_filelist()
if event == 'created':
# create the file/directory
if f_type == 'file':
open(abspath, 'a').close()
else:
os.mkdir(abspath, 0o755)
request.client._add_to_filelist(path, f_type)
elif event == 'deleted':
request.client._remove_from_filelist(path)
os.remove(abspath)
elif event == 'moved':
request.client._remove_from_filelist(path)
os.rename(abspath, data['dest'])
request.client._add_to_filelist(data['dest'], f_type)
else:
log.warn('EventRoute: Unknown event {0}', data)
# For rationale, see FileRoute.handle()
if not request.is_client:
request.client._run_on_peers('queue_event', request, data)
ROUTES = {
'filelist': FilelistRoute(),
'file': FileRoute(),
'event': EventRoute()
}
"""
behem0th's protocol is completely text-based: utf-8 encoded and serialized as JSON for easy parsing.
encoded in JSON for easy parsing.
A request usually looks like this:
{ "route": "<route-name>", "data": "<data>" }
'data' holds additional data which is then passed to the route.
There is no special format designed for 'data' and is specific to each route.
After each request there is a newline to separate them. (think of HTTP)
If a route needs to transfer additional data (a 'payload'), it has to send them
in a text-based format, e.g. base-64 encoding for binary data.
After the payload, if any, there has to be another newline to separate it from
the next request.
"""
class RequestHandler(threading.Thread):
req_handler_num = 0
def __init__(self, **kwargs):
super().__init__()
self.daemon = True
self.sync_queue = queue.Queue()
self.routes = {}
self.recvbuf = b''
RequestHandler.req_handler_num += 1
self.name = "request-handler-{0}".format(RequestHandler.req_handler_num)
for key, value in kwargs.items():
setattr(self, key, value)
with self.client._rlock:
self.client._peers.append(self)
self.sock.setblocking(0)
self.is_client = bool(self.client._sock)
for name, route in ROUTES.items():
route.route_name = name
route.handler = self
self.routes[name] = route
def setup(self):
log.info('Connected to {0}:{1}', self.address[0], self.address[1])
# If self.client has a (active) socket, it is a client and
# thus needs to starts syncing up with the server.
if self.is_client:
# Lock the client until the filelist has been sent back by the server.
self.client._rlock.acquire()
self.send('filelist', self.client._filelist)
def close(self):
self.sync_queue.put({'action': 'exit'})
try:
self.sock.shutdown(socket.SHUT_RDWR)
except OSError:
pass
def handle(self, data):
try:
data = json.loads(data)
except ValueError:
log.error('Received invalid data: {0}', data)
return
route = data['route']
data = data['data']
log.info_v('Handling {0}, data:\n{1}', route, data)
if route in self.routes:
self.routes[route].handle(data, self)
else:
log.error("Data received on unknown route '{0}'!", route)
def send(self, route, data):
request = json.dumps({'route': route, 'data': data}) + '\n'
self.sock.sendall(request.encode())
def recv(self):
if self.recvbuf:
# This needs special handling because there could be multiple
# request in recvbuf. If this is the case, we can only yield the first
# one and have to leave to others in recvbuf.
index = self.recvbuf.find(b'\n')
if index == -1:
yield self.recvbuf
self.recvbuf = None
else:
yield self.recvbuf[:index]
self.recvbuf = self.recvbuf[index+1:]
return
while 1:
select.select([self.sock], [], [])
chunk = self.sock.recv(1024)
if not len(chunk):
# If select has signaled the socket is readable, yet .recv()
# returns zero bytes, the other end probably performed
# a close() or shutdown() on the socket.
break
index = chunk.find(b'\n')
if index == -1:
yield chunk
else:
yield chunk[:index]
self.recvbuf = chunk[index+1:]
break
def queue_file(self, action, path):
self.sync_queue.put({
'action': action + '-file',
'path': path
})
def queue_event(self, event):
self.sync_queue.put({
'action': 'send-event',
'event': event
})
def sync_worker(self):
while 1:
entry = self.sync_queue.get()
log.info_v('Processing {0}', entry)
if entry['action'] == 'exit':
break
elif entry['action'] == 'send-file':
path = entry['path']
abspath = self.client._abspath(path)
self.send('file', {
'path': path,
'action': 'receive'
})
for buf in utils.read_file_seq(abspath, BLOCK_SIZE):
self.sock.sendall(base64.b64encode(buf))
self.sock.sendall(b'\n')
self.client._event_handler._dispatch(
'sent', self.client, path, 'file'
)
elif entry['action'] == 'request-file':
self.send('file', {
'path': entry['path'],
'action': 'send'
})
elif entry['action'] == 'send-event':
self.send('event', entry['event'])
self.sync_queue.task_done()
def run(self):
self.setup()
utils.create_thread(self.sync_worker,
name=self.name.replace('request-handler', 'sync-worker'))
while 1:
buffer = b''
for chunk in self.recv():
buffer += chunk
if not len(buffer):
break
self.handle(buffer.decode())
log.info('Disconnected from {0}:{1}', self.address[0], self.address[1])
self.close()
| [
"behem0th.utils.read_file_seq",
"json.loads",
"select.select",
"behem0th.log.error",
"os.rename",
"base64.b64encode",
"json.dumps",
"base64.b64decode",
"behem0th.log.info_v",
"behem0th.log.info",
"os.mkdir",
"tempfile.NamedTemporaryFile",
"queue.Queue",
"behem0th.log.warn",
"os.remove"
] | [((5172, 5185), 'queue.Queue', 'queue.Queue', ([], {}), '()\n', (5183, 5185), False, 'import queue\n'), ((5679, 5745), 'behem0th.log.info', 'log.info', (['"""Connected to {0}:{1}"""', 'self.address[0]', 'self.address[1]'], {}), "('Connected to {0}:{1}', self.address[0], self.address[1])\n", (5687, 5745), False, 'from behem0th import utils, log\n'), ((6363, 6417), 'behem0th.log.info_v', 'log.info_v', (['"""Handling {0}, data:\n{1}"""', 'route', 'data'], {}), '("""Handling {0}, data:\n{1}""", route, data)\n', (6373, 6417), False, 'from behem0th import utils, log\n'), ((8837, 8908), 'behem0th.log.info', 'log.info', (['"""Disconnected from {0}:{1}"""', 'self.address[0]', 'self.address[1]'], {}), "('Disconnected from {0}:{1}', self.address[0], self.address[1])\n", (8845, 8908), False, 'from behem0th import utils, log\n'), ((2127, 2168), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', ([], {'delete': '(False)'}), '(delete=False)\n', (2154, 2168), False, 'import tempfile\n'), ((6216, 6232), 'json.loads', 'json.loads', (['data'], {}), '(data)\n', (6226, 6232), False, 'import json\n'), ((6495, 6552), 'behem0th.log.error', 'log.error', (['"""Data received on unknown route \'{0}\'!"""', 'route'], {}), '("Data received on unknown route \'{0}\'!", route)\n', (6504, 6552), False, 'from behem0th import utils, log\n'), ((6597, 6639), 'json.dumps', 'json.dumps', (["{'route': route, 'data': data}"], {}), "({'route': route, 'data': data})\n", (6607, 6639), False, 'import json\n'), ((7124, 7158), 'select.select', 'select.select', (['[self.sock]', '[]', '[]'], {}), '([self.sock], [], [])\n', (7137, 7158), False, 'import select\n'), ((7832, 7867), 'behem0th.log.info_v', 'log.info_v', (['"""Processing {0}"""', 'entry'], {}), "('Processing {0}', entry)\n", (7842, 7867), False, 'from behem0th import utils, log\n'), ((2376, 2400), 'base64.b64decode', 'base64.b64decode', (['buffer'], {}), '(buffer)\n', (2392, 2400), False, 'import base64\n'), ((2854, 2915), 'behem0th.log.warn', 'log.warn', (['"""FileRoute: Unknown action \'{0}\', igoring."""', 'action'], {}), '("FileRoute: Unknown action \'{0}\', igoring.", action)\n', (2862, 2915), False, 'from behem0th import utils, log\n'), ((3732, 3754), 'os.mkdir', 'os.mkdir', (['abspath', '(493)'], {}), '(abspath, 493)\n', (3740, 3754), False, 'import os\n'), ((3884, 3902), 'os.remove', 'os.remove', (['abspath'], {}), '(abspath)\n', (3893, 3902), False, 'import os\n'), ((6257, 6302), 'behem0th.log.error', 'log.error', (['"""Received invalid data: {0}"""', 'data'], {}), "('Received invalid data: {0}', data)\n", (6266, 6302), False, 'from behem0th import utils, log\n'), ((3978, 4010), 'os.rename', 'os.rename', (['abspath', "data['dest']"], {}), "(abspath, data['dest'])\n", (3987, 4010), False, 'import os\n'), ((4080, 4127), 'behem0th.log.warn', 'log.warn', (['"""EventRoute: Unknown event {0}"""', 'data'], {}), "('EventRoute: Unknown event {0}', data)\n", (4088, 4127), False, 'from behem0th import utils, log\n'), ((8111, 8151), 'behem0th.utils.read_file_seq', 'utils.read_file_seq', (['abspath', 'BLOCK_SIZE'], {}), '(abspath, BLOCK_SIZE)\n', (8130, 8151), False, 'from behem0th import utils, log\n'), ((2289, 2326), 'base64.b64decode', 'base64.b64decode', (['buffer[:BLOCK_SIZE]'], {}), '(buffer[:BLOCK_SIZE])\n', (2305, 2326), False, 'import base64\n'), ((8176, 8197), 'base64.b64encode', 'base64.b64encode', (['buf'], {}), '(buf)\n', (8192, 8197), False, 'import base64\n')] |
from django.utils.translation import gettext
from wagtail.admin.rich_text.editors.draftail import features as draftail_features
from wagtail.core import hooks
from .richtext import KaTeXEntityElementHandler, katex_entity_decorator
@hooks.register('register_rich_text_features')
def register_katex_features(features):
    """
    Register the `katex` feature, which uses the `KATEX` Draft.js entity type
    and is stored as HTML with a `<div data-katex-embed="c = \\pm\\sqrt{a^2 + b^2}">` tag.
    """
    features.default_features.append('katex')
feature_name = 'katex-embed'
type_ = 'KATEX-EMBED'
features.register_editor_plugin(
'draftail',
feature_name,
draftail_features.EntityFeature(
{
'type': type_,
'icon': 'square-root-alt',
'description': gettext('Equation'),
},
js=[
'wagtailkatex/katex/katex.min.js',
'wagtailkatex/wagtailkatex.js',
],
css={
'all': [
'wagtailkatex/katex/katex.min.css',
]
}
)
)
features.register_converter_rule('contentstate', feature_name, {
'from_database_format': {'div[data-katex-embed]': KaTeXEntityElementHandler()},
'to_database_format': {'entity_decorators': {type_: katex_entity_decorator}},
})
| [
"wagtail.core.hooks.register",
"django.utils.translation.gettext"
] | [((236, 281), 'wagtail.core.hooks.register', 'hooks.register', (['"""register_rich_text_features"""'], {}), "('register_rich_text_features')\n", (250, 281), False, 'from wagtail.core import hooks\n'), ((855, 874), 'django.utils.translation.gettext', 'gettext', (['"""Equation"""'], {}), "('Equation')\n", (862, 874), False, 'from django.utils.translation import gettext\n')] |
'''
Copyright 2017 Dell Inc. or its subsidiaries. All Rights Reserved.
This script tests arbitrary payloads of the RackHD API 2.0 OS bootstrap workflows.
The default case is running a minimum-payload Windows OS install.
Other Windows-type OS install cases can be specified by creating a payload file and specifying it using the '-extra' argument.
This test takes 30-45 minutes to run.
Example payload file (installed in configuration dir):
{"bootstrap-payload":
{"name": "Graph.InstallWindowsServer",
"options": {"defaults": {"version": "2012",
"repo": "http://172.31.128.1:8080/repo/winpe",
"smbRepo": "\\\\172.31.128.1\\windowsServer2012",
"productkey": "<KEY>",
"username": "rackhduser",
"password": "<PASSWORD>",
"smbUser": "vagrant",
"smbPassword": "<PASSWORD>"}}}
}
Example command line using external payload file:
python run_tests.py -stack 4 -test tests/bootstrap/test_api20_windows_bootstrap.py -extra base_windows_2012_install.json
RackHD Windows installation workflow requires special configuration of the RackHD server:
- A customized WinPE environment installed on RackHD server as documented here:
https://github.com/RackHD/on-tools/tree/master/winpe
- Samba installed on the RackHD server and configured as documented here:
http://rackhd.readthedocs.io/en/latest/rackhd/install_os.html?highlight=os%20install
- Windows 2012 installation distro installed on RackHD server or equivalent NFS mount.
- Windows 2012 activation key in the installation payload file.
'''
import fit_path # NOQA: unused import
from nose.plugins.attrib import attr
import fit_common
import flogging
import random
import json
import time
from nosedep import depends
from datetime import datetime
log = flogging.get_loggers()
# sample default base payload
PAYLOAD = {"name": "Graph.InstallWindowsServer",
"options": {"defaults": {"version": "2012",
"repo": "http://172.31.128.1:8080/repo/winpe",
"smbRepo": "\\\\172.31.128.1\\windowsServer2012",
"productkey": "<KEY>",
"username": "rackhduser",
"password": "<PASSWORD>",
"smbUser": "vagrant",
"smbPassword": "<PASSWORD>"}}}
# if an external payload file is specified, use that
config = fit_common.fitcfg().get('bootstrap-payload', None)
if config:
PAYLOAD = config
# recursively walk the workflow response and log the value of every field matching 'key'
def findall(obj, key):
if isinstance(obj, dict):
for k, v in obj.items():
if k == key:
log.error(" workflow error: %s", v)
findall(v, key)
elif isinstance(obj, list):
for item in obj:
findall(item, key)
else:
pass
# this routine polls a workflow task ID for completion
def wait_for_workflow_complete(instanceid, start_time, waittime=3200, cycle=30):
log.info_1(" Workflow started at time: " + str(datetime.fromtimestamp(start_time)))
while time.time() - start_time < waittime: # limit test to waittime seconds
result = fit_common.rackhdapi("/api/2.0/workflows/" + instanceid)
if result['status'] != 200:
log.error(" HTTP error: " + result['text'])
return False
if result['json']['status'] in ['running', 'pending']:
log.info_5("{} workflow status: {}".format(result['json']['injectableName'], result['json']['status']))
fit_common.time.sleep(cycle)
elif result['json']['status'] == 'succeeded':
log.info_1("{} workflow status: {}".format(result['json']['injectableName'], result['json']['status']))
end_time = time.time()
log.info_1(" Workflow completed at time: " + str(datetime.fromtimestamp(end_time)))
log.info_1(" Workflow duration: " + str(end_time - start_time))
return True
else:
end_time = time.time()
log.info_1(" Workflow failed at time: " + str(datetime.fromtimestamp(end_time)))
log.info_1(" Workflow duration: " + str(end_time - start_time))
try:
res = json.loads(result['text'])
findall(res, "error")
except:
res = result['text']
log.error(" Workflow failed: status: %s", result['json']['status'])
log.error(" Data: %s", json.dumps(res, indent=4, separators=(',', ':')))
return False
try:
res = json.loads(result['text'])
except:
res = result['text']
log.error(" Workflow Timeout: " + json.dumps(res, indent=4, separators=(',', ':')))
return False
# ------------------------ Tests -------------------------------------
@attr(all=False)
class api20_bootstrap_windows(fit_common.unittest.TestCase):
@classmethod
def setUpClass(cls):
# Get the list of nodes
NODECATALOG = fit_common.node_select()
assert (len(NODECATALOG) != 0), "There are no nodes currently discovered"
# Select one node at random
cls.__NODE = NODECATALOG[random.randint(0, len(NODECATALOG) - 1)]
        # Print node Id, node BMC mac, node type
nodeinfo = fit_common.rackhdapi('/api/2.0/nodes/' + cls.__NODE)['json']
nodesku = fit_common.rackhdapi(nodeinfo.get('sku'))['json']['name']
monurl = "/api/2.0/nodes/" + cls.__NODE + "/catalogs/bmc"
mondata = fit_common.rackhdapi(monurl, action="get")
catalog = mondata['json']
bmcresult = mondata['status']
if bmcresult != 200:
log.info_1(" Node ID: " + cls.__NODE)
log.info_1(" Error on catalog/bmc command")
else:
log.info_1(" Node ID: " + cls.__NODE)
log.info_1(" Node SKU: " + nodesku)
log.info_1(" Node BMC Mac: %s", catalog.get('data')['MAC Address'])
log.info_1(" Node BMC IP Addr: %s", catalog.get('data')['IP Address'])
log.info_1(" Node BMC IP Addr Src: %s", catalog.get('data')['IP Address Source'])
# delete active workflows for specified node
result = fit_common.cancel_active_workflows(cls.__NODE)
assert (result is True), "There are still some active workflows running against the node"
def test01_node_check(self):
# Log node data
nodeinfo = fit_common.rackhdapi('/api/2.0/nodes/' + self.__class__.__NODE)['json']
nodesku = fit_common.rackhdapi(nodeinfo.get('sku'))['json']['name']
log.info_1(" Node ID: %s ", self.__class__.__NODE)
log.info_1(" Node SKU: %s ", nodesku)
log.info_1(" Graph Name: Graph.PowerOn.Node")
# Ensure the compute node is powered on and reachable
result = fit_common.rackhdapi('/api/2.0/nodes/' +
self.__class__.__NODE +
'/workflows',
action='post', payload={"name": "Graph.PowerOn.Node"})
self.assertEqual(result['status'], 201, "Node Power on workflow API failed, see logs.")
self.assertTrue(wait_for_workflow_complete(result['json']['instanceId'], time.time(), 50, 5),
"Node Power on workflow failed, see logs.")
@depends(after=test01_node_check)
def test02_os_install(self):
# Log node data
nodeinfo = fit_common.rackhdapi('/api/2.0/nodes/' + self.__class__.__NODE)['json']
nodesku = fit_common.rackhdapi(nodeinfo.get('sku'))['json']['name']
log.info_1(" Node ID: " + self.__class__.__NODE)
log.info_1(" Node SKU: " + nodesku)
log.info_1(" Graph Name: Graph.InstallWindowsServer")
log.info_1(" Payload: " + fit_common.json.dumps(PAYLOAD))
# launch workflow
workflowid = None
result = fit_common.rackhdapi('/api/2.0/nodes/' +
self.__class__.__NODE +
'/workflows',
action='post', payload=PAYLOAD)
if result['status'] == 201:
# workflow running
log.info_1(" InstanceID: " + result['json']['instanceId'])
workflowid = result['json']['instanceId']
else:
# workflow failed with response code
log.error(" InstanceID: " + result['text'])
self.fail("Workflow failed with response code: " + result['status'])
self.assertTrue(wait_for_workflow_complete(workflowid, time.time()), "OS Install workflow failed, see logs.")
if __name__ == '__main__':
fit_common.unittest.main()
| [
"fit_common.cancel_active_workflows",
"json.loads",
"datetime.datetime.fromtimestamp",
"nose.plugins.attrib.attr",
"fit_common.unittest.main",
"fit_common.json.dumps",
"json.dumps",
"flogging.get_loggers",
"time.time",
"fit_common.fitcfg",
"fit_common.time.sleep",
"fit_common.rackhdapi",
"fit_common.node_select",
"nosedep.depends"
] | [((1983, 2005), 'flogging.get_loggers', 'flogging.get_loggers', ([], {}), '()\n', (2003, 2005), False, 'import flogging\n'), ((5096, 5111), 'nose.plugins.attrib.attr', 'attr', ([], {'all': '(False)'}), '(all=False)\n', (5100, 5111), False, 'from nose.plugins.attrib import attr\n'), ((7596, 7628), 'nosedep.depends', 'depends', ([], {'after': 'test01_node_check'}), '(after=test01_node_check)\n', (7603, 7628), False, 'from nosedep import depends\n'), ((8920, 8946), 'fit_common.unittest.main', 'fit_common.unittest.main', ([], {}), '()\n', (8944, 8946), False, 'import fit_common\n'), ((2680, 2699), 'fit_common.fitcfg', 'fit_common.fitcfg', ([], {}), '()\n', (2697, 2699), False, 'import fit_common\n'), ((3460, 3516), 'fit_common.rackhdapi', 'fit_common.rackhdapi', (["('/api/2.0/workflows/' + instanceid)"], {}), "('/api/2.0/workflows/' + instanceid)\n", (3480, 3516), False, 'import fit_common\n'), ((4847, 4873), 'json.loads', 'json.loads', (["result['text']"], {}), "(result['text'])\n", (4857, 4873), False, 'import json\n'), ((5269, 5293), 'fit_common.node_select', 'fit_common.node_select', ([], {}), '()\n', (5291, 5293), False, 'import fit_common\n'), ((5777, 5819), 'fit_common.rackhdapi', 'fit_common.rackhdapi', (['monurl'], {'action': '"""get"""'}), "(monurl, action='get')\n", (5797, 5819), False, 'import fit_common\n'), ((6467, 6513), 'fit_common.cancel_active_workflows', 'fit_common.cancel_active_workflows', (['cls.__NODE'], {}), '(cls.__NODE)\n', (6501, 6513), False, 'import fit_common\n'), ((7076, 7213), 'fit_common.rackhdapi', 'fit_common.rackhdapi', (["('/api/2.0/nodes/' + self.__class__.__NODE + '/workflows')"], {'action': '"""post"""', 'payload': "{'name': 'Graph.PowerOn.Node'}"}), "('/api/2.0/nodes/' + self.__class__.__NODE +\n '/workflows', action='post', payload={'name': 'Graph.PowerOn.Node'})\n", (7096, 7213), False, 'import fit_common\n'), ((8152, 8266), 'fit_common.rackhdapi', 'fit_common.rackhdapi', (["('/api/2.0/nodes/' + self.__class__.__NODE + '/workflows')"], {'action': '"""post"""', 'payload': 'PAYLOAD'}), "('/api/2.0/nodes/' + self.__class__.__NODE +\n '/workflows', action='post', payload=PAYLOAD)\n", (8172, 8266), False, 'import fit_common\n'), ((3372, 3383), 'time.time', 'time.time', ([], {}), '()\n', (3381, 3383), False, 'import time\n'), ((3825, 3853), 'fit_common.time.sleep', 'fit_common.time.sleep', (['cycle'], {}), '(cycle)\n', (3846, 3853), False, 'import fit_common\n'), ((4953, 5001), 'json.dumps', 'json.dumps', (['res'], {'indent': '(4)', 'separators': "(',', ':')"}), "(res, indent=4, separators=(',', ':'))\n", (4963, 5001), False, 'import json\n'), ((5556, 5608), 'fit_common.rackhdapi', 'fit_common.rackhdapi', (["('/api/2.0/nodes/' + cls.__NODE)"], {}), "('/api/2.0/nodes/' + cls.__NODE)\n", (5576, 5608), False, 'import fit_common\n'), ((6689, 6752), 'fit_common.rackhdapi', 'fit_common.rackhdapi', (["('/api/2.0/nodes/' + self.__class__.__NODE)"], {}), "('/api/2.0/nodes/' + self.__class__.__NODE)\n", (6709, 6752), False, 'import fit_common\n'), ((7705, 7768), 'fit_common.rackhdapi', 'fit_common.rackhdapi', (["('/api/2.0/nodes/' + self.__class__.__NODE)"], {}), "('/api/2.0/nodes/' + self.__class__.__NODE)\n", (7725, 7768), False, 'import fit_common\n'), ((3325, 3359), 'datetime.datetime.fromtimestamp', 'datetime.fromtimestamp', (['start_time'], {}), '(start_time)\n', (3347, 3359), False, 'from datetime import datetime\n'), ((4047, 4058), 'time.time', 'time.time', ([], {}), '()\n', (4056, 4058), False, 'import time\n'), ((4292, 4303), 'time.time', 'time.time', ([], {}), '()\n', (4301, 4303), False, 'import time\n'), ((7501, 7512), 'time.time', 'time.time', ([], {}), '()\n', (7510, 7512), False, 'import time\n'), ((8050, 8080), 'fit_common.json.dumps', 'fit_common.json.dumps', (['PAYLOAD'], {}), '(PAYLOAD)\n', (8071, 8080), False, 'import fit_common\n'), ((8832, 8843), 'time.time', 'time.time', ([], {}), '()\n', (8841, 8843), False, 'import time\n'), ((4512, 4538), 'json.loads', 'json.loads', (["result['text']"], {}), "(result['text'])\n", (4522, 4538), False, 'import json\n'), ((4749, 4797), 'json.dumps', 'json.dumps', (['res'], {'indent': '(4)', 'separators': "(',', ':')"}), "(res, indent=4, separators=(',', ':'))\n", (4759, 4797), False, 'import json\n'), ((4120, 4152), 'datetime.datetime.fromtimestamp', 'datetime.fromtimestamp', (['end_time'], {}), '(end_time)\n', (4142, 4152), False, 'from datetime import datetime\n'), ((4362, 4394), 'datetime.datetime.fromtimestamp', 'datetime.fromtimestamp', (['end_time'], {}), '(end_time)\n', (4384, 4394), False, 'from datetime import datetime\n')]
from flask import Flask, render_template
from flask_ask import Ask, statement
import random
app = Flask(__name__)
ask = Ask(app, '/')
@ask.intent('RandomNumber', convert={'lowerLimit': int, 'upperLimit': int})
def hello(lowerLimit, upperLimit):
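    """Respond with a random number between lowerLimit and upperLimit (defaults: 0 and 100)."""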
    if lowerLimit is None:
        lowerLimit = 0
    if upperLimit is None:
        upperLimit = 100
number = random.randint(lowerLimit, upperLimit)
text = render_template('random_number', lowerLimit=lowerLimit, upperLimit=upperLimit, number=number)
return statement(text).simple_card('Flask-Ask Random Number', text)
if __name__ == '__main__':
app.run(debug=True) | [
"flask.render_template",
"flask_ask.Ask",
"flask.Flask",
"flask_ask.statement",
"random.randint"
] | [((99, 114), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (104, 114), False, 'from flask import Flask, render_template\n'), ((121, 134), 'flask_ask.Ask', 'Ask', (['app', '"""/"""'], {}), "(app, '/')\n", (124, 134), False, 'from flask_ask import Ask, statement\n'), ((341, 379), 'random.randint', 'random.randint', (['lowerLimit', 'upperLimit'], {}), '(lowerLimit, upperLimit)\n', (355, 379), False, 'import random\n'), ((388, 486), 'flask.render_template', 'render_template', (['"""random_number"""'], {'lowerLimit': 'lowerLimit', 'upperLimit': 'upperLimit', 'number': 'number'}), "('random_number', lowerLimit=lowerLimit, upperLimit=\n upperLimit, number=number)\n", (403, 486), False, 'from flask import Flask, render_template\n'), ((490, 505), 'flask_ask.statement', 'statement', (['text'], {}), '(text)\n', (499, 505), False, 'from flask_ask import Ask, statement\n')] |
import numpy as np
import pybullet as p
import itertools
from robot import Robot
class World():
def __init__(self):
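        """
        Sets up the physics client, the arena (plane, goals, balls and walls) and six robots
        """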
# create the physics simulator
self.physicsClient = p.connect(p.GUI)
p.setGravity(0,0,-9.81)
self.max_communication_distance = 2.0
# We will integrate every 4ms (250Hz update)
self.dt = 1./250.
p.setPhysicsEngineParameter(self.dt, numSubSteps=1)
# Create the plane.
self.planeId = p.loadURDF("../models/plane.urdf")
p.changeDynamics(self.planeId, -1, lateralFriction=5., rollingFriction=0)
self.goalId = p.loadURDF("../models/goal.urdf")
self.goalId = p.loadURDF("../models/goal2.urdf")
# the balls
self.ball1 = p.loadURDF("../models/ball1.urdf")
p.resetBasePositionAndOrientation(self.ball1, [2., 4., 0.5], (0., 0., 0.5, 0.5))
self.ball2 = p.loadURDF("../models/ball2.urdf")
p.resetBasePositionAndOrientation(self.ball2, [4., 2., 0.5], (0., 0., 0.5, 0.5))
p.resetDebugVisualizerCamera(7.0,90.0, -43.0, (1., 1., 0.0))
# Add objects
wallId = p.loadSDF("../models/walls.sdf")[0]
p.resetBasePositionAndOrientation(wallId, [0., -1., 0], (0., 0., 0.5, 0.5))
wallId = p.loadSDF("../models/walls.sdf")[0]
p.resetBasePositionAndOrientation(wallId, [0., 1., 0], (0., 0., 0.5, 0.5))
wallId = p.loadSDF("../models/walls.sdf")[0]
p.resetBasePositionAndOrientation(wallId, [3., -1., 0], (0., 0., 0.5, 0.5))
wallId = p.loadSDF("../models/walls.sdf")[0]
p.resetBasePositionAndOrientation(wallId, [3., 1., 0], (0., 0., 0.5, 0.5))
wallId = p.loadSDF("../models/walls.sdf")[0]
p.resetBasePositionAndOrientation(wallId, [1., 2., 0], (0., 0., 0., 1.))
wallId = p.loadSDF("../models/walls.sdf")[0]
p.resetBasePositionAndOrientation(wallId, [2., -2., 0], (0., 0., 0., 1.))
# tube
# wallId = p.loadSDF("../models/walls.sdf")[0]
# p.resetBasePositionAndOrientation(wallId, [-1., 5., 0], (0., 0., 0., 1.))
# wallId = p.loadSDF("../models/walls.sdf")[0]
# p.resetBasePositionAndOrientation(wallId, [-1., 6., 0], (0., 0., 0., 1.))
# #arena
# wallId = p.loadSDF("../models/walls.sdf")[0]
# p.resetBasePositionAndOrientation(wallId, [-2, 4., 0], (0., 0., 0.5, 0.5))
# wallId = p.loadSDF("../models/walls.sdf")[0]
# p.resetBasePositionAndOrientation(wallId, [-2., 7., 0], (0., 0., 0.5, 0.5))
# wallId = p.loadSDF("../models/walls.sdf")[0]
# p.resetBasePositionAndOrientation(wallId, [-2., 9., 0], (0., 0., 0.5, 0.5))
# wallId = p.loadSDF("../models/walls.sdf")[0]
# p.resetBasePositionAndOrientation(wallId, [-2., 11., 0], (0., 0., 0.5, 0.5))
# wallId = p.loadSDF("../models/walls.sdf")[0]
# p.resetBasePositionAndOrientation(wallId, [-2., 13., 0], (0., 0., 0.5, 0.5))
# wallId = p.loadSDF("../models/walls.sdf")[0]
# p.resetBasePositionAndOrientation(wallId, [-3., 3., 0], (0., 0., 0., 1.))
# wallId = p.loadSDF("../models/walls.sdf")[0]
# p.resetBasePositionAndOrientation(wallId, [-5., 3., 0], (0., 0., 0., 1.))
# wallId = p.loadSDF("../models/walls.sdf")[0]
# p.resetBasePositionAndOrientation(wallId, [-7., 3., 0], (0., 0., 0., 1.))
# wallId = p.loadSDF("../models/walls.sdf")[0]
# p.resetBasePositionAndOrientation(wallId, [-8, 4., 0], (0., 0., 0.5, 0.5))
# wallId = p.loadSDF("../models/walls.sdf")[0]
# p.resetBasePositionAndOrientation(wallId, [-8., 6., 0], (0., 0., 0.5, 0.5))
# wallId = p.loadSDF("../models/walls.sdf")[0]
# p.resetBasePositionAndOrientation(wallId, [-8., 8., 0], (0., 0., 0.5, 0.5))
# wallId = p.loadSDF("../models/walls.sdf")[0]
# p.resetBasePositionAndOrientation(wallId, [-8., 10., 0], (0., 0., 0.5, 0.5))
# wallId = p.loadSDF("../models/walls.sdf")[0]
# p.resetBasePositionAndOrientation(wallId, [-8., 12., 0], (0., 0., 0.5, 0.5))
# create 6 robots
self.robots = []
for (i,j) in itertools.product(range(3), range(2)):
self.robots.append(Robot([1. * i + 0.5, 1. * j - 0.5, 0.3], 2*i+j, self.dt))
p.stepSimulation()
self.time = 0.0
self.stepSimulation()
self.stepSimulation()
def reset(self):
"""
Resets the position of all the robots
"""
for r in self.robots:
r.reset()
p.stepSimulation()
def stepSimulation(self):
"""
Simulates one step simulation
"""
# for each robot construct list of neighbors
for r in self.robots:
r.neighbors = [] #reset neighbors
r.messages_received = [] #reset message received
pos1, or1 = r.get_pos_and_orientation()
for j,r2 in enumerate(self.robots):
if(r.id != r2.id):
pos2, or2 = r2.get_pos_and_orientation()
if(np.linalg.norm(pos1-pos2) < self.max_communication_distance):
r.neighbors.append(j)
# for each robot send and receive messages
for i,r in enumerate(self.robots):
for msg in r.messages_to_send:
if msg[0] in r.neighbors: #then we can send the message
self.robots[msg[0]].messages_received.append([i, msg[1]]) #add the sender id
r.messages_to_send = []
# update the controllers
if self.time > 1.0:
for r in self.robots:
r.compute_controller()
# do one simulation step
p.stepSimulation()
self.time += self.dt
| [
"pybullet.resetDebugVisualizerCamera",
"robot.Robot",
"pybullet.loadSDF",
"pybullet.connect",
"pybullet.setGravity",
"pybullet.setPhysicsEngineParameter",
"pybullet.changeDynamics",
"numpy.linalg.norm",
"pybullet.stepSimulation",
"pybullet.resetBasePositionAndOrientation",
"pybullet.loadURDF"
] | [((194, 210), 'pybullet.connect', 'p.connect', (['p.GUI'], {}), '(p.GUI)\n', (203, 210), True, 'import pybullet as p\n'), ((219, 244), 'pybullet.setGravity', 'p.setGravity', (['(0)', '(0)', '(-9.81)'], {}), '(0, 0, -9.81)\n', (231, 244), True, 'import pybullet as p\n'), ((386, 437), 'pybullet.setPhysicsEngineParameter', 'p.setPhysicsEngineParameter', (['self.dt'], {'numSubSteps': '(1)'}), '(self.dt, numSubSteps=1)\n', (413, 437), True, 'import pybullet as p\n'), ((490, 524), 'pybullet.loadURDF', 'p.loadURDF', (['"""../models/plane.urdf"""'], {}), "('../models/plane.urdf')\n", (500, 524), True, 'import pybullet as p\n'), ((533, 607), 'pybullet.changeDynamics', 'p.changeDynamics', (['self.planeId', '(-1)'], {'lateralFriction': '(5.0)', 'rollingFriction': '(0)'}), '(self.planeId, -1, lateralFriction=5.0, rollingFriction=0)\n', (549, 607), True, 'import pybullet as p\n'), ((630, 663), 'pybullet.loadURDF', 'p.loadURDF', (['"""../models/goal.urdf"""'], {}), "('../models/goal.urdf')\n", (640, 663), True, 'import pybullet as p\n'), ((686, 720), 'pybullet.loadURDF', 'p.loadURDF', (['"""../models/goal2.urdf"""'], {}), "('../models/goal2.urdf')\n", (696, 720), True, 'import pybullet as p\n'), ((771, 805), 'pybullet.loadURDF', 'p.loadURDF', (['"""../models/ball1.urdf"""'], {}), "('../models/ball1.urdf')\n", (781, 805), True, 'import pybullet as p\n'), ((814, 903), 'pybullet.resetBasePositionAndOrientation', 'p.resetBasePositionAndOrientation', (['self.ball1', '[2.0, 4.0, 0.5]', '(0.0, 0.0, 0.5, 0.5)'], {}), '(self.ball1, [2.0, 4.0, 0.5], (0.0, 0.0, \n 0.5, 0.5))\n', (847, 903), True, 'import pybullet as p\n'), ((916, 950), 'pybullet.loadURDF', 'p.loadURDF', (['"""../models/ball2.urdf"""'], {}), "('../models/ball2.urdf')\n", (926, 950), True, 'import pybullet as p\n'), ((959, 1048), 'pybullet.resetBasePositionAndOrientation', 'p.resetBasePositionAndOrientation', (['self.ball2', '[4.0, 2.0, 0.5]', '(0.0, 0.0, 0.5, 0.5)'], {}), '(self.ball2, [4.0, 2.0, 0.5], (0.0, 0.0, \n 0.5, 0.5))\n', (992, 1048), True, 'import pybullet as p\n'), ((1049, 1112), 'pybullet.resetDebugVisualizerCamera', 'p.resetDebugVisualizerCamera', (['(7.0)', '(90.0)', '(-43.0)', '(1.0, 1.0, 0.0)'], {}), '(7.0, 90.0, -43.0, (1.0, 1.0, 0.0))\n', (1077, 1112), True, 'import pybullet as p\n'), ((1202, 1281), 'pybullet.resetBasePositionAndOrientation', 'p.resetBasePositionAndOrientation', (['wallId', '[0.0, -1.0, 0]', '(0.0, 0.0, 0.5, 0.5)'], {}), '(wallId, [0.0, -1.0, 0], (0.0, 0.0, 0.5, 0.5))\n', (1235, 1281), True, 'import pybullet as p\n'), ((1339, 1417), 'pybullet.resetBasePositionAndOrientation', 'p.resetBasePositionAndOrientation', (['wallId', '[0.0, 1.0, 0]', '(0.0, 0.0, 0.5, 0.5)'], {}), '(wallId, [0.0, 1.0, 0], (0.0, 0.0, 0.5, 0.5))\n', (1372, 1417), True, 'import pybullet as p\n'), ((1475, 1554), 'pybullet.resetBasePositionAndOrientation', 'p.resetBasePositionAndOrientation', (['wallId', '[3.0, -1.0, 0]', '(0.0, 0.0, 0.5, 0.5)'], {}), '(wallId, [3.0, -1.0, 0], (0.0, 0.0, 0.5, 0.5))\n', (1508, 1554), True, 'import pybullet as p\n'), ((1612, 1690), 'pybullet.resetBasePositionAndOrientation', 'p.resetBasePositionAndOrientation', (['wallId', '[3.0, 1.0, 0]', '(0.0, 0.0, 0.5, 0.5)'], {}), '(wallId, [3.0, 1.0, 0], (0.0, 0.0, 0.5, 0.5))\n', (1645, 1690), True, 'import pybullet as p\n'), ((1748, 1826), 'pybullet.resetBasePositionAndOrientation', 'p.resetBasePositionAndOrientation', (['wallId', '[1.0, 2.0, 0]', '(0.0, 0.0, 0.0, 1.0)'], {}), '(wallId, [1.0, 2.0, 0], (0.0, 0.0, 0.0, 1.0))\n', (1781, 1826), True, 'import pybullet as p\n'), ((1882, 1961), 'pybullet.resetBasePositionAndOrientation', 'p.resetBasePositionAndOrientation', (['wallId', '[2.0, -2.0, 0]', '(0.0, 0.0, 0.0, 1.0)'], {}), '(wallId, [2.0, -2.0, 0], (0.0, 0.0, 0.0, 1.0))\n', (1915, 1961), True, 'import pybullet as p\n'), ((4600, 4618), 'pybullet.stepSimulation', 'p.stepSimulation', ([], {}), '()\n', (4616, 4618), True, 'import pybullet as p\n'), ((5790, 5808), 'pybullet.stepSimulation', 'p.stepSimulation', ([], {}), '()\n', (5806, 5808), True, 'import pybullet as p\n'), ((1158, 1190), 'pybullet.loadSDF', 'p.loadSDF', (['"""../models/walls.sdf"""'], {}), "('../models/walls.sdf')\n", (1167, 1190), True, 'import pybullet as p\n'), ((1295, 1327), 'pybullet.loadSDF', 'p.loadSDF', (['"""../models/walls.sdf"""'], {}), "('../models/walls.sdf')\n", (1304, 1327), True, 'import pybullet as p\n'), ((1431, 1463), 'pybullet.loadSDF', 'p.loadSDF', (['"""../models/walls.sdf"""'], {}), "('../models/walls.sdf')\n", (1440, 1463), True, 'import pybullet as p\n'), ((1568, 1600), 'pybullet.loadSDF', 'p.loadSDF', (['"""../models/walls.sdf"""'], {}), "('../models/walls.sdf')\n", (1577, 1600), True, 'import pybullet as p\n'), ((1704, 1736), 'pybullet.loadSDF', 'p.loadSDF', (['"""../models/walls.sdf"""'], {}), "('../models/walls.sdf')\n", (1713, 1736), True, 'import pybullet as p\n'), ((1838, 1870), 'pybullet.loadSDF', 'p.loadSDF', (['"""../models/walls.sdf"""'], {}), "('../models/walls.sdf')\n", (1847, 1870), True, 'import pybullet as p\n'), ((4327, 4345), 'pybullet.stepSimulation', 'p.stepSimulation', ([], {}), '()\n', (4343, 4345), True, 'import pybullet as p\n'), ((4257, 4319), 'robot.Robot', 'Robot', (['[1.0 * i + 0.5, 1.0 * j - 0.5, 0.3]', '(2 * i + j)', 'self.dt'], {}), '([1.0 * i + 0.5, 1.0 * j - 0.5, 0.3], 2 * i + j, self.dt)\n', (4262, 4319), False, 'from robot import Robot\n'), ((5138, 5165), 'numpy.linalg.norm', 'np.linalg.norm', (['(pos1 - pos2)'], {}), '(pos1 - pos2)\n', (5152, 5165), True, 'import numpy as np\n')]
"""
Input example
3 16
Output example
3
5
7
11
13
"""
import math
left, right = map(int, input().split())
array = [True for i in range(right+1)]
array[1] = False
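# Sieve of Eratosthenes: for each prime i up to sqrt(right), mark every multiple of i as composite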
for i in range(2, int(math.sqrt(right)) + 1):
    if array[i]:
j = 2
while i * j <= right:
array[i * j] = False
j += 1
for i in range(left, right+1):
if array[i]:
print(i) | [
"math.sqrt"
] | [((166, 182), 'math.sqrt', 'math.sqrt', (['right'], {}), '(right)\n', (175, 182), False, 'import math\n')] |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests the graph quantization script.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import numpy as np
from tensorflow.core.framework import graph_pb2
from tensorflow.python.client import session
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import graph_util
from tensorflow.python.framework import importer
from tensorflow.python.framework import ops as ops_lib
from tensorflow.python.platform import flags as flags_lib
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging
from tensorflow.tools.quantization import quantize_graph
flags = flags_lib
FLAGS = flags.FLAGS
def run_graph_def(graph_def, input_map, outputs):
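  """Runs graph_def in a new session, feeding input_map, and returns the outputs."""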
graph = ops_lib.Graph()
with graph.as_default():
importer.import_graph_def(graph_def, input_map={}, name="")
with session.Session(graph=graph) as sess:
results = sess.run(outputs, feed_dict=input_map)
return results
def test_mat_mul(m, n, k, a, b):
"""Tests a MatMul replacement."""
a_constant_name = "a_constant"
b_constant_name = "b_constant"
mat_mul_name = "mat_mul"
float_graph_def = graph_pb2.GraphDef()
a_constant = quantize_graph.create_constant_node(
a_constant_name, value=a, dtype=dtypes.float32, shape=[m, k])
float_graph_def.node.extend([a_constant])
b_constant = quantize_graph.create_constant_node(
b_constant_name, value=b, dtype=dtypes.float32, shape=[k, n])
float_graph_def.node.extend([b_constant])
mat_mul_node = quantize_graph.create_node("MatMul", mat_mul_name,
[a_constant_name, b_constant_name])
quantize_graph.set_attr_dtype(mat_mul_node, "T", dtypes.float32)
quantize_graph.set_attr_bool(mat_mul_node, "transpose_a", False)
quantize_graph.set_attr_bool(mat_mul_node, "transpose_b", False)
float_graph_def.node.extend([mat_mul_node])
test_graph(float_graph_def, {}, [mat_mul_name])
def test_conv(depth, image_width, image_height, image_batch_count, filter_size,
filter_count, stride, padding, input_values, filter_values):
"""Tests a Conv replacement."""
input_constant_name = "input_constant"
filter_constant_name = "filter_constant"
conv_name = "conv"
float_graph_def = graph_pb2.GraphDef()
input_constant = quantize_graph.create_constant_node(
input_constant_name,
value=input_values,
dtype=dtypes.float32,
shape=[image_batch_count, image_height, image_width, depth])
float_graph_def.node.extend([input_constant])
filter_constant = quantize_graph.create_constant_node(
filter_constant_name,
value=filter_values,
dtype=dtypes.float32,
shape=[filter_size, filter_size, depth, filter_count])
float_graph_def.node.extend([filter_constant])
conv_node = quantize_graph.create_node(
"Conv2D", conv_name, [input_constant_name, filter_constant_name])
quantize_graph.set_attr_dtype(conv_node, "T", dtypes.float32)
quantize_graph.set_attr_int_list(conv_node, "strides", [1, stride, stride, 1])
quantize_graph.set_attr_string(conv_node, "padding", padding)
float_graph_def.node.extend([conv_node])
test_graph(float_graph_def, {}, [conv_name])
def are_tensors_near(a, b, tolerance):
"""Tests whether two tensors are nearly identical.
This is a specialized comparison function designed to help debug problems with
quantization. It prints out information about the differences between tensors
on failure, paying special attention to possible biases by looking at the mean
and absolute average errors.
Args:
a: First comparison tensor.
b: Second comparison tensor.
tolerance: Float value indicating how large an error between values is ok.
Returns:
Boolean indicating whether the two inputs were close enough.
"""
flat_a = a.flatten()
flat_b = b.flatten()
if len(flat_a) != len(flat_b):
tf_logging.info("Tensors are different sizes: " + str(len(flat_a)) + " vs "
+ str(len(flat_b)))
return False
value_count = len(flat_a)
how_many_different = 0
total_difference = 0
total_abs_difference = 0
for index in range(value_count):
a_value = flat_a[index]
b_value = flat_b[index]
difference = a_value - b_value
total_difference += difference
total_abs_difference += abs(difference)
if abs(difference) > tolerance:
how_many_different += 1
mean_difference = total_difference / value_count
mean_abs_difference = total_abs_difference / value_count
proportion_different = (how_many_different * 1.0) / value_count
if how_many_different == 0:
return True
else:
tf_logging.info("Tensors have {0} different values ({1}%), with mean"
" difference {2} and mean absolute difference {3}".format(
how_many_different, proportion_different * 100,
mean_difference, mean_abs_difference))
return False
def get_top_value(input_values):
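  """Returns the index and value of the largest element in input_values."""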
max_value = None
max_index = None
for index, value in enumerate(input_values.flatten()):
    if max_value is None or value > max_value:
max_value = value
max_index = index
return max_index, max_value
def test_graph(float_graph_def, input_map, output_names, log_graph=False):
"""Runs the float graph through the rewriter and tests the results."""
float_results = run_graph_def(
float_graph_def, input_map,
[output_name + ":0" for output_name in output_names])
# TODO(petewarden): round test is currently failing because there is no
# RoundToSteps op available.
# round_rewriter = quantize_graph.GraphRewriter(float_graph_def, "round")
# round_graph_def = round_rewriter.rewrite(output_name)
# round_results = run_graph_def(round_graph_def, input_map,
# [output_name + ":0"])
# assert are_tensors_near(expected, round_results[0], 1.0)
#
# TODO(petewarden): Add test for "quantize" mode.
eightbit_rewriter = quantize_graph.GraphRewriter(
float_graph_def, "eightbit", quantized_input_range=None)
eightbit_graph_def = eightbit_rewriter.rewrite(output_names)
eightbit_results = run_graph_def(
eightbit_graph_def, input_map,
[output_name + ":0" for output_name in output_names])
for expected, result in zip(float_results, eightbit_results):
assert are_tensors_near(expected, result, 1.0)
if log_graph:
tf_logging.info("8bit:\n%s", str(eightbit_graph_def))
# Test the weights_rounded mode. This uses the default bit_depth.
weights_rounded_rewriter = quantize_graph.GraphRewriter(
float_graph_def, "weights_rounded", quantized_input_range=None)
weights_rounded_graph_def = weights_rounded_rewriter.rewrite(output_names)
weights_rounded_results = run_graph_def(
weights_rounded_graph_def, input_map,
[output_name + ":0" for output_name in output_names])
for expected, result in zip(float_results, weights_rounded_results):
assert are_tensors_near(expected, result, 1.0)
class QuantizeGraphTest(test.TestCase):
def test_negative_const_problem(self):
shape_constant_name = "shape_constant"
shape_constant = quantize_graph.create_constant_node(
shape_constant_name, value=-0.8, dtype=dtypes.float32, shape=[1])
quantization_result = quantize_graph.quantize_weight_eightbit(
shape_constant, b"MIN_COMBINED")
self.assertEqual(4, len(quantization_result))
def test_odd_padding_problem(self):
"""Tests one error case we ran into in a real graph."""
test_conv(1, 4, 4, 1, 3, 1, 2, b"SAME",
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16],
[1, 2, 3, 4, 5, 6, 7, 8, 9])
def test_mat_mul_tiny(self):
# These tests are added to test the generate case where
# min(matrix) == max(matrix), which used to cause problems.
test_mat_mul(1, 1, 1, [2], [3])
test_mat_mul(1, 2, 1, [1], [2, 3])
test_mat_mul(1, 1, 2, [1, 1], [1, 1])
test_mat_mul(1, 1, 2, [0, 0], [1, 1])
# The general case.
test_mat_mul(1, 1, 2, [1, 2], [1, 2])
def test_mat_mul_small(self):
test_mat_mul(2, 4, 3, [1, 2, 3, 4, 5, 6],
[7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18])
def test_conv(self):
test_conv(1, 4, 3, 1, 3, 1, 1, b"SAME",
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12],
[1, 4, 7, 2, 5, 8, 3, 6, 9])
def test_reshape(self):
"""Tests that MatMul->Reshape->MatMul avoids extra quantize/dequantize."""
def make_matmul(name, a, b):
n = quantize_graph.create_node("MatMul", name, [a.name, b.name])
quantize_graph.set_attr_dtype(n, "T", dtypes.float32)
quantize_graph.set_attr_bool(n, "transpose_a", False)
quantize_graph.set_attr_bool(n, "transpose_b", False)
return n
# matmul_1 = input*weight_1
input_node = quantize_graph.create_constant_node(
"input", value=[0, 1, 2, 3], dtype=dtypes.float32, shape=[4, 1])
weight_1_node = quantize_graph.create_constant_node(
"weight_1",
value=[.5, .6, .7, .8, .9],
dtype=dtypes.float32,
shape=[1, 5])
matmul_1_node = make_matmul("matmul_1", input_node, weight_1_node)
# Reshape 4x5 to 10x2.
new_shape_node = quantize_graph.create_constant_node(
"new_shape_node", value=[10, 2], dtype=dtypes.int32, shape=[2])
reshape_node = quantize_graph.create_node(
"Reshape", "reshape", [matmul_1_node.name, new_shape_node.name])
quantize_graph.set_attr_dtype(reshape_node, "T", dtypes.float32)
# matmul_2_node = reshape*weight_2
weight_2_node = quantize_graph.create_constant_node(
"weight_2", value=[1.5, 2.5], dtype=dtypes.float32, shape=[2, 1])
matmul_2_node = make_matmul("matmul_2", reshape_node, weight_2_node)
g = graph_pb2.GraphDef()
g.node.extend([
input_node, weight_1_node, matmul_1_node, new_shape_node, reshape_node,
weight_2_node, matmul_2_node
])
# Test the graph
test_graph(g, {}, ["matmul_2"])
# Verify there is only one Quantize and one Requantize op.
eightbit_rewriter = quantize_graph.GraphRewriter(
g, "eightbit", quantized_input_range=None)
eightbit_graph_def = eightbit_rewriter.rewrite(["matmul_2"])
ops = [node.op for node in eightbit_graph_def.node]
# No quantize since all inputs are const and can be quantized up-front.
self.assertEqual(0, ops.count("QuantizeV2") + ops.count("Quantize"))
self.assertEqual(1, ops.count("QuantizedReshape"))
# One dequantize at the end.
self.assertEqual(1, ops.count("Dequantize"))
def test_quantize_array(self):
# Test invalid parameters (empty array, or 0 buckets.
self.assertRaises(ValueError, quantize_graph.quantize_array, np.array([]),
2)
self.assertRaises(ValueError, quantize_graph.quantize_array,
np.array([1, 2]), 0)
# Test input array of length 1.
arr = np.array([1])
qarr = quantize_graph.quantize_array(arr, 1)
self.assertEqual(arr, qarr)
qarr = quantize_graph.quantize_array(arr, 2)
self.assertEqual(arr, qarr)
# Test input array with all elements equal.
arr = np.array([1, 1, 1])
qarr = quantize_graph.quantize_array(arr, 10)
self.assertTrue((np.array([1, 1, 1]) == qarr).all())
# Test "normal" input arrays.
arr = np.array([0, 0.3, 0.6, 1])
qarr = quantize_graph.quantize_array(arr, 1)
self.assertTrue((np.array([0.5, 0.5, 0.5, 0.5]) == qarr).all())
qarr = quantize_graph.quantize_array(arr, 2)
self.assertTrue((np.array([0.25, 0.25, 0.75, 0.75]) == qarr).all())
qarr = quantize_graph.quantize_array(arr.reshape((2, 2)), 2)
self.assertTrue((np.array([[0.25, 0.25], [0.75, 0.75]]) == qarr).all())
def test_non_float_concat(self):
concat_dim = quantize_graph.create_constant_node(
"concat_dim", value=0, dtype=dtypes.int32, shape=[])
a = quantize_graph.create_constant_node(
"a",
value=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12],
dtype=dtypes.int32,
shape=[2, 2, 3])
b = quantize_graph.create_constant_node(
"b",
value=[13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24],
dtype=dtypes.int32,
shape=[2, 2, 3])
concat = quantize_graph.create_node("Concat", "concat",
[concat_dim.name, a.name, b.name])
quantize_graph.set_attr_int(concat, "N", 2)
quantize_graph.set_attr_dtype(concat, "T", dtypes.int32)
g = graph_pb2.GraphDef()
g.node.extend([concat_dim, a, b, concat])
test_graph(g, {}, [concat.name])
def test_non_float_reshape(self):
a = quantize_graph.create_constant_node(
"a",
value=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12],
dtype=dtypes.int32,
shape=[2, 2, 3])
shape = quantize_graph.create_constant_node(
"shape", value=[12], dtype=dtypes.int32, shape=[1])
reshape = quantize_graph.create_node("Reshape", "reshape",
[a.name, shape.name])
quantize_graph.set_attr_dtype(reshape, "T", dtypes.int32)
g = graph_pb2.GraphDef()
g.node.extend([a, shape, reshape])
test_graph(g, {}, [reshape.name])
def test_concat(self):
shape_constant_name = "shape_constant"
a_constant_name = "a_constant"
b_constant_name = "b_constant"
concat_name = "concat"
float_graph_def = graph_pb2.GraphDef()
shape_constant = quantize_graph.create_constant_node(
shape_constant_name, value=0, dtype=dtypes.int32, shape=[])
float_graph_def.node.extend([shape_constant])
a_constant = quantize_graph.create_constant_node(
a_constant_name,
value=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12],
dtype=dtypes.float32,
shape=[2, 2, 3])
float_graph_def.node.extend([a_constant])
b_constant = quantize_graph.create_constant_node(
b_constant_name,
value=[13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24],
dtype=dtypes.float32,
shape=[2, 2, 3])
float_graph_def.node.extend([b_constant])
concat_node = quantize_graph.create_node(
"Concat", concat_name,
[shape_constant_name, a_constant_name, b_constant_name])
quantize_graph.set_attr_int(concat_node, "N", 2)
quantize_graph.set_attr_dtype(concat_node, "T", dtypes.float32)
float_graph_def.node.extend([concat_node])
test_graph(float_graph_def, {}, [concat_name])
# Verify the concat is quantized.
eightbit_rewriter = quantize_graph.GraphRewriter(
float_graph_def, "eightbit", quantized_input_range=None)
eightbit_graph_def = eightbit_rewriter.rewrite([concat_name])
ops = [node.op for node in eightbit_graph_def.node]
self.assertEqual(1, ops.count("QuantizedConcat"))
def test_multiple_outputs(self):
input_constant_name = "input_constant"
split_constant_name = "split_constant"
split_name = "split"
concat_constant_name = "concat_constant"
concat_name = "concat"
float_graph_def = graph_pb2.GraphDef()
input_constant = quantize_graph.create_constant_node(
input_constant_name,
value=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12],
dtype=dtypes.float32,
shape=[2, 6])
float_graph_def.node.extend([input_constant])
split_constant = quantize_graph.create_constant_node(
split_constant_name, value=1, dtype=dtypes.int32, shape=[])
float_graph_def.node.extend([split_constant])
split_node = quantize_graph.create_node(
"Split", split_name, [split_constant_name, input_constant_name])
quantize_graph.set_attr_int(split_node, "num_split", 2)
quantize_graph.set_attr_dtype(split_node, "T", dtypes.float32)
float_graph_def.node.extend([split_node])
concat_constant = quantize_graph.create_constant_node(
concat_constant_name, value=1, dtype=dtypes.int32, shape=[])
float_graph_def.node.extend([concat_constant])
concat_node = quantize_graph.create_node(
"Concat", concat_name,
[concat_constant_name, split_name + ":0", split_name + ":1"])
quantize_graph.set_attr_int(concat_node, "N", 2)
quantize_graph.set_attr_dtype(concat_node, "T", dtypes.float32)
float_graph_def.node.extend([concat_node])
test_graph(float_graph_def, {}, [concat_name])
def test_node_name_from_input(self):
self.assertEqual("SomeName",
quantize_graph.node_name_from_input("^SomeName:2"))
def test_unique_node_name_from_input(self):
self.assertEqual("__hat__SomeName__port__2",
quantize_graph.unique_node_name_from_input("^SomeName:2"))
def test_identity(self):
input_constant_name = "input_constant"
identity_name = "identity"
float_graph_def = graph_pb2.GraphDef()
input_constant = quantize_graph.create_constant_node(
input_constant_name,
value=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12],
dtype=dtypes.float32,
shape=[2, 6])
float_graph_def.node.extend([input_constant])
identity_node = quantize_graph.create_node("Identity", identity_name,
[input_constant_name])
quantize_graph.set_attr_dtype(identity_node, "T", dtypes.float32)
float_graph_def.node.extend([identity_node])
mul_name = "mul"
mul_node = quantize_graph.create_node("Mul", mul_name,
[identity_name, identity_name])
quantize_graph.set_attr_dtype(mul_node, "T", dtypes.float32)
float_graph_def.node.extend([mul_node])
test_graph(float_graph_def, {}, [mul_name])
def test_keep_control_edges(self):
no_op_name = "no_op"
a_constant_name = "a_constant"
b_constant_name = "b_constant"
a_check_name = "a_check"
b_check_name = "b_check"
a_identity_name = "a_identity"
b_identity_name = "b_identity"
add_name = "add"
graph_def = graph_pb2.GraphDef()
no_op = quantize_graph.create_node("NoOp", no_op_name, [])
graph_def.node.extend([no_op])
a_constant = quantize_graph.create_constant_node(
a_constant_name, value=1, dtype=dtypes.float32, shape=[])
graph_def.node.extend([a_constant])
a_check_node = quantize_graph.create_node("CheckNumerics", a_check_name,
[a_constant_name])
graph_def.node.extend([a_check_node])
a_identity_node = quantize_graph.create_node(
"Identity", a_identity_name,
[a_constant_name, "^" + a_check_name, "^" + no_op_name])
graph_def.node.extend([a_identity_node])
b_constant = quantize_graph.create_constant_node(
b_constant_name, value=1, dtype=dtypes.float32, shape=[])
graph_def.node.extend([b_constant])
b_check_node = quantize_graph.create_node("CheckNumerics", b_check_name,
[b_constant_name])
graph_def.node.extend([b_check_node])
b_identity_node = quantize_graph.create_node(
"Identity", b_identity_name, [b_constant_name, "^" + b_check_name])
graph_def.node.extend([b_identity_node])
add_node = quantize_graph.create_node("Add", add_name,
[a_identity_name, b_identity_name])
quantize_graph.set_attr_dtype(add_node, "T", dtypes.float32)
graph_def.node.extend([add_node])
expected_output = graph_pb2.GraphDef()
no_op = quantize_graph.create_node("NoOp", no_op_name, [])
expected_output.node.extend([no_op])
a_constant = quantize_graph.create_constant_node(
a_constant_name, value=1, dtype=dtypes.float32, shape=[])
expected_output.node.extend([a_constant])
a_identity_node = quantize_graph.create_node(
"Identity", a_identity_name, [a_constant_name, "^" + no_op_name])
expected_output.node.extend([a_identity_node])
b_constant = quantize_graph.create_constant_node(
b_constant_name, value=1, dtype=dtypes.float32, shape=[])
expected_output.node.extend([b_constant])
add_node = quantize_graph.create_node("Add", add_name,
[a_identity_name, b_constant_name])
quantize_graph.set_attr_dtype(add_node, "T", dtypes.float32)
expected_output.node.extend([add_node])
expected_output.versions.CopyFrom(graph_def.versions)
expected_output.library.CopyFrom(graph_def.library)
output = graph_util.remove_training_nodes(graph_def)
stripped_output = graph_util.extract_sub_graph(output, [add_name])
self.assertProtoEquals(expected_output, stripped_output)
def test_batch_norm(self):
input_constant_name = "input_constant"
mean_constant_name = "mean_constant"
variance_constant_name = "variance_constant"
beta_constant_name = "beta_constant"
gamma_constant_name = "gamma_constant"
batch_norm_name = "batch_norm"
float_graph_def = graph_pb2.GraphDef()
input_constant = quantize_graph.create_constant_node(
input_constant_name,
value=[1, 4, 2, 5, 3, 6, -1, -4, -2, -5, -3, -6],
dtype=dtypes.float32,
shape=[1, 1, 6, 2])
float_graph_def.node.extend([input_constant])
mean_constant = quantize_graph.create_constant_node(
mean_constant_name, value=[10, 20], dtype=dtypes.float32, shape=[2])
float_graph_def.node.extend([mean_constant])
variance_constant = quantize_graph.create_constant_node(
variance_constant_name,
value=[0.25, 0.5],
dtype=dtypes.float32,
shape=[2])
float_graph_def.node.extend([variance_constant])
beta_constant = quantize_graph.create_constant_node(
beta_constant_name, value=[0.1, 0.6], dtype=dtypes.float32, shape=[2])
float_graph_def.node.extend([beta_constant])
gamma_constant = quantize_graph.create_constant_node(
gamma_constant_name, value=[0, 0], dtype=dtypes.float32, shape=[2])
float_graph_def.node.extend([gamma_constant])
batch_norm_node = quantize_graph.create_node(
"BatchNormWithGlobalNormalization", batch_norm_name, [
input_constant_name, mean_constant_name, variance_constant_name,
beta_constant_name, gamma_constant_name
])
quantize_graph.set_attr_dtype(batch_norm_node, "T", dtypes.float32)
quantize_graph.set_attr_bool(batch_norm_node, "scale_after_normalization",
False)
quantize_graph.set_attr_float(batch_norm_node, "variance_epsilon", 0.001)
float_graph_def.node.extend([batch_norm_node])
test_graph(float_graph_def, {}, [batch_norm_name])
def test_max_pool(self):
input_constant_name = "input_constant"
max_pool_name = "max_pool"
float_graph_def = graph_pb2.GraphDef()
input_constant = quantize_graph.create_constant_node(
input_constant_name,
value=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12],
dtype=dtypes.float32,
shape=[1, 2, 6, 1])
float_graph_def.node.extend([input_constant])
max_pool_node = quantize_graph.create_node("MaxPool", max_pool_name,
[input_constant_name])
quantize_graph.set_attr_int_list(max_pool_node, "ksize", [1, 2, 2, 1])
quantize_graph.set_attr_int_list(max_pool_node, "strides", [1, 1, 1, 1])
quantize_graph.set_attr_string(max_pool_node, "padding", b"SAME")
float_graph_def.node.extend([max_pool_node])
test_graph(float_graph_def, {}, [max_pool_name])
def test_avg_pool(self):
input_constant_name = "input_constant"
avg_pool_name = "avg_pool"
float_graph_def = graph_pb2.GraphDef()
input_constant = quantize_graph.create_constant_node(
input_constant_name,
value=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12],
dtype=dtypes.float32,
shape=[1, 2, 6, 1])
float_graph_def.node.extend([input_constant])
avg_pool_node = quantize_graph.create_node("AvgPool", avg_pool_name,
[input_constant_name])
quantize_graph.set_attr_dtype(avg_pool_node, "T", dtypes.float32)
quantize_graph.set_attr_int_list(avg_pool_node, "ksize", [1, 2, 2, 1])
quantize_graph.set_attr_int_list(avg_pool_node, "strides", [1, 1, 1, 1])
quantize_graph.set_attr_string(avg_pool_node, "padding", b"SAME")
float_graph_def.node.extend([avg_pool_node])
test_graph(float_graph_def, {}, [avg_pool_name])
def test_relu(self):
input_constant_name = "input_constant"
relu_name = "relu"
float_graph_def = graph_pb2.GraphDef()
input_constant = quantize_graph.create_constant_node(
input_constant_name,
value=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12],
dtype=dtypes.float32,
shape=[1, 2, 6, 1])
float_graph_def.node.extend([input_constant])
relu_node = quantize_graph.create_node("Relu", relu_name,
[input_constant_name])
quantize_graph.set_attr_dtype(relu_node, "T", dtypes.float32)
float_graph_def.node.extend([relu_node])
test_graph(float_graph_def, {}, [relu_name])
def test_relu_w_fake_quant_w_min_max_vars(self):
input_node = quantize_graph.create_constant_node(
"input",
value=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12],
dtype=dtypes.float32,
shape=[1, 2, 6, 1])
relu_node = quantize_graph.create_node("Relu", "relu", [input_node.name])
quantize_graph.set_attr_dtype(relu_node, "T", dtypes.float32)
min_node = quantize_graph.create_constant_node(
"min_bias_add", value=0, dtype=dtypes.float32, shape=[])
max_node = quantize_graph.create_constant_node(
"max_bias_add", value=12, dtype=dtypes.float32, shape=[])
fake_quant_node = quantize_graph.create_node(
"FakeQuantWithMinMaxVars", "fake_quant",
[relu_node.name, min_node.name, max_node.name])
float_graph_def = graph_pb2.GraphDef()
float_graph_def.node.extend(
[input_node, relu_node, min_node, max_node, fake_quant_node])
test_graph(float_graph_def, {}, [fake_quant_node.name], log_graph=True)
# Verify there is only one Quantize and one Requantize op.
eightbit_rewriter = quantize_graph.GraphRewriter(
float_graph_def, "eightbit", quantized_input_range=None)
eightbit_graph_def = eightbit_rewriter.rewrite([fake_quant_node.name])
ops = [node.op for node in eightbit_graph_def.node]
# No quantize since all inputs are const and can be quantized up-front.
self.assertEqual(0, ops.count("QuantizeV2") + ops.count("Quantize"))
# One dequantize at the end.
self.assertEqual(1, ops.count("Dequantize"))
def test_relu6(self):
input_constant_name = "input_constant"
relu6_name = "relu6"
float_graph_def = graph_pb2.GraphDef()
input_constant = quantize_graph.create_constant_node(
input_constant_name,
value=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12],
dtype=dtypes.float32,
shape=[1, 2, 6, 1])
float_graph_def.node.extend([input_constant])
relu6_node = quantize_graph.create_node("Relu6", relu6_name,
[input_constant_name])
quantize_graph.set_attr_dtype(relu6_node, "T", dtypes.float32)
float_graph_def.node.extend([relu6_node])
test_graph(float_graph_def, {}, [relu6_name])
def test_bias_add(self):
input_constant_name = "input_constant"
offset_constant_name = "offset_constant"
bias_add_name = "bias_add"
float_graph_def = graph_pb2.GraphDef()
input_constant = quantize_graph.create_constant_node(
input_constant_name,
value=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12],
dtype=dtypes.float32,
shape=[1, 1, 2, 6])
float_graph_def.node.extend([input_constant])
offset_constant = quantize_graph.create_constant_node(
offset_constant_name,
value=[1, 2, 3, 4, 5, 6],
dtype=dtypes.float32,
shape=[6])
float_graph_def.node.extend([offset_constant])
bias_add_node = quantize_graph.create_node(
"BiasAdd", bias_add_name, [input_constant_name, offset_constant_name])
quantize_graph.set_attr_dtype(bias_add_node, "T", dtypes.float32)
float_graph_def.node.extend([bias_add_node])
test_graph(float_graph_def, {}, [bias_add_name])
def test_quantized_input_range_errors(self):
with self.assertRaises(ValueError):
# Invalid mode.
quantize_graph.GraphRewriter(graph_pb2.GraphDef(), "weights_rounded",
[0, 1])
with self.assertRaises(ValueError):
# Invalid range.
quantize_graph.GraphRewriter(graph_pb2.GraphDef(), "eightbit", [0, -1])
def test_quantized_input_range_bias_add(self):
input_shape = [1, 1, 2, 6]
input_n = quantize_graph.create_node("Placeholder", "input", [])
quantize_graph.set_attr_dtype(input_n, "dtype", dtypes.float32)
quantize_graph.set_attr_shape(input_n, "shape", input_shape)
offset_n = quantize_graph.create_constant_node(
"offset", value=[1, 2, 3, 4, 5, 6], dtype=dtypes.float32, shape=[6])
bias_add_n = quantize_graph.create_node("BiasAdd", "bias_add",
[input_n.name, offset_n.name])
quantize_graph.set_attr_dtype(bias_add_n, "T", dtypes.float32)
float_graph_def = graph_pb2.GraphDef()
float_graph_def.node.extend([input_n, offset_n, bias_add_n])
input_map = {
input_n.name + ":0":
np.reshape([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12], input_shape)
}
self._RunTestsForQuantizedInputRange(float_graph_def, input_map,
[bias_add_n.name], [-1, 20.])
self._RunTestsForQuantizedInputRange(float_graph_def, input_map,
[bias_add_n.name], [0, 12.])
def test_quantized_input_range_mat_mul(self):
shapes = [[3, 2], [2, 4]]
inputs = []
for i, shape in enumerate(shapes):
node = quantize_graph.create_node("Placeholder", "input_%s" % i, [])
quantize_graph.set_attr_dtype(node, "dtype", dtypes.float32)
quantize_graph.set_attr_shape(node, "shape", shape)
inputs.append(node)
mat_mul_node = quantize_graph.create_node("MatMul", "mat_mul",
[n.name for n in inputs])
quantize_graph.set_attr_dtype(mat_mul_node, "T", dtypes.float32)
float_graph_def = graph_pb2.GraphDef()
float_graph_def.node.extend(inputs + [mat_mul_node])
input_map = {
inputs[0].name + ":0":
np.reshape([1, 2, 3, 4, 5, 6], shapes[0]),
inputs[1].name + ":0":
np.reshape([.8, .7, .6, .5, .4, .3, .2, .1], shapes[1])
}
self._RunTestsForQuantizedInputRange(float_graph_def, input_map,
[mat_mul_node.name], [-1, 20.])
self._RunTestsForQuantizedInputRange(float_graph_def, input_map,
[mat_mul_node.name], [0, 6.])
def _RunTestsForQuantizedInputRange(self, float_graph_def, input_map,
output_names, input_range):
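    """Checks eight-bit rewriting with and without a quantized input range against the float results."""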
if sys.version_info[0] == 3:
# uint8->quint8 conversion for numpy is not working currently.
return
quantized_input_map = {}
for k, v in input_map.items():
arr = [
int(
round((n - input_range[0]) * 255 / (input_range[1] - input_range[
0]))) for n in v.flat
]
arr = np.array(arr, np.uint8)
arr = arr.reshape(v.shape)
arr = arr.astype(dtypes.quint8.as_numpy_dtype)
quantized_input_map[k] = arr
output_tensors = [output_name + ":0" for output_name in output_names]
float_results = run_graph_def(float_graph_def, input_map, output_tensors)
# Quantize treating the input as quantized in range <input_range>.
rewriter = quantize_graph.GraphRewriter(float_graph_def, "eightbit",
input_range)
graph_def = rewriter.rewrite(output_names)
results = run_graph_def(graph_def, quantized_input_map, output_tensors)
for expected, result in zip(float_results, results):
assert are_tensors_near(expected, result, .5)
ops = [node.op for node in graph_def.node]
self.assertEqual(0, ops.count("QuantizeV2") + ops.count("Quantize"))
self.assertEqual(len(output_names), ops.count("Dequantize"))
# Quantize without treating input as quantized.
rewriter = quantize_graph.GraphRewriter(
float_graph_def, "eightbit", quantized_input_range=None)
graph_def = rewriter.rewrite(output_names)
results = run_graph_def(graph_def, input_map, output_tensors)
for expected, result in zip(float_results, results):
assert are_tensors_near(expected, result, .5)
ops = [node.op for node in graph_def.node]
self.assertEqual(
len(input_map), ops.count("QuantizeV2") + ops.count("Quantize"))
self.assertEqual(len(output_names), ops.count("Dequantize"))
def test_bias_add_w_fake_quant_w_min_max_vars(self):
input_node = quantize_graph.create_constant_node(
"input",
value=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
dtype=dtypes.float32,
shape=[1, 1, 2, 5])
offset_node = quantize_graph.create_constant_node(
"offset", value=[1, 2, 3, 4, 5], dtype=dtypes.float32, shape=[5])
bias_add_node = quantize_graph.create_node(
"BiasAdd", "bias_add", [input_node.name, offset_node.name])
quantize_graph.set_attr_dtype(bias_add_node, "T", dtypes.float32)
min_node = quantize_graph.create_constant_node(
"min_bias_add", value=-.5, dtype=dtypes.float32, shape=[])
max_node = quantize_graph.create_constant_node(
"max_bias_add", value=15.5, dtype=dtypes.float32, shape=[])
fake_quant_node = quantize_graph.create_node(
"FakeQuantWithMinMaxVars", "fake_quant",
[bias_add_node.name, min_node.name, max_node.name])
float_graph_def = graph_pb2.GraphDef()
float_graph_def.node.extend([
input_node, offset_node, bias_add_node, min_node, max_node,
fake_quant_node
])
test_graph(float_graph_def, {}, [fake_quant_node.name], log_graph=True)
# Verify there is only one Quantize and one Requantize op.
# Pass in fallback_quantization_range, although it will have no effect
# because the FakeQuantWithMinMaxVars are used instead.
eightbit_rewriter = quantize_graph.GraphRewriter(
float_graph_def,
"eightbit",
quantized_input_range=None,
fallback_quantization_range=[-100, 100])
eightbit_graph_def = eightbit_rewriter.rewrite([fake_quant_node.name])
ops = [node.op for node in eightbit_graph_def.node]
node_names = [node.name for node in eightbit_graph_def.node]
# No quantize since all inputs are const and can be quantized up-front.
self.assertEqual(0, ops.count("QuantizeV2") + ops.count("Quantize"))
# One dequantize at the end.
self.assertEqual(1, ops.count("Dequantize"))
# The fallback constants are not in the graph.
self.assertEqual(0, node_names.count("fallback_quantization_min_value"))
self.assertEqual(0, node_names.count("fallback_quantization_max_value"))
def test_bias_add_w_fallback_min_max_vars(self):
input_node = quantize_graph.create_constant_node(
"input",
value=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
dtype=dtypes.float32,
shape=[1, 1, 2, 5])
offset_node = quantize_graph.create_constant_node(
"offset", value=[1, 2, 3, 4, 5], dtype=dtypes.float32, shape=[5])
bias_add_node = quantize_graph.create_node(
"BiasAdd", "bias_add", [input_node.name, offset_node.name])
quantize_graph.set_attr_dtype(bias_add_node, "T", dtypes.float32)
float_graph_def = graph_pb2.GraphDef()
float_graph_def.node.extend([input_node, offset_node, bias_add_node])
test_graph(float_graph_def, {}, [bias_add_node.name], log_graph=True)
# Verify there is only one Quantize, one Requantize op, and no
# RequantizationRange op.
eightbit_rewriter = quantize_graph.GraphRewriter(
float_graph_def,
"eightbit",
quantized_input_range=None,
fallback_quantization_range=[-.5, 15.5])
eightbit_graph_def = eightbit_rewriter.rewrite([bias_add_node.name])
ops = [node.op for node in eightbit_graph_def.node]
node_names = [node.name for node in eightbit_graph_def.node]
# No quantize since all inputs are const and can be quantized up-front.
self.assertEqual(0, ops.count("QuantizeV2") + ops.count("Quantize"))
# One dequantize at the end.
self.assertEqual(1, ops.count("Dequantize"))
# No RequantizationRange
self.assertEqual(0, ops.count("RequantizationRange"))
# The fallback constants are in the graph.
self.assertEqual(1, node_names.count("fallback_quantization_min_value"))
self.assertEqual(1, node_names.count("fallback_quantization_max_value"))
def test_remove_redundant_quantization(self):
a_constant_name = "a_constant"
a_constant_min_name = "a_constant_min"
a_constant_max_name = "a_constant_max"
a_dequantize_name = "a_dequantize"
a_quantize_name = "a_quantize"
b_constant_name = "b_constant"
b_constant_min_name = "b_constant_min"
b_constant_max_name = "b_constant_max"
b_dequantize_name = "b_dequantize"
b_quantize_name = "b_quantize"
mat_mul_name = "mat_mul"
graph_def = graph_pb2.GraphDef()
a_constant = quantize_graph.create_constant_node(
a_constant_name, value=(0,), dtype=dtypes.quint8, shape=[])
graph_def.node.extend([a_constant])
a_constant_min = quantize_graph.create_constant_node(
a_constant_min_name, value=2, dtype=dtypes.float32, shape=[])
graph_def.node.extend([a_constant_min])
a_constant_max = quantize_graph.create_constant_node(
a_constant_max_name, value=2, dtype=dtypes.float32, shape=[])
graph_def.node.extend([a_constant_max])
a_dequantize_node = quantize_graph.create_node(
"Dequantize", a_dequantize_name,
[a_constant_name, a_constant_min_name, a_constant_max_name])
quantize_graph.set_attr_dtype(a_dequantize_node, "T", dtypes.uint8)
graph_def.node.extend([a_dequantize_node])
a_quantize_node = quantize_graph.create_node(
"QuantizeV2", a_quantize_name,
[a_dequantize_name, a_dequantize_name + ":1", a_dequantize_name + ":2"])
quantize_graph.set_attr_dtype(a_quantize_node, "T", dtypes.uint8)
graph_def.node.extend([a_quantize_node])
b_constant = quantize_graph.create_constant_node(
b_constant_name, value=(0,), dtype=dtypes.quint8, shape=[])
graph_def.node.extend([b_constant])
b_constant_min = quantize_graph.create_constant_node(
b_constant_min_name, value=3, dtype=dtypes.float32, shape=[])
graph_def.node.extend([b_constant_min])
b_constant_max = quantize_graph.create_constant_node(
b_constant_max_name, value=3, dtype=dtypes.float32, shape=[])
graph_def.node.extend([b_constant_max])
b_dequantize_node = quantize_graph.create_node(
"Dequantize", b_dequantize_name,
[b_constant_name, b_constant_min_name, b_constant_max_name])
quantize_graph.set_attr_dtype(b_dequantize_node, "T", dtypes.uint8)
graph_def.node.extend([b_dequantize_node])
b_quantize_node = quantize_graph.create_node(
"QuantizeV2", b_quantize_name,
[b_dequantize_name, b_dequantize_name + ":1", b_dequantize_name + ":2"])
quantize_graph.set_attr_dtype(b_quantize_node, "T", dtypes.uint8)
graph_def.node.extend([b_quantize_node])
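    # QuantizeV2 has three outputs: the quantized tensor (:0) and its min
    # (:1) and max (:2); the QuantizedMatMul consumes all six tensors.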
mat_mul_node = quantize_graph.create_node("QuantizedMatMul", mat_mul_name, [
a_quantize_name, b_quantize_name, a_quantize_name + ":1",
a_quantize_name + ":2", b_quantize_name + ":1", b_quantize_name + ":2"
])
quantize_graph.set_attr_dtype(mat_mul_node, "T1", dtypes.uint8)
quantize_graph.set_attr_dtype(mat_mul_node, "T2", dtypes.int32)
graph_def.node.extend([mat_mul_node])
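    # Expected output: the Dequantize/QuantizeV2 round trips are gone and the
    # QuantizedMatMul reads the quantized constants and their ranges directly.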
expected_output = graph_pb2.GraphDef()
a_constant = quantize_graph.create_constant_node(
a_constant_name, value=(0,), dtype=dtypes.quint8, shape=[])
expected_output.node.extend([a_constant])
a_constant_min = quantize_graph.create_constant_node(
a_constant_min_name, value=2, dtype=dtypes.float32, shape=[])
expected_output.node.extend([a_constant_min])
a_constant_max = quantize_graph.create_constant_node(
a_constant_max_name, value=2, dtype=dtypes.float32, shape=[])
expected_output.node.extend([a_constant_max])
b_constant = quantize_graph.create_constant_node(
b_constant_name, value=(0,), dtype=dtypes.quint8, shape=[])
expected_output.node.extend([b_constant])
b_constant_min = quantize_graph.create_constant_node(
b_constant_min_name, value=3, dtype=dtypes.float32, shape=[])
expected_output.node.extend([b_constant_min])
b_constant_max = quantize_graph.create_constant_node(
b_constant_max_name, value=3, dtype=dtypes.float32, shape=[])
expected_output.node.extend([b_constant_max])
mat_mul_node = quantize_graph.create_node("QuantizedMatMul", mat_mul_name, [
a_constant_name, b_constant_name, a_constant_min_name,
a_constant_max_name, b_constant_min_name, b_constant_max_name
])
quantize_graph.set_attr_dtype(mat_mul_node, "T1", dtypes.uint8)
quantize_graph.set_attr_dtype(mat_mul_node, "T2", dtypes.int32)
expected_output.node.extend([mat_mul_node])
expected_output.versions.CopyFrom(graph_def.versions)
expected_output.library.CopyFrom(graph_def.library)
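    # Note: [mat_mul_name] lands in GraphRewriter's mode parameter;
    # remove_redundant_quantization does not appear to consult the mode, so
    # the rewrite below operates purely on graph_def.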
rewriter = quantize_graph.GraphRewriter(
graph_def, [mat_mul_name], quantized_input_range=None)
output = rewriter.remove_redundant_quantization(graph_def)
stripped_output = graph_util.extract_sub_graph(output, [mat_mul_name])
self.assertProtoEquals(expected_output, stripped_output)


if __name__ == "__main__":
test.main()
'(b_constant_max_name, value=3, dtype=\n dtypes.float32, shape=[])\n', (41460, 41527), False, 'from tensorflow.tools.quantization import quantize_graph\n'), ((41601, 41789), 'tensorflow.tools.quantization.quantize_graph.create_node', 'quantize_graph.create_node', (['"""QuantizedMatMul"""', 'mat_mul_name', '[a_constant_name, b_constant_name, a_constant_min_name, a_constant_max_name,\n b_constant_min_name, b_constant_max_name]'], {}), "('QuantizedMatMul', mat_mul_name, [\n a_constant_name, b_constant_name, a_constant_min_name,\n a_constant_max_name, b_constant_min_name, b_constant_max_name])\n", (41627, 41789), False, 'from tensorflow.tools.quantization import quantize_graph\n'), ((41807, 41870), 'tensorflow.tools.quantization.quantize_graph.set_attr_dtype', 'quantize_graph.set_attr_dtype', (['mat_mul_node', '"""T1"""', 'dtypes.uint8'], {}), "(mat_mul_node, 'T1', dtypes.uint8)\n", (41836, 41870), False, 'from tensorflow.tools.quantization import quantize_graph\n'), ((41875, 41938), 'tensorflow.tools.quantization.quantize_graph.set_attr_dtype', 'quantize_graph.set_attr_dtype', (['mat_mul_node', '"""T2"""', 'dtypes.int32'], {}), "(mat_mul_node, 'T2', dtypes.int32)\n", (41904, 41938), False, 'from tensorflow.tools.quantization import quantize_graph\n'), ((42117, 42204), 'tensorflow.tools.quantization.quantize_graph.GraphRewriter', 'quantize_graph.GraphRewriter', (['graph_def', '[mat_mul_name]'], {'quantized_input_range': 'None'}), '(graph_def, [mat_mul_name],\n quantized_input_range=None)\n', (42145, 42204), False, 'from tensorflow.tools.quantization import quantize_graph\n'), ((42295, 42347), 'tensorflow.python.framework.graph_util.extract_sub_graph', 'graph_util.extract_sub_graph', (['output', '[mat_mul_name]'], {}), '(output, [mat_mul_name])\n', (42323, 42347), False, 'from tensorflow.python.framework import graph_util\n'), ((9226, 9286), 'tensorflow.tools.quantization.quantize_graph.create_node', 'quantize_graph.create_node', (['"""MatMul"""', 'name', '[a.name, b.name]'], {}), "('MatMul', name, [a.name, b.name])\n", (9252, 9286), False, 'from tensorflow.tools.quantization import quantize_graph\n'), ((9293, 9346), 'tensorflow.tools.quantization.quantize_graph.set_attr_dtype', 'quantize_graph.set_attr_dtype', (['n', '"""T"""', 'dtypes.float32'], {}), "(n, 'T', dtypes.float32)\n", (9322, 9346), False, 'from tensorflow.tools.quantization import quantize_graph\n'), ((9353, 9406), 'tensorflow.tools.quantization.quantize_graph.set_attr_bool', 'quantize_graph.set_attr_bool', (['n', '"""transpose_a"""', '(False)'], {}), "(n, 'transpose_a', False)\n", (9381, 9406), False, 'from tensorflow.tools.quantization import quantize_graph\n'), ((9413, 9466), 'tensorflow.tools.quantization.quantize_graph.set_attr_bool', 'quantize_graph.set_attr_bool', (['n', '"""transpose_b"""', '(False)'], {}), "(n, 'transpose_b', False)\n", (9441, 9466), False, 'from tensorflow.tools.quantization import quantize_graph\n'), ((11436, 11448), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (11444, 11448), True, 'import numpy as np\n'), ((11562, 11578), 'numpy.array', 'np.array', (['[1, 2]'], {}), '([1, 2])\n', (11570, 11578), True, 'import numpy as np\n'), ((17072, 17122), 'tensorflow.tools.quantization.quantize_graph.node_name_from_input', 'quantize_graph.node_name_from_input', (['"""^SomeName:2"""'], {}), "('^SomeName:2')\n", (17107, 17122), False, 'from tensorflow.tools.quantization import quantize_graph\n'), ((17241, 17298), 'tensorflow.tools.quantization.quantize_graph.unique_node_name_from_input', 
'quantize_graph.unique_node_name_from_input', (['"""^SomeName:2"""'], {}), "('^SomeName:2')\n", (17283, 17298), False, 'from tensorflow.tools.quantization import quantize_graph\n'), ((29972, 30036), 'numpy.reshape', 'np.reshape', (['[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]', 'input_shape'], {}), '([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12], input_shape)\n', (29982, 30036), True, 'import numpy as np\n'), ((30469, 30530), 'tensorflow.tools.quantization.quantize_graph.create_node', 'quantize_graph.create_node', (['"""Placeholder"""', "('input_%s' % i)", '[]'], {}), "('Placeholder', 'input_%s' % i, [])\n", (30495, 30530), False, 'from tensorflow.tools.quantization import quantize_graph\n'), ((30537, 30597), 'tensorflow.tools.quantization.quantize_graph.set_attr_dtype', 'quantize_graph.set_attr_dtype', (['node', '"""dtype"""', 'dtypes.float32'], {}), "(node, 'dtype', dtypes.float32)\n", (30566, 30597), False, 'from tensorflow.tools.quantization import quantize_graph\n'), ((30604, 30655), 'tensorflow.tools.quantization.quantize_graph.set_attr_shape', 'quantize_graph.set_attr_shape', (['node', '"""shape"""', 'shape'], {}), "(node, 'shape', shape)\n", (30633, 30655), False, 'from tensorflow.tools.quantization import quantize_graph\n'), ((31053, 31094), 'numpy.reshape', 'np.reshape', (['[1, 2, 3, 4, 5, 6]', 'shapes[0]'], {}), '([1, 2, 3, 4, 5, 6], shapes[0])\n', (31063, 31094), True, 'import numpy as np\n'), ((31139, 31202), 'numpy.reshape', 'np.reshape', (['[0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1]', 'shapes[1]'], {}), '([0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1], shapes[1])\n', (31149, 31202), True, 'import numpy as np\n'), ((31971, 31994), 'numpy.array', 'np.array', (['arr', 'np.uint8'], {}), '(arr, np.uint8)\n', (31979, 31994), True, 'import numpy as np\n'), ((28957, 28977), 'tensorflow.core.framework.graph_pb2.GraphDef', 'graph_pb2.GraphDef', ([], {}), '()\n', (28975, 28977), False, 'from tensorflow.core.framework import graph_pb2\n'), ((29139, 29159), 'tensorflow.core.framework.graph_pb2.GraphDef', 'graph_pb2.GraphDef', ([], {}), '()\n', (29157, 29159), False, 'from tensorflow.core.framework import graph_pb2\n'), ((11954, 11973), 'numpy.array', 'np.array', (['[1, 1, 1]'], {}), '([1, 1, 1])\n', (11962, 11973), True, 'import numpy as np\n'), ((12131, 12161), 'numpy.array', 'np.array', (['[0.5, 0.5, 0.5, 0.5]'], {}), '([0.5, 0.5, 0.5, 0.5])\n', (12139, 12161), True, 'import numpy as np\n'), ((12248, 12282), 'numpy.array', 'np.array', (['[0.25, 0.25, 0.75, 0.75]'], {}), '([0.25, 0.25, 0.75, 0.75])\n', (12256, 12282), True, 'import numpy as np\n'), ((12385, 12423), 'numpy.array', 'np.array', (['[[0.25, 0.25], [0.75, 0.75]]'], {}), '([[0.25, 0.25], [0.75, 0.75]])\n', (12393, 12423), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.10 on 2018-04-26 09:14
import colorfield.fields
from django.db import migrations, models
import django.db.models.deletion
import giscube.utils
class Migration(migrations.Migration):
initial = True
dependencies = [
('giscube', '0002_update'),
]
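    # The single CreateModel operation below defines GeoJsonLayer: identity
    # fields plus shape/stroke/fill styling and service/cache metadata.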
operations = [
migrations.CreateModel(
name='GeoJsonLayer',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=50, unique=True)),
('title', models.CharField(blank=True, max_length=100, null=True)),
('description', models.TextField(blank=True, null=True)),
('keywords', models.CharField(blank=True, max_length=200, null=True)),
('active', models.BooleanField(default=True)),
('visibility', models.CharField(choices=[('private', 'Private'), ('public', 'Public')], default='private', max_length=10)),
('visible_on_geoportal', models.BooleanField(default=False)),
('shapetype', models.CharField(blank=True, choices=[('marker', 'Marker'), ('line', 'Line'), ('polygon', 'Polygon'), ('Circle', 'Circle')], max_length=20, null=True)),
('shape_radius', models.IntegerField(blank=True, null=True)),
                ('stroke_color', colorfield.fields.ColorField(blank=True, default='#FF3333', max_length=18, null=True)),
('stroke_width', models.IntegerField(blank=True, default=1, null=True)),
('stroke_dash_array', models.CharField(blank=True, default='', max_length=25, null=True)),
                ('fill_color', colorfield.fields.ColorField(blank=True, default='#FFC300', max_length=18, null=True)),
('fill_opacity', models.DecimalField(blank=True, decimal_places=1, default=1, max_digits=2, null=True)),
('url', models.CharField(blank=True, max_length=100, null=True)),
('data_file', models.FileField(blank=True, null=True, upload_to=giscube.utils.unique_service_directory)),
('service_path', models.CharField(max_length=255)),
('cache_time', models.IntegerField(blank=True, null=True)),
('last_fetch_on', models.DateField(blank=True, null=True)),
('category', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='giscube.Category')),
],
options={
'verbose_name': 'GeoJSONLayer',
'verbose_name_plural': 'GeoJSONLayers',
},
),
]
| [
"django.db.models.DateField",
"django.db.models.TextField",
"django.db.models.IntegerField",
"django.db.models.ForeignKey",
"django.db.models.FileField",
"django.db.models.BooleanField",
"django.db.models.AutoField",
"django.db.models.DecimalField",
"django.db.models.CharField"
] | [((450, 543), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (466, 543), False, 'from django.db import migrations, models\n'), ((567, 611), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(50)', 'unique': '(True)'}), '(max_length=50, unique=True)\n', (583, 611), False, 'from django.db import migrations, models\n'), ((640, 695), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'max_length': '(100)', 'null': '(True)'}), '(blank=True, max_length=100, null=True)\n', (656, 695), False, 'from django.db import migrations, models\n'), ((730, 769), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (746, 769), False, 'from django.db import migrations, models\n'), ((801, 856), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'max_length': '(200)', 'null': '(True)'}), '(blank=True, max_length=200, null=True)\n', (817, 856), False, 'from django.db import migrations, models\n'), ((886, 919), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(True)'}), '(default=True)\n', (905, 919), False, 'from django.db import migrations, models\n'), ((953, 1063), 'django.db.models.CharField', 'models.CharField', ([], {'choices': "[('private', 'Private'), ('public', 'Public')]", 'default': '"""private"""', 'max_length': '(10)'}), "(choices=[('private', 'Private'), ('public', 'Public')],\n default='private', max_length=10)\n", (969, 1063), False, 'from django.db import migrations, models\n'), ((1103, 1137), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (1122, 1137), False, 'from django.db import migrations, models\n'), ((1170, 1325), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'choices': "[('marker', 'Marker'), ('line', 'Line'), ('polygon', 'Polygon'), ('Circle',\n 'Circle')]", 'max_length': '(20)', 'null': '(True)'}), "(blank=True, choices=[('marker', 'Marker'), ('line', 'Line'\n ), ('polygon', 'Polygon'), ('Circle', 'Circle')], max_length=20, null=True)\n", (1186, 1325), False, 'from django.db import migrations, models\n'), ((1356, 1398), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (1375, 1398), False, 'from django.db import migrations, models\n'), ((1556, 1609), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'blank': '(True)', 'default': '(1)', 'null': '(True)'}), '(blank=True, default=1, null=True)\n', (1575, 1609), False, 'from django.db import migrations, models\n'), ((1650, 1716), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'default': '""""""', 'max_length': '(25)', 'null': '(True)'}), "(blank=True, default='', max_length=25, null=True)\n", (1666, 1716), False, 'from django.db import migrations, models\n'), ((1872, 1961), 'django.db.models.DecimalField', 'models.DecimalField', ([], {'blank': '(True)', 'decimal_places': '(1)', 'default': '(1)', 'max_digits': '(2)', 'null': '(True)'}), '(blank=True, decimal_places=1, default=1, max_digits=2,\n null=True)\n', (1891, 1961), False, 'from django.db import migrations, models\n'), ((1984, 2039), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'max_length': 
'(100)', 'null': '(True)'}), '(blank=True, max_length=100, null=True)\n', (2000, 2039), False, 'from django.db import migrations, models\n'), ((2072, 2166), 'django.db.models.FileField', 'models.FileField', ([], {'blank': '(True)', 'null': '(True)', 'upload_to': 'giscube.utils.unique_service_directory'}), '(blank=True, null=True, upload_to=giscube.utils.\n unique_service_directory)\n', (2088, 2166), False, 'from django.db import migrations, models\n'), ((2197, 2229), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)'}), '(max_length=255)\n', (2213, 2229), False, 'from django.db import migrations, models\n'), ((2263, 2305), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (2282, 2305), False, 'from django.db import migrations, models\n'), ((2342, 2381), 'django.db.models.DateField', 'models.DateField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (2358, 2381), False, 'from django.db import migrations, models\n'), ((2413, 2527), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'blank': '(True)', 'null': '(True)', 'on_delete': 'django.db.models.deletion.SET_NULL', 'to': '"""giscube.Category"""'}), "(blank=True, null=True, on_delete=django.db.models.\n deletion.SET_NULL, to='giscube.Category')\n", (2430, 2527), False, 'from django.db import migrations, models\n')] |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2017-06-09 03:01
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('extensions', '0011_auto_20170502_0908'),
]
operations = [
migrations.AlterField(
model_name='extension',
name='imports_path',
field=models.CharField(default='imports/', max_length=255),
),
]
| [
"django.db.models.CharField"
] | [((413, 465), 'django.db.models.CharField', 'models.CharField', ([], {'default': '"""imports/"""', 'max_length': '(255)'}), "(default='imports/', max_length=255)\n", (429, 465), False, 'from django.db import migrations, models\n')] |
'''
multi-threading (python3 version)
https://docs.python.org/3/library/threading.html
'''
# time.clock was removed in Python 3.8; perf_counter is the modern equivalent
from time import perf_counter as clock
import threading
THREADS=2
lock = threading.Lock()
A = 0
B = 0
C = 0
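# Every iteration mutates the shared globals under the lock, so the
# invariant C == A + B always holds; with THREADS workers the expected
# final values are A == THREADS * 1024 * 1024 and B == 2 * A.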
def test_globals():
global A, B, C
for i in range(1024*1024):
        with lock:  # released automatically, even if an exception is raised
            A += 1
            B += 2
            C = A + B
def main():
print( 'starting threading test')
starttime = clock()
threads = []
for i in range(THREADS):
t = threading.Thread( target=test_globals, args=() )
t.start()
threads.append( t )
for t in threads:
t.join()
print( clock()-starttime)
print('A:', A)
print('B:', B)
print('C:', C)
main() | [
"threading.Lock",
"threading.Thread",
"time.clock"
] | [((150, 166), 'threading.Lock', 'threading.Lock', ([], {}), '()\n', (164, 166), False, 'import threading\n'), ((377, 384), 'time.perf_counter', 'clock', ([], {}), '()\n', (382, 384), False, 'from time import perf_counter as clock\n'), ((431, 477), 'threading.Thread', 'threading.Thread', ([], {'target': 'test_globals', 'args': '()'}), '(target=test_globals, args=())\n', (447, 477), False, 'import threading\n'), ((554, 561), 'time.perf_counter', 'clock', ([], {}), '()\n', (559, 561), False, 'from time import perf_counter as clock\n')]
import numpy as np
class Board:
"""
0 - black
1 - white
"""
def __init__(self):
board = [
[0, 1] * 4,
[1, 0] * 4
] * 4
players_board = [
[0, 1] * 4, # player 1
[1, 0] * 4
] + [[0] * 8] * 4 + [ # 4 rows of nothing
[0, 2] * 4, # player 2
[2, 0] * 4
]
self.board = np.array(board)
self.players_board = np.array(players_board)
self.x_size = 8
self.y_size = 8
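        # players_board encoding: 0 = empty square, 1 = player 1's piece,
        # 2 = player 2's piece (two back rows per player on the 8x8 board).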
# def move(self, x, y, current_player):
# self.board[x, y] = current_player
# def are_same_and_non_zero(self, array):
# return np.unique(array).size == 1 and array[0] != 0
# def is_board_full(self):
# return not np.any(np.unique(self.board) == 0)
def is_finished(self):
"""is game finished"""
return True
# for i in range(0, self.x_size): # rows
# if self.are_same_and_non_zero(self.board[i, :]):
# self.player_who_won = self.board[i, 0]
# self.result = 'Won {} - row {}'.format(self.player(self.player_who_won), i)
# return True
# for i in range(0, self.y_size): # columns
# if self.are_same_and_non_zero(self.board[:, i]):
# self.player_who_won = self.board[0, i]
# self.result = 'Won {} - col {}'.format(self.player(self.player_who_won), i)
# return True
# if self.are_same_and_non_zero(np.diag(self.board)): # diagonal
# self.player_who_won = self.board[1, 1]
# self.result = 'Won {} - diagonal {}'.format(self.player(self.player_who_won), i)
# return True
# if self.are_same_and_non_zero(np.diag(np.flipud(self.board))): # anty-diagonal
# self.player_who_won = self.board[1, 1]
# self.result = 'Won {} - anty-diagonal {}'.format(self.player(self.player_who_won), i)
# return True
# if self.is_board_full():
# self.player_who_won = 0 # nobody
# self.result = 'Draw'
# return True # draw
return False
def show(self):
# print(self.board)
# print(self.players_board)
return
# def player(self, player_no):
# if player_no == 1: return 'Player 1 (X)'
# if player_no == 2: return 'Player 2 (O)'
# def show_player_info(self, player_no):
# print("It's turn of ", self.player(player_no))
| [
"numpy.array"
] | [((331, 346), 'numpy.array', 'np.array', (['board'], {}), '(board)\n', (339, 346), True, 'import numpy as np\n'), ((372, 395), 'numpy.array', 'np.array', (['players_board'], {}), '(players_board)\n', (380, 395), True, 'import numpy as np\n')] |
#
# This file is part of LiteX-Boards.
#
# Copyright (c) 2017-2019 <NAME> <<EMAIL>>
# SPDX-License-Identifier: BSD-2-Clause
from litex.build.generic_platform import *
from litex.build.xilinx import XilinxPlatform, VivadoProgrammer
# IOs ----------------------------------------------------------------------------------------------
_io = [
# Clk / Rst
("clk125", 0,
Subsignal("p", Pins("G10"), IOStandard("LVDS")),
Subsignal("n", Pins("F10"), IOStandard("LVDS"))
),
("clk300", 0,
Subsignal("p", Pins("AK17"), IOStandard("DIFF_SSTL12")),
Subsignal("n", Pins("AK16"), IOStandard("DIFF_SSTL12"))
),
("cpu_reset", 0, Pins("AN8"), IOStandard("LVCMOS18")),
# Leds
("user_led", 0, Pins("AP8"), IOStandard("LVCMOS18")),
("user_led", 1, Pins("H23"), IOStandard("LVCMOS18")),
("user_led", 2, Pins("P20"), IOStandard("LVCMOS18")),
("user_led", 3, Pins("P21"), IOStandard("LVCMOS18")),
("user_led", 4, Pins("N22"), IOStandard("LVCMOS18")),
("user_led", 5, Pins("M22"), IOStandard("LVCMOS18")),
("user_led", 6, Pins("R23"), IOStandard("LVCMOS18")),
("user_led", 7, Pins("P23"), IOStandard("LVCMOS18")),
# Buttons
("user_btn_c", 0, Pins("AE10"), IOStandard("LVCMOS18")),
("user_btn_n", 0, Pins("AD10"), IOStandard("LVCMOS18")),
("user_btn_s", 0, Pins("AF8"), IOStandard("LVCMOS18")),
("user_btn_w", 0, Pins("AF9"), IOStandard("LVCMOS18")),
("user_btn_e", 0, Pins("AE8"), IOStandard("LVCMOS18")),
# Switches
("user_dip_btn", 0, Pins("AN16"), IOStandard("LVCMOS12")),
("user_dip_btn", 1, Pins("AN19"), IOStandard("LVCMOS12")),
("user_dip_btn", 2, Pins("AP18"), IOStandard("LVCMOS12")),
("user_dip_btn", 3, Pins("AN14"), IOStandard("LVCMOS12")),
# SMA
("user_sma_clock", 0,
Subsignal("p", Pins("D23"), IOStandard("LVDS")),
Subsignal("n", Pins("C23"), IOStandard("LVDS"))
),
("user_sma_clock_p", 0, Pins("D23"), IOStandard("LVCMOS18")),
("user_sma_clock_n", 0, Pins("C23"), IOStandard("LVCMOS18")),
("user_sma_gpio", 0,
Subsignal("p", Pins("H27"), IOStandard("LVDS")),
Subsignal("n", Pins("G27"), IOStandard("LVDS"))
),
("user_sma_gpio_p", 0, Pins("H27"), IOStandard("LVCMOS18")),
("user_sma_gpio_n", 0, Pins("G27"), IOStandard("LVCMOS18")),
# I2C
("i2c", 0,
Subsignal("scl", Pins("J24")),
Subsignal("sda", Pins("J25")),
IOStandard("LVCMOS18")
),
# Serial
("serial", 0,
Subsignal("cts", Pins("L23")),
Subsignal("rts", Pins("K27")),
Subsignal("tx", Pins("K26")),
Subsignal("rx", Pins("G25")),
IOStandard("LVCMOS18")
),
# SPIFlash
("spiflash", 0, # clock needs to be accessed through primitive
Subsignal("cs_n", Pins("U7")),
Subsignal("dq", Pins("AC7 AB7 AA7 Y7")),
IOStandard("LVCMOS18")
),
("spiflash", 1, # clock needs to be accessed through primitive
Subsignal("cs_n", Pins("G26")),
Subsignal("dq", Pins("M20 L20 R21 R22")),
IOStandard("LVCMOS18")
),
# SDCard
("spisdcard", 0,
Subsignal("clk", Pins("AL10")),
Subsignal("cs_n", Pins("AH8")),
Subsignal("mosi", Pins("AD9"), Misc("PULLUP")),
Subsignal("miso", Pins("AP9"), Misc("PULLUP")),
Misc("SLEW=FAST"),
IOStandard("LVCMOS18")
),
("sdcard", 0,
Subsignal("clk", Pins("AL10")),
Subsignal("cmd", Pins("AD9"), Misc("PULLUP True")),
Subsignal("data", Pins("AP9 AN9 AH9 AH8"), Misc("PULLUP True")),
Misc("SLEW=FAST"),
IOStandard("LVCMOS18")
),
# Rotary Encoder
("rotary", 0,
Subsignal("a", Pins("Y21")),
Subsignal("b", Pins("AD26")),
Subsignal("push", Pins("AF28")),
IOStandard("LVCMOS18")
),
# HDMI
("hdmi", 0,
Subsignal("d", Pins(
"AK11 AP11 AP13 AN13 AN11 AM11 AN12 AM12",
"AL12 AK12 AL13 AK13 AD11 AH12 AG12 AJ11",
"AG10 AK8")),
Subsignal("de", Pins("AE11")),
Subsignal("clk", Pins("AF13")),
Subsignal("vsync", Pins("AH13")),
Subsignal("hsync", Pins("AE13")),
Subsignal("spdif", Pins("AE12")),
Subsignal("spdif_out", Pins("AF12")),
IOStandard("LVCMOS18")
),
# DDR4 SDRAM
("ddram", 0,
Subsignal("a", Pins(
"AE17 AH17 AE18 AJ15 AG16 AL17 AK18 AG17",
"AF18 AH19 AF15 AD19 AJ14 AG19"),
IOStandard("SSTL12_DCI")),
Subsignal("ba", Pins("AF17 AL15"), IOStandard("SSTL12_DCI")),
Subsignal("bg", Pins("AG15"), IOStandard("SSTL12_DCI")),
Subsignal("ras_n", Pins("AF14"), IOStandard("SSTL12_DCI")), # A16
Subsignal("cas_n", Pins("AG14"), IOStandard("SSTL12_DCI")), # A15
Subsignal("we_n", Pins("AD16"), IOStandard("SSTL12_DCI")), # A14
Subsignal("cs_n", Pins("AL19"), IOStandard("SSTL12_DCI")),
Subsignal("act_n", Pins("AH14"), IOStandard("SSTL12_DCI")),
#Subsignal("ten", Pins("AH16"), IOStandard("SSTL12_DCI")),
#Subsignal("alert_n", Pins("AJ16"), IOStandard("SSTL12_DCI")),
#Subsignal("par", Pins("AD18"), IOStandard("SSTL12_DCI")),
Subsignal("dm", Pins("AD21 AE25 AJ21 AM21 AH26 AN26 AJ29 AL32"),
IOStandard("POD12_DCI")),
Subsignal("dq", Pins(
"AE23 AG20 AF22 AF20 AE22 AD20 AG22 AE20",
"AJ24 AG24 AJ23 AF23 AH23 AF24 AH22 AG25",
"AL22 AL25 AM20 AK23 AK22 AL24 AL20 AL23",
"AM24 AN23 AN24 AP23 AP25 AN22 AP24 AM22",
"AH28 AK26 AK28 AM27 AJ28 AH27 AK27 AM26",
"AL30 AP29 AM30 AN28 AL29 AP28 AM29 AN27",
"AH31 AH32 AJ34 AK31 AJ31 AJ30 AH34 AK32",
"AN33 AP33 AM34 AP31 AM32 AN31 AL34 AN32"),
IOStandard("POD12_DCI"),
Misc("PRE_EMPHASIS=RDRV_240"),
Misc("EQUALIZATION=EQ_LEVEL2")),
Subsignal("dqs_p", Pins("AG21 AH24 AJ20 AP20 AL27 AN29 AH33 AN34"),
IOStandard("DIFF_POD12_DCI"),
Misc("PRE_EMPHASIS=RDRV_240"),
Misc("EQUALIZATION=EQ_LEVEL2")),
Subsignal("dqs_n", Pins("AH21 AJ25 AK20 AP21 AL28 AP30 AJ33 AP34"),
IOStandard("DIFF_POD12_DCI"),
Misc("PRE_EMPHASIS=RDRV_240"),
Misc("EQUALIZATION=EQ_LEVEL2")),
Subsignal("clk_p", Pins("AE16"), IOStandard("DIFF_SSTL12_DCI")),
Subsignal("clk_n", Pins("AE15"), IOStandard("DIFF_SSTL12_DCI")),
Subsignal("cke", Pins("AD15"), IOStandard("SSTL12_DCI")),
Subsignal("odt", Pins("AJ18"), IOStandard("SSTL12_DCI")),
Subsignal("reset_n", Pins("AL18"), IOStandard("LVCMOS12")),
Misc("SLEW=FAST"),
),
# PCIe
("pcie_x1", 0,
Subsignal("rst_n", Pins("K22"), IOStandard("LVCMOS18")),
Subsignal("clk_p", Pins("AB6")),
Subsignal("clk_n", Pins("AB5")),
Subsignal("rx_p", Pins("AB2")),
Subsignal("rx_n", Pins("AB1")),
Subsignal("tx_p", Pins("AC4")),
Subsignal("tx_n", Pins("AC3"))
),
("pcie_x2", 0,
Subsignal("rst_n", Pins("K22"), IOStandard("LVCMOS18")),
Subsignal("clk_p", Pins("AB6")),
Subsignal("clk_n", Pins("AB5")),
Subsignal("rx_p", Pins("AB2 AD2")),
Subsignal("rx_n", Pins("AB1 AD1")),
Subsignal("tx_p", Pins("AC4 AE4")),
Subsignal("tx_n", Pins("AC3 AE3"))
),
("pcie_x4", 0,
Subsignal("rst_n", Pins("K22"), IOStandard("LVCMOS18")),
Subsignal("clk_p", Pins("AB6")),
Subsignal("clk_n", Pins("AB5")),
Subsignal("rx_p", Pins("AB2 AD2 AF2 AH2")),
Subsignal("rx_n", Pins("AB1 AD1 AF1 AH1")),
Subsignal("tx_p", Pins("AC4 AE4 AG4 AH6")),
Subsignal("tx_n", Pins("AC3 AE3 AG3 AH5"))
),
("pcie_x8", 0,
Subsignal("rst_n", Pins("K22"), IOStandard("LVCMOS18")),
Subsignal("clk_p", Pins("AB6")),
Subsignal("clk_n", Pins("AB5")),
Subsignal("rx_p", Pins("AB2 AD2 AF2 AH2 AJ4 AK2 AM2 AP2")),
Subsignal("rx_n", Pins("AB1 AD1 AF1 AH1 AJ3 AK1 AM1 AP1")),
Subsignal("tx_p", Pins("AC4 AE4 AG4 AH6 AK6 AL4 AM6 AN4")),
Subsignal("tx_n", Pins("AC3 AE3 AG3 AH5 AK5 AL3 AM5 AN3"))
),
# SGMII Clk
("sgmii_clock", 0,
Subsignal("p", Pins("P26"), IOStandard("LVDS_25")),
Subsignal("n", Pins("N26"), IOStandard("LVDS_25"))
),
# SI570
("si570_refclk", 0,
Subsignal("p", Pins("P6")),
Subsignal("n", Pins("P5"))
),
# SMA
("user_sma_mgt_refclk", 0,
Subsignal("p", Pins("V6")),
Subsignal("n", Pins("V5"))
),
("user_sma_mgt_tx", 0,
Subsignal("p", Pins("R4")),
Subsignal("n", Pins("R3"))
),
("user_sma_mgt_rx", 0,
Subsignal("p", Pins("P2")),
Subsignal("n", Pins("P1"))
),
# SFP
("sfp", 0,
Subsignal("txp", Pins("U4")),
Subsignal("txn", Pins("U3")),
Subsignal("rxp", Pins("T2")),
Subsignal("rxn", Pins("T1"))
),
("sfp_tx", 0,
Subsignal("p", Pins("U4")),
Subsignal("n", Pins("U3")),
),
("sfp_rx", 0,
Subsignal("p", Pins("T2")),
Subsignal("n", Pins("T1")),
),
("sfp_tx_disable_n", 0, Pins("AL8"), IOStandard("LVCMOS18")),
("sfp", 1,
Subsignal("txp", Pins("W4")),
Subsignal("txn", Pins("W3")),
Subsignal("rxp", Pins("V2")),
Subsignal("rxn", Pins("V1"))
),
("sfp_tx", 1,
Subsignal("p", Pins("W4")),
Subsignal("n", Pins("W3")),
),
("sfp_rx", 1,
Subsignal("p", Pins("V2")),
Subsignal("n", Pins("V1")),
),
("sfp_tx_disable_n", 1, Pins("D28"), IOStandard("LVCMOS18")),
]
# Connectors ---------------------------------------------------------------------------------------
_connectors = [
("HPC", {
"DP0_C2M_P" : "F6",
"DP0_C2M_N" : "F5",
"DP0_M2C_P" : "E4",
"DP0_M2C_N" : "E3",
"DP1_C2M_P" : "D6",
"DP1_C2M_N" : "D5",
"DP1_M2C_P" : "D2",
"DP1_M2C_N" : "D1",
"DP2_C2M_P" : "C4",
"DP2_C2M_N" : "C3",
"DP2_M2C_P" : "B2",
"DP2_M2C_N" : "B1",
"DP3_C2M_P" : "B6",
"DP3_C2M_N" : "B5",
"DP3_M2C_P" : "A4",
"DP3_M2C_N" : "A3",
"DP4_C2M_P" : "N4",
"DP4_C2M_N" : "N3",
"DP4_M2C_P" : "M2",
"DP4_M2C_N" : "M1",
"DP5_C2M_P" : "J4",
"DP5_C2M_N" : "J3",
"DP5_M2C_P" : "H2",
"DP5_M2C_N" : "H1",
"DP6_C2M_P" : "L4",
"DP6_C2M_N" : "L3",
"DP6_M2C_P" : "K2",
"DP6_M2C_N" : "K1",
"DP7_C2M_P" : "G4",
"DP7_C2M_N" : "G3",
"DP7_M2C_P" : "F2",
"DP7_M2C_N" : "F1",
"LA06_P" : "D13",
"LA06_N" : "C13",
"LA10_P" : "L8",
"LA10_N" : "K8",
"LA14_P" : "B10",
"LA14_N" : "A10",
"LA18_CC_P" : "E22",
"LA18_CC_N" : "E23",
"LA27_P" : "H21",
"LA27_N" : "G21",
"HA01_CC_P" : "E16",
"HA01_CC_N" : "D16",
"HA05_P" : "J15",
"HA05_N" : "J14",
"HA09_P" : "F18",
"HA09_N" : "F17",
"HA13_P" : "B14",
"HA13_N" : "A14",
"HA16_P" : "A19",
"HA16_N" : "A18",
"HA20_P" : "C19",
"HA20_N" : "B19",
"CLK1_M2C_P" : "E25",
"CLK1_M2C_N" : "D25",
"LA00_CC_P" : "H11",
"LA00_CC_N" : "G11",
"LA03_P" : "A13",
"LA03_N" : "A12",
"LA08_P" : "J8",
"LA08_N" : "H8",
"LA12_P" : "E10",
"LA12_N" : "D10",
"LA16_P" : "B9",
"LA16_N" : "A9",
"LA20_P" : "B24",
"LA20_N" : "A24",
"LA22_P" : "G24",
"LA22_N" : "F25",
"LA25_P" : "D20",
"LA25_N" : "D21",
"LA29_P" : "B20",
"LA29_N" : "A20",
"LA31_P" : "B25",
"LA31_N" : "A25",
"LA33_P" : "A27",
"LA33_N" : "A28",
"HA03_P" : "G15",
"HA03_N" : "G14",
"HA07_P" : "L19",
"HA07_N" : "L18",
"HA11_P" : "J19",
"HA11_N" : "J18",
"HA14_P" : "F15",
"HA14_N" : "F14",
"HA18_P" : "B17",
"HA18_N" : "B16",
"HA22_P" : "C18",
"HA22_N" : "C17",
"GBTCLK1_M2C_P" : "H6",
"GBTCLK1_M2C_N" : "H5",
"GBTCLK0_M2C_P" : "K6",
"GBTCLK0_M2C_N" : "K5",
"LA01_CC_P" : "G9",
"LA01_CC_N" : "F9",
"LA05_P" : "L13",
"LA05_N" : "K13",
"LA09_P" : "J9",
"LA09_N" : "H9",
"LA13_P" : "D9",
"LA13_N" : "C9",
"LA17_CC_P" : "D24",
"LA17_CC_N" : "C24",
"LA23_P" : "G22",
"LA23_N" : "F22",
"LA26_P" : "G20",
"LA26_N" : "F20",
"PG_M2C" : "L27",
"HA00_CC_P" : "G17",
"HA00_CC_N" : "G16",
"HA04_P" : "G19",
"HA04_N" : "F19",
"HA08_P" : "K18",
"HA08_N" : "K17",
"HA12_P" : "K16",
"HA12_N" : "J16",
"HA15_P" : "D14",
"HA15_N" : "C14",
"HA19_P" : "D19",
"HA19_N" : "D18",
"PRSNT_M2C_B" : "H24",
"CLK0_M2C_P" : "H12",
"CLK0_M2C_N" : "G12",
"LA02_P" : "K10",
"LA02_N" : "J10",
"LA04_P" : "L12",
"LA04_N" : "K12",
"LA07_P" : "F8",
"LA07_N" : "E8",
"LA11_P" : "K11",
"LA11_N" : "J11",
"LA15_P" : "D8",
"LA15_N" : "C8",
"LA19_P" : "C21",
"LA19_N" : "C22",
"LA21_P" : "F23",
"LA21_N" : "F24",
"LA24_P" : "E20",
"LA24_N" : "E21",
"LA28_P" : "B21",
"LA28_N" : "B22",
"LA30_P" : "C26",
"LA30_N" : "B26",
"LA32_P" : "E26",
"LA32_N" : "D26",
"HA02_P" : "H19",
"HA02_N" : "H18",
"HA06_P" : "L15",
"HA06_N" : "K15",
"HA10_P" : "H17",
"HA10_N" : "H16",
"HA17_CC_P" : "E18",
"HA17_CC_N" : "E17",
"HA21_P" : "E15",
"HA21_N" : "D15",
"HA23_P" : "B15",
"HA23_N" : "A15",
}
),
("LPC", {
"GBTCLK0_M2C_P" : "AA24",
"GBTCLK0_M2C_N" : "AA25",
"LA01_CC_P" : "W25",
"LA01_CC_N" : "Y25",
"LA05_P" : "V27",
"LA05_N" : "V28",
"LA09_P" : "V26",
"LA09_N" : "W26",
"LA13_P" : "AA20",
"LA13_N" : "AB20",
"LA17_CC_P" : "AA32",
"LA17_CC_N" : "AB32",
"LA23_P" : "AD30",
"LA23_N" : "AD31",
"LA26_P" : "AF33",
"LA26_N" : "AG34",
"CLK0_M2C_P" : "AA24",
"CLK0_M2C_N" : "AA25",
"LA02_P" : "AA22",
"LA02_N" : "AB22",
"LA04_P" : "U26",
"LA04_N" : "U27",
"LA07_P" : "V22",
"LA07_N" : "V23",
"LA11_P" : "V21",
"LA11_N" : "W21",
"LA15_P" : "AB25",
"LA15_N" : "AB26",
"LA19_P" : "AA29",
"LA19_N" : "AB29",
"LA21_P" : "AC33",
"LA21_N" : "AD33",
"LA24_P" : "AE32",
"LA24_N" : "AF32",
"LA28_P" : "V31",
"LA28_N" : "W31",
"LA30_P" : "Y31",
"LA30_N" : "Y32",
"LA32_P" : "W30",
"LA32_N" : "Y30",
"LA06_P" : "V29",
"LA06_N" : "W29",
"LA10_P" : "T22",
"LA10_N" : "T23",
"LA14_P" : "U21",
"LA14_N" : "U22",
"LA18_CC_P" : "AB30",
"LA18_CC_N" : "AB31",
"LA27_P" : "AG31",
"LA27_N" : "AG32",
"CLK1_M2C_P" : "AC31",
"CLK1_M2C_N" : "AC32",
"LA00_CC_P" : "W23",
"LA00_CC_N" : "W24",
"LA03_P" : "W28",
"LA03_N" : "Y28",
"LA08_P" : "U24",
"LA08_N" : "U25",
"LA12_P" : "AC22",
"LA12_N" : "AC23",
"LA16_P" : "AB21",
"LA16_N" : "AC21",
"LA20_P" : "AA34",
"LA20_N" : "AB34",
"LA22_P" : "AC34",
"LA22_N" : "AD34",
"LA25_P" : "AE33",
"LA25_N" : "AF34",
"LA29_P" : "U34",
"LA29_N" : "V34",
"LA31_P" : "V33",
"LA31_N" : "W34",
"LA33_P" : "W33",
"LA33_N" : "Y33",
}
),
("pmod0", "AK25 AN21 AH18 AM19 AE26 AF25 AE21 AM17"),
("pmod1", "AL14 AM14 AP16 AP15 AM16 AM15 AN18 AN17"),
]
# Platform -----------------------------------------------------------------------------------------
class Platform(XilinxPlatform):
default_clk_name = "clk125"
default_clk_period = 1e9/125e6
def __init__(self):
XilinxPlatform.__init__(self, "xcku040-ffva1156-2-e", _io, _connectors, toolchain="vivado")
def create_programmer(self):
return VivadoProgrammer()
def do_finalize(self, fragment):
XilinxPlatform.do_finalize(self, fragment)
self.add_period_constraint(self.lookup_request("clk125", loose=True), 1e9/125e6)
self.add_period_constraint(self.lookup_request("clk300", loose=True), 1e9/300e6)
self.add_platform_command("set_property INTERNAL_VREF 0.84 [get_iobanks 44]")
self.add_platform_command("set_property INTERNAL_VREF 0.84 [get_iobanks 45]")
self.add_platform_command("set_property INTERNAL_VREF 0.84 [get_iobanks 46]")
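
# Usage sketch (hypothetical SoC code): the IO definitions above are
# consumed through the standard LiteX request API, e.g.
#   platform = Platform()
#   serial   = platform.request("serial")
#   leds     = [platform.request("user_led", i) for i in range(8)]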
| [
"litex.build.xilinx.XilinxPlatform.__init__",
"litex.build.xilinx.VivadoProgrammer",
"litex.build.xilinx.XilinxPlatform.do_finalize"
] | [((18029, 18124), 'litex.build.xilinx.XilinxPlatform.__init__', 'XilinxPlatform.__init__', (['self', '"""xcku040-ffva1156-2-e"""', '_io', '_connectors'], {'toolchain': '"""vivado"""'}), "(self, 'xcku040-ffva1156-2-e', _io, _connectors,\n toolchain='vivado')\n", (18052, 18124), False, 'from litex.build.xilinx import XilinxPlatform, VivadoProgrammer\n'), ((18170, 18188), 'litex.build.xilinx.VivadoProgrammer', 'VivadoProgrammer', ([], {}), '()\n', (18186, 18188), False, 'from litex.build.xilinx import XilinxPlatform, VivadoProgrammer\n'), ((18235, 18277), 'litex.build.xilinx.XilinxPlatform.do_finalize', 'XilinxPlatform.do_finalize', (['self', 'fragment'], {}), '(self, fragment)\n', (18261, 18277), False, 'from litex.build.xilinx import XilinxPlatform, VivadoProgrammer\n')] |
import os
import traceback
class InputHandler:
IMAGES_PARENT_FOLDER = './images'
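    # Scans a folder (./images by default) and returns the full paths of the
    # regular files it contains; subdirectories are skipped.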
    def __init__(self):
        self.filesList = []  # was a local variable that was silently discarded
    def listFiles(self, path=''):
        if path != '':
            self.IMAGES_PARENT_FOLDER = path
        try:
            self.filesList = [os.path.join(self.IMAGES_PARENT_FOLDER, imageFile)
                              for imageFile in os.listdir(self.IMAGES_PARENT_FOLDER)
                              if os.path.isfile(os.path.join(self.IMAGES_PARENT_FOLDER, imageFile))]
        except OSError:
            traceback.print_exc()  # the traceback module has print_exc(), not print_exec()
        return self.filesList
if __name__ == '__main__':
obj = InputHandler()
print(obj.listFiles()) | [
"traceback.print_exec",
"os.listdir",
"os.path.join"
] | [((290, 340), 'os.path.join', 'os.path.join', (['self.IMAGES_PARENT_FOLDER', 'imageFile'], {}), '(self.IMAGES_PARENT_FOLDER, imageFile)\n', (302, 340), False, 'import os\n'), ((357, 394), 'os.listdir', 'os.listdir', (['self.IMAGES_PARENT_FOLDER'], {}), '(self.IMAGES_PARENT_FOLDER)\n', (367, 394), False, 'import os\n'), ((528, 550), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (548, 550), False, 'import traceback\n'), ((442, 492), 'os.path.join', 'os.path.join', (['self.IMAGES_PARENT_FOLDER', 'imageFile'], {}), '(self.IMAGES_PARENT_FOLDER, imageFile)\n', (454, 492), False, 'import os\n')]
import os
import tmdbsimple as tmdb
import media
import fresh_tomatoes as ft
movies = []
if os.environ.get('TMDB_API', False):
# Retrieve API KEY
tmdb.API_KEY = os.environ['TMDB_API']
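    # tmdbsimple reads this module-level API_KEY for every subsequent request.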
# TMDB Movie Ids
movie_ids = [271110, 297761, 246655, 278154, 135397, 188927]
# Get Configuration
configuration = tmdb.Configuration().info()
image_base_url = configuration['images']['secure_base_url']
image_width = "w500"
for movie_id in movie_ids:
m = tmdb.Movies(movie_id)
# Retrieve Image URL
minfo = m.info()
poster_image_url = image_base_url + image_width + minfo['poster_path']
# Retrieve Youtube Video URL
videos = m.videos()
video = videos['results'][0]
youtube_url = 'https://youtube.com/watch?v=' + video['key']
# Append Movie object
movie = media.Movie(m.title)
movie.storyline = m.overview
movie.poster_url = poster_image_url
movie.trailer_url = youtube_url
movies.append(movie)
else:
# Avatar
avatar = media.Movie("Avatar")
avatar.storyline = ("A paraplegic marine dispatched to the moon Pandora "
"on a unique mission becomes torn between following "
"his orders and protecting the world he feels is "
"his home.")
avatar.poster_url = ("https://upload.wikimedia.org/wikipedia/"
"en/b/b0/Avatar-Teaser-Poster.jpg")
avatar.trailer_url = "https://www.youtube.com/watch?v=-9ceBgWV8io"
# Deadpool
deadpool = media.Movie("Deadpool")
deadpool.storyline = ("A fast-talking mercenary with a morbid sense of "
"humor is subjected to a rogue experiment that "
"leaves him with accelerated healing powers and a "
"quest for revenge.")
deadpool.poster_url = ("https://upload.wikimedia.org/wikipedia/en/4/46/"
"Deadpool_poster.jpg")
deadpool.trailer_url = "https://www.youtube.com/watch?v=gtTfd6tISfw"
# Ghostbusters
ghostbusters = media.Movie("Ghostbusters")
ghostbusters.storyline = ("Following a ghost invasion of Manhattan, "
"paranormal enthusiasts <NAME> and Abby "
"Yates, nuclear engineer <NAME>, "
"and subway worker <NAME> band together "
"to stop the otherworldly threat.")
ghostbusters.poster_url = ("https://upload.wikimedia.org/wikipedia/"
"en/3/32/Ghostbusters_2016_film_poster.png")
ghostbusters.trailer_url = "https://www.youtube.com/watch?v=w3ugHP-yZXw"
# Olympus
olympus = media.Movie("Olympus Has Fallen")
olympus.storyline = ("Disgraced Secret Service agent (and former "
"presidential guard) <NAME> finds himself "
"trapped inside the White House in the wake of a "
"terrorist attack; using his inside knowledge, "
"Banning works with national security to rescue "
"the President from his kidnappers.")
olympus.poster_url = ("https://upload.wikimedia.org/wikipedia/en/b/bf/"
"Olympus_Has_Fallen_poster.jpg")
olympus.trailer_url = "https://www.youtube.com/watch?v=vwx1f0kyNwI"
# Angry Birds
angry_birds = media.Movie("The Angry Birds Movie")
angry_birds.storyline = ("Find out why the birds are so angry. When an "
"island populated by happy, flightless birds "
"is visited by mysterious green piggies, it's "
"up to three unlikely outcasts - Red, Chuck "
"and Bomb - to figure out what the pigs are up "
"to.")
angry_birds.poster_url = ("https://upload.wikimedia.org/wikipedia/en/f/"
"f9/The_Angry_Birds_Movie_poster.png")
angry_birds.trailer_url = "https://www.youtube.com/watch?v=1U2DKKqxHgE"
# Ironman
ironman = media.Movie("Iron Man")
ironman.storyline = ("After being held captive in an Afghan cave, "
"billionaire engineer <NAME> creates a unique "
"weaponized suit of armor to fight evil.")
ironman.poster_url = ("https://upload.wikimedia.org/wikipedia/en/7/70/"
"Ironmanposter.JPG")
ironman.trailer_url = "https://www.youtube.com/watch?v=8hYlB38asDY"
movies = [avatar, deadpool, ghostbusters, olympus, angry_birds, ironman]
ft.open_movies_page(movies)
| [
"fresh_tomatoes.open_movies_page",
"tmdbsimple.Configuration",
"os.environ.get",
"media.Movie",
"tmdbsimple.Movies"
] | [((94, 127), 'os.environ.get', 'os.environ.get', (['"""TMDB_API"""', '(False)'], {}), "('TMDB_API', False)\n", (108, 127), False, 'import os\n'), ((4683, 4710), 'fresh_tomatoes.open_movies_page', 'ft.open_movies_page', (['movies'], {}), '(movies)\n', (4702, 4710), True, 'import fresh_tomatoes as ft\n'), ((1066, 1087), 'media.Movie', 'media.Movie', (['"""Avatar"""'], {}), "('Avatar')\n", (1077, 1087), False, 'import media\n'), ((1586, 1609), 'media.Movie', 'media.Movie', (['"""Deadpool"""'], {}), "('Deadpool')\n", (1597, 1609), False, 'import media\n'), ((2127, 2154), 'media.Movie', 'media.Movie', (['"""Ghostbusters"""'], {}), "('Ghostbusters')\n", (2138, 2154), False, 'import media\n'), ((2759, 2792), 'media.Movie', 'media.Movie', (['"""Olympus Has Fallen"""'], {}), "('Olympus Has Fallen')\n", (2770, 2792), False, 'import media\n'), ((3465, 3501), 'media.Movie', 'media.Movie', (['"""The Angry Birds Movie"""'], {}), "('The Angry Birds Movie')\n", (3476, 3501), False, 'import media\n'), ((4172, 4195), 'media.Movie', 'media.Movie', (['"""Iron Man"""'], {}), "('Iron Man')\n", (4183, 4195), False, 'import media\n'), ((487, 508), 'tmdbsimple.Movies', 'tmdb.Movies', (['movie_id'], {}), '(movie_id)\n', (498, 508), True, 'import tmdbsimple as tmdb\n'), ((861, 881), 'media.Movie', 'media.Movie', (['m.title'], {}), '(m.title)\n', (872, 881), False, 'import media\n'), ((326, 346), 'tmdbsimple.Configuration', 'tmdb.Configuration', ([], {}), '()\n', (344, 346), True, 'import tmdbsimple as tmdb\n')] |
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file './elements_ui.ui',
# licensing of './elements_ui.ui' applies.
#
# Created: Wed Jun 16 14:29:03 2021
# by: pyside2-uic running on PySide2 5.13.2
#
# WARNING! All changes made in this file will be lost!
from PySide2 import QtCore, QtGui, QtWidgets
class Ui_ElementsWindow(object):
def setupUi(self, ElementsWindow):
ElementsWindow.setObjectName("ElementsWindow")
ElementsWindow.resize(841, 623)
self.centralwidget = QtWidgets.QWidget(ElementsWindow)
self.centralwidget.setObjectName("centralwidget")
self.verticalLayout_2 = QtWidgets.QVBoxLayout(self.centralwidget)
self.verticalLayout_2.setSpacing(0)
self.verticalLayout_2.setContentsMargins(0, 0, 0, 0)
self.verticalLayout_2.setObjectName("verticalLayout_2")
self.verticalLayout = QtWidgets.QVBoxLayout()
self.verticalLayout.setSizeConstraint(
QtWidgets.QLayout.SetDefaultConstraint)
self.verticalLayout.setObjectName("verticalLayout")
self.horizontalLayout = QtWidgets.QHBoxLayout()
self.horizontalLayout.setObjectName("horizontalLayout")
self.btn_refresh = QtWidgets.QPushButton(self.centralwidget)
self.btn_refresh.setCursor(QtCore.Qt.ClosedHandCursor)
self.btn_refresh.setText("")
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap(":/refresh"), QtGui.QIcon.Normal,
QtGui.QIcon.Off)
self.btn_refresh.setIcon(icon)
self.btn_refresh.setIconSize(QtCore.QSize(20, 20))
self.btn_refresh.setAutoDefault(False)
self.btn_refresh.setDefault(False)
self.btn_refresh.setFlat(True)
self.btn_refresh.setObjectName("btn_refresh")
self.horizontalLayout.addWidget(self.btn_refresh)
self.label = QtWidgets.QLabel(self.centralwidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum,
QtWidgets.QSizePolicy.Minimum)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(
self.label.sizePolicy().hasHeightForWidth())
self.label.setSizePolicy(sizePolicy)
font = QtGui.QFont()
font.setWeight(75)
font.setBold(True)
self.label.setFont(font)
self.label.setAlignment(QtCore.Qt.AlignRight | QtCore.Qt.AlignTrailing |
QtCore.Qt.AlignVCenter)
self.label.setObjectName("label")
self.horizontalLayout.addWidget(self.label)
self.combo_element_type = QtWidgets.QComboBox(self.centralwidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred,
QtWidgets.QSizePolicy.Minimum)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(
self.combo_element_type.sizePolicy().hasHeightForWidth())
self.combo_element_type.setSizePolicy(sizePolicy)
self.combo_element_type.setCurrentText("")
self.combo_element_type.setSizeAdjustPolicy(
QtWidgets.QComboBox.AdjustToContents)
self.combo_element_type.setObjectName("combo_element_type")
self.horizontalLayout.addWidget(self.combo_element_type)
self.line = QtWidgets.QFrame(self.centralwidget)
self.line.setFrameShape(QtWidgets.QFrame.VLine)
self.line.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line.setObjectName("line")
self.horizontalLayout.addWidget(self.line)
self.label_3 = QtWidgets.QLabel(self.centralwidget)
font = QtGui.QFont()
font.setWeight(75)
font.setBold(True)
self.label_3.setFont(font)
self.label_3.setObjectName("label_3")
self.horizontalLayout.addWidget(self.label_3)
self.label_2 = QtWidgets.QLabel(self.centralwidget)
self.label_2.setObjectName("label_2")
self.horizontalLayout.addWidget(self.label_2)
self.lineEdit = QtWidgets.QLineEdit(self.centralwidget)
self.lineEdit.setObjectName("lineEdit")
self.horizontalLayout.addWidget(self.lineEdit)
self.label_4 = QtWidgets.QLabel(self.centralwidget)
self.label_4.setObjectName("label_4")
self.horizontalLayout.addWidget(self.label_4)
self.lineEdit_2 = QtWidgets.QLineEdit(self.centralwidget)
self.lineEdit_2.setObjectName("lineEdit_2")
self.horizontalLayout.addWidget(self.lineEdit_2)
self.line_2 = QtWidgets.QFrame(self.centralwidget)
self.line_2.setFrameShape(QtWidgets.QFrame.VLine)
self.line_2.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line_2.setObjectName("line_2")
self.horizontalLayout.addWidget(self.line_2)
self.verticalLayout.addLayout(self.horizontalLayout)
self.tableElements = QtWidgets.QTableView(self.centralwidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding,
QtWidgets.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(
self.tableElements.sizePolicy().hasHeightForWidth())
self.tableElements.setSizePolicy(sizePolicy)
self.tableElements.setProperty("showDropIndicator", False)
self.tableElements.setDragDropOverwriteMode(False)
self.tableElements.setAlternatingRowColors(True)
self.tableElements.setSortingEnabled(False)
self.tableElements.setObjectName("tableElements")
self.verticalLayout.addWidget(self.tableElements)
self.verticalLayout_2.addLayout(self.verticalLayout)
ElementsWindow.setCentralWidget(self.centralwidget)
self.menubar = QtWidgets.QMenuBar()
self.menubar.setGeometry(QtCore.QRect(0, 0, 841, 22))
self.menubar.setObjectName("menubar")
ElementsWindow.setMenuBar(self.menubar)
self.statusbar = QtWidgets.QStatusBar(ElementsWindow)
self.statusbar.setEnabled(True)
self.statusbar.setObjectName("statusbar")
ElementsWindow.setStatusBar(self.statusbar)
self.retranslateUi(ElementsWindow)
QtCore.QObject.connect(self.combo_element_type,
QtCore.SIGNAL("currentIndexChanged(QString)"),
ElementsWindow.combo_element_type)
QtCore.QObject.connect(self.btn_refresh, QtCore.SIGNAL("clicked()"),
ElementsWindow.force_refresh)
QtCore.QMetaObject.connectSlotsByName(ElementsWindow)
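        # Note: the connect() calls above use the old string-based SIGNAL()
        # API; they bind the widgets to slots on the ElementsWindow host.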
def retranslateUi(self, ElementsWindow):
ElementsWindow.setWindowTitle(
QtWidgets.QApplication.translate("ElementsWindow", "MainWindow",
None, -1))
self.btn_refresh.setToolTip(
QtWidgets.QApplication.translate("ElementsWindow",
"Force refresh the table ", None,
-1))
self.btn_refresh.setStatusTip(
QtWidgets.QApplication.translate("ElementsWindow",
"Force refresh the table ", None,
-1))
self.btn_refresh.setWhatsThis(
QtWidgets.QApplication.translate("ElementsWindow",
"Force refresh the table ", None,
-1))
self.btn_refresh.setAccessibleDescription(
QtWidgets.QApplication.translate("ElementsWindow",
"Force refresh the table ", None,
-1))
self.label.setText(
QtWidgets.QApplication.translate("ElementsWindow", "Element type: ",
None, -1))
self.combo_element_type.setToolTip(
QtWidgets.QApplication.translate(
"ElementsWindow",
"<html><head/><body><p>Select the element table you wish to view</p></body></html>",
None, -1))
self.label_3.setText(
QtWidgets.QApplication.translate("ElementsWindow", " Filter: ",
None, -1))
self.label_2.setText(
QtWidgets.QApplication.translate("ElementsWindow", "Component: ",
None, -1))
self.label_4.setText(
QtWidgets.QApplication.translate("ElementsWindow", " Layer: ",
None, -1))
from . import main_window_rc_rc
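# main_window_rc_rc is the rcc-compiled Qt resource module that registers
# the ":/refresh" icon looked up in setupUi above.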
| [
"PySide2.QtCore.QMetaObject.connectSlotsByName",
"PySide2.QtGui.QIcon",
"PySide2.QtWidgets.QSizePolicy",
"PySide2.QtWidgets.QTableView",
"PySide2.QtWidgets.QStatusBar",
"PySide2.QtGui.QPixmap",
"PySide2.QtWidgets.QFrame",
"PySide2.QtWidgets.QHBoxLayout",
"PySide2.QtCore.QRect",
"PySide2.QtCore.QSize",
"PySide2.QtWidgets.QComboBox",
"PySide2.QtWidgets.QWidget",
"PySide2.QtWidgets.QLineEdit",
"PySide2.QtWidgets.QMenuBar",
"PySide2.QtWidgets.QPushButton",
"PySide2.QtGui.QFont",
"PySide2.QtCore.SIGNAL",
"PySide2.QtWidgets.QLabel",
"PySide2.QtWidgets.QApplication.translate",
"PySide2.QtWidgets.QVBoxLayout"
] | [((531, 564), 'PySide2.QtWidgets.QWidget', 'QtWidgets.QWidget', (['ElementsWindow'], {}), '(ElementsWindow)\n', (548, 564), False, 'from PySide2 import QtCore, QtGui, QtWidgets\n'), ((655, 696), 'PySide2.QtWidgets.QVBoxLayout', 'QtWidgets.QVBoxLayout', (['self.centralwidget'], {}), '(self.centralwidget)\n', (676, 696), False, 'from PySide2 import QtCore, QtGui, QtWidgets\n'), ((896, 919), 'PySide2.QtWidgets.QVBoxLayout', 'QtWidgets.QVBoxLayout', ([], {}), '()\n', (917, 919), False, 'from PySide2 import QtCore, QtGui, QtWidgets\n'), ((1111, 1134), 'PySide2.QtWidgets.QHBoxLayout', 'QtWidgets.QHBoxLayout', ([], {}), '()\n', (1132, 1134), False, 'from PySide2 import QtCore, QtGui, QtWidgets\n'), ((1226, 1267), 'PySide2.QtWidgets.QPushButton', 'QtWidgets.QPushButton', (['self.centralwidget'], {}), '(self.centralwidget)\n', (1247, 1267), False, 'from PySide2 import QtCore, QtGui, QtWidgets\n'), ((1383, 1396), 'PySide2.QtGui.QIcon', 'QtGui.QIcon', ([], {}), '()\n', (1394, 1396), False, 'from PySide2 import QtCore, QtGui, QtWidgets\n'), ((1868, 1904), 'PySide2.QtWidgets.QLabel', 'QtWidgets.QLabel', (['self.centralwidget'], {}), '(self.centralwidget)\n', (1884, 1904), False, 'from PySide2 import QtCore, QtGui, QtWidgets\n'), ((1926, 2014), 'PySide2.QtWidgets.QSizePolicy', 'QtWidgets.QSizePolicy', (['QtWidgets.QSizePolicy.Minimum', 'QtWidgets.QSizePolicy.Minimum'], {}), '(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.\n Minimum)\n', (1947, 2014), False, 'from PySide2 import QtCore, QtGui, QtWidgets\n'), ((2292, 2305), 'PySide2.QtGui.QFont', 'QtGui.QFont', ([], {}), '()\n', (2303, 2305), False, 'from PySide2 import QtCore, QtGui, QtWidgets\n'), ((2658, 2697), 'PySide2.QtWidgets.QComboBox', 'QtWidgets.QComboBox', (['self.centralwidget'], {}), '(self.centralwidget)\n', (2677, 2697), False, 'from PySide2 import QtCore, QtGui, QtWidgets\n'), ((2719, 2809), 'PySide2.QtWidgets.QSizePolicy', 'QtWidgets.QSizePolicy', (['QtWidgets.QSizePolicy.Preferred', 'QtWidgets.QSizePolicy.Minimum'], {}), '(QtWidgets.QSizePolicy.Preferred, QtWidgets.\n QSizePolicy.Minimum)\n', (2740, 2809), False, 'from PySide2 import QtCore, QtGui, QtWidgets\n'), ((3405, 3441), 'PySide2.QtWidgets.QFrame', 'QtWidgets.QFrame', (['self.centralwidget'], {}), '(self.centralwidget)\n', (3421, 3441), False, 'from PySide2 import QtCore, QtGui, QtWidgets\n'), ((3670, 3706), 'PySide2.QtWidgets.QLabel', 'QtWidgets.QLabel', (['self.centralwidget'], {}), '(self.centralwidget)\n', (3686, 3706), False, 'from PySide2 import QtCore, QtGui, QtWidgets\n'), ((3722, 3735), 'PySide2.QtGui.QFont', 'QtGui.QFont', ([], {}), '()\n', (3733, 3735), False, 'from PySide2 import QtCore, QtGui, QtWidgets\n'), ((3948, 3984), 'PySide2.QtWidgets.QLabel', 'QtWidgets.QLabel', (['self.centralwidget'], {}), '(self.centralwidget)\n', (3964, 3984), False, 'from PySide2 import QtCore, QtGui, QtWidgets\n'), ((4109, 4148), 'PySide2.QtWidgets.QLineEdit', 'QtWidgets.QLineEdit', (['self.centralwidget'], {}), '(self.centralwidget)\n', (4128, 4148), False, 'from PySide2 import QtCore, QtGui, QtWidgets\n'), ((4275, 4311), 'PySide2.QtWidgets.QLabel', 'QtWidgets.QLabel', (['self.centralwidget'], {}), '(self.centralwidget)\n', (4291, 4311), False, 'from PySide2 import QtCore, QtGui, QtWidgets\n'), ((4438, 4477), 'PySide2.QtWidgets.QLineEdit', 'QtWidgets.QLineEdit', (['self.centralwidget'], {}), '(self.centralwidget)\n', (4457, 4477), False, 'from PySide2 import QtCore, QtGui, QtWidgets\n'), ((4609, 4645), 'PySide2.QtWidgets.QFrame', 'QtWidgets.QFrame', (['self.centralwidget'], 
{}), '(self.centralwidget)\n', (4625, 4645), False, 'from PySide2 import QtCore, QtGui, QtWidgets\n'), ((4951, 4991), 'PySide2.QtWidgets.QTableView', 'QtWidgets.QTableView', (['self.centralwidget'], {}), '(self.centralwidget)\n', (4971, 4991), False, 'from PySide2 import QtCore, QtGui, QtWidgets\n'), ((5013, 5105), 'PySide2.QtWidgets.QSizePolicy', 'QtWidgets.QSizePolicy', (['QtWidgets.QSizePolicy.Expanding', 'QtWidgets.QSizePolicy.Expanding'], {}), '(QtWidgets.QSizePolicy.Expanding, QtWidgets.\n QSizePolicy.Expanding)\n', (5034, 5105), False, 'from PySide2 import QtCore, QtGui, QtWidgets\n'), ((5879, 5899), 'PySide2.QtWidgets.QMenuBar', 'QtWidgets.QMenuBar', ([], {}), '()\n', (5897, 5899), False, 'from PySide2 import QtCore, QtGui, QtWidgets\n'), ((6081, 6117), 'PySide2.QtWidgets.QStatusBar', 'QtWidgets.QStatusBar', (['ElementsWindow'], {}), '(ElementsWindow)\n', (6101, 6117), False, 'from PySide2 import QtCore, QtGui, QtWidgets\n'), ((6650, 6703), 'PySide2.QtCore.QMetaObject.connectSlotsByName', 'QtCore.QMetaObject.connectSlotsByName', (['ElementsWindow'], {}), '(ElementsWindow)\n', (6687, 6703), False, 'from PySide2 import QtCore, QtGui, QtWidgets\n'), ((1420, 1446), 'PySide2.QtGui.QPixmap', 'QtGui.QPixmap', (['""":/refresh"""'], {}), "(':/refresh')\n", (1433, 1446), False, 'from PySide2 import QtCore, QtGui, QtWidgets\n'), ((1584, 1604), 'PySide2.QtCore.QSize', 'QtCore.QSize', (['(20)', '(20)'], {}), '(20, 20)\n', (1596, 1604), False, 'from PySide2 import QtCore, QtGui, QtWidgets\n'), ((5933, 5960), 'PySide2.QtCore.QRect', 'QtCore.QRect', (['(0)', '(0)', '(841)', '(22)'], {}), '(0, 0, 841, 22)\n', (5945, 5960), False, 'from PySide2 import QtCore, QtGui, QtWidgets\n'), ((6391, 6436), 'PySide2.QtCore.SIGNAL', 'QtCore.SIGNAL', (['"""currentIndexChanged(QString)"""'], {}), "('currentIndexChanged(QString)')\n", (6404, 6436), False, 'from PySide2 import QtCore, QtGui, QtWidgets\n'), ((6553, 6579), 'PySide2.QtCore.SIGNAL', 'QtCore.SIGNAL', (['"""clicked()"""'], {}), "('clicked()')\n", (6566, 6579), False, 'from PySide2 import QtCore, QtGui, QtWidgets\n'), ((6801, 6875), 'PySide2.QtWidgets.QApplication.translate', 'QtWidgets.QApplication.translate', (['"""ElementsWindow"""', '"""MainWindow"""', 'None', '(-1)'], {}), "('ElementsWindow', 'MainWindow', None, -1)\n", (6833, 6875), False, 'from PySide2 import QtCore, QtGui, QtWidgets\n'), ((6971, 7063), 'PySide2.QtWidgets.QApplication.translate', 'QtWidgets.QApplication.translate', (['"""ElementsWindow"""', '"""Force refresh the table """', 'None', '(-1)'], {}), "('ElementsWindow',\n 'Force refresh the table ', None, -1)\n", (7003, 7063), False, 'from PySide2 import QtCore, QtGui, QtWidgets\n'), ((7202, 7294), 'PySide2.QtWidgets.QApplication.translate', 'QtWidgets.QApplication.translate', (['"""ElementsWindow"""', '"""Force refresh the table """', 'None', '(-1)'], {}), "('ElementsWindow',\n 'Force refresh the table ', None, -1)\n", (7234, 7294), False, 'from PySide2 import QtCore, QtGui, QtWidgets\n'), ((7433, 7525), 'PySide2.QtWidgets.QApplication.translate', 'QtWidgets.QApplication.translate', (['"""ElementsWindow"""', '"""Force refresh the table """', 'None', '(-1)'], {}), "('ElementsWindow',\n 'Force refresh the table ', None, -1)\n", (7465, 7525), False, 'from PySide2 import QtCore, QtGui, QtWidgets\n'), ((7676, 7768), 'PySide2.QtWidgets.QApplication.translate', 'QtWidgets.QApplication.translate', (['"""ElementsWindow"""', '"""Force refresh the table """', 'None', '(-1)'], {}), "('ElementsWindow',\n 'Force refresh the table ', None, -1)\n", 
(7708, 7768), False, 'from PySide2 import QtCore, QtGui, QtWidgets\n'), ((7896, 7974), 'PySide2.QtWidgets.QApplication.translate', 'QtWidgets.QApplication.translate', (['"""ElementsWindow"""', '"""Element type: """', 'None', '(-1)'], {}), "('ElementsWindow', 'Element type: ', None, -1)\n", (7928, 7974), False, 'from PySide2 import QtCore, QtGui, QtWidgets\n'), ((8077, 8231), 'PySide2.QtWidgets.QApplication.translate', 'QtWidgets.QApplication.translate', (['"""ElementsWindow"""', '"""<html><head/><body><p>Select the element table you wish to view</p></body></html>"""', 'None', '(-1)'], {}), "('ElementsWindow',\n '<html><head/><body><p>Select the element table you wish to view</p></body></html>'\n , None, -1)\n", (8109, 8231), False, 'from PySide2 import QtCore, QtGui, QtWidgets\n'), ((8315, 8390), 'PySide2.QtWidgets.QApplication.translate', 'QtWidgets.QApplication.translate', (['"""ElementsWindow"""', '""" Filter: """', 'None', '(-1)'], {}), "('ElementsWindow', ' Filter: ', None, -1)\n", (8347, 8390), False, 'from PySide2 import QtCore, QtGui, QtWidgets\n'), ((8479, 8554), 'PySide2.QtWidgets.QApplication.translate', 'QtWidgets.QApplication.translate', (['"""ElementsWindow"""', '"""Component: """', 'None', '(-1)'], {}), "('ElementsWindow', 'Component: ', None, -1)\n", (8511, 8554), False, 'from PySide2 import QtCore, QtGui, QtWidgets\n'), ((8643, 8717), 'PySide2.QtWidgets.QApplication.translate', 'QtWidgets.QApplication.translate', (['"""ElementsWindow"""', '""" Layer: """', 'None', '(-1)'], {}), "('ElementsWindow', ' Layer: ', None, -1)\n", (8675, 8717), False, 'from PySide2 import QtCore, QtGui, QtWidgets\n')] |
from tanim.utils.config_ops import digest_config
from tanim.utils.iterables import list_update
# Currently, this is used only by Scene and Mobject.
# Still, we abstract its functionality here, albeit purely nominally.
# All actual implementation has to be handled by derived classes for now.
class Container(object):
def __init__(self, **kwargs):
digest_config(self, kwargs)
self.submobjects = [] # Is it really better to name it submobjects?
def add(self, *mobjects):
if self in mobjects:
raise Exception("Mobject cannot contain self")
self.submobjects = list_update(self.submobjects, mobjects)
return self
def add_to_back(self, *mobjects):
self.remove(*mobjects)
self.submobjects = list(mobjects) + self.submobjects
return self
def remove(self, *mobjects, ):
for mobject in mobjects:
            # Iterate over a copy: the elif branch below mutates self.submobjects.
            for submod in list(self.submobjects):
if isinstance(submod, GroupContainer):
submod.remove(mobject)
elif mobject == submod:
self.submobjects.remove(mobject)
return self
class GroupContainer(Container):
    def __init__(self, *containers, **kwargs):
        # Initialize the base Container first so that self.submobjects
        # exists before add() is called.
        Container.__init__(self, **kwargs)
        self.add(*containers)
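# Usage sketch (illustrative, not part of the original module): removal
# recurses through nested groups, so a mobject removed from the outer group
# is also removed from any GroupContainer it contains.
#
#     inner = GroupContainer(mob_a, mob_b)   # mob_a/mob_b are hypothetical
#     outer = GroupContainer(inner, mob_c)
#     outer.remove(mob_a)                    # delegates into `inner`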
| [
"tanim.utils.config_ops.digest_config",
"tanim.utils.iterables.list_update"
] | [((368, 395), 'tanim.utils.config_ops.digest_config', 'digest_config', (['self', 'kwargs'], {}), '(self, kwargs)\n', (381, 395), False, 'from tanim.utils.config_ops import digest_config\n'), ((619, 658), 'tanim.utils.iterables.list_update', 'list_update', (['self.submobjects', 'mobjects'], {}), '(self.submobjects, mobjects)\n', (630, 658), False, 'from tanim.utils.iterables import list_update\n')] |
import numpy as np
from pysz import compress, decompress
def test_compress_decompress():
a = np.linspace(0, 100, num=1000000).reshape((100, 100, 100)).astype(np.float32)
tolerance = 0.0001
compressed = compress(a, tolerance=tolerance)
recovered = decompress(compressed, a.shape, a.dtype)
    assert a.shape == recovered.shape
    assert np.allclose(a, recovered, atol=tolerance)
test_compress_decompress()
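# A further check one could add (an assumption, not in the original suite):
# the same round trip should hold for float64 input, since decompress() takes
# the shape and dtype straight from the caller.
#
# def test_compress_decompress_float64():
#     a = np.random.rand(50, 50, 50)              # float64 by default
#     compressed = compress(a, tolerance=1e-06)
#     recovered = decompress(compressed, a.shape, a.dtype)
#     assert np.allclose(a, recovered, atol=1e-06)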
| [
"pysz.decompress",
"numpy.linspace",
"pysz.compress",
"numpy.allclose"
] | [((216, 248), 'pysz.compress', 'compress', (['a'], {'tolerance': 'tolerance'}), '(a, tolerance=tolerance)\n', (224, 248), False, 'from pysz import compress, decompress\n'), ((266, 306), 'pysz.decompress', 'decompress', (['compressed', 'a.shape', 'a.dtype'], {}), '(compressed, a.shape, a.dtype)\n', (276, 306), False, 'from pysz import compress, decompress\n'), ((362, 403), 'numpy.allclose', 'np.allclose', (['a', 'recovered'], {'atol': 'tolerance'}), '(a, recovered, atol=tolerance)\n', (373, 403), True, 'import numpy as np\n'), ((99, 131), 'numpy.linspace', 'np.linspace', (['(0)', '(100)'], {'num': '(1000000)'}), '(0, 100, num=1000000)\n', (110, 131), True, 'import numpy as np\n')] |
import json
from sparkdq.outliers.params.OutlierSolverParams import OutlierSolverParams
from sparkdq.outliers.OutlierSolver import OutlierSolver
class KSigmaParams(OutlierSolverParams):
def __init__(self, deviation=1.5):
self.deviation = deviation
def model(self):
return OutlierSolver.kSigma
@staticmethod
def from_json(json_str):
d = json.loads(json_str)
return KSigmaParams(d["deviation"])
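# Round-trip sketch (illustrative only):
#
#     params = KSigmaParams.from_json('{"deviation": 2.0}')
#     assert params.deviation == 2.0
#     assert params.model() == OutlierSolver.kSigma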
| [
"json.loads"
] | [((382, 402), 'json.loads', 'json.loads', (['json_str'], {}), '(json_str)\n', (392, 402), False, 'import json\n')] |
import re
from setuptools import setup, find_packages
import sys
if sys.version_info < (3, 5):
    # Raising a plain string is a TypeError in Python 3; exit cleanly instead.
    sys.exit('must use Python version 3.5 or higher')
with open('./gmailapi_backend/__init__.py', 'r') as f:
MATCH_EXPR = "__version__[^'\"]+(['\"])([^'\"]+)"
VERSION = re.search(MATCH_EXPR, f.read()).group(2).strip()
setup(
name='django-gmailapi-backend',
version=VERSION,
packages=find_packages(),
author="<NAME>",
author_email="<EMAIL>",
license="Apache License 2.0",
entry_points={
'console_scripts': [
'gmail_oauth2 = gmailapi_backend.bin.gmail_oauth2:main',
]
},
install_requires=[
'google-api-python-client~=2.0',
'google-auth>=1.16.0,<3.0.0dev',
],
url="https://github.com/dolfim/django-gmailapi-backend",
long_description_content_type='text/markdown',
long_description=open('README.md').read(),
description='Email backend for Django which sends email via the Gmail API',
classifiers=[
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Operating System :: MacOS :: MacOS X',
'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Framework :: Django',
'Topic :: Communications :: Email',
'Development Status :: 4 - Beta'
],
)
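# Note (not part of setup.py itself): after `pip install .`, setuptools
# generates a `gmail_oauth2` executable on PATH from the console_scripts
# entry point above, dispatching to gmailapi_backend.bin.gmail_oauth2:main.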
| [
"setuptools.find_packages"
] | [((398, 413), 'setuptools.find_packages', 'find_packages', ([], {}), '()\n', (411, 413), False, 'from setuptools import setup, find_packages\n')] |
"""Script to ensure a configuration file exists."""
import argparse
import os
import openpeerpower.config as config_util
from openpeerpower.core import OpenPeerPower
# mypy: allow-untyped-calls, allow-untyped-defs
def run(args):
"""Handle ensure config commandline script."""
parser = argparse.ArgumentParser(
description=(
"Ensure a Open Peer Power config exists, creates one if necessary."
)
)
parser.add_argument(
"-c",
"--config",
metavar="path_to_config_dir",
default=config_util.get_default_config_dir(),
help="Directory that contains the Open Peer Power configuration",
)
parser.add_argument("--script", choices=["ensure_config"])
args = parser.parse_args()
config_dir = os.path.join(os.getcwd(), args.config)
# Test if configuration directory exists
if not os.path.isdir(config_dir):
print("Creating directory", config_dir)
os.makedirs(config_dir)
opp = OpenPeerPower()
opp.config.config_dir = config_dir
config_path = opp.loop.run_until_complete(async_run(opp))
print("Configuration file:", config_path)
return 0
async def async_run(opp):
"""Make sure config exists."""
path = await config_util.async_ensure_config_exists(opp)
await opp.async_stop(force=True)
return path
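# Invocation sketch (the script name is an assumption; the flags mirror the
# argparse definition above):
#
#     python ensure_config.py --config /path/to/config --script ensure_config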
| [
"os.makedirs",
"argparse.ArgumentParser",
"os.getcwd",
"openpeerpower.core.OpenPeerPower",
"os.path.isdir",
"openpeerpower.config.async_ensure_config_exists",
"openpeerpower.config.get_default_config_dir"
] | [((297, 406), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Ensure a Open Peer Power config exists, creates one if necessary."""'}), "(description=\n 'Ensure a Open Peer Power config exists, creates one if necessary.')\n", (320, 406), False, 'import argparse\n'), ((998, 1013), 'openpeerpower.core.OpenPeerPower', 'OpenPeerPower', ([], {}), '()\n', (1011, 1013), False, 'from openpeerpower.core import OpenPeerPower\n'), ((797, 808), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (806, 808), False, 'import os\n'), ((880, 905), 'os.path.isdir', 'os.path.isdir', (['config_dir'], {}), '(config_dir)\n', (893, 905), False, 'import os\n'), ((963, 986), 'os.makedirs', 'os.makedirs', (['config_dir'], {}), '(config_dir)\n', (974, 986), False, 'import os\n'), ((1254, 1297), 'openpeerpower.config.async_ensure_config_exists', 'config_util.async_ensure_config_exists', (['opp'], {}), '(opp)\n', (1292, 1297), True, 'import openpeerpower.config as config_util\n'), ((553, 589), 'openpeerpower.config.get_default_config_dir', 'config_util.get_default_config_dir', ([], {}), '()\n', (587, 589), True, 'import openpeerpower.config as config_util\n')] |
#!/bin/env python
import spacy
import json
from spacy import displacy
import unidecode
import pandas as pd
import numpy as np
import os
csv_source = "scripts/spacy_files/data/thesis_200_with_school.csv"
df = pd.read_csv(csv_source)
df = df[df['isScan']==False]
df = df.sort_values('isScan', ascending=False)
text1= "Escuela de Enfermería"
text2 = "ESCUELA DE ENFERMERIA"
with open("scripts/spacy_files/data/escuelas.json", "r") as fh:
    file = json.load(fh)
temp_list = []
for facultad in file:
temp_list.append(facultad['escuela'])
#print(facultad['escuela'])
escuelas = [item for sublist in temp_list for item in sublist] # make the list flat
#print(escuelas)
text1_u = unidecode.unidecode(text1)
text1_l_u = text1_u.lower()
text2_l_u = unidecode.unidecode(text2).lower()
print(text1_l_u, "<-->", text2_l_u)
if text1_l_u == text2_l_u:
print(text1, " is correct.")
def unaccent_list(accent_list):
unaccented_schools = []
for sch in accent_list:
unaccented_schools.append(unidecode.unidecode(sch).lower())
return unaccented_schools
def set_school_to_unaccent(escuelas):
escuelas = unaccent_list(escuelas)
return escuelas
def create_dictionary(schools):
myDict = dict((e,i) for i,e in enumerate(schools))
return myDict
def set_schools_accents(row, unaccented_dict, accented_dict):
    # Look up the unaccented form, then map its index back to the accented key.
    index = unaccented_dict.get(row.lower())
    key_list = list(accented_dict.keys())
    val_list = list(accented_dict.values())
    try:
        position = val_list.index(index)
        return key_list[position]
    except ValueError:
        return None
if __name__ == "__main__":
u_escuelas = set_school_to_unaccent(escuelas)
u_escuelas_dict = create_dictionary(u_escuelas)
escuelas_dict = create_dictionary(escuelas)
print(u_escuelas_dict)
print(escuelas_dict)
print(set_schools_accents("No school", u_escuelas_dict, escuelas_dict))
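# Behaviour sketch after the fix above (values are illustrative):
#
#     u_dict = create_dictionary(set_school_to_unaccent(escuelas))
#     a_dict = create_dictionary(escuelas)
#     set_schools_accents("ESCUELA DE ENFERMERIA", u_dict, a_dict)
#     # -> "Escuela de Enfermería" if present in escuelas, else None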
| [
"json.load",
"pandas.read_csv",
"unidecode.unidecode"
] | [((233, 256), 'pandas.read_csv', 'pd.read_csv', (['csv_source'], {}), '(csv_source)\n', (244, 256), True, 'import pandas as pd\n'), ((465, 480), 'json.load', 'json.load', (['file'], {}), '(file)\n', (474, 480), False, 'import json\n'), ((701, 727), 'unidecode.unidecode', 'unidecode.unidecode', (['text1'], {}), '(text1)\n', (720, 727), False, 'import unidecode\n'), ((768, 794), 'unidecode.unidecode', 'unidecode.unidecode', (['text2'], {}), '(text2)\n', (787, 794), False, 'import unidecode\n'), ((1035, 1059), 'unidecode.unidecode', 'unidecode.unidecode', (['sch'], {}), '(sch)\n', (1054, 1059), False, 'import unidecode\n')] |
from django.db import models
from django import forms
from audit_log.models.managers import AuditLog
# Create your models here.
class Port(models.Model):
name = models.CharField(max_length=250)
port = models.CharField(max_length=250)
description = models.TextField(blank=True)
audit_log = AuditLog()
#icon = models.ImageField(upload_to='images', blank=True)
def __str__(self):
return self.name
class FormPort(forms.ModelForm):
    class Meta:
model = Port | [
"django.db.models.TextField",
"audit_log.models.managers.AuditLog",
"django.db.models.CharField"
] | [((169, 201), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(250)'}), '(max_length=250)\n', (185, 201), False, 'from django.db import models\n'), ((213, 245), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(250)'}), '(max_length=250)\n', (229, 245), False, 'from django.db import models\n'), ((264, 292), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)'}), '(blank=True)\n', (280, 292), False, 'from django.db import models\n'), ((310, 320), 'audit_log.models.managers.AuditLog', 'AuditLog', ([], {}), '()\n', (318, 320), False, 'from audit_log.models.managers import AuditLog\n')] |
import tensorflow as tf
import json
import math
import cv2
import time
import argparse
import concurrent.futures
import posenet
import keyboard
import sys
import numpy as np
from threading import Thread
from slugify import slugify
parser = argparse.ArgumentParser()
parser.add_argument('--model', type=int, default=101)
parser.add_argument('--cam_id', type=int, default=0)
parser.add_argument('--cam_width', type=int, default=1280)
parser.add_argument('--cam_height', type=int, default=720)
parser.add_argument('--scale_factor', type=float, default=0.7125)
parser.add_argument('--file', type=str, default=None, help="Optionally use a video file instead of a live camera")
args = parser.parse_args()
def main():
# tf.config.threading.set_inter_op_parallelism_threads(0)
# tf.config.threading.set_intra_op_parallelism_threads(0)
# print(tf.config.threading.get_inter_op_parallelism_threads())
# print(tf.config.threading.get_intra_op_parallelism_threads())
with tf.compat.v1.Session() as sess:
model_cfg, model_outputs = posenet.load_model(args.model, sess)
output_stride = model_cfg['output_stride']
if args.file is not None:
cap = cv2.VideoCapture(args.file)
else:
cap = cv2.VideoCapture(args.cam_id)
cap.set(3, args.cam_width)
cap.set(4, args.cam_height)
start = time.time()
frame_count = 0
recording = True
# ret,frame1 = cap.read()
# ret,frame2 = cap.read()
file_content = []
while True:
# diff = cv2.absdiff(frame1,frame2)
# gray = cv2.cvtColor(diff, cv2.COLOR_BGR2GRAY)
# blur = cv2.GaussianBlur(gray,(15,15),0)
# _, thresh = cv2.threshold(blur,20,255,cv2.THRESH_BINARY)
# dilated = cv2.dilate(thresh,None, iterations=3)
# contours, _ = cv2.findContours(dilated, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
# # if(len(contours)>0):
# # print("One:")
# # print(dir(contours[0]))
# # print("One it is.")
# for contour in contours:
# (x,y,w,h) = cv2.boundingRect(contour)
# if(cv2.contourArea(contour)>400):
# continue
# cv2.rectangle(frame1,(x,y),(x+w,y+h),(0,255,0),2)
# # cv2.drawContours(frame1,contours, -1,(0,255,0),2)
# cv2.imshow("feed",frame1)
# frame1 = frame2
# ret, frame2 = cap.read()
input_image, display_image, output_scale = posenet.read_cap(cap, scale_factor=args.scale_factor, output_stride=output_stride)
heatmaps_result, offsets_result, displacement_fwd_result, displacement_bwd_result = sess.run(
model_outputs,
feed_dict={'image:0': input_image}
)
pose_scores, keypoint_scores, keypoint_coords = posenet.decode_multi.decode_multiple_poses(
heatmaps_result.squeeze(axis=0),
offsets_result.squeeze(axis=0),
displacement_fwd_result.squeeze(axis=0),
displacement_bwd_result.squeeze(axis=0),
output_stride=output_stride,
max_pose_detections=1,
min_pose_score=0.15)
keypoint_coords *= output_scale
# TODO this isn't particularly fast, use GL for drawing and display someday...
# print("\n ===================================== \n")
img = posenet.draw_skel_and_kp(
display_image, pose_scores, keypoint_scores, keypoint_coords,
min_pose_score=0.15, min_part_score=0.15)
cv2.imshow('posenet', img)
frame_count += 1
if(recording):
results = json.dumps({
"timestamp":time.time() - start,
"pose_scores":pose_scores.tolist(),
"keypoint_scores":keypoint_scores.tolist(),
"scores": keypoint_scores.size,
"keypoint_coords":normalize_poses(keypoint_coords),
"coords": keypoint_coords.size
})
file_content.append(results)
file_content = file_content[-30:]
if cv2.waitKey(1) & keyboard.is_pressed('w'):
print('you pressed w - service it was!')
time.sleep(0.5)
path = "collected/serves/"
filename = str(slugify("s-"+str(time.time()))+".txt")
x = Thread(target=save_to_file, args=(str(path+filename),str(file_content)))
x.start()
x.join()
file_content = []
if cv2.waitKey(1) & keyboard.is_pressed('d'):
print('you pressed d - forehand it was!')
time.sleep(0.5)
path = "collected/forehand/"
filename = str(slugify("f-"+str(time.time()))+".txt")
x = Thread(target=save_to_file, args=(str(path+filename),str(file_content)))
x.start()
x.join()
file_content = []
if cv2.waitKey(1) & keyboard.is_pressed('a'):
print('you pressed a - backhand it was!')
time.sleep(0.5)
path = "collected/backhand/"
filename = str(slugify("b-"+str(time.time()))+".txt")
x = Thread(target=save_to_file, args=(str(path+filename),str(file_content)))
x.start()
x.join()
file_content = []
if cv2.waitKey(1) & keyboard.is_pressed('q'):
print('you pressed q - quitting!')
cv2.destroyAllWindows()
break
print('Average FPS: ', frame_count / (time.time() - start))
return 0
def my_function(toPrint):
print(toPrint)
def save_to_file(filename, data):
    # The context manager guarantees the handle is closed even if write() fails.
    with open(filename, 'w') as f:
        f.write(data)
def find_middle(left,right):
x = (left[0]+right[0])/2.0
y = (left[1]+right[1])/2.0
return [x,y]
def find_distance(pointA,pointB):
dist = math.sqrt((pointB[0] - pointA[0])**2 + (pointB[1] - pointA[1])**2)
return dist
def normalize_poses(poses):
leftShoulderCords = poses[0][5]
rightShoulderCords = poses[0][6]
middleShoulderPoint = find_middle(leftShoulderCords,rightShoulderCords)
leftHipCords = poses[0][11]
rightHipCords = poses[0][12]
middleHipPoint = find_middle(leftHipCords,rightHipCords)
    armHipDistance = find_distance(middleHipPoint, middleShoulderPoint)
normalized = []
for pose in poses[0]:
normalized.append(
[(pose[0]-middleHipPoint[0])/armHipDistance,
(pose[1]-middleHipPoint[1])/armHipDistance]
)
return normalized
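# Worked example (illustrative numbers, not real pose data): with a hip
# midpoint of (100, 50) and a shoulder-to-hip distance of 20 px, a keypoint
# at (120, 60) maps to ((120 - 100) / 20, (60 - 50) / 20) = (1.0, 0.5), so
# every pose ends up in a body-relative frame independent of camera scale.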
if __name__ == "__main__":
main() | [
"argparse.ArgumentParser",
"posenet.draw_skel_and_kp",
"math.sqrt",
"posenet.read_cap",
"keyboard.is_pressed",
"cv2.imshow",
"time.sleep",
"posenet.load_model",
"cv2.waitKey",
"cv2.destroyAllWindows",
"cv2.VideoCapture",
"time.time",
"tensorflow.compat.v1.Session"
] | [((241, 266), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (264, 266), False, 'import argparse\n'), ((6386, 6456), 'math.sqrt', 'math.sqrt', (['((pointB[0] - pointA[0]) ** 2 + (pointB[1] - pointA[1]) ** 2)'], {}), '((pointB[0] - pointA[0]) ** 2 + (pointB[1] - pointA[1]) ** 2)\n', (6395, 6456), False, 'import math\n'), ((983, 1005), 'tensorflow.compat.v1.Session', 'tf.compat.v1.Session', ([], {}), '()\n', (1003, 1005), True, 'import tensorflow as tf\n'), ((1050, 1086), 'posenet.load_model', 'posenet.load_model', (['args.model', 'sess'], {}), '(args.model, sess)\n', (1068, 1086), False, 'import posenet\n'), ((1371, 1382), 'time.time', 'time.time', ([], {}), '()\n', (1380, 1382), False, 'import time\n'), ((1191, 1218), 'cv2.VideoCapture', 'cv2.VideoCapture', (['args.file'], {}), '(args.file)\n', (1207, 1218), False, 'import cv2\n'), ((1251, 1280), 'cv2.VideoCapture', 'cv2.VideoCapture', (['args.cam_id'], {}), '(args.cam_id)\n', (1267, 1280), False, 'import cv2\n'), ((2583, 2670), 'posenet.read_cap', 'posenet.read_cap', (['cap'], {'scale_factor': 'args.scale_factor', 'output_stride': 'output_stride'}), '(cap, scale_factor=args.scale_factor, output_stride=\n output_stride)\n', (2599, 2670), False, 'import posenet\n'), ((3616, 3748), 'posenet.draw_skel_and_kp', 'posenet.draw_skel_and_kp', (['display_image', 'pose_scores', 'keypoint_scores', 'keypoint_coords'], {'min_pose_score': '(0.15)', 'min_part_score': '(0.15)'}), '(display_image, pose_scores, keypoint_scores,\n keypoint_coords, min_pose_score=0.15, min_part_score=0.15)\n', (3640, 3748), False, 'import posenet\n'), ((3817, 3843), 'cv2.imshow', 'cv2.imshow', (['"""posenet"""', 'img'], {}), "('posenet', img)\n", (3827, 3843), False, 'import cv2\n'), ((4456, 4470), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (4467, 4470), False, 'import cv2\n'), ((4473, 4497), 'keyboard.is_pressed', 'keyboard.is_pressed', (['"""w"""'], {}), "('w')\n", (4492, 4497), False, 'import keyboard\n'), ((4572, 4587), 'time.sleep', 'time.sleep', (['(0.5)'], {}), '(0.5)\n', (4582, 4587), False, 'import time\n'), ((4904, 4918), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (4915, 4918), False, 'import cv2\n'), ((4921, 4945), 'keyboard.is_pressed', 'keyboard.is_pressed', (['"""d"""'], {}), "('d')\n", (4940, 4945), False, 'import keyboard\n'), ((5021, 5036), 'time.sleep', 'time.sleep', (['(0.5)'], {}), '(0.5)\n', (5031, 5036), False, 'import time\n'), ((5367, 5381), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (5378, 5381), False, 'import cv2\n'), ((5384, 5408), 'keyboard.is_pressed', 'keyboard.is_pressed', (['"""a"""'], {}), "('a')\n", (5403, 5408), False, 'import keyboard\n'), ((5484, 5499), 'time.sleep', 'time.sleep', (['(0.5)'], {}), '(0.5)\n', (5494, 5499), False, 'import time\n'), ((5831, 5845), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (5842, 5845), False, 'import cv2\n'), ((5848, 5872), 'keyboard.is_pressed', 'keyboard.is_pressed', (['"""q"""'], {}), "('q')\n", (5867, 5872), False, 'import keyboard\n'), ((5941, 5964), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (5962, 5964), False, 'import cv2\n'), ((6046, 6057), 'time.time', 'time.time', ([], {}), '()\n', (6055, 6057), False, 'import time\n'), ((4018, 4029), 'time.time', 'time.time', ([], {}), '()\n', (4027, 4029), False, 'import time\n'), ((4679, 4690), 'time.time', 'time.time', ([], {}), '()\n', (4688, 4690), False, 'import time\n'), ((5130, 5141), 'time.time', 'time.time', ([], {}), '()\n', (5139, 5141), False, 'import 
time\n'), ((5593, 5604), 'time.time', 'time.time', ([], {}), '()\n', (5602, 5604), False, 'import time\n')] |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# import datetime
import random
import uuid
import mock
from openstackclient.tests.unit import utils
from otcextensions.tests.unit.osclient import test_base
from otcextensions.sdk.dcs.v1 import backup
from otcextensions.sdk.dcs.v1 import config
from otcextensions.sdk.dcs.v1 import instance
from otcextensions.sdk.dcs.v1 import restore
from otcextensions.sdk.dcs.v1 import statistic
class TestDCS(utils.TestCommand):
def setUp(self):
super(TestDCS, self).setUp()
self.app.client_manager.dcs = mock.Mock()
self.client = self.app.client_manager.dcs
self.client.get_instance = mock.Mock()
self.client.find_instance = mock.Mock()
self.client.instances = mock.Mock()
self.client.delete_instance = mock.Mock()
self.client.update_instance = mock.Mock()
self.client.create_instance = mock.Mock()
self.client.extend_instance = mock.Mock()
class FakeInstance(test_base.Fake):
"""Fake one or more Instance"""
@classmethod
def generate(cls):
object_info = {
'name': 'group-' + uuid.uuid4().hex,
'id': 'id-' + uuid.uuid4().hex,
'description': 'SOME description',
'status': random.choice(['CREATING', 'CREATEFILED',
'RUNNING', 'ERROR', 'STARTING',
'RESTARTING', 'CLOSING', 'CLOSED',
'EXTENDING']),
'engine': uuid.uuid4().hex,
'capacity': random.randint(1, 100),
'ip': uuid.uuid4().hex,
'port': random.randint(1, 65535),
'resource_spec_code': random.choice(['dcs.single_node',
'dcs.master_standby',
'dcs.cluster'
]),
'engine_version': uuid.uuid4().hex,
'internal_version': uuid.uuid4().hex,
'charging_mode': random.randint(0, 10),
'vpc_id': uuid.uuid4().hex,
'vpc_name': uuid.uuid4().hex,
'subnet_id': uuid.uuid4().hex,
'subnet_name': uuid.uuid4().hex,
'subnet_cidr': uuid.uuid4().hex,
'security_group_id': uuid.uuid4().hex,
'security_group_name': uuid.uuid4().hex,
'created_at': uuid.uuid4().hex,
'error_code': uuid.uuid4().hex,
'product_id': random.choice(['OTC_DCS_SINGLE',
'OTC_DCS_MS',
'OTC_DCS_CL']),
'available_zones': uuid.uuid4().hex,
'max_memory': random.randint(0, 10),
'used_memory': random.randint(0, 10),
'user_id': uuid.uuid4().hex,
'user_name': uuid.uuid4().hex,
'order_id': uuid.uuid4().hex,
'maintain_begin': uuid.uuid4().hex,
'maintain_end': uuid.uuid4().hex,
}
obj = instance.Instance.existing(**object_info)
return obj
class FakeStatistic(test_base.Fake):
"""Fake one or more Statistic"""
@classmethod
def generate(cls):
object_info = {
'instance_id': 'instance_id-' + uuid.uuid4().hex,
'max_memory': random.randint(1, 65535),
'used_memory': random.randint(1, 65535),
'cmd_get_count': random.randint(1, 65535),
'cmd_set_count': random.randint(1, 65535),
'used_cpu': 'cpu-' + uuid.uuid4().hex,
'input_kbps': 'input-' + uuid.uuid4().hex,
'output_kbps': 'output-' + uuid.uuid4().hex,
}
obj = statistic.Statistic.existing(**object_info)
return obj
class FakeBackup(test_base.Fake):
"""Fake one or more Backup"""
@classmethod
def generate(cls):
object_info = {
'instance_id': 'instance_id-' + uuid.uuid4().hex,
'id': 'id-' + uuid.uuid4().hex,
'size': random.randint(1, 65535),
'period': uuid.uuid4().hex,
'description': uuid.uuid4().hex,
'progress': uuid.uuid4().hex,
'created_at': uuid.uuid4().hex,
'updated_at': uuid.uuid4().hex,
'type': uuid.uuid4().hex,
'name': uuid.uuid4().hex,
'error_code': uuid.uuid4().hex,
'is_restorable': True,
}
obj = backup.Backup.existing(**object_info)
return obj
class FakeRestore(test_base.Fake):
"""Fake one or more Restore"""
@classmethod
def generate(cls):
object_info = {
'instance_id': 'instance_id-' + uuid.uuid4().hex,
'max_memory': random.randint(1, 65535),
'used_memory': random.randint(1, 65535),
'cmd_get_count': random.randint(1, 65535),
'cmd_set_count': random.randint(1, 65535),
'used_cpu': 'cpu-' + uuid.uuid4().hex,
'input_kbps': 'input-' + uuid.uuid4().hex,
'output_kbps': 'output-' + uuid.uuid4().hex
}
obj = restore.Restore.existing(**object_info)
return obj
class FakeConfig(test_base.Fake):
"""Fake one or more Config"""
@classmethod
def generate(cls):
object_info = {
'instance_id': 'instance_id-' + uuid.uuid4().hex,
'id': uuid.uuid4().hex,
'name': uuid.uuid4().hex,
'value': uuid.uuid4().hex,
'value_type': uuid.uuid4().hex,
'value_range': uuid.uuid4().hex,
'default_value': uuid.uuid4().hex,
'description': uuid.uuid4().hex
}
obj = config.Config.existing(**object_info)
return obj
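# Fixture sketch (illustrative): each Fake*.generate() classmethod above
# returns one randomized SDK object, e.g.
#
#     inst = FakeInstance.generate()       # fake DCS instance
#     stats = FakeStatistic.generate()     # fake usage statistics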
| [
"otcextensions.sdk.dcs.v1.restore.Restore.existing",
"random.choice",
"otcextensions.sdk.dcs.v1.statistic.Statistic.existing",
"mock.Mock",
"otcextensions.sdk.dcs.v1.config.Config.existing",
"uuid.uuid4",
"otcextensions.sdk.dcs.v1.instance.Instance.existing",
"otcextensions.sdk.dcs.v1.backup.Backup.existing",
"random.randint"
] | [((1083, 1094), 'mock.Mock', 'mock.Mock', ([], {}), '()\n', (1092, 1094), False, 'import mock\n'), ((1181, 1192), 'mock.Mock', 'mock.Mock', ([], {}), '()\n', (1190, 1192), False, 'import mock\n'), ((1229, 1240), 'mock.Mock', 'mock.Mock', ([], {}), '()\n', (1238, 1240), False, 'import mock\n'), ((1273, 1284), 'mock.Mock', 'mock.Mock', ([], {}), '()\n', (1282, 1284), False, 'import mock\n'), ((1323, 1334), 'mock.Mock', 'mock.Mock', ([], {}), '()\n', (1332, 1334), False, 'import mock\n'), ((1373, 1384), 'mock.Mock', 'mock.Mock', ([], {}), '()\n', (1382, 1384), False, 'import mock\n'), ((1423, 1434), 'mock.Mock', 'mock.Mock', ([], {}), '()\n', (1432, 1434), False, 'import mock\n'), ((1473, 1484), 'mock.Mock', 'mock.Mock', ([], {}), '()\n', (1482, 1484), False, 'import mock\n'), ((3566, 3607), 'otcextensions.sdk.dcs.v1.instance.Instance.existing', 'instance.Instance.existing', ([], {}), '(**object_info)\n', (3592, 3607), False, 'from otcextensions.sdk.dcs.v1 import instance\n'), ((4233, 4276), 'otcextensions.sdk.dcs.v1.statistic.Statistic.existing', 'statistic.Statistic.existing', ([], {}), '(**object_info)\n', (4261, 4276), False, 'from otcextensions.sdk.dcs.v1 import statistic\n'), ((4978, 5015), 'otcextensions.sdk.dcs.v1.backup.Backup.existing', 'backup.Backup.existing', ([], {}), '(**object_info)\n', (5000, 5015), False, 'from otcextensions.sdk.dcs.v1 import backup\n'), ((5635, 5674), 'otcextensions.sdk.dcs.v1.restore.Restore.existing', 'restore.Restore.existing', ([], {}), '(**object_info)\n', (5659, 5674), False, 'from otcextensions.sdk.dcs.v1 import restore\n'), ((6208, 6245), 'otcextensions.sdk.dcs.v1.config.Config.existing', 'config.Config.existing', ([], {}), '(**object_info)\n', (6230, 6245), False, 'from otcextensions.sdk.dcs.v1 import config\n'), ((1786, 1912), 'random.choice', 'random.choice', (["['CREATING', 'CREATEFILED', 'RUNNING', 'ERROR', 'STARTING', 'RESTARTING',\n 'CLOSING', 'CLOSED', 'EXTENDING']"], {}), "(['CREATING', 'CREATEFILED', 'RUNNING', 'ERROR', 'STARTING',\n 'RESTARTING', 'CLOSING', 'CLOSED', 'EXTENDING'])\n", (1799, 1912), False, 'import random\n'), ((2085, 2107), 'random.randint', 'random.randint', (['(1)', '(100)'], {}), '(1, 100)\n', (2099, 2107), False, 'import random\n'), ((2165, 2189), 'random.randint', 'random.randint', (['(1)', '(65535)'], {}), '(1, 65535)\n', (2179, 2189), False, 'import random\n'), ((2225, 2296), 'random.choice', 'random.choice', (["['dcs.single_node', 'dcs.master_standby', 'dcs.cluster']"], {}), "(['dcs.single_node', 'dcs.master_standby', 'dcs.cluster'])\n", (2238, 2296), False, 'import random\n'), ((2573, 2594), 'random.randint', 'random.randint', (['(0)', '(10)'], {}), '(0, 10)\n', (2587, 2594), False, 'import random\n'), ((3029, 3090), 'random.choice', 'random.choice', (["['OTC_DCS_SINGLE', 'OTC_DCS_MS', 'OTC_DCS_CL']"], {}), "(['OTC_DCS_SINGLE', 'OTC_DCS_MS', 'OTC_DCS_CL'])\n", (3042, 3090), False, 'import random\n'), ((3249, 3270), 'random.randint', 'random.randint', (['(0)', '(10)'], {}), '(0, 10)\n', (3263, 3270), False, 'import random\n'), ((3299, 3320), 'random.randint', 'random.randint', (['(0)', '(10)'], {}), '(0, 10)\n', (3313, 3320), False, 'import random\n'), ((3856, 3880), 'random.randint', 'random.randint', (['(1)', '(65535)'], {}), '(1, 65535)\n', (3870, 3880), False, 'import random\n'), ((3909, 3933), 'random.randint', 'random.randint', (['(1)', '(65535)'], {}), '(1, 65535)\n', (3923, 3933), False, 'import random\n'), ((3964, 3988), 'random.randint', 'random.randint', (['(1)', '(65535)'], {}), '(1, 65535)\n', 
(3978, 3988), False, 'import random\n'), ((4019, 4043), 'random.randint', 'random.randint', (['(1)', '(65535)'], {}), '(1, 65535)\n', (4033, 4043), False, 'import random\n'), ((4557, 4581), 'random.randint', 'random.randint', (['(1)', '(65535)'], {}), '(1, 65535)\n', (4571, 4581), False, 'import random\n'), ((5260, 5284), 'random.randint', 'random.randint', (['(1)', '(65535)'], {}), '(1, 65535)\n', (5274, 5284), False, 'import random\n'), ((5313, 5337), 'random.randint', 'random.randint', (['(1)', '(65535)'], {}), '(1, 65535)\n', (5327, 5337), False, 'import random\n'), ((5368, 5392), 'random.randint', 'random.randint', (['(1)', '(65535)'], {}), '(1, 65535)\n', (5382, 5392), False, 'import random\n'), ((5423, 5447), 'random.randint', 'random.randint', (['(1)', '(65535)'], {}), '(1, 65535)\n', (5437, 5447), False, 'import random\n'), ((2043, 2055), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (2053, 2055), False, 'import uuid\n'), ((2127, 2139), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (2137, 2139), False, 'import uuid\n'), ((2476, 2488), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (2486, 2488), False, 'import uuid\n'), ((2526, 2538), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (2536, 2538), False, 'import uuid\n'), ((2618, 2630), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (2628, 2630), False, 'import uuid\n'), ((2660, 2672), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (2670, 2672), False, 'import uuid\n'), ((2703, 2715), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (2713, 2715), False, 'import uuid\n'), ((2748, 2760), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (2758, 2760), False, 'import uuid\n'), ((2793, 2805), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (2803, 2805), False, 'import uuid\n'), ((2844, 2856), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (2854, 2856), False, 'import uuid\n'), ((2897, 2909), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (2907, 2909), False, 'import uuid\n'), ((2941, 2953), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (2951, 2953), False, 'import uuid\n'), ((2985, 2997), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (2995, 2997), False, 'import uuid\n'), ((3205, 3217), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (3215, 3217), False, 'import uuid\n'), ((3345, 3357), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (3355, 3357), False, 'import uuid\n'), ((3388, 3400), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (3398, 3400), False, 'import uuid\n'), ((3430, 3442), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (3440, 3442), False, 'import uuid\n'), ((3478, 3490), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (3488, 3490), False, 'import uuid\n'), ((3524, 3536), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (3534, 3536), False, 'import uuid\n'), ((4605, 4617), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (4615, 4617), False, 'import uuid\n'), ((4650, 4662), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (4660, 4662), False, 'import uuid\n'), ((4692, 4704), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (4702, 4704), False, 'import uuid\n'), ((4736, 4748), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (4746, 4748), False, 'import uuid\n'), ((4780, 4792), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (4790, 4792), False, 'import uuid\n'), ((4818, 4830), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (4828, 4830), False, 'import uuid\n'), ((4856, 4868), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (4866, 4868), False, 'import uuid\n'), ((4900, 4912), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (4910, 4912), False, 'import uuid\n'), ((5909, 
5921), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (5919, 5921), False, 'import uuid\n'), ((5947, 5959), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (5957, 5959), False, 'import uuid\n'), ((5986, 5998), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (5996, 5998), False, 'import uuid\n'), ((6030, 6042), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (6040, 6042), False, 'import uuid\n'), ((6075, 6087), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (6085, 6087), False, 'import uuid\n'), ((6122, 6134), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (6132, 6134), False, 'import uuid\n'), ((6167, 6179), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (6177, 6179), False, 'import uuid\n'), ((1655, 1667), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (1665, 1667), False, 'import uuid\n'), ((1699, 1711), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (1709, 1711), False, 'import uuid\n'), ((3812, 3824), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (3822, 3824), False, 'import uuid\n'), ((4078, 4090), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (4088, 4090), False, 'import uuid\n'), ((4133, 4145), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (4143, 4145), False, 'import uuid\n'), ((4190, 4202), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (4200, 4202), False, 'import uuid\n'), ((4475, 4487), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (4485, 4487), False, 'import uuid\n'), ((4519, 4531), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (4529, 4531), False, 'import uuid\n'), ((5216, 5228), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (5226, 5228), False, 'import uuid\n'), ((5482, 5494), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (5492, 5494), False, 'import uuid\n'), ((5537, 5549), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (5547, 5549), False, 'import uuid\n'), ((5594, 5606), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (5604, 5606), False, 'import uuid\n'), ((5873, 5885), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (5883, 5885), False, 'import uuid\n')] |
# -*- coding:utf-8 -*-
# Copyright (c) 2020 Huawei Technologies Co.,Ltd.
#
# openGauss is licensed under Mulan PSL v2.
# You can use this software according to the terms
# and conditions of the Mulan PSL v2.
# You may obtain a copy of Mulan PSL v2 at:
#
# http://license.coscl.org.cn/MulanPSL2
#
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS,
# WITHOUT WARRANTIES OF ANY KIND,
# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
# See the Mulan PSL v2 for more details.
# ----------------------------------------------------------------------------
import subprocess
from gspylib.inspection.common.CheckItem import BaseItem
from gspylib.inspection.common.CheckResult import ResultStatus
class CheckPortConflict(BaseItem):
def __init__(self):
super(CheckPortConflict, self).__init__(self.__class__.__name__)
def doCheck(self):
cmd = "netstat -apn | grep 'tcp' " \
"| grep 'LISTEN'| awk -F ' ' '$4 ~ /25[0-9][0-9][0-9]/'"
(status, output) = subprocess.getstatusoutput(cmd)
if (status != 0):
self.result.rst = ResultStatus.NG
self.result.val = "Failed to excuted commands: %s\noutput:%s " % (
cmd, output)
else:
if (output.strip() == ""):
self.result.rst = ResultStatus.OK
self.result.val = "ports is normal"
else:
self.result.rst = ResultStatus.NG
self.result.val = output
self.result.raw = "checked ports: (25000-26000)\n" + output
def doSet(self):
pidList = []
cmd = "netstat -apn| grep 'tcp'" \
"| grep 'LISTEN'| awk -F ' ' '$4 ~ /25[0-9][0-9][0-9]/'" \
"| awk '{print $NF}'"
(status, output) = subprocess.getstatusoutput(cmd)
if (status == 0 and output != ""):
for line in output.split('\n'):
if (line.find('/') > 0):
pid = line.split('/')[0].strip()
if (pid.isdigit()):
pidList.append(pid)
if (pidList):
cmd = "kill -9"
for pid in pidList:
cmd += " %s" % pid
(status, output) = subprocess.getstatusoutput(cmd)
if (status != ""):
self.result.val = "Failed to kill process.Error:%s\n" % output
self.result.val += "The cmd is %s " % cmd
else:
self.result.val = \
"Successfully killed the process with occupies the port.\n"
| [
"subprocess.getstatusoutput"
] | [((1074, 1105), 'subprocess.getstatusoutput', 'subprocess.getstatusoutput', (['cmd'], {}), '(cmd)\n', (1100, 1105), False, 'import subprocess\n'), ((1848, 1879), 'subprocess.getstatusoutput', 'subprocess.getstatusoutput', (['cmd'], {}), '(cmd)\n', (1874, 1879), False, 'import subprocess\n'), ((2293, 2324), 'subprocess.getstatusoutput', 'subprocess.getstatusoutput', (['cmd'], {}), '(cmd)\n', (2319, 2324), False, 'import subprocess\n')] |
import subprocess
from .Genome_fasta import get_fasta
import matplotlib
matplotlib.use('Agg')
from matplotlib import pyplot as plt
import numpy as np
import pysam
def run(parser):
args = parser.parse_args()
bases,chrs = get_fasta(args.genome)
l={}
for c in chrs:
l[c]=len(bases[c])
chrs = set(chrs)
#p = subprocess.Popen('bamToBed -i '+args.bamfile,shell=True,stdout=subprocess.PIPE,stderr=subprocess.PIPE)
reads_num=0
reads_cg_num=[0,0,0] #CG,cg,Cg
cgnum_per_read=[]
with pysam.AlignmentFile(args.bamfile) as f:
for line in f:
#t = line.decode('utf-8').strip().split()
chr = line.reference_name#t[0]
start= line.reference_start
end= line.reference_end
strand= not line.is_reverse # True +strand; False -strand
if not chr in chrs: continue
end=min(end+1,l[chr])
reads_num+=1
if strand:#=='+':
cg=[bases[chr].count('CG',start,end)+bases[chr].count('Cg',start,end),bases[chr].count('cG',start,end)+bases[chr].count('cg',start,end)]
else:
cg=[bases[chr].count('GC',start,end)+bases[chr].count('gC',start,end),bases[chr].count('Gc',start,end)+bases[chr].count('gc',start,end)]
            # We need to consider the strand-specific situation:
            # on the '+' strand we count 'CG', but on the '-' strand we must count 'GC'.
            #print cg
# for i in range(1,ls):
# r2=read[i]
# r1=read[i-1]
# if 'G'==r2 or 'g'==r2:
# if 'C'==r1: cg[0]+=1
# if 'c'==r1: cg[1]+=1
#count = int(cg[0]>0)+int(cg[1]>0)
if cg[0]+cg[1]==0: continue
#print cg
cgnum_per_read.append(sum(cg))
if cg[0]>0 and cg[1]>0:
reads_cg_num[2]+=1
continue
if cg[0]>0:
reads_cg_num[0]+=1
else:
reads_cg_num[1]+=1
#print reads_cg_num
#print reads_num
plt.figure()
plt.subplot(211)
labels = ['noCG','NonRepeat CG','Repeat cg','CGcg mix']
colors = ['r','b','g','y']
explode=(0.05,0,0,0)
sizes=[reads_num-sum(reads_cg_num)]+reads_cg_num
patches,l_text,p_text = plt.pie(sizes,explode=explode,labels=labels,colors=colors, labeldistance = 1.1,autopct = '%3.1f%%',shadow = False, startangle = 90,pctdistance = 0.6)
plt.axis('equal')
#plt.legend(loc=2,bbox_to_anchor=(0, 0))
ax=plt.subplot(212)
t=np.zeros(20)
for num in cgnum_per_read:
t[min(num-1,19)]+=1
labels = list(map(str,np.arange(1,20)))+['20+']
#print(t)
t = (np.array(t).astype(float)/sum(reads_cg_num))*100
plt.bar(np.arange(20),t)
ax.set_xticks(np.arange(20))
ax.set_xticklabels(labels)
ax.set_ylabel('Percentage of reads including CG')
ax.set_xlabel('CG number per read')
plt.text(4,max(t)+4,'All reads including CG site: '+str(sum(reads_cg_num)))
#print args.output+'.pdf'
plt.savefig(args.output+'.pdf')
if __name__=="__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('-b','--bamfile',help="bam file name", metavar="FILE")
parser.add_argument('-g','--genome',help="Genome fasta file path")
parser.add_argument('-o','--output',help="pie figure's filename")
run(parser)
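# Example invocation (file names are assumptions):
#     python cg_reads.py -b sample.bam -g genome.fa -o cg_pie
# which writes cg_pie.pdf with the CG-class pie chart and per-read CG histogram.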
| [
"matplotlib.pyplot.savefig",
"argparse.ArgumentParser",
"matplotlib.use",
"matplotlib.pyplot.pie",
"pysam.AlignmentFile",
"numpy.array",
"matplotlib.pyplot.figure",
"numpy.zeros",
"matplotlib.pyplot.axis",
"matplotlib.pyplot.subplot",
"numpy.arange"
] | [((72, 93), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (86, 93), False, 'import matplotlib\n'), ((1984, 1996), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1994, 1996), True, 'from matplotlib import pyplot as plt\n'), ((2001, 2017), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(211)'], {}), '(211)\n', (2012, 2017), True, 'from matplotlib import pyplot as plt\n'), ((2215, 2365), 'matplotlib.pyplot.pie', 'plt.pie', (['sizes'], {'explode': 'explode', 'labels': 'labels', 'colors': 'colors', 'labeldistance': '(1.1)', 'autopct': '"""%3.1f%%"""', 'shadow': '(False)', 'startangle': '(90)', 'pctdistance': '(0.6)'}), "(sizes, explode=explode, labels=labels, colors=colors, labeldistance\n =1.1, autopct='%3.1f%%', shadow=False, startangle=90, pctdistance=0.6)\n", (2222, 2365), True, 'from matplotlib import pyplot as plt\n'), ((2370, 2387), 'matplotlib.pyplot.axis', 'plt.axis', (['"""equal"""'], {}), "('equal')\n", (2378, 2387), True, 'from matplotlib import pyplot as plt\n'), ((2440, 2456), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(212)'], {}), '(212)\n', (2451, 2456), True, 'from matplotlib import pyplot as plt\n'), ((2463, 2475), 'numpy.zeros', 'np.zeros', (['(20)'], {}), '(20)\n', (2471, 2475), True, 'import numpy as np\n'), ((2960, 2993), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(args.output + '.pdf')"], {}), "(args.output + '.pdf')\n", (2971, 2993), True, 'from matplotlib import pyplot as plt\n'), ((3051, 3076), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (3074, 3076), False, 'import argparse\n'), ((523, 556), 'pysam.AlignmentFile', 'pysam.AlignmentFile', (['args.bamfile'], {}), '(args.bamfile)\n', (542, 556), False, 'import pysam\n'), ((2671, 2684), 'numpy.arange', 'np.arange', (['(20)'], {}), '(20)\n', (2680, 2684), True, 'import numpy as np\n'), ((2706, 2719), 'numpy.arange', 'np.arange', (['(20)'], {}), '(20)\n', (2715, 2719), True, 'import numpy as np\n'), ((2561, 2577), 'numpy.arange', 'np.arange', (['(1)', '(20)'], {}), '(1, 20)\n', (2570, 2577), True, 'import numpy as np\n'), ((2610, 2621), 'numpy.array', 'np.array', (['t'], {}), '(t)\n', (2618, 2621), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# Copyright 2016 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A test for what happens when two waveforms are averaged together."""
from potty_oh import common
from potty_oh.wav_file import wav_file_context
from potty_oh.waveform import mix_down
from potty_oh.signal_generator import Generator
from potty_oh.music.pitch import Key
from potty_oh.music.interval import Interval
def main():
parser = common.get_cmd_line_parser(description=__doc__)
common.ParserArguments.filename(parser)
common.ParserArguments.length(parser)
common.ParserArguments.framerate(parser)
common.ParserArguments.set_defaults(parser, type='constant',
length=2.0)
args = parser.parse_args()
common.defaults.framerate = args.framerate
sg = Generator(length=args.length, verbose=args.debug)
key = Key()
unison = sg.sin_constant(key.interval(Interval.unison))
maj_third = sg.sin_constant(key.interval(Interval.major_third))
min_third = sg.sin_constant(key.interval(Interval.minor_third))
fifth = sg.sin_constant(key.interval(Interval.fifth))
powerchord = unison.mix_down(fifth)
maj_triad = powerchord.mix_down(maj_third)
min_triad = mix_down(powerchord, min_third)
with wav_file_context(args.filename) as fout:
fout.write_frames(powerchord.frames)
fout.write_frames(maj_triad.frames)
fout.write_frames(min_triad.frames)
return 0
if __name__ == "__main__":
common.call_main(main)
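# Note: the module docstring frames this as averaging two waveforms, and the
# two call styles above (`unison.mix_down(fifth)` vs. `mix_down(powerchord,
# min_third)`) suggest the method and the free function are interchangeable:
#     maj_triad == mix_down(powerchord, maj_third)   # conceptually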
| [
"potty_oh.common.get_cmd_line_parser",
"potty_oh.common.ParserArguments.length",
"potty_oh.common.ParserArguments.filename",
"potty_oh.waveform.mix_down",
"potty_oh.common.call_main",
"potty_oh.common.ParserArguments.set_defaults",
"potty_oh.signal_generator.Generator",
"potty_oh.common.ParserArguments.framerate",
"potty_oh.music.pitch.Key",
"potty_oh.wav_file.wav_file_context"
] | [((958, 1005), 'potty_oh.common.get_cmd_line_parser', 'common.get_cmd_line_parser', ([], {'description': '__doc__'}), '(description=__doc__)\n', (984, 1005), False, 'from potty_oh import common\n'), ((1010, 1049), 'potty_oh.common.ParserArguments.filename', 'common.ParserArguments.filename', (['parser'], {}), '(parser)\n', (1041, 1049), False, 'from potty_oh import common\n'), ((1054, 1091), 'potty_oh.common.ParserArguments.length', 'common.ParserArguments.length', (['parser'], {}), '(parser)\n', (1083, 1091), False, 'from potty_oh import common\n'), ((1096, 1136), 'potty_oh.common.ParserArguments.framerate', 'common.ParserArguments.framerate', (['parser'], {}), '(parser)\n', (1128, 1136), False, 'from potty_oh import common\n'), ((1141, 1213), 'potty_oh.common.ParserArguments.set_defaults', 'common.ParserArguments.set_defaults', (['parser'], {'type': '"""constant"""', 'length': '(2.0)'}), "(parser, type='constant', length=2.0)\n", (1176, 1213), False, 'from potty_oh import common\n'), ((1342, 1391), 'potty_oh.signal_generator.Generator', 'Generator', ([], {'length': 'args.length', 'verbose': 'args.debug'}), '(length=args.length, verbose=args.debug)\n', (1351, 1391), False, 'from potty_oh.signal_generator import Generator\n'), ((1403, 1408), 'potty_oh.music.pitch.Key', 'Key', ([], {}), '()\n', (1406, 1408), False, 'from potty_oh.music.pitch import Key\n'), ((1768, 1799), 'potty_oh.waveform.mix_down', 'mix_down', (['powerchord', 'min_third'], {}), '(powerchord, min_third)\n', (1776, 1799), False, 'from potty_oh.waveform import mix_down\n'), ((2031, 2053), 'potty_oh.common.call_main', 'common.call_main', (['main'], {}), '(main)\n', (2047, 2053), False, 'from potty_oh import common\n'), ((1810, 1841), 'potty_oh.wav_file.wav_file_context', 'wav_file_context', (['args.filename'], {}), '(args.filename)\n', (1826, 1841), False, 'from potty_oh.wav_file import wav_file_context\n')] |
from dataclasses import dataclass
from hrepr import H
from hrepr import hrepr as real_hrepr
from hrepr.h import styledir
from .common import one_test_per_assert
with open(f"{styledir}/hrepr.css", encoding="utf-8") as _f:
    css_hrepr = _f.read()
hrepr = real_hrepr.variant(fill_resources=False)
@dataclass
class Point:
x: int
y: int
class Opaque:
pass
def hshort(x, **kw):
return hrepr(x, max_depth=0, **kw)
@one_test_per_assert
def test_singletons():
assert hrepr(True) == H.span["hreprv-True"]("True")
assert hrepr(False) == H.span["hreprv-False"]("False")
assert hrepr(None) == H.span["hreprv-None"]("None")
@one_test_per_assert
def test_numbers():
assert hrepr(123) == H.span["hreprt-int"]("123")
assert hrepr(1.25) == H.span["hreprt-float"]("1.25")
@one_test_per_assert
def test_string():
assert hshort("hello") == H.span["hreprt-str"]("hello")
assert hrepr("3 spaces") == H.span["hreprt-str"]("3 spaces")
assert hrepr("hello this is a bit long") == H.span["hreprt-str"](
"hello this is a bit long"
)
assert hshort("hello this is a bit long") == H.span["hreprt-str"](
"hello this is a b..."
)
assert hshort("hello this is a bit long", string_cutoff=10) == H.span[
"hreprt-str"
]("hello t...")
assert hshort("hello this is a bit long", string_cutoff=5) == H.span[
"hreprt-str"
]("he...")
assert hshort("hello this is a bit long", string_cutoff=10000) == H.span[
"hreprt-str"
]("hello this is a bit long")
@one_test_per_assert
def test_bytes():
assert hrepr(b"hello") == H.span["hreprt-bytes"]("68656c6c6f")
assert hshort(b"hello") == H.span["hreprt-bytes"]("68656c6c6f")
assert hrepr(b"hello this is a bit long") == H.span["hreprt-bytes"](
"68656c6c6f2074686973206973206120626974206c6f6e67"
)
assert hshort(b"hello this is a bit long") == H.span["hreprt-bytes"](
"68656c6c6f2074686..."
)
def test_function():
assert hrepr(Opaque) == H.span["hreprk-class"](
H.span["hrepr-defn-key"]("class"),
" ",
H.span["hrepr-defn-name"]("Opaque"),
)
def test_structures():
for typ, o, c in (
(tuple, "(", ")"),
(list, "[", "]"),
(set, "{", "}"),
(frozenset, "{", "}"),
):
clsname = typ.__name__
assert hrepr(typ((1, 2))) == H.div[
f"hreprt-{clsname}", "hrepr-bracketed"
](
H.div["hrepr-open"](o),
H.div["hreprl-h", "hrepr-body"](
H.div(H.span["hreprt-int"]("1")),
H.div(H.span["hreprt-int"]("2")),
),
H.div["hrepr-close"](c),
)
def test_short_structures():
for val, o, c in (
((1, 2), "(", ")"),
([1, 2], "[", "]"),
({1, 2}, "{", "}"),
(frozenset({1, 2}), "{", "}"),
({"x": 1, "y": 2}, "{", "}"),
):
clsname = type(val).__name__
assert hrepr(val, max_depth=0) == H.div[
f"hreprt-{clsname}", "hrepr-bracketed"
](
H.div["hrepr-open"](o),
H.div["hreprl-s", "hrepr-body"](H.div("...")),
H.div["hrepr-close"](c),
)
def test_dict():
pt = {"x": 1, "y": 2}
assert hrepr(pt) == H.div["hreprt-dict", "hrepr-bracketed"](
H.div["hrepr-open"]("{"),
H.table["hrepr-body"](
H.tr(
H.td(H.span["hreprt-str"]("x")),
H.td["hrepr-delim"](": "),
H.td(H.span["hreprt-int"]("1")),
),
H.tr(
H.td(H.span["hreprt-str"]("y")),
H.td["hrepr-delim"](": "),
H.td(H.span["hreprt-int"]("2")),
),
),
H.div["hrepr-close"]("}"),
)
def test_dataclass():
pt = Point(1, 2)
assert hrepr(pt) == H.div["hreprt-Point", "hrepr-instance", "hreprl-v"](
H.div["hrepr-title"]("Point"),
H.table["hrepr-body"](
H.tr(
H.td(H.span["hreprt-symbol"]("x")),
H.td["hrepr-delim"]("="),
H.td(H.span["hreprt-int"]("1")),
),
H.tr(
H.td(H.span["hreprt-symbol"]("y")),
H.td["hrepr-delim"]("="),
H.td(H.span["hreprt-int"]("2")),
),
),
)
assert hrepr(pt, max_depth=0) == H.div[
"hreprt-Point", "hrepr-instance", "hreprl-s"
](
H.div["hrepr-title"]("Point"),
H.div["hreprl-s", "hrepr-body"](H.div("...")),
)
def test_tag():
tg = H.span["hello"](1, 2, H.b("there"))
assert hrepr(tg) == tg
def test_multiref():
li = [1, 2]
lili = [li, li]
assert hrepr(lili) == H.div["hreprt-list", "hrepr-bracketed"](
H.div["hrepr-open"]("["),
H.div["hreprl-h", "hrepr-body"](
H.div(
H.div["hrepr-refbox"](
H.span["hrepr-ref"]("#", 1, "="),
H.div["hreprt-list", "hrepr-bracketed"](
H.div["hrepr-open"]("["),
H.div["hreprl-h", "hrepr-body"](
H.div(H.span["hreprt-int"]("1")),
H.div(H.span["hreprt-int"]("2")),
),
H.div["hrepr-close"]("]"),
),
)
),
H.div(
H.div["hrepr-refbox"](
H.span["hrepr-ref"]("#", 1, "="),
H.div["hreprt-list", "hrepr-bracketed"](
H.div["hrepr-open"]("["),
H.div["hreprl-s", "hrepr-body"](H.div("..."),),
H.div["hrepr-close"]("]"),
),
)
),
),
H.div["hrepr-close"]("]"),
)
assert hrepr(lili, shortrefs=True) == H.div[
"hreprt-list", "hrepr-bracketed"
](
H.div["hrepr-open"]("["),
H.div["hreprl-h", "hrepr-body"](
H.div(
H.div["hrepr-refbox"](
H.span["hrepr-ref"]("#", 1, "="),
H.div["hreprt-list", "hrepr-bracketed"](
H.div["hrepr-open"]("["),
H.div["hreprl-h", "hrepr-body"](
H.div(H.span["hreprt-int"]("1")),
H.div(H.span["hreprt-int"]("2")),
),
H.div["hrepr-close"]("]"),
),
)
),
H.div(H.span["hrepr-ref"]("#", 1)),
),
H.div["hrepr-close"]("]"),
)
def test_recursive():
li = [1]
li.append(li)
assert hrepr(li) == H.div["hrepr-refbox"](
H.span["hrepr-ref"]("#", 1, "="),
H.div["hreprt-list", "hrepr-bracketed"](
H.div["hrepr-open"]("["),
H.div["hreprl-h", "hrepr-body"](
H.div(H.span["hreprt-int"]("1")),
H.div(
H.div["hrepr-refbox"](
H.span["hrepr-ref"]("⟳", 1, "="),
H.div["hreprt-list", "hrepr-bracketed"](
H.div["hrepr-open"]("["),
H.div["hreprl-s", "hrepr-body"](H.div("..."),),
H.div["hrepr-close"]("]"),
),
)
),
),
H.div["hrepr-close"]("]"),
),
)
assert hrepr(li, shortrefs=True) == H.div["hrepr-refbox"](
H.span["hrepr-ref"]("#", 1, "="),
H.div["hreprt-list", "hrepr-bracketed"](
H.div["hrepr-open"]("["),
H.div["hreprl-h", "hrepr-body"](
H.div(H.span["hreprt-int"]("1")),
H.div(H.span["hrepr-ref"]("⟳", 1)),
),
H.div["hrepr-close"]("]"),
),
)
def test_unsupported():
assert hshort(Opaque()) == H.span["hreprt-Opaque"](
"<", "tests.test_hrepr.Opaque", ">"
)
def test_as_page():
utf8 = H.meta(
{"http-equiv": "Content-type"}, content="text/html", charset="UTF-8"
)
assert real_hrepr.page(1) == H.inline(
H.raw("<!DOCTYPE html>"),
H.html(H.head(utf8, H.style(css_hrepr)), H.body(real_hrepr(1)),),
)
def test_hrepr_multiarg():
assert hrepr(1, 2) == H.inline(
H.span["hreprt-int"]("1"), H.span["hreprt-int"]("2"),
)
def test_preprocess():
assert hrepr(1, preprocess=lambda x, hrepr: x + 1) == H.span["hreprt-int"](
"2"
)
def test_postprocess():
assert hrepr(1, postprocess=lambda x, obj, hrepr: x["newclass"]) == H.span[
"newclass", "hreprt-int"
]("1")
| [
"hrepr.H.raw",
"hrepr.H.div",
"hrepr.hrepr",
"hrepr.H.meta",
"hrepr.hrepr.page",
"hrepr.H.b",
"hrepr.hrepr.variant",
"hrepr.H.style"
] | [((239, 279), 'hrepr.hrepr.variant', 'real_hrepr.variant', ([], {'fill_resources': '(False)'}), '(fill_resources=False)\n', (257, 279), True, 'from hrepr import hrepr as real_hrepr\n'), ((8050, 8126), 'hrepr.H.meta', 'H.meta', (["{'http-equiv': 'Content-type'}"], {'content': '"""text/html"""', 'charset': '"""UTF-8"""'}), "({'http-equiv': 'Content-type'}, content='text/html', charset='UTF-8')\n", (8056, 8126), False, 'from hrepr import H\n'), ((4588, 4600), 'hrepr.H.b', 'H.b', (['"""there"""'], {}), "('there')\n", (4591, 4600), False, 'from hrepr import H\n'), ((8152, 8170), 'hrepr.hrepr.page', 'real_hrepr.page', (['(1)'], {}), '(1)\n', (8167, 8170), True, 'from hrepr import hrepr as real_hrepr\n'), ((8192, 8216), 'hrepr.H.raw', 'H.raw', (['"""<!DOCTYPE html>"""'], {}), "('<!DOCTYPE html>')\n", (8197, 8216), False, 'from hrepr import H\n'), ((4518, 4530), 'hrepr.H.div', 'H.div', (['"""..."""'], {}), "('...')\n", (4523, 4530), False, 'from hrepr import H\n'), ((3135, 3147), 'hrepr.H.div', 'H.div', (['"""..."""'], {}), "('...')\n", (3140, 3147), False, 'from hrepr import H\n'), ((8246, 8264), 'hrepr.H.style', 'H.style', (['css_hrepr'], {}), '(css_hrepr)\n', (8253, 8264), False, 'from hrepr import H\n'), ((8274, 8287), 'hrepr.hrepr', 'real_hrepr', (['(1)'], {}), '(1)\n', (8284, 8287), True, 'from hrepr import hrepr as real_hrepr\n'), ((5648, 5660), 'hrepr.H.div', 'H.div', (['"""..."""'], {}), "('...')\n", (5653, 5660), False, 'from hrepr import H\n'), ((7264, 7276), 'hrepr.H.div', 'H.div', (['"""..."""'], {}), "('...')\n", (7269, 7276), False, 'from hrepr import H\n')] |
import numpy as np
from unittest import TestCase
import numpy.testing as npt
from distancematrix.util import diag_indices_of
from distancematrix.consumer.distance_matrix import DistanceMatrix
class TestDistanceMatrix(TestCase):
def setUp(self):
self.dist_matrix = np.array([
[8.67, 1.10, 1.77, 1.26, 1.91, 4.29, 6.32, 4.24, 4.64, 5.06, 6.41, 4.07, 4.67, 9.32, 5.09],
[4.33, 4.99, 0.14, 2.79, 2.10, 6.26, 9.40, 4.14, 5.53, 4.26, 8.21, 5.91, 6.83, 9.26, 6.19],
[0.16, 9.05, 1.35, 4.78, 7.01, 4.36, 5.24, 8.81, 7.90, 5.84, 8.90, 7.88, 3.37, 4.70, 6.94],
[0.94, 8.70, 3.87, 6.29, 0.32, 1.79, 5.80, 2.61, 1.43, 6.32, 1.62, 0.20, 2.28, 7.11, 2.15],
[9.90, 4.51, 2.11, 2.83, 5.52, 8.55, 6.90, 0.24, 1.58, 4.26, 8.75, 3.71, 9.93, 8.33, 0.38],
[7.30, 5.84, 9.63, 1.95, 3.76, 3.61, 9.42, 5.56, 5.09, 7.07, 1.90, 4.78, 1.06, 0.69, 3.67],
[2.17, 8.37, 3.99, 4.28, 4.37, 2.86, 8.61, 3.39, 8.37, 6.95, 6.57, 1.79, 7.40, 4.41, 7.64],
[6.26, 0.29, 6.44, 8.84, 1.24, 2.52, 6.25, 3.07, 5.55, 3.19, 8.16, 5.32, 9.01, 0.39, 9.],
[4.67, 8.88, 3.05, 3.06, 2.36, 8.34, 4.91, 5.46, 9.25, 9.78, 0.03, 5.64, 5.10, 3.58, 6.92],
[1.01, 0.91, 6.28, 7.79, 0.68, 5.50, 6.72, 5.11, 0.80, 9.30, 9.77, 4.71, 3.26, 7.29, 6.26]])
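        # The toy matrix is 10 rows by 15 columns; per the initialise() call in
        # mock_initialise and the shift_query/shift_series tests below, rows map
        # to the query axis and columns to the series axis.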
def mock_initialise(self, dm):
dm.initialise(1, self.dist_matrix.shape[0], self.dist_matrix.shape[1])
def test_process_diagonal(self):
dm = DistanceMatrix()
self.mock_initialise(dm)
for diag in range(-self.dist_matrix.shape[0] + 1, self.dist_matrix.shape[1]):
diag_ind = diag_indices_of(self.dist_matrix, diag)
dm.process_diagonal(diag, np.atleast_2d(self.dist_matrix[diag_ind]))
npt.assert_equal(dm.distance_matrix, self.dist_matrix)
def test_process_diagonal_partial_calculation(self):
dm = DistanceMatrix()
self.mock_initialise(dm)
correct = np.full_like(self.dist_matrix, np.nan, dtype=float)
for diag in range(-8, self.dist_matrix.shape[1], 3):
diag_ind = diag_indices_of(self.dist_matrix, diag)
dm.process_diagonal(diag, np.atleast_2d(self.dist_matrix[diag_ind]))
correct[diag_ind] = self.dist_matrix[diag_ind]
npt.assert_equal(dm.distance_matrix, correct)
def test_process_column(self):
dm = DistanceMatrix()
self.mock_initialise(dm)
for column in range(0, self.dist_matrix.shape[1]):
dm.process_column(column, np.atleast_2d(self.dist_matrix[:, column]))
npt.assert_equal(dm.distance_matrix, self.dist_matrix)
def test_process_column_partial_calculation(self):
dm = DistanceMatrix()
self.mock_initialise(dm)
correct = np.full_like(self.dist_matrix, np.nan, dtype=float)
for column in [2, 3, 4, 5, 10, 11, 12]:
dm.process_column(column, np.atleast_2d(self.dist_matrix[:, column]))
correct[:, column] = self.dist_matrix[:, column]
npt.assert_equal(dm.distance_matrix, correct)
def test_streaming_process_column(self):
dm = DistanceMatrix()
dm.initialise(1, 5, 5)
dm.process_column(0, np.atleast_2d(self.dist_matrix[0, 0]))
dm.process_column(1, np.atleast_2d(self.dist_matrix[:2, 1]))
expected = np.full((5, 5), np.nan)
expected[0, 0] = self.dist_matrix[0, 0]
expected[:2, 1] = self.dist_matrix[:2, 1]
npt.assert_equal(dm.distance_matrix, expected)
for column in range(0, 5):
dm.process_column(column, np.atleast_2d(self.dist_matrix[:5, :5][:, column]))
npt.assert_equal(dm.distance_matrix, self.dist_matrix[:5, :5])
dm.shift_query(1)
dm.shift_series(3)
correct = np.full((5, 5), np.nan)
correct[0:4, 0:2] = self.dist_matrix[1:5, 3:5]
npt.assert_equal(dm.distance_matrix, correct)
for column in range(0, 5):
dm.process_column(column, np.atleast_2d(self.dist_matrix[1:6, 3:8][:, column]))
npt.assert_equal(dm.distance_matrix, self.dist_matrix[1:6, 3:8])
dm.shift_query(2)
dm.shift_series(1)
dm.process_column(4, np.atleast_2d(self.dist_matrix[3:8, 8]))
correct = np.full((5, 5), np.nan)
correct[0:3, 0:4] = self.dist_matrix[3:6, 4:8]
correct[:, 4] = self.dist_matrix[3:8, 8]
npt.assert_equal(dm.distance_matrix, correct)
def test_streaming_process_diagonal(self):
dm = DistanceMatrix()
dm.initialise(1, 5, 5)
dm.process_diagonal(0, np.atleast_2d(self.dist_matrix[0, 0]))
diag_ind = diag_indices_of(self.dist_matrix[:3, :3], 1)
dm.process_diagonal(1, np.atleast_2d(np.atleast_2d(self.dist_matrix[diag_ind])))
expected = np.full((5, 5), np.nan)
expected[0, 0] = self.dist_matrix[0, 0]
expected[0, 1] = self.dist_matrix[0, 1]
expected[1, 2] = self.dist_matrix[1, 2]
npt.assert_equal(dm.distance_matrix, expected)
        for diag in range(-4, 5):
diag_ind = diag_indices_of(self.dist_matrix[:5, :5], diag)
dm.process_diagonal(diag, np.atleast_2d(self.dist_matrix[diag_ind]))
npt.assert_equal(dm.distance_matrix, self.dist_matrix[:5, :5])
dm.shift_query(2)
dm.shift_series(1)
expected = self.dist_matrix[2:7, 1:6].copy()
expected[-2:, :] = np.nan
expected[:, -1:] = np.nan
npt.assert_equal(dm.distance_matrix, expected)
        for diag in range(-4, 5):
diag_ind = diag_indices_of(self.dist_matrix[:5, :5], diag)
dm.process_diagonal(diag, np.atleast_2d(self.dist_matrix[diag_ind]))
npt.assert_equal(dm.distance_matrix, self.dist_matrix[:5, :5])
| [
"numpy.atleast_2d",
"numpy.testing.assert_equal",
"numpy.full_like",
"distancematrix.util.diag_indices_of",
"numpy.array",
"distancematrix.consumer.distance_matrix.DistanceMatrix",
"numpy.full"
] | [((289, 1251), 'numpy.array', 'np.array', (['[[8.67, 1.1, 1.77, 1.26, 1.91, 4.29, 6.32, 4.24, 4.64, 5.06, 6.41, 4.07, \n 4.67, 9.32, 5.09], [4.33, 4.99, 0.14, 2.79, 2.1, 6.26, 9.4, 4.14, 5.53,\n 4.26, 8.21, 5.91, 6.83, 9.26, 6.19], [0.16, 9.05, 1.35, 4.78, 7.01, \n 4.36, 5.24, 8.81, 7.9, 5.84, 8.9, 7.88, 3.37, 4.7, 6.94], [0.94, 8.7, \n 3.87, 6.29, 0.32, 1.79, 5.8, 2.61, 1.43, 6.32, 1.62, 0.2, 2.28, 7.11, \n 2.15], [9.9, 4.51, 2.11, 2.83, 5.52, 8.55, 6.9, 0.24, 1.58, 4.26, 8.75,\n 3.71, 9.93, 8.33, 0.38], [7.3, 5.84, 9.63, 1.95, 3.76, 3.61, 9.42, 5.56,\n 5.09, 7.07, 1.9, 4.78, 1.06, 0.69, 3.67], [2.17, 8.37, 3.99, 4.28, 4.37,\n 2.86, 8.61, 3.39, 8.37, 6.95, 6.57, 1.79, 7.4, 4.41, 7.64], [6.26, 0.29,\n 6.44, 8.84, 1.24, 2.52, 6.25, 3.07, 5.55, 3.19, 8.16, 5.32, 9.01, 0.39,\n 9.0], [4.67, 8.88, 3.05, 3.06, 2.36, 8.34, 4.91, 5.46, 9.25, 9.78, 0.03,\n 5.64, 5.1, 3.58, 6.92], [1.01, 0.91, 6.28, 7.79, 0.68, 5.5, 6.72, 5.11,\n 0.8, 9.3, 9.77, 4.71, 3.26, 7.29, 6.26]]'], {}), '([[8.67, 1.1, 1.77, 1.26, 1.91, 4.29, 6.32, 4.24, 4.64, 5.06, 6.41,\n 4.07, 4.67, 9.32, 5.09], [4.33, 4.99, 0.14, 2.79, 2.1, 6.26, 9.4, 4.14,\n 5.53, 4.26, 8.21, 5.91, 6.83, 9.26, 6.19], [0.16, 9.05, 1.35, 4.78, \n 7.01, 4.36, 5.24, 8.81, 7.9, 5.84, 8.9, 7.88, 3.37, 4.7, 6.94], [0.94, \n 8.7, 3.87, 6.29, 0.32, 1.79, 5.8, 2.61, 1.43, 6.32, 1.62, 0.2, 2.28, \n 7.11, 2.15], [9.9, 4.51, 2.11, 2.83, 5.52, 8.55, 6.9, 0.24, 1.58, 4.26,\n 8.75, 3.71, 9.93, 8.33, 0.38], [7.3, 5.84, 9.63, 1.95, 3.76, 3.61, 9.42,\n 5.56, 5.09, 7.07, 1.9, 4.78, 1.06, 0.69, 3.67], [2.17, 8.37, 3.99, 4.28,\n 4.37, 2.86, 8.61, 3.39, 8.37, 6.95, 6.57, 1.79, 7.4, 4.41, 7.64], [6.26,\n 0.29, 6.44, 8.84, 1.24, 2.52, 6.25, 3.07, 5.55, 3.19, 8.16, 5.32, 9.01,\n 0.39, 9.0], [4.67, 8.88, 3.05, 3.06, 2.36, 8.34, 4.91, 5.46, 9.25, 9.78,\n 0.03, 5.64, 5.1, 3.58, 6.92], [1.01, 0.91, 6.28, 7.79, 0.68, 5.5, 6.72,\n 5.11, 0.8, 9.3, 9.77, 4.71, 3.26, 7.29, 6.26]])\n', (297, 1251), True, 'import numpy as np\n'), ((1505, 1521), 'distancematrix.consumer.distance_matrix.DistanceMatrix', 'DistanceMatrix', ([], {}), '()\n', (1519, 1521), False, 'from distancematrix.consumer.distance_matrix import DistanceMatrix\n'), ((1795, 1849), 'numpy.testing.assert_equal', 'npt.assert_equal', (['dm.distance_matrix', 'self.dist_matrix'], {}), '(dm.distance_matrix, self.dist_matrix)\n', (1811, 1849), True, 'import numpy.testing as npt\n'), ((1921, 1937), 'distancematrix.consumer.distance_matrix.DistanceMatrix', 'DistanceMatrix', ([], {}), '()\n', (1935, 1937), False, 'from distancematrix.consumer.distance_matrix import DistanceMatrix\n'), ((1990, 2041), 'numpy.full_like', 'np.full_like', (['self.dist_matrix', 'np.nan'], {'dtype': 'float'}), '(self.dist_matrix, np.nan, dtype=float)\n', (2002, 2041), True, 'import numpy as np\n'), ((2316, 2361), 'numpy.testing.assert_equal', 'npt.assert_equal', (['dm.distance_matrix', 'correct'], {}), '(dm.distance_matrix, correct)\n', (2332, 2361), True, 'import numpy.testing as npt\n'), ((2411, 2427), 'distancematrix.consumer.distance_matrix.DistanceMatrix', 'DistanceMatrix', ([], {}), '()\n', (2425, 2427), False, 'from distancematrix.consumer.distance_matrix import DistanceMatrix\n'), ((2612, 2666), 'numpy.testing.assert_equal', 'npt.assert_equal', (['dm.distance_matrix', 'self.dist_matrix'], {}), '(dm.distance_matrix, self.dist_matrix)\n', (2628, 2666), True, 'import numpy.testing as npt\n'), ((2736, 2752), 'distancematrix.consumer.distance_matrix.DistanceMatrix', 'DistanceMatrix', ([], {}), '()\n', (2750, 2752), False, 'from distancematrix.consumer.distance_matrix import DistanceMatrix\n'), ((2805, 2856), 'numpy.full_like', 'np.full_like', (['self.dist_matrix', 'np.nan'], {'dtype': 'float'}), '(self.dist_matrix, np.nan, dtype=float)\n', (2817, 2856), True, 'import numpy as np\n'), ((3058, 3103), 'numpy.testing.assert_equal', 'npt.assert_equal', (['dm.distance_matrix', 'correct'], {}), '(dm.distance_matrix, correct)\n', (3074, 3103), True, 'import numpy.testing as npt\n'), ((3163, 3179), 'distancematrix.consumer.distance_matrix.DistanceMatrix', 'DistanceMatrix', ([], {}), '()\n', (3177, 3179), False, 'from distancematrix.consumer.distance_matrix import DistanceMatrix\n'), ((3368, 3391), 'numpy.full', 'np.full', (['(5, 5)', 'np.nan'], {}), '((5, 5), np.nan)\n', (3375, 3391), True, 'import numpy as np\n'), ((3498, 3544), 'numpy.testing.assert_equal', 'npt.assert_equal', (['dm.distance_matrix', 'expected'], {}), '(dm.distance_matrix, expected)\n', (3514, 3544), True, 'import numpy.testing as npt\n'), ((3679, 3741), 'numpy.testing.assert_equal', 'npt.assert_equal', (['dm.distance_matrix', 'self.dist_matrix[:5, :5]'], {}), '(dm.distance_matrix, self.dist_matrix[:5, :5])\n', (3695, 3741), True, 'import numpy.testing as npt\n'), ((3815, 3838), 'numpy.full', 'np.full', (['(5, 5)', 'np.nan'], {}), '((5, 5), np.nan)\n', (3822, 3838), True, 'import numpy as np\n'), ((3902, 3947), 'numpy.testing.assert_equal', 'npt.assert_equal', (['dm.distance_matrix', 'correct'], {}), '(dm.distance_matrix, correct)\n', (3918, 3947), True, 'import numpy.testing as npt\n'), ((4084, 4148), 'numpy.testing.assert_equal', 'npt.assert_equal', (['dm.distance_matrix', 'self.dist_matrix[1:6, 3:8]'], {}), '(dm.distance_matrix, self.dist_matrix[1:6, 3:8])\n', (4100, 4148), True, 'import numpy.testing as npt\n'), ((4292, 4315), 'numpy.full', 'np.full', (['(5, 5)', 'np.nan'], {}), '((5, 5), np.nan)\n', (4299, 4315), True, 'import numpy as np\n'), ((4428, 4473), 'numpy.testing.assert_equal', 'npt.assert_equal', (['dm.distance_matrix', 'correct'], {}), '(dm.distance_matrix, correct)\n', (4444, 4473), True, 'import numpy.testing as npt\n'), ((4535, 4551), 'distancematrix.consumer.distance_matrix.DistanceMatrix', 'DistanceMatrix', ([], {}), '()\n', (4549, 4551), False, 'from distancematrix.consumer.distance_matrix import DistanceMatrix\n'), ((4673, 4717), 'distancematrix.util.diag_indices_of', 'diag_indices_of', (['self.dist_matrix[:3, :3]', '(1)'], {}), '(self.dist_matrix[:3, :3], 1)\n', (4688, 4717), False, 'from distancematrix.util import diag_indices_of\n'), ((4826, 4849), 'numpy.full', 'np.full', (['(5, 5)', 'np.nan'], {}), '((5, 5), np.nan)\n', (4833, 4849), True, 'import numpy as np\n'), ((5002, 5048), 'numpy.testing.assert_equal', 'npt.assert_equal', (['dm.distance_matrix', 'expected'], {}), '(dm.distance_matrix, expected)\n', (5018, 5048), True, 'import numpy.testing as npt\n'), ((5244, 5306), 'numpy.testing.assert_equal', 'npt.assert_equal', (['dm.distance_matrix', 'self.dist_matrix[:5, :5]'], {}), '(dm.distance_matrix, self.dist_matrix[:5, :5])\n', (5260, 5306), True, 'import numpy.testing as npt\n'), ((5490, 5536), 'numpy.testing.assert_equal', 'npt.assert_equal', (['dm.distance_matrix', 'expected'], {}), '(dm.distance_matrix, expected)\n', (5506, 5536), True, 'import numpy.testing as npt\n'), ((5731, 5793), 'numpy.testing.assert_equal', 'npt.assert_equal', (['dm.distance_matrix', 'self.dist_matrix[:5, :5]'], {}), '(dm.distance_matrix, self.dist_matrix[:5, :5])\n', (5747, 5793), True, 'import numpy.testing as npt\n'), ((1665, 1704), 'distancematrix.util.diag_indices_of', 'diag_indices_of', (['self.dist_matrix', 'diag'], {}), '(self.dist_matrix, diag)\n', (1680, 1704), False, 'from distancematrix.util import diag_indices_of\n'), ((2127, 2166), 'distancematrix.util.diag_indices_of', 'diag_indices_of', (['self.dist_matrix', 'diag'], {}), '(self.dist_matrix, diag)\n', (2142, 2166), False, 'from distancematrix.util import diag_indices_of\n'), ((3241, 3278), 'numpy.atleast_2d', 'np.atleast_2d', (['self.dist_matrix[0, 0]'], {}), '(self.dist_matrix[0, 0])\n', (3254, 3278), True, 'import numpy as np\n'), ((3309, 3347), 'numpy.atleast_2d', 'np.atleast_2d', (['self.dist_matrix[:2, 1]'], {}), '(self.dist_matrix[:2, 1])\n', (3322, 3347), True, 'import numpy as np\n'), ((4232, 4271), 'numpy.atleast_2d', 'np.atleast_2d', (['self.dist_matrix[3:8, 8]'], {}), '(self.dist_matrix[3:8, 8])\n', (4245, 4271), True, 'import numpy as np\n'), ((4615, 4652), 'numpy.atleast_2d', 'np.atleast_2d', (['self.dist_matrix[0, 0]'], {}), '(self.dist_matrix[0, 0])\n', (4628, 4652), True, 'import numpy as np\n'), ((5106, 5153), 'distancematrix.util.diag_indices_of', 'diag_indices_of', (['self.dist_matrix[:5, :5]', 'diag'], {}), '(self.dist_matrix[:5, :5], diag)\n', (5121, 5153), False, 'from distancematrix.util import diag_indices_of\n'), ((5594, 5641), 'distancematrix.util.diag_indices_of', 'diag_indices_of', (['self.dist_matrix[:5, :5]', 'diag'], {}), '(self.dist_matrix[:5, :5], diag)\n', (5609, 5641), False, 'from distancematrix.util import diag_indices_of\n'), ((1743, 1784), 'numpy.atleast_2d', 'np.atleast_2d', (['self.dist_matrix[diag_ind]'], {}), '(self.dist_matrix[diag_ind])\n', (1756, 1784), True, 'import numpy as np\n'), ((2205, 2246), 'numpy.atleast_2d', 'np.atleast_2d', (['self.dist_matrix[diag_ind]'], {}), '(self.dist_matrix[diag_ind])\n', (2218, 2246), True, 'import numpy as np\n'), ((2559, 2601), 'numpy.atleast_2d', 'np.atleast_2d', (['self.dist_matrix[:, column]'], {}), '(self.dist_matrix[:, column])\n', (2572, 2601), True, 'import numpy as np\n'), ((2944, 2986), 'numpy.atleast_2d', 'np.atleast_2d', (['self.dist_matrix[:, column]'], {}), '(self.dist_matrix[:, column])\n', (2957, 2986), True, 'import numpy as np\n'), ((3619, 3669), 'numpy.atleast_2d', 'np.atleast_2d', (['self.dist_matrix[:5, :5][:, column]'], {}), '(self.dist_matrix[:5, :5][:, column])\n', (3632, 3669), True, 'import numpy as np\n'), ((4022, 4074), 'numpy.atleast_2d', 'np.atleast_2d', (['self.dist_matrix[1:6, 3:8][:, column]'], {}), '(self.dist_matrix[1:6, 3:8][:, column])\n', (4035, 4074), True, 'import numpy as np\n'), ((4763, 4804), 'numpy.atleast_2d', 'np.atleast_2d', (['self.dist_matrix[diag_ind]'], {}), '(self.dist_matrix[diag_ind])\n', (4776, 4804), True, 'import numpy as np\n'), ((5192, 5233), 'numpy.atleast_2d', 'np.atleast_2d', (['self.dist_matrix[diag_ind]'], {}), '(self.dist_matrix[diag_ind])\n', (5205, 5233), True, 'import numpy as np\n'), ((5680, 5721), 'numpy.atleast_2d', 'np.atleast_2d', (['self.dist_matrix[diag_ind]'], {}), '(self.dist_matrix[diag_ind])\n', (5693, 5721), True, 'import numpy as np\n')]
"""Constants file for Supervisor."""
from enum import Enum
from ipaddress import ip_network
from pathlib import Path
SUPERVISOR_VERSION = "DEV"
URL_HASSIO_ADDONS = "https://github.com/home-assistant/addons"
URL_HASSIO_APPARMOR = "https://version.home-assistant.io/apparmor.txt"
URL_HASSIO_VERSION = "https://version.home-assistant.io/{channel}.json"
SUPERVISOR_DATA = Path("/data")
FILE_HASSIO_ADDONS = Path(SUPERVISOR_DATA, "addons.json")
FILE_HASSIO_AUTH = Path(SUPERVISOR_DATA, "auth.json")
FILE_HASSIO_CONFIG = Path(SUPERVISOR_DATA, "config.json")
FILE_HASSIO_DISCOVERY = Path(SUPERVISOR_DATA, "discovery.json")
FILE_HASSIO_DOCKER = Path(SUPERVISOR_DATA, "docker.json")
FILE_HASSIO_HOMEASSISTANT = Path(SUPERVISOR_DATA, "homeassistant.json")
FILE_HASSIO_INGRESS = Path(SUPERVISOR_DATA, "ingress.json")
FILE_HASSIO_SERVICES = Path(SUPERVISOR_DATA, "services.json")
FILE_HASSIO_UPDATER = Path(SUPERVISOR_DATA, "updater.json")
FILE_SUFFIX_CONFIGURATION = [".yaml", ".yml", ".json"]
MACHINE_ID = Path("/etc/machine-id")
SOCKET_DBUS = Path("/run/dbus/system_bus_socket")
SOCKET_DOCKER = Path("/run/docker.sock")
RUN_SUPERVISOR_STATE = Path("/run/supervisor")
SYSTEMD_JOURNAL_PERSISTENT = Path("/var/log/journal")
SYSTEMD_JOURNAL_VOLATILE = Path("/run/log/journal")
DOCKER_NETWORK = "hassio"
DOCKER_NETWORK_MASK = ip_network("172.30.32.0/23")
DOCKER_NETWORK_RANGE = ip_network("172.30.33.0/24")
# This needs to match the dockerd --cpu-rt-runtime= argument.
DOCKER_CPU_RUNTIME_TOTAL = 950_000
# The rt runtimes are guarantees, hence we cannot allocate more
# time than available! Support up to 5 containers with equal time
# allocated.
# Note that the time is multiplied by CPU count. This means that
# a single container can schedule up to 950/5*4 = 760ms in RT priority
# on a quad core system.
DOCKER_CPU_RUNTIME_ALLOCATION = int(DOCKER_CPU_RUNTIME_TOTAL / 5)
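# Worked example of the allocation above (the microsecond unit is an
# assumption based on dockerd's --cpu-rt-runtime flag, which takes
# microseconds): 950_000 us / 5 containers = 190_000 us (190 ms) per
# container per period; times 4 cores on a quad core system = 760 ms.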
DNS_SUFFIX = "local.hass.io"
LABEL_ARCH = "io.hass.arch"
LABEL_MACHINE = "io.hass.machine"
LABEL_TYPE = "io.hass.type"
LABEL_VERSION = "io.hass.version"
META_ADDON = "addon"
META_HOMEASSISTANT = "homeassistant"
META_SUPERVISOR = "supervisor"
JSON_DATA = "data"
JSON_MESSAGE = "message"
JSON_RESULT = "result"
RESULT_ERROR = "error"
RESULT_OK = "ok"
CONTENT_TYPE_BINARY = "application/octet-stream"
CONTENT_TYPE_JSON = "application/json"
CONTENT_TYPE_PNG = "image/png"
CONTENT_TYPE_TAR = "application/tar"
CONTENT_TYPE_TEXT = "text/plain"
CONTENT_TYPE_URL = "application/x-www-form-urlencoded"
COOKIE_INGRESS = "ingress_session"
HEADER_TOKEN = "X-Supervisor-Token"
HEADER_TOKEN_OLD = "X-Hassio-Key"
ENV_TIME = "TZ"
ENV_TOKEN = "SUPERVISOR_TOKEN"
ENV_TOKEN_HASSIO = "HASSIO_TOKEN"
ENV_HOMEASSISTANT_REPOSITORY = "HOMEASSISTANT_REPOSITORY"
ENV_SUPERVISOR_DEV = "SUPERVISOR_DEV"
ENV_SUPERVISOR_MACHINE = "SUPERVISOR_MACHINE"
ENV_SUPERVISOR_NAME = "SUPERVISOR_NAME"
ENV_SUPERVISOR_SHARE = "SUPERVISOR_SHARE"
ENV_SUPERVISOR_CPU_RT = "SUPERVISOR_CPU_RT"
REQUEST_FROM = "HASSIO_FROM"
ATTR_ACCESS_TOKEN = "access_token"
ATTR_ACCESSPOINTS = "accesspoints"
ATTR_ACTIVE = "active"
ATTR_ADDON = "addon"
ATTR_ADDONS = "addons"
ATTR_ADDONS_CUSTOM_LIST = "addons_custom_list"
ATTR_ADDONS_REPOSITORIES = "addons_repositories"
ATTR_ADDRESS = "address"
ATTR_ADDRESS_DATA = "address-data"
ATTR_ADMIN = "admin"
ATTR_ADVANCED = "advanced"
ATTR_APPARMOR = "apparmor"
ATTR_APPLICATION = "application"
ATTR_ARCH = "arch"
ATTR_ARGS = "args"
ATTR_LABELS = "labels"
ATTR_AUDIO = "audio"
ATTR_AUDIO_INPUT = "audio_input"
ATTR_AUDIO_OUTPUT = "audio_output"
ATTR_AUTH = "auth"
ATTR_AUTH_API = "auth_api"
ATTR_AUTO_UPDATE = "auto_update"
ATTR_AVAILABLE = "available"
ATTR_BLK_READ = "blk_read"
ATTR_BLK_WRITE = "blk_write"
ATTR_BOARD = "board"
ATTR_BOOT = "boot"
ATTR_BRANCH = "branch"
ATTR_BUILD = "build"
ATTR_BUILD_FROM = "build_from"
ATTR_CARD = "card"
ATTR_CHANGELOG = "changelog"
ATTR_CHANNEL = "channel"
ATTR_CHASSIS = "chassis"
ATTR_CHECKS = "checks"
ATTR_CLI = "cli"
ATTR_CONFIG = "config"
ATTR_CONFIGURATION = "configuration"
ATTR_CONNECTED = "connected"
ATTR_CONNECTIONS = "connections"
ATTR_CONTAINERS = "containers"
ATTR_CPE = "cpe"
ATTR_CPU_PERCENT = "cpu_percent"
ATTR_CRYPTO = "crypto"
ATTR_DATA = "data"
ATTR_DATE = "date"
ATTR_DEBUG = "debug"
ATTR_DEBUG_BLOCK = "debug_block"
ATTR_DEFAULT = "default"
ATTR_DEPLOYMENT = "deployment"
ATTR_DESCRIPTON = "description"
ATTR_DETACHED = "detached"
ATTR_DEVICES = "devices"
ATTR_DEVICETREE = "devicetree"
ATTR_DIAGNOSTICS = "diagnostics"
ATTR_DISCOVERY = "discovery"
ATTR_DISK = "disk"
ATTR_DISK_FREE = "disk_free"
ATTR_DISK_LIFE_TIME = "disk_life_time"
ATTR_DISK_TOTAL = "disk_total"
ATTR_DISK_USED = "disk_used"
ATTR_DNS = "dns"
ATTR_DOCKER = "docker"
ATTR_DOCKER_API = "docker_api"
ATTR_DOCUMENTATION = "documentation"
ATTR_DOMAINS = "domains"
ATTR_ENABLE = "enable"
ATTR_ENABLED = "enabled"
ATTR_ENVIRONMENT = "environment"
ATTR_EVENT = "event"
ATTR_FEATURES = "features"
ATTR_FILENAME = "filename"
ATTR_FLAGS = "flags"
ATTR_FOLDERS = "folders"
ATTR_FREQUENCY = "frequency"
ATTR_FULL_ACCESS = "full_access"
ATTR_GATEWAY = "gateway"
ATTR_GPIO = "gpio"
ATTR_HASSIO_API = "hassio_api"
ATTR_HASSIO_ROLE = "hassio_role"
ATTR_HASSOS = "hassos"
ATTR_HEALTHY = "healthy"
ATTR_HOMEASSISTANT = "homeassistant"
ATTR_HOMEASSISTANT_API = "homeassistant_api"
ATTR_HOST = "host"
ATTR_HOST_DBUS = "host_dbus"
ATTR_HOST_INTERNET = "host_internet"
ATTR_HOST_IPC = "host_ipc"
ATTR_HOST_NETWORK = "host_network"
ATTR_HOST_PID = "host_pid"
ATTR_HOSTNAME = "hostname"
ATTR_ICON = "icon"
ATTR_ID = "id"
ATTR_IMAGE = "image"
ATTR_IMAGES = "images"
ATTR_INDEX = "index"
ATTR_INGRESS = "ingress"
ATTR_INGRESS_ENTRY = "ingress_entry"
ATTR_INGRESS_PANEL = "ingress_panel"
ATTR_INGRESS_PORT = "ingress_port"
ATTR_INGRESS_TOKEN = "ingress_token"
ATTR_INGRESS_URL = "ingress_url"
ATTR_INIT = "init"
ATTR_INITIALIZE = "initialize"
ATTR_INPUT = "input"
ATTR_INSTALLED = "installed"
ATTR_INTERFACE = "interface"
ATTR_INTERFACES = "interfaces"
ATTR_IP_ADDRESS = "ip_address"
ATTR_IPV4 = "ipv4"
ATTR_IPV6 = "ipv6"
ATTR_ISSUES = "issues"
ATTR_KERNEL = "kernel"
ATTR_KERNEL_MODULES = "kernel_modules"
ATTR_LAST_BOOT = "last_boot"
ATTR_LEGACY = "legacy"
ATTR_LOCALS = "locals"
ATTR_LOCATON = "location"
ATTR_LOGGING = "logging"
ATTR_LOGO = "logo"
ATTR_LONG_DESCRIPTION = "long_description"
ATTR_MAC = "mac"
ATTR_MACHINE = "machine"
ATTR_MAINTAINER = "maintainer"
ATTR_MAP = "map"
ATTR_MEMORY_LIMIT = "memory_limit"
ATTR_MEMORY_PERCENT = "memory_percent"
ATTR_MEMORY_USAGE = "memory_usage"
ATTR_MESSAGE = "message"
ATTR_METHOD = "method"
ATTR_MODE = "mode"
ATTR_MULTICAST = "multicast"
ATTR_NAME = "name"
ATTR_NAMESERVERS = "nameservers"
ATTR_NETWORK = "network"
ATTR_NETWORK_DESCRIPTION = "network_description"
ATTR_NETWORK_RX = "network_rx"
ATTR_NETWORK_TX = "network_tx"
ATTR_OBSERVER = "observer"
ATTR_OPERATING_SYSTEM = "operating_system"
ATTR_OPTIONS = "options"
ATTR_OTA = "ota"
ATTR_OUTPUT = "output"
ATTR_PANEL_ADMIN = "panel_admin"
ATTR_PANEL_ICON = "panel_icon"
ATTR_PANEL_TITLE = "panel_title"
ATTR_PANELS = "panels"
ATTR_PARENT = "parent"
ATTR_PASSWORD = "password"
ATTR_PORT = "port"
ATTR_PORTS = "ports"
ATTR_PORTS_DESCRIPTION = "ports_description"
ATTR_PREFIX = "prefix"
ATTR_PRIMARY = "primary"
ATTR_PRIORITY = "priority"
ATTR_PRIVILEGED = "privileged"
ATTR_PROTECTED = "protected"
ATTR_PROVIDERS = "providers"
ATTR_PSK = "psk"
ATTR_RATING = "rating"
ATTR_REALTIME = "realtime"
ATTR_REFRESH_TOKEN = "refresh_token"
ATTR_REGISTRIES = "registries"
ATTR_REGISTRY = "registry"
ATTR_REPOSITORIES = "repositories"
ATTR_REPOSITORY = "repository"
ATTR_SCHEMA = "schema"
ATTR_SECURITY = "security"
ATTR_SERIAL = "serial"
ATTR_SERVERS = "servers"
ATTR_SERVICE = "service"
ATTR_SERVICES = "services"
ATTR_SESSION = "session"
ATTR_SIGNAL = "signal"
ATTR_SIZE = "size"
ATTR_SLUG = "slug"
ATTR_SNAPSHOT_EXCLUDE = "snapshot_exclude"
ATTR_SNAPSHOTS = "snapshots"
ATTR_SOURCE = "source"
ATTR_SQUASH = "squash"
ATTR_SSD = "ssid"
ATTR_SSID = "ssid"
ATTR_SSL = "ssl"
ATTR_STAGE = "stage"
ATTR_STARTUP = "startup"
ATTR_STATE = "state"
ATTR_STATIC = "static"
ATTR_STDIN = "stdin"
ATTR_STORAGE = "storage"
ATTR_SUGGESTIONS = "suggestions"
ATTR_SUPERVISOR = "supervisor"
ATTR_SUPERVISOR_INTERNET = "supervisor_internet"
ATTR_SUPPORTED = "supported"
ATTR_SUPPORTED_ARCH = "supported_arch"
ATTR_SYSTEM = "system"
ATTR_JOURNALD = "journald"
ATTR_TIMEOUT = "timeout"
ATTR_TIMEZONE = "timezone"
ATTR_TITLE = "title"
ATTR_TMPFS = "tmpfs"
ATTR_TOTP = "totp"
ATTR_TRANSLATIONS = "translations"
ATTR_TYPE = "type"
ATTR_UART = "uart"
ATTR_UDEV = "udev"
ATTR_UNHEALTHY = "unhealthy"
ATTR_UNSAVED = "unsaved"
ATTR_UNSUPPORTED = "unsupported"
ATTR_UPDATE_AVAILABLE = "update_available"
ATTR_UPDATE_KEY = "update_key"
ATTR_URL = "url"
ATTR_USB = "usb"
ATTR_USER = "user"
ATTR_USERNAME = "username"
ATTR_UUID = "uuid"
ATTR_VALID = "valid"
ATTR_VALUE = "value"
ATTR_VERSION = "version"
ATTR_VERSION_LATEST = "version_latest"
ATTR_VIDEO = "video"
ATTR_VLAN = "vlan"
ATTR_VOLUME = "volume"
ATTR_VPN = "vpn"
ATTR_WAIT_BOOT = "wait_boot"
ATTR_WATCHDOG = "watchdog"
ATTR_WEBUI = "webui"
ATTR_WIFI = "wifi"
ATTR_CONTENT_TRUST = "content_trust"
ATTR_FORCE_SECURITY = "force_security"
PROVIDE_SERVICE = "provide"
NEED_SERVICE = "need"
WANT_SERVICE = "want"
MAP_CONFIG = "config"
MAP_SSL = "ssl"
MAP_ADDONS = "addons"
MAP_BACKUP = "backup"
MAP_SHARE = "share"
MAP_MEDIA = "media"
ARCH_ARMHF = "armhf"
ARCH_ARMV7 = "armv7"
ARCH_AARCH64 = "aarch64"
ARCH_AMD64 = "amd64"
ARCH_I386 = "i386"
ARCH_ALL = [ARCH_ARMHF, ARCH_ARMV7, ARCH_AARCH64, ARCH_AMD64, ARCH_I386]
REPOSITORY_CORE = "core"
REPOSITORY_LOCAL = "local"
FOLDER_HOMEASSISTANT = "homeassistant"
FOLDER_SHARE = "share"
FOLDER_ADDONS = "addons/local"
FOLDER_SSL = "ssl"
FOLDER_MEDIA = "media"
SNAPSHOT_FULL = "full"
SNAPSHOT_PARTIAL = "partial"
CRYPTO_AES128 = "aes128"
SECURITY_PROFILE = "profile"
SECURITY_DEFAULT = "default"
SECURITY_DISABLE = "disable"
ROLE_DEFAULT = "default"
ROLE_HOMEASSISTANT = "homeassistant"
ROLE_BACKUP = "backup"
ROLE_MANAGER = "manager"
ROLE_ADMIN = "admin"
ROLE_ALL = [ROLE_DEFAULT, ROLE_HOMEASSISTANT, ROLE_BACKUP, ROLE_MANAGER, ROLE_ADMIN]
class AddonBoot(str, Enum):
"""Boot mode for the add-on."""
AUTO = "auto"
MANUAL = "manual"
class AddonStartup(str, Enum):
"""Startup types of Add-on."""
INITIALIZE = "initialize"
SYSTEM = "system"
SERVICES = "services"
APPLICATION = "application"
ONCE = "once"
class AddonStage(str, Enum):
"""Stage types of add-on."""
STABLE = "stable"
EXPERIMENTAL = "experimental"
DEPRECATED = "deprecated"
class AddonState(str, Enum):
"""State of add-on."""
STARTED = "started"
STOPPED = "stopped"
UNKNOWN = "unknown"
ERROR = "error"
class UpdateChannel(str, Enum):
"""Core supported update channels."""
STABLE = "stable"
BETA = "beta"
DEV = "dev"
class CoreState(str, Enum):
"""Represent current loading state."""
INITIALIZE = "initialize"
SETUP = "setup"
STARTUP = "startup"
RUNNING = "running"
FREEZE = "freeze"
SHUTDOWN = "shutdown"
STOPPING = "stopping"
CLOSE = "close"
class LogLevel(str, Enum):
"""Logging level of system."""
DEBUG = "debug"
INFO = "info"
WARNING = "warning"
ERROR = "error"
CRITICAL = "critical"
class HostFeature(str, Enum):
"""Host feature."""
HASSOS = "hassos"
HOSTNAME = "hostname"
NETWORK = "network"
REBOOT = "reboot"
SERVICES = "services"
SHUTDOWN = "shutdown"
| [
"ipaddress.ip_network",
"pathlib.Path"
] | [((371, 384), 'pathlib.Path', 'Path', (['"""/data"""'], {}), "('/data')\n", (375, 384), False, 'from pathlib import Path\n'), ((407, 443), 'pathlib.Path', 'Path', (['SUPERVISOR_DATA', '"""addons.json"""'], {}), "(SUPERVISOR_DATA, 'addons.json')\n", (411, 443), False, 'from pathlib import Path\n'), ((463, 497), 'pathlib.Path', 'Path', (['SUPERVISOR_DATA', '"""auth.json"""'], {}), "(SUPERVISOR_DATA, 'auth.json')\n", (467, 497), False, 'from pathlib import Path\n'), ((519, 555), 'pathlib.Path', 'Path', (['SUPERVISOR_DATA', '"""config.json"""'], {}), "(SUPERVISOR_DATA, 'config.json')\n", (523, 555), False, 'from pathlib import Path\n'), ((580, 619), 'pathlib.Path', 'Path', (['SUPERVISOR_DATA', '"""discovery.json"""'], {}), "(SUPERVISOR_DATA, 'discovery.json')\n", (584, 619), False, 'from pathlib import Path\n'), ((641, 677), 'pathlib.Path', 'Path', (['SUPERVISOR_DATA', '"""docker.json"""'], {}), "(SUPERVISOR_DATA, 'docker.json')\n", (645, 677), False, 'from pathlib import Path\n'), ((706, 749), 'pathlib.Path', 'Path', (['SUPERVISOR_DATA', '"""homeassistant.json"""'], {}), "(SUPERVISOR_DATA, 'homeassistant.json')\n", (710, 749), False, 'from pathlib import Path\n'), ((772, 809), 'pathlib.Path', 'Path', (['SUPERVISOR_DATA', '"""ingress.json"""'], {}), "(SUPERVISOR_DATA, 'ingress.json')\n", (776, 809), False, 'from pathlib import Path\n'), ((833, 871), 'pathlib.Path', 'Path', (['SUPERVISOR_DATA', '"""services.json"""'], {}), "(SUPERVISOR_DATA, 'services.json')\n", (837, 871), False, 'from pathlib import Path\n'), ((894, 931), 'pathlib.Path', 'Path', (['SUPERVISOR_DATA', '"""updater.json"""'], {}), "(SUPERVISOR_DATA, 'updater.json')\n", (898, 931), False, 'from pathlib import Path\n'), ((1002, 1025), 'pathlib.Path', 'Path', (['"""/etc/machine-id"""'], {}), "('/etc/machine-id')\n", (1006, 1025), False, 'from pathlib import Path\n'), ((1040, 1075), 'pathlib.Path', 'Path', (['"""/run/dbus/system_bus_socket"""'], {}), "('/run/dbus/system_bus_socket')\n", (1044, 1075), False, 'from pathlib import Path\n'), ((1092, 1116), 'pathlib.Path', 'Path', (['"""/run/docker.sock"""'], {}), "('/run/docker.sock')\n", (1096, 1116), False, 'from pathlib import Path\n'), ((1140, 1163), 'pathlib.Path', 'Path', (['"""/run/supervisor"""'], {}), "('/run/supervisor')\n", (1144, 1163), False, 'from pathlib import Path\n'), ((1193, 1217), 'pathlib.Path', 'Path', (['"""/var/log/journal"""'], {}), "('/var/log/journal')\n", (1197, 1217), False, 'from pathlib import Path\n'), ((1245, 1269), 'pathlib.Path', 'Path', (['"""/run/log/journal"""'], {}), "('/run/log/journal')\n", (1249, 1269), False, 'from pathlib import Path\n'), ((1319, 1347), 'ipaddress.ip_network', 'ip_network', (['"""172.30.32.0/23"""'], {}), "('172.30.32.0/23')\n", (1329, 1347), False, 'from ipaddress import ip_network\n'), ((1371, 1399), 'ipaddress.ip_network', 'ip_network', (['"""172.30.33.0/24"""'], {}), "('172.30.33.0/24')\n", (1381, 1399), False, 'from ipaddress import ip_network\n')] |
import torch
import torch.nn as nn
import os
import torch.nn.functional as F
class LDS(nn.Module):
def __init__(self,):
super(LDS, self).__init__()
self.pool1 = nn.MaxPool2d(kernel_size=(2, 2), stride=2, padding=0)
self.pool2 = nn.MaxPool2d(kernel_size=(2, 2), stride=2, padding=0)
self.pool3 = nn.MaxPool2d(kernel_size=(2, 2), stride=2, padding=1)
def forward(self, x):
x_pool1 = self.pool1(x)
x_pool2 = self.pool2(x_pool1)
x_pool3 = self.pool3(x_pool2)
return x_pool3
class ConvBlock(nn.Module):
def __init__(self, in_planes, out_planes, kernel_size, stride=1, padding=0, dilation=1, groups=1, relu=True, bn=True, bias=False):
super(ConvBlock, self).__init__()
self.out_channels = out_planes
self.conv = nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size, stride=stride, padding=padding, dilation=dilation, groups=groups, bias=bias)
self.bn = nn.BatchNorm2d(out_planes, eps=1e-5, momentum=0.01, affine=True) if bn else None
self.relu = nn.ReLU(inplace=False) if relu else None
def forward(self, x):
x = self.conv(x)
if self.bn is not None:
x = self.bn(x)
if self.relu is not None:
x = self.relu(x)
return x
class LSN_init(nn.Module):
def __init__(self, in_planes, out_planes, stride=1):
super(LSN_init, self).__init__()
self.out_channels = out_planes
inter_planes = out_planes // 4
self.part_a = nn.Sequential(
ConvBlock(in_planes, inter_planes, kernel_size=(3, 3), stride=stride, padding=1),
ConvBlock(inter_planes, inter_planes, kernel_size=1, stride=1),
ConvBlock(inter_planes, inter_planes, kernel_size=(3, 3), stride=stride, padding=1)
)
self.part_b = ConvBlock(inter_planes, out_planes, kernel_size=1, stride=1, relu=False)
def forward(self, x):
out1 = self.part_a(x)
out2 = self.part_b(out1)
return out1, out2
class LSN_later(nn.Module):
def __init__(self, in_planes, out_planes, stride=1):
super(LSN_later, self).__init__()
self.out_channels = out_planes
inter_planes = out_planes // 4
self.part_a = ConvBlock(in_planes, inter_planes, kernel_size=(3, 3), stride=stride, padding=1)
self.part_b = ConvBlock(inter_planes, out_planes, kernel_size=1, stride=1, relu=False)
def forward(self, x):
out1 = self.part_a(x)
out2 = self.part_b(out1)
return out1, out2
class IBN(nn.Module):
def __init__(self, out_planes, bn=True):
super(IBN, self).__init__()
self.out_channels = out_planes
self.bn = nn.BatchNorm2d(out_planes, eps=1e-5, momentum=0.01, affine=True) if bn else None
def forward(self, x):
if self.bn is not None:
x = self.bn(x)
return x
class One_Three_Conv(nn.Module):
def __init__(self, in_planes, out_planes, stride=1):
super(One_Three_Conv, self).__init__()
self.out_channels = out_planes
inter_planes = in_planes // 4
self.single_branch = nn.Sequential(
ConvBlock(in_planes, inter_planes, kernel_size=1, stride=1),
ConvBlock(inter_planes, out_planes, kernel_size=(3, 3), stride=stride, padding=1, relu=False)
)
def forward(self, x):
out = self.single_branch(x)
return out
class Relu_Conv(nn.Module):
def __init__(self, in_planes, out_planes, stride=1):
super(Relu_Conv, self).__init__()
self.out_channels = out_planes
self.relu = nn.ReLU(inplace=False)
self.single_branch = nn.Sequential(
ConvBlock(in_planes, out_planes, kernel_size=(3, 3), stride=stride, padding=1)
)
def forward(self, x):
x = self.relu(x)
out = self.single_branch(x)
return out
class Ds_Conv(nn.Module):
def __init__(self, in_planes, out_planes, stride=1, padding=(1, 1)):
super(Ds_Conv, self).__init__()
self.out_channels = out_planes
self.single_branch = nn.Sequential(
ConvBlock(in_planes, out_planes, kernel_size=(3, 3), stride=stride, padding=padding, relu=False)
)
def forward(self, x):
out = self.single_branch(x)
return out
class LRFNet(nn.Module):
"""LRFNet for object detection
The network is based on the SSD architecture.
Each multibox layer branches into
1) conv2d for class conf scores
2) conv2d for localization predictions
3) associated priorbox layer to produce default bounding
boxes specific to the layer's feature map size.
Args:
phase: (string) Can be "test" or "train"
base: VGG16 layers for input, size of either 300 or 512
extras: extra layers that feed to multibox loc and conf layers
head: "multibox head" consists of loc and conf conv layers
"""
def __init__(self, phase, size, base, extras, head, num_classes):
super(LRFNet, self).__init__()
self.phase = phase
self.num_classes = num_classes
self.size = size
# vgg network
self.base = nn.ModuleList(base)
self.lds = LDS()
# convs for merging the lsn and ssd features
self.Norm1 = Relu_Conv(512, 512, stride=1)
self.Norm2 = Relu_Conv(1024, 1024, stride=1)
self.Norm3 = Relu_Conv(512, 512, stride=1)
self.Norm4 = Relu_Conv(256, 256, stride=1)
# convs for generate the lsn features
self.icn1 = LSN_init(3, 512, stride=1)
self.icn2 = LSN_later(128, 1024, stride=2)
self.icn3 = LSN_later(256, 512, stride=2)
# convs with s=2 to downsample the features
self.dsc1 = Ds_Conv(512, 1024, stride=2, padding=(1, 1))
self.dsc2 = Ds_Conv(1024, 512, stride=2, padding=(1, 1))
self.dsc3 = Ds_Conv(512, 256, stride=2, padding=(1, 1))
# convs to reduce the feature dimensions of current level
self.agent1 = ConvBlock(512, 256, kernel_size=1, stride=1)
self.agent2 = ConvBlock(1024, 512, kernel_size=1, stride=1)
self.agent3 = ConvBlock(512, 256, kernel_size=1, stride=1)
# convs to reduce the feature dimensions of other levels
self.proj1 = ConvBlock(1024, 128, kernel_size=1, stride=1)
self.proj2 = ConvBlock(512, 128, kernel_size=1, stride=1)
self.proj3 = ConvBlock(256, 128, kernel_size=1, stride=1)
# convs to reduce the feature dimensions of other levels
self.convert1 = ConvBlock(384, 256, kernel_size=1)
self.convert2 = ConvBlock(256, 512, kernel_size=1)
self.convert3 = ConvBlock(128, 256, kernel_size=1)
# convs to merge the features of the current and higher level features
self.merge1 = ConvBlock(512, 512, kernel_size=3, stride=1, padding=1)
self.merge2 = ConvBlock(1024, 1024, kernel_size=3, stride=1, padding=1)
self.merge3 = ConvBlock(512, 512, kernel_size=3, stride=1, padding=1)
self.ibn1 = IBN(512, bn=True)
self.ibn2 = IBN(1024, bn=True)
self.relu = nn.ReLU(inplace=False)
self.extras = nn.ModuleList(extras)
self.loc = nn.ModuleList(head[0])
self.conf = nn.ModuleList(head[1])
if self.phase == 'test':
            self.softmax = nn.Softmax(dim=-1)  # explicit dim; implicit softmax dim is deprecated
def forward(self, x):
"""Applies network layers and ops on input image(s) x.
Args:
x: input image or batch of images. Shape: [batch,3,300,300].
Return:
Depending on phase:
test:
list of concat outputs from:
1: softmax layers, Shape: [batch*num_priors,num_classes]
2: localization layers, Shape: [batch,num_priors*4]
3: priorbox layers, Shape: [2,num_priors*4]
train:
list of concat outputs from:
1: confidence layers, Shape: [batch*num_priors,num_classes]
2: localization layers, Shape: [batch,num_priors*4]
3: priorbox layers, Shape: [2,num_priors*4]
"""
sources = list()
loc = list()
conf = list()
new_sources = list()
# apply lds to the initial image
x_pool = self.lds(x)
# apply vgg up to conv4_3
for k in range(22):
x = self.base[k](x)
conv4_3_bn = self.ibn1(x)
x_pool1_skip, x_pool1_icn = self.icn1(x_pool)
s = self.Norm1(conv4_3_bn * x_pool1_icn)
# apply vgg up to fc7
for k in range(22, 34):
x = self.base[k](x)
conv7_bn = self.ibn2(x)
x_pool2_skip, x_pool2_icn = self.icn2(x_pool1_skip)
p = self.Norm2(self.dsc1(s) + conv7_bn * x_pool2_icn)
x = self.base[34](x)
# apply extra layers and cache source layer outputs
for k, v in enumerate(self.extras):
x = v(x)
if k == 0:
x_pool3_skip, x_pool3_icn = self.icn3(x_pool2_skip)
w = self.Norm3(self.dsc2(p) + x * x_pool3_icn)
elif k == 2:
q = self.Norm4(self.dsc3(w) + x)
sources.append(q)
elif k == 5 or k == 7:
sources.append(x)
else:
pass
# project the forward features into lower dimension.
tmp1 = self.proj1(p)
tmp2 = self.proj2(w)
tmp3 = self.proj3(q)
# The conv4_3 level
proj1 = F.upsample(tmp1, size=(38, 38), mode='bilinear')
proj2 = F.upsample(tmp2, size=(38, 38), mode='bilinear')
proj3 = F.upsample(tmp3, size=(38, 38), mode='bilinear')
proj = torch.cat([proj1, proj2, proj3], dim=1)
agent1 = self.agent1(s)
convert1 = self.convert1(proj)
pred1 = torch.cat([agent1, convert1], dim=1)
pred1 = self.merge1(pred1)
new_sources.append(pred1)
# The fc_7 level
proj2 = F.upsample(tmp2, size=(19, 19), mode='bilinear')
proj3 = F.upsample(tmp3, size=(19, 19), mode='bilinear')
proj = torch.cat([proj2, proj3], dim=1)
agent2 = self.agent2(p)
convert2 = self.convert2(proj)
pred2 = torch.cat([agent2, convert2], dim=1)
pred2 = self.merge2(pred2)
new_sources.append(pred2)
# The conv8 level
proj3 = F.upsample(tmp3, size=(10, 10), mode='bilinear')
proj = proj3
agent3 = self.agent3(w)
convert3 = self.convert3(proj)
pred3 = torch.cat([agent3, convert3], dim=1)
pred3 = self.merge3(pred3)
new_sources.append(pred3)
for prediction in sources:
new_sources.append(prediction)
# apply multibox head to source layers
for (x, l, c) in zip(new_sources, self.loc, self.conf):
loc.append(l(x).permute(0, 2, 3, 1).contiguous())
conf.append(c(x).permute(0, 2, 3, 1).contiguous())
loc = torch.cat([o.view(o.size(0), -1) for o in loc], 1)
conf = torch.cat([o.view(o.size(0), -1) for o in conf], 1)
if self.phase == "test":
output = (
loc.view(loc.size(0), -1, 4), # loc preds
self.softmax(conf.view(-1, self.num_classes)), # conf preds
)
else:
output = (
loc.view(loc.size(0), -1, 4),
conf.view(conf.size(0), -1, self.num_classes),
)
return output
def load_weights(self, base_file):
other, ext = os.path.splitext(base_file)
        if ext in ('.pkl', '.pth'):
print('Loading weights into state dict...')
self.load_state_dict(torch.load(base_file))
print('Finished!')
else:
print('Sorry only .pth and .pkl files supported.')
def vgg(cfg, i, batch_norm=False):
layers = []
in_channels = i
for v in cfg:
if v == 'M':
layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
elif v == 'C':
layers += [nn.MaxPool2d(kernel_size=2, stride=2, ceil_mode=True)]
else:
conv2d = nn.Conv2d(in_channels, v, kernel_size=3, padding=1)
if batch_norm:
layers += [conv2d, nn.BatchNorm2d(v), nn.ReLU(inplace=False)]
else:
layers += [conv2d, nn.ReLU(inplace=False)]
in_channels = v
pool5 = nn.MaxPool2d(kernel_size=3, stride=1, padding=1)
conv6 = nn.Conv2d(512, 1024, kernel_size=3, padding=6, dilation=6)
conv7 = nn.Conv2d(1024, 1024, kernel_size=1)
layers += [pool5, conv6,
nn.ReLU(inplace=False), conv7, nn.ReLU(inplace=False)]
return layers
base = {
'300': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'C', 512, 512, 512, 'M',
512, 512, 512]}
def add_extras(size, cfg, i, batch_norm=False):
# Extra layers added to VGG for feature scaling
layers = []
in_channels = i
flag = False
for k, v in enumerate(cfg):
if in_channels != 'S':
if v == 'S':
if in_channels == 256 and size == 512:
layers += [One_Three_Conv(in_channels, cfg[k+1], stride=2), nn.ReLU(inplace=False)]
else:
layers += [One_Three_Conv(in_channels, cfg[k+1], stride=2), nn.ReLU(inplace=False)]
in_channels = v
layers += [ConvBlock(256, 128, kernel_size=1,stride=1)]
layers += [ConvBlock(128, 256, kernel_size=3,stride=1)]
layers += [ConvBlock(256, 128, kernel_size=1,stride=1)]
layers += [ConvBlock(128, 256, kernel_size=3,stride=1)]
return layers
extras = {
'300': [1024, 'S', 512, 'S', 256]}
def multibox(size, vgg, extra_layers, cfg, num_classes):
loc_layers = []
conf_layers = []
vgg_source = [1, -2]
for k, v in enumerate(vgg_source):
if k == 0:
loc_layers += [nn.Conv2d(512,
cfg[k] * 4, kernel_size=3, padding=1)]
conf_layers +=[nn.Conv2d(512,
cfg[k] * num_classes, kernel_size=3, padding=1)]
else:
loc_layers += [nn.Conv2d(vgg[v].out_channels,
cfg[k] * 4, kernel_size=3, padding=1)]
conf_layers += [nn.Conv2d(vgg[v].out_channels,
cfg[k] * num_classes, kernel_size=3, padding=1)]
i = 2
indicator = 3
for k, v in enumerate(extra_layers):
if (k < indicator+1 and k % 2 == 0) or (k > indicator+1 and k % 2 != 0):
loc_layers += [nn.Conv2d(v.out_channels, cfg[i]
* 4, kernel_size=3, padding=1)]
conf_layers += [nn.Conv2d(v.out_channels, cfg[i]
* num_classes, kernel_size=3, padding=1)]
i += 1
return vgg, extra_layers, (loc_layers, conf_layers)
mbox = {
'300': [6, 6, 6, 6, 4, 4]}
def build_net(phase, size=300, num_classes=81):
if size != 300:
print("Error: The input image size is not supported!")
return
return LRFNet(phase, size, *multibox(size, vgg(base[str(size)], 3),
add_extras(size, extras[str(size)], 1024),
mbox[str(size)], num_classes), num_classes)
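# Minimal usage sketch (illustrative only; assumes a working torch install and
# follows the 300x300 input shape documented in LRFNet.forward):
#   net = build_net("train", size=300, num_classes=81)
#   loc_preds, conf_preds = net(torch.randn(1, 3, 300, 300))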
| [
"torch.nn.functional.upsample",
"torch.nn.BatchNorm2d",
"torch.nn.ReLU",
"torch.nn.Softmax",
"torch.nn.ModuleList",
"torch.load",
"os.path.splitext",
"torch.nn.Conv2d",
"torch.nn.MaxPool2d",
"torch.cat"
] | [((12471, 12519), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', ([], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)'}), '(kernel_size=3, stride=1, padding=1)\n', (12483, 12519), True, 'import torch.nn as nn\n'), ((12532, 12590), 'torch.nn.Conv2d', 'nn.Conv2d', (['(512)', '(1024)'], {'kernel_size': '(3)', 'padding': '(6)', 'dilation': '(6)'}), '(512, 1024, kernel_size=3, padding=6, dilation=6)\n', (12541, 12590), True, 'import torch.nn as nn\n'), ((12603, 12639), 'torch.nn.Conv2d', 'nn.Conv2d', (['(1024)', '(1024)'], {'kernel_size': '(1)'}), '(1024, 1024, kernel_size=1)\n', (12612, 12639), True, 'import torch.nn as nn\n'), ((183, 236), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', ([], {'kernel_size': '(2, 2)', 'stride': '(2)', 'padding': '(0)'}), '(kernel_size=(2, 2), stride=2, padding=0)\n', (195, 236), True, 'import torch.nn as nn\n'), ((258, 311), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', ([], {'kernel_size': '(2, 2)', 'stride': '(2)', 'padding': '(0)'}), '(kernel_size=(2, 2), stride=2, padding=0)\n', (270, 311), True, 'import torch.nn as nn\n'), ((333, 386), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', ([], {'kernel_size': '(2, 2)', 'stride': '(2)', 'padding': '(1)'}), '(kernel_size=(2, 2), stride=2, padding=1)\n', (345, 386), True, 'import torch.nn as nn\n'), ((811, 949), 'torch.nn.Conv2d', 'nn.Conv2d', (['in_planes', 'out_planes'], {'kernel_size': 'kernel_size', 'stride': 'stride', 'padding': 'padding', 'dilation': 'dilation', 'groups': 'groups', 'bias': 'bias'}), '(in_planes, out_planes, kernel_size=kernel_size, stride=stride,\n padding=padding, dilation=dilation, groups=groups, bias=bias)\n', (820, 949), True, 'import torch.nn as nn\n'), ((3648, 3670), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(False)'}), '(inplace=False)\n', (3655, 3670), True, 'import torch.nn as nn\n'), ((5223, 5242), 'torch.nn.ModuleList', 'nn.ModuleList', (['base'], {}), '(base)\n', (5236, 5242), True, 'import torch.nn as nn\n'), ((7163, 7185), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(False)'}), '(inplace=False)\n', (7170, 7185), True, 'import torch.nn as nn\n'), ((7209, 7230), 'torch.nn.ModuleList', 'nn.ModuleList', (['extras'], {}), '(extras)\n', (7222, 7230), True, 'import torch.nn as nn\n'), ((7250, 7272), 'torch.nn.ModuleList', 'nn.ModuleList', (['head[0]'], {}), '(head[0])\n', (7263, 7272), True, 'import torch.nn as nn\n'), ((7293, 7315), 'torch.nn.ModuleList', 'nn.ModuleList', (['head[1]'], {}), '(head[1])\n', (7306, 7315), True, 'import torch.nn as nn\n'), ((9554, 9602), 'torch.nn.functional.upsample', 'F.upsample', (['tmp1'], {'size': '(38, 38)', 'mode': '"""bilinear"""'}), "(tmp1, size=(38, 38), mode='bilinear')\n", (9564, 9602), True, 'import torch.nn.functional as F\n'), ((9619, 9667), 'torch.nn.functional.upsample', 'F.upsample', (['tmp2'], {'size': '(38, 38)', 'mode': '"""bilinear"""'}), "(tmp2, size=(38, 38), mode='bilinear')\n", (9629, 9667), True, 'import torch.nn.functional as F\n'), ((9684, 9732), 'torch.nn.functional.upsample', 'F.upsample', (['tmp3'], {'size': '(38, 38)', 'mode': '"""bilinear"""'}), "(tmp3, size=(38, 38), mode='bilinear')\n", (9694, 9732), True, 'import torch.nn.functional as F\n'), ((9748, 9787), 'torch.cat', 'torch.cat', (['[proj1, proj2, proj3]'], {'dim': '(1)'}), '([proj1, proj2, proj3], dim=1)\n', (9757, 9787), False, 'import torch\n'), ((9876, 9912), 'torch.cat', 'torch.cat', (['[agent1, convert1]'], {'dim': '(1)'}), '([agent1, convert1], dim=1)\n', (9885, 9912), False, 'import torch\n'), ((10024, 10072), 'torch.nn.functional.upsample', 'F.upsample', (['tmp2'], {'size': '(19, 19)', 'mode': '"""bilinear"""'}), "(tmp2, size=(19, 19), mode='bilinear')\n", (10034, 10072), True, 'import torch.nn.functional as F\n'), ((10089, 10137), 'torch.nn.functional.upsample', 'F.upsample', (['tmp3'], {'size': '(19, 19)', 'mode': '"""bilinear"""'}), "(tmp3, size=(19, 19), mode='bilinear')\n", (10099, 10137), True, 'import torch.nn.functional as F\n'), ((10153, 10185), 'torch.cat', 'torch.cat', (['[proj2, proj3]'], {'dim': '(1)'}), '([proj2, proj3], dim=1)\n', (10162, 10185), False, 'import torch\n'), ((10274, 10310), 'torch.cat', 'torch.cat', (['[agent2, convert2]'], {'dim': '(1)'}), '([agent2, convert2], dim=1)\n', (10283, 10310), False, 'import torch\n'), ((10423, 10471), 'torch.nn.functional.upsample', 'F.upsample', (['tmp3'], {'size': '(10, 10)', 'mode': '"""bilinear"""'}), "(tmp3, size=(10, 10), mode='bilinear')\n", (10433, 10471), True, 'import torch.nn.functional as F\n'), ((10581, 10617), 'torch.cat', 'torch.cat', (['[agent3, convert3]'], {'dim': '(1)'}), '([agent3, convert3], dim=1)\n', (10590, 10617), False, 'import torch\n'), ((11603, 11630), 'os.path.splitext', 'os.path.splitext', (['base_file'], {}), '(base_file)\n', (11619, 11630), False, 'import os\n'), ((12684, 12706), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(False)'}), '(inplace=False)\n', (12691, 12706), True, 'import torch.nn as nn\n'), ((12715, 12737), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(False)'}), '(inplace=False)\n', (12722, 12737), True, 'import torch.nn as nn\n'), ((964, 1029), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['out_planes'], {'eps': '(1e-05)', 'momentum': '(0.01)', 'affine': '(True)'}), '(out_planes, eps=1e-05, momentum=0.01, affine=True)\n', (978, 1029), True, 'import torch.nn as nn\n'), ((1065, 1087), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(False)'}), '(inplace=False)\n', (1072, 1087), True, 'import torch.nn as nn\n'), ((2729, 2794), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['out_planes'], {'eps': '(1e-05)', 'momentum': '(0.01)', 'affine': '(True)'}), '(out_planes, eps=1e-05, momentum=0.01, affine=True)\n', (2743, 2794), True, 'import torch.nn as nn\n'), ((7376, 7388), 'torch.nn.Softmax', 'nn.Softmax', ([], {}), '()\n', (7386, 7388), True, 'import torch.nn as nn\n'), ((11756, 11777), 'torch.load', 'torch.load', (['base_file'], {}), '(base_file)\n', (11766, 11777), False, 'import torch\n'), ((12022, 12059), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', ([], {'kernel_size': '(2)', 'stride': '(2)'}), '(kernel_size=2, stride=2)\n', (12034, 12059), True, 'import torch.nn as nn\n'), ((12197, 12248), 'torch.nn.Conv2d', 'nn.Conv2d', (['in_channels', 'v'], {'kernel_size': '(3)', 'padding': '(1)'}), '(in_channels, v, kernel_size=3, padding=1)\n', (12206, 12248), True, 'import torch.nn as nn\n'), ((13947, 13999), 'torch.nn.Conv2d', 'nn.Conv2d', (['(512)', '(cfg[k] * 4)'], {'kernel_size': '(3)', 'padding': '(1)'}), '(512, cfg[k] * 4, kernel_size=3, padding=1)\n', (13956, 13999), True, 'import torch.nn as nn\n'), ((14061, 14123), 'torch.nn.Conv2d', 'nn.Conv2d', (['(512)', '(cfg[k] * num_classes)'], {'kernel_size': '(3)', 'padding': '(1)'}), '(512, cfg[k] * num_classes, kernel_size=3, padding=1)\n', (14070, 14123), True, 'import torch.nn as nn\n'), ((14199, 14267), 'torch.nn.Conv2d', 'nn.Conv2d', (['vgg[v].out_channels', '(cfg[k] * 4)'], {'kernel_size': '(3)', 'padding': '(1)'}), '(vgg[v].out_channels, cfg[k] * 4, kernel_size=3, padding=1)\n', (14208, 14267), True, 'import torch.nn as nn\n'), ((14330, 14408), 'torch.nn.Conv2d', 'nn.Conv2d', (['vgg[v].out_channels', '(cfg[k] * num_classes)'], {'kernel_size': '(3)', 'padding': '(1)'}), '(vgg[v].out_channels, cfg[k] * num_classes, kernel_size=3, padding=1)\n', (14339, 14408), True, 'import torch.nn as nn\n'), ((14612, 14675), 'torch.nn.Conv2d', 'nn.Conv2d', (['v.out_channels', '(cfg[i] * 4)'], {'kernel_size': '(3)', 'padding': '(1)'}), '(v.out_channels, cfg[i] * 4, kernel_size=3, padding=1)\n', (14621, 14675), True, 'import torch.nn as nn\n'), ((14738, 14811), 'torch.nn.Conv2d', 'nn.Conv2d', (['v.out_channels', '(cfg[i] * num_classes)'], {'kernel_size': '(3)', 'padding': '(1)'}), '(v.out_channels, cfg[i] * num_classes, kernel_size=3, padding=1)\n', (14747, 14811), True, 'import torch.nn as nn\n'), ((12107, 12160), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', ([], {'kernel_size': '(2)', 'stride': '(2)', 'ceil_mode': '(True)'}), '(kernel_size=2, stride=2, ceil_mode=True)\n', (12119, 12160), True, 'import torch.nn as nn\n'), ((12311, 12328), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['v'], {}), '(v)\n', (12325, 12328), True, 'import torch.nn as nn\n'), ((12330, 12352), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(False)'}), '(inplace=False)\n', (12337, 12352), True, 'import torch.nn as nn\n'), ((12407, 12429), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(False)'}), '(inplace=False)\n', (12414, 12429), True, 'import torch.nn as nn\n'), ((13253, 13275), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(False)'}), '(inplace=False)\n', (13260, 13275), True, 'import torch.nn as nn\n'), ((13379, 13401), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(False)'}), '(inplace=False)\n', (13386, 13401), True, 'import torch.nn as nn\n')]
from __future__ import (absolute_import, division, print_function, unicode_literals)
from builtins import (ascii, bytes, chr, dict, filter, hex, input, int, map, next, oct, open, pow, range, round, str,
super, zip)
from ...._utils import send_session_request
from ..._PortalEndpointBase import PortalEndpointBase
from .CreateUpdateGroupParams import CreateUpdateGroupParams
class Group(PortalEndpointBase):
@property
def id(self):
return self._pdata["id"]
@property
def _url_full(self):
return "{0}/{1}".format(self._url_base, self.id)
def __init__(self, requests_session, url_base, id):
super().__init__(requests_session, url_base)
self._pdata = {"id": id}
def get_properties(self):
"""
        Gets the properties of the group.
"""
return self._get()
def update(self, update_group_params, clear_empty_fields=False):
"""
Updates the group properties.
"""
update_group_params = update_group_params._get_params() if isinstance(
update_group_params, CreateUpdateGroupParams) else update_group_params.copy()
if not "clearEmptyFields" in update_group_params:
update_group_params["clearEmptyFields"] = clear_empty_fields
r = self._create_operation_request(self, "update", method="POST", data=update_group_params)
        return send_session_request(self._session, r).json()
| [
"builtins.super"
] | [((659, 666), 'builtins.super', 'super', ([], {}), '()\n', (664, 666), False, 'from builtins import ascii, bytes, chr, dict, filter, hex, input, int, map, next, oct, open, pow, range, round, str, super, zip\n')] |
from unittest import TestCase
import numpy as np
from robustnessgym.cachedops.spacy import Spacy
from robustnessgym.slicebuilders.subpopulations.length import LengthSubpopulation
from tests.testbeds import MockTestBedv0
class TestLengthSubpopulation(TestCase):
def setUp(self):
self.testbed = MockTestBedv0()
self.testbed.dataset = Spacy()(self.testbed.dataset, columns=["text"])
def test_score(self):
# Create the length subpopulation
length = LengthSubpopulation(intervals=[(1, 3), (4, 5)])
# Compute scores
scores = length.score(self.testbed.dataset[:], columns=["text"])
self.assertTrue(np.allclose(scores, np.array([5, 5, 5, 5, 5, 5])))
print(self.testbed.dataset.column_names)
print(Spacy.retrieve(self.testbed.dataset[:], ["text"]))
# Apply the subpopulation
slices, slice_matrix = length(self.testbed.dataset, columns=["text"])
# Check that the slice membership lines up
self.assertTrue(np.allclose(slice_matrix, np.array([[0, 1]] * 6)))
| [
"tests.testbeds.MockTestBedv0",
"robustnessgym.cachedops.spacy.Spacy",
"numpy.array",
"robustnessgym.cachedops.spacy.Spacy.retrieve",
"robustnessgym.slicebuilders.subpopulations.length.LengthSubpopulation"
] | [((309, 324), 'tests.testbeds.MockTestBedv0', 'MockTestBedv0', ([], {}), '()\n', (322, 324), False, 'from tests.testbeds import MockTestBedv0\n'), ((490, 537), 'robustnessgym.slicebuilders.subpopulations.length.LengthSubpopulation', 'LengthSubpopulation', ([], {'intervals': '[(1, 3), (4, 5)]'}), '(intervals=[(1, 3), (4, 5)])\n', (509, 537), False, 'from robustnessgym.slicebuilders.subpopulations.length import LengthSubpopulation\n'), ((356, 363), 'robustnessgym.cachedops.spacy.Spacy', 'Spacy', ([], {}), '()\n', (361, 363), False, 'from robustnessgym.cachedops.spacy import Spacy\n'), ((776, 825), 'robustnessgym.cachedops.spacy.Spacy.retrieve', 'Spacy.retrieve', (['self.testbed.dataset[:]', "['text']"], {}), "(self.testbed.dataset[:], ['text'])\n", (790, 825), False, 'from robustnessgym.cachedops.spacy import Spacy\n'), ((681, 709), 'numpy.array', 'np.array', (['[5, 5, 5, 5, 5, 5]'], {}), '([5, 5, 5, 5, 5, 5])\n', (689, 709), True, 'import numpy as np\n'), ((1042, 1064), 'numpy.array', 'np.array', (['([[0, 1]] * 6)'], {}), '([[0, 1]] * 6)\n', (1050, 1064), True, 'import numpy as np\n')] |
import json
from astroquery.vizier import Vizier
with open("Jankowski_2018_raw.txt", "r") as raw_file:
lines = raw_file.readlines()
print(lines)
pulsar_dict = {}
for row in lines[3:]:
row = row.split("|")
print(row)
pulsar = row[0].strip().replace("−", "-")
freqs = []
fluxs = []
flux_errs = []
    # If there is no error, the value is an upper limit and we are not sure how to handle it
if row[1].strip() != "" and row[2].strip() != "":
freqs.append(728)
fluxs.append(float(row[1].strip()))
flux_errs.append(float(row[2].strip()))
if row[3].strip() != "" and row[4].strip() != "":
freqs.append(1382)
fluxs.append(float(row[3].strip()))
flux_errs.append(float(row[4].strip()))
if row[5].strip() != "" and row[6].strip() != "":
freqs.append(3100)
fluxs.append(float(row[5].strip()))
flux_errs.append(float(row[6].strip()))
pulsar_dict[pulsar] = {"Frequency MHz":freqs, "Flux Density mJy":fluxs, "Flux Density error mJy":flux_errs}
with open("Jankowski_2018.yaml", "w") as cat_file:
cat_file.write(json.dumps(pulsar_dict))
print(pulsar_dict)
| [
"json.dumps"
] | [((1103, 1126), 'json.dumps', 'json.dumps', (['pulsar_dict'], {}), '(pulsar_dict)\n', (1113, 1126), False, 'import json\n')] |
# Copyright (c) 2019 Leiden University Medical Center
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import subprocess
import sys
from pathlib import Path
SOUNDS_DIR = (Path(__file__).parent / Path("sounds")).absolute()
DEFAULT_SUCCESS_SOUND = SOUNDS_DIR / Path("applause")
DEFAULT_FAIL_SOUND = SOUNDS_DIR / Path("buzzer")
def play_sound(sound_file: Path):
if sys.platform == "linux":
# paplay comes from PulseAudio and should be installed by default on
# most systems.
_play_sound_unix(sound_file.with_suffix(".oga"), program="paplay")
elif sys.platform == "darwin":
# Afplay comes installed by default on Macintosh
_play_sound_unix(sound_file.with_suffix(".mp3"), program="afplay")
else:
# A windows implementation should be possible with the winsound
# implementation, but that does not play ogg audio.
raise NotImplementedError(
"Playing sounds not supported by pytest-notification on {}"
"".format(sys.platform))
def _play_sound_unix(sound_file: Path, program):
"""
Play a sound file on unix with the program.
:param sound_file: Path to the sound file.
:param program: Which program to use.
:return: No returns. Plays a sound file.
"""
# Play the sound non blocking, use Popen.
subprocess.Popen([program, str(sound_file)])
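
# Example usage (a sketch using the defaults defined above):
#
#     play_sound(DEFAULT_SUCCESS_SOUND)  # applause.oga on Linux, applause.mp3 on macOS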
| [
"pathlib.Path"
] | [((1269, 1285), 'pathlib.Path', 'Path', (['"""applause"""'], {}), "('applause')\n", (1273, 1285), False, 'from pathlib import Path\n'), ((1320, 1334), 'pathlib.Path', 'Path', (['"""buzzer"""'], {}), "('buzzer')\n", (1324, 1334), False, 'from pathlib import Path\n'), ((1205, 1219), 'pathlib.Path', 'Path', (['"""sounds"""'], {}), "('sounds')\n", (1209, 1219), False, 'from pathlib import Path\n'), ((1181, 1195), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (1185, 1195), False, 'from pathlib import Path\n')] |
#!/usr/bin/env python3
# Copyright (c) 2016 Anki, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License in the file LICENSE.txt or at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''"If This Then That" Gmail example
This example demonstrates how "If This Then That" (http://ifttt.com) can be used
to make Cozmo respond when a Gmail account receives an email. Instructions below
will lead you through setting up an applet on the IFTTT website. When the applet
trigger is called (which sends a web request received by the web server started
in this example), Cozmo will play an animation, speak the email sender's name and
show a mailbox image on his face.
Please place Cozmo on the charger for this example. When necessary, he will be
rolled off and back on.
Follow these steps to set up and run the example:
1) Provide a static ip, URL or similar that can be reached from the If This
Then That server. One easy way to do this is with ngrok, which sets up
a secure tunnel to localhost running on your machine.
To set up ngrok:
a) Follow instructions here to download and install:
https://ngrok.com/download
b) Run this command to create a secure public URL for port 8080:
./ngrok http 8080
c) Note the HTTP forwarding address shown in the terminal (e.g., http://55e57164.ngrok.io).
You will use this address in your applet, below.
WARNING: Using ngrok exposes your local web server to the internet. See the ngrok
documentation for more information: https://ngrok.com/docs
2) Set up your applet on the "If This Then That" website.
a) Sign up and sign into https://ifttt.com
b) Create an applet: https://ifttt.com/create
c) Set up your trigger.
1. Click "this".
2. Select "Gmail" as your service. If prompted, click "Connect",
select your Gmail account, and click “Allow” to provide permissions
to IFTTT for your email account. Click "Done".
3. Under "Choose a Trigger", select “Any new email in inbox".
d) Set up your action.
1. Click “that".
2. Select “Maker" to set it as your action channel. Connect to the Maker channel if prompted.
3. Click “Make a web request" and fill out the fields as follows. Remember your publicly
accessible URL from above (e.g., http://55e57164.ngrok.io) and use it in the URL field,
followed by "/iftttGmail" as shown below:
URL: http://55e57164.ngrok.io/iftttGmail
Method: POST
Content Type: application/json
Body: {"FromAddress":"{{FromAddress}}"}
        4. Click “Create Action" then “Finish".
3) Test your applet.
a) Run this script at the command line: ./ifttt_gmail.py
b) On ifttt.com, on your applet page, click “Check now”. See that IFTTT confirms that the applet
was checked.
c) Send an email to the Gmail account in your recipe
d) On your IFTTT applet webpage, again click “Check now”. This should cause IFTTT to detect that
the email was received and send a web request to the ifttt_gmail.py script.
e) In response to the ifttt web request, Cozmo should roll off the charger, raise and lower
his lift, announce the email, and then show a mailbox image on his face.
'''
import asyncio
import re
import sys
try:
from aiohttp import web
except ImportError:
sys.exit("Cannot import from aiohttp. Do `pip3 install --user aiohttp` to install")
import cozmo
from common import IFTTTRobot
app = web.Application()
async def serve_gmail(request):
'''Define an HTTP POST handler for receiving requests from If This Then That.
You may modify this method to change how Cozmo reacts to the email
being received.
'''
json_object = await request.json()
# Extract the name of the email sender.
from_email_address = json_object["FromAddress"]
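    # "FromAddress" matches the Body configured in the IFTTT applet:
    # {"FromAddress":"{{FromAddress}}"}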
# Use a regular expression to break apart pieces of the email address
match_object = re.search(r'([\w.]+)@([\w.]+)', from_email_address)
email_local_part = match_object.group(1)
robot = request.app['robot']
async def read_name():
try:
async with robot.perform_off_charger():
'''If necessary, Move Cozmo's Head and Lift to make it easy to see Cozmo's face.'''
await robot.get_in_position()
# First, have Cozmo play animation "ID_pokedB", which tells
# Cozmo to raise and lower his lift. To change the animation,
# you may replace "ID_pokedB" with another animation. Run
# remote_control_cozmo.py to see a list of animations.
await robot.play_anim(name='ID_pokedB').wait_for_completed()
# Next, have Cozmo speak the name of the email sender.
await robot.say_text("Email from " + email_local_part).wait_for_completed()
# Last, have Cozmo display an email image on his face.
robot.display_image_file_on_face("../face_images/ifttt_gmail.png")
except cozmo.RobotBusy:
cozmo.logger.warning("Robot was busy so didn't read email address: "+ from_email_address)
# Perform Cozmo's task in the background so the HTTP server responds immediately.
asyncio.ensure_future(read_name())
return web.Response(text="OK")
# Attach the function as an HTTP handler.
app.router.add_post('/iftttGmail', serve_gmail)
if __name__ == '__main__':
cozmo.setup_basic_logging()
cozmo.robot.Robot.drive_off_charger_on_connect = False
# Use our custom robot class with extra helper methods
cozmo.conn.CozmoConnection.robot_factory = IFTTTRobot
try:
sdk_conn = cozmo.connect_on_loop(app.loop)
# Wait for the robot to become available and add it to the app object.
app['robot'] = app.loop.run_until_complete(sdk_conn.wait_for_robot())
except cozmo.ConnectionError as e:
sys.exit("A connection error occurred: %s" % e)
web.run_app(app)
| [
"aiohttp.web.run_app",
"sys.exit",
"cozmo.logger.warning",
"aiohttp.web.Response",
"aiohttp.web.Application",
"cozmo.connect_on_loop",
"cozmo.setup_basic_logging",
"re.search"
] | [((4131, 4148), 'aiohttp.web.Application', 'web.Application', ([], {}), '()\n', (4146, 4148), False, 'from aiohttp import web\n'), ((4596, 4648), 're.search', 're.search', (['"""([\\\\w.]+)@([\\\\w.]+)"""', 'from_email_address'], {}), "('([\\\\w.]+)@([\\\\w.]+)', from_email_address)\n", (4605, 4648), False, 'import re\n'), ((5934, 5957), 'aiohttp.web.Response', 'web.Response', ([], {'text': '"""OK"""'}), "(text='OK')\n", (5946, 5957), False, 'from aiohttp import web\n'), ((6082, 6109), 'cozmo.setup_basic_logging', 'cozmo.setup_basic_logging', ([], {}), '()\n', (6107, 6109), False, 'import cozmo\n'), ((6605, 6621), 'aiohttp.web.run_app', 'web.run_app', (['app'], {}), '(app)\n', (6616, 6621), False, 'from aiohttp import web\n'), ((3994, 4082), 'sys.exit', 'sys.exit', (['"""Cannot import from aiohttp. Do `pip3 install --user aiohttp` to install"""'], {}), "(\n 'Cannot import from aiohttp. Do `pip3 install --user aiohttp` to install')\n", (4002, 4082), False, 'import sys\n'), ((6316, 6347), 'cozmo.connect_on_loop', 'cozmo.connect_on_loop', (['app.loop'], {}), '(app.loop)\n', (6337, 6347), False, 'import cozmo\n'), ((6552, 6599), 'sys.exit', 'sys.exit', (["('A connection error occurred: %s' % e)"], {}), "('A connection error occurred: %s' % e)\n", (6560, 6599), False, 'import sys\n'), ((5706, 5800), 'cozmo.logger.warning', 'cozmo.logger.warning', (['("Robot was busy so didn\'t read email address: " + from_email_address)'], {}), '("Robot was busy so didn\'t read email address: " +\n from_email_address)\n', (5726, 5800), False, 'import cozmo\n')] |
# -*- coding: utf-8 -*-
"""
Created on Fri May 30 17:15:27 2014
@author: Parke
"""
from __future__ import division, print_function, absolute_import
import numpy as np
import matplotlib as mplot
import matplotlib.pyplot as plt
import mypy.my_numpy as mnp
dpi = 100
fullwidth = 10.0
halfwidth = 5.0
# use these with line.set_dashes and iterate through more linestyles than come with matplotlib
# consider using a ::2 slice for fewer
dashes = [[],
[30, 10],
[20, 8],
[10, 5],
[3, 2],
[30, 5, 3, 5, 10, 5, 3, 5],
[15] + [5, 3]*3 + [5],
[15] + [5, 3]*2 + [5],
[15] + [5, 3] + [5]]
def click_coords(fig=None, timeout=600.):
if fig is None:
fig = plt.gcf()
xy = []
def onclick(event):
if not event.inaxes:
fig.canvas.stop_event_loop()
else:
xy.append([event.xdata, event.ydata])
print("Gathering coordinates of mouse clicks. Click outside of the axes " \
"when done.")
cid = fig.canvas.mpl_connect('button_press_event', onclick)
fig.canvas.start_event_loop(timeout=timeout)
fig.canvas.mpl_disconnect(cid)
return np.array(xy)
def common_axes(fig, pos=None):
if pos is None:
bigax = fig.add_subplot(111)
else:
bigax = fig.add_axes(pos)
[bigax.spines[s].set_visible(False) for s in ['top', 'bottom', 'left', 'right']]
bigax.tick_params(labelleft=False, labelbottom=False, left='off', bottom='off')
bigax.set_zorder(-10)
return bigax
def log_frac(x, frac):
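    # return the value a fraction frac of the way from x[0] to x[1] in log10 space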
l0, l1 = list(map(np.log10, x))
ld = l1 - l0
l = ld*frac + l0
return 10**l
def log2linear(x, errneg=None, errpos=None):
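    # convert a log10 value (with optional asymmetric errors) back to linear units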
xl = 10**x
result = [xl]
if errneg is not None:
xn = xl - 10**(x - np.abs(errneg))
result.append(xn)
if errpos is not None:
xp = 10**(x + errpos) - xl
result.append(xp)
return result
def linear2log(x, errneg=None, errpos=None):
xl = np.log10(x)
result = [x]
if errneg is not None:
xn = xl - np.log10(x - np.abs(errneg))
result.append(xn)
if errpos is not None:
xp = np.log10(x + errpos) - xl
result.append(xp)
return result
def step(*args, **kwargs):
edges, values = args[0], args[1]
# deal with potentially gappy 2-column bin specifications
edges = np.asarray(edges)
if edges.ndim == 2:
if np.any(edges[1:,0] < edges[:-1,1]):
raise ValueError('Some bins overlap')
if np.any(edges[1:,0] < edges[:-1,0]):
raise ValueError('Bins must be in increasing order.')
gaps = edges[1:,0] > edges[:-1,1]
edges = np.unique(edges)
if np.any(gaps):
values = np.insert(values, np.nonzero(gaps), np.nan)
edges = mnp.lace(edges[:-1], edges[1:])
values = mnp.lace(values, values)
args = list(args)
args[0], args[1] = edges, values
ax = kwargs.pop('ax', plt.gca())
return ax.plot(*args, **kwargs)
def point_along_line(x, y, xfrac=None, xlbl=None, scale='linear'):
if scale == 'log':
        lx, ly = point_along_line(np.log10(x), np.log10(y), xfrac, xlbl, scale='linear')
return 10 ** lx, 10 ** ly
if xfrac is not None:
if xfrac == 0:
return x[0], y[0]
if xfrac == 1:
return x[-1], y[-1]
else:
d = np.cumsum(np.sqrt(np.diff(x)**2 + np.diff(y)**2))
d = np.insert(d, 0, 0)
f = d/d[-1]
xp, yp = [np.interp(xfrac, f, a) for a in [x,y]]
return xp, yp
if xlbl is not None:
return xlbl, np.interp(xlbl, x, y)
def textSize(ax_or_fig=None, coordinate='data'):
"""
Return x & y scale factors for converting text sizes in points to another coordinate. Useful for properly spacing
text labels and such when you need to know sizes before the text is made (otherwise you can use textBoxSize).
Coordinate can be 'data', 'axes', or 'figure'.
If data coordinates are requested and the data is plotted on a log scale, then the factor will be given in dex.
"""
if ax_or_fig is None:
fig = plt.gcf()
ax = fig.gca()
else:
if isinstance(ax_or_fig, plt.Figure):
fig = ax_or_fig
ax = fig.gca()
elif isinstance(ax_or_fig, plt.Axes):
ax = ax_or_fig
fig = ax.get_figure()
else:
raise TypeError('ax_or_fig must be a Figure or Axes instance, if given.')
w_fig_in, h_fig_in = ax.get_figure().get_size_inches()
    if coordinate in ('fig', 'figure'):
return 1.0/(w_fig_in*72), 1.0/(h_fig_in*72)
w_ax_norm, h_ax_norm = ax.get_position().size
w_ax_in = w_ax_norm * w_fig_in
h_ax_in = h_ax_norm * h_fig_in
w_ax_pts, h_ax_pts = w_ax_in*72, h_ax_in*72
if coordinate == 'axes':
return 1.0/w_ax_pts, 1.0/h_ax_pts
if coordinate == 'data':
xlim = ax.get_xlim()
ylim = ax.get_ylim()
if ax.get_xscale() == 'log': xlim = np.log10(xlim)
if ax.get_yscale() == 'log': ylim = np.log10(ylim)
w_ax_data = xlim[1] - xlim[0]
h_ax_data = ylim[1] - ylim[0]
return w_ax_data/w_ax_pts, h_ax_data/h_ax_pts
def tight_axis_limits(ax=None, xory='both', margin=0.05):
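    # snap the axis limits to the plotted data extent plus a fractional margin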
if ax is None: ax = plt.gca()
def newlim(oldlim):
delta = abs(oldlim[1] - oldlim[0])
pad = delta*margin
if oldlim[1] > oldlim[0]:
return (oldlim[0] - pad, oldlim[1] + pad)
else:
return (oldlim[0] + pad, oldlim[1] - pad)
def newlim_log(oldlim):
loglim = [np.log10(l) for l in oldlim]
newloglim = newlim(loglim)
return (10.0**newloglim[0], 10.0**newloglim[1])
def newlim_either(oldlim,axlim,scale):
if axlim[1] < axlim [0]: oldlim = oldlim[::-1]
if scale == 'linear':
return newlim(oldlim)
elif scale == 'log':
return newlim_log(oldlim)
elif scale == 'symlog':
            raise NotImplementedError('Past Parke to future Parke, you didn\'t write an '
                                      'implementation for symlog-scaled axes.')
if xory == 'x' or xory == 'both':
datalim = ax.dataLim.extents[[0,2]]
axlim = ax.get_xlim()
scale = ax.get_xscale()
ax.set_xlim(newlim_either(datalim,axlim,scale))
if xory == 'y' or xory == 'both':
datalim = ax.dataLim.extents[[1,3]]
axlim = ax.get_ylim()
scale = ax.get_yscale()
ax.set_ylim(newlim_either(datalim,axlim,scale))
#TODO: discard this function?
def standard_figure(app, slideAR=1.6, height=1.0):
"""Generate a figure of standard size for publishing.
implemented values for app (application) are:
'fullslide'
height is the fractional height of the figure relative to the "standard"
height. For slides the standard is the full height of a slide.
returns the figure object and default font size
"""
if app == 'fullslide':
fontsize = 20
figsize = [fullwidth, fullwidth/slideAR*height]
fig = mplot.pyplot.figure(figsize=figsize, dpi=dpi)
mplot.rcParams.update({'font.size': fontsize})
return fig, fontsize
def pcolor_reg(x, y, z, **kw):
"""
Similar to `pcolor`, but assume that the grid is uniform,
and do plotting with the (much faster) `imshow` function.
"""
x, y, z = np.asarray(x), np.asarray(y), np.asarray(z)
if x.ndim != 1 or y.ndim != 1:
raise ValueError("x and y should be 1-dimensional")
if z.ndim != 2 or z.shape != (y.size, x.size):
raise ValueError("z.shape should be (y.size, x.size)")
dx = np.diff(x)
dy = np.diff(y)
if not np.allclose(dx, dx[0], 1e-2) or not np.allclose(dy, dy[0], 1e-2):
raise ValueError("The grid must be uniform")
if np.issubdtype(z.dtype, np.complexfloating):
zp = np.zeros(z.shape, float)
zp[...] = z[...]
z = zp
plt.imshow(z, origin='lower',
extent=[x.min(), x.max(), y.min(), y.max()],
interpolation='nearest',
aspect='auto',
**kw)
plt.axis('tight')
def errorpoly(x, y, yerr, fmt=None, ecolor=None, ealpha=0.5, ax=None, **kw):
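    # line plot with a shaded error band; yerr may be 1D (symmetric) or 2D ([lower, upper])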
if ax is None: ax = plt.gca()
p = ax.plot(x, y, **kw) if fmt is None else ax.plot(x, y, fmt, **kw)
if len(yerr.shape) == 2:
ylo = y - yerr[0,:]
yhi = y + yerr[1,:]
else:
ylo, yhi = y - yerr, y + yerr
if ecolor is None: ecolor = p[0].get_color()
# deal with matplotlib sometimes not showing polygon when it extends beyond plot range
xlim = ax.get_xlim()
inrange = mnp.inranges(x, xlim)
if not np.all(inrange):
n = np.sum(inrange)
yends = np.interp(xlim, x, y)
yloends = np.interp(xlim, x, ylo)
yhiends = np.interp(xlim, x, yhi)
x = np.insert(x[inrange], [0, n], xlim)
y = np.insert(y[inrange], [0, n], yends)
ylo = np.insert(ylo[inrange], [0, n], yloends)
yhi = np.insert(yhi[inrange], [0, n], yhiends)
f = ax.fill_between(x,ylo,yhi,color=ecolor,alpha=ealpha)
return p[0],f
def onscreen_pres(mpl, screenwidth=1200):
"""
Set matplotlibrc values so that plots are readable as they are created
and maximized for an audience far from a screen.
Parameters
----------
mpl : module
Current matplotlib module. Use 'import matplotlib as mpl'.
screewidth : int
Width of the screen in question in pixels.
Returns
-------
None
"""
mpl.rcParams['lines.linewidth'] = 2
fontsize = round(14 / (800.0 / screenwidth))
mpl.rcParams['font.size'] = fontsize
def textBoxSize(txt, transformation=None, figure=None):
"""Get the width and height of a text object's bounding box transformed to the desired coordinates. Defaults to
figure coordinates if transformation is None."""
fig= txt.get_figure() if figure is None else figure
if transformation is None:
transformation = fig.transFigure
coordConvert = transformation.inverted().transform
bboxDisp = txt.get_window_extent(fig.canvas.renderer)
bboxConv = coordConvert(bboxDisp)
w = bboxConv[1,0] - bboxConv[0,0]
h = bboxConv[1,1] - bboxConv[0,1]
return w, h
def stars3d(ra, dec, dist, T=5000.0, r=1.0, labels='', view=None, size=(800,800), txt_scale=1.0):
"""
Make a 3D diagram of stars positions relative to the Sun, with
semi-accurate colors and distances as desired. Coordinates must be in
degrees. Distance is assumed to be in pc (for axes labels).
Meant to be used with only a handful of stars.
"""
from mayavi import mlab
from color.maps import true_temp
n = len(ra)
dec, ra = dec*np.pi/180.0, ra*np.pi/180.0
makearr = lambda v: np.array([v] * n) if np.isscalar(v) else v
T, r, labels = list(map(makearr, (T, r, labels)))
# add the sun
ra, dec, dist = list(map(np.append, (ra, dec, dist), (0.0, 0.0, 0.0)))
r, T, labels = list(map(np.append, (r, T, labels), (1.0, 5780.0, 'Sun')))
# get xyz coordinates
z = dist * np.sin(dec)
h = dist * np.cos(dec)
x = h * np.cos(ra)
y = h * np.sin(ra)
# make figure
fig = mlab.figure(bgcolor=(0,0,0), fgcolor=(1,1,1), size=size)
# plot lines down to the dec=0 plane for all but the sun
lines = []
for x1, y1, z1 in list(zip(x, y, z))[:-1]:
xx, yy, zz = [x1, x1], [y1, y1], [0.0, z1]
line = mlab.plot3d(xx, yy, zz, color=(0.7,0.7,0.7), line_width=0.5,
figure=fig)
lines.append(line)
# plot spheres
r_factor = np.max(dist) / 30.0
pts = mlab.quiver3d(x, y, z, r, r, r, scalars=T, mode='sphere',
scale_factor=r_factor, figure=fig, resolution=100)
pts.glyph.color_mode = 'color_by_scalar'
# center the glyphs on the data point
pts.glyph.glyph_source.glyph_source.center = [0, 0, 0]
# set a temperature colormap
cmap = true_temp(T)
pts.module_manager.scalar_lut_manager.lut.table = cmap
# set the camera view
mlab.view(focalpoint=(0.0, 0.0, 0.0), figure=fig)
if view is not None:
mlab.view(*view, figure=fig)
## add labels
# unit vec to camera
view = mlab.view()
az, el = view[:2]
hc = np.sin(el * np.pi / 180.0)
xc = hc * np.cos(az * np.pi / 180.0)
yc = hc * np.sin(az * np.pi / 180.0)
zc = -np.cos(el * np.pi / 180.0)
# unit vec orthoganal to camera
if xc**2 + yc**2 == 0.0:
xoff = 1.0
yoff = 0.0
zoff = 0.0
else:
xoff = yc / np.sqrt(xc**2 + yc**2)
yoff = np.sqrt(1.0 - xoff**2)
zoff = 0.0
# xoff, yoff, zoff = xc, yc, zc
# scale orthogonal vec by sphere size
r_label = 1.0 * r_factor
xoff, yoff, zoff = [r_label * v for v in [xoff, yoff, zoff]]
# plot labels
size = r_factor * txt_scale * 0.75
for xx, yy, zz, label in zip(x, y, z, labels):
mlab.text3d(xx + xoff, yy + yoff, zz + zoff, label, figure=fig,
color=(1,1,1), scale=size)
## add translucent dec=0 surface
n = 101
t = np.linspace(0.0, 2*np.pi, n)
r = np.max(dist * np.cos(dec))
x, y = r*np.cos(t), r*np.sin(t)
z = np.zeros(n+1)
x, y = [np.insert(a, 0, 0.0) for a in [x,y]]
triangles = [(0, i, i + 1) for i in range(1, n)]
mlab.triangular_mesh(x, y, z, triangles, color=(1,1,1), opacity=0.3, figure=fig)
## add ra=0 line
line = mlab.plot3d([0, r], [0, 0], [0, 0], color=(1,1,1), line_width=1, figure=fig)
rtxt = '{:.1f} pc'.format(r)
orientation=np.array([180.0, 180.0, 0.0])
mlab.text3d(r, 0, 0, rtxt, figure=fig, scale=size*1.25, orient_to_camera=False, orientation=orientation)
if view is not None:
mlab.view(*view, figure=fig)
return fig
| [
"numpy.log10",
"numpy.sqrt",
"numpy.array",
"numpy.sin",
"mypy.my_numpy.inranges",
"mayavi.mlab.view",
"numpy.isscalar",
"numpy.asarray",
"numpy.diff",
"numpy.max",
"numpy.issubdtype",
"numpy.linspace",
"mayavi.mlab.quiver3d",
"matplotlib.pyplot.axis",
"numpy.abs",
"numpy.allclose",
"mypy.my_numpy.lace",
"matplotlib.rcParams.update",
"matplotlib.pyplot.gcf",
"matplotlib.pyplot.gca",
"numpy.any",
"mayavi.mlab.text3d",
"numpy.cos",
"numpy.interp",
"numpy.nonzero",
"color.maps.true_temp",
"mayavi.mlab.triangular_mesh",
"numpy.insert",
"numpy.unique",
"mayavi.mlab.figure",
"numpy.sum",
"numpy.zeros",
"mayavi.mlab.plot3d",
"matplotlib.pyplot.figure",
"numpy.all"
] | [((1184, 1196), 'numpy.array', 'np.array', (['xy'], {}), '(xy)\n', (1192, 1196), True, 'import numpy as np\n'), ((1998, 2009), 'numpy.log10', 'np.log10', (['x'], {}), '(x)\n', (2006, 2009), True, 'import numpy as np\n'), ((2378, 2395), 'numpy.asarray', 'np.asarray', (['edges'], {}), '(edges)\n', (2388, 2395), True, 'import numpy as np\n'), ((2808, 2839), 'mypy.my_numpy.lace', 'mnp.lace', (['edges[:-1]', 'edges[1:]'], {}), '(edges[:-1], edges[1:])\n', (2816, 2839), True, 'import mypy.my_numpy as mnp\n'), ((2853, 2877), 'mypy.my_numpy.lace', 'mnp.lace', (['values', 'values'], {}), '(values, values)\n', (2861, 2877), True, 'import mypy.my_numpy as mnp\n'), ((7155, 7201), 'matplotlib.rcParams.update', 'mplot.rcParams.update', (["{'font.size': fontsize}"], {}), "({'font.size': fontsize})\n", (7176, 7201), True, 'import matplotlib as mplot\n'), ((7677, 7687), 'numpy.diff', 'np.diff', (['x'], {}), '(x)\n', (7684, 7687), True, 'import numpy as np\n'), ((7697, 7707), 'numpy.diff', 'np.diff', (['y'], {}), '(y)\n', (7704, 7707), True, 'import numpy as np\n'), ((7846, 7888), 'numpy.issubdtype', 'np.issubdtype', (['z.dtype', 'np.complexfloating'], {}), '(z.dtype, np.complexfloating)\n', (7859, 7888), True, 'import numpy as np\n'), ((8158, 8175), 'matplotlib.pyplot.axis', 'plt.axis', (['"""tight"""'], {}), "('tight')\n", (8166, 8175), True, 'import matplotlib.pyplot as plt\n'), ((8675, 8696), 'mypy.my_numpy.inranges', 'mnp.inranges', (['x', 'xlim'], {}), '(x, xlim)\n', (8687, 8696), True, 'import mypy.my_numpy as mnp\n'), ((11251, 11311), 'mayavi.mlab.figure', 'mlab.figure', ([], {'bgcolor': '(0, 0, 0)', 'fgcolor': '(1, 1, 1)', 'size': 'size'}), '(bgcolor=(0, 0, 0), fgcolor=(1, 1, 1), size=size)\n', (11262, 11311), False, 'from mayavi import mlab\n'), ((11686, 11799), 'mayavi.mlab.quiver3d', 'mlab.quiver3d', (['x', 'y', 'z', 'r', 'r', 'r'], {'scalars': 'T', 'mode': '"""sphere"""', 'scale_factor': 'r_factor', 'figure': 'fig', 'resolution': '(100)'}), "(x, y, z, r, r, r, scalars=T, mode='sphere', scale_factor=\n r_factor, figure=fig, resolution=100)\n", (11699, 11799), False, 'from mayavi import mlab\n'), ((12010, 12022), 'color.maps.true_temp', 'true_temp', (['T'], {}), '(T)\n', (12019, 12022), False, 'from color.maps import true_temp\n'), ((12113, 12162), 'mayavi.mlab.view', 'mlab.view', ([], {'focalpoint': '(0.0, 0.0, 0.0)', 'figure': 'fig'}), '(focalpoint=(0.0, 0.0, 0.0), figure=fig)\n', (12122, 12162), False, 'from mayavi import mlab\n'), ((12279, 12290), 'mayavi.mlab.view', 'mlab.view', ([], {}), '()\n', (12288, 12290), False, 'from mayavi import mlab\n'), ((12322, 12348), 'numpy.sin', 'np.sin', (['(el * np.pi / 180.0)'], {}), '(el * np.pi / 180.0)\n', (12328, 12348), True, 'import numpy as np\n'), ((13156, 13186), 'numpy.linspace', 'np.linspace', (['(0.0)', '(2 * np.pi)', 'n'], {}), '(0.0, 2 * np.pi, n)\n', (13167, 13186), True, 'import numpy as np\n'), ((13264, 13279), 'numpy.zeros', 'np.zeros', (['(n + 1)'], {}), '(n + 1)\n', (13272, 13279), True, 'import numpy as np\n'), ((13384, 13470), 'mayavi.mlab.triangular_mesh', 'mlab.triangular_mesh', (['x', 'y', 'z', 'triangles'], {'color': '(1, 1, 1)', 'opacity': '(0.3)', 'figure': 'fig'}), '(x, y, z, triangles, color=(1, 1, 1), opacity=0.3,\n figure=fig)\n', (13404, 13470), False, 'from mayavi import mlab\n'), ((13498, 13576), 'mayavi.mlab.plot3d', 'mlab.plot3d', (['[0, r]', '[0, 0]', '[0, 0]'], {'color': '(1, 1, 1)', 'line_width': '(1)', 'figure': 'fig'}), '([0, r], [0, 0], [0, 0], color=(1, 1, 1), line_width=1, figure=fig)\n', (13509, 13576), 
False, 'from mayavi import mlab\n'), ((13624, 13653), 'numpy.array', 'np.array', (['[180.0, 180.0, 0.0]'], {}), '([180.0, 180.0, 0.0])\n', (13632, 13653), True, 'import numpy as np\n'), ((13658, 13769), 'mayavi.mlab.text3d', 'mlab.text3d', (['r', '(0)', '(0)', 'rtxt'], {'figure': 'fig', 'scale': '(size * 1.25)', 'orient_to_camera': '(False)', 'orientation': 'orientation'}), '(r, 0, 0, rtxt, figure=fig, scale=size * 1.25, orient_to_camera=\n False, orientation=orientation)\n', (13669, 13769), False, 'from mayavi import mlab\n'), ((739, 748), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (746, 748), True, 'import matplotlib.pyplot as plt\n'), ((2431, 2467), 'numpy.any', 'np.any', (['(edges[1:, 0] < edges[:-1, 1])'], {}), '(edges[1:, 0] < edges[:-1, 1])\n', (2437, 2467), True, 'import numpy as np\n'), ((2528, 2564), 'numpy.any', 'np.any', (['(edges[1:, 0] < edges[:-1, 0])'], {}), '(edges[1:, 0] < edges[:-1, 0])\n', (2534, 2564), True, 'import numpy as np\n'), ((2688, 2704), 'numpy.unique', 'np.unique', (['edges'], {}), '(edges)\n', (2697, 2704), True, 'import numpy as np\n'), ((2716, 2728), 'numpy.any', 'np.any', (['gaps'], {}), '(gaps)\n', (2722, 2728), True, 'import numpy as np\n'), ((2963, 2972), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (2970, 2972), True, 'import matplotlib.pyplot as plt\n'), ((4159, 4168), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (4166, 4168), True, 'import matplotlib.pyplot as plt\n'), ((5312, 5321), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (5319, 5321), True, 'import matplotlib.pyplot as plt\n'), ((7104, 7149), 'matplotlib.pyplot.figure', 'mplot.pyplot.figure', ([], {'figsize': 'figsize', 'dpi': 'dpi'}), '(figsize=figsize, dpi=dpi)\n', (7123, 7149), True, 'import matplotlib as mplot\n'), ((7415, 7428), 'numpy.asarray', 'np.asarray', (['x'], {}), '(x)\n', (7425, 7428), True, 'import numpy as np\n'), ((7430, 7443), 'numpy.asarray', 'np.asarray', (['y'], {}), '(y)\n', (7440, 7443), True, 'import numpy as np\n'), ((7445, 7458), 'numpy.asarray', 'np.asarray', (['z'], {}), '(z)\n', (7455, 7458), True, 'import numpy as np\n'), ((7903, 7927), 'numpy.zeros', 'np.zeros', (['z.shape', 'float'], {}), '(z.shape, float)\n', (7911, 7927), True, 'import numpy as np\n'), ((8279, 8288), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (8286, 8288), True, 'import matplotlib.pyplot as plt\n'), ((8708, 8723), 'numpy.all', 'np.all', (['inrange'], {}), '(inrange)\n', (8714, 8723), True, 'import numpy as np\n'), ((8737, 8752), 'numpy.sum', 'np.sum', (['inrange'], {}), '(inrange)\n', (8743, 8752), True, 'import numpy as np\n'), ((8769, 8790), 'numpy.interp', 'np.interp', (['xlim', 'x', 'y'], {}), '(xlim, x, y)\n', (8778, 8790), True, 'import numpy as np\n'), ((8809, 8832), 'numpy.interp', 'np.interp', (['xlim', 'x', 'ylo'], {}), '(xlim, x, ylo)\n', (8818, 8832), True, 'import numpy as np\n'), ((8851, 8874), 'numpy.interp', 'np.interp', (['xlim', 'x', 'yhi'], {}), '(xlim, x, yhi)\n', (8860, 8874), True, 'import numpy as np\n'), ((8887, 8922), 'numpy.insert', 'np.insert', (['x[inrange]', '[0, n]', 'xlim'], {}), '(x[inrange], [0, n], xlim)\n', (8896, 8922), True, 'import numpy as np\n'), ((8935, 8971), 'numpy.insert', 'np.insert', (['y[inrange]', '[0, n]', 'yends'], {}), '(y[inrange], [0, n], yends)\n', (8944, 8971), True, 'import numpy as np\n'), ((8986, 9026), 'numpy.insert', 'np.insert', (['ylo[inrange]', '[0, n]', 'yloends'], {}), '(ylo[inrange], [0, n], yloends)\n', (8995, 9026), True, 'import numpy as np\n'), ((9041, 9081), 'numpy.insert', 
'np.insert', (['yhi[inrange]', '[0, n]', 'yhiends'], {}), '(yhi[inrange], [0, n], yhiends)\n', (9050, 9081), True, 'import numpy as np\n'), ((11137, 11148), 'numpy.sin', 'np.sin', (['dec'], {}), '(dec)\n', (11143, 11148), True, 'import numpy as np\n'), ((11164, 11175), 'numpy.cos', 'np.cos', (['dec'], {}), '(dec)\n', (11170, 11175), True, 'import numpy as np\n'), ((11188, 11198), 'numpy.cos', 'np.cos', (['ra'], {}), '(ra)\n', (11194, 11198), True, 'import numpy as np\n'), ((11211, 11221), 'numpy.sin', 'np.sin', (['ra'], {}), '(ra)\n', (11217, 11221), True, 'import numpy as np\n'), ((11496, 11570), 'mayavi.mlab.plot3d', 'mlab.plot3d', (['xx', 'yy', 'zz'], {'color': '(0.7, 0.7, 0.7)', 'line_width': '(0.5)', 'figure': 'fig'}), '(xx, yy, zz, color=(0.7, 0.7, 0.7), line_width=0.5, figure=fig)\n', (11507, 11570), False, 'from mayavi import mlab\n'), ((11656, 11668), 'numpy.max', 'np.max', (['dist'], {}), '(dist)\n', (11662, 11668), True, 'import numpy as np\n'), ((12195, 12223), 'mayavi.mlab.view', 'mlab.view', (['*view'], {'figure': 'fig'}), '(*view, figure=fig)\n', (12204, 12223), False, 'from mayavi import mlab\n'), ((12363, 12389), 'numpy.cos', 'np.cos', (['(az * np.pi / 180.0)'], {}), '(az * np.pi / 180.0)\n', (12369, 12389), True, 'import numpy as np\n'), ((12404, 12430), 'numpy.sin', 'np.sin', (['(az * np.pi / 180.0)'], {}), '(az * np.pi / 180.0)\n', (12410, 12430), True, 'import numpy as np\n'), ((12441, 12467), 'numpy.cos', 'np.cos', (['(el * np.pi / 180.0)'], {}), '(el * np.pi / 180.0)\n', (12447, 12467), True, 'import numpy as np\n'), ((12654, 12678), 'numpy.sqrt', 'np.sqrt', (['(1.0 - xoff ** 2)'], {}), '(1.0 - xoff ** 2)\n', (12661, 12678), True, 'import numpy as np\n'), ((12988, 13084), 'mayavi.mlab.text3d', 'mlab.text3d', (['(xx + xoff)', '(yy + yoff)', '(zz + zoff)', 'label'], {'figure': 'fig', 'color': '(1, 1, 1)', 'scale': 'size'}), '(xx + xoff, yy + yoff, zz + zoff, label, figure=fig, color=(1, 1,\n 1), scale=size)\n', (12999, 13084), False, 'from mayavi import mlab\n'), ((13290, 13310), 'numpy.insert', 'np.insert', (['a', '(0)', '(0.0)'], {}), '(a, 0, 0.0)\n', (13299, 13310), True, 'import numpy as np\n'), ((13796, 13824), 'mayavi.mlab.view', 'mlab.view', (['*view'], {'figure': 'fig'}), '(*view, figure=fig)\n', (13805, 13824), False, 'from mayavi import mlab\n'), ((2167, 2187), 'numpy.log10', 'np.log10', (['(x + errpos)'], {}), '(x + errpos)\n', (2175, 2187), True, 'import numpy as np\n'), ((3136, 3147), 'numpy.log10', 'np.log10', (['x'], {}), '(x)\n', (3144, 3147), True, 'import numpy as np\n'), ((3149, 3160), 'numpy.log10', 'np.log10', (['y'], {}), '(y)\n', (3157, 3160), True, 'import numpy as np\n'), ((3452, 3470), 'numpy.insert', 'np.insert', (['d', '(0)', '(0)'], {}), '(d, 0, 0)\n', (3461, 3470), True, 'import numpy as np\n'), ((3628, 3649), 'numpy.interp', 'np.interp', (['xlbl', 'x', 'y'], {}), '(xlbl, x, y)\n', (3637, 3649), True, 'import numpy as np\n'), ((5024, 5038), 'numpy.log10', 'np.log10', (['xlim'], {}), '(xlim)\n', (5032, 5038), True, 'import numpy as np\n'), ((5083, 5097), 'numpy.log10', 'np.log10', (['ylim'], {}), '(ylim)\n', (5091, 5097), True, 'import numpy as np\n'), ((5620, 5631), 'numpy.log10', 'np.log10', (['l'], {}), '(l)\n', (5628, 5631), True, 'import numpy as np\n'), ((7719, 7747), 'numpy.allclose', 'np.allclose', (['dx', 'dx[0]', '(0.01)'], {}), '(dx, dx[0], 0.01)\n', (7730, 7747), True, 'import numpy as np\n'), ((7755, 7783), 'numpy.allclose', 'np.allclose', (['dy', 'dy[0]', '(0.01)'], {}), '(dy, dy[0], 0.01)\n', (7766, 7783), True, 'import numpy 
as np\n'), ((10847, 10861), 'numpy.isscalar', 'np.isscalar', (['v'], {}), '(v)\n', (10858, 10861), True, 'import numpy as np\n'), ((10826, 10843), 'numpy.array', 'np.array', (['([v] * n)'], {}), '([v] * n)\n', (10834, 10843), True, 'import numpy as np\n'), ((12617, 12643), 'numpy.sqrt', 'np.sqrt', (['(xc ** 2 + yc ** 2)'], {}), '(xc ** 2 + yc ** 2)\n', (12624, 12643), True, 'import numpy as np\n'), ((13207, 13218), 'numpy.cos', 'np.cos', (['dec'], {}), '(dec)\n', (13213, 13218), True, 'import numpy as np\n'), ((13233, 13242), 'numpy.cos', 'np.cos', (['t'], {}), '(t)\n', (13239, 13242), True, 'import numpy as np\n'), ((13246, 13255), 'numpy.sin', 'np.sin', (['t'], {}), '(t)\n', (13252, 13255), True, 'import numpy as np\n'), ((2769, 2785), 'numpy.nonzero', 'np.nonzero', (['gaps'], {}), '(gaps)\n', (2779, 2785), True, 'import numpy as np\n'), ((3517, 3539), 'numpy.interp', 'np.interp', (['xfrac', 'f', 'a'], {}), '(xfrac, f, a)\n', (3526, 3539), True, 'import numpy as np\n'), ((1794, 1808), 'numpy.abs', 'np.abs', (['errneg'], {}), '(errneg)\n', (1800, 1808), True, 'import numpy as np\n'), ((2085, 2099), 'numpy.abs', 'np.abs', (['errneg'], {}), '(errneg)\n', (2091, 2099), True, 'import numpy as np\n'), ((3404, 3414), 'numpy.diff', 'np.diff', (['x'], {}), '(x)\n', (3411, 3414), True, 'import numpy as np\n'), ((3420, 3430), 'numpy.diff', 'np.diff', (['y'], {}), '(y)\n', (3427, 3430), True, 'import numpy as np\n')] |
from django.shortcuts import render,redirect
from django.http import HttpResponse,HttpResponseRedirect
from django.views import generic
from django.contrib.auth.decorators import login_required
from django.utils.decorators import method_decorator
from .models import Character,Comic,Power,CharacterPower,CharacterComic
from django_filters.views import FilterView
from .filters import Marvel_worldFilter,Marvel_comicFilter
from .forms import CharacterForm,PowerForm,ComicForm
from django.urls import reverse,reverse_lazy
def index(request):
	return HttpResponse("Hello, world. You're at the Marvel World super hero index.")
class AboutPageView(generic.TemplateView):
template_name = 'marvel_world/about.html'
class HomePageView(generic.TemplateView):
template_name = 'marvel_world/home.html'
@method_decorator(login_required, name='dispatch')
class CharacterListView(generic.ListView):
model = Character
context_object_name = 'characters'
template_name = 'marvel_world/characters.html'
paginate_by = 50
def get_queryset(self):
return Character.objects.all().select_related('alignment','eye_color','skin_color','hair_color','race','gender','publisher').order_by('character_name')
@method_decorator(login_required, name='dispatch')
class CharacterDetailView(generic.DetailView):
model = Character
context_object_name= 'character'
template_name = 'marvel_world/character_information.html'
@method_decorator(login_required, name='dispatch')
class ComicListView(generic.ListView):
model = Comic
context_object_name = 'comics'
template_name = 'marvel_world/comics.html'
paginate_by = 600
def get_queryset(self):
return Comic.objects.all().order_by('comic_name')
@method_decorator(login_required, name='dispatch')
class ComicDetailView(generic.DetailView):
model = Comic
context_object_name= 'comic'
template_name = 'marvel_world/comic_information.html'
@method_decorator(login_required, name='dispatch')
class PowerListView(generic.ListView):
model = Power
context_object_name = 'powers'
template_name = 'marvel_world/super_power.html'
paginate_by = 50
def get_queryset(self):
return Power.objects.all().order_by('power_name')
@method_decorator(login_required, name='dispatch')
class PowerDetailView(generic.DetailView):
model = Power
context_object_name= 'power'
template_name = 'marvel_world/super_power_information.html'
@method_decorator(login_required, name='dispatch')
class CharacterFilterView(FilterView):
filterset_class = Marvel_worldFilter
template_name = 'marvel_world/character_filter.html'
@method_decorator(login_required, name='dispatch')
class ComicFilterView(FilterView):
filterset_class = Marvel_comicFilter
template_name = 'marvel_world/comic_filter.html'
@method_decorator(login_required, name='dispatch')
class CharacterCreateView(generic.View):
model = Character
form_class = CharacterForm
success_message = "Character created successfully"
template_name = 'marvel_world/character_new.html'
# fields = '__all__' <-- superseded by form_class
# success_url = reverse_lazy('heritagesites/site_list')
def dispatch(self, *args, **kwargs):
return super().dispatch(*args, **kwargs)
def post(self, request):
form = CharacterForm(request.POST)
if form.is_valid():
character = form.save(commit=False)
character.save()
for power in form.cleaned_data['super_power']:
CharacterPower.objects.create(character=character, power=power)
for comic in form.cleaned_data['comics']:
CharacterComic.objects.create(character=character, comic=comic)
return redirect(character) # shortcut to object's get_absolute_url()
# return HttpResponseRedirect(site.get_absolute_url())
return render(request, 'marvel_world/character_new.html', {'form': form})
def get(self, request):
form = CharacterForm()
return render(request, 'marvel_world/character_new.html', {'form': form})
@method_decorator(login_required, name='dispatch')
class PowerCreateView(generic.View):
model = Power
form_class = PowerForm
success_message = "Super power created successfully"
template_name = 'marvel_world/power_new.html'
# fields = '__all__' <-- superseded by form_class
# success_url = reverse_lazy('heritagesites/site_list')
def dispatch(self, *args, **kwargs):
return super().dispatch(*args, **kwargs)
def post(self, request):
form = PowerForm(request.POST)
if form.is_valid():
power = form.save(commit=False)
power.save()
for character in form.cleaned_data['character']:
CharacterPower.objects.create(character=character, power=power)
return redirect(power) # shortcut to object's get_absolute_url()
# return HttpResponseRedirect(site.get_absolute_url())
return render(request, 'marvel_world/power_new.html', {'form': form})
def get(self, request):
form = PowerForm()
return render(request, 'marvel_world/power_new.html', {'form': form})
@method_decorator(login_required, name='dispatch')
class ComicCreateView(generic.View):
model = Comic
form_class = ComicForm
success_message = "Comic created successfully"
template_name = 'marvel_world/comic_new.html'
# fields = '__all__' <-- superseded by form_class
# success_url = reverse_lazy('heritagesites/site_list')
def dispatch(self, *args, **kwargs):
return super().dispatch(*args, **kwargs)
def post(self, request):
form = ComicForm(request.POST)
if form.is_valid():
comic = form.save(commit=False)
comic.save()
for character in form.cleaned_data['character']:
CharacterComic.objects.create(character=character, comic=comic)
return redirect(comic) # shortcut to object's get_absolute_url()
# return HttpResponseRedirect(site.get_absolute_url())
return render(request, 'marvel_world/comic_new.html', {'form': form})
def get(self, request):
form = ComicForm()
return render(request, 'marvel_world/comic_new.html', {'form': form})
@method_decorator(login_required, name='dispatch')
class CharacterUpdateView(generic.UpdateView):
model = Character
form_class = CharacterForm
# fields = '__all__' <-- superseded by form_class
context_object_name = 'character'
# pk_url_kwarg = 'site_pk'
success_message = "Character updated successfully"
template_name = 'marvel_world/character_update.html'
def dispatch(self, *args, **kwargs):
return super().dispatch(*args, **kwargs)
def form_valid(self, form):
character = form.save(commit=False)
# site.updated_by = self.request.user
# site.date_updated = timezone.now()
character.save()
		# power_id values currently linked to this character
old_ids = CharacterPower.objects\
.values_list('power_id', flat=True)\
.filter(character_id=character.character_id)
		# Powers selected in the form
new_powers = form.cleaned_data['super_power']
# TODO can these loops be refactored?
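		# One option (a sketch, untested): diff the id sets and act on the result,
		# e.g. add = {p.power_id for p in new_powers} - set(old_ids) and
		# drop = set(old_ids) - {p.power_id for p in new_powers}.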
# New ids
new_ids = []
		# Link any newly selected powers
for power in new_powers:
new_id = power.power_id
new_ids.append(new_id)
if new_id in old_ids:
continue
else:
CharacterPower.objects \
.create(character=character, power=power)
		# Unlink powers that were deselected
for old_id in old_ids:
if old_id in new_ids:
continue
else:
CharacterPower.objects \
.filter(character_id=character.character_id, power_id=old_id) \
.delete()
old_ids1 = CharacterComic.objects\
.values_list('comic_id', flat=True)\
.filter(character_id=character.character_id)
		# Comics selected in the form
new_comics = form.cleaned_data['comics']
# TODO can these loops be refactored?
# New ids
new_ids1 = []
		# Link any newly selected comics
for comic in new_comics:
new_id1 = comic.comic_id
new_ids1.append(new_id1)
if new_id1 in old_ids1:
continue
else:
CharacterComic.objects \
.create(character=character, comic=comic)
		# Unlink comics that were deselected
for old_id1 in old_ids1:
if old_id1 in new_ids1:
continue
else:
CharacterComic.objects \
.filter(character_id=character.character_id, comic_id=old_id1) \
.delete()
return HttpResponseRedirect(character.get_absolute_url())
@method_decorator(login_required, name='dispatch')
class PowerUpdateView(generic.UpdateView):
model = Power
form_class = PowerForm
# fields = '__all__' <-- superseded by form_class
context_object_name = 'power'
# pk_url_kwarg = 'site_pk'
success_message = "Super power updated successfully"
template_name = 'marvel_world/power_update.html'
def dispatch(self, *args, **kwargs):
return super().dispatch(*args, **kwargs)
def form_valid(self, form):
power = form.save(commit=False)
# site.updated_by = self.request.user
# site.date_updated = timezone.now()
power.save()
		# character_id values currently linked to this power
old_ids = CharacterPower.objects\
.values_list('character_id', flat=True)\
.filter(power_id=power.power_id)
		# Characters selected in the form
new_chs = form.cleaned_data['character']
# TODO can these loops be refactored?
# New ids
new_ids = []
		# Link any newly selected characters
for character in new_chs:
new_id = character.character_id
new_ids.append(new_id)
if new_id in old_ids:
continue
else:
CharacterPower.objects \
.create(character=character, power=power)
		# Unlink characters that were deselected
for old_id in old_ids:
if old_id in new_ids:
continue
else:
CharacterPower.objects \
.filter(character_id=old_id, power_id=power.power_id) \
.delete()
return HttpResponseRedirect(power.get_absolute_url())
# return redirect('heritagesites/site_detail', pk=site.pk)
@method_decorator(login_required, name='dispatch')
class ComicUpdateView(generic.UpdateView):
model = Comic
form_class = ComicForm
# fields = '__all__' <-- superseded by form_class
context_object_name = 'comic'
# pk_url_kwarg = 'site_pk'
success_message = "Comic updated successfully"
template_name = 'marvel_world/comic_update.html'
def dispatch(self, *args, **kwargs):
return super().dispatch(*args, **kwargs)
def form_valid(self, form):
comic = form.save(commit=False)
# site.updated_by = self.request.user
# site.date_updated = timezone.now()
comic.save()
		# character_id values currently linked to this comic
old_ids = CharacterComic.objects\
.values_list('character_id', flat=True)\
.filter(comic_id=comic.comic_id)
		# Characters selected in the form
new_chs = form.cleaned_data['character']
# TODO can these loops be refactored?
# New ids
new_ids = []
		# Link any newly selected characters
for character in new_chs:
new_id = character.character_id
new_ids.append(new_id)
if new_id in old_ids:
continue
else:
CharacterComic.objects \
.create(character=character, comic=comic)
		# Unlink characters that were deselected
for old_id in old_ids:
if old_id in new_ids:
continue
else:
CharacterComic.objects \
.filter(character_id=old_id, comic_id=comic.comic_id) \
.delete()
return HttpResponseRedirect(comic.get_absolute_url())
@method_decorator(login_required, name='dispatch')
class CharacterDeleteView(generic.DeleteView):
model =Character
success_message = "Character deleted successfully"
success_url = reverse_lazy('characters')
context_object_name = 'character'
template_name = 'marvel_world/character_delete.html'
def dispatch(self, *args, **kwargs):
return super().dispatch(*args, **kwargs)
def delete(self, request, *args, **kwargs):
self.object = self.get_object()
		# Delete related CharacterPower and CharacterComic entries first
CharacterPower.objects \
.filter(character_id=self.object.character_id) \
.delete()
CharacterComic.objects \
.filter(character_id=self.object.character_id) \
.delete()
self.object.delete()
return HttpResponseRedirect(self.get_success_url())
@method_decorator(login_required, name='dispatch')
class PowerDeleteView(generic.DeleteView):
model =Power
success_message = "Super power deleted successfully"
success_url = reverse_lazy('super_power')
context_object_name = 'power'
template_name = 'marvel_world/power_delete.html'
def dispatch(self, *args, **kwargs):
return super().dispatch(*args, **kwargs)
def delete(self, request, *args, **kwargs):
self.object = self.get_object()
		# Delete related CharacterPower entries first
CharacterPower.objects \
.filter(power_id=self.object.power_id) \
.delete()
self.object.delete()
return HttpResponseRedirect(self.get_success_url())
@method_decorator(login_required, name='dispatch')
class ComicDeleteView(generic.DeleteView):
model =Comic
success_message = "Comic deleted successfully"
success_url = reverse_lazy('comics')
context_object_name = 'comic'
template_name = 'marvel_world/comic_delete.html'
def dispatch(self, *args, **kwargs):
return super().dispatch(*args, **kwargs)
def delete(self, request, *args, **kwargs):
self.object = self.get_object()
		# Delete related CharacterComic entries first
CharacterComic.objects \
.filter(comic_id=self.object.comic_id) \
.delete()
self.object.delete()
return HttpResponseRedirect(self.get_success_url()) | [
"django.shortcuts.render",
"django.http.HttpResponse",
"django.utils.decorators.method_decorator",
"django.shortcuts.redirect",
"django.urls.reverse_lazy"
] | [((792, 841), 'django.utils.decorators.method_decorator', 'method_decorator', (['login_required'], {'name': '"""dispatch"""'}), "(login_required, name='dispatch')\n", (808, 841), False, 'from django.utils.decorators import method_decorator\n'), ((1187, 1236), 'django.utils.decorators.method_decorator', 'method_decorator', (['login_required'], {'name': '"""dispatch"""'}), "(login_required, name='dispatch')\n", (1203, 1236), False, 'from django.utils.decorators import method_decorator\n'), ((1407, 1456), 'django.utils.decorators.method_decorator', 'method_decorator', (['login_required'], {'name': '"""dispatch"""'}), "(login_required, name='dispatch')\n", (1423, 1456), False, 'from django.utils.decorators import method_decorator\n'), ((1685, 1734), 'django.utils.decorators.method_decorator', 'method_decorator', (['login_required'], {'name': '"""dispatch"""'}), "(login_required, name='dispatch')\n", (1701, 1734), False, 'from django.utils.decorators import method_decorator\n'), ((1889, 1938), 'django.utils.decorators.method_decorator', 'method_decorator', (['login_required'], {'name': '"""dispatch"""'}), "(login_required, name='dispatch')\n", (1905, 1938), False, 'from django.utils.decorators import method_decorator\n'), ((2171, 2220), 'django.utils.decorators.method_decorator', 'method_decorator', (['login_required'], {'name': '"""dispatch"""'}), "(login_required, name='dispatch')\n", (2187, 2220), False, 'from django.utils.decorators import method_decorator\n'), ((2381, 2430), 'django.utils.decorators.method_decorator', 'method_decorator', (['login_required'], {'name': '"""dispatch"""'}), "(login_required, name='dispatch')\n", (2397, 2430), False, 'from django.utils.decorators import method_decorator\n'), ((2563, 2612), 'django.utils.decorators.method_decorator', 'method_decorator', (['login_required'], {'name': '"""dispatch"""'}), "(login_required, name='dispatch')\n", (2579, 2612), False, 'from django.utils.decorators import method_decorator\n'), ((2737, 2786), 'django.utils.decorators.method_decorator', 'method_decorator', (['login_required'], {'name': '"""dispatch"""'}), "(login_required, name='dispatch')\n", (2753, 2786), False, 'from django.utils.decorators import method_decorator\n'), ((3878, 3927), 'django.utils.decorators.method_decorator', 'method_decorator', (['login_required'], {'name': '"""dispatch"""'}), "(login_required, name='dispatch')\n", (3894, 3927), False, 'from django.utils.decorators import method_decorator\n'), ((4866, 4915), 'django.utils.decorators.method_decorator', 'method_decorator', (['login_required'], {'name': '"""dispatch"""'}), "(login_required, name='dispatch')\n", (4882, 4915), False, 'from django.utils.decorators import method_decorator\n'), ((6004, 6053), 'django.utils.decorators.method_decorator', 'method_decorator', (['login_required'], {'name': '"""dispatch"""'}), "(login_required, name='dispatch')\n", (6020, 6053), False, 'from django.utils.decorators import method_decorator\n'), ((8212, 8261), 'django.utils.decorators.method_decorator', 'method_decorator', (['login_required'], {'name': '"""dispatch"""'}), "(login_required, name='dispatch')\n", (8228, 8261), False, 'from django.utils.decorators import method_decorator\n'), ((9698, 9747), 'django.utils.decorators.method_decorator', 'method_decorator', (['login_required'], {'name': '"""dispatch"""'}), "(login_required, name='dispatch')\n", (9714, 9747), False, 'from django.utils.decorators import method_decorator\n'), ((11117, 11166), 'django.utils.decorators.method_decorator', 'method_decorator', 
(['login_required'], {'name': '"""dispatch"""'}), "(login_required, name='dispatch')\n", (11133, 11166), False, 'from django.utils.decorators import method_decorator\n'), ((11886, 11935), 'django.utils.decorators.method_decorator', 'method_decorator', (['login_required'], {'name': '"""dispatch"""'}), "(login_required, name='dispatch')\n", (11902, 11935), False, 'from django.utils.decorators import method_decorator\n'), ((12542, 12591), 'django.utils.decorators.method_decorator', 'method_decorator', (['login_required'], {'name': '"""dispatch"""'}), "(login_required, name='dispatch')\n", (12558, 12591), False, 'from django.utils.decorators import method_decorator\n'), ((548, 615), 'django.http.HttpResponse', 'HttpResponse', (['"""Hello, world. You\'re at the marvel world super hero"""'], {}), '("Hello, world. You\'re at the marvel world super hero")\n', (560, 615), False, 'from django.http import HttpResponse, HttpResponseRedirect\n'), ((11299, 11325), 'django.urls.reverse_lazy', 'reverse_lazy', (['"""characters"""'], {}), "('characters')\n", (11311, 11325), False, 'from django.urls import reverse, reverse_lazy\n'), ((12062, 12089), 'django.urls.reverse_lazy', 'reverse_lazy', (['"""super_power"""'], {}), "('super_power')\n", (12074, 12089), False, 'from django.urls import reverse, reverse_lazy\n'), ((12712, 12734), 'django.urls.reverse_lazy', 'reverse_lazy', (['"""comics"""'], {}), "('comics')\n", (12724, 12734), False, 'from django.urls import reverse, reverse_lazy\n'), ((3683, 3749), 'django.shortcuts.render', 'render', (['request', '"""marvel_world/character_new.html"""', "{'form': form}"], {}), "(request, 'marvel_world/character_new.html', {'form': form})\n", (3689, 3749), False, 'from django.shortcuts import render, redirect\n'), ((3810, 3876), 'django.shortcuts.render', 'render', (['request', '"""marvel_world/character_new.html"""', "{'form': form}"], {}), "(request, 'marvel_world/character_new.html', {'form': form})\n", (3816, 3876), False, 'from django.shortcuts import render, redirect\n'), ((4683, 4745), 'django.shortcuts.render', 'render', (['request', '"""marvel_world/power_new.html"""', "{'form': form}"], {}), "(request, 'marvel_world/power_new.html', {'form': form})\n", (4689, 4745), False, 'from django.shortcuts import render, redirect\n'), ((4802, 4864), 'django.shortcuts.render', 'render', (['request', '"""marvel_world/power_new.html"""', "{'form': form}"], {}), "(request, 'marvel_world/power_new.html', {'form': form})\n", (4808, 4864), False, 'from django.shortcuts import render, redirect\n'), ((5665, 5727), 'django.shortcuts.render', 'render', (['request', '"""marvel_world/comic_new.html"""', "{'form': form}"], {}), "(request, 'marvel_world/comic_new.html', {'form': form})\n", (5671, 5727), False, 'from django.shortcuts import render, redirect\n'), ((5784, 5846), 'django.shortcuts.render', 'render', (['request', '"""marvel_world/comic_new.html"""', "{'form': form}"], {}), "(request, 'marvel_world/comic_new.html', {'form': form})\n", (5790, 5846), False, 'from django.shortcuts import render, redirect\n'), ((3554, 3573), 'django.shortcuts.redirect', 'redirect', (['character'], {}), '(character)\n', (3562, 3573), False, 'from django.shortcuts import render, redirect\n'), ((4558, 4573), 'django.shortcuts.redirect', 'redirect', (['power'], {}), '(power)\n', (4566, 4573), False, 'from django.shortcuts import render, redirect\n'), ((5540, 5555), 'django.shortcuts.redirect', 'redirect', (['comic'], {}), '(comic)\n', (5548, 5555), False, 'from django.shortcuts import render, 
redirect\n')] |
"""Set-up and execute the main loop"""
import RPi.GPIO as GPIO
import time
GPIO.setmode(GPIO.BCM)
GPIO.setwarnings(False)
#Right motor input A
GPIO.setup(18,GPIO.OUT)
#Right motor input B
GPIO.setup(23,GPIO.OUT)
GPIO.output(18,GPIO.HIGH)
GPIO.output(23,GPIO.LOW)
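
# Note: there is no GPIO.cleanup() call, so the pins (and the motor) stay in
# this state after the script exits.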
| [
"RPi.GPIO.setup",
"RPi.GPIO.setwarnings",
"RPi.GPIO.output",
"RPi.GPIO.setmode"
] | [((76, 98), 'RPi.GPIO.setmode', 'GPIO.setmode', (['GPIO.BCM'], {}), '(GPIO.BCM)\n', (88, 98), True, 'import RPi.GPIO as GPIO\n'), ((99, 122), 'RPi.GPIO.setwarnings', 'GPIO.setwarnings', (['(False)'], {}), '(False)\n', (115, 122), True, 'import RPi.GPIO as GPIO\n'), ((145, 169), 'RPi.GPIO.setup', 'GPIO.setup', (['(18)', 'GPIO.OUT'], {}), '(18, GPIO.OUT)\n', (155, 169), True, 'import RPi.GPIO as GPIO\n'), ((190, 214), 'RPi.GPIO.setup', 'GPIO.setup', (['(23)', 'GPIO.OUT'], {}), '(23, GPIO.OUT)\n', (200, 214), True, 'import RPi.GPIO as GPIO\n'), ((215, 241), 'RPi.GPIO.output', 'GPIO.output', (['(18)', 'GPIO.HIGH'], {}), '(18, GPIO.HIGH)\n', (226, 241), True, 'import RPi.GPIO as GPIO\n'), ((241, 266), 'RPi.GPIO.output', 'GPIO.output', (['(23)', 'GPIO.LOW'], {}), '(23, GPIO.LOW)\n', (252, 266), True, 'import RPi.GPIO as GPIO\n')] |
import pymysql
# 连接配置信息
config = {
'host': '127.0.0.1',
'port': 3306,
'user': 'root',
'password': '',
'db': 'classdata',
'charset': 'utf8',
'cursorclass': pymysql.cursors.DictCursor,
}
def get_summary_db(unitag):
    # Create a connection
    conn = pymysql.connect(**config)
    cur = conn.cursor()
    try:
        # Run the parameterized query
        sql = 'SELECT * FROM summary where unitag= %s'
        cur.execute(sql, unitag)
        # Fetch all results
result = cur.fetchall()
return result
finally:
cur.close()
conn.close()
def get_result_db(unitag):
    # Create a connection
    conn = pymysql.connect(**config)
    cur = conn.cursor()
    try:
        # Run the parameterized query
        sql = 'SELECT * FROM result where unitag= %s'
        cur.execute(sql, unitag)
        # Fetch all results
result = cur.fetchall()
return result
finally:
cur.close()
conn.close()
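
# The two functions above differ only in the table name. A shared helper is
# possible, but the table name would have to come from a hard-coded whitelist
# (e.g. 'summary' or 'result'), because table names cannot be bound as SQL
# parameters the way %s values can.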
| [
"pymysql.connect"
] | [((266, 291), 'pymysql.connect', 'pymysql.connect', ([], {}), '(**config)\n', (281, 291), False, 'import pymysql\n'), ((624, 649), 'pymysql.connect', 'pymysql.connect', ([], {}), '(**config)\n', (639, 649), False, 'import pymysql\n')] |
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import importlib
import os
import sys
import tempfile
from pyiree.tf import compiler
# Dynamically import tensorflow.
try:
# Use a dynamic import so as to avoid hermetic dependency analysis
# (i.e. we only want the tensorflow from the environment).
tf = importlib.import_module("tensorflow")
# Just in case if linked against a pre-V2 defaulted version.
if hasattr(tf, "enable_v2_behavior"):
tf.enable_v2_behavior()
tf = tf.compat.v2
except ImportError:
print("Not running tests because tensorflow is not available")
sys.exit(0)
class StatelessModule(tf.Module):
def __init__(self):
pass
@tf.function(input_signature=[
tf.TensorSpec([4], tf.float32),
tf.TensorSpec([4], tf.float32)
])
def add(self, a, b):
return tf.tanh(a + b)
class RuntimeTest(tf.test.TestCase):
def testLoadSavedModelToXlaPipeline(self):
"""Tests that a basic saved model to XLA workflow grossly functions.
This is largely here to verify that everything is linked in that needs to be
and that there are not no-ops, etc.
"""
with tempfile.TemporaryDirectory() as temp_dir:
sm_dir = os.path.join(temp_dir, "simple.sm")
print("Saving to:", sm_dir)
my_module = StatelessModule()
options = tf.saved_model.SaveOptions(save_debug_info=True)
tf.saved_model.save(my_module, sm_dir, options=options)
# Load it up.
input_module = compiler.tf_load_saved_model(sm_dir)
xla_asm = input_module.to_asm()
print("XLA ASM:", xla_asm)
self.assertRegex(xla_asm, "mhlo.tanh")
if __name__ == "__main__":
tf.test.main()
| [
"tempfile.TemporaryDirectory",
"importlib.import_module",
"os.path.join",
"sys.exit",
"pyiree.tf.compiler.tf_load_saved_model"
] | [((949, 986), 'importlib.import_module', 'importlib.import_module', (['"""tensorflow"""'], {}), "('tensorflow')\n", (972, 986), False, 'import importlib\n'), ((1225, 1236), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (1233, 1236), False, 'import sys\n'), ((1765, 1794), 'tempfile.TemporaryDirectory', 'tempfile.TemporaryDirectory', ([], {}), '()\n', (1792, 1794), False, 'import tempfile\n'), ((1823, 1858), 'os.path.join', 'os.path.join', (['temp_dir', '"""simple.sm"""'], {}), "(temp_dir, 'simple.sm')\n", (1835, 1858), False, 'import os\n'), ((2098, 2134), 'pyiree.tf.compiler.tf_load_saved_model', 'compiler.tf_load_saved_model', (['sm_dir'], {}), '(sm_dir)\n', (2126, 2134), False, 'from pyiree.tf import compiler\n')] |
#!/usr/bin/env python3
"""
script for calculating gc skew
<NAME>
<EMAIL>
"""
# python modules
import os
import sys
import argparse
import numpy as np
from scipy import signal
from itertools import cycle, product
# plotting modules
from matplotlib import use as mplUse
mplUse('Agg')
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
plt.rcParams['pdf.fonttype'] = 42
from matplotlib import rc
rc('font',**{'family':'sans-serif','sans-serif':['Helvetica']})
# ctb
from ctbBio.fasta import iterate_fasta as parse_fasta
def plot_two(title, subtitle, A, B, labels, legend, vert = False):
"""
plot with differnt y axes
title = title for chart
A = data for left axis [[x], [y]]
B = data for right axis
lables = [left label, right label, x label]
legend = [[left legend], [right legend]]
"""
fig, ax1 = plt.subplots()
colors = ['0.75', 'b', 'r', 'c', 'y', 'm', 'k', 'g']
a_colors = cycle(colors)
b_colors = cycle(colors[::-1])
a_label = cycle(legend[0])
b_label = cycle(legend[1])
# plot left axis and x - axis
for a in A:
x, y = a
ax1.set_ylabel(labels[0], labelpad = 3)
ax1.set_xlabel(labels[-1])
ax1.plot(x, y, c = next(a_colors), marker = 'o', ms = 4, label = next(a_label))
# add vertical lines
if vert is not False:
for i in vert:
x, c = i
ax1.axvline(x = x, c = c, label = next(a_label), linewidth = 2)
# plot right axis
ax2 = ax1.twinx()
for b in B:
x, y = b
ax2.set_ylabel(labels[1], labelpad = 8)
ax2.plot(x, y, c = next(b_colors), linewidth = 2, label = next(b_label))
    xmin = min([min(i[0]) for i in A] + [min(i[0]) for i in B])
xmax = max([max(i[0]) for i in A] + [max(i[0]) for i in B])
ax2.set_xlim(xmin, xmax)
# title
plt.suptitle(title, fontsize = 16)
plt.title(subtitle, fontsize = 10)
# legend
ax1.legend(loc = 'upper left', \
bbox_to_anchor=(0.55, -0.125), \
prop = {'size':8}, \
framealpha = 0.0
)
plt.legend(loc = 'upper right', \
bbox_to_anchor=(0.45, -0.125), \
prop = {'size':8}, \
framealpha = 0.0\
)
# save
pdf = PdfPages('%s.pdf' % title.replace(' ', '_'))
pdf.savefig(bbox_inches = 'tight')
plt.close()
pdf.close()
def check_peaks(peaks, length):
"""
select pair of min and max that are not too close or
too far apart and have greatest y distance between one another
"""
# if ori/ter peaks are too close or too far apart, they are probably wrong
closest, farthest = int(length * float(0.45)), int(length * float(0.55))
pairs = []
for pair in list(product(*peaks)):
### added this to make sure gets origin and ter right
tr, pk = sorted(list(pair), key = lambda x: x[1], reverse = False) # trough and peak
a = (tr[0] - pk[0]) % length
b = (pk[0] - tr[0]) % length
pt = abs(tr[1] - pk[1]) # distance between values
if (a <= farthest and a >= closest) or (b <=farthest and b >= closest):
pairs.append([pt, tr, pk])
if len(pairs) == 0:
return [False, False]
pt, tr, pk = sorted(pairs, reverse = True)[0]
return [tr[0], pk[0]]
def find_ori_ter(c_skew, length):
"""
find origin and terminus of replication based on
cumulative GC Skew
"""
# find origin and terminus of replication based on
# cumulative gc skew min and max peaks
c_skew_min = signal.argrelextrema(np.asarray(c_skew[1]), np.less, order = 1)[0].tolist()
c_skew_max = signal.argrelextrema(np.asarray(c_skew[1]), np.greater, order = 1)[0].tolist()
# return False if no peaks were detected
    if len(c_skew_min) == 0 or len(c_skew_max) == 0:
return [False, False]
else:
c_skew_min = [[c_skew[0][i], c_skew[1][i]] for i in c_skew_min]
c_skew_max = [[c_skew[0][i], c_skew[1][i]] for i in c_skew_max]
ori, ter = check_peaks([c_skew_min, c_skew_max], length)
return ori, ter
def gc_skew(name, length, seq, window, slide, plot_skew):
"""
calculate gc skew and cumulative sum of gc skew over sequence windows
gc skew = ((G - C) / (G + C)) * window size * genome length
"""
# convert to G - C
replacements = {'G':1, 'C':-1, 'A':0, 'T':0, 'N':0}
gmc = [] # G - C
for base in seq:
try:
gmc.append(replacements[base])
        except KeyError:
gmc.append(0)
# convert to G + C
gpc = [abs(i) for i in gmc] # G + C
# calculate sliding windows for (G - C) and (G + C)
weights = np.ones(window)/window
gmc = [[i, c] for i, c in enumerate(signal.fftconvolve(gmc, weights, 'same').tolist())]
gpc = [[i, c] for i, c in enumerate(signal.fftconvolve(gpc, weights, 'same').tolist())]
    # calculate gc skew and cumulative gc skew sum
    skew = [[], []] # x and y for gc skew
    c_skew = [[], []] # x and y for gc skew cumulative sums
    cs = 0 # cumulative sum
# select windows to use based on slide
for i, m in gmc[0::slide]:
p = gpc[i][1]
if p == 0:
gcs = 0
else:
gcs = m/p
cs += gcs
skew[0].append(i)
c_skew[0].append(i)
skew[1].append(gcs)
c_skew[1].append(cs)
ori, ter = find_ori_ter(c_skew, length)
# plot data
if plot_skew is True:
title = '%s GC Skew' % (name)
subtitle = '(window = %s, slide = %s)' % (window, slide)
labels = ['GC Skew', 'Cumulative GC Skew', 'Position on Genome (bp)']
# remove some points for plotting (approx. 1,000 datapoints)
N = int(len(skew[0])/1000)
if N != 0:
skew = [skew[0][0::N], skew[1][0::N]]
if ori is False:
plot_two(title, subtitle, [skew], [c_skew], labels, \
[[labels[0]], [labels[1]]])
else:
plot_two(title, subtitle, [skew], [c_skew], labels, \
[[labels[0], 'Ori:%s' % ('{:,}'.format(ori)), \
'Ter:%s' % ('{:,}'.format(ter))], [labels[1]]], \
vert = [(ori, 'r'), (ter, 'b')])
return ori, ter, skew, c_skew
def parse_genomes(fastas, single):
"""
generator for parsing fastas
if single is True, combine sequences in multifasta file
"""
if single is True:
for genome in fastas:
sequence = []
for seq in parse_fasta(genome):
sequence.extend(list(seq[1].upper()))
yield (genome.name.rsplit('.', 1)[0], len(sequence), sequence)
else:
for genome in fastas:
for seq in parse_fasta(genome):
ID = seq[0].split('>', 1)[1].split()[0]
yield (ID, len(seq[1]), list(seq[1].upper()))
def open_files(files):
"""
open files in list, use stdin if first
item in list is '-'
"""
if files is None:
return files
if files[0] == '-':
return (sys.stdin)
return (open(i) for i in files)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description = \
'# calculate gc skew and find Ori and Ter of replication')
parser.add_argument(\
'-f', nargs = '*', action = 'store', required = True, \
help = 'fasta(s)')
parser.add_argument(\
'-l', default = False, type = int, \
help = 'minimum contig length (default = 10 x window)')
parser.add_argument(\
'-w', default = 1000, type = int, \
help = 'window length (default = 1000)')
parser.add_argument(\
'-s', default = 10, type = int, \
help = 'slide length (default = 10)')
parser.add_argument(\
'--single', action = 'store_true', \
help = 'combine multi-fasta sequences into single genome')
parser.add_argument(\
'--no-plot', action = 'store_false', \
help = 'do not generate plots, print GC Skew to stdout')
args = vars(parser.parse_args())
fastas = open_files(args['f'])
single, plot_skew = args['single'], args['no_plot']
window, slide = args['w'], args['s']
min_len = args['l']
if min_len is False:
min_len = 10 * window
for name, length, seq in parse_genomes(fastas, single):
if length < min_len:
print('%s: Too Short' % (name), file=sys.stderr)
continue
ori, ter, skew, c_skew = gc_skew(name, length, seq, window, slide, plot_skew)
if ori == False:
ori, ter = 'n/a', 'n/a'
else:
ori, ter = '{:,}'.format(ori), '{:,}'.format(ter)
print('%s -> Origin: %s Terminus: %s' \
% (name, ori, ter), file=sys.stderr)
if plot_skew is False:
print('\t'.join(['# Name', 'Position', 'GC Skew', 'Cumulative GC Skew']))
for i, pos in enumerate(skew[0]):
out = [name, pos, skew[1][i], c_skew[1][i]]
print('\t'.join([str(i) for i in out]))
| [
"ctbBio.fasta.iterate_fasta",
"itertools.cycle",
"numpy.ones",
"argparse.ArgumentParser",
"matplotlib.use",
"itertools.product",
"numpy.asarray",
"scipy.signal.fftconvolve",
"matplotlib.pyplot.suptitle",
"matplotlib.pyplot.close",
"matplotlib.rc",
"matplotlib.pyplot.title",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.legend"
] | [((272, 285), 'matplotlib.use', 'mplUse', (['"""Agg"""'], {}), "('Agg')\n", (278, 285), True, 'from matplotlib import use as mplUse\n'), ((431, 498), 'matplotlib.rc', 'rc', (['"""font"""'], {}), "('font', **{'family': 'sans-serif', 'sans-serif': ['Helvetica']})\n", (433, 498), False, 'from matplotlib import rc\n'), ((872, 886), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (884, 886), True, 'import matplotlib.pyplot as plt\n'), ((959, 972), 'itertools.cycle', 'cycle', (['colors'], {}), '(colors)\n', (964, 972), False, 'from itertools import cycle, product\n'), ((988, 1007), 'itertools.cycle', 'cycle', (['colors[::-1]'], {}), '(colors[::-1])\n', (993, 1007), False, 'from itertools import cycle, product\n'), ((1022, 1038), 'itertools.cycle', 'cycle', (['legend[0]'], {}), '(legend[0])\n', (1027, 1038), False, 'from itertools import cycle, product\n'), ((1053, 1069), 'itertools.cycle', 'cycle', (['legend[1]'], {}), '(legend[1])\n', (1058, 1069), False, 'from itertools import cycle, product\n'), ((1858, 1890), 'matplotlib.pyplot.suptitle', 'plt.suptitle', (['title'], {'fontsize': '(16)'}), '(title, fontsize=16)\n', (1870, 1890), True, 'import matplotlib.pyplot as plt\n'), ((1897, 1929), 'matplotlib.pyplot.title', 'plt.title', (['subtitle'], {'fontsize': '(10)'}), '(subtitle, fontsize=10)\n', (1906, 1929), True, 'import matplotlib.pyplot as plt\n'), ((2118, 2217), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper right"""', 'bbox_to_anchor': '(0.45, -0.125)', 'prop': "{'size': 8}", 'framealpha': '(0.0)'}), "(loc='upper right', bbox_to_anchor=(0.45, -0.125), prop={'size': \n 8}, framealpha=0.0)\n", (2128, 2217), True, 'import matplotlib.pyplot as plt\n'), ((2394, 2405), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (2403, 2405), True, 'import matplotlib.pyplot as plt\n'), ((7128, 7227), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""# calculate gc skew and find Ori and Ter of replication"""'}), "(description=\n '# calculate gc skew and find Ori and Ter of replication')\n", (7151, 7227), False, 'import argparse\n'), ((2787, 2802), 'itertools.product', 'product', (['*peaks'], {}), '(*peaks)\n', (2794, 2802), False, 'from itertools import cycle, product\n'), ((4690, 4705), 'numpy.ones', 'np.ones', (['window'], {}), '(window)\n', (4697, 4705), True, 'import numpy as np\n'), ((6496, 6515), 'ctbBio.fasta.iterate_fasta', 'parse_fasta', (['genome'], {}), '(genome)\n', (6507, 6515), True, 'from ctbBio.fasta import iterate_fasta as parse_fasta\n'), ((6710, 6729), 'ctbBio.fasta.iterate_fasta', 'parse_fasta', (['genome'], {}), '(genome)\n', (6721, 6729), True, 'from ctbBio.fasta import iterate_fasta as parse_fasta\n'), ((3606, 3627), 'numpy.asarray', 'np.asarray', (['c_skew[1]'], {}), '(c_skew[1])\n', (3616, 3627), True, 'import numpy as np\n'), ((3699, 3720), 'numpy.asarray', 'np.asarray', (['c_skew[1]'], {}), '(c_skew[1])\n', (3709, 3720), True, 'import numpy as np\n'), ((4753, 4793), 'scipy.signal.fftconvolve', 'signal.fftconvolve', (['gmc', 'weights', '"""same"""'], {}), "(gmc, weights, 'same')\n", (4771, 4793), False, 'from scipy import signal\n'), ((4845, 4885), 'scipy.signal.fftconvolve', 'signal.fftconvolve', (['gpc', 'weights', '"""same"""'], {}), "(gpc, weights, 'same')\n", (4863, 4885), False, 'from scipy import signal\n')] |
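# Self-contained sketch of the sliding-window skew computation used in gc_skew above,
# on a synthetic sequence (numpy/scipy only; the window size is illustrative).
import numpy as np
from scipy import signal

seq = 'GGGGCCAT' * 50
gmc = np.array([{'G': 1, 'C': -1}.get(b, 0) for b in seq], dtype=float)  # G - C
gpc = np.abs(gmc)                                                           # G + C
window = 20
weights = np.ones(window) / window
gmc_win = signal.fftconvolve(gmc, weights, 'same')
gpc_win = signal.fftconvolve(gpc, weights, 'same')
skew = np.where(gpc_win > 0, gmc_win / np.where(gpc_win > 0, gpc_win, 1.0), 0.0)
print(skew[:5])  # per-position (G - C) / (G + C) over the window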
import os
import json
import shutil
with open("entry.tp") as entry:
entry = json.loads(entry.read())
startcmd = entry['plugin_start_cmd'].split("%TP_PLUGIN_FOLDER%")[1].split("\\")
filedirectory = startcmd[0]
fileName = startcmd[1]
if os.path.exists(filedirectory):
os.remove(os.path.join(os.getcwd(), "WinTools"))
else:
os.makedirs("temp/"+filedirectory)
for file in os.listdir("."):
if file not in ["compile.py", "utils", "requirements.txt", "build", "dist", "main.py", "main.spec", "__pycache__", "temp"]:
print("copying", file)
shutil.copy(os.path.join(os.getcwd(), file), os.path.join("temp", filedirectory))
os.rename(r"dist\Main.exe", r"dist\WinTools.exe")
shutil.copy(os.path.join(os.getcwd(), r"dist\WinTools.exe"), "temp/"+filedirectory)
shutil.make_archive(base_name="WinTools", format='zip', root_dir="temp", base_dir="WinTools")
os.rename("WinTools.zip", "WinTools.tpp")
| [
"os.path.exists",
"os.listdir",
"shutil.make_archive",
"os.makedirs",
"os.rename",
"os.path.join",
"os.getcwd"
] | [((246, 275), 'os.path.exists', 'os.path.exists', (['filedirectory'], {}), '(filedirectory)\n', (260, 275), False, 'import os\n'), ((389, 404), 'os.listdir', 'os.listdir', (['"""."""'], {}), "('.')\n", (399, 404), False, 'import os\n'), ((664, 713), 'os.rename', 'os.rename', (['"""dist\\\\Main.exe"""', '"""dist\\\\WinTools.exe"""'], {}), "('dist\\\\Main.exe', 'dist\\\\WinTools.exe')\n", (673, 713), False, 'import os\n'), ((797, 894), 'shutil.make_archive', 'shutil.make_archive', ([], {'base_name': '"""WinTools"""', 'format': '"""zip"""', 'root_dir': '"""temp"""', 'base_dir': '"""WinTools"""'}), "(base_name='WinTools', format='zip', root_dir='temp',\n base_dir='WinTools')\n", (816, 894), False, 'import shutil\n'), ((892, 933), 'os.rename', 'os.rename', (['"""WinTools.zip"""', '"""WinTools.tpp"""'], {}), "('WinTools.zip', 'WinTools.tpp')\n", (901, 933), False, 'import os\n'), ((340, 376), 'os.makedirs', 'os.makedirs', (["('temp/' + filedirectory)"], {}), "('temp/' + filedirectory)\n", (351, 376), False, 'import os\n'), ((737, 748), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (746, 748), False, 'import os\n'), ((304, 315), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (313, 315), False, 'import os\n'), ((618, 653), 'os.path.join', 'os.path.join', (['"""temp"""', 'filedirectory'], {}), "('temp', filedirectory)\n", (630, 653), False, 'import os\n'), ((598, 609), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (607, 609), False, 'import os\n')] |
# <NAME> (<EMAIL>)
# April 2018
import os, sys
BASE_DIR = os.path.normpath(
os.path.join(os.path.dirname(os.path.abspath(__file__))))
sys.path.append(os.path.join(BASE_DIR, '..'))
from datasets import *
from generate_outputs import *
from scipy.optimize import linear_sum_assignment
#import matplotlib.pyplot as plt
import numpy as np
def compute_all_keypoints(sess, net, data):
P = data.point_clouds
assert(P.shape[0] == data.n_data)
assert(P.shape[1] == data.n_points)
KP = data.keypoints
assert(KP.shape[0] == data.n_data)
assert(KP.shape[1] == data.n_labels)
A = predict_A(P, sess, net)
assert(A.shape[0] == data.n_data)
assert(A.shape[1] == data.n_points)
assert(A.shape[2] == net.K)
pred_KP = np.argmax(A, axis=1)
return P, KP, pred_KP
def evaluate_PCK(P, KP, pred_KP):
n_data = P.shape[0]
n_points = P.shape[1]
n_labels = KP.shape[1]
K = pred_KP.shape[1]
# dists_info: (point_cloud_index, label, basis_index, distance)
dists_info = []
for k in range(n_data):
# NOTE:
# Skip if the keypoint does not exist.
labels = [i for i in range(n_labels) if KP[k,i] >= 0]
# Find the closest prediction (w/o matching).
for i, label in enumerate(labels):
all_dists = np.zeros(K)
idx_i = KP[k,label]
assert(idx_i < n_points)
p_i = P[k,idx_i]
for j in range(K):
idx_j = pred_KP[k,j]
assert(idx_j < n_points)
p_j = P[k,idx_j]
all_dists[j] = np.linalg.norm(p_i - p_j)
j = np.argmin(all_dists)
dists_info.append((k, i, j, all_dists[j]))
dists_info = np.array(dists_info)
return dists_info
def evaluate_PCK_after_label_basis_matching(P, KP, pred_KP):
n_data = P.shape[0]
n_points = P.shape[1]
n_labels = KP.shape[1]
K = pred_KP.shape[1]
# Find the best mapping from labels to bases.
all_dists = np.zeros((n_data, n_labels, K))
label_counts = np.zeros(n_labels)
for k in range(n_data):
for i in range(n_labels):
# NOTE:
# Skip if the keypoint does not exist.
if KP[k,i] < 0: continue
idx_i = KP[k,i]
assert(idx_i < n_points)
p_i = P[k,idx_i]
label_counts[i] += 1.
for j in range(K):
idx_j = pred_KP[k,j]
assert(idx_j < n_points)
p_j = P[k,idx_j]
all_dists[k,i,j] += np.linalg.norm(p_i - p_j)
mean_dists = np.sum(all_dists, axis=0) / \
np.expand_dims(label_counts, axis=-1)
row_ind, col_ind = linear_sum_assignment(mean_dists)
# dists_info: (point_cloud_index, label, basis_index, distance)
dists_info = []
for k in range(n_data):
for (i, j) in zip(row_ind, col_ind):
if KP[k,i] < 0: continue
dists_info.append((k, i, j, all_dists[k,i,j]))
dists_info = np.array(dists_info)
return dists_info
def save_results(dists_info, out_dir, postfix=None):
# dists_info: (point_cloud_index, label, basis_index, distance)
dists = dists_info[:,3]
if postfix is not None:
out_file = os.path.join(out_dir, 'distances_{}.npy'.format(postfix))
else:
out_file = os.path.join(out_dir, 'distances.npy')
np.save(out_file, dists)
print("Saved '{}'.".format(out_file))
'''
# Draw plot.
n_matches = dists.size
x_list = np.linspace(0.0, 0.1, 20 + 1)
counts = np.zeros(x_list.size, dtype=int)
for i in range(x_list.size):
counts[i] = np.sum(dists <= x_list[i])
y_list = counts.astype(x_list.dtype) / float(n_matches)
plt.clf()
plt.plot(x_list, y_list)
plt.ylim(0., 1.)
plt.yticks(np.linspace(0., 1., 10 + 1))
if postfix is not None:
out_file = os.path.join(out_dir, 'pck_{}.png'.format(postfix))
else:
out_file = os.path.join(out_dir, 'pck.png')
plt.savefig(out_file)
print("Saved '{}'.".format(out_file))
'''
def evaluate(sess, net, data, out_dir):
if not os.path.exists(out_dir): os.makedirs(out_dir)
P, KP, pred_KP = compute_all_keypoints(sess, net, data)
dists = evaluate_PCK(P, KP, pred_KP)
save_results(dists, out_dir)
dists_after_matching = evaluate_PCK_after_label_basis_matching(
P, KP, pred_KP)
save_results(dists_after_matching, out_dir, postfix='after_matching')
| [
"os.path.exists",
"scipy.optimize.linear_sum_assignment",
"os.makedirs",
"os.path.join",
"numpy.argmax",
"numpy.linalg.norm",
"numpy.array",
"numpy.zeros",
"numpy.sum",
"numpy.expand_dims",
"numpy.argmin",
"os.path.abspath",
"numpy.save"
] | [((159, 187), 'os.path.join', 'os.path.join', (['BASE_DIR', '""".."""'], {}), "(BASE_DIR, '..')\n", (171, 187), False, 'import os, sys\n'), ((758, 778), 'numpy.argmax', 'np.argmax', (['A'], {'axis': '(1)'}), '(A, axis=1)\n', (767, 778), True, 'import numpy as np\n'), ((1732, 1752), 'numpy.array', 'np.array', (['dists_info'], {}), '(dists_info)\n', (1740, 1752), True, 'import numpy as np\n'), ((2008, 2039), 'numpy.zeros', 'np.zeros', (['(n_data, n_labels, K)'], {}), '((n_data, n_labels, K))\n', (2016, 2039), True, 'import numpy as np\n'), ((2059, 2077), 'numpy.zeros', 'np.zeros', (['n_labels'], {}), '(n_labels)\n', (2067, 2077), True, 'import numpy as np\n'), ((2707, 2740), 'scipy.optimize.linear_sum_assignment', 'linear_sum_assignment', (['mean_dists'], {}), '(mean_dists)\n', (2728, 2740), False, 'from scipy.optimize import linear_sum_assignment\n'), ((3019, 3039), 'numpy.array', 'np.array', (['dists_info'], {}), '(dists_info)\n', (3027, 3039), True, 'import numpy as np\n'), ((3393, 3417), 'numpy.save', 'np.save', (['out_file', 'dists'], {}), '(out_file, dists)\n', (3400, 3417), True, 'import numpy as np\n'), ((2604, 2629), 'numpy.sum', 'np.sum', (['all_dists'], {'axis': '(0)'}), '(all_dists, axis=0)\n', (2610, 2629), True, 'import numpy as np\n'), ((2646, 2683), 'numpy.expand_dims', 'np.expand_dims', (['label_counts'], {'axis': '(-1)'}), '(label_counts, axis=-1)\n', (2660, 2683), True, 'import numpy as np\n'), ((3349, 3387), 'os.path.join', 'os.path.join', (['out_dir', '"""distances.npy"""'], {}), "(out_dir, 'distances.npy')\n", (3361, 3387), False, 'import os, sys\n'), ((4146, 4169), 'os.path.exists', 'os.path.exists', (['out_dir'], {}), '(out_dir)\n', (4160, 4169), False, 'import os, sys\n'), ((4171, 4191), 'os.makedirs', 'os.makedirs', (['out_dir'], {}), '(out_dir)\n', (4182, 4191), False, 'import os, sys\n'), ((114, 139), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (129, 139), False, 'import os, sys\n'), ((1309, 1320), 'numpy.zeros', 'np.zeros', (['K'], {}), '(K)\n', (1317, 1320), True, 'import numpy as np\n'), ((1638, 1658), 'numpy.argmin', 'np.argmin', (['all_dists'], {}), '(all_dists)\n', (1647, 1658), True, 'import numpy as np\n'), ((1595, 1620), 'numpy.linalg.norm', 'np.linalg.norm', (['(p_i - p_j)'], {}), '(p_i - p_j)\n', (1609, 1620), True, 'import numpy as np\n'), ((2560, 2585), 'numpy.linalg.norm', 'np.linalg.norm', (['(p_i - p_j)'], {}), '(p_i - p_j)\n', (2574, 2585), True, 'import numpy as np\n')] |
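# Tiny illustration of the Hungarian step inside evaluate_PCK_after_label_basis_matching:
# linear_sum_assignment picks the label-to-basis mapping with the smallest total mean
# distance. The cost matrix below is made up.
import numpy as np
from scipy.optimize import linear_sum_assignment

mean_dists = np.array([[0.10, 0.90, 0.40],   # rows: keypoint labels
                       [0.80, 0.05, 0.70],   # cols: predicted bases
                       [0.30, 0.60, 0.02]])
row_ind, col_ind = linear_sum_assignment(mean_dists)
print([(int(r), int(c)) for r, c in zip(row_ind, col_ind)])  # [(0, 0), (1, 1), (2, 2)]
print(mean_dists[row_ind, col_ind].sum())                    # minimal total cost, ~0.17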
import os
from conans import ConanFile, tools
from conans.errors import ConanInvalidConfiguration
class CxxOptsConan(ConanFile):
name = "cxxopts"
homepage = "https://github.com/jarro2783/cxxopts"
url = "https://github.com/conan-io/conan-center-index"
description = "Lightweight C++ option parser library, supporting the standard GNU style syntax for options."
license = "MIT"
topics = ("conan", "option-parser", "positional-arguments ", "header-only")
settings = "compiler"
options = { "unicode": [True, False] }
default_options = { "unicode": False }
no_copy_source = True
@property
def _source_subfolder(self):
return "source_subfolder"
@property
def _minimum_cpp_standard(self):
return 11
@property
def _minimum_compilers_version(self):
return {
"Visual Studio": "14",
"gcc": "5",
"clang": "3.9",
"apple-clang": "8",
}
def configure(self):
if self.settings.compiler.get_safe("cppstd"):
tools.check_min_cppstd(self, self._minimum_cpp_standard)
min_version = self._minimum_compilers_version.get(str(self.settings.compiler))
if not min_version:
self.output.warn("{} recipe lacks information about the {} compiler support.".format(
self.name, self.settings.compiler))
else:
if tools.Version(self.settings.compiler.version) < min_version:
raise ConanInvalidConfiguration("{} requires C++{} support. The current compiler {} {} does not support it.".format(
self.name, self._minimum_cpp_standard, self.settings.compiler, self.settings.compiler.version))
def requirements(self):
if self.options.unicode:
self.requires("icu/64.2")
def source(self):
tools.get(**self.conan_data["sources"][self.version])
os.rename("{}-{}".format(self.name, self.version), self._source_subfolder)
def package(self):
self.copy("LICENSE", dst="licenses", src=self._source_subfolder)
self.copy("{}.hpp".format(self.name), dst="include", src=os.path.join(self._source_subfolder, "include"))
def package_id(self):
self.info.header_only()
def package_info(self):
if self.options.unicode:
self.cpp_info.defines = ["CXXOPTS_USE_UNICODE"]
| [
"conans.tools.check_min_cppstd",
"conans.tools.get",
"os.path.join",
"conans.tools.Version"
] | [((1859, 1912), 'conans.tools.get', 'tools.get', ([], {}), "(**self.conan_data['sources'][self.version])\n", (1868, 1912), False, 'from conans import ConanFile, tools\n'), ((1067, 1123), 'conans.tools.check_min_cppstd', 'tools.check_min_cppstd', (['self', 'self._minimum_cpp_standard'], {}), '(self, self._minimum_cpp_standard)\n', (1089, 1123), False, 'from conans import ConanFile, tools\n'), ((1418, 1463), 'conans.tools.Version', 'tools.Version', (['self.settings.compiler.version'], {}), '(self.settings.compiler.version)\n', (1431, 1463), False, 'from conans import ConanFile, tools\n'), ((2158, 2205), 'os.path.join', 'os.path.join', (['self._source_subfolder', '"""include"""'], {}), "(self._source_subfolder, 'include')\n", (2170, 2205), False, 'import os\n')] |
"""
Standard Regression model
-------------------------
"""
import numpy as np
import pandas as pd
from typing import Union
from ..logging import get_logger
from .regression_model import RegressionModel
from sklearn.linear_model import LinearRegression
logger = get_logger(__name__)
class LinearRegressionModel(RegressionModel):
def __init__(self,
lags: Union[int, list] = None,
lags_exog: Union[int, list, bool] = None,
**kwargs):
"""
Simple wrapper for the linear regression model in scikit-learn, LinearRegression().
Parameters
----------
lags : Union[int, list]
Number of lagged target values used to predict the next time step. If an integer is given
the last `lags` lags are used (inclusive). Otherwise a list of integers with lags is required.
lags_exog : Union[int, list, bool]
Number of lagged exogenous values used to predict the next time step. If an integer is given
the last `lags_exog` lags are used (inclusive). Otherwise a list of integers with lags is required.
            If True, `lags` will be used to determine `lags_exog`. If False, the values of all exogenous
            variables at the current time `t` are used. This might lead to leakage if, at prediction time,
            the values of the exogenous variables at time `t` are not known.
**kwargs
Additional keyword arguments passed to `sklearn.linear_model.LinearRegression`.
"""
self.kwargs = kwargs
super().__init__(
lags=lags,
lags_exog=lags_exog,
model=LinearRegression(**kwargs)
)
def __str__(self):
return 'LinearRegression(lags={}, lags_exog={})'.format(self.lags, self.lags_exog) | [
"sklearn.linear_model.LinearRegression"
] | [((1658, 1684), 'sklearn.linear_model.LinearRegression', 'LinearRegression', ([], {}), '(**kwargs)\n', (1674, 1684), False, 'from sklearn.linear_model import LinearRegression\n')] |
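# Usage sketch (not from the original file). Construction follows the docstring above;
# the fit/predict calls are assumed to come from the RegressionModel base class and are
# left commented out, with `series` standing for a darts TimeSeries.
model = LinearRegressionModel(lags=12, lags_exog=True)
# model.fit(series)            # assumed base-class interface
# forecast = model.predict(n=6)
print(model)                   # e.g. LinearRegression(lags=..., lags_exog=...)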
import random
from pymongo import MongoClient
from observable import Observable
from phrase import Phrase
class MongoDbProxy:
"""Proxy for MongoDB"""
def __init__(self, url, dbName, tableName):
self.client = MongoClient(url)
self.db = self.client[dbName]
self.table = tableName
self.count = self.db[self.table].find().count()
def get_db(self):
return self.db
def add_phrase(self, phrase):
#[{ "english": eng, "polish" : pl}]
record = {"english" : phrase.eng, "polish" : phrase.meanings}
self.db[self.table].insert(record)
self.count = self.db[self.table].find().count()
def show_one(self, phrase):
print("eng: \'%s\' pol: \'%s\'" % (phrase["english"], phrase["polish"]))
def get_all(self):
#define your data struct here
words = {}
for i, phrase in enumerate(self.db[self.table].find()):
eng = phrase["english"]
#lang = phrase["lang"]
meaning = phrase["polish"]
words[eng] = meaning
return words
def show_all(self):
if self.count > 0:
for i, phrase in enumerate(self.db[self.table].find()):
print(i, end=" ")
self.show_one(phrase)
else:
print("Database is empty")
def show_random(self):
entries = self.db[self.table].find()
self.count = entries.count()
if self.count > 0:
self.show_one(entries[random.randrange(self.count)])
else:
print("Database is empty")
def record_exists(self, eng):
if self.db[self.table].find_one({"english" : eng}):
return True
else:
return False
def drop_record(self, eng):
self.db[self.table].delete_one({"english":eng})
def drop_db(self):
print("Dropping")
        self.db[self.table].drop()
self.count = self.db[self.table].find().count()
class Model:
"""That needs a table of pairs - eng and its meanings"""
def __init__(self):
self.phrases = Observable({})
self.db = MongoDbProxy("mongodb://localhost:27017/", "RepeatItDb", "phrases")
data = self.db.get_all()
self.phrases.setData(data)
def addWord(self, key, lang, meanings):
newData = self.phrases.getData()
newData[key] = meanings
self.phrases.setData(newData)
def getAllWords(self):
return self.phrases.getData()
def removeWord(self, key):
newData = self.phrases.getData()
newData.pop(key)
self.phrases.setData(newData)
def saveWord(self, wordAndMeaning):
word = wordAndMeaning[0]
meaning = wordAndMeaning[1]
self.addWord(word, "pl", meaning)
def saveDb(self):
dbData = self.db.get_all()
modelData = self.getAllWords()
#That's for future optimization: update db instead of adding it all
dbKeysSet = set(dbData.keys())
dbValuesSet = set(dbData.values())
modelKeysSet = set(modelData.keys())
modelValuesSet = set(modelData.values())
newRecordsKeys = modelKeysSet - dbKeysSet
deletedRecordsKeys = dbKeysSet - modelKeysSet
if len(newRecordsKeys):
for newKey in newRecordsKeys:
self.db.add_phrase(Phrase(newKey, "pl", modelData[newKey]))
if len(deletedRecordsKeys):
for deletedKey in deletedRecordsKeys:
self.db.drop_record(deletedKey)
        # TODO: also handle value updates
print("Saving database...")
| [
"phrase.Phrase",
"pymongo.MongoClient",
"observable.Observable",
"random.randrange"
] | [((231, 247), 'pymongo.MongoClient', 'MongoClient', (['url'], {}), '(url)\n', (242, 247), False, 'from pymongo import MongoClient\n'), ((2173, 2187), 'observable.Observable', 'Observable', (['{}'], {}), '({})\n', (2183, 2187), False, 'from observable import Observable\n'), ((1564, 1592), 'random.randrange', 'random.randrange', (['self.count'], {}), '(self.count)\n', (1580, 1592), False, 'import random\n'), ((3500, 3539), 'phrase.Phrase', 'Phrase', (['newKey', '"""pl"""', 'modelData[newKey]'], {}), "(newKey, 'pl', modelData[newKey])\n", (3506, 3539), False, 'from phrase import Phrase\n')] |
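# Interactive sketch of the classes above (illustrative, not part of the original file).
# Assumes a MongoDB instance on localhost:27017; the phrase data is made up.
if __name__ == '__main__':
    model = Model()                    # loads existing phrases from the database
    model.saveWord(('cat', 'kot'))     # add an English word with its Polish meaning
    print(model.getAllWords())
    model.saveDb()                     # persist new records back to MongoDB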
#!flask/bin/python
#from user import User
from sampleObjects.User import User
from datetime import datetime
from sampleObjects.DetectionPoint import DetectionPoint
import time, requests, random, atexit
def requestGenerator():
userObject = randomUser()
detectionPointObject = randomDetectionPoint()
req = requests.post('http://localhost:5000/addevent', json = {"User": userObject.__dict__, "DetectionPoint" : detectionPointObject.__dict__, "Time" : str(datetime.now().isoformat())})
print (req.text)
checkResp = requests.get('http://localhost:5000/getResponses')
print (checkResp.text)
def randomUser():
user = random.randint(1,3)
attacker=0
if (user==1):
attacker = User("Phillipo", "255.255.255.101", "xxxx")
elif (user==2):
attacker = User("Sergio", "192.168.127.12", "yyyy")
elif (user==3):
attacker = User("Anonymous", "172.16.31.10", "354343jjk23")
return attacker
def randomDetectionPoint():
rand = random.randint(1,2)
dp=0
if (rand==1):
dp = DetectionPoint("HTTP Verb", "GET Request used where POST is expected")
elif (rand==2):
dp = DetectionPoint("Login Page", "Hidden field altered within the login form")
return dp
for i in range (50):
requestGenerator()
time.sleep(1.5)
def closingTime():
print ("Exiting")
atexit.register(closingTime)
| [
"sampleObjects.DetectionPoint.DetectionPoint",
"atexit.register",
"sampleObjects.User.User",
"time.sleep",
"requests.get",
"datetime.datetime.now",
"random.randint"
] | [((1351, 1379), 'atexit.register', 'atexit.register', (['closingTime'], {}), '(closingTime)\n', (1366, 1379), False, 'import time, requests, random, atexit\n'), ((535, 585), 'requests.get', 'requests.get', (['"""http://localhost:5000/getResponses"""'], {}), "('http://localhost:5000/getResponses')\n", (547, 585), False, 'import time, requests, random, atexit\n'), ((643, 663), 'random.randint', 'random.randint', (['(1)', '(3)'], {}), '(1, 3)\n', (657, 663), False, 'import time, requests, random, atexit\n'), ((990, 1010), 'random.randint', 'random.randint', (['(1)', '(2)'], {}), '(1, 2)\n', (1004, 1010), False, 'import time, requests, random, atexit\n'), ((1293, 1308), 'time.sleep', 'time.sleep', (['(1.5)'], {}), '(1.5)\n', (1303, 1308), False, 'import time, requests, random, atexit\n'), ((715, 758), 'sampleObjects.User.User', 'User', (['"""Phillipo"""', '"""255.255.255.101"""', '"""xxxx"""'], {}), "('Phillipo', '255.255.255.101', 'xxxx')\n", (719, 758), False, 'from sampleObjects.User import User\n'), ((1050, 1120), 'sampleObjects.DetectionPoint.DetectionPoint', 'DetectionPoint', (['"""HTTP Verb"""', '"""GET Request used where POST is expected"""'], {}), "('HTTP Verb', 'GET Request used where POST is expected')\n", (1064, 1120), False, 'from sampleObjects.DetectionPoint import DetectionPoint\n'), ((798, 838), 'sampleObjects.User.User', 'User', (['"""Sergio"""', '"""192.168.127.12"""', '"""yyyy"""'], {}), "('Sergio', '192.168.127.12', 'yyyy')\n", (802, 838), False, 'from sampleObjects.User import User\n'), ((1154, 1228), 'sampleObjects.DetectionPoint.DetectionPoint', 'DetectionPoint', (['"""Login Page"""', '"""Hidden field altered within the login form"""'], {}), "('Login Page', 'Hidden field altered within the login form')\n", (1168, 1228), False, 'from sampleObjects.DetectionPoint import DetectionPoint\n'), ((878, 926), 'sampleObjects.User.User', 'User', (['"""Anonymous"""', '"""172.16.31.10"""', '"""354343jjk23"""'], {}), "('Anonymous', '172.16.31.10', '354343jjk23')\n", (882, 926), False, 'from sampleObjects.User import User\n'), ((467, 481), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (479, 481), False, 'from datetime import datetime\n')] |
import os
from pathlib import Path
def write(file_name, content):
Path(os.path.dirname(file_name)).mkdir(parents=True, exist_ok=True)
with open(file_name, 'w') as file:
file.write(content)
def read_line_looping(file_name, count):
i = 0
lines = []
file = open(file_name, 'r')
line = file.readline()
if line == '':
raise EmptyFileError(f'Error: Dictionary {file_name} seems to be empty')
while i < count:
lines.append(line.strip())
i += 1
line = file.readline()
if line == '':
file.close()
file = open(file_name, 'r')
line = file.readline()
file.close()
return lines
class EmptyFileError(Exception):
pass
| [
"os.path.dirname"
] | [((77, 103), 'os.path.dirname', 'os.path.dirname', (['file_name'], {}), '(file_name)\n', (92, 103), False, 'import os\n')] |
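# Demonstration of the wrap-around behaviour of read_line_looping; the path below is
# hypothetical, and write() creates the file (and its parent directory) first.
if __name__ == '__main__':
    write('demo/words.txt', 'alpha\nbeta\ngamma\n')
    # Asking for more lines than the file holds loops back to the first line.
    print(read_line_looping('demo/words.txt', 5))
    # -> ['alpha', 'beta', 'gamma', 'alpha', 'beta']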
from aux_sys_err_prediction_module.additive.R_runmed_spline.my_R_runmed_spline_fit import R_runmed_smooth_spline
from numpy import random, array, median, zeros, arange, hstack
from win32com.client import Dispatch
import math
myName = 'R_runmed_spline'
useMAD = True # use median absolute deviations instead of sum of squared residues
# -----------------------------------------------------------------------
def R_runmed_spline_MAIN(ARG3, Controller):
pars = Controller.updatedSettings['refiningPars']['regressionSettings'][myName]
# ARG3
x = ARG3[0][0]
y = ARG3[0][1]
sc = Dispatch("StatConnectorSrv.StatConnector")
sc.Init("R")
# get the best smoothing parameter
bestSpar = R_runmed_spline_KCV_OPTIMIZATION(x, y, sc=sc, **pars)
# get the prediction error for this smoothing parameter
bestPredErr = R_runmed_spline_KCV_predErr(x, y, spar=bestSpar, sc=sc, **pars)
# compare with original SSE
# is fit successful?
# return isSuccessfulFit, yFit, yEval, runMedData
SSE = sum(y ** 2)
MAD = 1.4826 * median(abs(y))
if useMAD:
SSE = MAD
if bestPredErr < SSE:
isSuccessfulFit = True
#
ppmArrs = [[] for i in range(len(ARG3))]
for ind in range(len(ARG3)):
x = ARG3[ind][0]
y = ARG3[ind][1]
xEval = ARG3[ind][2]
#
yFit, runMedData = R_runmed_smooth_spline(x, y, x, spar=bestSpar, sc=sc, **pars)
yEval, runMedData = R_runmed_smooth_spline(x, y, xEval, spar=bestSpar, sc=sc, **pars)
#
ppmArrs[ind] = [yFit, yEval]
else:
isSuccessfulFit = False
#
ppmArrs = [[] for i in range(len(ARG3))]
for ind in range(len(ARG3)):
x = ARG3[ind][0]
y = ARG3[ind][1]
xEval = ARG3[ind][2]
#
yFit = zeros(len(x), 'd')
yEval = zeros(len(xEval), 'd')
#
ppmArrs[ind] = [yFit, yEval]
sc.Close()
return isSuccessfulFit, bestPredErr, ppmArrs
# -----------------------------------------------------------------------
# -----------------------------------------------------------------------
def R_runmed_spline_KCV_OPTIMIZATION(x, y, sc, **pars):
sparRange = array([float(i) for i in pars['spar range'].split(',')])
sparStepsNum = int(pars['spar steps number'])
sparStep = round((sparRange[1] - sparRange[0]) / sparStepsNum, 5)
sparSet = arange(sparRange[0], sparRange[1], sparStep)
predErrSet = zeros(len(sparSet), 'd')
for i in range(len(sparSet)):
predErr = R_runmed_spline_KCV_predErr(x, y, spar=sparSet[i], sc=sc, **pars)
predErrSet[i] = predErr
## p(zip(sparSet, predErrSet))
spar = sparSet[predErrSet == min(predErrSet)][-1] # take the last one (smoothest) if there are few
## print('spar ', spar)
return spar
# -----------------------------------------------------------------------
# -----------------------------------------------------------------------
def R_runmed_spline_KCV_predErr(x, y, **kwargs):
"""
just returns the prediction error
"""
K = int(kwargs['K'])
# --Related to K-fold CV---------------------------
L = len(x)
    N = L // K  ## min length of pieces
W = list(range(L))
Z = list(range(1, K + 1))
Z = [N for j in Z]
R = L % K
Z[0:R] = [j + 1 for j in Z[0:R]] # length of the pieces
random.shuffle(W)
ind = 0
predErr = 0
allResiduals = array([])
SSE = sum(y ** 2) # VLAD. Why do I need this???
# ---running through K training/testings-------------
for val in Z:
j = math.floor(val)
# ---making training/testing subsets-------------
test = W[ind:ind + j]
test.sort()
train = W[0:ind] + W[ind + j:]
train.sort()
ind += j
# -----------------------------------------------
# ---fit runmed_spline here----------------------
yFit, runMed = R_runmed_smooth_spline(x[train], y[train], x[test], **kwargs)
residualsTest = y[test] - yFit
predErr += sum(residualsTest ** 2)
allResiduals = hstack((allResiduals, residualsTest))
# -----------------------------------------------
if useMAD:
predErr = 1.4826 * median(abs(allResiduals))
return predErr
# -----------------------------------------------------------------------
if __name__ == '__main__':
from numpy import linspace, cos, lexsort, zeros, sin
from pylab import plot, show, subplot, savefig, clf, ylim
from pprint import pprint as p
from time import clock as c
x1 = linspace(0, 30, 300)
## y1 = cos(x1)
## y1 = zeros(len(x1),'d') #nice test
y1 = x1 * 0.03
y1 += random.normal(scale=0.2, size=y1.shape)
ind = lexsort(keys=(y1, x1))
x1 = x1[ind]
y1 = y1[ind]
t1 = c()
isSuccessfulFit, yFit, yEval, runMedData, predErr = \
R_runmed_spline_MAIN(x1, y1, x1, runMedSpan=0.01, K=10, sparRange=[0.6, 1.1, 0.1])
t2 = c()
print('done in %s seconds' % (t2 - t1))
subplot(211)
plot(x1, y1, 'bo')
plot(runMedData[0], runMedData[1], 'y^')
plot(x1, yEval, 'r+-')
ylim([-1.5, +1.5])
subplot(212)
plot(x1, y1 - yEval, 'go')
ylim([-1.5, +1.5])
show()
| [
"numpy.random.normal",
"pylab.ylim",
"win32com.client.Dispatch",
"time.clock",
"pylab.subplot",
"math.floor",
"pylab.plot",
"pylab.show",
"numpy.hstack",
"aux_sys_err_prediction_module.additive.R_runmed_spline.my_R_runmed_spline_fit.R_runmed_smooth_spline",
"numpy.array",
"numpy.linspace",
"numpy.lexsort",
"numpy.arange",
"numpy.random.shuffle"
] | [((619, 661), 'win32com.client.Dispatch', 'Dispatch', (['"""StatConnectorSrv.StatConnector"""'], {}), "('StatConnectorSrv.StatConnector')\n", (627, 661), False, 'from win32com.client import Dispatch\n'), ((2552, 2596), 'numpy.arange', 'arange', (['sparRange[0]', 'sparRange[1]', 'sparStep'], {}), '(sparRange[0], sparRange[1], sparStep)\n', (2558, 2596), False, 'from numpy import random, array, median, zeros, arange, hstack\n'), ((3556, 3573), 'numpy.random.shuffle', 'random.shuffle', (['W'], {}), '(W)\n', (3570, 3573), False, 'from numpy import random, array, median, zeros, arange, hstack\n'), ((3624, 3633), 'numpy.array', 'array', (['[]'], {}), '([])\n', (3629, 3633), False, 'from numpy import random, array, median, zeros, arange, hstack\n'), ((4806, 4826), 'numpy.linspace', 'linspace', (['(0)', '(30)', '(300)'], {}), '(0, 30, 300)\n', (4814, 4826), False, 'from numpy import linspace, cos, lexsort, zeros, sin\n'), ((4930, 4969), 'numpy.random.normal', 'random.normal', ([], {'scale': '(0.2)', 'size': 'y1.shape'}), '(scale=0.2, size=y1.shape)\n', (4943, 4969), False, 'from numpy import random, array, median, zeros, arange, hstack\n'), ((4981, 5003), 'numpy.lexsort', 'lexsort', ([], {'keys': '(y1, x1)'}), '(keys=(y1, x1))\n', (4988, 5003), False, 'from numpy import linspace, cos, lexsort, zeros, sin\n'), ((5052, 5055), 'time.clock', 'c', ([], {}), '()\n', (5053, 5055), True, 'from time import clock as c\n'), ((5217, 5220), 'time.clock', 'c', ([], {}), '()\n', (5218, 5220), True, 'from time import clock as c\n'), ((5273, 5285), 'pylab.subplot', 'subplot', (['(211)'], {}), '(211)\n', (5280, 5285), False, 'from pylab import plot, show, subplot, savefig, clf, ylim\n'), ((5291, 5309), 'pylab.plot', 'plot', (['x1', 'y1', '"""bo"""'], {}), "(x1, y1, 'bo')\n", (5295, 5309), False, 'from pylab import plot, show, subplot, savefig, clf, ylim\n'), ((5315, 5355), 'pylab.plot', 'plot', (['runMedData[0]', 'runMedData[1]', '"""y^"""'], {}), "(runMedData[0], runMedData[1], 'y^')\n", (5319, 5355), False, 'from pylab import plot, show, subplot, savefig, clf, ylim\n'), ((5361, 5383), 'pylab.plot', 'plot', (['x1', 'yEval', '"""r+-"""'], {}), "(x1, yEval, 'r+-')\n", (5365, 5383), False, 'from pylab import plot, show, subplot, savefig, clf, ylim\n'), ((5389, 5407), 'pylab.ylim', 'ylim', (['[-1.5, +1.5]'], {}), '([-1.5, +1.5])\n', (5393, 5407), False, 'from pylab import plot, show, subplot, savefig, clf, ylim\n'), ((5413, 5425), 'pylab.subplot', 'subplot', (['(212)'], {}), '(212)\n', (5420, 5425), False, 'from pylab import plot, show, subplot, savefig, clf, ylim\n'), ((5431, 5457), 'pylab.plot', 'plot', (['x1', '(y1 - yEval)', '"""go"""'], {}), "(x1, y1 - yEval, 'go')\n", (5435, 5457), False, 'from pylab import plot, show, subplot, savefig, clf, ylim\n'), ((5463, 5481), 'pylab.ylim', 'ylim', (['[-1.5, +1.5]'], {}), '([-1.5, +1.5])\n', (5467, 5481), False, 'from pylab import plot, show, subplot, savefig, clf, ylim\n'), ((5487, 5493), 'pylab.show', 'show', ([], {}), '()\n', (5491, 5493), False, 'from pylab import plot, show, subplot, savefig, clf, ylim\n'), ((3779, 3794), 'math.floor', 'math.floor', (['val'], {}), '(val)\n', (3789, 3794), False, 'import math\n'), ((4132, 4193), 'aux_sys_err_prediction_module.additive.R_runmed_spline.my_R_runmed_spline_fit.R_runmed_smooth_spline', 'R_runmed_smooth_spline', (['x[train]', 'y[train]', 'x[test]'], {}), '(x[train], y[train], x[test], **kwargs)\n', (4154, 4193), False, 'from aux_sys_err_prediction_module.additive.R_runmed_spline.my_R_runmed_spline_fit import 
R_runmed_smooth_spline\n'), ((4302, 4339), 'numpy.hstack', 'hstack', (['(allResiduals, residualsTest)'], {}), '((allResiduals, residualsTest))\n', (4308, 4339), False, 'from numpy import random, array, median, zeros, arange, hstack\n'), ((1446, 1507), 'aux_sys_err_prediction_module.additive.R_runmed_spline.my_R_runmed_spline_fit.R_runmed_smooth_spline', 'R_runmed_smooth_spline', (['x', 'y', 'x'], {'spar': 'bestSpar', 'sc': 'sc'}), '(x, y, x, spar=bestSpar, sc=sc, **pars)\n', (1468, 1507), False, 'from aux_sys_err_prediction_module.additive.R_runmed_spline.my_R_runmed_spline_fit import R_runmed_smooth_spline\n'), ((1541, 1606), 'aux_sys_err_prediction_module.additive.R_runmed_spline.my_R_runmed_spline_fit.R_runmed_smooth_spline', 'R_runmed_smooth_spline', (['x', 'y', 'xEval'], {'spar': 'bestSpar', 'sc': 'sc'}), '(x, y, xEval, spar=bestSpar, sc=sc, **pars)\n', (1563, 1606), False, 'from aux_sys_err_prediction_module.additive.R_runmed_spline.my_R_runmed_spline_fit import R_runmed_smooth_spline\n')] |
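# Small check of the K-fold piece-length bookkeeping used in
# R_runmed_spline_KCV_predErr: L points are split into K folds whose sizes differ
# by at most one (values are illustrative).
L, K = 103, 10
N = L // K                          # minimum fold length
Z = [N] * K
R = L % K
Z[0:R] = [n + 1 for n in Z[0:R]]    # distribute the remainder over the first R folds
print(Z, sum(Z))                    # [11, 11, 11, 10, 10, 10, 10, 10, 10, 10] 103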
#!/usr/bin/env python
# Copyright (c) 2016-present, <NAME>
# All rights reserved.
#
# This software may be modified and distributed under the terms
# of the BSD license. See the LICENSE file for details.
import os
import sys
from setuptools import setup
try:
import cffi
except ImportError:
cffi = None
import setup_zstd
SUPPORT_LEGACY = False
SYSTEM_ZSTD = False
WARNINGS_AS_ERRORS = False
if os.environ.get('ZSTD_WARNINGS_AS_ERRORS', ''):
WARNINGS_AS_ERRORS = True
if '--legacy' in sys.argv:
SUPPORT_LEGACY = True
sys.argv.remove('--legacy')
if '--system-zstd' in sys.argv:
SYSTEM_ZSTD = True
sys.argv.remove('--system-zstd')
if '--warnings-as-errors' in sys.argv:
WARNINGS_AS_ERRORS = True
    sys.argv.remove('--warnings-as-errors')
# Code for obtaining the Extension instance is in its own module to
# facilitate reuse in other projects.
extensions = [
setup_zstd.get_c_extension(name='zstd',
support_legacy=SUPPORT_LEGACY,
system_zstd=SYSTEM_ZSTD,
warnings_as_errors=WARNINGS_AS_ERRORS),
]
install_requires = []
if cffi:
import make_cffi
extensions.append(make_cffi.ffi.distutils_extension())
# Need change in 1.10 for ffi.from_buffer() to handle all buffer types
# (like memoryview).
# Need feature in 1.11 for ffi.gc() to declare size of objects so we avoid
# garbage collection pitfalls.
install_requires.append('cffi>=1.11')
version = None
with open('c-ext/python-zstandard.h', 'r') as fh:
for line in fh:
if not line.startswith('#define PYTHON_ZSTANDARD_VERSION'):
continue
version = line.split()[2][1:-1]
break
if not version:
raise Exception('could not resolve package version; '
'this should never happen')
setup(
name='zstandard',
version=version,
description='Zstandard bindings for Python',
long_description=open('README.rst', 'r').read(),
url='https://github.com/indygreg/python-zstandard',
author='<NAME>',
author_email='<EMAIL>',
license='BSD',
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Programming Language :: C',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
],
keywords='zstandard zstd compression',
packages=['zstandard'],
ext_modules=extensions,
test_suite='tests',
install_requires=install_requires,
)
| [
"setup_zstd.get_c_extension",
"os.environ.get",
"make_cffi.ffi.distutils_extension",
"sys.argv.remote",
"sys.argv.remove"
] | [((407, 452), 'os.environ.get', 'os.environ.get', (['"""ZSTD_WARNINGS_AS_ERRORS"""', '""""""'], {}), "('ZSTD_WARNINGS_AS_ERRORS', '')\n", (421, 452), False, 'import os\n'), ((542, 569), 'sys.argv.remove', 'sys.argv.remove', (['"""--legacy"""'], {}), "('--legacy')\n", (557, 569), False, 'import sys\n'), ((630, 662), 'sys.argv.remove', 'sys.argv.remove', (['"""--system-zstd"""'], {}), "('--system-zstd')\n", (645, 662), False, 'import sys\n'), ((737, 775), 'sys.argv.remote', 'sys.argv.remote', (['"""--warning-as-errors"""'], {}), "('--warning-as-errors')\n", (752, 775), False, 'import sys\n'), ((902, 1040), 'setup_zstd.get_c_extension', 'setup_zstd.get_c_extension', ([], {'name': '"""zstd"""', 'support_legacy': 'SUPPORT_LEGACY', 'system_zstd': 'SYSTEM_ZSTD', 'warnings_as_errors': 'WARNINGS_AS_ERRORS'}), "(name='zstd', support_legacy=SUPPORT_LEGACY,\n system_zstd=SYSTEM_ZSTD, warnings_as_errors=WARNINGS_AS_ERRORS)\n", (928, 1040), False, 'import setup_zstd\n'), ((1209, 1244), 'make_cffi.ffi.distutils_extension', 'make_cffi.ffi.distutils_extension', ([], {}), '()\n', (1242, 1244), False, 'import make_cffi\n')] |
# Copyright (c) 2017-2018 <NAME>
#
# SPDX-License-Identifier: BSD-3-Clause
# The BSD-3-Clause license for this file can be found in the LICENSE file included with this distribution
# or at https://spdx.org/licenses/BSD-3-Clause.html#licenseText
import os
import pytest
from imx import img
# Used Directories
DATA_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'data')
# Test Files
DCD_TXT = os.path.join(DATA_DIR, 'dcd_test.txt')
DCD_BIN = os.path.join(DATA_DIR, 'dcd_test.bin')
def setup_module(module):
# Prepare test environment
pass
def teardown_module(module):
# Clean test environment
pass
def test_txt_parser():
with open(DCD_TXT, 'r') as f:
dcd_obj = img.SegDCD.parse_txt(f.read())
assert dcd_obj is not None
assert len(dcd_obj) == 12
def test_bin_parser():
with open(DCD_BIN, 'rb') as f:
dcd_obj = img.SegDCD.parse(f.read())
assert dcd_obj is not None
assert len(dcd_obj) == 12
| [
"os.path.abspath",
"os.path.join"
] | [((410, 448), 'os.path.join', 'os.path.join', (['DATA_DIR', '"""dcd_test.txt"""'], {}), "(DATA_DIR, 'dcd_test.txt')\n", (422, 448), False, 'import os\n'), ((459, 497), 'os.path.join', 'os.path.join', (['DATA_DIR', '"""dcd_test.bin"""'], {}), "(DATA_DIR, 'dcd_test.bin')\n", (471, 497), False, 'import os\n'), ((350, 375), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (365, 375), False, 'import os\n')] |
from setuptools import setup, find_packages
from retrobiocat_web import __version__
with open('requirements.txt') as f:
requirements = f.read().splitlines()
setup(
name = 'retrobiocat_web',
packages = find_packages(),
include_package_data=True,
version = __version__,
license='',
description = 'Retrosynthesis',
author = '<NAME>',
author_email = '<EMAIL>',
url = '',
download_url = '',
keywords = ['enzyme'],
install_requires=requirements,
classifiers=[
'Development Status :: 3 - Alpha',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3'],
) | [
"setuptools.find_packages"
] | [((209, 224), 'setuptools.find_packages', 'find_packages', ([], {}), '()\n', (222, 224), False, 'from setuptools import setup, find_packages\n')] |
# -*- coding: utf-8 -*-
import pickle
import numpy as np
from rdkit import Chem
from rdkit.Chem import AllChem,DataStructs
def get_classes(path):
f = open(path, 'rb')
dict_ = pickle.load(f)
f.close()
    classes = sorted(dict_.items(), key=lambda d: d[1], reverse=True)
return classes
def create_rxn_Morgan2FP_concatenate(rsmi, psmi, rxnfpsize=16384, pfpsize=16384, useFeatures=False, calculate_rfp=True, useChirality=True):
# Similar as the above function but takes smiles separately and returns pfp and rfp separately
rsmi = rsmi.encode('utf-8')
psmi = psmi.encode('utf-8')
try:
mol = Chem.MolFromSmiles(rsmi)
except Exception as e:
print(e)
return
try:
fp_bit = AllChem.GetMorganFingerprintAsBitVect(
mol=mol, radius=2, nBits=rxnfpsize, useFeatures=useFeatures, useChirality=useChirality)
fp = np.empty(rxnfpsize, dtype='float32')
DataStructs.ConvertToNumpyArray(fp_bit, fp)
except Exception as e:
print("Cannot build reactant fp due to {}".format(e))
return
rfp = fp
try:
mol = Chem.MolFromSmiles(psmi)
except Exception as e:
return
try:
fp_bit = AllChem.GetMorganFingerprintAsBitVect(
mol=mol, radius=2, nBits=pfpsize, useFeatures=useFeatures, useChirality=useChirality)
fp = np.empty(pfpsize, dtype='float32')
DataStructs.ConvertToNumpyArray(fp_bit, fp)
except Exception as e:
print("Cannot build product fp due to {}".format(e))
return
pfp = fp
rxn_fp = pfp - rfp
final_fp = np.concatenate((pfp, rxn_fp))
return final_fp | [
"pickle.load",
"rdkit.Chem.MolFromSmiles",
"rdkit.Chem.AllChem.GetMorganFingerprintAsBitVect",
"rdkit.Chem.DataStructs.ConvertToNumpyArray",
"numpy.empty",
"numpy.concatenate"
] | [((196, 210), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (207, 210), False, 'import pickle\n'), ((1698, 1727), 'numpy.concatenate', 'np.concatenate', (['(pfp, rxn_fp)'], {}), '((pfp, rxn_fp))\n', (1712, 1727), True, 'import numpy as np\n'), ((694, 718), 'rdkit.Chem.MolFromSmiles', 'Chem.MolFromSmiles', (['rsmi'], {}), '(rsmi)\n', (712, 718), False, 'from rdkit import Chem\n'), ((809, 938), 'rdkit.Chem.AllChem.GetMorganFingerprintAsBitVect', 'AllChem.GetMorganFingerprintAsBitVect', ([], {'mol': 'mol', 'radius': '(2)', 'nBits': 'rxnfpsize', 'useFeatures': 'useFeatures', 'useChirality': 'useChirality'}), '(mol=mol, radius=2, nBits=rxnfpsize,\n useFeatures=useFeatures, useChirality=useChirality)\n', (846, 938), False, 'from rdkit.Chem import AllChem, DataStructs\n'), ((963, 999), 'numpy.empty', 'np.empty', (['rxnfpsize'], {'dtype': '"""float32"""'}), "(rxnfpsize, dtype='float32')\n", (971, 999), True, 'import numpy as np\n'), ((1009, 1052), 'rdkit.Chem.DataStructs.ConvertToNumpyArray', 'DataStructs.ConvertToNumpyArray', (['fp_bit', 'fp'], {}), '(fp_bit, fp)\n', (1040, 1052), False, 'from rdkit.Chem import AllChem, DataStructs\n'), ((1201, 1225), 'rdkit.Chem.MolFromSmiles', 'Chem.MolFromSmiles', (['psmi'], {}), '(psmi)\n', (1219, 1225), False, 'from rdkit import Chem\n'), ((1298, 1425), 'rdkit.Chem.AllChem.GetMorganFingerprintAsBitVect', 'AllChem.GetMorganFingerprintAsBitVect', ([], {'mol': 'mol', 'radius': '(2)', 'nBits': 'pfpsize', 'useFeatures': 'useFeatures', 'useChirality': 'useChirality'}), '(mol=mol, radius=2, nBits=pfpsize,\n useFeatures=useFeatures, useChirality=useChirality)\n', (1335, 1425), False, 'from rdkit.Chem import AllChem, DataStructs\n'), ((1450, 1484), 'numpy.empty', 'np.empty', (['pfpsize'], {'dtype': '"""float32"""'}), "(pfpsize, dtype='float32')\n", (1458, 1484), True, 'import numpy as np\n'), ((1494, 1537), 'rdkit.Chem.DataStructs.ConvertToNumpyArray', 'DataStructs.ConvertToNumpyArray', (['fp_bit', 'fp'], {}), '(fp_bit, fp)\n', (1525, 1537), False, 'from rdkit.Chem import AllChem, DataStructs\n')] |
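# Usage sketch for create_rxn_Morgan2FP_concatenate above (requires RDKit; the
# reactant/product SMILES are toy examples, and the smaller bit sizes keep it fast).
if __name__ == '__main__':
    fp = create_rxn_Morgan2FP_concatenate('CCO', 'CC=O', rxnfpsize=1024, pfpsize=1024)
    print(fp.shape, fp.dtype)  # (2048,) float32: product fp, then reaction-difference fp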
import functools
import logging
import random
from flask import Flask, render_template, request
import joblib
from lxml.html import html5parser
import lxml.html
import requests
import yarl
import webstruct.model
import webstruct.sequence_encoding
import webstruct.webannotator
webstruct_demo = Flask(__name__, instance_relative_config=True)
webstruct_demo.config.from_pyfile('config.py')
def absolutize_link(link, base_url):
if link.startswith('#'):
return link
try:
target_url = yarl.URL(link)
    except Exception:
return link
if target_url.is_absolute() and target_url.scheme:
return link
if target_url.is_absolute() and not target_url.scheme:
target_url = target_url.with_scheme(base_url.scheme)
return str(target_url)
try:
target_url = base_url.join(target_url)
    except Exception:
return link
return str(target_url)
def absolute_links(tree, url):
_LINK_SOURCES = ['src', 'href']
try:
base_url = yarl.URL(url)
    except Exception:
return tree
for _, element in lxml.html.etree.iterwalk(tree, events=('start', )):
if not isinstance(element.tag, str):
continue
for attr in _LINK_SOURCES:
if attr not in element.attrib:
continue
element.attrib[attr] = absolutize_link(element.attrib[attr], base_url)
return tree
def parent_links(tree, base_url):
base_url = yarl.URL(base_url)
for _, element in lxml.html.etree.iterwalk(tree, events=('start', )):
if not isinstance(element.tag, str):
continue
if element.tag != 'a':
continue
if 'href' not in element.attrib:
continue
url = element.attrib['href']
if url.startswith('#'):
continue
element.attrib['target'] = '_parent'
element.attrib['href'] = str(base_url.update_query(url=url))
return tree
def remove_namespace(tree):
_NS="{http://www.w3.org/1999/xhtml}"
for _, element in lxml.html.etree.iterwalk(tree, events=('start', )):
if not isinstance(element.tag, str):
continue
if not element.tag.startswith(_NS):
continue
element.tag = element.tag[len(_NS):]
return tree
_TOKENS_PER_PART = 2000
def run_model(tree, model):
html_tokens, _ = model.html_tokenizer.tokenize_single(tree)
if not html_tokens:
return tree, list(), list()
tree = html_tokens[0].elem.getroottree().getroot()
tags = model.model.predict([html_tokens[i:i+_TOKENS_PER_PART] for i in range(0, len(html_tokens), _TOKENS_PER_PART)])
tags = [i for t in tags for i in t]
return tree, html_tokens, tags
def download(url):
splash_url = webstruct_demo.config.get('SPLASH_URL', None)
splash_user = webstruct_demo.config.get('SPLASH_USER', None)
splash_pass = webstruct_demo.config.get('SPLASH_PASS', None)
is_splash = functools.reduce(lambda x,y: x and y is not None,
[splash_url, splash_user, splash_pass],
True)
if not is_splash:
response = requests.get(url)
return response.content, response.url
load = {'url': url,
'images': 0,
'base_url': url}
response = requests.post(splash_url + '/render.html',
json=load,
auth=requests.auth.HTTPBasicAuth(splash_user, splash_pass))
return response.content, url
def extract_ner(response_content, response_url, base_url):
url = response_url
tree = html5parser.document_fromstring(response_content)
tree = remove_namespace(tree)
tree = absolute_links(tree, url)
tree = parent_links(tree, base_url)
title = tree.xpath('//title')[0].text
model = joblib.load(webstruct_demo.config['MODEL_PATH'])
tree, tokens, tags = run_model(tree, model)
tree = model.html_tokenizer.detokenize_single(tokens, tags)
tree = webstruct.webannotator.to_webannotator(
tree,
entity_colors=model.entity_colors,
url=url
)
content = lxml.html.tostring(tree, encoding='utf-8').decode('utf-8')
entities = webstruct.sequence_encoding.IobEncoder.group(zip(tokens, tags))
entities = webstruct.model._drop_empty(
(model.build_entity(tokens), tag)
for (tokens, tag) in entities if tag != 'O'
)
groups = webstruct.model.extract_entitiy_groups(
tokens,
tags,
dont_penalize=None,
join_tokens=model.build_entity
)
return content, title, entities, groups
def sample_entities(entities):
unique = list(set(entities))
random.shuffle(unique)
sampled = unique[:5]
sampled = sorted(sampled, key=lambda e:(e[1], e[0]))
return sampled
def sample_groups(groups):
groups = [tuple(sorted(g)) for g in groups]
sampled = sorted(list(set(groups)), key=lambda g:-len(g))
return sampled[:2]
@webstruct_demo.route('/')
def index():
url = request.args.get('url', 'http://en.wikipedia.org/')
output = request.args.get('output', 'html')
try:
response_content, response_url = download(url)
content, title, entities, groups = extract_ner(response_content,
response_url,
request.url)
    except Exception:
        logging.exception('Got exception')
        content = None
        title = 'Error while fetching %s' % (url, )
entities = []
groups = []
_TEMPLATE_MAPPING = {'html': 'main.html',
'entities': 'entities.html',
'groups': 'groups.html'}
template = _TEMPLATE_MAPPING.get(output, _TEMPLATE_MAPPING['html'])
sampled_entities = sample_entities(entities)
sampled_groups = sample_groups(groups)
base_url = yarl.URL(request.url)
routing = {t: str(base_url.update_query(output=t)) for t in ['html', 'entities', 'groups']}
values = {'url': url,
'title': title,
'entities': entities,
'sampled_entities': sampled_entities,
'sampled_groups': sampled_groups,
'routing': routing,
'srcdoc': content,
'groups': groups,
'output': output}
return render_template(template, **values)
| [
"flask.render_template",
"flask.request.args.get",
"requests.auth.HTTPBasicAuth",
"random.shuffle",
"flask.Flask",
"functools.reduce",
"joblib.load",
"requests.get",
"logging.exception",
"lxml.html.html5parser.document_fromstring",
"yarl.URL"
] | [((298, 344), 'flask.Flask', 'Flask', (['__name__'], {'instance_relative_config': '(True)'}), '(__name__, instance_relative_config=True)\n', (303, 344), False, 'from flask import Flask, render_template, request\n'), ((1444, 1462), 'yarl.URL', 'yarl.URL', (['base_url'], {}), '(base_url)\n', (1452, 1462), False, 'import yarl\n'), ((2942, 3042), 'functools.reduce', 'functools.reduce', (['(lambda x, y: x and y is not None)', '[splash_url, splash_user, splash_pass]', '(True)'], {}), '(lambda x, y: x and y is not None, [splash_url, splash_user,\n splash_pass], True)\n', (2958, 3042), False, 'import functools\n'), ((3605, 3654), 'lxml.html.html5parser.document_fromstring', 'html5parser.document_fromstring', (['response_content'], {}), '(response_content)\n', (3636, 3654), False, 'from lxml.html import html5parser\n'), ((3822, 3870), 'joblib.load', 'joblib.load', (["webstruct_demo.config['MODEL_PATH']"], {}), "(webstruct_demo.config['MODEL_PATH'])\n", (3833, 3870), False, 'import joblib\n'), ((4724, 4746), 'random.shuffle', 'random.shuffle', (['unique'], {}), '(unique)\n', (4738, 4746), False, 'import random\n'), ((5062, 5113), 'flask.request.args.get', 'request.args.get', (['"""url"""', '"""http://en.wikipedia.org/"""'], {}), "('url', 'http://en.wikipedia.org/')\n", (5078, 5113), False, 'from flask import Flask, render_template, request\n'), ((5127, 5161), 'flask.request.args.get', 'request.args.get', (['"""output"""', '"""html"""'], {}), "('output', 'html')\n", (5143, 5161), False, 'from flask import Flask, render_template, request\n'), ((5944, 5965), 'yarl.URL', 'yarl.URL', (['request.url'], {}), '(request.url)\n', (5952, 5965), False, 'import yarl\n'), ((6398, 6433), 'flask.render_template', 'render_template', (['template'], {}), '(template, **values)\n', (6413, 6433), False, 'from flask import Flask, render_template, request\n'), ((511, 525), 'yarl.URL', 'yarl.URL', (['link'], {}), '(link)\n', (519, 525), False, 'import yarl\n'), ((1001, 1014), 'yarl.URL', 'yarl.URL', (['url'], {}), '(url)\n', (1009, 1014), False, 'import yarl\n'), ((3146, 3163), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (3158, 3163), False, 'import requests\n'), ((3421, 3474), 'requests.auth.HTTPBasicAuth', 'requests.auth.HTTPBasicAuth', (['splash_user', 'splash_pass'], {}), '(splash_user, splash_pass)\n', (3448, 3474), False, 'import requests\n'), ((5457, 5491), 'logging.exception', 'logging.exception', (['"""Got exception"""'], {}), "('Got exception')\n", (5474, 5491), False, 'import logging\n')] |
# Generated by Django 3.1.6 on 2021-02-15 19:01
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('website', '0083_remove_aisubmission_code'),
]
operations = [
migrations.AddField(
model_name='exam',
name='division',
field=models.IntegerField(default=1),
preserve_default=False,
),
migrations.CreateModel(
name='ExamPair',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=100, unique=True)),
('contest', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='exampairs', to='website.contest')),
],
),
migrations.AddField(
model_name='exam',
name='exampair',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='exams', to='website.exampair'),
),
]
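For orientation, here is a rough sketch of what the affected models plausibly look like once this migration is applied (reconstructed from the operations above; the actual website app models may differ):
from django.db import models
class ExamPair(models.Model):
    name = models.CharField(max_length=100, unique=True)
    contest = models.ForeignKey('website.Contest', on_delete=models.CASCADE,
                                related_name='exampairs')
class Exam(models.Model):
    division = models.IntegerField()  # default=1 above is only a one-off backfill value
    exampair = models.ForeignKey('website.ExamPair', blank=True, null=True,
                                 on_delete=models.SET_NULL, related_name='exams')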
| [
"django.db.models.ForeignKey",
"django.db.models.AutoField",
"django.db.models.CharField",
"django.db.models.IntegerField"
] | [((373, 403), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': '(1)'}), '(default=1)\n', (392, 403), False, 'from django.db import migrations, models\n'), ((996, 1132), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'blank': '(True)', 'null': '(True)', 'on_delete': 'django.db.models.deletion.SET_NULL', 'related_name': '"""exams"""', 'to': '"""website.exampair"""'}), "(blank=True, null=True, on_delete=django.db.models.\n deletion.SET_NULL, related_name='exams', to='website.exampair')\n", (1013, 1132), False, 'from django.db import migrations, models\n'), ((557, 650), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (573, 650), False, 'from django.db import migrations, models\n'), ((674, 719), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)', 'unique': '(True)'}), '(max_length=100, unique=True)\n', (690, 719), False, 'from django.db import migrations, models\n'), ((750, 865), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'related_name': '"""exampairs"""', 'to': '"""website.contest"""'}), "(on_delete=django.db.models.deletion.CASCADE, related_name\n ='exampairs', to='website.contest')\n", (767, 865), False, 'from django.db import migrations, models\n')] |
"""Core experiments for the dependency label prediction task."""
import collections
import copy
import logging
from typing import (Any, Dict, Iterator, Optional, Sequence, Set, Tuple, Type,
Union)
from ldp import datasets, learning
from ldp.models import probes, projections
from ldp.parse import ptb
from ldp.parse import representations as reps
from ldp.utils.typing import Device
import numpy
import torch
import wandb
UNK = 'unk'
class DLPIndexer:
"""Map pairs of words to their syntactic relationship, if any."""
def __init__(self, samples: Sequence[ptb.Sample], unk: str = UNK):
"""Map each relation label to an integer.
Args:
samples (Sequence[ptb.Sample]): The samples from which to determine
possible relations.
unk (str): Label to use when un-indexed dependency label is
encountered.
"""
labels = {rel for sample in samples for rel in sample.relations}
self.indexer = {unk: 0}
for label in sorted(labels):
self.indexer[label] = len(self.indexer)
self.unk = unk
def __call__(self, sample: ptb.Sample) -> torch.Tensor:
"""Map all possible (word, word) pairs to labels.
Args:
sample (ptb.Sample): The sample to label.
Returns:
torch.Tensor: For length W sentence, returns shape (W, W) matrix
where element (v, w) is the index of the label describing
the relationship between word v and w, if any. Defaults to
the "unk" label, even if there is no relationship between
v and w.
"""
heads, relations = sample.heads, sample.relations
labels = torch.empty(len(heads), len(heads), dtype=torch.long)
labels.fill_(self.indexer[self.unk])
for word, (head, rel) in enumerate(zip(heads, relations)):
if head == -1:
labels[word, word] = self.indexer[rel]
else:
label = self.indexer.get(rel, self.indexer[self.unk])
labels[word, head] = label
return labels
def __len__(self) -> int:
"""Return the number of unique labels for this task."""
return len(self.indexer)
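A small standalone illustration of the labeling scheme implemented by __call__ above, using plain lists in place of a ptb.Sample (heads, relations, and indexer values are made up):
import torch
heads = [1, -1, 1]                      # -1 marks the root word
relations = ['nsubj', 'root', 'dobj']
indexer = {'unk': 0, 'dobj': 1, 'nsubj': 2, 'root': 3}
labels = torch.zeros(3, 3, dtype=torch.long)  # everything starts as 'unk' (index 0)
for word, (head, rel) in enumerate(zip(heads, relations)):
    target = word if head == -1 else head   # root words point at themselves
    labels[word, target] = indexer.get(rel, indexer['unk'])
# labels[0, 1] == 2 ('nsubj'), labels[1, 1] == 3 ('root'), labels[2, 1] == 1 ('dobj')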
class ControlDLPIndexer:
"""Map pairs of words to arbitrary syntactic relationships."""
def __init__(self,
samples: Sequence[ptb.Sample],
dist: Optional[Union[numpy.ndarray, Sequence[float]]] = None):
"""Map each relation label to an arbitrary (integer) label.
We only do this for pairs of words which have a head-dependent
relationship in the original dataset.
Args:
            samples (Sequence[ptb.Sample]): The samples from which to pull
possible word pairs.
dist (Optional[Union[numpy.ndarray, Sequence[float]]], optional): A
distribution to use when sampling tags per word type.
By default, is computed from the list of samples.
"""
if dist is None:
counts: Dict[str, int] = collections.defaultdict(lambda: 0)
for sample in samples:
for relation in sample.relations:
counts[relation] += 1
dist = numpy.array([float(count) for count in counts.values()])
dist /= numpy.sum(dist)
assert dist is not None, 'uninitialized distribution?'
self.dist = dist
self.rels: Dict[Tuple[str, str], int] = {}
for sample in samples:
sentence = sample.sentence
heads = sample.heads
for dep, head in enumerate(heads):
if head == -1:
head = dep
words = (sentence[dep], sentence[head])
if words not in self.rels:
# Add one so that 0 is reserved for "no relationship" tag.
rel = numpy.random.choice(len(dist), p=dist) + 1
self.rels[words] = rel
def __call__(self, sample: ptb.Sample) -> torch.Tensor:
"""Map all possible (word, word) pairs to labels.
Args:
sample (ptb.Sample): The sample to label.
Returns:
torch.Tensor: For length W sentence, returns shape (W, W) matrix
where element (v, w) is the index of the label describing
the relationship between word v and w, if any. Defaults to
the "unk" label, even if there is no relationship between
v and w.
"""
heads = sample.heads
labels = torch.zeros(len(heads), len(heads), dtype=torch.long)
for dep, head in enumerate(heads):
if head == -1:
head = dep
words = (sample.sentence[dep], sample.sentence[head])
labels[dep, head] = self.rels.get(words, 0)
return labels
def __len__(self) -> int:
"""Return the number of relationships, including the null one."""
return len(self.dist) + 1
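The control variant can be pictured as drawing one arbitrary tag per (dependent, head) word-type pair, so the same pair always receives the same label. A toy version with a made-up distribution:
import numpy
dist = numpy.array([0.6, 0.3, 0.1])   # illustrative relation frequencies
rels = {}
for words in [('the', 'dog'), ('dog', 'barks'), ('the', 'dog')]:
    if words not in rels:
        # add one so that 0 stays reserved for "no relationship"
        rels[words] = numpy.random.choice(len(dist), p=dist) + 1
assert rels[('the', 'dog')] in {1, 2, 3}   # stable across repeated pairs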
class DLPTaskDataset(datasets.TaskDataset):
"""Iterate over (word representation pair, dependency label) pairs."""
def __init__(
self,
representations: reps.RepresentationLayerDataset,
annotations: Sequence[ptb.Sample],
indexer: Type[Union[DLPIndexer, ControlDLPIndexer]] = DLPIndexer,
**kwargs: Any,
):
"""Initialize dataset by mapping each dependency label to an index.
The kwargs are forwarded to indexer when it is instantiated.
Args:
representations (representations.RepresentationsLayerDataset): Word
representations corresponding to the words to be paired and
labeled.
annotations (Sequence[ptb.PTBSample]): The PTB annotations from
which to pull dependency labels.
indexer (Union[DLPIndexer, ControlDLPIndexer]): Type of the indexer
to use for mapping PTB dependency label annotations to integer
tensors. Instantiated with given annotations unless the
samples keyword is set in kwargs.
Raises:
ValueError: If number of representations/annotations do not match.
"""
if len(representations) != len(annotations):
raise ValueError(f'got {len(representations)} representations '
f'but {len(annotations)} annotations')
self.representations = representations
self.annotations = annotations
kwargs = kwargs.copy()
kwargs.setdefault('samples', annotations)
self.indexer = indexer(**kwargs)
def __getitem__(self, index: int) -> Tuple[torch.Tensor, torch.Tensor]:
"""Return (representations, integral POS tags) for index'th sentence.
Args:
index (int): Index of the sentence in the dataset.
Returns:
Tuple[torch.Tensor, torch.Tensor]: First tensor is shape
(sentence_length, representation_dimension) containing word
representations, and second is shape (sentence_length,)
containing integral POS tags.
"""
representations = self.representations[index]
annotations = self.annotations[index]
assert len(representations) == len(
annotations.sentence), 'diff sentence lengths?'
rels = self.indexer(annotations)
# Find all pairs of words sharing an edge.
indexes = set(range(len(representations)))
pairs = [(i, j) for i in indexes for j in indexes if rels[i, j]]
assert pairs and len(pairs) == len(representations), 'missing edges?'
# Stack everything before returning it.
bigrams = torch.stack([
torch.stack((representations[i], representations[j]))
for i, j in pairs
])
labels = torch.stack([rels[i, j] for i, j in pairs])
return bigrams, labels
def __iter__(self) -> Iterator[Tuple[torch.Tensor, torch.Tensor]]:
"""Yield all (sentence representations, sentence POS tags) samples."""
for index in range(len(self)):
yield self[index]
def __len__(self) -> int:
"""Return the number of sentences (batches) in the dataset."""
return len(self.annotations)
@property
def sample_representations_shape(self) -> Sequence[int]:
"""Return the dimensionality of the representation pairs."""
return (2, self.representations.dataset.dimension)
@property
def sample_features_shape(self) -> Sequence[int]:
"""Return the shape of each individual POS tag.
Since POS tags are integral scalars, there is no such shape!
"""
return ()
def count_samples(self) -> int:
"""Return the number of words in the dataset."""
return sum(
self.representations.dataset.length(index)
for index in range(len(self.representations)))
def count_unique_features(self) -> int:
"""Return number of unique POS seen in data."""
return len(self.indexer)
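To make __getitem__ concrete, here is the bigram construction on toy tensors (shapes and values are assumptions for illustration only):
import torch
reps = torch.randn(3, 8)            # 3 words, 8-dimensional representations
rels = torch.tensor([[0, 2, 0],     # rels[i, j] != 0 where word i's head is j
                     [0, 3, 0],
                     [0, 1, 0]])
pairs = [(i, j) for i in range(3) for j in range(3) if rels[i, j]]
bigrams = torch.stack([torch.stack((reps[i], reps[j])) for i, j in pairs])
labels = torch.stack([rels[i, j] for i, j in pairs])
# bigrams.shape == (3, 2, 8); labels.tolist() == [2, 3, 1]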
# Define the valid probe types for this task.
Probe = Union[probes.Linear, probes.MLP]
def train(train_dataset: datasets.TaskDataset,
dev_dataset: datasets.TaskDataset,
test_dataset: datasets.TaskDataset,
probe_t: Type[Probe] = probes.Linear,
project_to: Optional[int] = None,
share_projection: bool = False,
epochs: int = 25,
patience: int = 4,
lr: float = 1e-3,
device: Optional[Device] = None,
also_log_to_wandb: bool = False) -> Tuple[Probe, float]:
"""Train a probe on dependency label prediction.
Args:
train_dataset (TaskDataset): Training data for probe.
dev_dataset (TaskDataset): Validation data for probe, used for early
stopping.
test_dataset (TaskDataset): Test data for probe, used to compute
final accuracy after training.
probe_t (Type[Probe], optional): Probe type to train.
Defaults to probes.Linear.
project_to (Optional[int], optional): Project representations to this
dimensionality. Defaults to no projection.
share_projection (bool): If set, project the left and right components
of pairwise probes with the same projection. E.g. if the probe is
bilinear of the form xAy, we will always compute (Px)A(Py) as
opposed to (Px)A(Qy) for distinct projections P, Q. Defaults to NOT
shared.
epochs (int, optional): Maximum passes through the training dataset.
Defaults to 25.
patience (int, optional): Allow dev loss to not improve for this many
epochs, then stop training. Defaults to 4.
lr (float, optional): Learning rate for optimizer. Defaults to 1e-3.
device (Optional[Device], optional): Torch device on which to
train probe. Defaults to CPU.
also_log_to_wandb (Optional[pathlib.Path], optional): If set, log
training data to wandb. By default, wandb is not used.
Returns:
Tuple[Probe, float]: The trained probe and its test accuracy.
"""
log = logging.getLogger(__name__)
device = device or 'cpu'
ndims = train_dataset.sample_representations_shape[-1]
log.info('representations have dimension %d', ndims)
ntags = train_dataset.count_unique_features()
assert ntags is not None, 'no label count, is dataset for different task?'
log.info('dependency labeling task has %d tags', ntags)
if project_to is None or ndims == project_to:
logging.info('projection dim = reps dim, not projecting')
projection = None
elif share_projection:
projection = projections.Projection(ndims, project_to)
else:
projection = projections.Projection(2 * ndims, 2 * project_to)
probe = probe_t(2 * (project_to or ndims), ntags, project=projection)
learning.train(probe,
train_dataset,
dev_dataset=dev_dataset,
stopper=learning.EarlyStopping(patience=patience),
epochs=epochs,
lr=lr,
device=device,
also_log_to_wandb=also_log_to_wandb)
accuracy = learning.test(probe, test_dataset, device=device)
return probe, accuracy
# TODO(evandez): May as well commonize this, since it's shared with POS.
def axis_alignment(
probe: Probe,
dev_dataset: datasets.TaskDataset,
test_dataset: datasets.TaskDataset,
device: Optional[Device] = None,
also_log_to_wandb: bool = False) -> Sequence[Tuple[int, float]]:
"""Measure whether the given probe is axis aligned.
Args:
probe (Probe): The probe to evaluate.
dev_dataset (datasets.TaskDataset): Data used to determine which axes
to cut.
test_dataset (datasets.TaskDataset): Data used to determine the effect
of cutting an axis.
device (Optional[Device], optional): Torch device on which to
train probe. Defaults to CPU.
also_log_to_wandb (bool, optional): If set, log results to wandb.
Returns:
Sequence[Tuple[int, float]]: The ablated axes paired with optimal probe
accuracy after that axis is zeroed.
"""
log = logging.getLogger(__name__)
projection = probe.project
assert projection is not None, 'no projection?'
axes = set(range(projection.project.in_features))
ablated: Set[int] = set()
accuracies = []
while axes:
best_model, best_axis, best_accuracy = probe, -1, -1.
for axis in axes:
model = copy.deepcopy(best_model).eval()
assert model.project is not None, 'no projection?'
model.project.project.weight.data[:, sorted(ablated | {axis})] = 0
accuracy = learning.test(model, dev_dataset, device=device)
if accuracy > best_accuracy:
best_model = model
best_axis = axis
best_accuracy = accuracy
accuracy = learning.test(best_model, test_dataset, device=device)
log.info('ablating axis %d, test accuracy %f', best_axis, accuracy)
if also_log_to_wandb:
wandb.log({
'axis': best_axis,
'dev accuracy': best_accuracy,
'test accuracy': accuracy,
})
axes.remove(best_axis)
ablated.add(best_axis)
accuracies.append((best_axis, accuracy))
return tuple(accuracies)
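The loop above is a greedy ablation: at each step it zeroes whichever remaining projection column hurts dev accuracy least, then repeats on the survivors. The same idea restated on a bare weight matrix, with a made-up scoring function standing in for learning.test:
import torch
weight = torch.randn(4, 4)
def score(w):                               # stand-in for learning.test(...)
    return -w.abs().sum().item()
axes, ablated, trace = set(range(4)), set(), []
while axes:
    best_axis, best_score = None, float('-inf')
    for axis in axes:
        w = weight.clone()
        w[:, sorted(ablated | {axis})] = 0  # zero candidate plus previously cut axes
        s = score(w)
        if s > best_score:
            best_axis, best_score = axis, s
    axes.remove(best_axis)
    ablated.add(best_axis)
    trace.append((best_axis, best_score))
# trace lists the axes in the order they were cut, with the score after each cut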
| [
"logging.getLogger",
"wandb.log",
"torch.stack",
"ldp.learning.EarlyStopping",
"numpy.sum",
"collections.defaultdict",
"copy.deepcopy",
"ldp.models.projections.Projection",
"logging.info",
"ldp.learning.test"
] | [((11285, 11312), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (11302, 11312), False, 'import logging\n'), ((12378, 12427), 'ldp.learning.test', 'learning.test', (['probe', 'test_dataset'], {'device': 'device'}), '(probe, test_dataset, device=device)\n', (12391, 12427), False, 'from ldp import datasets, learning\n'), ((13442, 13469), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (13459, 13469), False, 'import logging\n'), ((7924, 7967), 'torch.stack', 'torch.stack', (['[rels[i, j] for i, j in pairs]'], {}), '([rels[i, j] for i, j in pairs])\n', (7935, 7967), False, 'import torch\n'), ((11709, 11766), 'logging.info', 'logging.info', (['"""projection dim = reps dim, not projecting"""'], {}), "('projection dim = reps dim, not projecting')\n", (11721, 11766), False, 'import logging\n'), ((14199, 14253), 'ldp.learning.test', 'learning.test', (['best_model', 'test_dataset'], {'device': 'device'}), '(best_model, test_dataset, device=device)\n', (14212, 14253), False, 'from ldp import datasets, learning\n'), ((3130, 3165), 'collections.defaultdict', 'collections.defaultdict', (['(lambda : 0)'], {}), '(lambda : 0)\n', (3153, 3165), False, 'import collections\n'), ((3388, 3403), 'numpy.sum', 'numpy.sum', (['dist'], {}), '(dist)\n', (3397, 3403), False, 'import numpy\n'), ((11841, 11882), 'ldp.models.projections.Projection', 'projections.Projection', (['ndims', 'project_to'], {}), '(ndims, project_to)\n', (11863, 11882), False, 'from ldp.models import probes, projections\n'), ((11914, 11963), 'ldp.models.projections.Projection', 'projections.Projection', (['(2 * ndims)', '(2 * project_to)'], {}), '(2 * ndims, 2 * project_to)\n', (11936, 11963), False, 'from ldp.models import probes, projections\n'), ((12170, 12211), 'ldp.learning.EarlyStopping', 'learning.EarlyStopping', ([], {'patience': 'patience'}), '(patience=patience)\n', (12192, 12211), False, 'from ldp import datasets, learning\n'), ((13981, 14029), 'ldp.learning.test', 'learning.test', (['model', 'dev_dataset'], {'device': 'device'}), '(model, dev_dataset, device=device)\n', (13994, 14029), False, 'from ldp import datasets, learning\n'), ((14373, 14465), 'wandb.log', 'wandb.log', (["{'axis': best_axis, 'dev accuracy': best_accuracy, 'test accuracy': accuracy}"], {}), "({'axis': best_axis, 'dev accuracy': best_accuracy,\n 'test accuracy': accuracy})\n", (14382, 14465), False, 'import wandb\n'), ((7812, 7865), 'torch.stack', 'torch.stack', (['(representations[i], representations[j])'], {}), '((representations[i], representations[j]))\n', (7823, 7865), False, 'import torch\n'), ((13783, 13808), 'copy.deepcopy', 'copy.deepcopy', (['best_model'], {}), '(best_model)\n', (13796, 13808), False, 'import copy\n')] |
import os
import sentry_sdk
from sentry_sdk.integrations.django import DjangoIntegration
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
PWA_SERVICE_WORKER_PATH = os.path.join(
BASE_DIR, 'routes/static/routes/js', 'serviceworker.js')
DEBUG = int(os.environ.get("DEBUG", default=0))
SECRET_KEY = os.environ.get("SECRET_KEY", '<KEY>')
# 'DJANGO_ALLOWED_HOSTS' should be a single string of hosts with a space between each.
# For example: 'DJANGO_ALLOWED_HOSTS=localhost 127.0.0.1 [::1]'
ALLOWED_HOSTS = os.environ.get("DJANGO_ALLOWED_HOSTS", 'localhost').split(" ")
# Application definition
INSTALLED_APPS = [
'routes',
'accounts',
'dashboard.apps.DashboardConfig',
'api.apps.ApiConfig',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'widget_tweaks',
'rest_framework',
'pwa',
]
# 'celery',
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'tracks.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'tracks.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
"default": {
"ENGINE": os.environ.get("SQL_ENGINE", "django.db.backends.sqlite3"),
"NAME": os.environ.get("SQL_DATABASE", os.path.join(BASE_DIR, "db.sqlite3")),
"USER": os.environ.get("SQL_USER", "user"),
"PASSWORD": os.environ.get("SQL_PASSWORD", "password"),
"HOST": os.environ.get("SQL_HOST", "localhost"),
"PORT": os.environ.get("SQL_PORT", "5432"),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
MEDIA_URL = '/media/'
STATIC_ROOT = './static/'
MEDIA_ROOT = './media/'
LOGIN_REDIRECT_URL = 'home'
LOGOUT_REDIRECT_URL = 'home'
# no email for localhost or staging
EMAIL_USE_TLS = os.environ.get("EMAIL_USE_TLS")
EMAIL_HOST = os.environ.get("EMAIL_HOST")
EMAIL_HOST_USER = os.environ.get("EMAIL_HOST_USER")
EMAIL_HOST_PASSWORD = os.environ.get("EMAIL_HOST_PASSWORD")
EMAIL_PORT = os.environ.get("EMAIL_PORT")
EMAIL_BACKEND = os.environ.get("EMAIL_BACKEND")
DEFAULT_FROM_EMAIL = '<EMAIL>'
# CELERY
# CELERY_BROKER_URL = 'redis://redis:6379/0'
# CELERY_RESULT_BACKEND = 'redis://redis:6379/0'
# BROKER_URL = 'redis://localhost:6379/0'
# CELERY_RESULT_BACKEND = 'redis://localhost:6379/'
# CELERY_ACCEPT_CONTENT = ['application/json']
# CELERY_TASK_SERIALIZER = 'json'
# CELERY_RESULT_SERIALIZER = 'json'
REST_FRAMEWORK = {
# Use Django's standard `django.contrib.auth` permissions,
# or allow read-only access for unauthenticated users.
'DEFAULT_PERMISSION_CLASSES': [
'rest_framework.permissions.DjangoModelPermissionsOrAnonReadOnly'
],
'DEFAULT_AUTHENTICATION_CLASSES': (
'rest_framework.authentication.TokenAuthentication',
'rest_framework.authentication.SessionAuthentication',
),
'DEFAULT_PAGINATION_CLASS': 'rest_framework.pagination.LimitOffsetPagination',
'PAGE_SIZE': 10
}
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'console': {
'format': '%(levelname)s %(asctime)s %(module)s: %(message)s'
},
},
'handlers': {
'console': {
'class': 'logging.StreamHandler',
'formatter': 'console'
},
},
'loggers': {
'': {
'handlers': ['console'],
'level': os.getenv('DJANGO_LOG_LEVEL', 'INFO'),
},
'django': {
'handlers': ['console'],
'level': os.getenv('DJANGO_LOG_LEVEL', 'INFO'),
},
'django.request': {
'level': 'INFO',
'handlers': ['console']
}
# 'celery': {
# 'handlers': ['console'],
# 'level': os.getenv('DJANGO_LOG_LEVEL', 'INFO'),
# },
},
}
# STATICFILES_DIRS = [
# os.path.join(BASE_DIR, 'static'),
# ]
PWA_APP_NAME = 'ChalkTracks'
PWA_APP_DESCRIPTION = "Indoor Climbing Tracker"
PWA_APP_THEME_COLOR = '#000000'
PWA_APP_BACKGROUND_COLOR = '#000000'
PWA_APP_DISPLAY = 'standalone'
PWA_APP_SCOPE = '/'
PWA_APP_ORIENTATION = 'portrait'
PWA_APP_START_URL = '/'
PWA_APP_ICONS = [
{
'src': '/static/routes/favicon_io/favicon-32x32.png',
'sizes': '32x32',
"type": "image/png",
"purpose": "any maskable"
}, {
"src": "/static/routes/favicon_io/android-chrome-192x192.png",
"sizes": "192x192",
"type": "image/png",
"purpose": "any maskable"
}, {
"src": "/static/routes/favicon_io/android-chrome-512x512.png",
"sizes": "512x512",
"type": "image/png",
"purpose": "any maskable"
}
]
PWA_APP_DIR = 'ltr'
PWA_APP_LANG = 'en-US'
sentry_sdk.init(
dsn="https://09ce3488b18c4db19820b873eecc30c4@sentry.io/1878812",
integrations=[DjangoIntegration()],
# If you wish to associate users to errors (assuming you are using
# django.contrib.auth) you may enable sending PII data.
send_default_pii=True
)
| [
"os.getenv",
"sentry_sdk.integrations.django.DjangoIntegration",
"os.path.join",
"os.environ.get",
"os.path.abspath"
] | [((260, 329), 'os.path.join', 'os.path.join', (['BASE_DIR', '"""routes/static/routes/js"""', '"""serviceworker.js"""'], {}), "(BASE_DIR, 'routes/static/routes/js', 'serviceworker.js')\n", (272, 329), False, 'import os\n'), ((480, 517), 'os.environ.get', 'os.environ.get', (['"""SECRET_KEY"""', '"""<KEY>"""'], {}), "('SECRET_KEY', '<KEY>')\n", (494, 517), False, 'import os\n'), ((3635, 3666), 'os.environ.get', 'os.environ.get', (['"""EMAIL_USE_TLS"""'], {}), "('EMAIL_USE_TLS')\n", (3649, 3666), False, 'import os\n'), ((3680, 3708), 'os.environ.get', 'os.environ.get', (['"""EMAIL_HOST"""'], {}), "('EMAIL_HOST')\n", (3694, 3708), False, 'import os\n'), ((3727, 3760), 'os.environ.get', 'os.environ.get', (['"""EMAIL_HOST_USER"""'], {}), "('EMAIL_HOST_USER')\n", (3741, 3760), False, 'import os\n'), ((3783, 3820), 'os.environ.get', 'os.environ.get', (['"""EMAIL_HOST_PASSWORD"""'], {}), "('EMAIL_HOST_PASSWORD')\n", (3797, 3820), False, 'import os\n'), ((3834, 3862), 'os.environ.get', 'os.environ.get', (['"""EMAIL_PORT"""'], {}), "('EMAIL_PORT')\n", (3848, 3862), False, 'import os\n'), ((3879, 3910), 'os.environ.get', 'os.environ.get', (['"""EMAIL_BACKEND"""'], {}), "('EMAIL_BACKEND')\n", (3893, 3910), False, 'import os\n'), ((341, 410), 'os.path.join', 'os.path.join', (['BASE_DIR', '"""routes/static/routes/js"""', '"""serviceworker.js"""'], {}), "(BASE_DIR, 'routes/static/routes/js', 'serviceworker.js')\n", (353, 410), False, 'import os\n'), ((430, 464), 'os.environ.get', 'os.environ.get', (['"""DEBUG"""'], {'default': '(0)'}), "('DEBUG', default=0)\n", (444, 464), False, 'import os\n'), ((205, 230), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (220, 230), False, 'import os\n'), ((686, 737), 'os.environ.get', 'os.environ.get', (['"""DJANGO_ALLOWED_HOSTS"""', '"""localhost"""'], {}), "('DJANGO_ALLOWED_HOSTS', 'localhost')\n", (700, 737), False, 'import os\n'), ((2246, 2304), 'os.environ.get', 'os.environ.get', (['"""SQL_ENGINE"""', '"""django.db.backends.sqlite3"""'], {}), "('SQL_ENGINE', 'django.db.backends.sqlite3')\n", (2260, 2304), False, 'import os\n'), ((2408, 2442), 'os.environ.get', 'os.environ.get', (['"""SQL_USER"""', '"""user"""'], {}), "('SQL_USER', 'user')\n", (2422, 2442), False, 'import os\n'), ((2464, 2506), 'os.environ.get', 'os.environ.get', (['"""SQL_PASSWORD"""', '"""password"""'], {}), "('SQL_PASSWORD', 'password')\n", (2478, 2506), False, 'import os\n'), ((2524, 2563), 'os.environ.get', 'os.environ.get', (['"""SQL_HOST"""', '"""localhost"""'], {}), "('SQL_HOST', 'localhost')\n", (2538, 2563), False, 'import os\n'), ((2581, 2615), 'os.environ.get', 'os.environ.get', (['"""SQL_PORT"""', '"""5432"""'], {}), "('SQL_PORT', '5432')\n", (2595, 2615), False, 'import os\n'), ((2353, 2389), 'os.path.join', 'os.path.join', (['BASE_DIR', '"""db.sqlite3"""'], {}), "(BASE_DIR, 'db.sqlite3')\n", (2365, 2389), False, 'import os\n'), ((5222, 5259), 'os.getenv', 'os.getenv', (['"""DJANGO_LOG_LEVEL"""', '"""INFO"""'], {}), "('DJANGO_LOG_LEVEL', 'INFO')\n", (5231, 5259), False, 'import os\n'), ((5350, 5387), 'os.getenv', 'os.getenv', (['"""DJANGO_LOG_LEVEL"""', '"""INFO"""'], {}), "('DJANGO_LOG_LEVEL', 'INFO')\n", (5359, 5387), False, 'import os\n'), ((6647, 6666), 'sentry_sdk.integrations.django.DjangoIntegration', 'DjangoIntegration', ([], {}), '()\n', (6664, 6666), False, 'from sentry_sdk.integrations.django import DjangoIntegration\n')] |
'''
* @author Waldinsamkeit
* @email <EMAIL>
* @create date 2020-09-25 14:33:38
* @desc
'''
import torch
'''--------------------- Weighted Binary Cross Entropy ----------------------'''
'''
In torch's BCELoss, `weight` is applied to every element of the input rather
than to every class, hence this custom class-weighted implementation.
'''
def weighted_binary_cross_entropy(output, target, weights=None):
    if weights is not None:
        assert len(weights) == 2
        # weights[1] scales the positive-class term, weights[0] the negative-class term
        loss = weights[1] * (target * torch.log(output)) + \
               weights[0] * ((1 - target) * torch.log(1 - output))
else:
loss = target * torch.log(output) + (1 - target) * torch.log(1 - output)
return torch.neg(torch.mean(loss))
''' ---------------------- Binary focal loss function -------------------------- '''
'''
To some degree, focal loss can reduce the influence of an imbalanced dataset
by down-weighting easy, well-classified examples.
'''
def focal_loss(y_true,y_pred,device):
    alpha, gamma = torch.tensor(0.25).to(device), torch.tensor(2.0).to(device)
    # clamp predictions away from 0 and 1 to keep log() finite
    y_pred = torch.clamp(y_pred, 1e-7, 1 - 1e-7)
    # modulating factor: (1 - p)**gamma for positives, p**gamma for negatives
    return - alpha * y_true * torch.log(y_pred) * (1 - y_pred) ** gamma \
        - (1 - alpha) * (1 - y_true) * torch.log(1 - y_pred) * y_pred ** gamma
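A quick usage sketch for both losses (values are illustrative; both functions expect probabilities, e.g. sigmoid outputs):
import torch
device = torch.device('cpu')
y_pred = torch.sigmoid(torch.randn(4))          # probabilities in (0, 1)
y_true = torch.tensor([1.0, 0.0, 1.0, 0.0])
bce = weighted_binary_cross_entropy(y_pred, y_true, weights=[1.0, 2.0])
fl = focal_loss(y_true, y_pred, device).mean()   # reduce the element-wise loss
print(bce.item(), fl.item())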
| [
"torch.mean",
"torch.tensor",
"torch.log",
"torch.clamp"
] | [((969, 1006), 'torch.clamp', 'torch.clamp', (['y_pred', '(1e-07)', '(1 - 1e-07)'], {}), '(y_pred, 1e-07, 1 - 1e-07)\n', (980, 1006), False, 'import torch\n'), ((660, 676), 'torch.mean', 'torch.mean', (['loss'], {}), '(loss)\n', (670, 676), False, 'import torch\n'), ((581, 598), 'torch.log', 'torch.log', (['output'], {}), '(output)\n', (590, 598), False, 'import torch\n'), ((616, 637), 'torch.log', 'torch.log', (['(1 - output)'], {}), '(1 - output)\n', (625, 637), False, 'import torch\n'), ((897, 915), 'torch.tensor', 'torch.tensor', (['(0.25)'], {}), '(0.25)\n', (909, 915), False, 'import torch\n'), ((929, 946), 'torch.tensor', 'torch.tensor', (['(2.0)'], {}), '(2.0)\n', (941, 946), False, 'import torch\n'), ((1031, 1048), 'torch.log', 'torch.log', (['y_pred'], {}), '(y_pred)\n', (1040, 1048), False, 'import torch\n'), ((1113, 1134), 'torch.log', 'torch.log', (['(1 - y_pred)'], {}), '(1 - y_pred)\n', (1122, 1134), False, 'import torch\n'), ((460, 477), 'torch.log', 'torch.log', (['output'], {}), '(output)\n', (469, 477), False, 'import torch\n'), ((524, 545), 'torch.log', 'torch.log', (['(1 - output)'], {}), '(1 - output)\n', (533, 545), False, 'import torch\n')] |
from typing import *
import attr
from dlms_cosem.hdlc import validators
@attr.s(auto_attribs=True)
class HdlcAddress:
"""
A client address shall always be expressed on one byte.
To enable addressing more than one logical device within a single physical device
and to support the multi-drop configuration the server address may be divided in
two parts– may be divided into two parts:
The logical address to address a logical device (separate addressable entity
within a physical device) makes up the upper HDLC address
The logical address must always be present.
The physical address is used to address a physical device ( a physical device on
a multi-drop)
The physical address can be omitted it not used.
"""
logical_address: int = attr.ib(validator=[validators.validate_hdlc_address])
physical_address: Optional[int] = attr.ib(
default=None, validator=[validators.validate_hdlc_address]
)
address_type: str = attr.ib(
default="client", validator=[validators.validate_hdlc_address_type]
)
@property
def length(self):
"""
The number of bytes the address makes up.
:return:
"""
return len(self.to_bytes())
def to_bytes(self):
out: List[Optional[int]] = list()
if self.address_type == "client":
# shift left 1 bit and set the lsb to mark end of address.
out.append(((self.logical_address << 1) | 0b00000001))
else:
# server address type
logical_higher, logical_lower = self._split_address(self.logical_address)
if self.physical_address:
physical_higher, physical_lower = self._split_address(
self.physical_address
)
# mark physical lower as end
physical_lower = physical_lower | 0b00000001
out.extend(
[logical_higher, logical_lower, physical_higher, physical_lower]
)
else:
# no physical address so mark the logial as end.
logical_lower = logical_lower | 0b00000001
out.extend([logical_higher, logical_lower])
out_bytes = list()
for address in out:
            if address is not None:  # a zero address byte is still a valid byte
out_bytes.append(address.to_bytes(1, "big"))
return b"".join(out_bytes)
@staticmethod
def _split_address(address: int) -> Tuple[Optional[int], int]:
higher: Optional[int]
lower: int
if address > 0b01111111:
lower = (address & 0b0000000001111111) << 1
higher = (address & 0b0011111110000000) >> 6
else:
lower = address << 1
higher = None
return higher, lower
@staticmethod
def _address_to_byte(address: int) -> bytes:
return address.to_bytes(1, "big")
@classmethod
def destination_from_bytes(cls, frame_bytes: bytes, address_type: str):
destination_address_data, _ = HdlcAddress.find_address_in_frame_bytes(
frame_bytes
)
(
destination_logical,
destination_physical,
destination_length,
) = destination_address_data
return cls(destination_logical, destination_physical, address_type)
@classmethod
def source_from_bytes(cls, frame_bytes: bytes, address_type: str):
_, source_address_data = HdlcAddress.find_address_in_frame_bytes(frame_bytes)
source_logical, source_physical, source_length = source_address_data
return cls(source_logical, source_physical, address_type)
@staticmethod
def find_address_in_frame_bytes(
hdlc_frame_bytes: bytes,
) -> Tuple[Tuple[int, Optional[int], int], Tuple[int, Optional[int], int]]:
"""
address can be 1, 2 or 4 bytes long. the end byte is indicated by the of
the last byte LSB being 1
The first address is the destination address and the seconds is the
source address.
:param frame_bytes:
:return:
"""
# Find destination address.
destination_length: int = 1
destination_logical: int = 0
destination_physical: Optional[int] = 0
destination_positions_list: List[Tuple[int, int]] = [(3, 1), (4, 2), (6, 4)]
address_bytes: bytes
for pos, _length in destination_positions_list:
end_byte = hdlc_frame_bytes[pos]
if bool(end_byte & 0b00000001):
# Found end byte:
destination_length = _length
break
continue
if destination_length == 1:
address_bytes = hdlc_frame_bytes[3].to_bytes(1, "big")
destination_logical = address_bytes[0] >> 1
destination_physical = None
elif destination_length == 2:
address_bytes = hdlc_frame_bytes[3:5]
destination_logical = address_bytes[0] >> 1
destination_physical = address_bytes[1] >> 1
elif destination_length == 4:
address_bytes = hdlc_frame_bytes[3:7]
destination_logical = HdlcAddress.parse_two_byte_address(address_bytes[:2])
            destination_physical = HdlcAddress.parse_two_byte_address(address_bytes[2:])
# Find source address
source_length: int = 1
source_logical: int = 0
source_physical: Optional[int] = 0
source_position_list: List[Tuple[int, int]] = [
(item[0] + destination_length, item[1])
for item in destination_positions_list
]
for pos, _length in source_position_list:
end_byte = hdlc_frame_bytes[pos]
if bool(end_byte & 0b00000001):
# Found end byte:
source_length = _length
break
continue
if source_length == 1:
address_bytes = hdlc_frame_bytes[3 + destination_length].to_bytes(1, "big")
source_logical = address_bytes[0] >> 1
source_physical = None
        elif source_length == 2:
            address_bytes = hdlc_frame_bytes[
                3 + destination_length : 3 + destination_length + source_length
            ]
            source_logical = address_bytes[0] >> 1
            source_physical = address_bytes[1] >> 1
        elif source_length == 4:
            address_bytes = hdlc_frame_bytes[
                3 + destination_length : 3 + destination_length + source_length
            ]
            source_logical = HdlcAddress.parse_two_byte_address(address_bytes[:2])
            source_physical = HdlcAddress.parse_two_byte_address(address_bytes[2:])
return (
(destination_logical, destination_physical, destination_length),
(source_logical, source_physical, source_length),
)
@staticmethod
def parse_two_byte_address(address_bytes: bytes):
        if len(address_bytes) != 2:
            raise ValueError("Can only parse 2 bytes for address")
upper = address_bytes[0] >> 1
lower = address_bytes[1] >> 1
return lower + (upper << 7)
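A short usage sketch for the serialization side (addresses chosen arbitrarily, assuming the validators accept them):
client = HdlcAddress(logical_address=16, address_type='client')
assert client.to_bytes() == b'\x21'       # 16 << 1, with the LSB end marker set
server = HdlcAddress(logical_address=1, physical_address=17,
                     address_type='server')
assert server.to_bytes() == b'\x02\x23'   # logical 1, then end-marked physical 17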
| [
"attr.s",
"attr.ib"
] | [((77, 102), 'attr.s', 'attr.s', ([], {'auto_attribs': '(True)'}), '(auto_attribs=True)\n', (83, 102), False, 'import attr\n'), ((790, 843), 'attr.ib', 'attr.ib', ([], {'validator': '[validators.validate_hdlc_address]'}), '(validator=[validators.validate_hdlc_address])\n', (797, 843), False, 'import attr\n'), ((882, 949), 'attr.ib', 'attr.ib', ([], {'default': 'None', 'validator': '[validators.validate_hdlc_address]'}), '(default=None, validator=[validators.validate_hdlc_address])\n', (889, 949), False, 'import attr\n'), ((988, 1064), 'attr.ib', 'attr.ib', ([], {'default': '"""client"""', 'validator': '[validators.validate_hdlc_address_type]'}), "(default='client', validator=[validators.validate_hdlc_address_type])\n", (995, 1064), False, 'import attr\n')] |
from __future__ import division, absolute_import, print_function
import warnings
import numpy as np
try:
import scipy.stats as stats
except ImportError:
pass
from .common import Benchmark
class Anderson_KSamp(Benchmark):
def setup(self, *args):
self.rand = [np.random.normal(loc=i, size=1000) for i in range(3)]
def time_anderson_ksamp(self):
with warnings.catch_warnings():
warnings.simplefilter('ignore', UserWarning)
stats.anderson_ksamp(self.rand)
class CorrelationFunctions(Benchmark):
param_names = ['alternative']
params = [
['two-sided', 'less', 'greater']
]
def setup(self, mode):
        a = np.random.rand(2, 2) * 10
self.a = a
def time_fisher_exact(self, alternative):
oddsratio, pvalue = stats.fisher_exact(self.a, alternative=alternative)
class InferentialStats(Benchmark):
def setup(self):
np.random.seed(12345678)
self.a = stats.norm.rvs(loc=5, scale=10, size=500)
self.b = stats.norm.rvs(loc=8, scale=10, size=20)
self.c = stats.norm.rvs(loc=8, scale=20, size=20)
def time_ttest_ind_same_var(self):
# test different sized sample with variances
stats.ttest_ind(self.a, self.b)
stats.ttest_ind(self.a, self.b, equal_var=False)
def time_ttest_ind_diff_var(self):
# test different sized sample with different variances
stats.ttest_ind(self.a, self.c)
stats.ttest_ind(self.a, self.c, equal_var=False)
class Distribution(Benchmark):
param_names = ['distribution', 'properties']
params = [
['cauchy', 'gamma', 'beta'],
['pdf', 'cdf', 'rvs', 'fit']
]
def setup(self, distribution, properties):
np.random.seed(12345678)
self.x = np.random.rand(100)
def time_distribution(self, distribution, properties):
if distribution == 'gamma':
if properties == 'pdf':
stats.gamma.pdf(self.x, a=5, loc=4, scale=10)
elif properties == 'cdf':
stats.gamma.cdf(self.x, a=5, loc=4, scale=10)
elif properties == 'rvs':
stats.gamma.rvs(size=1000, a=5, loc=4, scale=10)
elif properties == 'fit':
stats.gamma.fit(self.x, loc=4, scale=10)
elif distribution == 'cauchy':
if properties == 'pdf':
stats.cauchy.pdf(self.x, loc=4, scale=10)
elif properties == 'cdf':
stats.cauchy.cdf(self.x, loc=4, scale=10)
elif properties == 'rvs':
stats.cauchy.rvs(size=1000, loc=4, scale=10)
elif properties == 'fit':
stats.cauchy.fit(self.x, loc=4, scale=10)
elif distribution == 'beta':
if properties == 'pdf':
stats.beta.pdf(self.x, a=5, b=3, loc=4, scale=10)
elif properties == 'cdf':
stats.beta.cdf(self.x, a=5, b=3, loc=4, scale=10)
elif properties == 'rvs':
stats.beta.rvs(size=1000, a=5, b=3, loc=4, scale=10)
elif properties == 'fit':
stats.beta.fit(self.x, loc=4, scale=10)
# Retain old benchmark results (remove this if changing the benchmark)
time_distribution.version = "fb22ae5386501008d945783921fe44aef3f82c1dafc40cddfaccaeec38b792b0"
class DescriptiveStats(Benchmark):
param_names = ['n_levels']
params = [
[10, 1000]
]
def setup(self, n_levels):
np.random.seed(12345678)
self.levels = np.random.randint(n_levels, size=(1000, 10))
def time_mode(self, n_levels):
stats.mode(self.levels, axis=0)
| [
"scipy.stats.beta.rvs",
"scipy.stats.gamma.rvs",
"numpy.random.rand",
"scipy.stats.norm.rvs",
"scipy.stats.ttest_ind",
"scipy.stats.gamma.pdf",
"scipy.stats.cauchy.fit",
"numpy.random.seed",
"scipy.stats.beta.fit",
"warnings.simplefilter",
"numpy.random.normal",
"scipy.stats.gamma.cdf",
"scipy.stats.fisher_exact",
"scipy.stats.gamma.fit",
"scipy.stats.beta.cdf",
"scipy.stats.cauchy.cdf",
"scipy.stats.cauchy.rvs",
"scipy.stats.anderson_ksamp",
"scipy.stats.cauchy.pdf",
"scipy.stats.mode",
"warnings.catch_warnings",
"numpy.random.randint",
"scipy.stats.beta.pdf"
] | [((813, 864), 'scipy.stats.fisher_exact', 'stats.fisher_exact', (['self.a'], {'alternative': 'alternative'}), '(self.a, alternative=alternative)\n', (831, 864), True, 'import scipy.stats as stats\n'), ((931, 955), 'numpy.random.seed', 'np.random.seed', (['(12345678)'], {}), '(12345678)\n', (945, 955), True, 'import numpy as np\n'), ((973, 1014), 'scipy.stats.norm.rvs', 'stats.norm.rvs', ([], {'loc': '(5)', 'scale': '(10)', 'size': '(500)'}), '(loc=5, scale=10, size=500)\n', (987, 1014), True, 'import scipy.stats as stats\n'), ((1032, 1072), 'scipy.stats.norm.rvs', 'stats.norm.rvs', ([], {'loc': '(8)', 'scale': '(10)', 'size': '(20)'}), '(loc=8, scale=10, size=20)\n', (1046, 1072), True, 'import scipy.stats as stats\n'), ((1090, 1130), 'scipy.stats.norm.rvs', 'stats.norm.rvs', ([], {'loc': '(8)', 'scale': '(20)', 'size': '(20)'}), '(loc=8, scale=20, size=20)\n', (1104, 1130), True, 'import scipy.stats as stats\n'), ((1232, 1263), 'scipy.stats.ttest_ind', 'stats.ttest_ind', (['self.a', 'self.b'], {}), '(self.a, self.b)\n', (1247, 1263), True, 'import scipy.stats as stats\n'), ((1272, 1320), 'scipy.stats.ttest_ind', 'stats.ttest_ind', (['self.a', 'self.b'], {'equal_var': '(False)'}), '(self.a, self.b, equal_var=False)\n', (1287, 1320), True, 'import scipy.stats as stats\n'), ((1432, 1463), 'scipy.stats.ttest_ind', 'stats.ttest_ind', (['self.a', 'self.c'], {}), '(self.a, self.c)\n', (1447, 1463), True, 'import scipy.stats as stats\n'), ((1472, 1520), 'scipy.stats.ttest_ind', 'stats.ttest_ind', (['self.a', 'self.c'], {'equal_var': '(False)'}), '(self.a, self.c, equal_var=False)\n', (1487, 1520), True, 'import scipy.stats as stats\n'), ((1762, 1786), 'numpy.random.seed', 'np.random.seed', (['(12345678)'], {}), '(12345678)\n', (1776, 1786), True, 'import numpy as np\n'), ((1804, 1823), 'numpy.random.rand', 'np.random.rand', (['(100)'], {}), '(100)\n', (1818, 1823), True, 'import numpy as np\n'), ((3507, 3531), 'numpy.random.seed', 'np.random.seed', (['(12345678)'], {}), '(12345678)\n', (3521, 3531), True, 'import numpy as np\n'), ((3554, 3598), 'numpy.random.randint', 'np.random.randint', (['n_levels'], {'size': '(1000, 10)'}), '(n_levels, size=(1000, 10))\n', (3571, 3598), True, 'import numpy as np\n'), ((3643, 3674), 'scipy.stats.mode', 'stats.mode', (['self.levels'], {'axis': '(0)'}), '(self.levels, axis=0)\n', (3653, 3674), True, 'import scipy.stats as stats\n'), ((283, 317), 'numpy.random.normal', 'np.random.normal', ([], {'loc': 'i', 'size': '(1000)'}), '(loc=i, size=1000)\n', (299, 317), True, 'import numpy as np\n'), ((386, 411), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {}), '()\n', (409, 411), False, 'import warnings\n'), ((425, 469), 'warnings.simplefilter', 'warnings.simplefilter', (['"ignore"', 'UserWarning'], {}), "('ignore', UserWarning)\n", (446, 469), False, 'import warnings\n'), ((482, 513), 'scipy.stats.anderson_ksamp', 'stats.anderson_ksamp', (['self.rand'], {}), '(self.rand)\n', (502, 513), True, 'import scipy.stats as stats\n'), ((691, 711), 'numpy.random.rand', 'np.random.rand', (['(2)', '(2)'], {}), '(2, 2)\n', (705, 711), True, 'import numpy as np\n'), ((1972, 2017), 'scipy.stats.gamma.pdf', 'stats.gamma.pdf', (['self.x'], {'a': '(5)', 'loc': '(4)', 'scale': '(10)'}), '(self.x, a=5, loc=4, scale=10)\n', (1987, 2017), True, 'import scipy.stats as stats\n'), ((2072, 2117), 'scipy.stats.gamma.cdf', 'stats.gamma.cdf', (['self.x'], {'a': '(5)', 'loc': '(4)', 'scale': '(10)'}), '(self.x, a=5, loc=4, scale=10)\n', (2087, 2132), True, 'import scipy.stats as stats\n'), ((2407, 2448), 'scipy.stats.cauchy.pdf', 'stats.cauchy.pdf', (['self.x'], {'loc': '(4)', 'scale': '(10)'}), '(self.x, loc=4, scale=10)\n', (2423, 2448), True, 'import scipy.stats as stats\n'), ((2172, 2220), 'scipy.stats.gamma.rvs', 'stats.gamma.rvs', ([], {'size': '(1000)', 'a': '(5)', 'loc': '(4)', 'scale': '(10)'}), '(size=1000, a=5, loc=4, scale=10)\n', (2187, 2220), True, 'import scipy.stats as stats\n'), ((2503, 2544), 'scipy.stats.cauchy.cdf', 'stats.cauchy.cdf', (['self.x'], {'loc': '(4)', 'scale': '(10)'}), '(self.x, loc=4, scale=10)\n', (2519, 2544), True, 'import scipy.stats as stats\n'), ((2829, 2878), 'scipy.stats.beta.pdf', 'stats.beta.pdf', (['self.x'], {'a': '(5)', 'b': '(3)', 'loc': '(4)', 'scale': '(10)'}), '(self.x, a=5, b=3, loc=4, scale=10)\n', (2843, 2878), True, 'import scipy.stats as stats\n'), ((2275, 2315), 'scipy.stats.gamma.fit', 'stats.gamma.fit', (['self.x'], {'loc': '(4)', 'scale': '(10)'}), '(self.x, loc=4, scale=10)\n', (2290, 2315), True, 'import scipy.stats as stats\n'), ((2599, 2643), 'scipy.stats.cauchy.rvs', 'stats.cauchy.rvs', ([], {'size': '(1000)', 'loc': '(4)', 'scale': '(10)'}), '(size=1000, loc=4, scale=10)\n', (2615, 2643), True, 'import scipy.stats as stats\n'), ((2933, 2982), 'scipy.stats.beta.cdf', 'stats.beta.cdf', (['self.x'], {'a': '(5)', 'b': '(3)', 'loc': '(4)', 'scale': '(10)'}), '(self.x, a=5, b=3, loc=4, scale=10)\n', (2947, 2982), True, 'import scipy.stats as stats\n'), ((2698, 2739), 'scipy.stats.cauchy.fit', 'stats.cauchy.fit', (['self.x'], {'loc': '(4)', 'scale': '(10)'}), '(self.x, loc=4, scale=10)\n', (2714, 2739), True, 'import scipy.stats as stats\n'), ((3037, 3089), 'scipy.stats.beta.rvs', 'stats.beta.rvs', ([], {'size': '(1000)', 'a': '(5)', 'b': '(3)', 'loc': '(4)', 'scale': '(10)'}), '(size=1000, a=5, b=3, loc=4, scale=10)\n', (3051, 3089), True, 'import scipy.stats as stats\n'), ((3144, 3183), 'scipy.stats.beta.fit', 'stats.beta.fit', (['self.x'], {'loc': '(4)', 'scale': '(10)'}), '(self.x, loc=4, scale=10)\n', (3158, 3183), True, 'import scipy.stats as stats\n')]