code | apis | extract_api
---|---|---|
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
from matplotlib import cm
import numpy as np
import os
import contorno
from constantes import INTERVALOS, PASSOS, TAMANHO_BARRA, DELTA_T, DELTA_X
z_temp = contorno.p_3
TAMANHO_BARRA = 2
x = np.linspace(0.0, TAMANHO_BARRA, INTERVALOS+1)
y = np.linspace(0.0, DELTA_T, PASSOS+1)
z = []
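# Explicit (FTCS) finite-difference time stepping of the 1-D heat equation:
# T_i^{n+1} = T_i^n + (DELTA_T/DELTA_X**2) * (T_{i+1}^n - 2*T_i^n + T_{i-1}^n)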
for k in range(PASSOS+1):
z_k = np.copy(z_temp)
z.append(z_k)
for i in range(1, INTERVALOS):
z_temp[i] = z_k[i] + (DELTA_T/(DELTA_X**2)) * (z_k[i+1]-2*z_k[i]+z_k[i-1])
z = np.asarray(z)
x, y = np.meshgrid(x, y)
fig = plt.figure()
ax = fig.gca(projection='3d')
surf = ax.plot_surface(x, y, z, cmap=cm.coolwarm, antialiased=False)
ax.set_xlabel('x')
ax.set_ylabel('t')
ax.set_zlabel('T(x,t)')
fig.colorbar(surf, shrink=0.5, aspect=5)
plt.show()
| [
"numpy.copy",
"numpy.asarray",
"numpy.linspace",
"matplotlib.pyplot.figure",
"numpy.meshgrid",
"matplotlib.pyplot.show"
] | [((264, 311), 'numpy.linspace', 'np.linspace', (['(0.0)', 'TAMANHO_BARRA', '(INTERVALOS + 1)'], {}), '(0.0, TAMANHO_BARRA, INTERVALOS + 1)\n', (275, 311), True, 'import numpy as np\n'), ((314, 351), 'numpy.linspace', 'np.linspace', (['(0.0)', 'DELTA_T', '(PASSOS + 1)'], {}), '(0.0, DELTA_T, PASSOS + 1)\n', (325, 351), True, 'import numpy as np\n'), ((551, 564), 'numpy.asarray', 'np.asarray', (['z'], {}), '(z)\n', (561, 564), True, 'import numpy as np\n'), ((572, 589), 'numpy.meshgrid', 'np.meshgrid', (['x', 'y'], {}), '(x, y)\n', (583, 589), True, 'import numpy as np\n'), ((597, 609), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (607, 609), True, 'import matplotlib.pyplot as plt\n'), ((815, 825), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (823, 825), True, 'import matplotlib.pyplot as plt\n'), ((394, 409), 'numpy.copy', 'np.copy', (['z_temp'], {}), '(z_temp)\n', (401, 409), True, 'import numpy as np\n')] |
import sys
from class_vis import prettyPicture
from prep_terrain_data import makeTerrainData
import matplotlib.pyplot as plt
import copy
import numpy as np
import pylab as pl
features_train, labels_train, features_test, labels_test = makeTerrainData()
########################## SVM #################################
### we handle the import statement and SVC creation for you here
from sklearn.svm import SVC
clf = SVC(kernel="linear")
#### now your job is to fit the classifier
#### using the training features/labels, and to
#### make a set of predictions on the test data
clf.fit(features_train,labels_train)
pred = clf.predict(features_test)
#### store your predictions in a list named pred
from sklearn.metrics import accuracy_score
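# accuracy is the fraction of test samples whose predicted label matches labels_test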
acc = accuracy_score(pred, labels_test)
def submitAccuracy():
return acc
| [
"sklearn.metrics.accuracy_score",
"prep_terrain_data.makeTerrainData",
"sklearn.svm.SVC"
] | [((247, 264), 'prep_terrain_data.makeTerrainData', 'makeTerrainData', ([], {}), '()\n', (262, 264), False, 'from prep_terrain_data import makeTerrainData\n'), ((437, 457), 'sklearn.svm.SVC', 'SVC', ([], {'kernel': '"""linear"""'}), "(kernel='linear')\n", (440, 457), False, 'from sklearn.svm import SVC\n'), ((784, 817), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['pred', 'labels_test'], {}), '(pred, labels_test)\n', (798, 817), False, 'from sklearn.metrics import accuracy_score\n')] |
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from builtins import open
from builtins import str
from future import standard_library
standard_library.install_aliases()
try:
from queue import Queue, Empty
except ImportError:
from Queue import Queue, Empty
import sys
import types
import billiard
from signal import (
signal,
SIGINT,
)
from threading import (
Event,
Thread,
)
__all__ = [
'multiprocess',
'multithread',
'SignalHandler',
'Task'
]
class SignalHandler(object):
def __init__(self, stopper, threads):
self.stopper = stopper
self.threads = threads
def __call__(self, signum, frame):
self.stopper.set()
for task in self.threads:
task.join()
sys.exit(0)
class Task(object):
def __init__(self, func, args=(), key=None):
self._func = func
self._args = args
self._key = key if key is not None else func.__name__
self._result = None
self._is_done = False
@property
def func(self):
"""
Task function/method property - getter only.
:getter: Gets the task function/method object
"""
return self._func
@property
def args(self):
"""
Task function/method arguments property - getter only.
:getter: Gets the task function/method arguments
"""
return self._args
@property
def key(self):
"""
Task function/method key - getter only.
:getter: Gets the task function/method key
"""
return self._key
@property
def result(self):
"""
Task function/method result property.
:getter: Gets the task function/method result (produced by calling
the function on the defined arguments)
:setter: Sets the task function/method result
"""
return self._result
@result.setter
def result(self, r):
self._result = r
self._is_done = True
@property
def is_done(self):
"""
Task function/method status property - getter only.
:getter: Gets the task function/method status
"""
return self._is_done
def multithread(tasks, pool_size=10):
"""
Executes several tasks concurrently via ``threading`` threads, puts the
results into a queue, and generates these back to the caller.
"""
task_q = Queue()
num_tasks = 0
for task in tasks:
task_q.put(task)
num_tasks += 1
def run(i, task_q, result_q, stopper):
while not stopper.is_set():
try:
task = task_q.get_nowait()
except Empty:
break
else:
task.result = task.func(*task.args) if task.args else task.func()
if type(task.result) in (types.GeneratorType, list, tuple, set):
for r in task.result:
result_q.put((task.key, r,))
else:
result_q.put((task.key, task.result,))
task_q.task_done()
result_q = Queue()
stopper = Event()
threads = tuple(Thread(target=run, args=(i, task_q, result_q, stopper,)) for i in range(pool_size))
handler = SignalHandler(stopper, threads)
signal(SIGINT, handler)
for thread in threads:
thread.start()
task_q.join()
while not result_q.empty():
key, result = result_q.get_nowait()
yield key, result
def multiprocess(tasks, pool_size=10):
"""
Executes several tasks concurrently via Python ``multiprocessing``
processes, puts the results into a queue, and generates these back to the
caller.
"""
pool = billiard.Pool(pool_size)
result_q = Queue()
def build_results(result):
if type(result) in (types.GeneratorType, list, tuple, set):
for r in result:
result_q.put(r)
else:
result_q.put(result)
for task in tasks:
run = pool.apply_async(task.func, args=task.args, callback=build_results)
run.get()
pool.close()
pool.join()
while not result_q.empty():
result = result_q.get_nowait()
yield result
| [
"signal.signal",
"billiard.Pool",
"future.standard_library.install_aliases",
"threading.Event",
"sys.exit",
"threading.Thread",
"Queue.Queue"
] | [((263, 297), 'future.standard_library.install_aliases', 'standard_library.install_aliases', ([], {}), '()\n', (295, 297), False, 'from future import standard_library\n'), ((2592, 2599), 'Queue.Queue', 'Queue', ([], {}), '()\n', (2597, 2599), False, 'from Queue import Queue, Empty\n'), ((3287, 3294), 'Queue.Queue', 'Queue', ([], {}), '()\n', (3292, 3294), False, 'from Queue import Queue, Empty\n'), ((3310, 3317), 'threading.Event', 'Event', ([], {}), '()\n', (3315, 3317), False, 'from threading import Event, Thread\n'), ((3474, 3497), 'signal.signal', 'signal', (['SIGINT', 'handler'], {}), '(SIGINT, handler)\n', (3480, 3497), False, 'from signal import signal, SIGINT\n'), ((3901, 3925), 'billiard.Pool', 'billiard.Pool', (['pool_size'], {}), '(pool_size)\n', (3914, 3925), False, 'import billiard\n'), ((3942, 3949), 'Queue.Queue', 'Queue', ([], {}), '()\n', (3947, 3949), False, 'from Queue import Queue, Empty\n'), ((889, 900), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (897, 900), False, 'import sys\n'), ((3339, 3394), 'threading.Thread', 'Thread', ([], {'target': 'run', 'args': '(i, task_q, result_q, stopper)'}), '(target=run, args=(i, task_q, result_q, stopper))\n', (3345, 3394), False, 'from threading import Event, Thread\n')] |
import datetime
import io
import json_tricks
import logging
import os
from os.path import (abspath, basename, dirname, exists, expanduser,
join, realpath, relpath, splitext)
import re
import shutil
import sys
from traits.api import (Any, Dict, Enum, HasTraits, Instance, List, Long,
Str)
from whoosh import fields, qparser, query
from whoosh.util.times import datetime_to_long, long_to_datetime
from .common import get_project_dir
from .media import Media, MediaData, get_media_data
from .directory import Directory
from . import processor
logger = logging.getLogger(__name__)
if sys.version_info[0] > 2:
unicode = str
string_types = (str,)
import csv
else:
string_types = (basestring,)
import backports.csv as csv
INT = fields.NUMERIC(numtype=int)
FLOAT = fields.NUMERIC(numtype=float)
def get_file_saved_time(path):
dt = datetime.datetime.fromtimestamp(os.stat(path).st_ctime)
return dt.ctime()
def _get_sample(fname):
sample = ''
with io.open(fname, 'r', newline='', encoding='utf-8') as fp:
sample += fp.readline() + fp.readline()
return sample
def _get_csv_headers(fname):
sample = _get_sample(fname)
sniffer = csv.Sniffer()
has_header = sniffer.has_header(sample)
dialect = sniffer.sniff(sample)
with io.open(fname, 'r', newline='', encoding='utf-8') as fp:
reader = csv.reader(fp, dialect)
header = next(reader)
return has_header, header, dialect
class TagInfo(HasTraits):
name = Str
type = Enum("string", "text", "int", "float", "bool")
default = Any
def __repr__(self):
return 'TagInfo(%r, %r)' % (self.name, self.type)
def _default_default(self):
map = {"string": "", "text": "", "int": 0, "float": 0.0,
"bool": False}
return map[self.type]
def open_file(fname_or_file, mode='rb'):
if hasattr(fname_or_file, 'read'):
return fname_or_file
else:
return open(fname_or_file, mode)
def sanitize_name(name):
name = name.lower()
name = re.sub(r'\s+', '_', name)
return re.sub(r'\W+', '', name)
def get_non_existing_filename(fname):
if exists(fname):
base, ext = splitext(basename(fname))
return join(dirname(fname), base + '_a' + ext)
else:
return fname
COMMON_TAGS = dict(
file_name='string', path='string', relpath='string',
ctime='string', mtime='string', size='int', type='string'
)
def _cleanup_query(q, tag_types):
type_map = dict(float=FLOAT.from_bytes, int=INT.from_bytes)
for term in q.leaves():
if isinstance(term, query.Term):
if isinstance(term.text, (str, unicode, bytes)):
fieldtype = tag_types[term.fieldname]
if fieldtype in type_map:
term.text = type_map[fieldtype](term.text)
else:
term.text = term.text.lower()
elif isinstance(term, query.Phrase):
term.words = [x.lower() for x in term.words]
def _check_value(value, expr):
if isinstance(expr, string_types):
return expr in value.lower()
else:
return expr == value
def _check_range(x, term):
result = True
if term.start is not None:
if term.startexcl:
result &= x > term.start
else:
result &= x >= term.start
if term.end is not None and result:
if term.endexcl:
result &= x < term.end
else:
result &= x <= term.end
return result
def _check_date_range(x, term):
result = True
if term.startdate is not None:
result &= x >= term.start
if term.enddate is not None and result:
result &= x <= term.end
return result
def _search_media(expr, m_key, get_tag):
"""Given search expression, index to media, and a getter to get the attribute
check if the media matches expression.
"""
if expr.is_leaf():
if isinstance(expr, query.Term):
attr = expr.fieldname
return _check_value(get_tag(m_key, attr), expr.text)
elif isinstance(expr, query.Phrase):
attr = expr.fieldname
text = " ".join(expr.words)
return _check_value(get_tag(m_key, attr), text)
elif isinstance(expr, query.DateRange):
if expr.fieldname == 'ctime':
value = get_tag(m_key, 'ctime_')
elif expr.fieldname == 'mtime':
value = get_tag(m_key, 'mtime_')
return _check_date_range(value, expr)
elif isinstance(expr, query.NumericRange):
attr = expr.fieldname
return _check_range(get_tag(m_key, attr), expr)
else:
print("Unsupported term: %r" % expr)
return False
else:
if isinstance(expr, query.And):
result = True
for child in expr.children():
result &= _search_media(child, m_key, get_tag)
if not result:
break
return result
elif isinstance(expr, query.Or):
result = False
for child in expr.children():
result |= _search_media(child, m_key, get_tag)
if result:
break
return result
elif isinstance(expr, query.Not):
subquery = list(expr.children())[0]
return not _search_media(subquery, m_key, get_tag)
else:
print("Unsupported term: %r" % expr)
return False
class Project(HasTraits):
name = Str
description = Str
path = Str
root = Instance(Directory)
tags = List(TagInfo)
_media = Dict(Str, Media)
extensions = List(Str)
processors = List(processor.FactoryBase)
number_of_files = Long
# Path where the project data is saved.
save_file = Str
last_save_time = Str
_data = Dict
_tag_data = Dict
_relpath2index = Dict()
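# _data and _tag_data are column stores: one list per MediaData field / tag,
# indexed by the integer kept in _relpath2index for each relative path.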
_query_parser = Instance(qparser.QueryParser)
def add_tags(self, tags):
tags = list(self.tags) + tags
self.update_tags(tags)
def update_tags(self, new_tags):
old_tags = self.tags
new_tag_names = set(tag.name for tag in new_tags)
tag_info = dict((tag.name, tag.type) for tag in old_tags)
removed = []
added = []
for tag in new_tags:
if tag.name not in tag_info:
added.append(tag)
elif tag_info[tag.name] != tag.type:
removed.append(tag)
added.append(tag)
for tag in old_tags:
if tag.name not in new_tag_names:
removed.append(tag)
for tag in removed:
del self._tag_data[tag.name]
n_entries = len(self._relpath2index)
for tag in added:
self._tag_data[tag.name] = [tag.default]*n_entries
# The above can be the first time when self._tag_data is accessed, when
# creating a new project for example. In this case,
# self.__tag_data_default is called, so if self.tags is set then the
# removed tags will not exist in _tag_data causing an error. So we only
# set self.tags below.
self.tags = new_tags
# Update the cached media
for m in self._media.values():
for tag in removed:
del m.tags[tag.name]
for tag in added:
m.tags[tag.name] = tag.default
self._query_parser = self._make_query_parser()
def copy(self):
"""Make a copy of this project. This does not copy the data but only
the tags, extensions and the other settings of the project.
This will not copy any of the processor states but only their settings.
"""
name = self.name + ' copy'
p = Project(name=name)
traits = ['description', 'extensions', 'path', 'processors', 'tags']
p.copy_traits(self, traits, copy='deep')
# Clear out the _done information from the processors
for proc in p.processors:
proc._done.clear()
return p
# #### CRUD interface to the data ####
def update(self, media_data, tags=None):
"""Create/update the internal data given the media data and tags.
Parameters
----------
media_data: MediaData instance
tags: dict
"""
relpath = media_data.relpath
if not self.has_media(relpath):
index = len(self._relpath2index)
self._relpath2index[relpath] = index
for key in MediaData._fields:
self._data[key].append(None)
for tag in self.tags:
self._tag_data[tag.name].append(tag.default)
index = self._relpath2index[relpath]
for i, key in enumerate(MediaData._fields):
self._data[key][index] = media_data[i]
if tags:
for key, value in tags.items():
self._tag_data[key][index] = value
media = self._media.get(relpath)
if media is not None:
media.update(media_data, tags)
def get(self, relpath):
"""Given the relative path of some media, return a Media instance.
"""
if relpath in self._media:
return self._media[relpath]
else:
data = {}
index = self._relpath2index[relpath]
for key in MediaData._fields:
data[key] = self._data[key][index]
tags = {}
for key in self._tag_data:
tags[key] = self._tag_data[key][index]
media = Media.from_data(MediaData(**data), tags)
media.on_trait_change(self._media_tag_handler, 'tags_items')
self._media[relpath] = media
return media
def remove(self, relpaths):
"""Given a list of relative path of some media, remove them from the
database.
"""
relpath2index = self._relpath2index
indices = [(x, relpath2index[x]) for x in relpaths]
for relpath, index in sorted(indices, reverse=True):
last = len(relpath2index) - 1
if index == last:
self._delete_record(last, relpath)
else:
self._replace_with_last_record(index, last)
self._delete_record(last, relpath)
def has_media(self, relpath):
"""Returns True if the media data is available.
"""
return relpath in self._relpath2index
def keys(self):
"""Return all the keys for the media relative paths."""
return self._relpath2index.keys()
def _get_media_attr(self, index, attr):
"""Given an index to the media, return its value.
"""
if attr in self._data:
return self._data[attr][index]
elif attr in self._tag_data:
return self._tag_data[attr][index]
# #### End of CRUD interface to the data ####
def clean(self):
"""Scan the project and remove any dead entries.
This is useful when you remove or rename files. This does not refresh
the directory tree or set the number of files. It simply cleans up the
db of files that no longer exist.
"""
logger.info('Cleaning project: %s', self.name)
root_path = self.path
to_remove = []
relpath2index = self._relpath2index
for rpath in list(relpath2index.keys()):
fname = os.path.join(root_path, rpath)
if not os.path.exists(fname):
to_remove.append(rpath)
self.remove(to_remove)
def export_csv(self, fname, cols=None):
"""Export metadata to a csv file. If `cols` are not specified,
it writes out all the useful metadata.
Parameters
-----------
fname: str: a path to the csv file to dump.
cols: sequence: a sequence of columns to write.
"""
logger.info('Exporting CSV: %s', fname)
all_keys = ((set(MediaData._fields) | set(self._tag_data.keys()))
- set(('ctime_', 'mtime_')))
if cols is None:
cols = all_keys
cols = list(sorted(cols))
data_cols = set([x for x in cols if x in self._data])
with io.open(fname, 'w', newline='', encoding='utf-8') as of:
# Write the header.
writer = csv.writer(of)
writer.writerow(cols)
for i in range(len(self._relpath2index)):
line = []
for col in cols:
if col in data_cols:
elem = self._data[col][i]
else:
elem = self._tag_data[col][i]
line.append(elem)
writer.writerow(line)
def import_csv(self, fname):
"""Read tag information from given CSV filename.
Returns the success status and the error message if any. Note that this
only applies tags for column headers with known tags. Unknown tags are
not added.
Parameters
----------
fname : str Input filename.
"""
logger.info('Importing tags from: %s', fname)
has_header, header, dialect = _get_csv_headers(fname)
if not has_header:
return False, "The CSV file does not appear to have a header."
if 'path' not in header:
msg = "The CSV file does not have a 'path' column."
return False, msg
tags = {x: header.index(x.name) for x in self.tags if x.name in header}
path_idx = header.index('path')
TRUE = ('1', 't', 'true', 'y', 'yes')
type_map = {
'bool': lambda x: x.lower() in TRUE,
'string': lambda x: x,
'text': lambda x: x,
'int': int,
'float': float
}
count = 0
total = 0
with io.open(fname, 'r', newline='', encoding='utf-8') as fp:
reader = csv.reader(fp, dialect)
next(reader) # Skip header
for record in reader:
total += 1
path = record[path_idx]
rpath = relpath(path, self.path)
index = self._relpath2index.get(rpath, None)
media = self._media.get(rpath)
if index is not None:
count += 1
for tag, header_index in tags.items():
data = record[header_index]
try:
value = type_map[tag.type](data)
if media is not None:
media.tags[tag.name] = value
else:
self._tag_data[tag.name][index] = value
except ValueError:
pass
msg = "Read tags for %d paths out of %d entries." % (count, total)
if count == 0 and total > 0:
msg += ("\nPlease check that your path column matches "
"the media paths.")
return False, msg
else:
msg += ("\nPlease check the imported tags and make sure you "
"save the project.")
return True, msg
def load(self, fp=None):
"""Load media info from opened file object.
"""
if fp is None:
if not exists(self.save_file):
return
fp = open_file(self.save_file, 'rb')
else:
fp = open_file(fp, 'rb')
data = json_tricks.load(
fp, preserve_order=False, ignore_comments=False
)
fp.close()
self.name = data.get('name', '')
self.description = data.get('description', '')
self.path = data.get('path')
self.tags = [TagInfo(name=x[0], type=x[1]) for x in data['tags']]
self.processors = [processor.load(x)
for x in data.get('processors', [])]
version = data.get('version')
if version == 1:
self._read_version1_media(data['media'])
else:
self._data = data['media_data']
self._tag_data = data['tag_data']
self._relpath2index = data['relpath2index']
root = Directory()
root.__setstate__(data.get('root'))
self.extensions = root.extensions
self.root = root
self.number_of_files = len(self._relpath2index)
def save(self):
"""Save current media info to a file object
"""
if len(self.save_file) > 0:
self.save_as(self.save_file)
self._update_last_save_time()
else:
raise IOError("No valid save file set.")
def save_as(self, fp):
"""Save copy to specified path.
"""
fp = open_file(fp, 'wb')
tags = [(t.name, t.type) for t in self.tags]
root = self.root.__getstate__()
processors = [processor.dump(x) for x in self.processors]
data = dict(
version=2, path=self.path, name=self.name,
description=self.description, tags=tags,
media_data=self._data, tag_data=self._tag_data,
relpath2index=self._relpath2index,
root=root, processors=processors
)
json_tricks.dump(data, fp, compression=True)
fp.close()
logger.info('Saved project: %s', self.name)
def scan(self, refresh=False):
"""Find all the media recursively inside the root directory.
This will not clobber existing records but will add any new ones.
"""
self._setup_root()
def _scan(dir):
for f in dir.files:
if not self.has_media(f.relpath) or refresh:
data = get_media_data(f.path, f.relpath)
self.update(data)
for d in dir.directories:
if refresh:
d.refresh()
_scan(d)
if refresh:
self.root.refresh()
_scan(self.root)
self.number_of_files = len(self._relpath2index)
def search(self, q):
"""A generator which yields the (filename, relpath) for each file
satisfying the search query.
"""
logger.info('Searching for %s', q)
try:
parsed_q = self._query_parser.parse(q)
except Exception:
logger.warn("Invalid search expression: %s", q)
print("Invalid search expression: %s" % q)
return
tag_types = self._get_tag_types()
_cleanup_query(parsed_q, tag_types)
for key, index in self._relpath2index.items():
if _search_media(parsed_q, index, self._get_media_attr):
yield basename(key), key
def refresh(self):
logger.info('Refreshing project: %s', self.name)
self.clean()
self.scan(refresh=True)
# #### Private protocol ################################################
def _setup_root(self):
path = abspath(expanduser(self.path))
root = self.root
if root is None or realpath(root.path) != realpath(path):
self.root = Directory(path=path, extensions=self.extensions)
def _tags_default(self):
return [TagInfo(name='completed', type='bool')]
def _save_file_default(self):
if len(self.name) > 0:
fname = sanitize_name(self.name) + '.vxn'
d = get_project_dir()
return get_non_existing_filename(join(d, fname))
else:
return ''
def _update_last_save_time(self):
self.last_save_time = get_file_saved_time(self.save_file)
def _last_save_time_default(self):
if exists(self.save_file):
return get_file_saved_time(self.save_file)
else:
return ''
def _name_changed(self, name):
if len(name) > 0:
old_save_file = self.save_file
old_dir = dirname(old_save_file)
new_save_file = join(old_dir, sanitize_name(name) + '.vxn')
if new_save_file != old_save_file:
self.save_file = new_save_file
if exists(old_save_file):
shutil.move(old_save_file, self.save_file)
def _extensions_changed(self, ext):
if self.root is not None:
self.root.extensions = ext
def _extensions_items_changed(self):
if self.root is not None:
self.root.extensions = self.extensions
def _get_tag_types(self):
result = dict(COMMON_TAGS)
result.update(dict((t.name, t.type) for t in self.tags))
return result
def _make_schema(self):
from whoosh.fields import BOOLEAN, DATETIME, TEXT, Schema
kw = dict(
type=TEXT, file_name=TEXT, path=TEXT,
mtime=DATETIME, ctime=DATETIME, size=INT
)
type_to_field = dict(
string=TEXT, text=TEXT, int=INT, float=FLOAT, bool=BOOLEAN
)
for tag in self.tags:
kw[tag.name] = type_to_field[tag.type]
return Schema(**kw)
def _make_query_parser(self):
schema = self._make_schema()
qp = qparser.QueryParser('path', schema=schema)
qp.add_plugin(qparser.GtLtPlugin())
from whoosh.qparser.dateparse import DateParserPlugin
qp.add_plugin(DateParserPlugin())
return qp
def __query_parser_default(self):
return self._make_query_parser()
def __data_default(self):
data = {}
for key in MediaData._fields:
data[key] = []
return data
def __tag_data_default(self):
tags = {}
for key in self.tags:
tags[key.name] = []
return tags
def _media_tag_handler(self, obj, tname, old, new):
index = self._relpath2index[obj.relpath]
for tag in new.changed:
self._tag_data[tag][index] = obj.tags[tag]
def _read_version1_media(self, media):
data = self.__data_default()
tag_data = self.__tag_data_default()
relpath2index = {}
keymap = dict.fromkeys(MediaData._fields)
for k in keymap:
keymap[k] = k
keymap['_ctime'] = 'ctime_'
keymap['_mtime'] = 'mtime_'
for index, (key, m) in enumerate(media):
relpath2index[key] = index
tags = m.pop('tags')
for tname, v in tags.items():
tag_data[tname].append(v)
for k, v in m.items():
data[keymap[k]].append(v)
if 'file_name' not in m:
data['file_name'].append(basename(key))
data['mtime_'] = [datetime_to_long(x) for x in data['mtime_']]
data['ctime_'] = [datetime_to_long(x) for x in data['ctime_']]
self._data = data
self._tag_data = tag_data
self._relpath2index = relpath2index
def _delete_record(self, index, relpath):
for key in MediaData._fields:
del self._data[key][index]
for key in self._tag_data:
del self._tag_data[key][index]
if relpath in self._media:
del self._media[relpath]
del self._relpath2index[relpath]
def _replace_with_last_record(self, index, last):
_data = self._data
_tag_data = self._tag_data
for key in MediaData._fields:
_data[key][index] = _data[key][last]
for key in self._tag_data:
_tag_data[key][index] = _tag_data[key][last]
last_relpath = _data['relpath'][last]
self._relpath2index[last_relpath] = index
def _save_as_v1(self, fp):
"""Save copy to specified path.
This mainly exists for testing and making sure we still read the old
saved files.
"""
def _rewrite_dir(state):
"Rewrite directories in the old format."
state['files'] = [x[0] for x in state['files']]
state['directories'] = [_rewrite_dir(d)
for d in state['directories']]
state.pop('relpath')
state.pop('name')
return state
fp = open_file(fp, 'wb')
media = [(key, self.get(key).to_dict()) for key in self._relpath2index]
tags = [(t.name, t.type) for t in self.tags]
root = _rewrite_dir(self.root.__getstate__())
processors = [processor.dump(x) for x in self.processors]
for k, m in media:
m['_ctime'] = long_to_datetime(m['_ctime'])
m['_mtime'] = long_to_datetime(m['_mtime'])
data = dict(
version=1, path=self.path, name=self.name,
description=self.description, tags=tags, media=media,
root=root, processors=processors
)
json_tricks.dump(data, fp, compression=True)
fp.close()
logger.info('Saved project: %s', self.name)
| [
"logging.getLogger",
"whoosh.fields.Schema",
"io.open",
"whoosh.qparser.QueryParser",
"traits.api.Enum",
"backports.csv.Sniffer",
"os.path.exists",
"whoosh.qparser.dateparse.DateParserPlugin",
"shutil.move",
"json_tricks.load",
"whoosh.util.times.long_to_datetime",
"os.path.expanduser",
"os.path.relpath",
"whoosh.util.times.datetime_to_long",
"os.path.dirname",
"json_tricks.dump",
"re.sub",
"whoosh.qparser.GtLtPlugin",
"traits.api.List",
"traits.api.Instance",
"backports.csv.writer",
"os.path.join",
"traits.api.Dict",
"os.path.realpath",
"backports.csv.reader",
"os.path.basename",
"os.stat",
"whoosh.fields.NUMERIC"
] | [((598, 625), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (615, 625), False, 'import logging\n'), ((791, 818), 'whoosh.fields.NUMERIC', 'fields.NUMERIC', ([], {'numtype': 'int'}), '(numtype=int)\n', (805, 818), False, 'from whoosh import fields, qparser, query\n'), ((827, 856), 'whoosh.fields.NUMERIC', 'fields.NUMERIC', ([], {'numtype': 'float'}), '(numtype=float)\n', (841, 856), False, 'from whoosh import fields, qparser, query\n'), ((1229, 1242), 'backports.csv.Sniffer', 'csv.Sniffer', ([], {}), '()\n', (1240, 1242), True, 'import backports.csv as csv\n'), ((1553, 1599), 'traits.api.Enum', 'Enum', (['"""string"""', '"""text"""', '"""int"""', '"""float"""', '"""bool"""'], {}), "('string', 'text', 'int', 'float', 'bool')\n", (1557, 1599), False, 'from traits.api import Any, Dict, Enum, HasTraits, Instance, List, Long, Str\n'), ((2083, 2108), 're.sub', 're.sub', (['"""\\\\s+"""', '"""_"""', 'name'], {}), "('\\\\s+', '_', name)\n", (2089, 2108), False, 'import re\n'), ((2120, 2144), 're.sub', 're.sub', (['"""\\\\W+"""', '""""""', 'name'], {}), "('\\\\W+', '', name)\n", (2126, 2144), False, 'import re\n'), ((2192, 2205), 'os.path.exists', 'exists', (['fname'], {}), '(fname)\n', (2198, 2205), False, 'from os.path import abspath, basename, dirname, exists, expanduser, join, realpath, relpath, splitext\n'), ((5651, 5670), 'traits.api.Instance', 'Instance', (['Directory'], {}), '(Directory)\n', (5659, 5670), False, 'from traits.api import Any, Dict, Enum, HasTraits, Instance, List, Long, Str\n'), ((5682, 5695), 'traits.api.List', 'List', (['TagInfo'], {}), '(TagInfo)\n', (5686, 5695), False, 'from traits.api import Any, Dict, Enum, HasTraits, Instance, List, Long, Str\n'), ((5710, 5726), 'traits.api.Dict', 'Dict', (['Str', 'Media'], {}), '(Str, Media)\n', (5714, 5726), False, 'from traits.api import Any, Dict, Enum, HasTraits, Instance, List, Long, Str\n'), ((5745, 5754), 'traits.api.List', 'List', (['Str'], {}), '(Str)\n', (5749, 5754), False, 'from traits.api import Any, Dict, Enum, HasTraits, Instance, List, Long, Str\n'), ((5773, 5800), 'traits.api.List', 'List', (['processor.FactoryBase'], {}), '(processor.FactoryBase)\n', (5777, 5800), False, 'from traits.api import Any, Dict, Enum, HasTraits, Instance, List, Long, Str\n'), ((5982, 5988), 'traits.api.Dict', 'Dict', ([], {}), '()\n', (5986, 5988), False, 'from traits.api import Any, Dict, Enum, HasTraits, Instance, List, Long, Str\n'), ((6010, 6039), 'traits.api.Instance', 'Instance', (['qparser.QueryParser'], {}), '(qparser.QueryParser)\n', (6018, 6039), False, 'from traits.api import Any, Dict, Enum, HasTraits, Instance, List, Long, Str\n'), ((1028, 1077), 'io.open', 'io.open', (['fname', '"""r"""'], {'newline': '""""""', 'encoding': '"""utf-8"""'}), "(fname, 'r', newline='', encoding='utf-8')\n", (1035, 1077), False, 'import io\n'), ((1332, 1381), 'io.open', 'io.open', (['fname', '"""r"""'], {'newline': '""""""', 'encoding': '"""utf-8"""'}), "(fname, 'r', newline='', encoding='utf-8')\n", (1339, 1381), False, 'import io\n'), ((1406, 1429), 'backports.csv.reader', 'csv.reader', (['fp', 'dialect'], {}), '(fp, dialect)\n', (1416, 1429), True, 'import backports.csv as csv\n'), ((15621, 15686), 'json_tricks.load', 'json_tricks.load', (['fp'], {'preserve_order': '(False)', 'ignore_comments': '(False)'}), '(fp, preserve_order=False, ignore_comments=False)\n', (15637, 15686), False, 'import json_tricks\n'), ((17356, 17400), 'json_tricks.dump', 'json_tricks.dump', (['data', 'fp'], {'compression': '(True)'}), 
'(data, fp, compression=True)\n', (17372, 17400), False, 'import json_tricks\n'), ((19775, 19797), 'os.path.exists', 'exists', (['self.save_file'], {}), '(self.save_file)\n', (19781, 19797), False, 'from os.path import abspath, basename, dirname, exists, expanduser, join, realpath, relpath, splitext\n'), ((21140, 21152), 'whoosh.fields.Schema', 'Schema', ([], {}), '(**kw)\n', (21146, 21152), False, 'from whoosh.fields import BOOLEAN, DATETIME, TEXT, Schema\n'), ((21238, 21280), 'whoosh.qparser.QueryParser', 'qparser.QueryParser', (['"""path"""'], {'schema': 'schema'}), "('path', schema=schema)\n", (21257, 21280), False, 'from whoosh import fields, qparser, query\n'), ((24812, 24856), 'json_tricks.dump', 'json_tricks.dump', (['data', 'fp'], {'compression': '(True)'}), '(data, fp, compression=True)\n', (24828, 24856), False, 'import json_tricks\n'), ((931, 944), 'os.stat', 'os.stat', (['path'], {}), '(path)\n', (938, 944), False, 'import os\n'), ((2236, 2251), 'os.path.basename', 'basename', (['fname'], {}), '(fname)\n', (2244, 2251), False, 'from os.path import abspath, basename, dirname, exists, expanduser, join, realpath, relpath, splitext\n'), ((2273, 2287), 'os.path.dirname', 'dirname', (['fname'], {}), '(fname)\n', (2280, 2287), False, 'from os.path import abspath, basename, dirname, exists, expanduser, join, realpath, relpath, splitext\n'), ((11501, 11531), 'os.path.join', 'os.path.join', (['root_path', 'rpath'], {}), '(root_path, rpath)\n', (11513, 11531), False, 'import os\n'), ((12309, 12358), 'io.open', 'io.open', (['fname', '"""w"""'], {'newline': '""""""', 'encoding': '"""utf-8"""'}), "(fname, 'w', newline='', encoding='utf-8')\n", (12316, 12358), False, 'import io\n'), ((12419, 12433), 'backports.csv.writer', 'csv.writer', (['of'], {}), '(of)\n', (12429, 12433), True, 'import backports.csv as csv\n'), ((13950, 13999), 'io.open', 'io.open', (['fname', '"""r"""'], {'newline': '""""""', 'encoding': '"""utf-8"""'}), "(fname, 'r', newline='', encoding='utf-8')\n", (13957, 13999), False, 'import io\n'), ((14028, 14051), 'backports.csv.reader', 'csv.reader', (['fp', 'dialect'], {}), '(fp, dialect)\n', (14038, 14051), True, 'import backports.csv as csv\n'), ((19095, 19116), 'os.path.expanduser', 'expanduser', (['self.path'], {}), '(self.path)\n', (19105, 19116), False, 'from os.path import abspath, basename, dirname, exists, expanduser, join, realpath, relpath, splitext\n'), ((20017, 20039), 'os.path.dirname', 'dirname', (['old_save_file'], {}), '(old_save_file)\n', (20024, 20039), False, 'from os.path import abspath, basename, dirname, exists, expanduser, join, realpath, relpath, splitext\n'), ((21303, 21323), 'whoosh.qparser.GtLtPlugin', 'qparser.GtLtPlugin', ([], {}), '()\n', (21321, 21323), False, 'from whoosh import fields, qparser, query\n'), ((21409, 21427), 'whoosh.qparser.dateparse.DateParserPlugin', 'DateParserPlugin', ([], {}), '()\n', (21425, 21427), False, 'from whoosh.qparser.dateparse import DateParserPlugin\n'), ((22718, 22737), 'whoosh.util.times.datetime_to_long', 'datetime_to_long', (['x'], {}), '(x)\n', (22734, 22737), False, 'from whoosh.util.times import datetime_to_long, long_to_datetime\n'), ((22789, 22808), 'whoosh.util.times.datetime_to_long', 'datetime_to_long', (['x'], {}), '(x)\n', (22805, 22808), False, 'from whoosh.util.times import datetime_to_long, long_to_datetime\n'), ((24521, 24550), 'whoosh.util.times.long_to_datetime', 'long_to_datetime', (["m['_ctime']"], {}), "(m['_ctime'])\n", (24537, 24550), False, 'from whoosh.util.times import 
datetime_to_long, long_to_datetime\n'), ((24577, 24606), 'whoosh.util.times.long_to_datetime', 'long_to_datetime', (["m['_mtime']"], {}), "(m['_mtime'])\n", (24593, 24606), False, 'from whoosh.util.times import datetime_to_long, long_to_datetime\n'), ((11551, 11572), 'os.path.exists', 'os.path.exists', (['fname'], {}), '(fname)\n', (11565, 11572), False, 'import os\n'), ((14217, 14241), 'os.path.relpath', 'relpath', (['path', 'self.path'], {}), '(path, self.path)\n', (14224, 14241), False, 'from os.path import abspath, basename, dirname, exists, expanduser, join, realpath, relpath, splitext\n'), ((15458, 15480), 'os.path.exists', 'exists', (['self.save_file'], {}), '(self.save_file)\n', (15464, 15480), False, 'from os.path import abspath, basename, dirname, exists, expanduser, join, realpath, relpath, splitext\n'), ((19170, 19189), 'os.path.realpath', 'realpath', (['root.path'], {}), '(root.path)\n', (19178, 19189), False, 'from os.path import abspath, basename, dirname, exists, expanduser, join, realpath, relpath, splitext\n'), ((19193, 19207), 'os.path.realpath', 'realpath', (['path'], {}), '(path)\n', (19201, 19207), False, 'from os.path import abspath, basename, dirname, exists, expanduser, join, realpath, relpath, splitext\n'), ((19567, 19581), 'os.path.join', 'join', (['d', 'fname'], {}), '(d, fname)\n', (19571, 19581), False, 'from os.path import abspath, basename, dirname, exists, expanduser, join, realpath, relpath, splitext\n'), ((20225, 20246), 'os.path.exists', 'exists', (['old_save_file'], {}), '(old_save_file)\n', (20231, 20246), False, 'from os.path import abspath, basename, dirname, exists, expanduser, join, realpath, relpath, splitext\n'), ((20268, 20310), 'shutil.move', 'shutil.move', (['old_save_file', 'self.save_file'], {}), '(old_save_file, self.save_file)\n', (20279, 20310), False, 'import shutil\n'), ((22676, 22689), 'os.path.basename', 'basename', (['key'], {}), '(key)\n', (22684, 22689), False, 'from os.path import abspath, basename, dirname, exists, expanduser, join, realpath, relpath, splitext\n'), ((18813, 18826), 'os.path.basename', 'basename', (['key'], {}), '(key)\n', (18821, 18826), False, 'from os.path import abspath, basename, dirname, exists, expanduser, join, realpath, relpath, splitext\n')] |
"""This submodule contains a JSON reference translator."""
__author__ = '<NAME>'
__copyright__ = 'Copyright © 2021 <NAME>'
__license__ = 'MIT'
__all__ = ()
import prance.util.url as _url
def _reference_key(ref_url, item_path):
"""
Return a key identifying a referenced object, of the form
"<referenced file name>_<object path joined by underscores>".
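For example (illustrative), a reference into "models.yaml" with object path
('definitions', 'Pet') produces the key "models.yaml_Pet".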
"""
return ref_url.path.split('/')[-1] + '_' + '_'.join(item_path[1:])
def _local_ref(path):
url = '#/' + '/'.join(path)
return {'$ref': url}
# Underscored to allow some time for the public API to be stabilized.
class _RefTranslator:
"""
Resolve JSON pointers/references in a spec by translation.
References to objects in other files are copied to the /components/schemas
object of the root document, while being translated to point to the new
object locations.
"""
def __init__(self, specs, url):
"""
Construct a JSON reference translator.
The translated specs are in the `specs` member after a call to
`translate_references` has been made.
If a URL is given, it is used as a base for calculating the absolute
URL of relative file references.
:param dict specs: The parsed specs in which to translate any references.
:param str url: [optional] The URL to base relative references on.
"""
import copy
self.specs = copy.deepcopy(specs)
self.__strict = True
self.__reference_cache = {}
self.__collected_references = {}
if url:
self.url = _url.absurl(url)
url_key = (_url.urlresource(self.url), self.__strict)
# If we have a url, we want to add ourselves to the reference cache
# - that creates a reference loop, but prevents child resolvers from
# creating a new resolver for this url.
self.__reference_cache[url_key] = self.specs
else:
self.url = None
def translate_references(self):
"""
Iterate over the specification document, performing the translation.
Traverses over the whole document, adding the referenced object from
external files to the /components/schemas object in the root document
and translating the references to the new location.
"""
self.specs = self._translate_partial(self.url, self.specs)
# Add collected references to the root document.
if self.__collected_references:
if 'components' not in self.specs:
self.specs['components'] = {}
if 'schemas' not in self.specs['components']:
self.specs['components'].update({'schemas': {}})
self.specs['components']['schemas'].update(self.__collected_references)
def _dereference(self, ref_url, obj_path):
"""
Dereference the URL and object path.
Returns the dereferenced object.
:param mixed ref_url: The URL at which the reference is located.
:param list obj_path: The object path within the URL resource.
:return: A copy of the dereferenced value, with all internal references
resolved.
"""
# In order to start dereferencing anything in the referenced URL, we have
# to read and parse it, of course.
contents = _url.fetch_url(ref_url, self.__reference_cache, strict=self.__strict)
# In this inner parser's specification, we can now look for the referenced
# object.
value = contents
if len(obj_path) != 0:
from prance.util.path import path_get
try:
value = path_get(value, obj_path)
except (KeyError, IndexError, TypeError) as ex:
raise _url.ResolutionError('Cannot resolve reference "%s": %s'
% (ref_url.geturl(), str(ex)))
# Deep copy value; we don't want to create recursive structures
import copy
value = copy.deepcopy(value)
# Now resolve partial specs
value = self._translate_partial(ref_url, value)
# That's it!
return value
def _translate_partial(self, base_url, partial):
changes = dict(tuple(self._translating_iterator(base_url, partial, ())))
paths = sorted(changes.keys(), key = len)
from prance.util.path import path_set
for path in paths:
value = changes[path]
if len(path) == 0:
partial = value
else:
path_set(partial, list(path), value, create = True)
return partial
def _translating_iterator(self, base_url, partial, path):
from prance.util.iterators import reference_iterator
for _, ref_string, item_path in reference_iterator(partial):
ref_url, obj_path = _url.split_url_reference(base_url, ref_string)
full_path = path + item_path
if ref_url.path == self.url.path:
# Reference to the root document.
ref_path = obj_path
else:
# Reference to a non-root document.
ref_key = _reference_key(ref_url, obj_path)
if ref_key not in self.__collected_references:
self.__collected_references[ref_key] = None
ref_value = self._dereference(ref_url, obj_path)
self.__collected_references[ref_key] = ref_value
ref_path = ['components', 'schemas', ref_key]
ref_obj = _local_ref(ref_path)
yield full_path, ref_obj
| [
"prance.util.url.fetch_url",
"prance.util.url.absurl",
"prance.util.iterators.reference_iterator",
"prance.util.url.split_url_reference",
"copy.deepcopy",
"prance.util.path.path_get",
"prance.util.url.urlresource"
] | [((1375, 1395), 'copy.deepcopy', 'copy.deepcopy', (['specs'], {}), '(specs)\n', (1388, 1395), False, 'import copy\n'), ((3372, 3441), 'prance.util.url.fetch_url', '_url.fetch_url', (['ref_url', 'self.__reference_cache'], {'strict': 'self.__strict'}), '(ref_url, self.__reference_cache, strict=self.__strict)\n', (3386, 3441), True, 'import prance.util.url as _url\n'), ((4039, 4059), 'copy.deepcopy', 'copy.deepcopy', (['value'], {}), '(value)\n', (4052, 4059), False, 'import copy\n'), ((4827, 4854), 'prance.util.iterators.reference_iterator', 'reference_iterator', (['partial'], {}), '(partial)\n', (4845, 4854), False, 'from prance.util.iterators import reference_iterator\n'), ((1543, 1559), 'prance.util.url.absurl', '_url.absurl', (['url'], {}), '(url)\n', (1554, 1559), True, 'import prance.util.url as _url\n'), ((4888, 4934), 'prance.util.url.split_url_reference', '_url.split_url_reference', (['base_url', 'ref_string'], {}), '(base_url, ref_string)\n', (4912, 4934), True, 'import prance.util.url as _url\n'), ((1583, 1609), 'prance.util.url.urlresource', '_url.urlresource', (['self.url'], {}), '(self.url)\n', (1599, 1609), True, 'import prance.util.url as _url\n'), ((3691, 3716), 'prance.util.path.path_get', 'path_get', (['value', 'obj_path'], {}), '(value, obj_path)\n', (3699, 3716), False, 'from prance.util.path import path_get\n')] |
from io import StringIO
from unittest import TestCase
from dropSQL.parser.streams import *
class StreamTestCase(TestCase):
def test(self):
s = '12'
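# peek() looks at the next character without consuming it, next() consumes it,
# and back() rewinds the stream by the given number of characters.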
cs = Characters(StringIO(s))
ch = cs.peek().ok()
self.assertEqual(ch, '1')
ch = cs.peek().ok()
self.assertEqual(ch, '1')
ch = cs.next().ok()
self.assertEqual(ch, '1')
ch = cs.next().ok()
self.assertEqual(ch, '2')
r = cs.next()
self.assertFalse(r)
self.assertTrue(r.err())
r = cs.next()
self.assertFalse(r)
cs.back()
r = cs.next()
self.assertTrue(r)
self.assertEqual(r.ok(), '2')
cs.back(2)
r = cs.next()
self.assertTrue(r)
self.assertEqual(r.ok(), '1')
| [
"io.StringIO"
] | [((187, 198), 'io.StringIO', 'StringIO', (['s'], {}), '(s)\n', (195, 198), False, 'from io import StringIO\n')] |
# coding: utf-8
import os
import pickle
import shutil
import tempfile
import unittest
import ray
from ray import tune
from ray.rllib import _register_all
from ray.tune import Trainable
from ray.tune.utils import validate_save_restore
class SerialTuneRelativeLocalDirTest(unittest.TestCase):
local_mode = True
prefix = "Serial"
class MockTrainable(Trainable):
_name = "MockTrainable"
def setup(self, config):
self.state = {"hi": 1}
def step(self):
return {"timesteps_this_iter": 1, "done": True}
def save_checkpoint(self, checkpoint_dir):
checkpoint_path = os.path.join(
checkpoint_dir, "checkpoint-{}".format(self._iteration)
)
with open(checkpoint_path, "wb") as f:
pickle.dump(self.state, f)
return checkpoint_path
def load_checkpoint(self, checkpoint_path):
with open(checkpoint_path, "rb") as f:
extra_data = pickle.load(f)
self.state.update(extra_data)
def setUp(self):
self.absolute_local_dir = None
ray.init(num_cpus=1, num_gpus=0, local_mode=self.local_mode)
def tearDown(self):
if self.absolute_local_dir is not None:
shutil.rmtree(self.absolute_local_dir, ignore_errors=True)
self.absolute_local_dir = None
ray.shutdown()
# Without this line, test_tune_server.testAddTrial would fail.
_register_all()
def _get_trial_dir(self, absoulte_exp_dir):
print("looking for", self.MockTrainable._name)
print("in", os.listdir(absoulte_exp_dir))
trial_dirname = next(
(
child_dir
for child_dir in os.listdir(absoulte_exp_dir)
if (
os.path.isdir(os.path.join(absoulte_exp_dir, child_dir))
and child_dir.startswith(self.MockTrainable._name)
)
)
)
trial_absolute_dir = os.path.join(absoulte_exp_dir, trial_dirname)
return trial_dirname, trial_absolute_dir
def _train(self, exp_name, local_dir, absolute_local_dir):
(trial,) = tune.run(
self.MockTrainable,
name=exp_name,
stop={"training_iteration": 1},
checkpoint_freq=1,
local_dir=local_dir,
config={"env": "CartPole-v0", "log_level": "DEBUG"},
).trials
exp_dir = os.path.join(absolute_local_dir, exp_name)
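# tune lays the run out as <local_dir>/<exp_name>/<trial_dirname>/, with the
# checkpoint file under checkpoint_000001/checkpoint-1 (asserted below).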
_, abs_trial_dir = self._get_trial_dir(exp_dir)
self.assertIsNone(trial.error_file)
self.assertEqual(trial.local_dir, exp_dir)
self.assertEqual(trial.logdir, abs_trial_dir)
self.assertTrue(os.path.isdir(absolute_local_dir), absolute_local_dir)
self.assertTrue(os.path.isdir(exp_dir))
self.assertTrue(os.path.isdir(abs_trial_dir))
self.assertTrue(
os.path.isfile(
os.path.join(abs_trial_dir, "checkpoint_000001/checkpoint-1")
)
)
def _restore(self, exp_name, local_dir, absolute_local_dir):
trial_name, abs_trial_dir = self._get_trial_dir(
os.path.join(absolute_local_dir, exp_name)
)
checkpoint_path = os.path.join(
local_dir, exp_name, trial_name, "checkpoint_000001/checkpoint-1"
) # Relative checkpoint path
# The file tune would find. The absolute checkpoint path.
tune_find_file = os.path.abspath(os.path.expanduser(checkpoint_path))
self.assertTrue(
os.path.isfile(tune_find_file), "{} is not exist!".format(tune_find_file)
)
(trial,) = tune.run(
self.MockTrainable,
name=exp_name,
stop={"training_iteration": 2}, # train one more iteration.
restore=checkpoint_path, # Restore the checkpoint
config={"env": "CartPole-v0", "log_level": "DEBUG"},
).trials
self.assertIsNone(trial.error_file)
def testDottedRelativePath(self):
local_dir = "./test_dotted_relative_local_dir"
exp_name = self.prefix + "DottedRelativeLocalDir"
absolute_local_dir = os.path.abspath(local_dir)
self.absolute_local_dir = absolute_local_dir
self.assertFalse(os.path.exists(absolute_local_dir))
self._train(exp_name, local_dir, absolute_local_dir)
self._restore(exp_name, local_dir, absolute_local_dir)
def testRelativePath(self):
local_dir = "test_relative_local_dir"
exp_name = self.prefix + "RelativePath"
absolute_local_dir = os.path.abspath(local_dir)
self.absolute_local_dir = absolute_local_dir
self.assertFalse(os.path.exists(absolute_local_dir))
self._train(exp_name, local_dir, absolute_local_dir)
self._restore(exp_name, local_dir, absolute_local_dir)
def testTildeAbsolutePath(self):
local_dir = "~/test_tilde_absolute_local_dir"
exp_name = self.prefix + "TildeAbsolutePath"
absolute_local_dir = os.path.abspath(os.path.expanduser(local_dir))
self.absolute_local_dir = absolute_local_dir
self.assertFalse(os.path.exists(absolute_local_dir))
self._train(exp_name, local_dir, absolute_local_dir)
self._restore(exp_name, local_dir, absolute_local_dir)
def testTempfile(self):
local_dir = tempfile.mkdtemp()
exp_name = self.prefix + "Tempfile"
self.absolute_local_dir = local_dir
self._train(exp_name, local_dir, local_dir)
self._restore(exp_name, local_dir, local_dir)
def testCheckpointWithNoop(self):
"""Tests that passing the checkpoint_dir right back works."""
class MockTrainable(Trainable):
def setup(self, config):
pass
def step(self):
return {"score": 1}
def save_checkpoint(self, checkpoint_dir):
with open(os.path.join(checkpoint_dir, "test.txt"), "wb") as f:
pickle.dump("test", f)
return checkpoint_dir
def load_checkpoint(self, checkpoint_dir):
with open(os.path.join(checkpoint_dir, "test.txt"), "rb") as f:
x = pickle.load(f)
assert x == "test"
return checkpoint_dir
validate_save_restore(MockTrainable)
validate_save_restore(MockTrainable, use_object_store=True)
if __name__ == "__main__":
import pytest
import sys
sys.exit(pytest.main(["-v", __file__]))
| [
"os.path.expanduser",
"os.path.exists",
"os.listdir",
"pickle.dump",
"ray.shutdown",
"os.path.join",
"pickle.load",
"pytest.main",
"os.path.isfile",
"os.path.isdir",
"tempfile.mkdtemp",
"ray.tune.utils.validate_save_restore",
"shutil.rmtree",
"os.path.abspath",
"ray.init",
"ray.tune.run",
"ray.rllib._register_all"
] | [((1131, 1191), 'ray.init', 'ray.init', ([], {'num_cpus': '(1)', 'num_gpus': '(0)', 'local_mode': 'self.local_mode'}), '(num_cpus=1, num_gpus=0, local_mode=self.local_mode)\n', (1139, 1191), False, 'import ray\n'), ((1387, 1401), 'ray.shutdown', 'ray.shutdown', ([], {}), '()\n', (1399, 1401), False, 'import ray\n'), ((1481, 1496), 'ray.rllib._register_all', '_register_all', ([], {}), '()\n', (1494, 1496), False, 'from ray.rllib import _register_all\n'), ((2024, 2069), 'os.path.join', 'os.path.join', (['absoulte_exp_dir', 'trial_dirname'], {}), '(absoulte_exp_dir, trial_dirname)\n', (2036, 2069), False, 'import os\n'), ((2481, 2523), 'os.path.join', 'os.path.join', (['absolute_local_dir', 'exp_name'], {}), '(absolute_local_dir, exp_name)\n', (2493, 2523), False, 'import os\n'), ((3282, 3361), 'os.path.join', 'os.path.join', (['local_dir', 'exp_name', 'trial_name', '"""checkpoint_000001/checkpoint-1"""'], {}), "(local_dir, exp_name, trial_name, 'checkpoint_000001/checkpoint-1')\n", (3294, 3361), False, 'import os\n'), ((4210, 4236), 'os.path.abspath', 'os.path.abspath', (['local_dir'], {}), '(local_dir)\n', (4225, 4236), False, 'import os\n'), ((4631, 4657), 'os.path.abspath', 'os.path.abspath', (['local_dir'], {}), '(local_dir)\n', (4646, 4657), False, 'import os\n'), ((5404, 5422), 'tempfile.mkdtemp', 'tempfile.mkdtemp', ([], {}), '()\n', (5420, 5422), False, 'import tempfile\n'), ((6365, 6401), 'ray.tune.utils.validate_save_restore', 'validate_save_restore', (['MockTrainable'], {}), '(MockTrainable)\n', (6386, 6401), False, 'from ray.tune.utils import validate_save_restore\n'), ((6410, 6469), 'ray.tune.utils.validate_save_restore', 'validate_save_restore', (['MockTrainable'], {'use_object_store': '(True)'}), '(MockTrainable, use_object_store=True)\n', (6431, 6469), False, 'from ray.tune.utils import validate_save_restore\n'), ((6546, 6575), 'pytest.main', 'pytest.main', (["['-v', __file__]"], {}), "(['-v', __file__])\n", (6557, 6575), False, 'import pytest\n'), ((1277, 1335), 'shutil.rmtree', 'shutil.rmtree', (['self.absolute_local_dir'], {'ignore_errors': '(True)'}), '(self.absolute_local_dir, ignore_errors=True)\n', (1290, 1335), False, 'import shutil\n'), ((1621, 1649), 'os.listdir', 'os.listdir', (['absoulte_exp_dir'], {}), '(absoulte_exp_dir)\n', (1631, 1649), False, 'import os\n'), ((2203, 2379), 'ray.tune.run', 'tune.run', (['self.MockTrainable'], {'name': 'exp_name', 'stop': "{'training_iteration': 1}", 'checkpoint_freq': '(1)', 'local_dir': 'local_dir', 'config': "{'env': 'CartPole-v0', 'log_level': 'DEBUG'}"}), "(self.MockTrainable, name=exp_name, stop={'training_iteration': 1},\n checkpoint_freq=1, local_dir=local_dir, config={'env': 'CartPole-v0',\n 'log_level': 'DEBUG'})\n", (2211, 2379), False, 'from ray import tune\n'), ((2755, 2788), 'os.path.isdir', 'os.path.isdir', (['absolute_local_dir'], {}), '(absolute_local_dir)\n', (2768, 2788), False, 'import os\n'), ((2834, 2856), 'os.path.isdir', 'os.path.isdir', (['exp_dir'], {}), '(exp_dir)\n', (2847, 2856), False, 'import os\n'), ((2882, 2910), 'os.path.isdir', 'os.path.isdir', (['abs_trial_dir'], {}), '(abs_trial_dir)\n', (2895, 2910), False, 'import os\n'), ((3202, 3244), 'os.path.join', 'os.path.join', (['absolute_local_dir', 'exp_name'], {}), '(absolute_local_dir, exp_name)\n', (3214, 3244), False, 'import os\n'), ((3520, 3555), 'os.path.expanduser', 'os.path.expanduser', (['checkpoint_path'], {}), '(checkpoint_path)\n', (3538, 3555), False, 'import os\n'), ((3594, 3624), 'os.path.isfile', 'os.path.isfile', 
(['tune_find_file'], {}), '(tune_find_file)\n', (3608, 3624), False, 'import os\n'), ((3698, 3859), 'ray.tune.run', 'tune.run', (['self.MockTrainable'], {'name': 'exp_name', 'stop': "{'training_iteration': 2}", 'restore': 'checkpoint_path', 'config': "{'env': 'CartPole-v0', 'log_level': 'DEBUG'}"}), "(self.MockTrainable, name=exp_name, stop={'training_iteration': 2},\n restore=checkpoint_path, config={'env': 'CartPole-v0', 'log_level':\n 'DEBUG'})\n", (3706, 3859), False, 'from ray import tune\n'), ((4315, 4349), 'os.path.exists', 'os.path.exists', (['absolute_local_dir'], {}), '(absolute_local_dir)\n', (4329, 4349), False, 'import os\n'), ((4736, 4770), 'os.path.exists', 'os.path.exists', (['absolute_local_dir'], {}), '(absolute_local_dir)\n', (4750, 4770), False, 'import os\n'), ((5086, 5115), 'os.path.expanduser', 'os.path.expanduser', (['local_dir'], {}), '(local_dir)\n', (5104, 5115), False, 'import os\n'), ((5195, 5229), 'os.path.exists', 'os.path.exists', (['absolute_local_dir'], {}), '(absolute_local_dir)\n', (5209, 5229), False, 'import os\n'), ((810, 836), 'pickle.dump', 'pickle.dump', (['self.state', 'f'], {}), '(self.state, f)\n', (821, 836), False, 'import pickle\n'), ((1005, 1019), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (1016, 1019), False, 'import pickle\n'), ((2981, 3042), 'os.path.join', 'os.path.join', (['abs_trial_dir', '"""checkpoint_000001/checkpoint-1"""'], {}), "(abs_trial_dir, 'checkpoint_000001/checkpoint-1')\n", (2993, 3042), False, 'import os\n'), ((1754, 1782), 'os.listdir', 'os.listdir', (['absoulte_exp_dir'], {}), '(absoulte_exp_dir)\n', (1764, 1782), False, 'import os\n'), ((6046, 6068), 'pickle.dump', 'pickle.dump', (['"""test"""', 'f'], {}), "('test', f)\n", (6057, 6068), False, 'import pickle\n'), ((6267, 6281), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (6278, 6281), False, 'import pickle\n'), ((5972, 6012), 'os.path.join', 'os.path.join', (['checkpoint_dir', '"""test.txt"""'], {}), "(checkpoint_dir, 'test.txt')\n", (5984, 6012), False, 'import os\n'), ((6189, 6229), 'os.path.join', 'os.path.join', (['checkpoint_dir', '"""test.txt"""'], {}), "(checkpoint_dir, 'test.txt')\n", (6201, 6229), False, 'import os\n'), ((1838, 1879), 'os.path.join', 'os.path.join', (['absoulte_exp_dir', 'child_dir'], {}), '(absoulte_exp_dir, child_dir)\n', (1850, 1879), False, 'import os\n')] |
import numpy as np
import pytest
import theano
import theano.tensor as tt
# Don't import test classes otherwise they get tested as part of the file
from tests import unittest_tools as utt
from tests.gpuarray.config import mode_with_gpu, mode_without_gpu, test_ctx_name
from tests.tensor.test_basic import (
TestAlloc,
TestComparison,
TestJoinAndSplit,
TestReshape,
)
from tests.tensor.utils import rand, safe_make_node
from theano.gpuarray.basic_ops import (
GpuAlloc,
GpuAllocEmpty,
GpuContiguous,
GpuEye,
GpuFromHost,
GpuJoin,
GpuReshape,
GpuSplit,
GpuToGpu,
GpuTri,
HostFromGpu,
gpu_contiguous,
gpu_join,
host_from_gpu,
)
from theano.gpuarray.elemwise import GpuDimShuffle, GpuElemwise
from theano.gpuarray.subtensor import GpuSubtensor
from theano.gpuarray.type import GpuArrayType, get_context, gpuarray_shared_constructor
from theano.tensor import TensorType
from theano.tensor.basic import alloc
pygpu = pytest.importorskip("pygpu")
gpuarray = pygpu.gpuarray
utt.seed_rng()
rng = np.random.RandomState(seed=utt.fetch_seed())
def inplace_func(
inputs,
outputs,
mode=None,
allow_input_downcast=False,
on_unused_input="raise",
name=None,
):
if mode is None:
mode = mode_with_gpu
return theano.function(
inputs,
outputs,
mode=mode,
allow_input_downcast=allow_input_downcast,
accept_inplace=True,
on_unused_input=on_unused_input,
name=name,
)
def fake_shared(value, name=None, strict=False, allow_downcast=None, **kwargs):
from theano.tensor.sharedvar import scalar_constructor, tensor_constructor
for c in (gpuarray_shared_constructor, tensor_constructor, scalar_constructor):
try:
return c(
value, name=name, strict=strict, allow_downcast=allow_downcast, **kwargs
)
except TypeError:
continue
def rand_gpuarray(*shape, **kwargs):
r = rng.rand(*shape) * 2 - 1
dtype = kwargs.pop("dtype", theano.config.floatX)
cls = kwargs.pop("cls", None)
if len(kwargs) != 0:
raise TypeError("Unexpected argument %s", list(kwargs.keys())[0])
return gpuarray.array(r, dtype=dtype, cls=cls, context=get_context(test_ctx_name))
def makeTester(
name,
op,
gpu_op,
cases,
checks=None,
mode_gpu=mode_with_gpu,
mode_nogpu=mode_without_gpu,
skip=False,
eps=1e-10,
):
if checks is None:
checks = {}
_op = op
_gpu_op = gpu_op
_cases = cases
_skip = skip
_checks = checks
class Checker(utt.OptimizationTestMixin):
op = staticmethod(_op)
gpu_op = staticmethod(_gpu_op)
cases = _cases
skip = _skip
checks = _checks
def setup_method(self):
eval(self.__class__.__module__ + "." + self.__class__.__name__)
def test_all(self):
if skip:
pytest.skip(skip)
for testname, inputs in cases.items():
for _ in range(len(inputs)):
if type(inputs[_]) is float:
inputs[_] = np.asarray(inputs[_], dtype=theano.config.floatX)
self.run_case(testname, inputs)
def run_case(self, testname, inputs):
inputs_ref = [theano.shared(inp) for inp in inputs]
inputs_tst = [theano.shared(inp) for inp in inputs]
try:
node_ref = safe_make_node(self.op, *inputs_ref)
node_tst = safe_make_node(self.op, *inputs_tst)
except Exception as exc:
err_msg = (
"Test %s::%s: Error occurred while making " "a node with inputs %s"
) % (self.gpu_op, testname, inputs)
exc.args += (err_msg,)
raise
try:
f_ref = inplace_func([], node_ref.outputs, mode=mode_nogpu)
f_tst = inplace_func([], node_tst.outputs, mode=mode_gpu)
except Exception as exc:
err_msg = (
"Test %s::%s: Error occurred while trying to " "make a Function"
) % (self.gpu_op, testname)
exc.args += (err_msg,)
raise
self.assertFunctionContains1(f_tst, self.gpu_op)
ref_e = None
try:
expecteds = f_ref()
except Exception as exc:
ref_e = exc
try:
variables = f_tst()
except Exception as exc:
if ref_e is None:
err_msg = (
"Test %s::%s: exception when calling the " "Function"
) % (self.gpu_op, testname)
exc.args += (err_msg,)
raise
else:
# if we raised an exception of the same type we're good.
if isinstance(exc, type(ref_e)):
return
else:
err_msg = (
"Test %s::%s: exception raised during test "
"call was not the same as the reference "
"call (got: %s, expected %s)"
% (self.gpu_op, testname, type(exc), type(ref_e))
)
exc.args += (err_msg,)
raise
for i, (variable, expected) in enumerate(zip(variables, expecteds)):
condition = (
variable.dtype != expected.dtype
or variable.shape != expected.shape
or not TensorType.values_eq_approx(variable, expected)
)
assert not condition, (
"Test %s::%s: Output %s gave the wrong "
"value. With inputs %s, expected %s "
"(dtype %s), got %s (dtype %s)."
% (
self.op,
testname,
i,
inputs,
expected,
expected.dtype,
variable,
variable.dtype,
)
)
for description, check in self.checks.items():
assert check(inputs, variables), (
"Test %s::%s: Failed check: %s " "(inputs were %s, ouputs were %s)"
) % (self.op, testname, description, inputs, variables)
Checker.__name__ = name
if hasattr(Checker, "__qualname__"):
Checker.__qualname__ = name
return Checker
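# (Added explanatory note, not part of the original file.) makeTester is a test-class
# factory: for each named case it builds the same graph twice from shared inputs,
# compiles one function without the GPU mode and one with it, asserts that the GPU op
# actually appears in the compiled graph, and then compares the outputs (or the raised
# exception types) of the two backends via TensorType.values_eq_approx.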
def test_transfer_cpu_gpu():
a = tt.fmatrix("a")
g = GpuArrayType(dtype="float32", broadcastable=(False, False))("g")
av = np.asarray(rng.rand(5, 4), dtype="float32")
gv = gpuarray.array(av, context=get_context(test_ctx_name))
f = theano.function([a], GpuFromHost(test_ctx_name)(a))
fv = f(av)
assert GpuArrayType.values_eq(fv, gv)
f = theano.function([g], host_from_gpu(g))
fv = f(gv)
assert np.all(fv == av)
def test_transfer_gpu_gpu():
g = GpuArrayType(
dtype="float32", broadcastable=(False, False), context_name=test_ctx_name
)()
av = np.asarray(rng.rand(5, 4), dtype="float32")
gv = gpuarray.array(av, context=get_context(test_ctx_name))
mode = mode_with_gpu.excluding(
"cut_gpua_host_transfers", "local_cut_gpua_host_gpua"
)
f = theano.function([g], GpuToGpu(test_ctx_name)(g), mode=mode)
topo = f.maker.fgraph.toposort()
assert len(topo) == 1
assert isinstance(topo[0].op, GpuToGpu)
fv = f(gv)
assert GpuArrayType.values_eq(fv, gv)
def test_transfer_strided():
# This is just to ensure that it works in theano
    # libgpuarray has a much more comprehensive suite of tests to
# ensure correctness
a = tt.fmatrix("a")
g = GpuArrayType(dtype="float32", broadcastable=(False, False))("g")
av = np.asarray(rng.rand(5, 8), dtype="float32")
gv = gpuarray.array(av, context=get_context(test_ctx_name))
av = av[:, ::2]
gv = gv[:, ::2]
f = theano.function([a], GpuFromHost(test_ctx_name)(a))
fv = f(av)
assert GpuArrayType.values_eq(fv, gv)
f = theano.function([g], host_from_gpu(g))
fv = f(gv)
assert np.all(fv == av)
def gpu_alloc_expected(x, *shp):
g = gpuarray.empty(shp, dtype=x.dtype, context=get_context(test_ctx_name))
g[:] = x
return g
TestGpuAlloc = makeTester(
name="GpuAllocTester",
# The +1 is there to allow the lift to the GPU.
op=lambda *args: alloc(*args) + 1,
gpu_op=GpuAlloc(test_ctx_name),
cases=dict(
correct01=(rand(), np.int32(7)),
# just gives a DeepCopyOp with possibly wrong results on the CPU
# correct01_bcast=(rand(1), np.int32(7)),
correct02=(rand(), np.int32(4), np.int32(7)),
correct12=(rand(7), np.int32(4), np.int32(7)),
correct13=(rand(7), np.int32(2), np.int32(4), np.int32(7)),
correct23=(rand(4, 7), np.int32(2), np.int32(4), np.int32(7)),
bad_shape12=(rand(7), np.int32(7), np.int32(5)),
),
)
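# (Added note.) Each entry in the cases dict above maps a test name to the inputs fed to
# alloc: a fill value followed by shape scalars. The "bad_shape12" case is expected to
# raise on both backends; Checker.run_case counts matching exception types as a pass.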
class TestGPUAlloc(TestAlloc):
dtype = "float32"
mode = mode_with_gpu
shared = staticmethod(gpuarray_shared_constructor)
allocs = [GpuAlloc(test_ctx_name), GpuAlloc(test_ctx_name), tt.Alloc()]
def test_alloc_empty():
for dt in ["float32", "int8"]:
f = theano.function([], GpuAllocEmpty(dt, context_name=test_ctx_name)(2, 3))
assert len(f.maker.fgraph.apply_nodes) == 1
out = f()
assert out.shape == (2, 3)
assert out.dtype == dt
f = theano.function(
[],
[
GpuAllocEmpty("uint64", test_ctx_name)(3, 2),
GpuAllocEmpty("uint64", test_ctx_name)(3, 2),
],
)
out = f()
assert out[0].shape == (3, 2)
assert out[0].dtype == "uint64"
assert out[1].shape == (3, 2)
assert out[1].dtype == "uint64"
assert (
len(
[
node
for node in f.maker.fgraph.apply_nodes
if isinstance(node.op, GpuAllocEmpty)
]
)
== 1
)
def test_shape():
x = GpuArrayType(dtype="float32", broadcastable=[False, False, False])()
v = gpuarray.zeros((3, 4, 5), dtype="float32", context=get_context(test_ctx_name))
f = theano.function([x], x.shape)
topo = f.maker.fgraph.toposort()
assert np.all(f(v) == (3, 4, 5))
if theano.config.mode != "FAST_COMPILE":
assert len(topo) == 4
assert isinstance(topo[0].op, tt.opt.Shape_i)
assert isinstance(topo[1].op, tt.opt.Shape_i)
assert isinstance(topo[2].op, tt.opt.Shape_i)
assert isinstance(topo[3].op, tt.opt.MakeVector)
mode = mode_with_gpu.excluding("local_shape_to_shape_i")
f = theano.function([x], x.shape, mode=mode)
topo = f.maker.fgraph.toposort()
assert np.all(f(v) == (3, 4, 5))
assert len(topo) == 1
assert isinstance(topo[0].op, tt.Shape)
def test_gpu_contiguous():
a = tt.fmatrix("a")
i = tt.iscalar("i")
a_val = np.asarray(np.random.rand(4, 5), dtype="float32")
# The reshape is needed otherwise we make the subtensor on the CPU
# to transfer less data.
f = theano.function(
[a, i], gpu_contiguous(a.reshape((5, 4))[::i]), mode=mode_with_gpu
)
topo = f.maker.fgraph.toposort()
assert any([isinstance(node.op, GpuSubtensor) for node in topo])
assert any([isinstance(node.op, GpuContiguous) for node in topo])
assert f(a_val, 1).flags.c_contiguous
assert f(a_val, 2).flags.c_contiguous
assert f(a_val, 2).flags.c_contiguous
class TestGPUReshape(TestReshape):
def setup_method(self):
self.shared = gpuarray_shared_constructor
self.op = GpuReshape
self.mode = mode_with_gpu
self.ignore_topo = (
HostFromGpu,
GpuFromHost,
theano.compile.DeepCopyOp,
GpuDimShuffle,
GpuElemwise,
tt.opt.Shape_i,
tt.opt.MakeVector,
)
assert self.op == GpuReshape
class TestGPUComparison(TestComparison):
def setup_method(self):
utt.seed_rng()
self.mode = mode_with_gpu
self.shared = gpuarray_shared_constructor
self.dtypes = ["float64", "float32"]
class TestGPUJoinAndSplit(TestJoinAndSplit):
def setup_method(self):
self.mode = mode_with_gpu.excluding("constant_folding")
self.join_op = GpuJoin()
self.split_op_class = GpuSplit
# Use join instead of MakeVector since there is no MakeVector on GPU
self.make_vector_op = GpuJoin()
# this is to avoid errors with limited devices
self.floatX = "float32"
self.hide_error = theano.config.mode not in ["DebugMode", "DEBUG_MODE"]
def shared(x, **kwargs):
return gpuarray_shared_constructor(x, target=test_ctx_name, **kwargs)
self.shared = shared
def test_gpusplit_opt(self):
# Test that we move the node to the GPU
# Also test float16 computation at the same time.
rng = np.random.RandomState(seed=utt.fetch_seed())
m = self.shared(rng.rand(4, 6).astype("float16"))
o = tt.Split(2)(m, 0, [2, 2])
assert o[0].dtype == "float16"
f = theano.function([], o, mode=self.mode)
assert any(
[
isinstance(node.op, self.split_op_class)
for node in f.maker.fgraph.toposort()
]
)
o1, o2 = f()
assert np.allclose(o1, m.get_value(borrow=True)[:2])
assert np.allclose(o2, m.get_value(borrow=True)[2:])
def test_gpujoin_gpualloc():
a = tt.fmatrix("a")
a_val = np.asarray(np.random.rand(4, 5), dtype="float32")
b = tt.fmatrix("b")
b_val = np.asarray(np.random.rand(3, 5), dtype="float32")
f = theano.function(
[a, b], tt.join(0, tt.zeros_like(a), tt.ones_like(b)) + 4, mode=mode_without_gpu
)
f_gpu = theano.function(
[a, b], tt.join(0, tt.zeros_like(a), tt.ones_like(b)), mode=mode_with_gpu
)
f_gpu2 = theano.function(
[a, b], tt.join(0, tt.zeros_like(a), tt.ones_like(b)) + 4, mode=mode_with_gpu
)
assert sum([node.op == tt.alloc for node in f.maker.fgraph.toposort()]) == 2
assert sum([node.op == tt.join_ for node in f.maker.fgraph.toposort()]) == 1
assert (
sum([isinstance(node.op, GpuAlloc) for node in f_gpu.maker.fgraph.toposort()])
== 2
)
assert sum([node.op == gpu_join for node in f_gpu.maker.fgraph.toposort()]) == 1
assert (
sum([isinstance(node.op, GpuAlloc) for node in f_gpu2.maker.fgraph.toposort()])
== 2
)
assert sum([node.op == gpu_join for node in f_gpu2.maker.fgraph.toposort()]) == 1
assert np.allclose(f(a_val, b_val), f_gpu2(a_val, b_val))
def test_gpueye():
def check(dtype, N, M_=None, k=0):
# Theano does not accept None as a tensor.
# So we must use a real value.
M = M_
# Currently DebugMode does not support None as inputs even if this is
# allowed.
if M is None:
M = N
N_symb = tt.iscalar()
M_symb = tt.iscalar()
k_symb = tt.iscalar()
out = tt.eye(N_symb, M_symb, k_symb, dtype=dtype) + np.array(1).astype(dtype)
f = theano.function([N_symb, M_symb, k_symb], out, mode=mode_with_gpu)
result = np.asarray(f(N, M, k)) - np.array(1).astype(dtype)
assert np.allclose(result, np.eye(N, M_, k, dtype=dtype))
assert result.dtype == np.dtype(dtype)
assert any([isinstance(node.op, GpuEye) for node in f.maker.fgraph.toposort()])
for dtype in ["float32", "int32", "float16"]:
check(dtype, 3)
# M != N, k = 0
check(dtype, 3, 5)
check(dtype, 5, 3)
# N == M, k != 0
check(dtype, 3, 3, 1)
check(dtype, 3, 3, -1)
# N < M, k != 0
check(dtype, 3, 5, 1)
check(dtype, 3, 5, -1)
# N > M, k != 0
check(dtype, 5, 3, 1)
check(dtype, 5, 3, -1)
# k > M, -k > N, k > M, k > N
check(dtype, 5, 3, 3)
check(dtype, 3, 5, 3)
check(dtype, 5, 3, -3)
check(dtype, 3, 5, -3)
check(dtype, 5, 3, 6)
check(dtype, 3, 5, -6)
def test_hostfromgpu_shape_i():
# Test that the shape is lifted over hostfromgpu
m = mode_with_gpu.including(
"local_dot_to_dot22", "local_dot22_to_dot22scalar", "specialize"
)
a = tt.fmatrix("a")
ca = theano.gpuarray.type.GpuArrayType("float32", (False, False))()
av = np.asarray(np.random.rand(5, 4), dtype="float32")
cv = gpuarray.asarray(
np.random.rand(5, 4), dtype="float32", context=get_context(test_ctx_name)
)
f = theano.function([a], GpuFromHost(test_ctx_name)(a), mode=m)
assert any(isinstance(x.op, GpuFromHost) for x in f.maker.fgraph.toposort())
f = theano.function([a], GpuFromHost(test_ctx_name)(a).shape, mode=m)
topo = f.maker.fgraph.toposort()
assert isinstance(topo[0].op, tt.opt.Shape_i)
assert isinstance(topo[1].op, tt.opt.Shape_i)
assert isinstance(topo[2].op, tt.opt.MakeVector)
assert tuple(f(av)) == (5, 4)
f = theano.function([ca], host_from_gpu(ca), mode=m)
assert host_from_gpu in [x.op for x in f.maker.fgraph.toposort()]
f = theano.function([ca], host_from_gpu(ca).shape, mode=m)
topo = f.maker.fgraph.toposort()
assert isinstance(topo[0].op, theano.compile.Shape_i)
assert isinstance(topo[1].op, theano.compile.Shape_i)
assert isinstance(topo[2].op, tt.opt.MakeVector)
assert tuple(f(cv)) == (5, 4)
def test_Gpujoin_inplace():
# Test Gpujoin to work inplace.
#
# This function tests the case when several elements are passed to the
# Gpujoin function but all except one of them are empty. In this case
# Gpujoin should work inplace and the output should be the view of the
# non-empty element.
s = tt.lscalar()
data = np.array([3, 4, 5], dtype=theano.config.floatX)
x = gpuarray_shared_constructor(data, borrow=True)
z = tt.zeros((s,))
join = GpuJoin(view=0)
c = join(0, x, z)
f = theano.function([s], theano.Out(c, borrow=True))
if not isinstance(mode_with_gpu, theano.compile.DebugMode):
assert x.get_value(borrow=True, return_internal_type=True) is f(0)
assert np.allclose(f(0), [3, 4, 5])
def test_gpu_tril_triu():
def check_l(m, k=0):
m_symb = tt.matrix(dtype=m.dtype)
k_symb = tt.iscalar()
f = theano.function(
[m_symb, k_symb], tt.tril(m_symb, k_symb), mode=mode_with_gpu
)
result = f(m, k)
assert np.allclose(result, np.tril(m, k))
assert result.dtype == np.dtype(dtype)
assert any([isinstance(node.op, GpuTri) for node in f.maker.fgraph.toposort()])
def check_u(m, k=0):
m_symb = tt.matrix(dtype=m.dtype)
k_symb = tt.iscalar()
f = theano.function(
[m_symb, k_symb], tt.triu(m_symb, k_symb), mode=mode_with_gpu
)
result = f(m, k)
assert np.allclose(result, np.triu(m, k))
assert result.dtype == np.dtype(dtype)
assert any([isinstance(node.op, GpuTri) for node in f.maker.fgraph.toposort()])
utt.seed_rng()
test_rng = np.random.RandomState(seed=utt.fetch_seed())
for dtype in ["float64", "float32", "float16"]:
# try a big one
m = np.asarray(test_rng.rand(5000, 5000) * 2 - 1, dtype=dtype)
check_l(m, 0)
check_l(m, 1)
check_l(m, -1)
check_u(m, 0)
check_u(m, 1)
check_u(m, -1)
m = np.asarray(test_rng.rand(10, 10) * 2 - 1, dtype=dtype)
check_l(m, 0)
check_l(m, 1)
check_l(m, -1)
check_u(m, 0)
check_u(m, 1)
check_u(m, -1)
m = np.asarray(test_rng.rand(10, 5) * 2 - 1, dtype=dtype)
check_l(m, 0)
check_l(m, 1)
check_l(m, -1)
check_u(m, 0)
check_u(m, 1)
check_u(m, -1)
def test_gputri():
def check(dtype, N, M_=None, k=0):
# Theano does not accept None as a tensor.
# So we must use a real value.
M = M_
# Currently DebugMode does not support None as inputs even if this is
# allowed.
if M is None:
M = N
N_symb = tt.iscalar()
M_symb = tt.iscalar()
k_symb = tt.iscalar()
out = tt.tri(N_symb, M_symb, k_symb, dtype=dtype) + np.array(1).astype(dtype)
f = theano.function([N_symb, M_symb, k_symb], out, mode=mode_with_gpu)
result = np.asarray(f(N, M, k)) - np.array(1).astype(dtype)
assert np.allclose(result, np.tri(N, M_, k, dtype=dtype))
assert result.dtype == np.dtype(dtype)
assert any([isinstance(node.op, GpuTri) for node in f.maker.fgraph.toposort()])
for dtype in ["float64", "float32", "int32", "float16"]:
# try a big one
check(dtype, 1000, 1000, 0)
check(dtype, 1000, 1000, -400)
check(dtype, 1000, 1000, 400)
check(dtype, 5)
# M != N, k = 0
check(dtype, 3, 5)
check(dtype, 5, 3)
# N == M, k != 0
check(dtype, 3, 3, 1)
check(dtype, 3, 3, -1)
# N < M, k != 0
check(dtype, 3, 5, 1)
check(dtype, 3, 5, -1)
# N > M, k != 0
check(dtype, 5, 3, 1)
check(dtype, 5, 3, -1)
# k > M, -k > N, k > M, k > N
check(dtype, 5, 3, 3)
check(dtype, 3, 5, 3)
check(dtype, 5, 3, -3)
check(dtype, 3, 5, -3)
check(dtype, 5, 3, 6)
check(dtype, 3, 5, -6)
| [
"tests.unittest_tools.seed_rng",
"theano.tensor.iscalar",
"theano.tensor.lscalar",
"numpy.random.rand",
"tests.gpuarray.config.mode_with_gpu.excluding",
"numpy.int32",
"theano.tensor.zeros_like",
"numpy.array",
"theano.gpuarray.type.gpuarray_shared_constructor",
"tests.unittest_tools.fetch_seed",
"theano.tensor.basic.alloc",
"theano.gpuarray.basic_ops.GpuAllocEmpty",
"theano.shared",
"theano.function",
"theano.gpuarray.basic_ops.GpuJoin",
"theano.gpuarray.type.GpuArrayType",
"numpy.asarray",
"theano.gpuarray.basic_ops.GpuToGpu",
"theano.tensor.zeros",
"theano.tensor.Split",
"theano.tensor.triu",
"numpy.dtype",
"numpy.triu",
"pytest.skip",
"theano.gpuarray.basic_ops.GpuFromHost",
"numpy.tri",
"theano.tensor.TensorType.values_eq_approx",
"numpy.eye",
"theano.tensor.tri",
"theano.gpuarray.basic_ops.host_from_gpu",
"theano.tensor.fmatrix",
"theano.gpuarray.basic_ops.GpuAlloc",
"theano.tensor.ones_like",
"theano.gpuarray.type.get_context",
"theano.tensor.tril",
"theano.tensor.matrix",
"tests.gpuarray.config.mode_with_gpu.including",
"tests.tensor.utils.safe_make_node",
"pytest.importorskip",
"theano.Out",
"theano.tensor.eye",
"numpy.tril",
"tests.tensor.utils.rand",
"numpy.all",
"theano.gpuarray.type.GpuArrayType.values_eq",
"theano.tensor.Alloc"
] | [((987, 1015), 'pytest.importorskip', 'pytest.importorskip', (['"""pygpu"""'], {}), "('pygpu')\n", (1006, 1015), False, 'import pytest\n'), ((1043, 1057), 'tests.unittest_tools.seed_rng', 'utt.seed_rng', ([], {}), '()\n', (1055, 1057), True, 'from tests import unittest_tools as utt\n'), ((1309, 1470), 'theano.function', 'theano.function', (['inputs', 'outputs'], {'mode': 'mode', 'allow_input_downcast': 'allow_input_downcast', 'accept_inplace': '(True)', 'on_unused_input': 'on_unused_input', 'name': 'name'}), '(inputs, outputs, mode=mode, allow_input_downcast=\n allow_input_downcast, accept_inplace=True, on_unused_input=\n on_unused_input, name=name)\n', (1324, 1470), False, 'import theano\n'), ((6760, 6775), 'theano.tensor.fmatrix', 'tt.fmatrix', (['"""a"""'], {}), "('a')\n", (6770, 6775), True, 'import theano.tensor as tt\n'), ((7054, 7084), 'theano.gpuarray.type.GpuArrayType.values_eq', 'GpuArrayType.values_eq', (['fv', 'gv'], {}), '(fv, gv)\n', (7076, 7084), False, 'from theano.gpuarray.type import GpuArrayType, get_context, gpuarray_shared_constructor\n'), ((7159, 7175), 'numpy.all', 'np.all', (['(fv == av)'], {}), '(fv == av)\n', (7165, 7175), True, 'import numpy as np\n'), ((7448, 7526), 'tests.gpuarray.config.mode_with_gpu.excluding', 'mode_with_gpu.excluding', (['"""cut_gpua_host_transfers"""', '"""local_cut_gpua_host_gpua"""'], {}), "('cut_gpua_host_transfers', 'local_cut_gpua_host_gpua')\n", (7471, 7526), False, 'from tests.gpuarray.config import mode_with_gpu, mode_without_gpu, test_ctx_name\n'), ((7742, 7772), 'theano.gpuarray.type.GpuArrayType.values_eq', 'GpuArrayType.values_eq', (['fv', 'gv'], {}), '(fv, gv)\n', (7764, 7772), False, 'from theano.gpuarray.type import GpuArrayType, get_context, gpuarray_shared_constructor\n'), ((7955, 7970), 'theano.tensor.fmatrix', 'tt.fmatrix', (['"""a"""'], {}), "('a')\n", (7965, 7970), True, 'import theano.tensor as tt\n'), ((8290, 8320), 'theano.gpuarray.type.GpuArrayType.values_eq', 'GpuArrayType.values_eq', (['fv', 'gv'], {}), '(fv, gv)\n', (8312, 8320), False, 'from theano.gpuarray.type import GpuArrayType, get_context, gpuarray_shared_constructor\n'), ((8395, 8411), 'numpy.all', 'np.all', (['(fv == av)'], {}), '(fv == av)\n', (8401, 8411), True, 'import numpy as np\n'), ((10462, 10491), 'theano.function', 'theano.function', (['[x]', 'x.shape'], {}), '([x], x.shape)\n', (10477, 10491), False, 'import theano\n'), ((10871, 10920), 'tests.gpuarray.config.mode_with_gpu.excluding', 'mode_with_gpu.excluding', (['"""local_shape_to_shape_i"""'], {}), "('local_shape_to_shape_i')\n", (10894, 10920), False, 'from tests.gpuarray.config import mode_with_gpu, mode_without_gpu, test_ctx_name\n'), ((10929, 10969), 'theano.function', 'theano.function', (['[x]', 'x.shape'], {'mode': 'mode'}), '([x], x.shape, mode=mode)\n', (10944, 10969), False, 'import theano\n'), ((11151, 11166), 'theano.tensor.fmatrix', 'tt.fmatrix', (['"""a"""'], {}), "('a')\n", (11161, 11166), True, 'import theano.tensor as tt\n'), ((11175, 11190), 'theano.tensor.iscalar', 'tt.iscalar', (['"""i"""'], {}), "('i')\n", (11185, 11190), True, 'import theano.tensor as tt\n'), ((13815, 13830), 'theano.tensor.fmatrix', 'tt.fmatrix', (['"""a"""'], {}), "('a')\n", (13825, 13830), True, 'import theano.tensor as tt\n'), ((13901, 13916), 'theano.tensor.fmatrix', 'tt.fmatrix', (['"""b"""'], {}), "('b')\n", (13911, 13916), True, 'import theano.tensor as tt\n'), ((16526, 16619), 'tests.gpuarray.config.mode_with_gpu.including', 'mode_with_gpu.including', (['"""local_dot_to_dot22"""', 
'"""local_dot22_to_dot22scalar"""', '"""specialize"""'], {}), "('local_dot_to_dot22', 'local_dot22_to_dot22scalar',\n 'specialize')\n", (16549, 16619), False, 'from tests.gpuarray.config import mode_with_gpu, mode_without_gpu, test_ctx_name\n'), ((16638, 16653), 'theano.tensor.fmatrix', 'tt.fmatrix', (['"""a"""'], {}), "('a')\n", (16648, 16653), True, 'import theano.tensor as tt\n'), ((18108, 18120), 'theano.tensor.lscalar', 'tt.lscalar', ([], {}), '()\n', (18118, 18120), True, 'import theano.tensor as tt\n'), ((18132, 18179), 'numpy.array', 'np.array', (['[3, 4, 5]'], {'dtype': 'theano.config.floatX'}), '([3, 4, 5], dtype=theano.config.floatX)\n', (18140, 18179), True, 'import numpy as np\n'), ((18188, 18234), 'theano.gpuarray.type.gpuarray_shared_constructor', 'gpuarray_shared_constructor', (['data'], {'borrow': '(True)'}), '(data, borrow=True)\n', (18215, 18234), False, 'from theano.gpuarray.type import GpuArrayType, get_context, gpuarray_shared_constructor\n'), ((18243, 18257), 'theano.tensor.zeros', 'tt.zeros', (['(s,)'], {}), '((s,))\n', (18251, 18257), True, 'import theano.tensor as tt\n'), ((18270, 18285), 'theano.gpuarray.basic_ops.GpuJoin', 'GpuJoin', ([], {'view': '(0)'}), '(view=0)\n', (18277, 18285), False, 'from theano.gpuarray.basic_ops import GpuAlloc, GpuAllocEmpty, GpuContiguous, GpuEye, GpuFromHost, GpuJoin, GpuReshape, GpuSplit, GpuToGpu, GpuTri, HostFromGpu, gpu_contiguous, gpu_join, host_from_gpu\n'), ((19420, 19434), 'tests.unittest_tools.seed_rng', 'utt.seed_rng', ([], {}), '()\n', (19432, 19434), True, 'from tests import unittest_tools as utt\n'), ((1091, 1107), 'tests.unittest_tools.fetch_seed', 'utt.fetch_seed', ([], {}), '()\n', (1105, 1107), True, 'from tests import unittest_tools as utt\n'), ((6784, 6843), 'theano.gpuarray.type.GpuArrayType', 'GpuArrayType', ([], {'dtype': '"""float32"""', 'broadcastable': '(False, False)'}), "(dtype='float32', broadcastable=(False, False))\n", (6796, 6843), False, 'from theano.gpuarray.type import GpuArrayType, get_context, gpuarray_shared_constructor\n'), ((7115, 7131), 'theano.gpuarray.basic_ops.host_from_gpu', 'host_from_gpu', (['g'], {}), '(g)\n', (7128, 7131), False, 'from theano.gpuarray.basic_ops import GpuAlloc, GpuAllocEmpty, GpuContiguous, GpuEye, GpuFromHost, GpuJoin, GpuReshape, GpuSplit, GpuToGpu, GpuTri, HostFromGpu, gpu_contiguous, gpu_join, host_from_gpu\n'), ((7215, 7307), 'theano.gpuarray.type.GpuArrayType', 'GpuArrayType', ([], {'dtype': '"""float32"""', 'broadcastable': '(False, False)', 'context_name': 'test_ctx_name'}), "(dtype='float32', broadcastable=(False, False), context_name=\n test_ctx_name)\n", (7227, 7307), False, 'from theano.gpuarray.type import GpuArrayType, get_context, gpuarray_shared_constructor\n'), ((7979, 8038), 'theano.gpuarray.type.GpuArrayType', 'GpuArrayType', ([], {'dtype': '"""float32"""', 'broadcastable': '(False, False)'}), "(dtype='float32', broadcastable=(False, False))\n", (7991, 8038), False, 'from theano.gpuarray.type import GpuArrayType, get_context, gpuarray_shared_constructor\n'), ((8351, 8367), 'theano.gpuarray.basic_ops.host_from_gpu', 'host_from_gpu', (['g'], {}), '(g)\n', (8364, 8367), False, 'from theano.gpuarray.basic_ops import GpuAlloc, GpuAllocEmpty, GpuContiguous, GpuEye, GpuFromHost, GpuJoin, GpuReshape, GpuSplit, GpuToGpu, GpuTri, HostFromGpu, gpu_contiguous, gpu_join, host_from_gpu\n'), ((8710, 8733), 'theano.gpuarray.basic_ops.GpuAlloc', 'GpuAlloc', (['test_ctx_name'], {}), '(test_ctx_name)\n', (8718, 8733), False, 'from theano.gpuarray.basic_ops import 
GpuAlloc, GpuAllocEmpty, GpuContiguous, GpuEye, GpuFromHost, GpuJoin, GpuReshape, GpuSplit, GpuToGpu, GpuTri, HostFromGpu, gpu_contiguous, gpu_join, host_from_gpu\n'), ((9378, 9401), 'theano.gpuarray.basic_ops.GpuAlloc', 'GpuAlloc', (['test_ctx_name'], {}), '(test_ctx_name)\n', (9386, 9401), False, 'from theano.gpuarray.basic_ops import GpuAlloc, GpuAllocEmpty, GpuContiguous, GpuEye, GpuFromHost, GpuJoin, GpuReshape, GpuSplit, GpuToGpu, GpuTri, HostFromGpu, gpu_contiguous, gpu_join, host_from_gpu\n'), ((9403, 9426), 'theano.gpuarray.basic_ops.GpuAlloc', 'GpuAlloc', (['test_ctx_name'], {}), '(test_ctx_name)\n', (9411, 9426), False, 'from theano.gpuarray.basic_ops import GpuAlloc, GpuAllocEmpty, GpuContiguous, GpuEye, GpuFromHost, GpuJoin, GpuReshape, GpuSplit, GpuToGpu, GpuTri, HostFromGpu, gpu_contiguous, gpu_join, host_from_gpu\n'), ((9428, 9438), 'theano.tensor.Alloc', 'tt.Alloc', ([], {}), '()\n', (9436, 9438), True, 'import theano.tensor as tt\n'), ((10298, 10364), 'theano.gpuarray.type.GpuArrayType', 'GpuArrayType', ([], {'dtype': '"""float32"""', 'broadcastable': '[False, False, False]'}), "(dtype='float32', broadcastable=[False, False, False])\n", (10310, 10364), False, 'from theano.gpuarray.type import GpuArrayType, get_context, gpuarray_shared_constructor\n'), ((11214, 11234), 'numpy.random.rand', 'np.random.rand', (['(4)', '(5)'], {}), '(4, 5)\n', (11228, 11234), True, 'import numpy as np\n'), ((12294, 12308), 'tests.unittest_tools.seed_rng', 'utt.seed_rng', ([], {}), '()\n', (12306, 12308), True, 'from tests import unittest_tools as utt\n'), ((12533, 12576), 'tests.gpuarray.config.mode_with_gpu.excluding', 'mode_with_gpu.excluding', (['"""constant_folding"""'], {}), "('constant_folding')\n", (12556, 12576), False, 'from tests.gpuarray.config import mode_with_gpu, mode_without_gpu, test_ctx_name\n'), ((12600, 12609), 'theano.gpuarray.basic_ops.GpuJoin', 'GpuJoin', ([], {}), '()\n', (12607, 12609), False, 'from theano.gpuarray.basic_ops import GpuAlloc, GpuAllocEmpty, GpuContiguous, GpuEye, GpuFromHost, GpuJoin, GpuReshape, GpuSplit, GpuToGpu, GpuTri, HostFromGpu, gpu_contiguous, gpu_join, host_from_gpu\n'), ((12756, 12765), 'theano.gpuarray.basic_ops.GpuJoin', 'GpuJoin', ([], {}), '()\n', (12763, 12765), False, 'from theano.gpuarray.basic_ops import GpuAlloc, GpuAllocEmpty, GpuContiguous, GpuEye, GpuFromHost, GpuJoin, GpuReshape, GpuSplit, GpuToGpu, GpuTri, HostFromGpu, gpu_contiguous, gpu_join, host_from_gpu\n'), ((13425, 13463), 'theano.function', 'theano.function', (['[]', 'o'], {'mode': 'self.mode'}), '([], o, mode=self.mode)\n', (13440, 13463), False, 'import theano\n'), ((13854, 13874), 'numpy.random.rand', 'np.random.rand', (['(4)', '(5)'], {}), '(4, 5)\n', (13868, 13874), True, 'import numpy as np\n'), ((13940, 13960), 'numpy.random.rand', 'np.random.rand', (['(3)', '(5)'], {}), '(3, 5)\n', (13954, 13960), True, 'import numpy as np\n'), ((15292, 15304), 'theano.tensor.iscalar', 'tt.iscalar', ([], {}), '()\n', (15302, 15304), True, 'import theano.tensor as tt\n'), ((15322, 15334), 'theano.tensor.iscalar', 'tt.iscalar', ([], {}), '()\n', (15332, 15334), True, 'import theano.tensor as tt\n'), ((15352, 15364), 'theano.tensor.iscalar', 'tt.iscalar', ([], {}), '()\n', (15362, 15364), True, 'import theano.tensor as tt\n'), ((15463, 15529), 'theano.function', 'theano.function', (['[N_symb, M_symb, k_symb]', 'out'], {'mode': 'mode_with_gpu'}), '([N_symb, M_symb, k_symb], out, mode=mode_with_gpu)\n', (15478, 15529), False, 'import theano\n'), ((16663, 16723), 
'theano.gpuarray.type.GpuArrayType', 'theano.gpuarray.type.GpuArrayType', (['"""float32"""', '(False, False)'], {}), "('float32', (False, False))\n", (16696, 16723), False, 'import theano\n'), ((16746, 16766), 'numpy.random.rand', 'np.random.rand', (['(5)', '(4)'], {}), '(5, 4)\n', (16760, 16766), True, 'import numpy as np\n'), ((16820, 16840), 'numpy.random.rand', 'np.random.rand', (['(5)', '(4)'], {}), '(5, 4)\n', (16834, 16840), True, 'import numpy as np\n'), ((17379, 17396), 'theano.gpuarray.basic_ops.host_from_gpu', 'host_from_gpu', (['ca'], {}), '(ca)\n', (17392, 17396), False, 'from theano.gpuarray.basic_ops import GpuAlloc, GpuAllocEmpty, GpuContiguous, GpuEye, GpuFromHost, GpuJoin, GpuReshape, GpuSplit, GpuToGpu, GpuTri, HostFromGpu, gpu_contiguous, gpu_join, host_from_gpu\n'), ((18338, 18364), 'theano.Out', 'theano.Out', (['c'], {'borrow': '(True)'}), '(c, borrow=True)\n', (18348, 18364), False, 'import theano\n'), ((18615, 18639), 'theano.tensor.matrix', 'tt.matrix', ([], {'dtype': 'm.dtype'}), '(dtype=m.dtype)\n', (18624, 18639), True, 'import theano.tensor as tt\n'), ((18657, 18669), 'theano.tensor.iscalar', 'tt.iscalar', ([], {}), '()\n', (18667, 18669), True, 'import theano.tensor as tt\n'), ((19037, 19061), 'theano.tensor.matrix', 'tt.matrix', ([], {'dtype': 'm.dtype'}), '(dtype=m.dtype)\n', (19046, 19061), True, 'import theano.tensor as tt\n'), ((19079, 19091), 'theano.tensor.iscalar', 'tt.iscalar', ([], {}), '()\n', (19089, 19091), True, 'import theano.tensor as tt\n'), ((20502, 20514), 'theano.tensor.iscalar', 'tt.iscalar', ([], {}), '()\n', (20512, 20514), True, 'import theano.tensor as tt\n'), ((20532, 20544), 'theano.tensor.iscalar', 'tt.iscalar', ([], {}), '()\n', (20542, 20544), True, 'import theano.tensor as tt\n'), ((20562, 20574), 'theano.tensor.iscalar', 'tt.iscalar', ([], {}), '()\n', (20572, 20574), True, 'import theano.tensor as tt\n'), ((20673, 20739), 'theano.function', 'theano.function', (['[N_symb, M_symb, k_symb]', 'out'], {'mode': 'mode_with_gpu'}), '([N_symb, M_symb, k_symb], out, mode=mode_with_gpu)\n', (20688, 20739), False, 'import theano\n'), ((2273, 2299), 'theano.gpuarray.type.get_context', 'get_context', (['test_ctx_name'], {}), '(test_ctx_name)\n', (2284, 2299), False, 'from theano.gpuarray.type import GpuArrayType, get_context, gpuarray_shared_constructor\n'), ((6939, 6965), 'theano.gpuarray.type.get_context', 'get_context', (['test_ctx_name'], {}), '(test_ctx_name)\n', (6950, 6965), False, 'from theano.gpuarray.type import GpuArrayType, get_context, gpuarray_shared_constructor\n'), ((6997, 7023), 'theano.gpuarray.basic_ops.GpuFromHost', 'GpuFromHost', (['test_ctx_name'], {}), '(test_ctx_name)\n', (7008, 7023), False, 'from theano.gpuarray.basic_ops import GpuAlloc, GpuAllocEmpty, GpuContiguous, GpuEye, GpuFromHost, GpuJoin, GpuReshape, GpuSplit, GpuToGpu, GpuTri, HostFromGpu, gpu_contiguous, gpu_join, host_from_gpu\n'), ((7409, 7435), 'theano.gpuarray.type.get_context', 'get_context', (['test_ctx_name'], {}), '(test_ctx_name)\n', (7420, 7435), False, 'from theano.gpuarray.type import GpuArrayType, get_context, gpuarray_shared_constructor\n'), ((7570, 7593), 'theano.gpuarray.basic_ops.GpuToGpu', 'GpuToGpu', (['test_ctx_name'], {}), '(test_ctx_name)\n', (7578, 7593), False, 'from theano.gpuarray.basic_ops import GpuAlloc, GpuAllocEmpty, GpuContiguous, GpuEye, GpuFromHost, GpuJoin, GpuReshape, GpuSplit, GpuToGpu, GpuTri, HostFromGpu, gpu_contiguous, gpu_join, host_from_gpu\n'), ((8134, 8160), 'theano.gpuarray.type.get_context', 'get_context', 
(['test_ctx_name'], {}), '(test_ctx_name)\n', (8145, 8160), False, 'from theano.gpuarray.type import GpuArrayType, get_context, gpuarray_shared_constructor\n'), ((8233, 8259), 'theano.gpuarray.basic_ops.GpuFromHost', 'GpuFromHost', (['test_ctx_name'], {}), '(test_ctx_name)\n', (8244, 8259), False, 'from theano.gpuarray.basic_ops import GpuAlloc, GpuAllocEmpty, GpuContiguous, GpuEye, GpuFromHost, GpuJoin, GpuReshape, GpuSplit, GpuToGpu, GpuTri, HostFromGpu, gpu_contiguous, gpu_join, host_from_gpu\n'), ((8498, 8524), 'theano.gpuarray.type.get_context', 'get_context', (['test_ctx_name'], {}), '(test_ctx_name)\n', (8509, 8524), False, 'from theano.gpuarray.type import GpuArrayType, get_context, gpuarray_shared_constructor\n'), ((10426, 10452), 'theano.gpuarray.type.get_context', 'get_context', (['test_ctx_name'], {}), '(test_ctx_name)\n', (10437, 10452), False, 'from theano.gpuarray.type import GpuArrayType, get_context, gpuarray_shared_constructor\n'), ((12986, 13048), 'theano.gpuarray.type.gpuarray_shared_constructor', 'gpuarray_shared_constructor', (['x'], {'target': 'test_ctx_name'}), '(x, target=test_ctx_name, **kwargs)\n', (13013, 13048), False, 'from theano.gpuarray.type import GpuArrayType, get_context, gpuarray_shared_constructor\n'), ((13348, 13359), 'theano.tensor.Split', 'tt.Split', (['(2)'], {}), '(2)\n', (13356, 13359), True, 'import theano.tensor as tt\n'), ((14156, 14172), 'theano.tensor.zeros_like', 'tt.zeros_like', (['a'], {}), '(a)\n', (14169, 14172), True, 'import theano.tensor as tt\n'), ((14174, 14189), 'theano.tensor.ones_like', 'tt.ones_like', (['b'], {}), '(b)\n', (14186, 14189), True, 'import theano.tensor as tt\n'), ((15379, 15422), 'theano.tensor.eye', 'tt.eye', (['N_symb', 'M_symb', 'k_symb'], {'dtype': 'dtype'}), '(N_symb, M_symb, k_symb, dtype=dtype)\n', (15385, 15422), True, 'import theano.tensor as tt\n'), ((15634, 15663), 'numpy.eye', 'np.eye', (['N', 'M_', 'k'], {'dtype': 'dtype'}), '(N, M_, k, dtype=dtype)\n', (15640, 15663), True, 'import numpy as np\n'), ((15696, 15711), 'numpy.dtype', 'np.dtype', (['dtype'], {}), '(dtype)\n', (15704, 15711), True, 'import numpy as np\n'), ((16867, 16893), 'theano.gpuarray.type.get_context', 'get_context', (['test_ctx_name'], {}), '(test_ctx_name)\n', (16878, 16893), False, 'from theano.gpuarray.type import GpuArrayType, get_context, gpuarray_shared_constructor\n'), ((16930, 16956), 'theano.gpuarray.basic_ops.GpuFromHost', 'GpuFromHost', (['test_ctx_name'], {}), '(test_ctx_name)\n', (16941, 16956), False, 'from theano.gpuarray.basic_ops import GpuAlloc, GpuAllocEmpty, GpuContiguous, GpuEye, GpuFromHost, GpuJoin, GpuReshape, GpuSplit, GpuToGpu, GpuTri, HostFromGpu, gpu_contiguous, gpu_join, host_from_gpu\n'), ((17506, 17523), 'theano.gpuarray.basic_ops.host_from_gpu', 'host_from_gpu', (['ca'], {}), '(ca)\n', (17519, 17523), False, 'from theano.gpuarray.basic_ops import GpuAlloc, GpuAllocEmpty, GpuContiguous, GpuEye, GpuFromHost, GpuJoin, GpuReshape, GpuSplit, GpuToGpu, GpuTri, HostFromGpu, gpu_contiguous, gpu_join, host_from_gpu\n'), ((18730, 18753), 'theano.tensor.tril', 'tt.tril', (['m_symb', 'k_symb'], {}), '(m_symb, k_symb)\n', (18737, 18753), True, 'import theano.tensor as tt\n'), ((18844, 18857), 'numpy.tril', 'np.tril', (['m', 'k'], {}), '(m, k)\n', (18851, 18857), True, 'import numpy as np\n'), ((18890, 18905), 'numpy.dtype', 'np.dtype', (['dtype'], {}), '(dtype)\n', (18898, 18905), True, 'import numpy as np\n'), ((19151, 19174), 'theano.tensor.triu', 'tt.triu', (['m_symb', 'k_symb'], {}), '(m_symb, k_symb)\n', 
(19158, 19174), True, 'import theano.tensor as tt\n'), ((19265, 19278), 'numpy.triu', 'np.triu', (['m', 'k'], {}), '(m, k)\n', (19272, 19278), True, 'import numpy as np\n'), ((19311, 19326), 'numpy.dtype', 'np.dtype', (['dtype'], {}), '(dtype)\n', (19319, 19326), True, 'import numpy as np\n'), ((19477, 19493), 'tests.unittest_tools.fetch_seed', 'utt.fetch_seed', ([], {}), '()\n', (19491, 19493), True, 'from tests import unittest_tools as utt\n'), ((20589, 20632), 'theano.tensor.tri', 'tt.tri', (['N_symb', 'M_symb', 'k_symb'], {'dtype': 'dtype'}), '(N_symb, M_symb, k_symb, dtype=dtype)\n', (20595, 20632), True, 'import theano.tensor as tt\n'), ((20843, 20872), 'numpy.tri', 'np.tri', (['N', 'M_', 'k'], {'dtype': 'dtype'}), '(N, M_, k, dtype=dtype)\n', (20849, 20872), True, 'import numpy as np\n'), ((20905, 20920), 'numpy.dtype', 'np.dtype', (['dtype'], {}), '(dtype)\n', (20913, 20920), True, 'import numpy as np\n'), ((2968, 2985), 'pytest.skip', 'pytest.skip', (['skip'], {}), '(skip)\n', (2979, 2985), False, 'import pytest\n'), ((3339, 3357), 'theano.shared', 'theano.shared', (['inp'], {}), '(inp)\n', (3352, 3357), False, 'import theano\n'), ((3403, 3421), 'theano.shared', 'theano.shared', (['inp'], {}), '(inp)\n', (3416, 3421), False, 'import theano\n'), ((3486, 3522), 'tests.tensor.utils.safe_make_node', 'safe_make_node', (['self.op', '*inputs_ref'], {}), '(self.op, *inputs_ref)\n', (3500, 3522), False, 'from tests.tensor.utils import rand, safe_make_node\n'), ((3550, 3586), 'tests.tensor.utils.safe_make_node', 'safe_make_node', (['self.op', '*inputs_tst'], {}), '(self.op, *inputs_tst)\n', (3564, 3586), False, 'from tests.tensor.utils import rand, safe_make_node\n'), ((8681, 8693), 'theano.tensor.basic.alloc', 'alloc', (['*args'], {}), '(*args)\n', (8686, 8693), False, 'from theano.tensor.basic import alloc\n'), ((9533, 9578), 'theano.gpuarray.basic_ops.GpuAllocEmpty', 'GpuAllocEmpty', (['dt'], {'context_name': 'test_ctx_name'}), '(dt, context_name=test_ctx_name)\n', (9546, 9578), False, 'from theano.gpuarray.basic_ops import GpuAlloc, GpuAllocEmpty, GpuContiguous, GpuEye, GpuFromHost, GpuJoin, GpuReshape, GpuSplit, GpuToGpu, GpuTri, HostFromGpu, gpu_contiguous, gpu_join, host_from_gpu\n'), ((9782, 9820), 'theano.gpuarray.basic_ops.GpuAllocEmpty', 'GpuAllocEmpty', (['"""uint64"""', 'test_ctx_name'], {}), "('uint64', test_ctx_name)\n", (9795, 9820), False, 'from theano.gpuarray.basic_ops import GpuAlloc, GpuAllocEmpty, GpuContiguous, GpuEye, GpuFromHost, GpuJoin, GpuReshape, GpuSplit, GpuToGpu, GpuTri, HostFromGpu, gpu_contiguous, gpu_join, host_from_gpu\n'), ((9840, 9878), 'theano.gpuarray.basic_ops.GpuAllocEmpty', 'GpuAllocEmpty', (['"""uint64"""', 'test_ctx_name'], {}), "('uint64', test_ctx_name)\n", (9853, 9878), False, 'from theano.gpuarray.basic_ops import GpuAlloc, GpuAllocEmpty, GpuContiguous, GpuEye, GpuFromHost, GpuJoin, GpuReshape, GpuSplit, GpuToGpu, GpuTri, HostFromGpu, gpu_contiguous, gpu_join, host_from_gpu\n'), ((13260, 13276), 'tests.unittest_tools.fetch_seed', 'utt.fetch_seed', ([], {}), '()\n', (13274, 13276), True, 'from tests import unittest_tools as utt\n'), ((14032, 14048), 'theano.tensor.zeros_like', 'tt.zeros_like', (['a'], {}), '(a)\n', (14045, 14048), True, 'import theano.tensor as tt\n'), ((14050, 14065), 'theano.tensor.ones_like', 'tt.ones_like', (['b'], {}), '(b)\n', (14062, 14065), True, 'import theano.tensor as tt\n'), ((14274, 14290), 'theano.tensor.zeros_like', 'tt.zeros_like', (['a'], {}), '(a)\n', (14287, 14290), True, 'import theano.tensor as tt\n'), 
((14292, 14307), 'theano.tensor.ones_like', 'tt.ones_like', (['b'], {}), '(b)\n', (14304, 14307), True, 'import theano.tensor as tt\n'), ((17079, 17105), 'theano.gpuarray.basic_ops.GpuFromHost', 'GpuFromHost', (['test_ctx_name'], {}), '(test_ctx_name)\n', (17090, 17105), False, 'from theano.gpuarray.basic_ops import GpuAlloc, GpuAllocEmpty, GpuContiguous, GpuEye, GpuFromHost, GpuJoin, GpuReshape, GpuSplit, GpuToGpu, GpuTri, HostFromGpu, gpu_contiguous, gpu_join, host_from_gpu\n'), ((8770, 8776), 'tests.tensor.utils.rand', 'rand', ([], {}), '()\n', (8774, 8776), False, 'from tests.tensor.utils import rand, safe_make_node\n'), ((8778, 8789), 'numpy.int32', 'np.int32', (['(7)'], {}), '(7)\n', (8786, 8789), True, 'import numpy as np\n'), ((8934, 8940), 'tests.tensor.utils.rand', 'rand', ([], {}), '()\n', (8938, 8940), False, 'from tests.tensor.utils import rand, safe_make_node\n'), ((8942, 8953), 'numpy.int32', 'np.int32', (['(4)'], {}), '(4)\n', (8950, 8953), True, 'import numpy as np\n'), ((8955, 8966), 'numpy.int32', 'np.int32', (['(7)'], {}), '(7)\n', (8963, 8966), True, 'import numpy as np\n'), ((8988, 8995), 'tests.tensor.utils.rand', 'rand', (['(7)'], {}), '(7)\n', (8992, 8995), False, 'from tests.tensor.utils import rand, safe_make_node\n'), ((8997, 9008), 'numpy.int32', 'np.int32', (['(4)'], {}), '(4)\n', (9005, 9008), True, 'import numpy as np\n'), ((9010, 9021), 'numpy.int32', 'np.int32', (['(7)'], {}), '(7)\n', (9018, 9021), True, 'import numpy as np\n'), ((9043, 9050), 'tests.tensor.utils.rand', 'rand', (['(7)'], {}), '(7)\n', (9047, 9050), False, 'from tests.tensor.utils import rand, safe_make_node\n'), ((9052, 9063), 'numpy.int32', 'np.int32', (['(2)'], {}), '(2)\n', (9060, 9063), True, 'import numpy as np\n'), ((9065, 9076), 'numpy.int32', 'np.int32', (['(4)'], {}), '(4)\n', (9073, 9076), True, 'import numpy as np\n'), ((9078, 9089), 'numpy.int32', 'np.int32', (['(7)'], {}), '(7)\n', (9086, 9089), True, 'import numpy as np\n'), ((9111, 9121), 'tests.tensor.utils.rand', 'rand', (['(4)', '(7)'], {}), '(4, 7)\n', (9115, 9121), False, 'from tests.tensor.utils import rand, safe_make_node\n'), ((9123, 9134), 'numpy.int32', 'np.int32', (['(2)'], {}), '(2)\n', (9131, 9134), True, 'import numpy as np\n'), ((9136, 9147), 'numpy.int32', 'np.int32', (['(4)'], {}), '(4)\n', (9144, 9147), True, 'import numpy as np\n'), ((9149, 9160), 'numpy.int32', 'np.int32', (['(7)'], {}), '(7)\n', (9157, 9160), True, 'import numpy as np\n'), ((9184, 9191), 'tests.tensor.utils.rand', 'rand', (['(7)'], {}), '(7)\n', (9188, 9191), False, 'from tests.tensor.utils import rand, safe_make_node\n'), ((9193, 9204), 'numpy.int32', 'np.int32', (['(7)'], {}), '(7)\n', (9201, 9204), True, 'import numpy as np\n'), ((9206, 9217), 'numpy.int32', 'np.int32', (['(5)'], {}), '(5)\n', (9214, 9217), True, 'import numpy as np\n'), ((15425, 15436), 'numpy.array', 'np.array', (['(1)'], {}), '(1)\n', (15433, 15436), True, 'import numpy as np\n'), ((15573, 15584), 'numpy.array', 'np.array', (['(1)'], {}), '(1)\n', (15581, 15584), True, 'import numpy as np\n'), ((20635, 20646), 'numpy.array', 'np.array', (['(1)'], {}), '(1)\n', (20643, 20646), True, 'import numpy as np\n'), ((20782, 20793), 'numpy.array', 'np.array', (['(1)'], {}), '(1)\n', (20790, 20793), True, 'import numpy as np\n'), ((3168, 3217), 'numpy.asarray', 'np.asarray', (['inputs[_]'], {'dtype': 'theano.config.floatX'}), '(inputs[_], dtype=theano.config.floatX)\n', (3178, 3217), True, 'import numpy as np\n'), ((5709, 5756), 'theano.tensor.TensorType.values_eq_approx', 
'TensorType.values_eq_approx', (['variable', 'expected'], {}), '(variable, expected)\n', (5736, 5756), False, 'from theano.tensor import TensorType\n')] |
from pytube import YouTube
def download_video(watch_url):
    """Download the highest-resolution progressive (muxed audio+video) MP4 stream for the given YouTube URL."""
yt = YouTube(watch_url)
(yt.streams
.filter(progressive=True, file_extension='mp4')
.order_by('resolution')
.desc()
.first()
.download())
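# Hedged usage sketch (added; not part of the original snippet). The URL below is only a
# placeholder, and an installed `pytube` plus network access are assumed; by default the
# stream's download() writes the file to the current working directory.
if __name__ == '__main__':
    download_video('https://www.youtube.com/watch?v=<VIDEO_ID>')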
| [
"pytube.YouTube"
] | [((68, 86), 'pytube.YouTube', 'YouTube', (['watch_url'], {}), '(watch_url)\n', (75, 86), False, 'from pytube import YouTube\n')] |
from easydict import EasyDict
hopper_ppo_default_config = dict(
env=dict(
env_id='HopperMuJoCoEnv-v0',
norm_obs=dict(use_norm=False, ),
norm_reward=dict(use_norm=False, ),
collector_env_num=8,
evaluator_env_num=10,
use_act_scale=True,
n_evaluator_episode=10,
stop_value=3000,
),
policy=dict(
cuda=True,
on_policy=True,
recompute_adv=True,
model=dict(
obs_shape=11,
action_shape=3,
continuous=True,
),
continuous=True,
learn=dict(
epoch_per_collect=10,
batch_size=64,
learning_rate=3e-4,
value_weight=0.5,
entropy_weight=0.0,
clip_ratio=0.2,
adv_norm=True,
value_norm=True,
),
collect=dict(
n_sample=2048,
unroll_len=1,
discount_factor=0.99,
gae_lambda=0.97,
),
eval=dict(evaluator=dict(eval_freq=5000, )),
other=dict(replay_buffer=dict(
replay_buffer_size=10000,
replay_buffer_start_size=0,
), ),
),
)
hopper_ppo_default_config = EasyDict(hopper_ppo_default_config)
main_config = hopper_ppo_default_config
hopper_ppo_create_default_config = dict(
env=dict(
type='pybullet',
import_names=['dizoo.pybullet.envs.pybullet_env'],
),
env_manager=dict(type='subprocess'),
policy=dict(
type='ppo',
import_names=['ding.policy.ppo'],
),
replay_buffer=dict(type='naive', ),
)
hopper_ppo_create_default_config = EasyDict(hopper_ppo_create_default_config)
create_config = hopper_ppo_create_default_config
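# Hedged illustration (added; not part of the original config). EasyDict recursively wraps
# the nested dicts, so downstream code can read these settings with attribute access:
if __name__ == '__main__':
    print(main_config.policy.learn.batch_size)  # 64
    print(create_config.policy.type)            # 'ppo'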
| [
"easydict.EasyDict"
] | [((1219, 1254), 'easydict.EasyDict', 'EasyDict', (['hopper_ppo_default_config'], {}), '(hopper_ppo_default_config)\n', (1227, 1254), False, 'from easydict import EasyDict\n'), ((1646, 1688), 'easydict.EasyDict', 'EasyDict', (['hopper_ppo_create_default_config'], {}), '(hopper_ppo_create_default_config)\n', (1654, 1688), False, 'from easydict import EasyDict\n')] |
import os
from tempfile import TemporaryDirectory
from quickbase_client.utils.pywriting_utils import BasicPyFileWriter
from quickbase_client.utils.pywriting_utils import PyPackageWriter
class TestBasicFileWriter:
def test_outputs_lines(self):
w = BasicPyFileWriter()
w.add_line('import abc')
w.add_line('import os').space()
s = w.get_file_as_string()
assert s == 'import abc\nimport os\n'
def test_indent_dedent(self):
w = BasicPyFileWriter()
w.add_line('def foo():').indent().add_line('return 5').dedent().space()
s = w.get_file_as_string()
assert s == 'def foo():\n return 5\n'
def test_use_refs(self):
w = BasicPyFileWriter()
w.add_line('a = "A"')
ref = w.make_ref()
w.add_line('d = "D"')
ref.add_line('b = "B"').add_line('c = "C"')
s = w.get_file_as_string()
lns = s.split('\n')
assert 'a' in lns[0]
assert 'b' in lns[1]
assert 'c' in lns[2]
assert 'd' in lns[3]
class TestPyPackageWriter:
def test_includes_init(self):
with TemporaryDirectory() as d:
w = PyPackageWriter(pkg_name='foo', parent_dir=d)
assert '__init__' in w.modules
assert w.has_module_name('__init__')
assert w.pkg_path == os.path.join(d, 'foo')
w.write()
assert os.path.exists(d)
assert os.path.exists(os.path.join(d, 'foo'))
assert os.path.exists(os.path.join(d, 'foo', '__init__.py'))
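# (Added note.) Taken together, the assertions above pin down the writers' behaviour:
# indent() prefixes subsequent lines with four spaces, space() appends a blank line,
# make_ref() returns an anchor whose later add_line() calls are spliced in at the point
# where the ref was created, and PyPackageWriter always creates an __init__.py module.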
| [
"tempfile.TemporaryDirectory",
"os.path.exists",
"quickbase_client.utils.pywriting_utils.PyPackageWriter",
"os.path.join",
"quickbase_client.utils.pywriting_utils.BasicPyFileWriter"
] | [((263, 282), 'quickbase_client.utils.pywriting_utils.BasicPyFileWriter', 'BasicPyFileWriter', ([], {}), '()\n', (280, 282), False, 'from quickbase_client.utils.pywriting_utils import BasicPyFileWriter\n'), ((484, 503), 'quickbase_client.utils.pywriting_utils.BasicPyFileWriter', 'BasicPyFileWriter', ([], {}), '()\n', (501, 503), False, 'from quickbase_client.utils.pywriting_utils import BasicPyFileWriter\n'), ((710, 729), 'quickbase_client.utils.pywriting_utils.BasicPyFileWriter', 'BasicPyFileWriter', ([], {}), '()\n', (727, 729), False, 'from quickbase_client.utils.pywriting_utils import BasicPyFileWriter\n'), ((1125, 1145), 'tempfile.TemporaryDirectory', 'TemporaryDirectory', ([], {}), '()\n', (1143, 1145), False, 'from tempfile import TemporaryDirectory\n'), ((1168, 1213), 'quickbase_client.utils.pywriting_utils.PyPackageWriter', 'PyPackageWriter', ([], {'pkg_name': '"""foo"""', 'parent_dir': 'd'}), "(pkg_name='foo', parent_dir=d)\n", (1183, 1213), False, 'from quickbase_client.utils.pywriting_utils import PyPackageWriter\n'), ((1403, 1420), 'os.path.exists', 'os.path.exists', (['d'], {}), '(d)\n', (1417, 1420), False, 'import os\n'), ((1339, 1361), 'os.path.join', 'os.path.join', (['d', '"""foo"""'], {}), "(d, 'foo')\n", (1351, 1361), False, 'import os\n'), ((1455, 1477), 'os.path.join', 'os.path.join', (['d', '"""foo"""'], {}), "(d, 'foo')\n", (1467, 1477), False, 'import os\n'), ((1513, 1550), 'os.path.join', 'os.path.join', (['d', '"""foo"""', '"""__init__.py"""'], {}), "(d, 'foo', '__init__.py')\n", (1525, 1550), False, 'import os\n')] |
from functools import partial
from corpustools.corpus.classes import Word
from corpustools.symbolsim.edit_distance import edit_distance
from corpustools.symbolsim.khorsi import khorsi
from corpustools.symbolsim.phono_edit_distance import phono_edit_distance
from corpustools.symbolsim.phono_align import Aligner
from corpustools.multiproc import filter_mp, score_mp
def _is_edit_distance_neighbor(w, query, sequence_type, max_distance):
w_len = len(getattr(w, sequence_type))
query_len = len(getattr(query, sequence_type))
if w_len > query_len+max_distance:
return False
if w_len < query_len-max_distance:
return False
return edit_distance(getattr(w, sequence_type), getattr(query, sequence_type),
sequence_type, max_distance) <= max_distance
def _is_phono_edit_distance_neighbor(w, query, sequence_type, specifier, max_distance):
return phono_edit_distance(getattr(w, sequence_type), getattr(query, sequence_type), sequence_type, specifier) <= max_distance
def _is_khorsi_neighbor(w, query, freq_base, sequence_type, max_distance):
return khorsi(getattr(w, sequence_type), getattr(query, sequence_type), freq_base, sequence_type, max_distance) >= max_distance
def neighborhood_density_all_words(corpus_context, tierdict, tier_type = None, sequence_type = None,
algorithm = 'edit_distance', max_distance = 1, output_format = 'spelling',
num_cores = -1, settable_attr = None, collapse_homophones = False,
stop_check = None, call_back = None):
"""Calculate the neighborhood density of all words in the corpus and
adds them as attributes of the words.
Parameters
----------
corpus_context : CorpusContext
Context manager for a corpus
algorithm : str
The algorithm used to determine distance
max_distance : float, optional
Maximum edit distance from the queried word to consider a word a neighbor.
stop_check : callable, optional
Optional function to check whether to gracefully terminate early
call_back : callable, optional
Optional function to supply progress information during the function
settable_attr: string
Name of attribute that neighbourhood density results will be assigned to
"""
function = partial(neighborhood_density, corpus_context,
tierdict = tierdict,
tier_type = tier_type,
sequence_type = sequence_type,
algorithm = algorithm,
max_distance = max_distance,
collapse_homophones = collapse_homophones)
if call_back is not None:
call_back('Calculating neighborhood densities...')
call_back(0,len(corpus_context))
cur = 0
results = dict()
last_value_removed = None
last_key_removed = None
if num_cores == -1 or num_cores == 1:
for w in corpus_context:
if stop_check is not None and stop_check():
return
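            # (Added note) The pop/append bookkeeping below temporarily removes the current
            # word from tierdict so it is not counted as its own neighbour, and restores the
            # previously removed entry at the top of the next iteration.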
if last_value_removed:
tierdict[last_key_removed].append(last_value_removed)
w_sequence = getattr(w, corpus_context.sequence_type)
last_key_removed = str(w_sequence)
for i, item in enumerate(tierdict[last_key_removed]):
if str(item) == str(w):
last_value_removed = tierdict[last_key_removed].pop(i)
break
res = neighborhood_density(corpus_context, w, tierdict,
tier_type = tier_type,
sequence_type = sequence_type,
algorithm = algorithm,
max_distance = max_distance,
collapse_homophones = collapse_homophones)
results[str(w)] = [getattr(r, output_format) for r in res[1]]
setattr(w.original, settable_attr.name, res[0])
# for w in corpus_context:
# if stop_check is not None and stop_check():
# return
# cur += 1
# call_back(cur)
# res = function(w)
# results[str(w)] = [getattr(r, output_format) for r in res[1]]
# setattr(w.original, settable_attr.name, res[0]-1)
# #the -1 is to account for the fact that words are counted as their own neighbour, and this is incorrect
# #subtracting 1 here is easier than fixing the neighbourhood density algorithm
else:
iterable = ((w,) for w in corpus_context)
neighbors = score_mp(iterable, function, num_cores, call_back, stop_check, chunk_size = 1)
for n in neighbors:
#Have to look up the key, then look up the object due to how
#multiprocessing pickles objects
setattr(corpus_context.corpus.find(corpus_context.corpus.key(n[0])),
#corpus_context.attribute.name, n[1][0])
settable_attr.name, n[1][0])
return results
def neighborhood_density(corpus_context, query, tierdict,
algorithm = 'edit_distance', max_distance = 1, collapse_homophones = False,
force_quadratic = False, file_type = None, tier_type=None, sequence_type = None,
stop_check = None, call_back = None):
"""Calculate the neighborhood density of a particular word in the corpus.
Parameters
----------
corpus_context : CorpusContext
Context manager for a corpus
query : Word
The word whose neighborhood density to calculate.
algorithm : str
The algorithm used to determine distance
max_distance : float, optional
Maximum edit distance from the queried word to consider a word a neighbor
force_quadratic : bool
Force use of the less efficient quadratic algorithm even when finding edit
distance of 1 neighborhoods
stop_check : callable, optional
Optional function to check whether to gracefully terminate early
call_back : callable, optional
Optional function to supply progress information during the function
Returns
-------
tuple(int, set)
Tuple of the number of neighbors and the set of neighbor Words.
"""
matches = []
query = ensure_query_is_word(query, corpus_context, corpus_context.sequence_type, tier_type)
if call_back is not None:
call_back('Finding neighbors for {}...'.format(query))
call_back(0,len(corpus_context))
cur = 0
if algorithm == 'edit_distance' and max_distance == 1 and not force_quadratic:
return fast_neighborhood_density(corpus_context, query, corpus_context.sequence_type, tier_type, tierdict,
file_type=file_type, collapse_homophones=collapse_homophones)
if algorithm == 'edit_distance':
is_neighbor = partial(_is_edit_distance_neighbor,
sequence_type = corpus_context.sequence_type,
max_distance = max_distance)
elif algorithm == 'phono_edit_distance':
is_neighbor = partial(_is_phono_edit_distance_neighbor,
specifier = corpus_context.specifier,
sequence_type = corpus_context.sequence_type,
max_distance = max_distance)
elif algorithm == 'khorsi':
freq_base = corpus_context.get_frequency_base()
is_neighbor = partial(_is_khorsi_neighbor,
freq_base = freq_base,
sequence_type = corpus_context.sequence_type,
max_distance = max_distance)
for w in corpus_context:
if stop_check is not None and stop_check():
return
if call_back is not None:
cur += 1
if cur % 10 == 0:
call_back(cur)
if not is_neighbor(w, query):
continue
matches.append(w)
neighbors = set(matches)-set([query])
return (len(neighbors), neighbors)
def fast_neighborhood_density(corpus_context, query, sequence_type, tier_type,
tierdict, file_type=None, trans_delimiter='.', collapse_homophones = False):
"""Generates all neighbors of edit distance <= 1 and searches
for them in corpus_context.
Will be faster than neighborhood_density when:
n > m * (1 + s), where
n: number of words in corpus
m: length of query
s: size of segment inventory
"""
neighbors = list()
query = ensure_query_is_word(query, corpus_context, sequence_type, tier_type, file_type=file_type)
for candidate in generate_neighbor_candidates(corpus_context, query, sequence_type):
if tier_type.att_type == 'tier':
cand_str = trans_delimiter.join(candidate)
else:
cand_str = ''.join(candidate)
if cand_str in tierdict:
for w in tierdict[cand_str]:
w_sequence = getattr(w, sequence_type)
if collapse_homophones and any(getattr(word, sequence_type) == w_sequence for word in neighbors):
continue
else:
neighbors.append(w)
return (len(neighbors), neighbors)
def generate_neighbor_candidates(corpus_context, query, sequence_type):
sequence = getattr(query, sequence_type)
yield [str(c) for c in sequence]
for i in range(len(sequence)):
yield [str(c) for c in sequence[:i]] + [str(c) for c in sequence[i+1:]] # deletion
for char in corpus_context.inventory:
if str(char) not in ['#', sequence[i]]:
yield [str(c) for c in sequence[:i]] + [str(char)] + [str(c) for c in sequence[i:]] # insertion
yield [str(c) for c in sequence[:i]] + [str(char)] + [str(c) for c in sequence[i+1:]] # substitution
for char in corpus_context.inventory: # final pass to get insertion at len+1
if str(char) not in ['#', sequence[i]]:
yield [str(c) for c in sequence[:]] + [str(char)] # insertion
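# (Added note, hedged.) For a query of length m over an inventory of s segments, the
# generator above yields roughly m deletions, (m+1)*s insertions and m*s substitutions,
# i.e. O(m*s) candidate strings, which is why the dictionary lookups used by
# fast_neighborhood_density beat the O(n) scan over the corpus once n > m*(1+s).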
def find_mutation_minpairs_all_words(corpus_context, tierdict, tier_type = None, num_cores = -1, collapse_homophones = False,
stop_check = None, call_back = None):
function = partial(find_mutation_minpairs, corpus_context, tier_type=tier_type, collapse_homophones = collapse_homophones)
if call_back is not None:
call_back('Calculating neighborhood densities...')
call_back(0,len(corpus_context))
cur = 0
results = dict()
last_value_removed = None
last_key_removed = None
if num_cores == -1 or num_cores == 1:
for w in corpus_context:
if stop_check is not None and stop_check():
return
if last_value_removed:
tierdict[last_key_removed].append(last_value_removed)
w_sequence = getattr(w, corpus_context.sequence_type)
last_key_removed = str(w_sequence)
for i, item in enumerate(tierdict[last_key_removed]):
if str(item) == str(w):
last_value_removed = tierdict[last_key_removed].pop(i)
break
res = find_mutation_minpairs(corpus_context, w,
tier_type=tier_type, collapse_homophones = collapse_homophones)
results[str(w)] = res[1]
setattr(w.original, corpus_context.attribute.name, res[0])
# for w in corpus_context:
# if stop_check is not None and stop_check():
# return
# cur += 1
# call_back(cur)
# res = function(w)
# results[str(w)] = res[1]#[str(r) for r in res[1]]
# setattr(w.original, corpus_context.attribute.name, res[0])
else:
iterable = ((w,) for w in corpus_context)
neighbors = score_mp(iterable, function, num_cores, call_back, stop_check, chunk_size= 1)
for n in neighbors:
#Have to look up the key, then look up the object due to how
#multiprocessing pickles objects
setattr(corpus_context.corpus.find(corpus_context.corpus.key(n[0])), corpus_context.attribute.name, n[1][0])
return results
def find_mutation_minpairs(corpus_context, query, tier_type = None, collapse_homophones = False,
stop_check = None, call_back = None):
"""Find all minimal pairs of the query word based only on segment
mutations (not deletions/insertions)
Parameters
----------
corpus_context : CorpusContext
Context manager for a corpus
query : Word
The word whose minimal pairs to find
stop_check : callable or None
Optional function to check whether to gracefully terminate early
call_back : callable or None
Optional function to supply progress information during the function
Returns
-------
list
The found minimal pairs for the queried word
"""
matches = []
sequence_type = corpus_context.sequence_type
query = ensure_query_is_word(query, corpus_context, corpus_context.sequence_type, tier_type)
if call_back is not None:
call_back('Finding neighbors...')
call_back(0,len(corpus_context))
cur = 0
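    # Insertion and deletion penalties are infinite, so the aligner is only allowed
    # to use substitutions (segment mutations); a candidate is kept below only when
    # the resulting alignment score m[-1][-1]['f'] equals exactly 1.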
al = Aligner(features_tf=False, ins_penalty=float('inf'), del_penalty=float('inf'), sub_penalty=1)
for w in corpus_context:
w_sequence = getattr(w, sequence_type)
query_sequence = getattr(query, sequence_type)
if stop_check is not None and stop_check():
return
if call_back is not None:
cur += 1
if cur % 10 == 0:
call_back(cur)
if (len(w_sequence) > len(query_sequence)+1 or
len(w_sequence) < len(query_sequence)-1):
continue
m = al.make_similarity_matrix(query_sequence, w_sequence)
if m[-1][-1]['f'] != 1:
continue
w_sequence = getattr(w, sequence_type)
if collapse_homophones and any(getattr(m, sequence_type) == w_sequence for m in matches):
continue
else:
#matches.append(str(w_sequence))
matches.append(w)
matches = [m.spelling for m in matches]
neighbors = list(set(matches)-set([str(query_sequence)]))
return (len(neighbors), neighbors)
def ensure_query_is_word(query, corpus, sequence_type, tier_type, trans_delimiter='.', file_type=None):
if isinstance(query, Word):
query_word = query
else:
if tier_type.att_type == 'spelling':
if file_type == sequence_type:
query_word = Word(**{sequence_type: list(query)})
else:
query_word = query.replace(trans_delimiter, '')
query_word = Word(**{sequence_type: list(query_word)})
elif tier_type.att_type == 'tier':
if file_type == sequence_type:
                query_with_td = '.'.join(query) if '.' not in query else query
for entry in corpus:
corpus_word_with_td = str(getattr(entry, sequence_type))
                    if query_with_td == corpus_word_with_td: # if a word in corpus has the same transcription
return entry # that word in the corpus is to be referred to.
# the following should be run if no word found in corpus with the transcription
new_query = parse(query, trans_delimiter)
query_word = Word(**{sequence_type: new_query})
else: # if file contains spelling
try:
query_word = corpus.corpus.find(query)
except KeyError:
# if the word in the file can't be found in the corpus
new_query = parse(query, trans_delimiter)
query_word = Word(**{sequence_type: list(new_query)})
return query_word
def parse(word, delimiter):
return word.split(delimiter) if delimiter in word else list(word) | [
"corpustools.corpus.classes.Word",
"functools.partial",
"corpustools.multiproc.score_mp"
] | [((2313, 2525), 'functools.partial', 'partial', (['neighborhood_density', 'corpus_context'], {'tierdict': 'tierdict', 'tier_type': 'tier_type', 'sequence_type': 'sequence_type', 'algorithm': 'algorithm', 'max_distance': 'max_distance', 'collapse_homophones': 'collapse_homophones'}), '(neighborhood_density, corpus_context, tierdict=tierdict, tier_type=\n tier_type, sequence_type=sequence_type, algorithm=algorithm,\n max_distance=max_distance, collapse_homophones=collapse_homophones)\n', (2320, 2525), False, 'from functools import partial\n'), ((10300, 10413), 'functools.partial', 'partial', (['find_mutation_minpairs', 'corpus_context'], {'tier_type': 'tier_type', 'collapse_homophones': 'collapse_homophones'}), '(find_mutation_minpairs, corpus_context, tier_type=tier_type,\n collapse_homophones=collapse_homophones)\n', (10307, 10413), False, 'from functools import partial\n'), ((4584, 4660), 'corpustools.multiproc.score_mp', 'score_mp', (['iterable', 'function', 'num_cores', 'call_back', 'stop_check'], {'chunk_size': '(1)'}), '(iterable, function, num_cores, call_back, stop_check, chunk_size=1)\n', (4592, 4660), False, 'from corpustools.multiproc import filter_mp, score_mp\n'), ((6873, 6984), 'functools.partial', 'partial', (['_is_edit_distance_neighbor'], {'sequence_type': 'corpus_context.sequence_type', 'max_distance': 'max_distance'}), '(_is_edit_distance_neighbor, sequence_type=corpus_context.\n sequence_type, max_distance=max_distance)\n', (6880, 6984), False, 'from functools import partial\n'), ((11910, 11986), 'corpustools.multiproc.score_mp', 'score_mp', (['iterable', 'function', 'num_cores', 'call_back', 'stop_check'], {'chunk_size': '(1)'}), '(iterable, function, num_cores, call_back, stop_check, chunk_size=1)\n', (11918, 11986), False, 'from corpustools.multiproc import filter_mp, score_mp\n'), ((7115, 7273), 'functools.partial', 'partial', (['_is_phono_edit_distance_neighbor'], {'specifier': 'corpus_context.specifier', 'sequence_type': 'corpus_context.sequence_type', 'max_distance': 'max_distance'}), '(_is_phono_edit_distance_neighbor, specifier=corpus_context.\n specifier, sequence_type=corpus_context.sequence_type, max_distance=\n max_distance)\n', (7122, 7273), False, 'from functools import partial\n'), ((7476, 7601), 'functools.partial', 'partial', (['_is_khorsi_neighbor'], {'freq_base': 'freq_base', 'sequence_type': 'corpus_context.sequence_type', 'max_distance': 'max_distance'}), '(_is_khorsi_neighbor, freq_base=freq_base, sequence_type=\n corpus_context.sequence_type, max_distance=max_distance)\n', (7483, 7601), False, 'from functools import partial\n'), ((15562, 15596), 'corpustools.corpus.classes.Word', 'Word', ([], {}), '(**{sequence_type: new_query})\n', (15566, 15596), False, 'from corpustools.corpus.classes import Word\n')] |
import gym
import numpy as np
from itertools import product
import matplotlib.pyplot as plt
def print_policy(Q, env):
""" This is a helper function to print a nice policy from the Q function"""
moves = [u'←', u'↓',u'→', u'↑']
if not hasattr(env, 'desc'):
env = env.env
dims = env.desc.shape
policy = np.chararray(dims, unicode=True)
policy[:] = ' '
for s in range(len(Q)):
idx = np.unravel_index(s, dims)
policy[idx] = moves[np.argmax(Q[s])]
if env.desc[idx] in ['H', 'G']:
policy[idx] = u'·'
print('\n'.join([''.join([u'{:2}'.format(item) for item in row])
for row in policy]))
def plot_V(Q, env):
""" This is a helper function to plot the state values from the Q function"""
fig = plt.figure()
if not hasattr(env, 'desc'):
env = env.env
dims = env.desc.shape
V = np.zeros(dims)
for s in range(len(Q)):
idx = np.unravel_index(s, dims)
V[idx] = np.max(Q[s])
if env.desc[idx] in ['H', 'G']:
V[idx] = 0.
plt.imshow(V, origin='upper',
extent=[0,dims[0],0,dims[1]], vmin=.0, vmax=.6,
cmap=plt.cm.RdYlGn, interpolation='none')
for x, y in product(range(dims[0]), range(dims[1])):
plt.text(y+0.5, dims[0]-x-0.5, '{:.3f}'.format(V[x,y]),
horizontalalignment='center',
verticalalignment='center')
plt.xticks([])
plt.yticks([])
def plot_Q(Q, env):
""" This is a helper function to plot the Q function """
from matplotlib import colors, patches
fig = plt.figure()
ax = fig.gca()
if not hasattr(env, 'desc'):
env = env.env
dims = env.desc.shape
up = np.array([[0, 1], [0.5, 0.5], [1,1]])
down = np.array([[0, 0], [0.5, 0.5], [1,0]])
left = np.array([[0, 0], [0.5, 0.5], [0,1]])
right = np.array([[1, 0], [0.5, 0.5], [1,1]])
tri = [left, down, right, up]
pos = [[0.2, 0.5], [0.5, 0.2], [0.8, 0.5], [0.5, 0.8]]
cmap = plt.cm.RdYlGn
norm = colors.Normalize(vmin=.0,vmax=.6)
ax.imshow(np.zeros(dims), origin='upper', extent=[0,dims[0],0,dims[1]], vmin=.0, vmax=.6, cmap=cmap)
ax.grid(which='major', color='black', linestyle='-', linewidth=2)
for s in range(len(Q)):
idx = np.unravel_index(s, dims)
x, y = idx
if env.desc[idx] in ['H', 'G']:
ax.add_patch(patches.Rectangle((y, 3-x), 1, 1, color=cmap(.0)))
plt.text(y+0.5, dims[0]-x-0.5, '{:.2f}'.format(.0),
horizontalalignment='center',
verticalalignment='center')
continue
for a in range(len(tri)):
ax.add_patch(patches.Polygon(tri[a] + np.array([y, 3-x]), color=cmap(Q[s][a])))
plt.text(y+pos[a][0], dims[0]-1-x+pos[a][1], '{:.2f}'.format(Q[s][a]),
horizontalalignment='center', verticalalignment='center',
fontsize=9, fontweight=('bold' if Q[s][a] == np.max(Q[s]) else 'normal'))
plt.xticks([])
plt.yticks([])
def choose_abs_greedy_action(state, Q, epsilon):
action = None
if np.random.uniform(0, 1) < epsilon:
action = np.random.randint(env.action_space.n)
else:
action = np.argmax(Q[state,:])
return action
def max_action_state(state, Q):
action = np.argmax(Q[state,:])
return Q[state, action]
def sarsa(env, alpha=0.1, gamma=0.9, epsilon=0.1, num_ep=int(1e4)):
#Q = np.zeros((env.observation_space.n, env.action_space.n))
Q = np.random.rand(env.observation_space.n, env.action_space.n)
    # SARSA: on-policy TD control with an epsilon-greedy behaviour policy.
for i in range(num_ep):
s = env.reset()
done = False
a = choose_abs_greedy_action(s, Q, epsilon)
while not done:
s_, r, done, _ = env.step(a)
a_ = choose_abs_greedy_action(s_, Q, epsilon)
#update Q using sarsa
Q[s, a] = Q[s, a] + alpha * (r + (gamma * Q[s_,a_]) - Q[s,a])
s = s_
a = a_
return Q
def qlearning(env, alpha=0.1, gamma=0.9, epsilon=0.1, num_ep=int(1e4)):
#Q = np.zeros((env.observation_space.n, env.action_space.n))
Q = np.random.rand(env.observation_space.n, env.action_space.n)
    # Q-learning: off-policy TD control; the target bootstraps on the greedy action.
for i in range(num_ep):
s = env.reset()
done = False
while not done:
a = choose_abs_greedy_action(s, Q, epsilon)
s_, r, done, _ = env.step(a)
#update Q using Q learning
Q[s, a] = Q[s, a] + alpha * (r+ ( gamma * max_action_state(s_, Q)) - Q[s,a] )
s = s_
return Q
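# The two temporal-difference updates implemented above differ only in their bootstrap term:
#   SARSA (on-policy):       Q[s, a] += alpha * (r + gamma * Q[s', a'] - Q[s, a])
#   Q-learning (off-policy): Q[s, a] += alpha * (r + gamma * max_a Q[s', a] - Q[s, a])
# SARSA bootstraps on the action a' actually chosen by the epsilon-greedy policy,
# while Q-learning bootstraps on the greedy maximum over actions in the next state.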
env=gym.make('FrozenLake-v0')
#env=gym.make('FrozenLake-v0', is_slippery=False)
#env=gym.make('FrozenLake-v0', map_name="8x8")
print("Running sarsa...")
Q = sarsa(env)
plot_V(Q, env)
plot_Q(Q, env)
print_policy(Q, env)
plt.show()
print("Running qlearning")
Q = qlearning(env)
plot_V(Q, env)
plot_Q(Q, env)
print_policy(Q, env)
plt.show()
| [
"matplotlib.pyplot.imshow",
"numpy.random.rand",
"matplotlib.pyplot.xticks",
"numpy.argmax",
"numpy.max",
"numpy.array",
"numpy.zeros",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.yticks",
"numpy.unravel_index",
"matplotlib.colors.Normalize",
"numpy.chararray",
"numpy.random.uniform",
"numpy.random.randint",
"gym.make",
"matplotlib.pyplot.show"
] | [((4710, 4735), 'gym.make', 'gym.make', (['"""FrozenLake-v0"""'], {}), "('FrozenLake-v0')\n", (4718, 4735), False, 'import gym\n'), ((4926, 4936), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4934, 4936), True, 'import matplotlib.pyplot as plt\n'), ((5035, 5045), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5043, 5045), True, 'import matplotlib.pyplot as plt\n'), ((330, 362), 'numpy.chararray', 'np.chararray', (['dims'], {'unicode': '(True)'}), '(dims, unicode=True)\n', (342, 362), True, 'import numpy as np\n'), ((780, 792), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (790, 792), True, 'import matplotlib.pyplot as plt\n'), ((882, 896), 'numpy.zeros', 'np.zeros', (['dims'], {}), '(dims)\n', (890, 896), True, 'import numpy as np\n'), ((1063, 1191), 'matplotlib.pyplot.imshow', 'plt.imshow', (['V'], {'origin': '"""upper"""', 'extent': '[0, dims[0], 0, dims[1]]', 'vmin': '(0.0)', 'vmax': '(0.6)', 'cmap': 'plt.cm.RdYlGn', 'interpolation': '"""none"""'}), "(V, origin='upper', extent=[0, dims[0], 0, dims[1]], vmin=0.0,\n vmax=0.6, cmap=plt.cm.RdYlGn, interpolation='none')\n", (1073, 1191), True, 'import matplotlib.pyplot as plt\n'), ((1431, 1445), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[]'], {}), '([])\n', (1441, 1445), True, 'import matplotlib.pyplot as plt\n'), ((1450, 1464), 'matplotlib.pyplot.yticks', 'plt.yticks', (['[]'], {}), '([])\n', (1460, 1464), True, 'import matplotlib.pyplot as plt\n'), ((1601, 1613), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1611, 1613), True, 'import matplotlib.pyplot as plt\n'), ((1725, 1763), 'numpy.array', 'np.array', (['[[0, 1], [0.5, 0.5], [1, 1]]'], {}), '([[0, 1], [0.5, 0.5], [1, 1]])\n', (1733, 1763), True, 'import numpy as np\n'), ((1774, 1812), 'numpy.array', 'np.array', (['[[0, 0], [0.5, 0.5], [1, 0]]'], {}), '([[0, 0], [0.5, 0.5], [1, 0]])\n', (1782, 1812), True, 'import numpy as np\n'), ((1823, 1861), 'numpy.array', 'np.array', (['[[0, 0], [0.5, 0.5], [0, 1]]'], {}), '([[0, 0], [0.5, 0.5], [0, 1]])\n', (1831, 1861), True, 'import numpy as np\n'), ((1873, 1911), 'numpy.array', 'np.array', (['[[1, 0], [0.5, 0.5], [1, 1]]'], {}), '([[1, 0], [0.5, 0.5], [1, 1]])\n', (1881, 1911), True, 'import numpy as np\n'), ((2045, 2081), 'matplotlib.colors.Normalize', 'colors.Normalize', ([], {'vmin': '(0.0)', 'vmax': '(0.6)'}), '(vmin=0.0, vmax=0.6)\n', (2061, 2081), False, 'from matplotlib import colors, patches\n'), ((3027, 3041), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[]'], {}), '([])\n', (3037, 3041), True, 'import matplotlib.pyplot as plt\n'), ((3046, 3060), 'matplotlib.pyplot.yticks', 'plt.yticks', (['[]'], {}), '([])\n', (3056, 3060), True, 'import matplotlib.pyplot as plt\n'), ((3312, 3334), 'numpy.argmax', 'np.argmax', (['Q[state, :]'], {}), '(Q[state, :])\n', (3321, 3334), True, 'import numpy as np\n'), ((3503, 3562), 'numpy.random.rand', 'np.random.rand', (['env.observation_space.n', 'env.action_space.n'], {}), '(env.observation_space.n, env.action_space.n)\n', (3517, 3562), True, 'import numpy as np\n'), ((4240, 4299), 'numpy.random.rand', 'np.random.rand', (['env.observation_space.n', 'env.action_space.n'], {}), '(env.observation_space.n, env.action_space.n)\n', (4254, 4299), True, 'import numpy as np\n'), ((425, 450), 'numpy.unravel_index', 'np.unravel_index', (['s', 'dims'], {}), '(s, dims)\n', (441, 450), True, 'import numpy as np\n'), ((939, 964), 'numpy.unravel_index', 'np.unravel_index', (['s', 'dims'], {}), '(s, dims)\n', (955, 964), True, 'import numpy as np\n'), ((982, 
994), 'numpy.max', 'np.max', (['Q[s]'], {}), '(Q[s])\n', (988, 994), True, 'import numpy as np\n'), ((2098, 2112), 'numpy.zeros', 'np.zeros', (['dims'], {}), '(dims)\n', (2106, 2112), True, 'import numpy as np\n'), ((2302, 2327), 'numpy.unravel_index', 'np.unravel_index', (['s', 'dims'], {}), '(s, dims)\n', (2318, 2327), True, 'import numpy as np\n'), ((3130, 3153), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(1)'], {}), '(0, 1)\n', (3147, 3153), True, 'import numpy as np\n'), ((3176, 3213), 'numpy.random.randint', 'np.random.randint', (['env.action_space.n'], {}), '(env.action_space.n)\n', (3193, 3213), True, 'import numpy as np\n'), ((3232, 3254), 'numpy.argmax', 'np.argmax', (['Q[state, :]'], {}), '(Q[state, :])\n', (3241, 3254), True, 'import numpy as np\n'), ((479, 494), 'numpy.argmax', 'np.argmax', (['Q[s]'], {}), '(Q[s])\n', (488, 494), True, 'import numpy as np\n'), ((2723, 2743), 'numpy.array', 'np.array', (['[y, 3 - x]'], {}), '([y, 3 - x])\n', (2731, 2743), True, 'import numpy as np\n'), ((2993, 3005), 'numpy.max', 'np.max', (['Q[s]'], {}), '(Q[s])\n', (2999, 3005), True, 'import numpy as np\n')] |
# Generated by Django 3.0.4 on 2020-07-14 11:00
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
("core", "0026_auto_20200713_1535"),
("ai_lab", "0002_ailabusecase"),
]
operations = [
migrations.CreateModel(
name="AiLabCaseStudy",
fields=[
(
"articlepage_ptr",
models.OneToOneField(
auto_created=True,
on_delete=django.db.models.deletion.CASCADE,
parent_link=True,
primary_key=True,
serialize=False,
to="core.ArticlePage",
),
),
(
"use_case",
models.ForeignKey(
on_delete=django.db.models.deletion.PROTECT,
to="ai_lab.AiLabUseCase",
),
),
],
options={"abstract": False,},
bases=("core.articlepage", models.Model),
),
]
| [
"django.db.models.OneToOneField",
"django.db.models.ForeignKey"
] | [((463, 633), 'django.db.models.OneToOneField', 'models.OneToOneField', ([], {'auto_created': '(True)', 'on_delete': 'django.db.models.deletion.CASCADE', 'parent_link': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'to': '"""core.ArticlePage"""'}), "(auto_created=True, on_delete=django.db.models.deletion\n .CASCADE, parent_link=True, primary_key=True, serialize=False, to=\n 'core.ArticlePage')\n", (483, 633), False, 'from django.db import migrations, models\n'), ((881, 974), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.PROTECT', 'to': '"""ai_lab.AiLabUseCase"""'}), "(on_delete=django.db.models.deletion.PROTECT, to=\n 'ai_lab.AiLabUseCase')\n", (898, 974), False, 'from django.db import migrations, models\n')] |
# -*- coding: utf-8 -*-
# Copyright (c) 2019, Frappe Technologies and Contributors
# License: MIT. See LICENSE
import unittest
import frappe
from frappe.utils import set_request
from frappe.website.serve import get_response
test_dependencies = ["Blog Post"]
class TestWebsiteRouteMeta(unittest.TestCase):
def test_meta_tag_generation(self):
blogs = frappe.get_all(
"Blog Post", fields=["name", "route"], filters={"published": 1, "route": ("!=", "")}, limit=1
)
blog = blogs[0]
# create meta tags for this route
doc = frappe.new_doc("Website Route Meta")
doc.append("meta_tags", {"key": "type", "value": "blog_post"})
doc.append("meta_tags", {"key": "og:title", "value": "My Blog"})
doc.name = blog.route
doc.insert()
# set request on this route
set_request(path=blog.route)
response = get_response()
self.assertTrue(response.status_code, 200)
html = response.get_data().decode()
self.assertTrue("""<meta name="type" content="blog_post">""" in html)
self.assertTrue("""<meta property="og:title" content="My Blog">""" in html)
def tearDown(self):
frappe.db.rollback()
| [
"frappe.website.serve.get_response",
"frappe.db.rollback",
"frappe.get_all",
"frappe.utils.set_request",
"frappe.new_doc"
] | [((356, 469), 'frappe.get_all', 'frappe.get_all', (['"""Blog Post"""'], {'fields': "['name', 'route']", 'filters': "{'published': 1, 'route': ('!=', '')}", 'limit': '(1)'}), "('Blog Post', fields=['name', 'route'], filters={'published':\n 1, 'route': ('!=', '')}, limit=1)\n", (370, 469), False, 'import frappe\n'), ((537, 573), 'frappe.new_doc', 'frappe.new_doc', (['"""Website Route Meta"""'], {}), "('Website Route Meta')\n", (551, 573), False, 'import frappe\n'), ((778, 806), 'frappe.utils.set_request', 'set_request', ([], {'path': 'blog.route'}), '(path=blog.route)\n', (789, 806), False, 'from frappe.utils import set_request\n'), ((820, 834), 'frappe.website.serve.get_response', 'get_response', ([], {}), '()\n', (832, 834), False, 'from frappe.website.serve import get_response\n'), ((1095, 1115), 'frappe.db.rollback', 'frappe.db.rollback', ([], {}), '()\n', (1113, 1115), False, 'import frappe\n')] |
# Lint as: python3
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for car_layers."""
from lingvo import compat as tf
from lingvo.core import py_utils
from lingvo.core import test_utils
from lingvo.tasks.car import car_layers
class CarLayersTest(test_utils.TestCase):
def _testNestedOutShape(self, p, input_shape, expected_shape):
batch_size, num_points, _ = input_shape
g = tf.Graph()
with g.as_default():
net = p.Instantiate()
input_data = py_utils.NestedMap(
points=tf.random.uniform(input_shape[:-1] + (3,)),
features=tf.random.uniform(input_shape),
padding=tf.zeros((batch_size, num_points), dtype=tf.float32),
label=tf.random.uniform((batch_size,),
minval=0,
maxval=16,
dtype=tf.int32))
result = net.FPropDefaultTheta(input_data)
with self.session(graph=g):
self.evaluate(tf.global_variables_initializer())
np_result = self.evaluate(result)
grouped_points_result = np_result.grouped_points
self.assertEqual(grouped_points_result.features.shape,
expected_shape.grouped_points.features)
self.assertEqual(grouped_points_result.points.shape,
expected_shape.grouped_points.points)
self.assertEqual(grouped_points_result.padding.shape,
expected_shape.grouped_points.padding)
query_points_result = np_result.query_points
self.assertEqual(query_points_result.points.shape,
expected_shape.query_points.points)
self.assertEqual(query_points_result.padding.shape,
expected_shape.query_points.padding)
def testSamplingAndGrouping(self):
for num_points in [1024, 256]:
for input_dims in [3, 6, 9]:
for group_size in [32, 64]:
p = car_layers.SamplingAndGroupingLayer.Params().Set(
name='SampleGroupTest',
num_samples=256,
ball_radius=0.2,
group_size=group_size,
sample_neighbors_uniformly=True)
grouped_points_shape = py_utils.NestedMap(
features=(8, 256, group_size, input_dims),
points=(8, 256, group_size, 3),
padding=(8, 256, group_size))
query_points_shape = py_utils.NestedMap(
points=(8, 256, 3), padding=(8, 256))
expected_shape = py_utils.NestedMap({
'grouped_points': grouped_points_shape,
'query_points': query_points_shape
})
self._testNestedOutShape(p, (8, num_points, input_dims),
expected_shape)
if __name__ == '__main__':
tf.test.main()
| [
"lingvo.compat.random.uniform",
"lingvo.compat.global_variables_initializer",
"lingvo.compat.zeros",
"lingvo.compat.test.main",
"lingvo.tasks.car.car_layers.SamplingAndGroupingLayer.Params",
"lingvo.compat.Graph",
"lingvo.core.py_utils.NestedMap"
] | [((3382, 3396), 'lingvo.compat.test.main', 'tf.test.main', ([], {}), '()\n', (3394, 3396), True, 'from lingvo import compat as tf\n'), ((1039, 1049), 'lingvo.compat.Graph', 'tf.Graph', ([], {}), '()\n', (1047, 1049), True, 'from lingvo import compat as tf\n'), ((1616, 1649), 'lingvo.compat.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (1647, 1649), True, 'from lingvo import compat as tf\n'), ((1159, 1201), 'lingvo.compat.random.uniform', 'tf.random.uniform', (['(input_shape[:-1] + (3,))'], {}), '(input_shape[:-1] + (3,))\n', (1176, 1201), True, 'from lingvo import compat as tf\n'), ((1222, 1252), 'lingvo.compat.random.uniform', 'tf.random.uniform', (['input_shape'], {}), '(input_shape)\n', (1239, 1252), True, 'from lingvo import compat as tf\n'), ((1272, 1324), 'lingvo.compat.zeros', 'tf.zeros', (['(batch_size, num_points)'], {'dtype': 'tf.float32'}), '((batch_size, num_points), dtype=tf.float32)\n', (1280, 1324), True, 'from lingvo import compat as tf\n'), ((1342, 1411), 'lingvo.compat.random.uniform', 'tf.random.uniform', (['(batch_size,)'], {'minval': '(0)', 'maxval': '(16)', 'dtype': 'tf.int32'}), '((batch_size,), minval=0, maxval=16, dtype=tf.int32)\n', (1359, 1411), True, 'from lingvo import compat as tf\n'), ((2799, 2927), 'lingvo.core.py_utils.NestedMap', 'py_utils.NestedMap', ([], {'features': '(8, 256, group_size, input_dims)', 'points': '(8, 256, group_size, 3)', 'padding': '(8, 256, group_size)'}), '(features=(8, 256, group_size, input_dims), points=(8, \n 256, group_size, 3), padding=(8, 256, group_size))\n', (2817, 2927), False, 'from lingvo.core import py_utils\n'), ((2997, 3053), 'lingvo.core.py_utils.NestedMap', 'py_utils.NestedMap', ([], {'points': '(8, 256, 3)', 'padding': '(8, 256)'}), '(points=(8, 256, 3), padding=(8, 256))\n', (3015, 3053), False, 'from lingvo.core import py_utils\n'), ((3096, 3196), 'lingvo.core.py_utils.NestedMap', 'py_utils.NestedMap', (["{'grouped_points': grouped_points_shape, 'query_points': query_points_shape}"], {}), "({'grouped_points': grouped_points_shape, 'query_points':\n query_points_shape})\n", (3114, 3196), False, 'from lingvo.core import py_utils\n'), ((2532, 2576), 'lingvo.tasks.car.car_layers.SamplingAndGroupingLayer.Params', 'car_layers.SamplingAndGroupingLayer.Params', ([], {}), '()\n', (2574, 2576), False, 'from lingvo.tasks.car import car_layers\n')] |
# -*- coding: utf-8 -*-
# @Filename : take_snapshot.py
# @Date : 2019-07-15-13-44
# @Project: ITC-sniff-for-changes-in-directory
# @Author: <NAME>
# @Website: http://itcave.eu
# @Email: <EMAIL>
# @License: MIT
# @Copyright (C) 2019 ITGO <NAME>
# Generic imports
import os
import pickle
import re
import argparse
from datetime import datetime
def clear_path_string(s):
"""
    Replaces runs of characters that are not ASCII letters (and may not be allowed in file names) with '#' and lowercases the result
:param s: path_string
:return: cleaned_path_string
"""
return (re.sub('[^a-zA-Z]+', '#', s)).lower()
def sniff(sniff_path):
"""
Walks the path and stores information about directory content
:param sniff_path: relative or absolute path
:return: void
"""
sniff_path = str(sniff_path).lower()
# Variable in which information will be stored
dir_store = {}
# Recursive loop that walks through all of the subdirectories
for subdir, dirs, files in os.walk(sniff_path):
if subdir not in dir_store:
dir_store[subdir] = {}
dir_store[subdir]['subdirs'] = dirs
dir_store[subdir]['files'] = files
dir_store[subdir]['file_details'] = {}
for file in files:
f_path = os.path.join(subdir, file)
            # The information that will be stored for each file - in this case the last modification date
            # Important: os.path.getmtime works across platforms
modified_date = os.path.getmtime(f_path)
dir_store[subdir]['file_details'][file] = (modified_date,)
# Name of a file in which data will be stored
dump_name = clear_path_string(sniff_path) + '_' + datetime.now().strftime('%Y%m%d%H%M%S')
# Save pickled data
with open(dump_name + '.pkl', 'wb') as output:
pickle.dump(dir_store, output, pickle.HIGHEST_PROTOCOL)
print("Directory Snapshot taken:", dump_name)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Directory Sniffer')
parser.add_argument('path', help='Path to the directory that you want to take a snapshot of')
args = parser.parse_args()
sniff(args.path)
| [
"pickle.dump",
"argparse.ArgumentParser",
"os.path.join",
"os.path.getmtime",
"datetime.datetime.now",
"re.sub",
"os.walk"
] | [((956, 975), 'os.walk', 'os.walk', (['sniff_path'], {}), '(sniff_path)\n', (963, 975), False, 'import os\n'), ((1934, 1990), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Directory Sniffer"""'}), "(description='Directory Sniffer')\n", (1957, 1990), False, 'import argparse\n'), ((1784, 1839), 'pickle.dump', 'pickle.dump', (['dir_store', 'output', 'pickle.HIGHEST_PROTOCOL'], {}), '(dir_store, output, pickle.HIGHEST_PROTOCOL)\n', (1795, 1839), False, 'import pickle\n'), ((533, 561), 're.sub', 're.sub', (['"""[^a-zA-Z]+"""', '"""#"""', 's'], {}), "('[^a-zA-Z]+', '#', s)\n", (539, 561), False, 'import re\n'), ((1234, 1260), 'os.path.join', 'os.path.join', (['subdir', 'file'], {}), '(subdir, file)\n', (1246, 1260), False, 'import os\n'), ((1459, 1483), 'os.path.getmtime', 'os.path.getmtime', (['f_path'], {}), '(f_path)\n', (1475, 1483), False, 'import os\n'), ((1660, 1674), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (1672, 1674), False, 'from datetime import datetime\n')] |
import sys
import pygame
from app_window import App_window
from button import Button
from snake import Snake
from food import Food
from settings import WIDTH, HEIGHT, FONT, BG_COL, QUIT_BUTTON_COLOUR, PLAY_BUTTON_COLOUR, BLACK, FPS, RED
class App:
def __init__(self):
pygame.init()
self.clock = pygame.time.Clock()
self.window = pygame.display.set_mode((WIDTH, HEIGHT))
self.gameover = pygame.font.SysFont("Comicsansms",
90,
bold=False,
italic=True)
self.font = pygame.font.SysFont(FONT, 20, bold=1)
self.running = True
self.state = "intro"
self.intro_buttons = []
self.playing_buttons = []
self.gameover_buttons = []
self.active_buttons = self.intro_buttons
self.app_window = App_window(self)
self.snake = Snake(self)
self.food = Food(self)
self.make_buttons()
def make_buttons(self):
# INTRO PLAY AND QUIT BUTTON
intro_play_button = Button(self,
50,
300,
WIDTH - 100,
50,
PLAY_BUTTON_COLOUR,
hover_colour=(49, 218, 46),
function=self.intro_to_play,
text="PLAY")
self.intro_buttons.append(intro_play_button)
intro_quit_button = Button(self,
50,
HEIGHT - 100,
WIDTH - 100,
50,
QUIT_BUTTON_COLOUR,
hover_colour=(219, 53, 43),
function=self.intro_quit,
text="QUIT")
self.intro_buttons.append(intro_quit_button)
# PLAYING QUIT BUTTON
playing_quit_button = Button(self, (WIDTH // 2) - 50,
20,
100,
33,
QUIT_BUTTON_COLOUR,
hover_colour=(219, 53, 43),
function=self.playing_quit,
text="QUIT")
self.playing_buttons.append(playing_quit_button)
# GAMEOVER BUTTON
gameover_play_again_button = Button(self,
50,
300,
WIDTH - 100,
50,
PLAY_BUTTON_COLOUR,
hover_colour=(36, 183, 23),
function=self.reset,
text="PLAY AGAIN")
self.gameover_buttons.append(gameover_play_again_button)
gameover_quit_button = Button(self,
50,
HEIGHT - 100,
WIDTH - 100,
50,
QUIT_BUTTON_COLOUR,
hover_colour=(216, 53, 43),
function=self.intro_quit,
text="QUIT")
self.gameover_buttons.append(gameover_quit_button)
def show_text(self, text, pos):
text = self.font.render(text, False, BLACK)
self.window.blit(text, (pos[0], pos[1]))
def reset(self):
# reset the game
self.state = "play"
self.active_buttons = self.playing_buttons
self.snake = Snake(self)
FPS[0] = 5
def run(self):
while self.running:
self.events()
self.update()
self.draw()
self.clock.tick(FPS[0])
pygame.quit()
sys.exit()
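    # The game is a small three-state machine: "intro" (menu), "play" (snake is
    # running) and "dead" (game-over screen); events(), update() and draw() below
    # all dispatch on self.state.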
def events(self):
if self.state == "intro":
self.intro_events()
if self.state == "play":
self.playing_events()
if self.state == "dead":
self.gameover_events()
def update(self):
if self.state == "intro":
self.intro_update()
if self.state == "play":
self.playing_update()
if self.state == "dead":
self.gameover_update()
def draw(self):
self.window.fill(BG_COL)
if self.state == "intro":
self.intro_draw()
if self.state == "play":
self.playing_draw()
if self.state == "dead":
self.gameover_draw()
pygame.display.update()
# INTRO FUNCTIONS
def intro_events(self):
for event in pygame.event.get():
if event.type == pygame.QUIT:
self.running = False
if event.type == pygame.KEYDOWN and event.key == pygame.K_ESCAPE:
self.running = False
if event.type == pygame.MOUSEBUTTONDOWN:
for button in self.active_buttons:
if button.hovered:
button.click()
def intro_update(self):
for button in self.active_buttons:
button.update()
def intro_draw(self):
for button in self.active_buttons:
button.draw()
def intro_to_play(self):
self.state = "play"
self.active_buttons = self.playing_buttons
def intro_quit(self):
self.running = False
# PlAY FUNCTIONS
def playing_events(self):
for event in pygame.event.get():
if event.type == pygame.QUIT:
self.running = False
# checks if a key is pressed down
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_ESCAPE:
self.running = False
if event.key == pygame.K_LEFT and self.snake.direction != [
1, 0
]:
self.snake.direction = [-1, 0]
if event.key == pygame.K_RIGHT and self.snake.direction != [
-1, 0
]:
self.snake.direction = [1, 0]
if event.key == pygame.K_UP and self.snake.direction != [0, 1]:
self.snake.direction = [0, -1]
if event.key == pygame.K_DOWN and self.snake.direction != [
0, -1
]:
self.snake.direction = [0, 1]
if event.type == pygame.MOUSEBUTTONDOWN:
for button in self.active_buttons:
if button.hovered:
button.click()
def playing_update(self):
for button in self.active_buttons:
button.update()
self.app_window.update()
def playing_draw(self):
self.app_window.draw()
for button in self.active_buttons:
button.draw()
self.show_text("Score: " + str(self.snake.length - 1), [20, 20])
def playing_quit(self):
self.running = False
# GAMEOVER FUNCTIONS
def gameover_events(self):
for event in pygame.event.get():
if event.type == pygame.QUIT:
self.running = False
if event.type == pygame.KEYDOWN and event.key == pygame.K_ESCAPE:
self.running = False
if event.type == pygame.MOUSEBUTTONDOWN:
for button in self.active_buttons:
if button.hovered:
button.click()
def gameover_update(self):
for button in self.active_buttons:
button.update()
def gameover_draw(self):
for button in self.active_buttons:
button.draw()
self.game_over("GAME OVER", [WIDTH - 440, 30])
def game_over(self, text, pos):
text = self.gameover.render(text, False, RED)
self.window.blit(text, (pos[0], pos[1]))
| [
"app_window.App_window",
"snake.Snake",
"pygame.init",
"pygame.quit",
"sys.exit",
"pygame.event.get",
"pygame.display.set_mode",
"button.Button",
"pygame.time.Clock",
"pygame.display.update",
"food.Food",
"pygame.font.SysFont"
] | [((282, 295), 'pygame.init', 'pygame.init', ([], {}), '()\n', (293, 295), False, 'import pygame\n'), ((317, 336), 'pygame.time.Clock', 'pygame.time.Clock', ([], {}), '()\n', (334, 336), False, 'import pygame\n'), ((359, 399), 'pygame.display.set_mode', 'pygame.display.set_mode', (['(WIDTH, HEIGHT)'], {}), '((WIDTH, HEIGHT))\n', (382, 399), False, 'import pygame\n'), ((424, 487), 'pygame.font.SysFont', 'pygame.font.SysFont', (['"""Comicsansms"""', '(90)'], {'bold': '(False)', 'italic': '(True)'}), "('Comicsansms', 90, bold=False, italic=True)\n", (443, 487), False, 'import pygame\n'), ((640, 677), 'pygame.font.SysFont', 'pygame.font.SysFont', (['FONT', '(20)'], {'bold': '(1)'}), '(FONT, 20, bold=1)\n', (659, 677), False, 'import pygame\n'), ((911, 927), 'app_window.App_window', 'App_window', (['self'], {}), '(self)\n', (921, 927), False, 'from app_window import App_window\n'), ((949, 960), 'snake.Snake', 'Snake', (['self'], {}), '(self)\n', (954, 960), False, 'from snake import Snake\n'), ((981, 991), 'food.Food', 'Food', (['self'], {}), '(self)\n', (985, 991), False, 'from food import Food\n'), ((1115, 1247), 'button.Button', 'Button', (['self', '(50)', '(300)', '(WIDTH - 100)', '(50)', 'PLAY_BUTTON_COLOUR'], {'hover_colour': '(49, 218, 46)', 'function': 'self.intro_to_play', 'text': '"""PLAY"""'}), "(self, 50, 300, WIDTH - 100, 50, PLAY_BUTTON_COLOUR, hover_colour=(49,\n 218, 46), function=self.intro_to_play, text='PLAY')\n", (1121, 1247), False, 'from button import Button\n'), ((1606, 1744), 'button.Button', 'Button', (['self', '(50)', '(HEIGHT - 100)', '(WIDTH - 100)', '(50)', 'QUIT_BUTTON_COLOUR'], {'hover_colour': '(219, 53, 43)', 'function': 'self.intro_quit', 'text': '"""QUIT"""'}), "(self, 50, HEIGHT - 100, WIDTH - 100, 50, QUIT_BUTTON_COLOUR,\n hover_colour=(219, 53, 43), function=self.intro_quit, text='QUIT')\n", (1612, 1744), False, 'from button import Button\n'), ((2136, 2272), 'button.Button', 'Button', (['self', '(WIDTH // 2 - 50)', '(20)', '(100)', '(33)', 'QUIT_BUTTON_COLOUR'], {'hover_colour': '(219, 53, 43)', 'function': 'self.playing_quit', 'text': '"""QUIT"""'}), "(self, WIDTH // 2 - 50, 20, 100, 33, QUIT_BUTTON_COLOUR, hover_colour\n =(219, 53, 43), function=self.playing_quit, text='QUIT')\n", (2142, 2272), False, 'from button import Button\n'), ((2651, 2781), 'button.Button', 'Button', (['self', '(50)', '(300)', '(WIDTH - 100)', '(50)', 'PLAY_BUTTON_COLOUR'], {'hover_colour': '(36, 183, 23)', 'function': 'self.reset', 'text': '"""PLAY AGAIN"""'}), "(self, 50, 300, WIDTH - 100, 50, PLAY_BUTTON_COLOUR, hover_colour=(36,\n 183, 23), function=self.reset, text='PLAY AGAIN')\n", (2657, 2781), False, 'from button import Button\n'), ((3227, 3365), 'button.Button', 'Button', (['self', '(50)', '(HEIGHT - 100)', '(WIDTH - 100)', '(50)', 'QUIT_BUTTON_COLOUR'], {'hover_colour': '(216, 53, 43)', 'function': 'self.intro_quit', 'text': '"""QUIT"""'}), "(self, 50, HEIGHT - 100, WIDTH - 100, 50, QUIT_BUTTON_COLOUR,\n hover_colour=(216, 53, 43), function=self.intro_quit, text='QUIT')\n", (3233, 3365), False, 'from button import Button\n'), ((4010, 4021), 'snake.Snake', 'Snake', (['self'], {}), '(self)\n', (4015, 4021), False, 'from snake import Snake\n'), ((4209, 4222), 'pygame.quit', 'pygame.quit', ([], {}), '()\n', (4220, 4222), False, 'import pygame\n'), ((4231, 4241), 'sys.exit', 'sys.exit', ([], {}), '()\n', (4239, 4241), False, 'import sys\n'), ((4947, 4970), 'pygame.display.update', 'pygame.display.update', ([], {}), '()\n', (4968, 4970), False, 'import pygame\n'), ((5044, 
5062), 'pygame.event.get', 'pygame.event.get', ([], {}), '()\n', (5060, 5062), False, 'import pygame\n'), ((5875, 5893), 'pygame.event.get', 'pygame.event.get', ([], {}), '()\n', (5891, 5893), False, 'import pygame\n'), ((7472, 7490), 'pygame.event.get', 'pygame.event.get', ([], {}), '()\n', (7488, 7490), False, 'import pygame\n')] |
from sklearn.metrics import f1_score,accuracy_score
import numpy as np
from utilities.tools import load_model
import pandas as pd
def predict_MSRP_test_data(n_models,nb_words,nlp_f,test_data_1,test_data_2,test_labels):
models=[]
n_h_features=nlp_f.shape[1]
print('loading the models...')
for i in range(n_models):
models.append(load_model(i+1,nb_words,n_h_features))
preds=[]
print('predicting the test data...\n')
i=0
for m in models:
i+=1
preds_prob=m.predict([test_data_1, test_data_2,nlp_f], batch_size=64, verbose=0)
preds.append(preds_prob[:,1])
preds=np.asarray(preds)
final_labels=np.zeros(len(test_data_1),dtype=int)
    # average the predictions
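    # (Soft voting: each model's class-1 probability is averaged and the mean is
    # rounded to obtain the final 0/1 label.)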
for i in range(len(test_data_1)):
final_labels[i]=round(np.mean(preds[:,i]))
if i%100==0:
print(i ,' out of ',len(test_data_1))
print("test data accuracy: ", accuracy_score(final_labels,test_labels))
print("test data f_measure: ", f1_score(final_labels, test_labels))
submission = pd.DataFrame({"Quality": final_labels})
submission.to_csv("predictions/MSRP.tsv", index=True,index_label='test_id')
def predict_Quora_test_data(n_models,nb_words,nlp_f,test_data_1,test_data_2):
models=[]
n_h_features=nlp_f.shape[1]
print('loading the models...')
for i in range(n_models):
models.append(load_model(i+1,nb_words,n_h_features))
preds=[]
print('predicting the test data...\n')
i=0
for m in models:
i+=1
preds_prob=m.predict([test_data_1, test_data_2,nlp_f], batch_size=125, verbose=0)
preds.append(preds_prob[:,1])
preds=np.asarray(preds)
final_labels=np.zeros(len(test_data_1),dtype=float)
    # average the predictions
for i in range(len(test_data_1)):
final_labels[i]=np.mean(preds[:,i])
if i%10000==0:
print(i ,' out of ',len(test_data_1))
    print('making the submission file')
submission = pd.DataFrame({"is_duplicate": final_labels})
submission.to_csv("predictions/Quora.tsv", index=True,index_label='test_id') | [
"numpy.mean",
"sklearn.metrics.f1_score",
"numpy.asarray",
"utilities.tools.load_model",
"pandas.DataFrame",
"sklearn.metrics.accuracy_score"
] | [((624, 641), 'numpy.asarray', 'np.asarray', (['preds'], {}), '(preds)\n', (634, 641), True, 'import numpy as np\n'), ((1052, 1091), 'pandas.DataFrame', 'pd.DataFrame', (["{'Quality': final_labels}"], {}), "({'Quality': final_labels})\n", (1064, 1091), True, 'import pandas as pd\n'), ((1657, 1674), 'numpy.asarray', 'np.asarray', (['preds'], {}), '(preds)\n', (1667, 1674), True, 'import numpy as np\n'), ((1974, 2018), 'pandas.DataFrame', 'pd.DataFrame', (["{'is_duplicate': final_labels}"], {}), "({'is_duplicate': final_labels})\n", (1986, 2018), True, 'import pandas as pd\n'), ((920, 961), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['final_labels', 'test_labels'], {}), '(final_labels, test_labels)\n', (934, 961), False, 'from sklearn.metrics import f1_score, accuracy_score\n'), ((997, 1032), 'sklearn.metrics.f1_score', 'f1_score', (['final_labels', 'test_labels'], {}), '(final_labels, test_labels)\n', (1005, 1032), False, 'from sklearn.metrics import f1_score, accuracy_score\n'), ((1822, 1842), 'numpy.mean', 'np.mean', (['preds[:, i]'], {}), '(preds[:, i])\n', (1829, 1842), True, 'import numpy as np\n'), ((354, 395), 'utilities.tools.load_model', 'load_model', (['(i + 1)', 'nb_words', 'n_h_features'], {}), '(i + 1, nb_words, n_h_features)\n', (364, 395), False, 'from utilities.tools import load_model\n'), ((793, 813), 'numpy.mean', 'np.mean', (['preds[:, i]'], {}), '(preds[:, i])\n', (800, 813), True, 'import numpy as np\n'), ((1386, 1427), 'utilities.tools.load_model', 'load_model', (['(i + 1)', 'nb_words', 'n_h_features'], {}), '(i + 1, nb_words, n_h_features)\n', (1396, 1427), False, 'from utilities.tools import load_model\n')] |
# coding=utf-8
import logging
import traceback
from os import makedirs
from os.path import exists, join
from textwrap import fill
import matplotlib.patheffects as PathEffects
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
from koino.plot import big_square, default_alpha
from matplotlib import cm
from ..utils.base import jaccard
def plot_silhouette(
X, figure_fp, n_clusters, silhouette_values, cluster_labels, silhouette_avg
):
# Create a subplot with 1 row and 2 columns
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(26, 10))
# The 1st subplot is the silhouette plot
# The silhouette coefficient can range from -1, 1 but here all
# lie within [-0.1, 1]
ax1.set_xlim([-0.1, 1])
# The (n_clusters+1)*10 is for inserting blank space between silhouette
# plots of individual clusters, to demarcate them clearly.
ax1.set_ylim([0, len(X) + (n_clusters + 1) * 10])
y_lower = 10
for k in range(n_clusters):
# Aggregate the silhouette scores for samples belonging to
# cluster i, and sort them
ith_cluster_silhouette_values = np.sort(silhouette_values[cluster_labels == k])
size_cluster_i = ith_cluster_silhouette_values.shape[0]
y_upper = y_lower + size_cluster_i
color = cm.spectral(float(k) / n_clusters)
ax1.fill_betweenx(
np.arange(y_lower, y_upper),
0,
ith_cluster_silhouette_values,
facecolor=color,
edgecolor=color,
alpha=default_alpha,
)
# Label the silhouette plots with their cluster numbers at the
# middle
ax1.text(-0.05, y_lower + 0.5 * size_cluster_i, str(k))
# Compute the new y_lower for next plot
y_lower = y_upper + 10 # 10 for the 0 samples
ax1.set_title("The silhouette plot for the various clusters.")
ax1.set_xlabel("The silhouette coefficient values")
ax1.set_ylabel("Cluster label")
# The vertical line for average silhouette score of all the values
ax1.axvline(x=silhouette_avg, color="red", linestyle="--")
ax1.set_yticks([]) # Clear the yaxis labels / ticks
ax1.set_xticks([-0.1, 0, 0.2, 0.4, 0.6, 0.8, 1])
# Construct cluster
# 2nd Plot showing the actual clusters formed
colors = cm.spectral(cluster_labels.astype(float) / n_clusters)
# colors = y
ax2.scatter(X[:, 0], X[:, 1], marker=".", s=20, lw=0, alpha=default_alpha, c=colors)
ax2.set_title("The visualization of the clustered data.")
ax2.set_xlabel("Feature space for the 1st feature")
ax2.set_ylabel("Feature space for the 2nd feature")
plt.suptitle(
("Silhouette analysis for KMeans " "with n_clusters = %d" % n_clusters),
fontsize=14,
fontweight="bold",
)
plt.savefig(figure_fp)
plt.close()
plt.clf()
def plot_cluster_assignments(
X, y, n_clusters, figures_dir, transparent=False, cluster_names=None, title=""
):
"""Clustering assignments scatter plot
Notes
-----
Can use mean or median to fix cluster centroid coordinates."""
if cluster_names is None:
cluster_names = ["Cluster {}".format(i + 1) for i in range(n_clusters)]
# We first reorder the data points according to the centroids labels
X = np.vstack([X[y == i] for i in range(n_clusters)])
y = np.hstack([y[y == i] for i in range(n_clusters)])
# Choose a color palette with seaborn.
palette = np.array(sns.color_palette("hls", n_clusters))
fig, ax = plt.subplots(figsize=big_square)
# for i in range(n_clusters):
# mask = y == i
# ax.scatter(X[mask, 0], X[mask, 1], lw=0, s=20, c=palette[i],
# label=cluster_names[i])
ax.set_title(title)
ax.scatter(X[:, 0], X[:, 1], lw=0, s=20, c=palette[y.astype(np.int)])
ax.axis("off")
# Add the labels for each cluster.
for i in range(n_clusters):
# Position of each label.
samples = np.atleast_2d(X[y == i, :2])
if not len(samples):
logging.warning(
"Probably singular cluster {} (shape:{})".format(i + 1, X[y == i].shape)
)
continue
xtext, ytext = np.median(samples, axis=0)
name = fill(cluster_names[i], width=20)
assert np.isfinite(xtext)
assert np.isfinite(ytext)
txt = ax.text(xtext, ytext, name, fontsize=20, wrap=True, ha="left")
txt.set_path_effects(
[PathEffects.Stroke(linewidth=5, foreground="w"), PathEffects.Normal()]
)
# plt.legend()
figure_fp = join(figures_dir, "Clustered {}.png".format(title))
fig.tight_layout()
try:
fig.savefig(figure_fp, transparent=transparent)
except ValueError:
logging.warning(traceback.format_exc())
finally:
plt.close()
plt.clf()
def overlap_jaccard(
indx,
y_a,
y_b,
names_a,
names_b,
n_a=None,
n_b=None,
figsize=None,
output_dir=None,
alabel="socio-demographic",
blabel="purchases",
transparent=False,
):
"""Compute and plot contingency tables based on set intersection and
jaccard score.
    # TODO: Normalize by len(sd_set) or len(diet_set)?
"""
if not (n_a or n_b) or not output_dir:
return
elif output_dir and not exists(output_dir):
makedirs(output_dir)
else:
assert n_a and n_b
assert len(indx) == len(y_a) == len(y_b)
assert len(names_a) == n_a
assert len(names_b) == n_b
a_sets = [set(indx[y_a == i]) for i in range(n_a)]
b_sets = [set(indx[y_b == i]) for i in range(n_b)]
inter_sets = np.asarray(
[[len(set_a & set_t) for set_a in a_sets] for set_t in b_sets], dtype=np.int_
)
fig, ax = plt.subplots(figsize=figsize)
plt.title("Overlap between {} and {} clusters".format(alabel, blabel))
sns.heatmap(
inter_sets,
annot=True,
fmt="6.0f",
ax=ax,
square=True,
xticklabels=names_a,
yticklabels=names_b,
)
plt.tight_layout()
inter_path = join(output_dir, "Clusters Intersection.png")
plt.savefig(inter_path, transparent=transparent)
plt.close()
plt.clf()
jac_arr = np.asarray(
[[jaccard(set_a, set_b) for set_a in a_sets] for set_b in b_sets],
dtype=np.float_,
)
fig, ax = plt.subplots(figsize=figsize)
plt.title("Jaccard scores between {} and {} clusters".format(alabel, blabel))
sns.heatmap(
jac_arr,
annot=True,
fmt=".3f",
ax=ax,
square=True,
xticklabels=names_a,
yticklabels=names_b,
)
plt.tight_layout()
jaccard_path = join(output_dir, "Clusters Jaccard.png")
plt.savefig(jaccard_path, transparent=transparent)
plt.close()
plt.clf()
| [
"matplotlib.patheffects.Normal",
"textwrap.fill",
"numpy.isfinite",
"numpy.arange",
"numpy.atleast_2d",
"os.path.exists",
"seaborn.color_palette",
"numpy.sort",
"matplotlib.pyplot.close",
"matplotlib.pyplot.savefig",
"seaborn.heatmap",
"matplotlib.pyplot.suptitle",
"traceback.format_exc",
"numpy.median",
"os.makedirs",
"matplotlib.pyplot.clf",
"os.path.join",
"matplotlib.patheffects.Stroke",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.subplots"
] | [((534, 570), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(2)'], {'figsize': '(26, 10)'}), '(1, 2, figsize=(26, 10))\n', (546, 570), True, 'import matplotlib.pyplot as plt\n'), ((2652, 2768), 'matplotlib.pyplot.suptitle', 'plt.suptitle', (["('Silhouette analysis for KMeans with n_clusters = %d' % n_clusters)"], {'fontsize': '(14)', 'fontweight': '"""bold"""'}), "('Silhouette analysis for KMeans with n_clusters = %d' %\n n_clusters, fontsize=14, fontweight='bold')\n", (2664, 2768), True, 'import matplotlib.pyplot as plt\n'), ((2805, 2827), 'matplotlib.pyplot.savefig', 'plt.savefig', (['figure_fp'], {}), '(figure_fp)\n', (2816, 2827), True, 'import matplotlib.pyplot as plt\n'), ((2832, 2843), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (2841, 2843), True, 'import matplotlib.pyplot as plt\n'), ((2848, 2857), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (2855, 2857), True, 'import matplotlib.pyplot as plt\n'), ((3526, 3558), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': 'big_square'}), '(figsize=big_square)\n', (3538, 3558), True, 'import matplotlib.pyplot as plt\n'), ((5747, 5776), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': 'figsize'}), '(figsize=figsize)\n', (5759, 5776), True, 'import matplotlib.pyplot as plt\n'), ((5856, 5969), 'seaborn.heatmap', 'sns.heatmap', (['inter_sets'], {'annot': '(True)', 'fmt': '"""6.0f"""', 'ax': 'ax', 'square': '(True)', 'xticklabels': 'names_a', 'yticklabels': 'names_b'}), "(inter_sets, annot=True, fmt='6.0f', ax=ax, square=True,\n xticklabels=names_a, yticklabels=names_b)\n", (5867, 5969), True, 'import seaborn as sns\n'), ((6033, 6051), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (6049, 6051), True, 'import matplotlib.pyplot as plt\n'), ((6069, 6114), 'os.path.join', 'join', (['output_dir', '"""Clusters Intersection.png"""'], {}), "(output_dir, 'Clusters Intersection.png')\n", (6073, 6114), False, 'from os.path import exists, join\n'), ((6119, 6167), 'matplotlib.pyplot.savefig', 'plt.savefig', (['inter_path'], {'transparent': 'transparent'}), '(inter_path, transparent=transparent)\n', (6130, 6167), True, 'import matplotlib.pyplot as plt\n'), ((6172, 6183), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (6181, 6183), True, 'import matplotlib.pyplot as plt\n'), ((6188, 6197), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (6195, 6197), True, 'import matplotlib.pyplot as plt\n'), ((6346, 6375), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': 'figsize'}), '(figsize=figsize)\n', (6358, 6375), True, 'import matplotlib.pyplot as plt\n'), ((6462, 6572), 'seaborn.heatmap', 'sns.heatmap', (['jac_arr'], {'annot': '(True)', 'fmt': '""".3f"""', 'ax': 'ax', 'square': '(True)', 'xticklabels': 'names_a', 'yticklabels': 'names_b'}), "(jac_arr, annot=True, fmt='.3f', ax=ax, square=True, xticklabels\n =names_a, yticklabels=names_b)\n", (6473, 6572), True, 'import seaborn as sns\n'), ((6635, 6653), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (6651, 6653), True, 'import matplotlib.pyplot as plt\n'), ((6673, 6713), 'os.path.join', 'join', (['output_dir', '"""Clusters Jaccard.png"""'], {}), "(output_dir, 'Clusters Jaccard.png')\n", (6677, 6713), False, 'from os.path import exists, join\n'), ((6718, 6768), 'matplotlib.pyplot.savefig', 'plt.savefig', (['jaccard_path'], {'transparent': 'transparent'}), '(jaccard_path, transparent=transparent)\n', (6729, 6768), True, 'import matplotlib.pyplot as plt\n'), ((6773, 6784), 
'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (6782, 6784), True, 'import matplotlib.pyplot as plt\n'), ((6789, 6798), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (6796, 6798), True, 'import matplotlib.pyplot as plt\n'), ((1125, 1172), 'numpy.sort', 'np.sort', (['silhouette_values[cluster_labels == k]'], {}), '(silhouette_values[cluster_labels == k])\n', (1132, 1172), True, 'import numpy as np\n'), ((3473, 3509), 'seaborn.color_palette', 'sns.color_palette', (['"""hls"""', 'n_clusters'], {}), "('hls', n_clusters)\n", (3490, 3509), True, 'import seaborn as sns\n'), ((3959, 3987), 'numpy.atleast_2d', 'np.atleast_2d', (['X[y == i, :2]'], {}), '(X[y == i, :2])\n', (3972, 3987), True, 'import numpy as np\n'), ((4193, 4219), 'numpy.median', 'np.median', (['samples'], {'axis': '(0)'}), '(samples, axis=0)\n', (4202, 4219), True, 'import numpy as np\n'), ((4235, 4267), 'textwrap.fill', 'fill', (['cluster_names[i]'], {'width': '(20)'}), '(cluster_names[i], width=20)\n', (4239, 4267), False, 'from textwrap import fill\n'), ((4283, 4301), 'numpy.isfinite', 'np.isfinite', (['xtext'], {}), '(xtext)\n', (4294, 4301), True, 'import numpy as np\n'), ((4317, 4335), 'numpy.isfinite', 'np.isfinite', (['ytext'], {}), '(ytext)\n', (4328, 4335), True, 'import numpy as np\n'), ((4804, 4815), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (4813, 4815), True, 'import matplotlib.pyplot as plt\n'), ((4824, 4833), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (4831, 4833), True, 'import matplotlib.pyplot as plt\n'), ((1372, 1399), 'numpy.arange', 'np.arange', (['y_lower', 'y_upper'], {}), '(y_lower, y_upper)\n', (1381, 1399), True, 'import numpy as np\n'), ((5333, 5353), 'os.makedirs', 'makedirs', (['output_dir'], {}), '(output_dir)\n', (5341, 5353), False, 'from os import makedirs\n'), ((4456, 4503), 'matplotlib.patheffects.Stroke', 'PathEffects.Stroke', ([], {'linewidth': '(5)', 'foreground': '"""w"""'}), "(linewidth=5, foreground='w')\n", (4474, 4503), True, 'import matplotlib.patheffects as PathEffects\n'), ((4505, 4525), 'matplotlib.patheffects.Normal', 'PathEffects.Normal', ([], {}), '()\n', (4523, 4525), True, 'import matplotlib.patheffects as PathEffects\n'), ((4759, 4781), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (4779, 4781), False, 'import traceback\n'), ((5305, 5323), 'os.path.exists', 'exists', (['output_dir'], {}), '(output_dir)\n', (5311, 5323), False, 'from os.path import exists, join\n')] |
import typing
nt = typing.NamedTuple("name", [("field", str)]) | [
"typing.NamedTuple"
] | [((20, 63), 'typing.NamedTuple', 'typing.NamedTuple', (['"""name"""', "[('field', str)]"], {}), "('name', [('field', str)])\n", (37, 63), False, 'import typing\n')] |
from CommonServerPython import *
''' IMPORTS '''
import re
import requests
# Disable insecure warnings
requests.packages.urllib3.disable_warnings()
''' GLOBALS/PARAMS '''
VENDOR = 'Have I Been Pwned? V2'
MAX_RETRY_ALLOWED = demisto.params().get('max_retry_time', -1)
API_KEY = demisto.params().get('api_key')
USE_SSL = not demisto.params().get('insecure', False)
BASE_URL = 'https://haveibeenpwned.com/api/v3'
HEADERS = {
'hibp-api-key': API_KEY,
'user-agent': 'DBOT-API',
'Content-Type': 'application/json',
'Accept': 'application/json'
}
DEFAULT_DBOT_SCORE_EMAIL = 2 if demisto.params().get('default_dbot_score_email') == 'SUSPICIOUS' else 3
DEFAULT_DBOT_SCORE_DOMAIN = 2 if demisto.params().get('default_dbot_score_domain') == 'SUSPICIOUS' else 3
SUFFIXES = {
"email": '/breachedaccount/',
"domain": '/breaches?domain=',
"username": '/breachedaccount/',
"paste": '/pasteaccount/',
"email_truncate_verified": '?truncateResponse=false&includeUnverified=true',
"domain_truncate_verified": '&truncateResponse=false&includeUnverified=true',
"username_truncate_verified": '?truncateResponse=false&includeUnverified=true'
}
RETRIES_END_TIME = datetime.min
''' HELPER FUNCTIONS '''
def http_request(method, url_suffix, params=None, data=None):
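    # HIBP rate limiting: a 429 response carries a message with the number of
    # seconds to wait; the loop below sleeps for that long and retries until
    # RETRIES_END_TIME (derived from the max_retry_time parameter) is reached.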
while True:
res = requests.request(
method,
BASE_URL + url_suffix,
verify=USE_SSL,
params=params,
data=data,
headers=HEADERS
)
if res.status_code != 429:
# Rate limit response code
break
if datetime.now() > RETRIES_END_TIME:
            return_error('Max retry time has been exceeded.')
wait_regex = re.search(r'\d+', res.json()['message'])
if wait_regex:
wait_amount = wait_regex.group()
else:
demisto.error('failed extracting wait time will use default (5). Res body: {}'.format(res.text))
wait_amount = 5
if datetime.now() + timedelta(seconds=int(wait_amount)) > RETRIES_END_TIME:
            return_error('Max retry time has been exceeded.')
time.sleep(int(wait_amount))
if res.status_code == 404:
return None
if not res.status_code == 200:
if not res.status_code == 401:
demisto.error(
'Error in API call to Pwned Integration [%d]. Full text: %s' % (res.status_code, res.text))
return_error('Error in API call to Pwned Integration [%d] - %s' % (res.status_code, res.reason))
return None
return res.json()
def html_description_to_human_readable(breach_description):
"""
    Convert an HTML breach description into human-readable text
    :param breach_description: Description of breach from API response
    :return: Description string with HTML anchor tags rewritten as clickable links
    for better readability in the war-room
"""
html_link_pattern = re.compile('<a href="(.+?)"(.+?)>(.+?)</a>')
patterns_found = html_link_pattern.findall(breach_description)
for link in patterns_found:
html_actual_address = link[0]
html_readable_name = link[2]
link_from_desc = '[' + html_readable_name + ']' + '(' + html_actual_address + ')'
breach_description = re.sub(html_link_pattern, link_from_desc, breach_description, count=1)
return breach_description
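# For example, a description fragment such as
#   'See <a href="https://example.com" target="_blank">the report</a> for details.'
# is rewritten to
#   'See [the report](https://example.com) for details.'
# so the link renders as clickable text in the war-room.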
def data_to_markdown(query_type, query_arg, api_res, api_paste_res=None):
records_found = False
md = '### Have I Been Pwned query for ' + query_type.lower() + ': *' + query_arg + '*\n'
if api_res:
records_found = True
for breach in api_res:
verified_breach = 'Verified' if breach['IsVerified'] else 'Unverified'
md += '#### ' + breach['Title'] + ' (' + breach['Domain'] + '): ' + str(breach['PwnCount']) + \
' records breached [' + verified_breach + ' breach]\n'
md += 'Date: **' + breach['BreachDate'] + '**\n\n'
md += html_description_to_human_readable(breach['Description']) + '\n'
md += 'Data breached: **' + ','.join(breach['DataClasses']) + '**\n'
if api_paste_res:
records_found = True
pastes_list = []
for paste_breach in api_paste_res:
paste_entry = \
{
'Source': paste_breach['Source'],
'Title': paste_breach['Title'],
'ID': paste_breach['Id'],
'Date': '',
'Amount of emails in paste': str(paste_breach['EmailCount'])
}
if paste_breach['Date']:
paste_entry['Date'] = paste_breach['Date'].split('T')[0]
pastes_list.append(paste_entry)
md += tableToMarkdown('The email address was found in the following "Pastes":',
pastes_list,
['ID', 'Title', 'Date', 'Source', 'Amount of emails in paste'])
if not records_found:
md += 'No records found'
return md
def create_dbot_score_dictionary(indicator_value, indicator_type, dbot_score):
return {
'Indicator': indicator_value,
'Type': indicator_type,
'Vendor': VENDOR,
'Score': dbot_score
}
def create_context_entry(context_type, context_main_value, comp_sites, comp_pastes, malicious_score):
context_dict = dict() # dict
if context_type == 'email':
context_dict['Address'] = context_main_value
else:
context_dict['Name'] = context_main_value
context_dict['Pwned-V2'] = {
'Compromised': {
'Vendor': VENDOR,
'Reporters': ', '.join(comp_sites + comp_pastes)
}
}
if malicious_score == 3:
context_dict['Malicious'] = add_malicious_to_context(context_type)
return context_dict
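# Illustrative sketch (values made up): for an email address compromised on two sites
# the entry built above looks roughly like
#   {'Address': 'user@example.com',
#    'Pwned-V2': {'Compromised': {'Vendor': VENDOR, 'Reporters': 'SiteA, SiteB'}},
#    'Malicious': {'Vendor': VENDOR, 'Description': 'The email has been compromised'}}
# where the 'Malicious' key is only present when the DBot score is 3.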
def add_malicious_to_context(malicious_type):
return {
'Vendor': VENDOR,
'Description': 'The ' + malicious_type + ' has been compromised'
}
def email_to_entry_context(email, api_email_res, api_paste_res):
dbot_score = 0
comp_email = dict() # type: dict
comp_sites = sorted([item['Title'] for item in api_email_res])
comp_pastes = sorted(set(item['Source'] for item in api_paste_res))
if len(comp_sites) > 0:
dbot_score = DEFAULT_DBOT_SCORE_EMAIL
email_context = create_context_entry('email', email, comp_sites, comp_pastes, DEFAULT_DBOT_SCORE_EMAIL)
comp_email[outputPaths['email']] = email_context
comp_email['DBotScore'] = create_dbot_score_dictionary(email, 'email', dbot_score)
return comp_email
def domain_to_entry_context(domain, api_res):
comp_sites = [item['Title'] for item in api_res]
comp_sites = sorted(comp_sites)
comp_domain = dict() # type: dict
dbot_score = 0
if len(comp_sites) > 0:
dbot_score = DEFAULT_DBOT_SCORE_DOMAIN
domain_context = create_context_entry('domain', domain, comp_sites, [], DEFAULT_DBOT_SCORE_DOMAIN)
comp_domain[outputPaths['domain']] = domain_context
comp_domain['DBotScore'] = create_dbot_score_dictionary(domain, 'domain', dbot_score)
return comp_domain
def set_retry_end_time():
global RETRIES_END_TIME
if MAX_RETRY_ALLOWED != -1:
RETRIES_END_TIME = datetime.now() + timedelta(seconds=int(MAX_RETRY_ALLOWED))
''' COMMANDS + REQUESTS FUNCTIONS '''
def test_module(args_dict):
"""
If the http request was successful the test will return OK
:return: 3 arrays of outputs
"""
http_request('GET', SUFFIXES.get("username", '') + 'test')
return ['ok'], [None], [None]
def pwned_email_command(args_dict):
"""
    Executing the pwned request for an email list; in order to support list input, the function returns 3 lists of outputs
:param args_dict: the demisto argument - in this case the email list is needed
:return: 3 arrays of outputs
"""
email_list = argToList(args_dict.get('email', ''))
api_email_res_list, api_paste_res_list = pwned_email(email_list)
md_list = []
ec_list = []
for email, api_email_res, api_paste_res in zip(email_list, api_email_res_list, api_paste_res_list):
md_list.append(data_to_markdown('Email', email, api_email_res, api_paste_res))
ec_list.append(email_to_entry_context(email, api_email_res or [], api_paste_res or []))
return md_list, ec_list, api_email_res_list
def pwned_email(email_list):
"""
Executing the http requests
    :param email_list: the email list needed for the http requests
:return: 2 arrays of http requests outputs
"""
api_email_res_list = []
api_paste_res_list = []
for email in email_list:
email_suffix = SUFFIXES.get("email") + email + SUFFIXES.get("email_truncate_verified")
paste_suffix = SUFFIXES.get("paste") + email
api_email_res_list.append(http_request('GET', url_suffix=email_suffix))
api_paste_res_list.append(http_request('GET', url_suffix=paste_suffix))
return api_email_res_list, api_paste_res_list
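# Illustrative sketch only: shows the url_suffix values pwned_email builds for a
# hypothetical address, without issuing any HTTP request.
def _example_pwned_email_suffixes(email="user@example.com"):
    email_suffix = SUFFIXES.get("email") + email + SUFFIXES.get("email_truncate_verified")
    paste_suffix = SUFFIXES.get("paste") + email
    # email_suffix -> '/breachedaccount/user@example.com?truncateResponse=false&includeUnverified=true'
    # paste_suffix -> '/pasteaccount/user@example.com'
    return email_suffix, paste_suffix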
def pwned_domain_command(args_dict):
"""
    Executing the pwned request for a domain list; in order to support list input, the function returns 3 lists of
    outputs
:param args_dict: the demisto argument - in this case the domain list is needed
:return: 3 arrays of outputs
"""
domain_list = argToList(args_dict.get('domain', ''))
api_res_list = pwned_domain(domain_list)
md_list = []
ec_list = []
for domain, api_res in zip(domain_list, api_res_list):
md_list.append(data_to_markdown('Domain', domain, api_res))
ec_list.append(domain_to_entry_context(domain, api_res or []))
return md_list, ec_list, api_res_list
def pwned_domain(domain_list):
"""
Executing the http request
    :param domain_list: the domain list needed for the http requests
:return: an array of http requests outputs
"""
api_res_list = []
for domain in domain_list:
suffix = SUFFIXES.get("domain") + domain + SUFFIXES.get("domain_truncate_verified")
api_res_list.append(http_request('GET', url_suffix=suffix))
return api_res_list
def pwned_username_command(args_dict):
"""
    Executing the pwned request for a username list; in order to support list input, the function returns 3 lists of
    outputs
:param args_dict: the demisto argument - in this case the username list is needed
:return: 3 arrays of outputs
"""
username_list = argToList(args_dict.get('username', ''))
api_res_list = pwned_username(username_list)
md_list = []
ec_list = []
for username, api_res in zip(username_list, api_res_list):
md_list.append(data_to_markdown('Username', username, api_res))
ec_list.append(domain_to_entry_context(username, api_res or []))
return md_list, ec_list, api_res_list
def pwned_username(username_list):
"""
Executing the http request
    :param username_list: the username list needed for the http requests
:return: an array of http requests outputs
"""
api_res_list = []
for username in username_list:
suffix = SUFFIXES.get("username") + username + SUFFIXES.get("username_truncate_verified")
api_res_list.append(http_request('GET', url_suffix=suffix))
return api_res_list
command = demisto.command()
LOG('Command being called is: {}'.format(command))
try:
handle_proxy()
set_retry_end_time()
commands = {
'test-module': test_module,
'email': pwned_email_command,
'pwned-email': pwned_email_command,
'domain': pwned_domain_command,
'pwned-domain': pwned_domain_command,
'pwned-username': pwned_username_command
}
if command in commands:
md_list, ec_list, api_email_res_list = commands[command](demisto.args())
for md, ec, api_paste_res in zip(md_list, ec_list, api_email_res_list):
return_outputs(md, ec, api_paste_res)
# Log exceptions
except Exception as e:
return_error(str(e))
| [
"re.sub",
"requests.packages.urllib3.disable_warnings",
"requests.request",
"re.compile"
] | [((106, 150), 'requests.packages.urllib3.disable_warnings', 'requests.packages.urllib3.disable_warnings', ([], {}), '()\n', (148, 150), False, 'import requests\n'), ((2908, 2952), 're.compile', 're.compile', (['"""<a href="(.+?)"(.+?)>(.+?)</a>"""'], {}), '(\'<a href="(.+?)"(.+?)>(.+?)</a>\')\n', (2918, 2952), False, 'import re\n'), ((1325, 1436), 'requests.request', 'requests.request', (['method', '(BASE_URL + url_suffix)'], {'verify': 'USE_SSL', 'params': 'params', 'data': 'data', 'headers': 'HEADERS'}), '(method, BASE_URL + url_suffix, verify=USE_SSL, params=\n params, data=data, headers=HEADERS)\n', (1341, 1436), False, 'import requests\n'), ((3246, 3316), 're.sub', 're.sub', (['html_link_pattern', 'link_from_desc', 'breach_description'], {'count': '(1)'}), '(html_link_pattern, link_from_desc, breach_description, count=1)\n', (3252, 3316), False, 'import re\n')] |
# Generated by Django 2.2.15 on 2021-01-29 20:20
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('sitewebapp', '0010_auditionanswers_auditionquestions_audtionrounds_candidates'),
]
operations = [
migrations.CreateModel(
name='auditionRounds',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('roundno', models.IntegerField(default=1)),
('candidate', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='inductees', to='sitewebapp.Candidates')),
],
),
migrations.AlterField(
model_name='auditionquestions',
name='round',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='round', to='sitewebapp.auditionRounds'),
),
migrations.DeleteModel(
name='audtionRounds',
),
]
| [
"django.db.migrations.DeleteModel",
"django.db.models.AutoField",
"django.db.models.IntegerField",
"django.db.models.ForeignKey"
] | [((999, 1043), 'django.db.migrations.DeleteModel', 'migrations.DeleteModel', ([], {'name': '"""audtionRounds"""'}), "(name='audtionRounds')\n", (1021, 1043), False, 'from django.db import migrations, models\n'), ((862, 983), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'related_name': '"""round"""', 'to': '"""sitewebapp.auditionRounds"""'}), "(on_delete=django.db.models.deletion.CASCADE, related_name\n ='round', to='sitewebapp.auditionRounds')\n", (879, 983), False, 'from django.db import migrations, models\n'), ((415, 508), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (431, 508), False, 'from django.db import migrations, models\n'), ((535, 565), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': '(1)'}), '(default=1)\n', (554, 565), False, 'from django.db import migrations, models\n'), ((598, 719), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'related_name': '"""inductees"""', 'to': '"""sitewebapp.Candidates"""'}), "(on_delete=django.db.models.deletion.CASCADE, related_name\n ='inductees', to='sitewebapp.Candidates')\n", (615, 719), False, 'from django.db import migrations, models\n')] |
import inspect
import json
import os
import random
import subprocess
import time
import requests
import ast
import paramiko
import rancher
from rancher import ApiError
from lib.aws import AmazonWebServices
DEFAULT_TIMEOUT = 120
DEFAULT_MULTI_CLUSTER_APP_TIMEOUT = 300
CATTLE_TEST_URL = os.environ.get('CATTLE_TEST_URL', "http://localhost:80")
ADMIN_TOKEN = os.environ.get('ADMIN_TOKEN', "None")
CATTLE_API_URL = CATTLE_TEST_URL + "/v3"
kube_fname = os.path.join(os.path.dirname(os.path.realpath(__file__)),
"k8s_kube_config")
MACHINE_TIMEOUT = float(os.environ.get('RANCHER_MACHINE_TIMEOUT', "1200"))
TEST_IMAGE = "sangeetha/mytestcontainer"
CLUSTER_NAME = os.environ.get("RANCHER_CLUSTER_NAME", "")
RANCHER_CLEANUP_CLUSTER = \
ast.literal_eval(os.environ.get('RANCHER_CLEANUP_CLUSTER', "True"))
env_file = os.path.join(
os.path.dirname(os.path.realpath(__file__)),
"rancher_env.config")
CLUSTER_NAME_2 = ""
def random_str():
return 'random-{0}-{1}'.format(random_num(), int(time.time()))
def random_num():
return random.randint(0, 1000000)
def random_int(start, end):
return random.randint(start, end)
def random_test_name(name="test"):
return name + "-" + str(random_int(10000, 99999))
def get_admin_client():
return rancher.Client(url=CATTLE_API_URL, token=ADMIN_TOKEN, verify=False)
def get_client_for_token(token):
return rancher.Client(url=CATTLE_API_URL, token=token, verify=False)
def get_project_client_for_token(project, token):
p_url = project.links['self'] + '/schemas'
p_client = rancher.Client(url=p_url, token=token, verify=False)
return p_client
def get_cluster_client_for_token(cluster, token):
c_url = cluster.links['self'] + '/schemas'
c_client = rancher.Client(url=c_url, token=token, verify=False)
return c_client
def up(cluster, token):
c_url = cluster.links['self'] + '/schemas'
c_client = rancher.Client(url=c_url, token=token, verify=False)
return c_client
def wait_state(client, obj, state, timeout=DEFAULT_TIMEOUT):
wait_for(lambda: client.reload(obj).state == state, timeout)
return client.reload(obj)
def wait_for_condition(client, resource, check_function, fail_handler=None,
timeout=DEFAULT_TIMEOUT):
start = time.time()
resource = client.reload(resource)
while not check_function(resource):
if time.time() - start > timeout:
exceptionMsg = 'Timeout waiting for ' + resource.baseType + \
' to satisfy condition: ' + \
inspect.getsource(check_function)
if fail_handler:
exceptionMsg = exceptionMsg + fail_handler(resource)
raise Exception(exceptionMsg)
time.sleep(.5)
resource = client.reload(resource)
return resource
def wait_for(callback, timeout=DEFAULT_TIMEOUT, timeout_message=None):
start = time.time()
ret = callback()
while ret is None or ret is False:
time.sleep(.5)
if time.time() - start > timeout:
if timeout_message:
raise Exception(timeout_message)
else:
raise Exception('Timeout waiting for condition')
ret = callback()
return ret
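# Illustrative sketch only (hypothetical helper, not used by the tests): wait_for
# polls an arbitrary callback until it returns a truthy value or the timeout expires,
# e.g. to wait for a node to report "active" and then hand back the refreshed object.
def _example_wait_for_usage(client, node):
    wait_for(lambda: client.reload(node).state == "active",
             timeout_message="node never became active")
    return client.reload(node)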
def random_name():
return "test" + "-" + str(random_int(10000, 99999))
def create_project_and_ns(token, cluster, project_name=None, ns_name=None):
client = get_client_for_token(token)
p = create_project(client, cluster, project_name)
c_client = get_cluster_client_for_token(cluster, token)
ns = create_ns(c_client, cluster, p, ns_name)
return p, ns
def create_project(client, cluster, project_name=None):
if project_name is None:
project_name = random_name()
p = client.create_project(name=project_name,
clusterId=cluster.id)
time.sleep(5)
p = wait_until_available(client, p)
assert p.state == 'active'
return p
def create_project_with_pspt(client, cluster, pspt):
p = client.create_project(name=random_name(),
clusterId=cluster.id)
p = wait_until_available(client, p)
assert p.state == 'active'
return set_pspt_for_project(p, client, pspt)
def set_pspt_for_project(project, client, pspt):
project.setpodsecuritypolicytemplate(podSecurityPolicyTemplateId=pspt.id)
project = wait_until_available(client, project)
assert project.state == 'active'
return project
def create_ns(client, cluster, project, ns_name=None):
if ns_name is None:
ns_name = random_name()
ns = client.create_namespace(name=ns_name,
clusterId=cluster.id,
projectId=project.id)
wait_for_ns_to_become_active(client, ns)
ns = client.reload(ns)
assert ns.state == 'active'
return ns
def assign_members_to_cluster(client, user, cluster, role_template_id):
crtb = client.create_cluster_role_template_binding(
clusterId=cluster.id,
roleTemplateId=role_template_id,
subjectKind="User",
userId=user.id)
return crtb
def assign_members_to_project(client, user, project, role_template_id):
prtb = client.create_project_role_template_binding(
projectId=project.id,
roleTemplateId=role_template_id,
subjectKind="User",
userId=user.id)
return prtb
def change_member_role_in_cluster(client, user, crtb, role_template_id):
crtb = client.update(
crtb,
roleTemplateId=role_template_id,
userId=user.id)
return crtb
def change_member_role_in_project(client, user, prtb, role_template_id):
prtb = client.update(
prtb,
roleTemplateId=role_template_id,
userId=user.id)
return prtb
def create_kubeconfig(cluster):
generateKubeConfigOutput = cluster.generateKubeconfig()
print(generateKubeConfigOutput.config)
file = open(kube_fname, "w")
file.write(generateKubeConfigOutput.config)
file.close()
def validate_psp_error_worklaod(p_client, workload, error_message):
workload = wait_for_wl_transitioning(p_client, workload)
assert workload.state == "updating"
assert workload.transitioning == "error"
print(workload.transitioningMessage)
assert error_message in workload.transitioningMessage
def validate_workload(p_client, workload, type, ns_name, pod_count=1,
wait_for_cron_pods=60):
workload = wait_for_wl_to_active(p_client, workload)
assert workload.state == "active"
# For cronjob, wait for the first pod to get created after
# scheduled wait time
if type == "cronJob":
time.sleep(wait_for_cron_pods)
pods = p_client.list_pod(workloadId=workload.id).data
assert len(pods) == pod_count
for pod in pods:
wait_for_pod_to_running(p_client, pod)
wl_result = execute_kubectl_cmd(
"get " + type + " " + workload.name + " -n " + ns_name)
if type == "deployment" or type == "statefulSet":
assert wl_result["status"]["readyReplicas"] == pod_count
if type == "daemonSet":
assert wl_result["status"]["currentNumberScheduled"] == pod_count
if type == "cronJob":
assert len(wl_result["status"]["active"]) >= pod_count
return
for key, value in workload.workloadLabels.items():
label = key + "=" + value
get_pods = "get pods -l" + label + " -n " + ns_name
pods_result = execute_kubectl_cmd(get_pods)
assert len(pods_result["items"]) == pod_count
for pod in pods_result["items"]:
assert pod["status"]["phase"] == "Running"
return pods_result["items"]
def validate_workload_with_sidekicks(p_client, workload, type, ns_name,
pod_count=1):
workload = wait_for_wl_to_active(p_client, workload)
assert workload.state == "active"
pods = wait_for_pods_in_workload(p_client, workload, pod_count)
assert len(pods) == pod_count
for pod in pods:
wait_for_pod_to_running(p_client, pod)
wl_result = execute_kubectl_cmd(
"get " + type + " " + workload.name + " -n " + ns_name)
assert wl_result["status"]["readyReplicas"] == pod_count
for key, value in workload.workloadLabels.items():
label = key + "=" + value
get_pods = "get pods -l" + label + " -n " + ns_name
execute_kubectl_cmd(get_pods)
pods_result = execute_kubectl_cmd(get_pods)
assert len(pods_result["items"]) == pod_count
for pod in pods_result["items"]:
assert pod["status"]["phase"] == "Running"
assert len(pod["status"]["containerStatuses"]) == 2
assert "running" in pod["status"]["containerStatuses"][0]["state"]
assert "running" in pod["status"]["containerStatuses"][1]["state"]
def validate_workload_paused(p_client, workload, expectedstatus):
workloadStatus = p_client.list_workload(uuid=workload.uuid).data[0].paused
assert workloadStatus == expectedstatus
def validate_pod_images(expectedimage, workload, ns_name):
for key, value in workload.workloadLabels.items():
label = key + "=" + value
get_pods = "get pods -l" + label + " -n " + ns_name
pods = execute_kubectl_cmd(get_pods)
for pod in pods["items"]:
assert pod["spec"]["containers"][0]["image"] == expectedimage
def validate_pods_are_running_by_id(expectedpods, workload, ns_name):
for key, value in workload.workloadLabels.items():
label = key + "=" + value
get_pods = "get pods -l" + label + " -n " + ns_name
pods = execute_kubectl_cmd(get_pods)
curpodnames = []
for pod in pods["items"]:
curpodnames.append(pod["metadata"]["name"])
for expectedpod in expectedpods["items"]:
assert expectedpod["metadata"]["name"] in curpodnames
def validate_workload_image(client, workload, expectedImage, ns):
workload = client.list_workload(uuid=workload.uuid).data[0]
assert workload.containers[0].image == expectedImage
validate_pod_images(expectedImage, workload, ns.name)
def execute_kubectl_cmd(cmd, json_out=True, stderr=False):
command = 'kubectl --kubeconfig {0} {1}'.format(
kube_fname, cmd)
if json_out:
command += ' -o json'
if stderr:
result = run_command_with_stderr(command)
else:
result = run_command(command)
if json_out:
result = json.loads(result)
print(result)
return result
def run_command(command):
return subprocess.check_output(command, shell=True, text=True)
def run_command_with_stderr(command):
try:
output = subprocess.check_output(command, shell=True,
stderr=subprocess.PIPE)
returncode = 0
except subprocess.CalledProcessError as e:
output = e.output
returncode = e.returncode
print(returncode)
return (output, returncode)
def wait_for_wl_to_active(client, workload, timeout=DEFAULT_TIMEOUT):
start = time.time()
workloads = client.list_workload(uuid=workload.uuid).data
assert len(workloads) == 1
wl = workloads[0]
while wl.state != "active":
if time.time() - start > timeout:
raise AssertionError(
"Timed out waiting for state to get to active")
time.sleep(.5)
workloads = client.list_workload(uuid=workload.uuid).data
assert len(workloads) == 1
wl = workloads[0]
return wl
def wait_for_ingress_to_active(client, ingress, timeout=DEFAULT_TIMEOUT):
start = time.time()
ingresses = client.list_ingress(uuid=ingress.uuid).data
assert len(ingresses) == 1
wl = ingresses[0]
while wl.state != "active":
if time.time() - start > timeout:
raise AssertionError(
"Timed out waiting for state to get to active")
time.sleep(.5)
ingresses = client.list_ingress(uuid=ingress.uuid).data
assert len(ingresses) == 1
wl = ingresses[0]
return wl
def wait_for_wl_transitioning(client, workload, timeout=DEFAULT_TIMEOUT,
state="error"):
start = time.time()
workloads = client.list_workload(uuid=workload.uuid).data
assert len(workloads) == 1
wl = workloads[0]
while wl.transitioning != state:
if time.time() - start > timeout:
raise AssertionError(
"Timed out waiting for state to get to active")
time.sleep(.5)
workloads = client.list_workload(uuid=workload.uuid).data
assert len(workloads) == 1
wl = workloads[0]
return wl
def wait_for_pod_to_running(client, pod, timeout=DEFAULT_TIMEOUT):
start = time.time()
pods = client.list_pod(uuid=pod.uuid).data
assert len(pods) == 1
p = pods[0]
while p.state != "running":
if time.time() - start > timeout:
raise AssertionError(
"Timed out waiting for state to get to active")
time.sleep(.5)
pods = client.list_pod(uuid=pod.uuid).data
assert len(pods) == 1
p = pods[0]
return p
def get_schedulable_nodes(cluster):
client = get_admin_client()
nodes = client.list_node(clusterId=cluster.id).data
schedulable_nodes = []
for node in nodes:
if node.worker:
schedulable_nodes.append(node)
return schedulable_nodes
def get_role_nodes(cluster, role):
etcd_nodes = []
control_nodes = []
worker_nodes = []
node_list = []
client = get_admin_client()
nodes = client.list_node(clusterId=cluster.id).data
for node in nodes:
if node.etcd:
etcd_nodes.append(node)
if node.controlPlane:
control_nodes.append(node)
if node.worker:
worker_nodes.append(node)
if role == "etcd":
node_list = etcd_nodes
if role == "control":
node_list = control_nodes
if role == "worker":
node_list = worker_nodes
return node_list
def validate_ingress(p_client, cluster, workloads, host, path,
insecure_redirect=False):
time.sleep(10)
curl_args = " "
if (insecure_redirect):
curl_args = " -L --insecure "
if len(host) > 0:
curl_args += " --header 'Host: " + host + "'"
nodes = get_schedulable_nodes(cluster)
target_name_list = get_target_names(p_client, workloads)
for node in nodes:
host_ip = node.externalIpAddress
cmd = curl_args + " http://" + host_ip + path
validate_http_response(cmd, target_name_list)
def validate_ingress_using_endpoint(p_client, ingress, workloads,
timeout=300):
target_name_list = get_target_names(p_client, workloads)
start = time.time()
fqdn_available = False
url = None
while not fqdn_available:
if time.time() - start > timeout:
raise AssertionError(
"Timed out waiting for endpoint to be available")
time.sleep(.5)
ingress_list = p_client.list_ingress(uuid=ingress.uuid).data
assert len(ingress_list) == 1
ingress = ingress_list[0]
if hasattr(ingress, 'publicEndpoints'):
for public_endpoint in ingress.publicEndpoints:
if public_endpoint["hostname"].startswith(ingress.name):
fqdn_available = True
url = \
public_endpoint["protocol"].lower() + "://" + \
public_endpoint["hostname"]
if "path" in public_endpoint.keys():
url += public_endpoint["path"]
time.sleep(10)
validate_http_response(url, target_name_list)
def get_target_names(p_client, workloads):
pods = []
for workload in workloads:
pod_list = p_client.list_pod(workloadId=workload.id).data
pods.extend(pod_list)
target_name_list = []
for pod in pods:
target_name_list.append(pod.name)
print("target name list:" + str(target_name_list))
return target_name_list
def get_endpoint_url_for_workload(p_client, workload, timeout=600):
fqdn_available = False
url = ""
start = time.time()
while not fqdn_available:
if time.time() - start > timeout:
raise AssertionError(
"Timed out waiting for endpoint to be available")
time.sleep(.5)
workload_list = p_client.list_workload(uuid=workload.uuid).data
assert len(workload_list) == 1
workload = workload_list[0]
if hasattr(workload, 'publicEndpoints'):
assert len(workload.publicEndpoints) > 0
url = "http://"
url = url + workload.publicEndpoints[0]["addresses"][0] + ":"
url = url + str(workload.publicEndpoints[0]["port"])
fqdn_available = True
return url
def wait_until_lb_is_active(url, timeout=300):
start = time.time()
while check_for_no_access(url):
time.sleep(.5)
print("No access yet")
if time.time() - start > timeout:
raise Exception('Timed out waiting for LB to become active')
return
def check_for_no_access(url):
try:
requests.get(url)
return False
except requests.ConnectionError:
print("Connection Error - " + url)
return True
def validate_http_response(cmd, target_name_list, client_pod=None):
target_hit_list = target_name_list[:]
count = 5 * len(target_name_list)
for i in range(1, count):
if len(target_hit_list) == 0:
break
if client_pod is None:
curl_cmd = "curl " + cmd
result = run_command(curl_cmd)
else:
wget_cmd = "wget -qO- " + cmd
result = kubectl_pod_exec(client_pod, wget_cmd)
result = result.decode()
result = result.rstrip()
print("cmd: \t" + cmd)
print("result: \t" + result)
assert result in target_name_list
if result in target_hit_list:
target_hit_list.remove(result)
print("After removing all, the rest is: ", target_hit_list)
assert len(target_hit_list) == 0
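# Note on validate_http_response above: the endpoint is hit up to 5 * len(targets)
# times and every pod name must be seen at least once, which is how the tests verify
# that traffic is spread across all pods behind an ingress or load balancer.
# A hypothetical call: validate_http_response(" http://1.2.3.4/name.html", ["pod-a", "pod-b"])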
def validate_cluster(client, cluster, intermediate_state="provisioning",
check_intermediate_state=True, skipIngresscheck=True,
nodes_not_in_active_state=[], k8s_version=""):
cluster = validate_cluster_state(
client, cluster,
check_intermediate_state=check_intermediate_state,
intermediate_state=intermediate_state,
nodes_not_in_active_state=nodes_not_in_active_state)
# Create Daemon set workload and have an Ingress with Workload
# rule pointing to this daemonset
create_kubeconfig(cluster)
if k8s_version != "":
check_cluster_version(cluster, k8s_version)
if hasattr(cluster, 'rancherKubernetesEngineConfig'):
check_cluster_state(len(get_role_nodes(cluster, "etcd")))
project, ns = create_project_and_ns(ADMIN_TOKEN, cluster)
p_client = get_project_client_for_token(project, ADMIN_TOKEN)
con = [{"name": "test1",
"image": TEST_IMAGE}]
name = random_test_name("default")
workload = p_client.create_workload(name=name,
containers=con,
namespaceId=ns.id,
daemonSetConfig={})
validate_workload(p_client, workload, "daemonSet", ns.name,
len(get_schedulable_nodes(cluster)))
if not skipIngresscheck:
host = "test" + str(random_int(10000, 99999)) + ".com"
path = "/name.html"
rule = {"host": host,
"paths":
[{"workloadIds": [workload.id], "targetPort": "80"}]}
ingress = p_client.create_ingress(name=name,
namespaceId=ns.id,
rules=[rule])
wait_for_ingress_to_active(p_client, ingress)
validate_ingress(p_client, cluster, [workload], host, path)
return cluster
def check_cluster_version(cluster, version):
cluster_k8s_version = \
cluster.appliedSpec["rancherKubernetesEngineConfig"][
"kubernetesVersion"]
assert cluster_k8s_version == version, \
"cluster_k8s_version: " + cluster_k8s_version + \
" Expected: " + version
expected_k8s_version = version[:version.find("-")]
k8s_version = execute_kubectl_cmd("version")
kubectl_k8s_version = k8s_version["serverVersion"]["gitVersion"]
assert kubectl_k8s_version == expected_k8s_version, \
"kubectl version: " + kubectl_k8s_version + \
" Expected: " + expected_k8s_version
def check_cluster_state(etcd_count):
css_resp = execute_kubectl_cmd("get cs")
css = css_resp["items"]
components = ["scheduler", "controller-manager"]
for i in range(0, etcd_count):
components.append("etcd-" + str(i))
print("components to check - " + str(components))
for cs in css:
component_name = cs["metadata"]["name"]
assert component_name in components
components.remove(component_name)
assert cs["conditions"][0]["status"] == "True"
assert cs["conditions"][0]["type"] == "Healthy"
assert len(components) == 0
def validate_dns_record(pod, record, expected):
# requires pod with `dig` available - TEST_IMAGE
host = '{0}.{1}.svc.cluster.local'.format(
record["name"], record["namespaceId"])
validate_dns_entry(pod, host, expected)
def validate_dns_entry(pod, host, expected):
# requires pod with `dig` available - TEST_IMAGE
cmd = 'ping -c 1 -W 1 {0}'.format(host)
ping_output = kubectl_pod_exec(pod, cmd)
ping_validation_pass = False
for expected_value in expected:
if expected_value in str(ping_output):
ping_validation_pass = True
break
assert ping_validation_pass is True
assert " 0% packet loss" in str(ping_output)
dig_cmd = 'dig {0} +short'.format(host)
dig_output = kubectl_pod_exec(pod, dig_cmd)
for expected_value in expected:
assert expected_value in str(dig_output)
def wait_for_nodes_to_become_active(client, cluster, exception_list=[],
retry_count=0):
nodes = client.list_node(clusterId=cluster.id).data
node_auto_deleted = False
for node in nodes:
if node.requestedHostname not in exception_list:
node = wait_for_node_status(client, node, "active")
if node is None:
print("Need to re-evalauate new node list")
node_auto_deleted = True
retry_count += 1
print("Retry Count:" + str(retry_count))
if node_auto_deleted and retry_count < 5:
wait_for_nodes_to_become_active(client, cluster, exception_list,
retry_count)
def wait_for_node_status(client, node, state):
uuid = node.uuid
start = time.time()
nodes = client.list_node(uuid=uuid).data
node_count = len(nodes)
# Handle the case of nodes getting auto deleted when they are part of
# nodepools
if node_count == 1:
node_status = nodes[0].state
else:
print("Node does not exist anymore -" + uuid)
return None
while node_status != state:
if time.time() - start > MACHINE_TIMEOUT:
raise AssertionError(
"Timed out waiting for state to get to active")
time.sleep(5)
nodes = client.list_node(uuid=uuid).data
node_count = len(nodes)
if node_count == 1:
node_status = nodes[0].state
else:
print("Node does not exist anymore -" + uuid)
return None
return node
def wait_for_node_to_be_deleted(client, node, timeout=300):
uuid = node.uuid
start = time.time()
nodes = client.list_node(uuid=uuid).data
node_count = len(nodes)
while node_count != 0:
if time.time() - start > timeout:
raise AssertionError(
"Timed out waiting for state to get to active")
time.sleep(.5)
nodes = client.list_node(uuid=uuid).data
node_count = len(nodes)
def wait_for_cluster_node_count(client, cluster, expected_node_count,
timeout=300):
start = time.time()
nodes = client.list_node(clusterId=cluster.id).data
node_count = len(nodes)
while node_count != expected_node_count:
if time.time() - start > timeout:
raise AssertionError(
"Timed out waiting for state to get to active")
time.sleep(.5)
nodes = client.list_node(clusterId=cluster.id).data
node_count = len(nodes)
def get_custom_host_registration_cmd(client, cluster, roles, node):
allowed_roles = ["etcd", "worker", "controlplane"]
cluster_tokens = client.list_cluster_registration_token(
clusterId=cluster.id).data
if len(cluster_tokens) > 0:
cluster_token = cluster_tokens[0]
else:
cluster_token = create_custom_host_registration_token(client, cluster)
cmd = cluster_token.nodeCommand
for role in roles:
assert role in allowed_roles
cmd += " --" + role
additional_options = " --address " + node.public_ip_address + \
" --internal-address " + node.private_ip_address
cmd += additional_options
return cmd
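# Illustrative sketch only: for roles ["etcd", "worker"] the command assembled above
# ends up as
#   <cluster_token.nodeCommand> --etcd --worker --address <public_ip> --internal-address <private_ip>
# (the nodeCommand prefix comes from Rancher and is not reproduced here).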
def create_custom_host_registration_token(client, cluster):
cluster_token = client.create_cluster_registration_token(
clusterId=cluster.id)
cluster_token = client.wait_success(cluster_token)
assert cluster_token.state == 'active'
return cluster_token
def get_cluster_type(client, cluster):
cluster_configs = [
"amazonElasticContainerServiceConfig",
"azureKubernetesServiceConfig",
"googleKubernetesEngineConfig",
"rancherKubernetesEngineConfig"
]
if "rancherKubernetesEngineConfig" in cluster:
nodes = client.list_node(clusterId=cluster.id).data
if len(nodes) > 0:
if nodes[0].nodeTemplateId is None:
return "Custom"
for cluster_config in cluster_configs:
if cluster_config in cluster:
return cluster_config
return "Imported"
def delete_cluster(client, cluster):
nodes = client.list_node(clusterId=cluster.id).data
# Delete Cluster
client.delete(cluster)
    # Delete nodes (in cluster) from AWS for Imported and Custom Cluster
if (len(nodes) > 0):
cluster_type = get_cluster_type(client, cluster)
print(cluster_type)
if get_cluster_type(client, cluster) in ["Imported", "Custom"]:
nodes = client.list_node(clusterId=cluster.id).data
filters = [
{'Name': 'tag:Name',
'Values': ['testcustom*', 'teststess*']}]
ip_filter = {}
ip_list = []
ip_filter['Name'] = \
'network-interface.addresses.association.public-ip'
ip_filter['Values'] = ip_list
filters.append(ip_filter)
for node in nodes:
ip_list.append(node.externalIpAddress)
assert len(ip_filter) > 0
print(ip_filter)
aws_nodes = AmazonWebServices().get_nodes(filters)
for node in aws_nodes:
print(node.public_ip_address)
AmazonWebServices().delete_nodes(aws_nodes)
def check_connectivity_between_workloads(p_client1, workload1, p_client2,
workload2, allow_connectivity=True):
wl1_pods = p_client1.list_pod(workloadId=workload1.id).data
wl2_pods = p_client2.list_pod(workloadId=workload2.id).data
for pod in wl1_pods:
for o_pod in wl2_pods:
check_connectivity_between_pods(pod, o_pod, allow_connectivity)
def check_connectivity_between_workload_pods(p_client, workload):
pods = p_client.list_pod(workloadId=workload.id).data
for pod in pods:
for o_pod in pods:
check_connectivity_between_pods(pod, o_pod)
def check_connectivity_between_pods(pod1, pod2, allow_connectivity=True):
pod_ip = pod2.status.podIp
cmd = "ping -c 1 -W 1 " + pod_ip
response = kubectl_pod_exec(pod1, cmd)
print("Actual ping Response from " + pod1.name + ":" + str(response))
if allow_connectivity:
assert pod_ip in str(response) and " 0% packet loss" in str(response)
else:
assert pod_ip in str(response) and " 100% packet loss" in str(response)
def kubectl_pod_exec(pod, cmd):
command = "exec " + pod.name + " -n " + pod.namespaceId + " -- " + cmd
return execute_kubectl_cmd(command, json_out=False, stderr=True)
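# Illustrative sketch: for a hypothetical pod "web-0" in namespace "ns1",
# kubectl_pod_exec(pod, "cat /etc/hostname") runs
#   kubectl --kubeconfig <kube_fname> exec web-0 -n ns1 -- cat /etc/hostname
# and returns the raw (output, returncode) pair instead of parsed JSON.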
def exec_shell_command(ip, port, cmd, password):
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
ssh.connect(ip, username="root", password=password, port=port)
stdin, stdout, stderr = ssh.exec_command(cmd)
response = stdout.readlines()
return response
def wait_for_ns_to_become_active(client, ns, timeout=DEFAULT_TIMEOUT):
start = time.time()
time.sleep(2)
nss = client.list_namespace(uuid=ns.uuid).data
assert len(nss) == 1
ns = nss[0]
while ns.state != "active":
if time.time() - start > timeout:
raise AssertionError(
"Timed out waiting for state to get to active")
time.sleep(.5)
nss = client.list_namespace(uuid=ns.uuid).data
assert len(nss) == 1
ns = nss[0]
return ns
def wait_for_pod_images(p_client, workload, ns_name, expectedimage, numofpods,
timeout=DEFAULT_TIMEOUT):
start = time.time()
for key, value in workload.workloadLabels.items():
label = key + "=" + value
get_pods = "get pods -l" + label + " -n " + ns_name
pods = execute_kubectl_cmd(get_pods)
for x in range(0, numofpods - 1):
pod = pods["items"][x]
podimage = pod["spec"]["containers"][0]["image"]
while podimage != expectedimage:
if time.time() - start > timeout:
raise AssertionError(
"Timed out waiting for correct pod images")
time.sleep(.5)
pods = execute_kubectl_cmd(get_pods)
pod = pods["items"][x]
podimage = pod["spec"]["containers"][0]["image"]
def wait_for_pods_in_workload(p_client, workload, pod_count,
timeout=DEFAULT_TIMEOUT):
start = time.time()
pods = p_client.list_pod(workloadId=workload.id).data
while len(pods) != pod_count:
if time.time() - start > timeout:
raise AssertionError(
"Timed out waiting for state to get to active")
time.sleep(.5)
pods = p_client.list_pod(workloadId=workload.id).data
return pods
def get_admin_client_and_cluster():
client = get_admin_client()
if CLUSTER_NAME == "":
clusters = client.list_cluster().data
else:
clusters = client.list_cluster(name=CLUSTER_NAME).data
assert len(clusters) > 0
cluster = clusters[0]
return client, cluster
def validate_cluster_state(client, cluster,
check_intermediate_state=True,
intermediate_state="provisioning",
nodes_not_in_active_state=[]):
if check_intermediate_state:
cluster = wait_for_condition(
client, cluster,
lambda x: x.state == intermediate_state,
lambda x: 'State is: ' + x.state,
timeout=MACHINE_TIMEOUT)
assert cluster.state == intermediate_state
cluster = wait_for_condition(
client, cluster,
lambda x: x.state == "active",
lambda x: 'State is: ' + x.state,
timeout=MACHINE_TIMEOUT)
assert cluster.state == "active"
wait_for_nodes_to_become_active(client, cluster,
exception_list=nodes_not_in_active_state)
return cluster
def wait_until_available(client, obj, timeout=DEFAULT_TIMEOUT):
start = time.time()
sleep = 0.01
while True:
time.sleep(sleep)
sleep *= 2
if sleep > 2:
sleep = 2
try:
obj = client.reload(obj)
except ApiError as e:
if e.error.status != 403:
raise e
else:
return obj
delta = time.time() - start
if delta > timeout:
msg = 'Timeout waiting for [{}:{}] for condition after {}' \
' seconds'.format(obj.type, obj.id, delta)
raise Exception(msg)
def delete_node(aws_nodes):
for node in aws_nodes:
AmazonWebServices().delete_node(node)
def cluster_cleanup(client, cluster, aws_nodes=None):
if RANCHER_CLEANUP_CLUSTER:
client.delete(cluster)
if aws_nodes is not None:
delete_node(aws_nodes)
else:
env_details = "env.CATTLE_TEST_URL='" + CATTLE_TEST_URL + "'\n"
env_details += "env.ADMIN_TOKEN='" + ADMIN_TOKEN + "'\n"
env_details += "env.CLUSTER_NAME='" + cluster.name + "'\n"
create_config_file(env_details)
def create_config_file(env_details):
file = open(env_file, "w")
file.write(env_details)
file.close()
def validate_hostPort(p_client, workload, source_port, cluster):
pods = p_client.list_pod(workloadId=workload.id).data
nodes = get_schedulable_nodes(cluster)
for node in nodes:
target_name_list = []
for pod in pods:
print(pod.nodeId + " check " + node.id)
if pod.nodeId == node.id:
target_name_list.append(pod.name)
break
host_ip = node.externalIpAddress
curl_cmd = " http://" + host_ip + ":" + \
str(source_port) + "/name.html"
validate_http_response(curl_cmd, target_name_list)
def validate_lb(p_client, workload):
url = get_endpoint_url_for_workload(p_client, workload)
target_name_list = get_target_names(p_client, [workload])
wait_until_lb_is_active(url)
validate_http_response(url + "/name.html", target_name_list)
def validate_nodePort(p_client, workload, cluster):
source_port = workload.publicEndpoints[0]["port"]
nodes = get_schedulable_nodes(cluster)
pods = p_client.list_pod(workloadId=workload.id).data
target_name_list = []
for pod in pods:
target_name_list.append(pod.name)
print("target name list:" + str(target_name_list))
for node in nodes:
host_ip = node.externalIpAddress
curl_cmd = " http://" + host_ip + ":" + \
str(source_port) + "/name.html"
validate_http_response(curl_cmd, target_name_list)
def validate_clusterIp(p_client, workload, cluster_ip, test_pods):
pods = p_client.list_pod(workloadId=workload.id).data
target_name_list = []
for pod in pods:
target_name_list.append(pod["name"])
curl_cmd = "http://" + cluster_ip + "/name.html"
for pod in test_pods:
validate_http_response(curl_cmd, target_name_list, pod)
def wait_for_pv_to_be_available(c_client, pv_object, timeout=DEFAULT_TIMEOUT):
start = time.time()
time.sleep(2)
list = c_client.list_persistent_volume(uuid=pv_object.uuid).data
assert len(list) == 1
pv = list[0]
while pv.state != "available":
if time.time() - start > timeout:
raise AssertionError(
"Timed out waiting for state to get to available")
time.sleep(.5)
list = c_client.list_persistent_volume(uuid=pv_object.uuid).data
assert len(list) == 1
pv = list[0]
return pv
def wait_for_pvc_to_be_bound(p_client, pvc_object, timeout=DEFAULT_TIMEOUT):
start = time.time()
time.sleep(2)
list = p_client.list_persistent_volume_claim(uuid=pvc_object.uuid).data
assert len(list) == 1
pvc = list[0]
while pvc.state != "bound":
if time.time() - start > timeout:
raise AssertionError(
"Timed out waiting for state to get to bound")
time.sleep(.5)
list = p_client.list_persistent_volume_claim(uuid=pvc_object.uuid).data
assert len(list) == 1
pvc = list[0]
return pvc
def create_wl_with_nfs(p_client, ns_id, pvc_name, wl_name,
mount_path, sub_path, is_daemonSet=False):
volumes = [{"type": "volume",
"name": "vol1",
"persistentVolumeClaim": {
"readOnly": "false",
"type": "persistentVolumeClaimVolumeSource",
"persistentVolumeClaimId": pvc_name
}}]
volumeMounts = [{"readOnly": "False",
"type": "volumeMount",
"mountPath": mount_path,
"subPath": sub_path,
"name": "vol1"
}]
con = [{"name": "test1",
"image": TEST_IMAGE,
"volumeMounts": volumeMounts
}]
if is_daemonSet:
workload = p_client.create_workload(name=wl_name,
containers=con,
namespaceId=ns_id,
volumes=volumes,
daemonSetConfig={})
else:
workload = p_client.create_workload(name=wl_name,
containers=con,
namespaceId=ns_id,
volumes=volumes)
return workload
def write_content_to_file(pod, content, filename):
cmd_write = "/bin/bash -c 'echo {1} > {0}'".format(filename, content)
output = kubectl_pod_exec(pod, cmd_write)
assert output.strip().decode('utf-8') == ""
def validate_file_content(pod, content, filename):
cmd_get_content = "/bin/bash -c 'cat {0}' ".format(filename)
output = kubectl_pod_exec(pod, cmd_get_content)
assert output.strip().decode('utf-8') == content
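# Illustrative sketch only (hypothetical path): the two helpers above are typically
# used as a round trip on a pod that mounts the volume under test, e.g.
#   write_content_to_file(pod, "hello", "/var/nfs/test1")
#   validate_file_content(pod, "hello", "/var/nfs/test1")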
def wait_for_mcapp_to_active(client, multiClusterApp, timeout=DEFAULT_MULTI_CLUSTER_APP_TIMEOUT):
print("\nuuid:")
print(multiClusterApp.uuid)
time.sleep(5)
mcapps = client.list_multiClusterApp(uuid=multiClusterApp.uuid, name=multiClusterApp.name).data
start = time.time()
assert len(mcapps) == 1
mapp = mcapps[0]
print(mapp.state)
while mapp.state != "active":
print(mapp.uuid)
print(mapp.state)
if time.time() - start > timeout:
raise AssertionError(
"Timed out waiting for state to get to active")
time.sleep(.5)
multiclusterapps = client.list_multiClusterApp(uuid=multiClusterApp.uuid, name=multiClusterApp.name).data
assert len(multiclusterapps) == 1
mapp = multiclusterapps[0]
return mapp
def validate_mcapp_cluster(app_id, p_client):
mcapp = p_client.list_app(name=app_id).data
assert len(mcapp) == 1
app = mcapp[0]
return app
def wait_for_mcapp_cluster_level_to_active(client, app_id, timeout=DEFAULT_MULTI_CLUSTER_APP_TIMEOUT):
mcapps = client.list_app(name=app_id).data
start = time.time()
assert len(mcapps) == 1
mapp = mcapps[0]
while mapp.state != "active":
if time.time() - start > timeout:
raise AssertionError(
"Timed out waiting for state to get to active")
time.sleep(.5)
apps = client.list_app(name=app_id).data
assert len(apps) == 1
mapp = apps[0]
return mapp
def get_admin_client_and_cluster_mcapp():
clusters = []
client = get_admin_client()
if CLUSTER_NAME == "" or CLUSTER_NAME_2 == "":
clusters = client.list_cluster().data
else:
clusters.append(client.list_cluster(name=CLUSTER_NAME).data)
clusters.append(client.list_cluster(name=CLUSTER_NAME_2).data)
assert len(clusters) == 2
return client, clusters
def validate_multi_cluster_app_cluster(app_id1, app_id2, p_client1, p_client2):
validate_mcapp_cluster(app_id1, p_client1)
if app_id2 != "":
validate_mcapp_cluster(app_id2, p_client2)
# verify app in cluster is active or not
wait_for_mcapp_cluster_level_to_active(p_client1, app_id1)
if app_id2 != "":
wait_for_mcapp_cluster_level_to_active(p_client2, app_id2) | [
"rancher.Client",
"subprocess.check_output",
"json.loads",
"lib.aws.AmazonWebServices",
"paramiko.SSHClient",
"paramiko.AutoAddPolicy",
"os.environ.get",
"time.sleep",
"requests.get",
"os.path.realpath",
"inspect.getsource",
"time.time",
"random.randint"
] | [((288, 344), 'os.environ.get', 'os.environ.get', (['"""CATTLE_TEST_URL"""', '"""http://localhost:80"""'], {}), "('CATTLE_TEST_URL', 'http://localhost:80')\n", (302, 344), False, 'import os\n'), ((359, 396), 'os.environ.get', 'os.environ.get', (['"""ADMIN_TOKEN"""', '"""None"""'], {}), "('ADMIN_TOKEN', 'None')\n", (373, 396), False, 'import os\n'), ((688, 730), 'os.environ.get', 'os.environ.get', (['"""RANCHER_CLUSTER_NAME"""', '""""""'], {}), "('RANCHER_CLUSTER_NAME', '')\n", (702, 730), False, 'import os\n'), ((580, 629), 'os.environ.get', 'os.environ.get', (['"""RANCHER_MACHINE_TIMEOUT"""', '"""1200"""'], {}), "('RANCHER_MACHINE_TIMEOUT', '1200')\n", (594, 629), False, 'import os\n'), ((780, 829), 'os.environ.get', 'os.environ.get', (['"""RANCHER_CLEANUP_CLUSTER"""', '"""True"""'], {}), "('RANCHER_CLEANUP_CLUSTER', 'True')\n", (794, 829), False, 'import os\n'), ((1068, 1094), 'random.randint', 'random.randint', (['(0)', '(1000000)'], {}), '(0, 1000000)\n', (1082, 1094), False, 'import random\n'), ((1136, 1162), 'random.randint', 'random.randint', (['start', 'end'], {}), '(start, end)\n', (1150, 1162), False, 'import random\n'), ((1291, 1358), 'rancher.Client', 'rancher.Client', ([], {'url': 'CATTLE_API_URL', 'token': 'ADMIN_TOKEN', 'verify': '(False)'}), '(url=CATTLE_API_URL, token=ADMIN_TOKEN, verify=False)\n', (1305, 1358), False, 'import rancher\n'), ((1405, 1466), 'rancher.Client', 'rancher.Client', ([], {'url': 'CATTLE_API_URL', 'token': 'token', 'verify': '(False)'}), '(url=CATTLE_API_URL, token=token, verify=False)\n', (1419, 1466), False, 'import rancher\n'), ((1581, 1633), 'rancher.Client', 'rancher.Client', ([], {'url': 'p_url', 'token': 'token', 'verify': '(False)'}), '(url=p_url, token=token, verify=False)\n', (1595, 1633), False, 'import rancher\n'), ((1768, 1820), 'rancher.Client', 'rancher.Client', ([], {'url': 'c_url', 'token': 'token', 'verify': '(False)'}), '(url=c_url, token=token, verify=False)\n', (1782, 1820), False, 'import rancher\n'), ((1929, 1981), 'rancher.Client', 'rancher.Client', ([], {'url': 'c_url', 'token': 'token', 'verify': '(False)'}), '(url=c_url, token=token, verify=False)\n', (1943, 1981), False, 'import rancher\n'), ((2299, 2310), 'time.time', 'time.time', ([], {}), '()\n', (2308, 2310), False, 'import time\n'), ((2913, 2924), 'time.time', 'time.time', ([], {}), '()\n', (2922, 2924), False, 'import time\n'), ((3860, 3873), 'time.sleep', 'time.sleep', (['(5)'], {}), '(5)\n', (3870, 3873), False, 'import time\n'), ((10467, 10522), 'subprocess.check_output', 'subprocess.check_output', (['command'], {'shell': '(True)', 'text': '(True)'}), '(command, shell=True, text=True)\n', (10490, 10522), False, 'import subprocess\n'), ((10967, 10978), 'time.time', 'time.time', ([], {}), '()\n', (10976, 10978), False, 'import time\n'), ((11518, 11529), 'time.time', 'time.time', ([], {}), '()\n', (11527, 11529), False, 'import time\n'), ((12110, 12121), 'time.time', 'time.time', ([], {}), '()\n', (12119, 12121), False, 'import time\n'), ((12659, 12670), 'time.time', 'time.time', ([], {}), '()\n', (12668, 12670), False, 'import time\n'), ((14071, 14085), 'time.sleep', 'time.sleep', (['(10)'], {}), '(10)\n', (14081, 14085), False, 'import time\n'), ((14715, 14726), 'time.time', 'time.time', ([], {}), '()\n', (14724, 14726), False, 'import time\n'), ((15596, 15610), 'time.sleep', 'time.sleep', (['(10)'], {}), '(10)\n', (15606, 15610), False, 'import time\n'), ((16141, 16152), 'time.time', 'time.time', ([], {}), '()\n', (16150, 16152), False, 'import time\n'), 
((16874, 16885), 'time.time', 'time.time', ([], {}), '()\n', (16883, 16885), False, 'import time\n'), ((22973, 22984), 'time.time', 'time.time', ([], {}), '()\n', (22982, 22984), False, 'import time\n'), ((23852, 23863), 'time.time', 'time.time', ([], {}), '()\n', (23861, 23863), False, 'import time\n'), ((24338, 24349), 'time.time', 'time.time', ([], {}), '()\n', (24347, 24349), False, 'import time\n'), ((28811, 28831), 'paramiko.SSHClient', 'paramiko.SSHClient', ([], {}), '()\n', (28829, 28831), False, 'import paramiko\n'), ((29150, 29161), 'time.time', 'time.time', ([], {}), '()\n', (29159, 29161), False, 'import time\n'), ((29166, 29179), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (29176, 29179), False, 'import time\n'), ((29728, 29739), 'time.time', 'time.time', ([], {}), '()\n', (29737, 29739), False, 'import time\n'), ((30546, 30557), 'time.time', 'time.time', ([], {}), '()\n', (30555, 30557), False, 'import time\n'), ((32138, 32149), 'time.time', 'time.time', ([], {}), '()\n', (32147, 32149), False, 'import time\n'), ((35242, 35253), 'time.time', 'time.time', ([], {}), '()\n', (35251, 35253), False, 'import time\n'), ((35258, 35271), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (35268, 35271), False, 'import time\n'), ((35814, 35825), 'time.time', 'time.time', ([], {}), '()\n', (35823, 35825), False, 'import time\n'), ((35830, 35843), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (35840, 35843), False, 'import time\n'), ((38275, 38288), 'time.sleep', 'time.sleep', (['(5)'], {}), '(5)\n', (38285, 38288), False, 'import time\n'), ((38401, 38412), 'time.time', 'time.time', ([], {}), '()\n', (38410, 38412), False, 'import time\n'), ((39260, 39271), 'time.time', 'time.time', ([], {}), '()\n', (39269, 39271), False, 'import time\n'), ((482, 508), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (498, 508), False, 'import os\n'), ((876, 902), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (892, 902), False, 'import os\n'), ((2750, 2765), 'time.sleep', 'time.sleep', (['(0.5)'], {}), '(0.5)\n', (2760, 2765), False, 'import time\n'), ((2993, 3008), 'time.sleep', 'time.sleep', (['(0.5)'], {}), '(0.5)\n', (3003, 3008), False, 'import time\n'), ((6676, 6706), 'time.sleep', 'time.sleep', (['wait_for_cron_pods'], {}), '(wait_for_cron_pods)\n', (6686, 6706), False, 'import time\n'), ((10373, 10391), 'json.loads', 'json.loads', (['result'], {}), '(result)\n', (10383, 10391), False, 'import json\n'), ((10589, 10657), 'subprocess.check_output', 'subprocess.check_output', (['command'], {'shell': '(True)', 'stderr': 'subprocess.PIPE'}), '(command, shell=True, stderr=subprocess.PIPE)\n', (10612, 10657), False, 'import subprocess\n'), ((11274, 11289), 'time.sleep', 'time.sleep', (['(0.5)'], {}), '(0.5)\n', (11284, 11289), False, 'import time\n'), ((11823, 11838), 'time.sleep', 'time.sleep', (['(0.5)'], {}), '(0.5)\n', (11833, 11838), False, 'import time\n'), ((12422, 12437), 'time.sleep', 'time.sleep', (['(0.5)'], {}), '(0.5)\n', (12432, 12437), False, 'import time\n'), ((12940, 12955), 'time.sleep', 'time.sleep', (['(0.5)'], {}), '(0.5)\n', (12950, 12955), False, 'import time\n'), ((14949, 14964), 'time.sleep', 'time.sleep', (['(0.5)'], {}), '(0.5)\n', (14959, 14964), False, 'import time\n'), ((16333, 16348), 'time.sleep', 'time.sleep', (['(0.5)'], {}), '(0.5)\n', (16343, 16348), False, 'import time\n'), ((16930, 16945), 'time.sleep', 'time.sleep', (['(0.5)'], {}), '(0.5)\n', (16940, 16945), False, 'import time\n'), ((17151, 
17168), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (17163, 17168), False, 'import requests\n'), ((23481, 23494), 'time.sleep', 'time.sleep', (['(5)'], {}), '(5)\n', (23491, 23494), False, 'import time\n'), ((24112, 24127), 'time.sleep', 'time.sleep', (['(0.5)'], {}), '(0.5)\n', (24122, 24127), False, 'import time\n'), ((24627, 24642), 'time.sleep', 'time.sleep', (['(0.5)'], {}), '(0.5)\n', (24637, 24642), False, 'import time\n'), ((28868, 28892), 'paramiko.AutoAddPolicy', 'paramiko.AutoAddPolicy', ([], {}), '()\n', (28890, 28892), False, 'import paramiko\n'), ((29452, 29467), 'time.sleep', 'time.sleep', (['(0.5)'], {}), '(0.5)\n', (29462, 29467), False, 'import time\n'), ((30798, 30813), 'time.sleep', 'time.sleep', (['(0.5)'], {}), '(0.5)\n', (30808, 30813), False, 'import time\n'), ((32191, 32208), 'time.sleep', 'time.sleep', (['sleep'], {}), '(sleep)\n', (32201, 32208), False, 'import time\n'), ((35570, 35585), 'time.sleep', 'time.sleep', (['(0.5)'], {}), '(0.5)\n', (35580, 35585), False, 'import time\n'), ((36143, 36158), 'time.sleep', 'time.sleep', (['(0.5)'], {}), '(0.5)\n', (36153, 36158), False, 'import time\n'), ((38717, 38732), 'time.sleep', 'time.sleep', (['(0.5)'], {}), '(0.5)\n', (38727, 38732), False, 'import time\n'), ((39503, 39518), 'time.sleep', 'time.sleep', (['(0.5)'], {}), '(0.5)\n', (39513, 39518), False, 'import time\n'), ((1023, 1034), 'time.time', 'time.time', ([], {}), '()\n', (1032, 1034), False, 'import time\n'), ((30255, 30270), 'time.sleep', 'time.sleep', (['(0.5)'], {}), '(0.5)\n', (30265, 30270), False, 'import time\n'), ((32467, 32478), 'time.time', 'time.time', ([], {}), '()\n', (32476, 32478), False, 'import time\n'), ((2401, 2412), 'time.time', 'time.time', ([], {}), '()\n', (2410, 2412), False, 'import time\n'), ((2568, 2601), 'inspect.getsource', 'inspect.getsource', (['check_function'], {}), '(check_function)\n', (2585, 2601), False, 'import inspect\n'), ((3019, 3030), 'time.time', 'time.time', ([], {}), '()\n', (3028, 3030), False, 'import time\n'), ((11137, 11148), 'time.time', 'time.time', ([], {}), '()\n', (11146, 11148), False, 'import time\n'), ((11686, 11697), 'time.time', 'time.time', ([], {}), '()\n', (11695, 11697), False, 'import time\n'), ((12285, 12296), 'time.time', 'time.time', ([], {}), '()\n', (12294, 12296), False, 'import time\n'), ((12803, 12814), 'time.time', 'time.time', ([], {}), '()\n', (12812, 12814), False, 'import time\n'), ((14810, 14821), 'time.time', 'time.time', ([], {}), '()\n', (14819, 14821), False, 'import time\n'), ((16194, 16205), 'time.time', 'time.time', ([], {}), '()\n', (16203, 16205), False, 'import time\n'), ((16987, 16998), 'time.time', 'time.time', ([], {}), '()\n', (16996, 16998), False, 'import time\n'), ((23336, 23347), 'time.time', 'time.time', ([], {}), '()\n', (23345, 23347), False, 'import time\n'), ((23975, 23986), 'time.time', 'time.time', ([], {}), '()\n', (23984, 23986), False, 'import time\n'), ((24490, 24501), 'time.time', 'time.time', ([], {}), '()\n', (24499, 24501), False, 'import time\n'), ((29315, 29326), 'time.time', 'time.time', ([], {}), '()\n', (29324, 29326), False, 'import time\n'), ((30661, 30672), 'time.time', 'time.time', ([], {}), '()\n', (30670, 30672), False, 'import time\n'), ((32747, 32766), 'lib.aws.AmazonWebServices', 'AmazonWebServices', ([], {}), '()\n', (32764, 32766), False, 'from lib.aws import AmazonWebServices\n'), ((35430, 35441), 'time.time', 'time.time', ([], {}), '()\n', (35439, 35441), False, 'import time\n'), ((36007, 36018), 'time.time', 
'time.time', ([], {}), '()\n', (36016, 36018), False, 'import time\n'), ((38580, 38591), 'time.time', 'time.time', ([], {}), '()\n', (38589, 38591), False, 'import time\n'), ((39366, 39377), 'time.time', 'time.time', ([], {}), '()\n', (39375, 39377), False, 'import time\n'), ((27291, 27310), 'lib.aws.AmazonWebServices', 'AmazonWebServices', ([], {}), '()\n', (27308, 27310), False, 'from lib.aws import AmazonWebServices\n'), ((30110, 30121), 'time.time', 'time.time', ([], {}), '()\n', (30119, 30121), False, 'import time\n'), ((27427, 27446), 'lib.aws.AmazonWebServices', 'AmazonWebServices', ([], {}), '()\n', (27444, 27446), False, 'from lib.aws import AmazonWebServices\n')] |
"""Bindings for the Barnes Hut TSNE algorithm with fast nearest neighbors
References:
[1] <NAME>, L.J.P.; Hinton, G.E. Visualizing High-Dimensional Data
Using t-SNE. Journal of Machine Learning Research 9:2579-2605, 2008.
[2] <NAME>, L.J.P. t-Distributed Stochastic Neighbor Embedding
http://homepage.tudelft.nl/19j49/t-SNE.html
"""
import numpy as N
import ctypes
import os
import pkg_resources
def ord_string(s):
b = bytearray()
    # bytearray.extend returns None, so just extend the buffer in place
    b.extend(map(ord, s))
return N.array([x for x in b] + [0]).astype(N.uint8)
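# Illustrative sketch (not used directly by the library): ord_string produces a
# NUL-terminated uint8 buffer so the C++ side can consume it as a plain char*,
# e.g. ord_string("ab") -> array([97, 98, 0], dtype=uint8).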
class TSNE(object):
def __init__(self,
n_components=2,
perplexity=50.0,
early_exaggeration=2.0,
learning_rate=200.0,
num_neighbors=1023,
force_magnify_iters=250,
pre_momentum=0.5,
post_momentum=0.8,
theta=0.5,
epssq=0.0025,
n_iter=1000,
n_iter_without_progress=1000,
min_grad_norm=1e-7,
perplexity_epsilon=1e-3,
metric='euclidean',
init='random',
return_style='once',
num_snapshots=5,
verbose=0,
random_seed=None,
use_interactive=False,
viz_timeout=10000,
viz_server="tcp://localhost:5556",
dump_points=False,
dump_file="dump.txt",
dump_interval=1,
print_interval=10,
device=0,
):
"""Initialization method for barnes hut T-SNE class.
"""
# Initialize the variables
self.n_components = int(n_components)
if self.n_components != 2:
raise ValueError('The current barnes-hut implementation does not support projection into dimensions other than 2 for now.')
self.perplexity = float(perplexity)
self.early_exaggeration = float(early_exaggeration)
self.learning_rate = float(learning_rate)
self.n_iter = int(n_iter)
self.n_iter_without_progress = int(n_iter_without_progress)
self.min_grad_norm = float(min_grad_norm)
if metric not in ['euclidean']:
raise ValueError('Non-Euclidean metrics are not currently supported. Please use metric=\'euclidean\' for now.')
else:
self.metric = metric
if init not in ['random']:
raise ValueError('Non-Random initialization is not currently supported. Please use init=\'random\' for now.')
else:
self.init = init
self.verbose = int(verbose)
# Initialize non-sklearn variables
self.num_neighbors = int(num_neighbors)
self.force_magnify_iters = int(force_magnify_iters)
self.perplexity_epsilon = float(perplexity_epsilon)
self.pre_momentum = float(pre_momentum)
self.post_momentum = float(post_momentum)
self.theta = float(theta)
        self.epssq = float(epssq)
self.device = int(device)
self.print_interval = int(print_interval)
        # Point dumping
self.dump_file = str(dump_file)
self.dump_points = bool(dump_points)
self.dump_interval = int(dump_interval)
# Viz
self.use_interactive = bool(use_interactive)
self.viz_server = str(viz_server)
self.viz_timeout = int(viz_timeout)
# Return style
if return_style not in ['once','snapshots']:
raise ValueError('Invalid return style...')
elif return_style == 'once':
self.return_style = 0
elif return_style == 'snapshots':
self.return_style = 1
self.num_snapshots = int(num_snapshots)
# Build the hooks for the BH T-SNE library
self._path = pkg_resources.resource_filename('tsnecuda','') # Load from current location
# self._faiss_lib = N.ctypeslib.load_library('libfaiss', self._path) # Load the ctypes library
# self._gpufaiss_lib = N.ctypeslib.load_library('libgpufaiss', self._path) # Load the ctypes library
self._lib = N.ctypeslib.load_library('libtsnecuda', self._path) # Load the ctypes library
# Hook the BH T-SNE function
self._lib.pymodule_bh_tsne.restype = None
self._lib.pymodule_bh_tsne.argtypes = [
N.ctypeslib.ndpointer(N.float32, ndim=2, flags='ALIGNED, F_CONTIGUOUS, WRITEABLE'), # result
N.ctypeslib.ndpointer(N.float32, ndim=2, flags='ALIGNED, CONTIGUOUS'), # points
ctypes.POINTER(N.ctypeslib.c_intp), # dims
ctypes.c_float, # Perplexity
ctypes.c_float, # Learning Rate
ctypes.c_float, # Magnitude Factor
ctypes.c_int, # Num Neighbors
ctypes.c_int, # Iterations
ctypes.c_int, # Iterations no progress
ctypes.c_int, # Force Magnify iterations
ctypes.c_float, # Perplexity search epsilon
ctypes.c_float, # pre-exaggeration momentum
ctypes.c_float, # post-exaggeration momentum
ctypes.c_float, # Theta
ctypes.c_float, # epssq
ctypes.c_float, # Minimum gradient norm
ctypes.c_int, # Initialization types
N.ctypeslib.ndpointer(N.float32, ndim=2, flags='ALIGNED, F_CONTIGUOUS'), # Initialization Data
ctypes.c_bool, # Dump points
N.ctypeslib.ndpointer(N.uint8, flags='ALIGNED, CONTIGUOUS'), # Dump File
ctypes.c_int, # Dump interval
ctypes.c_bool, # Use interactive
N.ctypeslib.ndpointer(N.uint8, flags='ALIGNED, CONTIGUOUS'), # Viz Server
ctypes.c_int, # Viz timeout
ctypes.c_int, # Verbosity
ctypes.c_int, # Print interval
ctypes.c_int, # GPU Device
ctypes.c_int, # Return style
ctypes.c_int ] # Number of snapshots
def fit_transform(self, X, y=None):
"""Fit X into an embedded space and return that transformed output.
Arguments:
X {array} -- Input array, shape: (n_points, n_dimensions)
Keyword Arguments:
y {None} -- Ignored (default: {None})
"""
# Setup points/embedding requirements
self.points = N.require(X, N.float32, ['CONTIGUOUS', 'ALIGNED'])
self.embedding = N.zeros(shape=(X.shape[0],self.n_components))
self.embedding = N.require(self.embedding , N.float32, ['F_CONTIGUOUS', 'ALIGNED', 'WRITEABLE'])
# Handle Initialization
if y is None:
self.initialization_type = 1
self.init_data = N.require(N.zeros((1,1)),N.float32,['CONTIGUOUS','ALIGNED'])
else:
self.initialization_type = 3
self.init_data = N.require(y, N.float32, ['F_CONTIGUOUS', 'ALIGNED'])
# Handle dumping and viz strings
self.dump_file_ = N.require(ord_string(self.dump_file), N.uint8, ['CONTIGUOUS', 'ALIGNED'])
self.viz_server_ = N.require(ord_string(self.viz_server), N.uint8, ['CONTIGUOUS', 'ALIGNED'])
self._lib.pymodule_bh_tsne(
self.embedding, # result
self.points, # points
self.points.ctypes.shape, # dims
ctypes.c_float(self.perplexity), # Perplexity
ctypes.c_float(self.learning_rate), # Learning Rate
ctypes.c_float(self.early_exaggeration), # Magnitude Factor
ctypes.c_int(self.num_neighbors), # Num Neighbors
ctypes.c_int(self.n_iter), # Iterations
ctypes.c_int(self.n_iter_without_progress), # Iterations no progress
ctypes.c_int(self.force_magnify_iters), # Force Magnify iterations
ctypes.c_float(self.perplexity_epsilon), # Perplexity search epsilon
ctypes.c_float(self.pre_momentum), # pre-exaggeration momentum
ctypes.c_float(self.post_momentum), # post-exaggeration momentum
ctypes.c_float(self.theta), # Theta
ctypes.c_float(self.epssq), # epssq
ctypes.c_float(self.min_grad_norm), # Minimum gradient norm
ctypes.c_int(self.initialization_type), # Initialization types
self.init_data, # Initialization Data
ctypes.c_bool(self.dump_points), # Dump points
self.dump_file_, # Dump File
ctypes.c_int(self.dump_interval), # Dump interval
ctypes.c_bool(self.use_interactive), # Use interactive
self.viz_server_, # Viz Server
ctypes.c_int(self.viz_timeout), # Viz timeout
ctypes.c_int(self.verbose), # Verbosity
ctypes.c_int(self.print_interval), # Print interval
ctypes.c_int(self.device), # GPU Device
ctypes.c_int(self.return_style), # Return style
ctypes.c_int(self.num_snapshots) ) # Number of snapshots
return self.embedding
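

# --- Editor's note: illustrative usage sketch, not part of the original module.
# It assumes the compiled libtsnecuda shared library and a CUDA device are
# available; the array shape below is made up for the example.
if __name__ == "__main__":
    demo_points = N.random.rand(1000, 64).astype(N.float32)  # 1000 samples, 64 dims
    demo_embedding = TSNE(n_components=2, perplexity=30.0).fit_transform(demo_points)
    print(demo_embedding.shape)  # expected: (1000, 2)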
| [
"ctypes.POINTER",
"numpy.require",
"pkg_resources.resource_filename",
"numpy.array",
"numpy.zeros",
"numpy.ctypeslib.ndpointer",
"ctypes.c_bool",
"ctypes.c_int",
"numpy.ctypeslib.load_library",
"ctypes.c_float"
] | [((3861, 3908), 'pkg_resources.resource_filename', 'pkg_resources.resource_filename', (['"""tsnecuda"""', '""""""'], {}), "('tsnecuda', '')\n", (3892, 3908), False, 'import pkg_resources\n'), ((4169, 4220), 'numpy.ctypeslib.load_library', 'N.ctypeslib.load_library', (['"""libtsnecuda"""', 'self._path'], {}), "('libtsnecuda', self._path)\n", (4193, 4220), True, 'import numpy as N\n'), ((6450, 6500), 'numpy.require', 'N.require', (['X', 'N.float32', "['CONTIGUOUS', 'ALIGNED']"], {}), "(X, N.float32, ['CONTIGUOUS', 'ALIGNED'])\n", (6459, 6500), True, 'import numpy as N\n'), ((6526, 6572), 'numpy.zeros', 'N.zeros', ([], {'shape': '(X.shape[0], self.n_components)'}), '(shape=(X.shape[0], self.n_components))\n', (6533, 6572), True, 'import numpy as N\n'), ((6597, 6675), 'numpy.require', 'N.require', (['self.embedding', 'N.float32', "['F_CONTIGUOUS', 'ALIGNED', 'WRITEABLE']"], {}), "(self.embedding, N.float32, ['F_CONTIGUOUS', 'ALIGNED', 'WRITEABLE'])\n", (6606, 6675), True, 'import numpy as N\n'), ((487, 516), 'numpy.array', 'N.array', (['([x for x in b] + [0])'], {}), '([x for x in b] + [0])\n', (494, 516), True, 'import numpy as N\n'), ((4400, 4487), 'numpy.ctypeslib.ndpointer', 'N.ctypeslib.ndpointer', (['N.float32'], {'ndim': '(2)', 'flags': '"""ALIGNED, F_CONTIGUOUS, WRITEABLE"""'}), "(N.float32, ndim=2, flags=\n 'ALIGNED, F_CONTIGUOUS, WRITEABLE')\n", (4421, 4487), True, 'import numpy as N\n'), ((4509, 4578), 'numpy.ctypeslib.ndpointer', 'N.ctypeslib.ndpointer', (['N.float32'], {'ndim': '(2)', 'flags': '"""ALIGNED, CONTIGUOUS"""'}), "(N.float32, ndim=2, flags='ALIGNED, CONTIGUOUS')\n", (4530, 4578), True, 'import numpy as N\n'), ((4605, 4639), 'ctypes.POINTER', 'ctypes.POINTER', (['N.ctypeslib.c_intp'], {}), '(N.ctypeslib.c_intp)\n', (4619, 4639), False, 'import ctypes\n'), ((5379, 5450), 'numpy.ctypeslib.ndpointer', 'N.ctypeslib.ndpointer', (['N.float32'], {'ndim': '(2)', 'flags': '"""ALIGNED, F_CONTIGUOUS"""'}), "(N.float32, ndim=2, flags='ALIGNED, F_CONTIGUOUS')\n", (5400, 5450), True, 'import numpy as N\n'), ((5535, 5594), 'numpy.ctypeslib.ndpointer', 'N.ctypeslib.ndpointer', (['N.uint8'], {'flags': '"""ALIGNED, CONTIGUOUS"""'}), "(N.uint8, flags='ALIGNED, CONTIGUOUS')\n", (5556, 5594), True, 'import numpy as N\n'), ((5719, 5778), 'numpy.ctypeslib.ndpointer', 'N.ctypeslib.ndpointer', (['N.uint8'], {'flags': '"""ALIGNED, CONTIGUOUS"""'}), "(N.uint8, flags='ALIGNED, CONTIGUOUS')\n", (5740, 5778), True, 'import numpy as N\n'), ((6947, 6999), 'numpy.require', 'N.require', (['y', 'N.float32', "['F_CONTIGUOUS', 'ALIGNED']"], {}), "(y, N.float32, ['F_CONTIGUOUS', 'ALIGNED'])\n", (6956, 6999), True, 'import numpy as N\n'), ((7426, 7457), 'ctypes.c_float', 'ctypes.c_float', (['self.perplexity'], {}), '(self.perplexity)\n', (7440, 7457), False, 'import ctypes\n'), ((7488, 7522), 'ctypes.c_float', 'ctypes.c_float', (['self.learning_rate'], {}), '(self.learning_rate)\n', (7502, 7522), False, 'import ctypes\n'), ((7556, 7595), 'ctypes.c_float', 'ctypes.c_float', (['self.early_exaggeration'], {}), '(self.early_exaggeration)\n', (7570, 7595), False, 'import ctypes\n'), ((7632, 7664), 'ctypes.c_int', 'ctypes.c_int', (['self.num_neighbors'], {}), '(self.num_neighbors)\n', (7644, 7664), False, 'import ctypes\n'), ((7698, 7723), 'ctypes.c_int', 'ctypes.c_int', (['self.n_iter'], {}), '(self.n_iter)\n', (7710, 7723), False, 'import ctypes\n'), ((7754, 7796), 'ctypes.c_int', 'ctypes.c_int', (['self.n_iter_without_progress'], {}), '(self.n_iter_without_progress)\n', (7766, 7796), False, 'import 
ctypes\n'), ((7839, 7877), 'ctypes.c_int', 'ctypes.c_int', (['self.force_magnify_iters'], {}), '(self.force_magnify_iters)\n', (7851, 7877), False, 'import ctypes\n'), ((7922, 7961), 'ctypes.c_float', 'ctypes.c_float', (['self.perplexity_epsilon'], {}), '(self.perplexity_epsilon)\n', (7936, 7961), False, 'import ctypes\n'), ((8007, 8040), 'ctypes.c_float', 'ctypes.c_float', (['self.pre_momentum'], {}), '(self.pre_momentum)\n', (8021, 8040), False, 'import ctypes\n'), ((8086, 8120), 'ctypes.c_float', 'ctypes.c_float', (['self.post_momentum'], {}), '(self.post_momentum)\n', (8100, 8120), False, 'import ctypes\n'), ((8167, 8193), 'ctypes.c_float', 'ctypes.c_float', (['self.theta'], {}), '(self.theta)\n', (8181, 8193), False, 'import ctypes\n'), ((8219, 8245), 'ctypes.c_float', 'ctypes.c_float', (['self.epssq'], {}), '(self.epssq)\n', (8233, 8245), False, 'import ctypes\n'), ((8271, 8305), 'ctypes.c_float', 'ctypes.c_float', (['self.min_grad_norm'], {}), '(self.min_grad_norm)\n', (8285, 8305), False, 'import ctypes\n'), ((8347, 8385), 'ctypes.c_int', 'ctypes.c_int', (['self.initialization_type'], {}), '(self.initialization_type)\n', (8359, 8385), False, 'import ctypes\n'), ((8480, 8511), 'ctypes.c_bool', 'ctypes.c_bool', (['self.dump_points'], {}), '(self.dump_points)\n', (8493, 8511), False, 'import ctypes\n'), ((8588, 8620), 'ctypes.c_int', 'ctypes.c_int', (['self.dump_interval'], {}), '(self.dump_interval)\n', (8600, 8620), False, 'import ctypes\n'), ((8654, 8689), 'ctypes.c_bool', 'ctypes.c_bool', (['self.use_interactive'], {}), '(self.use_interactive)\n', (8667, 8689), False, 'import ctypes\n'), ((8772, 8802), 'ctypes.c_int', 'ctypes.c_int', (['self.viz_timeout'], {}), '(self.viz_timeout)\n', (8784, 8802), False, 'import ctypes\n'), ((8834, 8860), 'ctypes.c_int', 'ctypes.c_int', (['self.verbose'], {}), '(self.verbose)\n', (8846, 8860), False, 'import ctypes\n'), ((8890, 8923), 'ctypes.c_int', 'ctypes.c_int', (['self.print_interval'], {}), '(self.print_interval)\n', (8902, 8923), False, 'import ctypes\n'), ((8958, 8983), 'ctypes.c_int', 'ctypes.c_int', (['self.device'], {}), '(self.device)\n', (8970, 8983), False, 'import ctypes\n'), ((9014, 9045), 'ctypes.c_int', 'ctypes.c_int', (['self.return_style'], {}), '(self.return_style)\n', (9026, 9045), False, 'import ctypes\n'), ((9078, 9110), 'ctypes.c_int', 'ctypes.c_int', (['self.num_snapshots'], {}), '(self.num_snapshots)\n', (9090, 9110), False, 'import ctypes\n'), ((6812, 6827), 'numpy.zeros', 'N.zeros', (['(1, 1)'], {}), '((1, 1))\n', (6819, 6827), True, 'import numpy as N\n')] |
from __future__ import absolute_import, division, print_function
import pytest
import json
import asyncio
import stripe
import urllib3
from stripe import six, util
from async_stripe.http_client import TornadoAsyncHTTPClient
pytestmark = pytest.mark.asyncio
VALID_API_METHODS = ("get", "post", "delete")
class StripeClientTestCase(object):
REQUEST_LIBRARIES = ["AsyncHTTPClient"]
@pytest.fixture
def request_mocks(self, mocker):
request_mocks = {}
for lib in self.REQUEST_LIBRARIES:
request_mocks[lib] = mocker.patch("async_stripe.http_client.%s" % (lib,))
return request_mocks
class TestNewDefaultHttpClient(StripeClientTestCase):
@pytest.fixture(autouse=True)
def setup_warnings(self, request_mocks):
original_filters = stripe.http_client.warnings.filters[:]
stripe.http_client.warnings.simplefilter("ignore")
yield
stripe.http_client.warnings.filters = original_filters
def check_default(self, none_libs, expected):
for lib in none_libs:
setattr(stripe.http_client, lib, None)
inst = stripe.http_client.new_default_http_client()
assert isinstance(inst, expected)
def test_new_default_http_client_tornado(self):
self.check_default((), TornadoAsyncHTTPClient)
class TestRetrySleepTimeDefaultHttpClient(StripeClientTestCase):
from contextlib import contextmanager
def assert_sleep_times(self, client, expected):
until = len(expected)
actual = list(
map(lambda i: client._sleep_time_seconds(i + 1), range(until))
)
assert expected == actual
@contextmanager
def mock_max_delay(self, new_value):
original_value = stripe.http_client.HTTPClient.MAX_DELAY
stripe.http_client.HTTPClient.MAX_DELAY = new_value
try:
yield self
finally:
stripe.http_client.HTTPClient.MAX_DELAY = original_value
def test_sleep_time_exponential_back_off(self):
client = stripe.http_client.new_default_http_client()
client._add_jitter_time = lambda t: t
with self.mock_max_delay(10):
self.assert_sleep_times(client, [0.5, 1.0, 2.0, 4.0, 8.0])
def test_initial_delay_as_minimum(self):
client = stripe.http_client.new_default_http_client()
client._add_jitter_time = lambda t: t * 0.001
initial_delay = stripe.http_client.HTTPClient.INITIAL_DELAY
self.assert_sleep_times(client, [initial_delay] * 5)
def test_maximum_delay(self):
client = stripe.http_client.new_default_http_client()
client._add_jitter_time = lambda t: t
max_delay = stripe.http_client.HTTPClient.MAX_DELAY
expected = [0.5, 1.0, max_delay, max_delay, max_delay]
self.assert_sleep_times(client, expected)
def test_retry_after_header(self):
client = stripe.http_client.new_default_http_client()
client._add_jitter_time = lambda t: t
# Prefer retry-after if it's bigger
assert 30 == client._sleep_time_seconds(
2, (None, 409, {"retry-after": "30"})
)
# Prefer default if it's bigger
assert 2 == client._sleep_time_seconds(
3, (None, 409, {"retry-after": "1"})
)
# Ignore crazy-big values
assert 1 == client._sleep_time_seconds(
2, (None, 409, {"retry-after": "300"})
)
def test_randomness_added(self):
client = stripe.http_client.new_default_http_client()
random_value = 0.8
client._add_jitter_time = lambda t: t * random_value
base_value = stripe.http_client.HTTPClient.INITIAL_DELAY * random_value
with self.mock_max_delay(10):
expected = [
stripe.http_client.HTTPClient.INITIAL_DELAY,
base_value * 2,
base_value * 4,
base_value * 8,
base_value * 16,
]
self.assert_sleep_times(client, expected)
def test_jitter_has_randomness_but_within_range(self):
client = stripe.http_client.new_default_http_client()
jittered_ones = set(
map(lambda _: client._add_jitter_time(1), list(range(100)))
)
assert len(jittered_ones) > 1
assert all(0.5 <= val <= 1 for val in jittered_ones)
class TestRetryConditionsDefaultHttpClient(StripeClientTestCase):
def test_should_retry_on_codes(self):
one_xx = list(range(100, 104))
two_xx = list(range(200, 209))
three_xx = list(range(300, 308))
four_xx = list(range(400, 431))
client = stripe.http_client.new_default_http_client()
client._max_network_retries = lambda: 1
codes = one_xx + two_xx + three_xx + four_xx
codes.remove(409)
# These status codes should not be retried by default.
for code in codes:
assert client._should_retry((None, code, None), None, 0) is False
# These status codes should be retried by default.
assert client._should_retry((None, 409, None), None, 0) is True
assert client._should_retry((None, 500, None), None, 0) is True
assert client._should_retry((None, 503, None), None, 0) is True
def test_should_retry_on_error(self, mocker):
client = stripe.http_client.new_default_http_client()
client._max_network_retries = lambda: 1
api_connection_error = mocker.Mock()
api_connection_error.should_retry = True
assert client._should_retry(None, api_connection_error, 0) is True
api_connection_error.should_retry = False
assert client._should_retry(None, api_connection_error, 0) is False
def test_should_retry_on_stripe_should_retry_true(self, mocker):
client = stripe.http_client.new_default_http_client()
client._max_network_retries = lambda: 1
headers = {"stripe-should-retry": "true"}
# Ordinarily, we would not retry a 400, but with the header as true, we would.
assert client._should_retry((None, 400, {}), None, 0) is False
assert client._should_retry((None, 400, headers), None, 0) is True
def test_should_retry_on_stripe_should_retry_false(self, mocker):
client = stripe.http_client.new_default_http_client()
client._max_network_retries = lambda: 1
headers = {"stripe-should-retry": "false"}
# Ordinarily, we would retry a 500, but with the header as false, we would not.
assert client._should_retry((None, 500, {}), None, 0) is True
assert client._should_retry((None, 500, headers), None, 0) is False
def test_should_retry_on_num_retries(self, mocker):
client = stripe.http_client.new_default_http_client()
max_test_retries = 10
client._max_network_retries = lambda: max_test_retries
api_connection_error = mocker.Mock()
api_connection_error.should_retry = True
assert (
client._should_retry(
None, api_connection_error, max_test_retries + 1
)
is False
)
assert (
client._should_retry((None, 409, None), None, max_test_retries + 1)
is False
)
class TestHTTPClient(object):
@pytest.fixture(autouse=True)
def setup_stripe(self):
orig_attrs = {"enable_telemetry": stripe.enable_telemetry}
stripe.enable_telemetry = False
yield
stripe.enable_telemetry = orig_attrs["enable_telemetry"]
async def test_sends_telemetry_on_second_request(self, mocker):
class TestClient(stripe.http_client.HTTPClient):
pass
stripe.enable_telemetry = True
url = "http://fake.url"
client = TestClient()
response_future = asyncio.Future()
response_future.set_result(["", 200, {"Request-Id": "req_123"}])
client.request = mocker.MagicMock(
return_value=response_future
)
_, code, _ = await client.request_with_retries("get", url, {}, None)
assert code == 200
client.request.assert_called_with("get", url, {}, None)
response_future = asyncio.Future()
response_future.set_result(["", 200, {"Request-Id": "req_234"}])
client.request = mocker.MagicMock(
return_value=response_future
)
_, code, _ = await client.request_with_retries("get", url, {}, None)
assert code == 200
args, _ = client.request.call_args
assert "X-Stripe-Client-Telemetry" in args[2]
telemetry = json.loads(args[2]["X-Stripe-Client-Telemetry"])
assert telemetry["last_request_metrics"]["request_id"] == "req_123"
class ClientTestBase(object):
@pytest.fixture
def request_mock(self, request_mocks):
return request_mocks[self.REQUEST_CLIENT.name]
@property
def valid_url(self, path="/foo"):
return "https://api.stripe.com%s" % (path,)
def make_request(self, method, url, headers, post_data):
client = self.REQUEST_CLIENT(verify_ssl_certs=True)
return client.request_with_retries(method, url, headers, post_data)
async def make_request_stream(self, method, url, headers, post_data):
client = self.REQUEST_CLIENT(verify_ssl_certs=True)
return await client.request_stream_with_retries(
method, url, headers, post_data
)
@pytest.fixture
def mock_response(self):
def mock_response(mock, body, code):
raise NotImplementedError(
"You must implement this in your test subclass"
)
return mock_response
@pytest.fixture
def mock_error(self):
def mock_error(mock, error):
raise NotImplementedError(
"You must implement this in your test subclass"
)
return mock_error
@pytest.fixture
def check_call(self):
def check_call(
mock, method, abs_url, headers, params, is_streaming=False
):
raise NotImplementedError(
"You must implement this in your test subclass"
)
return check_call
def test_request(self, request_mock, mock_response, check_call):
mock_response(request_mock, '{"foo": "baz"}', 200)
for method in VALID_API_METHODS:
abs_url = self.valid_url
data = ""
if method != "post":
abs_url = "%s?%s" % (abs_url, data)
data = None
headers = {"my-header": "header val"}
body, code, _ = self.make_request(method, abs_url, headers, data)
assert code == 200
assert body == '{"foo": "baz"}'
check_call(request_mock, method, abs_url, data, headers)
def test_request_stream(
self, mocker, request_mock, mock_response, check_call
):
for method in VALID_API_METHODS:
mock_response(request_mock, "some streamed content", 200)
abs_url = self.valid_url
data = ""
if method != "post":
abs_url = "%s?%s" % (abs_url, data)
data = None
headers = {"my-header": "header val"}
print(dir(self))
print("make_request_stream" in dir(self))
stream, code, _ = self.make_request_stream(
method, abs_url, headers, data
)
assert code == 200
# Here we need to convert and align all content on one type (string)
# as some clients return a string stream others a byte stream.
body_content = stream.read()
if hasattr(body_content, "decode"):
body_content = body_content.decode("utf-8")
assert body_content == "some streamed content"
mocker.resetall()
def test_exception(self, request_mock, mock_error):
mock_error(request_mock)
with pytest.raises(stripe.error.APIConnectionError):
self.make_request("get", self.valid_url, {}, None)
class TestTornadoAsyncHTTPClient:
# :TODO: Write tests for tornado client
pass
class TestAPIEncode(StripeClientTestCase):
def test_encode_dict(self):
body = {"foo": {"dob": {"month": 1}, "name": "bat"}}
values = [t for t in stripe.api_requestor._api_encode(body)]
assert ("foo[dob][month]", 1) in values
assert ("foo[name]", "bat") in values
def test_encode_array(self):
body = {"foo": [{"dob": {"month": 1}, "name": "bat"}]}
values = [t for t in stripe.api_requestor._api_encode(body)]
assert ("foo[0][dob][month]", 1) in values
assert ("foo[0][name]", "bat") in values
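

# --- Editor's note: illustrative sketch, not part of stripe or async_stripe. ---
# The retry tests above imply an un-jittered back-off that starts at the client's
# INITIAL_DELAY and doubles per attempt, capped at MAX_DELAY. The constants below
# (0.5s initial, 8s cap) are assumptions inferred from the expected schedule
# [0.5, 1.0, 2.0, 4.0, 8.0] asserted in the tests.
def _expected_backoff_seconds(num_retries, initial=0.5, maximum=8.0):
    """Return the assumed un-jittered sleep time for the given retry attempt."""
    return min(initial * (2 ** (num_retries - 1)), maximum)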
| [
"stripe.http_client.new_default_http_client",
"json.loads",
"pytest.raises",
"stripe.api_requestor._api_encode",
"pytest.fixture",
"stripe.http_client.warnings.simplefilter",
"asyncio.Future"
] | [((695, 723), 'pytest.fixture', 'pytest.fixture', ([], {'autouse': '(True)'}), '(autouse=True)\n', (709, 723), False, 'import pytest\n'), ((7283, 7311), 'pytest.fixture', 'pytest.fixture', ([], {'autouse': '(True)'}), '(autouse=True)\n', (7297, 7311), False, 'import pytest\n'), ((843, 893), 'stripe.http_client.warnings.simplefilter', 'stripe.http_client.warnings.simplefilter', (['"""ignore"""'], {}), "('ignore')\n", (883, 893), False, 'import stripe\n'), ((1119, 1163), 'stripe.http_client.new_default_http_client', 'stripe.http_client.new_default_http_client', ([], {}), '()\n', (1161, 1163), False, 'import stripe\n'), ((2034, 2078), 'stripe.http_client.new_default_http_client', 'stripe.http_client.new_default_http_client', ([], {}), '()\n', (2076, 2078), False, 'import stripe\n'), ((2297, 2341), 'stripe.http_client.new_default_http_client', 'stripe.http_client.new_default_http_client', ([], {}), '()\n', (2339, 2341), False, 'import stripe\n'), ((2577, 2621), 'stripe.http_client.new_default_http_client', 'stripe.http_client.new_default_http_client', ([], {}), '()\n', (2619, 2621), False, 'import stripe\n'), ((2898, 2942), 'stripe.http_client.new_default_http_client', 'stripe.http_client.new_default_http_client', ([], {}), '()\n', (2940, 2942), False, 'import stripe\n'), ((3488, 3532), 'stripe.http_client.new_default_http_client', 'stripe.http_client.new_default_http_client', ([], {}), '()\n', (3530, 3532), False, 'import stripe\n'), ((4100, 4144), 'stripe.http_client.new_default_http_client', 'stripe.http_client.new_default_http_client', ([], {}), '()\n', (4142, 4144), False, 'import stripe\n'), ((4644, 4688), 'stripe.http_client.new_default_http_client', 'stripe.http_client.new_default_http_client', ([], {}), '()\n', (4686, 4688), False, 'import stripe\n'), ((5329, 5373), 'stripe.http_client.new_default_http_client', 'stripe.http_client.new_default_http_client', ([], {}), '()\n', (5371, 5373), False, 'import stripe\n'), ((5806, 5850), 'stripe.http_client.new_default_http_client', 'stripe.http_client.new_default_http_client', ([], {}), '()\n', (5848, 5850), False, 'import stripe\n'), ((6271, 6315), 'stripe.http_client.new_default_http_client', 'stripe.http_client.new_default_http_client', ([], {}), '()\n', (6313, 6315), False, 'import stripe\n'), ((6724, 6768), 'stripe.http_client.new_default_http_client', 'stripe.http_client.new_default_http_client', ([], {}), '()\n', (6766, 6768), False, 'import stripe\n'), ((7800, 7816), 'asyncio.Future', 'asyncio.Future', ([], {}), '()\n', (7814, 7816), False, 'import asyncio\n'), ((8181, 8197), 'asyncio.Future', 'asyncio.Future', ([], {}), '()\n', (8195, 8197), False, 'import asyncio\n'), ((8588, 8636), 'json.loads', 'json.loads', (["args[2]['X-Stripe-Client-Telemetry']"], {}), "(args[2]['X-Stripe-Client-Telemetry'])\n", (8598, 8636), False, 'import json\n'), ((11967, 12013), 'pytest.raises', 'pytest.raises', (['stripe.error.APIConnectionError'], {}), '(stripe.error.APIConnectionError)\n', (11980, 12013), False, 'import pytest\n'), ((12335, 12373), 'stripe.api_requestor._api_encode', 'stripe.api_requestor._api_encode', (['body'], {}), '(body)\n', (12367, 12373), False, 'import stripe\n'), ((12597, 12635), 'stripe.api_requestor._api_encode', 'stripe.api_requestor._api_encode', (['body'], {}), '(body)\n', (12629, 12635), False, 'import stripe\n')] |
import sys
from PyQt5 import QtGui
from PyQt5.QtCore import QEvent, QPoint, Qt
from PyQt5.QtGui import QIcon
from PyQt5.QtWidgets import (QApplication, QDialog, QGroupBox, QMainWindow,
QTabWidget, QVBoxLayout, QWidget)
from sim2d_game_analyzer.fmdb_tab import FMDBTab
class MainWindow(QMainWindow):
title = "Sim2d Game Analyzer"
top = 500
left = 100
width = 70*4
height = 130*4
def __init__(self):
QMainWindow.__init__(self)
self.setGeometry(self.screen().geometry())
self.setWindowTitle(self.title)
self.setWindowIcon(QIcon("sim2d_game_analyzer/figures/icon.png"))
vbox = QVBoxLayout()
tabWidget = QTabWidget()
tabWidget.setFont(QtGui.QFont("Sanserif", 12))
self.fmdb_tab = FMDBTab()
tabWidget.addTab(self.fmdb_tab, FMDBTab.NAME)
vbox.addWidget(tabWidget)
wid = QWidget(self)
self.setCentralWidget(wid)
wid.setLayout(vbox)
if __name__ == "__main__":
app = QApplication(sys.argv)
mainwindow = MainWindow()
sys.exit(app.exec())
| [
"PyQt5.QtWidgets.QWidget",
"PyQt5.QtWidgets.QMainWindow.__init__",
"PyQt5.QtGui.QIcon",
"PyQt5.QtGui.QFont",
"PyQt5.QtWidgets.QApplication",
"PyQt5.QtWidgets.QTabWidget",
"PyQt5.QtWidgets.QVBoxLayout",
"sim2d_game_analyzer.fmdb_tab.FMDBTab"
] | [((1026, 1048), 'PyQt5.QtWidgets.QApplication', 'QApplication', (['sys.argv'], {}), '(sys.argv)\n', (1038, 1048), False, 'from PyQt5.QtWidgets import QApplication, QDialog, QGroupBox, QMainWindow, QTabWidget, QVBoxLayout, QWidget\n'), ((464, 490), 'PyQt5.QtWidgets.QMainWindow.__init__', 'QMainWindow.__init__', (['self'], {}), '(self)\n', (484, 490), False, 'from PyQt5.QtWidgets import QApplication, QDialog, QGroupBox, QMainWindow, QTabWidget, QVBoxLayout, QWidget\n'), ((671, 684), 'PyQt5.QtWidgets.QVBoxLayout', 'QVBoxLayout', ([], {}), '()\n', (682, 684), False, 'from PyQt5.QtWidgets import QApplication, QDialog, QGroupBox, QMainWindow, QTabWidget, QVBoxLayout, QWidget\n'), ((705, 717), 'PyQt5.QtWidgets.QTabWidget', 'QTabWidget', ([], {}), '()\n', (715, 717), False, 'from PyQt5.QtWidgets import QApplication, QDialog, QGroupBox, QMainWindow, QTabWidget, QVBoxLayout, QWidget\n'), ((798, 807), 'sim2d_game_analyzer.fmdb_tab.FMDBTab', 'FMDBTab', ([], {}), '()\n', (805, 807), False, 'from sim2d_game_analyzer.fmdb_tab import FMDBTab\n'), ((910, 923), 'PyQt5.QtWidgets.QWidget', 'QWidget', (['self'], {}), '(self)\n', (917, 923), False, 'from PyQt5.QtWidgets import QApplication, QDialog, QGroupBox, QMainWindow, QTabWidget, QVBoxLayout, QWidget\n'), ((609, 654), 'PyQt5.QtGui.QIcon', 'QIcon', (['"""sim2d_game_analyzer/figures/icon.png"""'], {}), "('sim2d_game_analyzer/figures/icon.png')\n", (614, 654), False, 'from PyQt5.QtGui import QIcon\n'), ((745, 772), 'PyQt5.QtGui.QFont', 'QtGui.QFont', (['"""Sanserif"""', '(12)'], {}), "('Sanserif', 12)\n", (756, 772), False, 'from PyQt5 import QtGui\n')] |
# pip install openpyxl
# pip install cuid
import os.path
import json
import datetime
from openpyxl import load_workbook
import cuid # https://github.com/necaris/cuid.py - create uuid's in the format that graphcool expects
SOURCE_XLSX = "./data/CLP_combined.xlsx"
EXTRACT_OUTPUT_DIR = "../server/extract"
SCHOOL_TITLES = ["ORGANISATION_ID", "ORGANISATION_NAME", "ORG_ELECTORATE", "P_ADDRESS1", "P_SUBURB", "P_STATE",
"P_POSTCODE", "S_ADDRESS1", "S_SUBURB", "S_STATE", "S_POSTCODE", "SCHOOL_NAME", "SCH_ELECTORATE",
"SCHOOL_ID", "SCHOOL_P_ADDRESS1",
"SCHOOL_P_SUBURB", "SCHOOL_P_STATE", "SCHOOL_P_POSTCODE", "SCHOOL_S_ADDRESS1", "SCHOOL_S_SUBURB",
"SCHOOL_S_STATE", "SCHOOL_S_POSTCODE", "LOCATION_NAME", "LOC_ELECTORATE", "LOC_S_ADDRESS1",
"LOC_S_SUBURB", "LOC_S_STATE", "LOC_S_POSTCODE"]
ORGANISATION_FIELDS = {"ORGANISATION_ID": "CLP_ORGANISATION_ID", "ORGANISATION_NAME": "NAME",
"ORG_ELECTORATE": "ELECTORATE", "S_ADDRESS1": "ADDRESS", "S_SUBURB": "SUBURB",
"S_STATE": "STATE", "S_POSTCODE": "POSTCODE", }
SCHOOL_FIELDS = {"SCHOOL_NAME": "NAME", "SCH_ELECTORATE": "ELECTORATE", "SCHOOL_ID": "CLP_SCHOOL_ID",
"ORGANISATION_ID": "CLP_ORGANISATION_ID",
"SCHOOL_S_ADDRESS1": "ADDRESS", "SCHOOL_S_SUBURB": "SUBURB", "SCHOOL_S_STATE": "STATE",
"SCHOOL_S_POSTCODE": "POSTCODE", }
LOCATION_FIELDS = {"LOCATION_NAME": "NAME", "LOC_ELECTORATE": "ELECTORATE", "SCHOOL_ID": "CLP_SCHOOL_ID",
"LOC_S_ADDRESS1": "ADDRESS", "LOC_S_SUBURB": "SUBURB", "LOC_S_STATE": "STATE",
"LOC_S_POSTCODE": "POSTCODE"}
TEACHER_TITLES = ["TEACHER_ID", "ORGANISATION_NAME", "SCHOOL_NAME", "TEACHER_NAME", "TITLE", "LNAME", "FNAME",
"TEACHER_LANGUAGES", "P_ADDRESS1", "P_ADDRESS2", "P_SUBURB", "P_STATE", "P_POSTCODE",
"TELEPHONE", "TEL_EVENING", "EMAIL", "MOBILE", "LEVEL_TAUGHT", "LEVEL_OF_EDUCATION",
"FIELD_OF_EDUCATION", "DEGREE_COUNTRY", "DEGREE_YEAR", "ORGANISATION_ID", "SCHOOL_ID"]
STUDENT_TITLES = ["SCHOOL_NAME", "SCHOOL_ID", "STUDENT_ID", "STUDENT_SRN", "LOCATION_NAME",
"STUDENT_LNAME", "STUDENT_FNAME", "DOB", "TEL", "LOCATION_NAME_1"]
TEACHER_FIELDS = {"TEACHER_ID": "CLP_TEACHER_ID", "ORGANISATION_NAME": "ORGANISATION_NAME",
"SCHOOL_NAME": "SCHOOL_NAME", "TITLE": "TITLE",
"LNAME": "FAMILY_NAME", "FNAME": "GIVEN_NAMES", "TEACHER_LANGUAGES": "LANGUAGES",
"P_ADDRESS1": "ADDRESS1", "P_ADDRESS2": "ADDRESS2", "P_SUBURB": "SUBURB",
"P_STATE": "STATE", "P_POSTCODE": "POSTCODE",
"TELEPHONE": "DAY_PHONE", "TEL_EVENING": "EVENING_PHONE", "EMAIL": "EMAIL", "MOBILE": "MOBILE",
"LEVEL_TAUGHT": "LEVEL_TAUGHT", "LEVEL_OF_EDUCATION": "EDUCATION_LEVEL",
"FIELD_OF_EDUCATION": "EDUCATION_FIELD", "DEGREE_COUNTRY": "EDUCATION_COUNTRY",
"DEGREE_YEAR": "EDUCATION_YEAR",
"ORGANISATION_ID": "ORGANISATION_ID", "SCHOOL_ID": "SCHOOL_ID", }
STUDENT_FIELDS = {"SCHOOL_NAME": "SCHOOL_NAME", "SCHOOL_ID": "SCHOOL_ID", "STUDENT_ID": "CLP_STUDENT_ID",
"STUDENT_SRN": "SRN", "LOCATION_NAME": "LOCATION",
"STUDENT_LNAME": "FAMILY_NAME", "STUDENT_FNAME": "GIVEN_NAMES", "DOB": "DATE_OF_BIRTH",
"TEL": "PHONE", "LOCATION_NAME_1": "DAY_SCHOOL", }
class Sheet:
"Data container object to hold the contents of one sheet within an excel spreadsheet"
def __init__(self, name, titles=None, rows=None):
self.name = name
self.titles = titles or []
self.rows = rows or []
def convert_row_to_dict(titles, row):
data = {}
for (i, cell) in enumerate(row):
        if cell.value is not None:
data[titles[i]] = str(cell.value)
return data
def convert_xlsx(xlsx_file):
"""Convert the given XLSX spreadsheet to iterable of Sheet objects,
in which row has been converted into a dictionary"""
work_book = load_workbook(filename=xlsx_file, read_only=True, data_only=True)
for sheet in work_book:
rows = [x for x in sheet.iter_rows()]
if rows:
titles = [cell.value for cell in rows[0]]
dicts = [convert_row_to_dict(titles, row) for row in rows[1:]]
yield Sheet(sheet.title, titles, dicts)
else:
yield Sheet(sheet.title)
def to_camel(s):
"""Convert an underscored title into camel case. 'PARENT_ORGANISATION_ID' => 'parentOrganisationId'"""
bits = [(x.lower() if i == 0 else x.title())
for (i, x) in enumerate(s.split("_"))]
return "".join(bits)
def relative_to_absolute(relative_path):
path_to_py = os.path.abspath(os.path.dirname(__file__))
return os.path.join(path_to_py, relative_path)
def extract(fields, row_as_dict):
data = {}
for (k, v) in fields.items():
data[to_camel(v)] = row_as_dict[k]
return data
def process_sheet(sheet, titles, field_defns):
if titles != sheet.titles:
print("Sheet doesn't have expected titles:", [(i, x) for (i, x) in enumerate(titles) if x != sheet.titles[i]])
return []
structs = [[extract(defn, x) for x in sheet.rows] for defn in field_defns]
return structs
def unique(key, dicts):
t = {x[key]: x for x in dicts}
return t.values()
def now_as_iso8601():
return datetime.datetime.now().replace(microsecond=0).isoformat() + "Z"
def inject_required(type_name, dicts):
    "Inject the fields that the graphcool import format requires"
for x in dicts:
x["_typeName"] = type_name
x["id"] = cuid.cuid()
x["createdAt"] = x["updatedAt"] = now_as_iso8601()
return list(dicts)
def prepare_organisations(organisations):
unique_orgs = unique("clpOrganisationId", organisations)
fat_orgs = inject_required("ClpOrganisation", unique_orgs)
return fat_orgs
def prepare_schools(schools):
uniques = unique("clpSchoolId", schools)
injected = inject_required("ClpSchool", uniques)
return injected
def prepare_locations(locations):
    # There are multiple locations, each of which is identical except for being related to a different school.
# We have to collect all the schools that meet at the same location.
uniques = {}
for x in locations:
# get an existing location with the given name, or add the new location
location = uniques.setdefault(x["name"], x)
related_schools = location.setdefault("schools", list())
related_schools.append(x.pop("clpSchoolId"))
injected = inject_required("ClpLocation", uniques.values())
# FIX THIS - Current extract doesn't include the CLP location id :( Make one up for the time being
for x in injected:
x["clpLocationId"] = cuid.cuid()
return injected
def convert_dob_to_datetime(s):
"Convert the string from 99/MON/YY to a ISO date"
dt = datetime.datetime.strptime(s, "%d/%b/%y")
return dt.isoformat() + ".0Z" # GraphCool import insists on microseconds, hence the ".0"
def prepare_students(students):
uniques = unique("clpStudentId", students)
injected = inject_required("ClpStudent", uniques)
for x in injected:
x["dateOfBirth"] = convert_dob_to_datetime(x["dateOfBirth"])
return injected
def prepare_teachers(teachers):
# Like locations, the same teacher can have multiple records,
    # each of which is identical except for being related to a different school.
# We have to collect all the schools that the same teacher is teaching at.
uniques = {}
for x in teachers:
# get an existing teacher with that id, or add the new teacher record
teacher = uniques.setdefault(x["clpTeacherId"], x)
related_schools = teacher.setdefault("schools", list())
related_schools.append(x.pop("schoolId"))
injected = inject_required("ClpTeacher", uniques.values())
return injected
def extract_from_xlsx(file_path):
for sheet in convert_xlsx(file_path):
if sheet.name == "SCHOOL-ORG":
(organisations, schools, locations) = process_sheet(
sheet, SCHOOL_TITLES, [ORGANISATION_FIELDS, SCHOOL_FIELDS, LOCATION_FIELDS])
elif sheet.name == "Teacher":
(teachers, ) = process_sheet(sheet, TEACHER_TITLES, [TEACHER_FIELDS])
elif sheet.name == "Student":
(students, ) = process_sheet(sheet, STUDENT_TITLES, [STUDENT_FIELDS])
else:
print("Ignoring sheet:", sheet.name)
return (organisations, schools, locations, teachers, students)
def copy_without(dicts, *keys_to_remove):
    "Return an iterable of copies of the given dicts with all the given keys removed"
copies = [x.copy() for x in dicts]
for d in copies:
for to_remove in keys_to_remove:
d.pop(to_remove, None)
return copies
def write_nodes(*list_of_lists):
for (i, one_list) in enumerate(list_of_lists):
nodes_dir = relative_to_absolute(os.path.join(EXTRACT_OUTPUT_DIR + str(i), "nodes"))
os.makedirs(nodes_dir, exist_ok=True)
path = os.path.join(nodes_dir, "1.json")
with open(path, "w") as f:
nodes = {
"valueType": "nodes",
"values": one_list
}
f.write(json.dumps(nodes))
def write_relations(list_of_lists):
for (i, one_list) in enumerate(list_of_lists):
nodes_dir = relative_to_absolute(os.path.join(EXTRACT_OUTPUT_DIR + "-relations" + str(i), "relations"))
os.makedirs(nodes_dir, exist_ok=True)
path = os.path.join(nodes_dir, "1.json")
with open(path, "w") as f:
nodes = {
"valueType": "relations",
"values": list(one_list)
}
f.write(json.dumps(nodes))
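

# Editor's note (illustration only): the two writers above produce files shaped
# for the graphcool importer. For example, extract0/nodes/1.json would contain:
#
#   {"valueType": "nodes",
#    "values": [{"_typeName": "ClpOrganisation", "id": "<cuid>",
#                "createdAt": "...", "updatedAt": "...", "name": "...", ...}]}
#
# and each relations file contains {"valueType": "relations", "values": [...]}.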
def chunks(n, l):
"""Yield n successive similar-sized chunks from l."""
chunk_size = 1 + len(l) // n
for i in range(0, len(l), chunk_size):
yield l[i:i + chunk_size]
def prepare(raw_organisations, raw_schools, raw_locations, raw_teachers, raw_students):
return (
prepare_organisations(raw_organisations),
prepare_schools(raw_schools),
prepare_locations(raw_locations),
prepare_teachers(raw_teachers),
prepare_students(raw_students)
)
def make_relation(entity1, id1, field1, entity2, id2, field2):
return [
{"_typeName": entity1, "id": id1, "fieldName": field1},
{"_typeName": entity2, "id": id2, "fieldName": field2}
]
def generate_relations(organisations, schools, locations, teachers, students):
# Build school -> organisation relations
org_keys = {x["clpOrganisationId"]: x["id"] for x in organisations}
yield [make_relation("ClpOrganisation", org_keys[x["clpOrganisationId"]], "schools",
"ClpSchool", x["id"], "organisation") for x in schools]
# Build location -> school relations
school_keys = {x["clpSchoolId"]: x["id"] for x in schools}
yield [make_relation("ClpLocation", location["id"], "schools",
"ClpSchool", school_keys[schoolId], "locations")
for location in locations for schoolId in location.get("schools", [])]
# Build teacher -> school relations
yield [make_relation("ClpTeacher", teacher["id"], "schools",
"ClpSchool", school_keys[schoolId], "teachers")
for teacher in teachers for schoolId in teacher.get("schools", [])]
# Build student -> school relations
yield [make_relation("ClpStudent", student["id"], "school",
"ClpSchool", school_keys[student["schoolId"]], "students")
for student in students if student["schoolId"] in school_keys]
def main():
xlsx_path = relative_to_absolute(SOURCE_XLSX)
raw_collections = extract_from_xlsx(xlsx_path)
(organisations, schools, locations, teachers, students) = prepare(*raw_collections)
write_nodes(
organisations,
copy_without(schools, "clpOrganisationId"),
copy_without(locations, "schools"),
copy_without(teachers, "organisationId", "organisationName", "schools", "schoolName"),
*chunks(3, copy_without(students, "schoolId", "schoolName", "location")))
write_relations(generate_relations(organisations, schools, locations, teachers, students))
if __name__ == "__main__":
main()
| [
"openpyxl.load_workbook",
"datetime.datetime.strptime",
"json.dumps",
"cuid.cuid",
"datetime.datetime.now"
] | [((4132, 4197), 'openpyxl.load_workbook', 'load_workbook', ([], {'filename': 'xlsx_file', 'read_only': '(True)', 'data_only': '(True)'}), '(filename=xlsx_file, read_only=True, data_only=True)\n', (4145, 4197), False, 'from openpyxl import load_workbook\n'), ((7045, 7086), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (['s', '"""%d/%b/%y"""'], {}), "(s, '%d/%b/%y')\n", (7071, 7086), False, 'import datetime\n'), ((5746, 5757), 'cuid.cuid', 'cuid.cuid', ([], {}), '()\n', (5755, 5757), False, 'import cuid\n'), ((6916, 6927), 'cuid.cuid', 'cuid.cuid', ([], {}), '()\n', (6925, 6927), False, 'import cuid\n'), ((9449, 9466), 'json.dumps', 'json.dumps', (['nodes'], {}), '(nodes)\n', (9459, 9466), False, 'import json\n'), ((9938, 9955), 'json.dumps', 'json.dumps', (['nodes'], {}), '(nodes)\n', (9948, 9955), False, 'import json\n'), ((5503, 5526), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (5524, 5526), False, 'import datetime\n')] |
import os
import sys
from lxml import html
import pathlib
import json
import m3u8
from seleniumwire import webdriver
from selenium.common.exceptions import TimeoutException, NoSuchElementException
from selenium.webdriver.firefox.options import Options as FirefoxOptions
IFRAME_CSS_SELECTOR = '.iframe-container>iframe'
# Disable
def blockPrint():
sys.stdout = open(os.devnull, 'w')
# Restore
def enablePrint():
sys.stdout = sys.__stdout__
class OriginChannels():
def __init__(self, fhdhr, origin):
self.fhdhr = fhdhr
self.origin = origin
self.cache_dir = self.fhdhr.config.dict["filedir"]["epg_cache"]["origin"]["top"]
self.m3ucache = pathlib.Path(self.cache_dir).joinpath('m3ucache.json')
self.cached_m3u = {}
self.load_m3u_cache()
def load_m3u_cache(self):
if os.path.isfile(self.m3ucache):
self.fhdhr.logger.info("Loading Previously Saved Channel m3u.")
with open(self.m3ucache, 'r') as m3ufile:
self.cached_m3u = json.load(m3ufile)
def save_m3u_cache(self):
self.fhdhr.logger.info("Saving Channel m3u cache.")
with open(self.m3ucache, 'w') as m3ufile:
m3ufile.write(json.dumps(self.cached_m3u, indent=4))
def get_channels(self):
channel_list = []
chan_names, chan_urls = self.scrape_channels()
chan_number_index = 1
for name, url in zip(chan_names, chan_urls):
chan_dict = {
"name": name.rstrip(),
"number": chan_number_index,
"callsign": self.format_callsign(url),
}
channel_list.append(chan_dict)
chan_number_index += 1
return channel_list
def get_channel_stream(self, chandict, allchandict):
caching = True
streamlist = []
streamdict = {}
if chandict["callsign"] in list(self.cached_m3u):
streamurl = self.cached_m3u[chandict["callsign"]]
else:
streamurl = self.get_ustvgo_stream(chandict)
# if self.fhdhr.config.dict["origin"]["force_best"]:
streamurl = self.m3u8_beststream(streamurl)
streamdict = {"number": chandict["number"], "stream_url": streamurl}
streamlist.append(streamdict)
return streamlist, caching
def m3u8_beststream(self, m3u8_url):
bestStream = None
videoUrlM3u = m3u8.load(m3u8_url)
if not videoUrlM3u.is_variant:
return m3u8_url
for videoStream in videoUrlM3u.playlists:
if not bestStream:
bestStream = videoStream
elif videoStream.stream_info.bandwidth > bestStream.stream_info.bandwidth:
bestStream = videoStream
        # Return the highest-bandwidth variant if one was found, else the original URL.
        if bestStream:
            return bestStream.absolute_uri
        else:
            return m3u8_url
def scrape_channels(self):
channels_url = "https://ustvgo.tv/"
chanpage = self.fhdhr.web.session.get(channels_url)
tree = html.fromstring(chanpage.content)
channel_names_xpath = "/html/body/div[1]/div[1]/div/div[2]/div/div/div/article/div[1]/ol/li[*]/strong/a/text()"
channel_urls_xpath = "/html/body/div[1]/div[1]/div/div[2]/div/div/div/article/div[1]/ol/li[*]/strong/a/@href"
chan_names = tree.xpath(channel_names_xpath)
chan_urls = tree.xpath(channel_urls_xpath)
return chan_names, chan_urls
def format_callsign(self, url):
callsign = (url
.split('/')[-2]
.replace('-live', '')
.replace('-channel', '')
.replace('-free', '')
.replace('-streaming', ''))
return callsign
def get_ustvgo_stream(self, chandict):
driver = self.get_firefox_driver()
blockPrint()
driver.get("https://ustvgo.tv/" + chandict["callsign"])
enablePrint()
# Get iframe
iframe = None
try:
iframe = driver.find_element_by_css_selector(IFRAME_CSS_SELECTOR)
except NoSuchElementException:
self.fhdhr.logger.error('Video frame is not found for channel')
return None
# Detect VPN-required channels
try:
driver.switch_to.frame(iframe)
driver.find_element_by_xpath("//*[text()='This channel requires our VPN to watch!']")
need_vpn = True
except NoSuchElementException:
need_vpn = False
finally:
driver.switch_to.default_content()
if need_vpn:
self.fhdhr.logger.warning('Channel needs VPN to be grabbed.')
return None
# Autoplay
iframe.click()
try:
playlist = driver.wait_for_request('/playlist.m3u8', timeout=10)
except TimeoutException:
self.fhdhr.logger.error('Channel m3u8 not found.')
return None
streamurl = str(playlist)
driver.close()
driver.quit()
self.cached_m3u[chandict["callsign"]] = streamurl
self.save_m3u_cache()
return streamurl
def get_firefox_driver(self):
ff_options = FirefoxOptions()
ff_options.add_argument('--headless')
firefox_profile = webdriver.FirefoxProfile()
firefox_profile.set_preference('permissions.default.image', 2)
firefox_profile.set_preference('dom.ipc.plugins.enabled.libflashplayer.so', 'false')
firefox_profile.set_preference('dom.disable_beforeunload', True)
firefox_profile.set_preference('browser.tabs.warnOnClose', False)
firefox_profile.set_preference('media.volume_scale', '0.0')
set_seleniumwire_options = {
'connection_timeout': None,
'verify_ssl': False,
'suppress_connection_errors': True
}
driver = webdriver.Firefox(seleniumwire_options=set_seleniumwire_options, options=ff_options, firefox_profile=firefox_profile)
return driver
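

# Editor's note (illustration only, not part of fHDHR): expected call flow,
# assuming an already-initialised plugin instance named `origin_channels`:
#
#   channels = origin_channels.get_channels()
#   #  -> [{"name": ..., "number": 1, "callsign": ...}, ...]
#   streams, caching = origin_channels.get_channel_stream(channels[0], channels)
#   #  -> ([{"number": 1, "stream_url": "https://.../playlist.m3u8"}], True)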
| [
"pathlib.Path",
"m3u8.load",
"lxml.html.fromstring",
"json.dumps",
"os.path.isfile",
"seleniumwire.webdriver.Firefox",
"selenium.webdriver.firefox.options.Options",
"seleniumwire.webdriver.FirefoxProfile",
"json.load"
] | [((846, 875), 'os.path.isfile', 'os.path.isfile', (['self.m3ucache'], {}), '(self.m3ucache)\n', (860, 875), False, 'import os\n'), ((2466, 2485), 'm3u8.load', 'm3u8.load', (['m3u8_url'], {}), '(m3u8_url)\n', (2475, 2485), False, 'import m3u8\n'), ((3068, 3101), 'lxml.html.fromstring', 'html.fromstring', (['chanpage.content'], {}), '(chanpage.content)\n', (3083, 3101), False, 'from lxml import html\n'), ((5227, 5243), 'selenium.webdriver.firefox.options.Options', 'FirefoxOptions', ([], {}), '()\n', (5241, 5243), True, 'from selenium.webdriver.firefox.options import Options as FirefoxOptions\n'), ((5317, 5343), 'seleniumwire.webdriver.FirefoxProfile', 'webdriver.FirefoxProfile', ([], {}), '()\n', (5341, 5343), False, 'from seleniumwire import webdriver\n'), ((6008, 6130), 'seleniumwire.webdriver.Firefox', 'webdriver.Firefox', ([], {'seleniumwire_options': 'set_seleniumwire_options', 'options': 'ff_options', 'firefox_profile': 'firefox_profile'}), '(seleniumwire_options=set_seleniumwire_options, options=\n ff_options, firefox_profile=firefox_profile)\n', (6025, 6130), False, 'from seleniumwire import webdriver\n'), ((689, 717), 'pathlib.Path', 'pathlib.Path', (['self.cache_dir'], {}), '(self.cache_dir)\n', (701, 717), False, 'import pathlib\n'), ((1041, 1059), 'json.load', 'json.load', (['m3ufile'], {}), '(m3ufile)\n', (1050, 1059), False, 'import json\n'), ((1227, 1264), 'json.dumps', 'json.dumps', (['self.cached_m3u'], {'indent': '(4)'}), '(self.cached_m3u, indent=4)\n', (1237, 1264), False, 'import json\n')] |
import sys
import threading
import logging
import time
logger = logging.getLogger("interchange.strategy.base")
class BaseStrategy(object):
"""Implements threshold-interval based flow control.
The overall goal is to trap the flow of apps from the
workflow, measure it and redirect it the appropriate executors for
processing.
This is based on the following logic:
.. code-block:: none
BEGIN (INTERVAL, THRESHOLD, callback) :
start = current_time()
while (current_time()-start < INTERVAL) :
count = get_events_since(start)
if count >= THRESHOLD :
break
callback()
This logic ensures that the callbacks are activated with a maximum delay
of `interval` for systems with infrequent events as well as systems which would
generate large bursts of events.
Once a callback is triggered, the callback generally runs a strategy
    method on the sites available as well as the task queue.
TODO: When the debug logs are enabled this module emits duplicate messages.
This issue needs more debugging. What I've learnt so far is that the duplicate
messages are present only when the timer thread is started, so this could be
from a duplicate logger being added by the thread.
"""
def __init__(self, *args, threshold=20, interval=5):
"""Initialize the flowcontrol object.
We start the timer thread here
Parameters
----------
- threshold (int) : Tasks after which the callback is triggered
- interval (int) : seconds after which timer expires
"""
self.interchange = None
self.threshold = threshold
self.interval = interval
self.cb_args = args
self.callback = self.strategize
self._handle = None
self._event_count = 0
self._event_buffer = []
self._wake_up_time = time.time() + 1
self._kill_event = threading.Event()
self._thread = threading.Thread(target=self._wake_up_timer, args=(self._kill_event,))
self._thread.daemon = True
def start(self, interchange):
"""Actually start the strategy
Parameters
----------
interchange: funcx.executors.high_throughput.interchange.Interchange
Interchange to bind the strategy to
"""
self.interchange = interchange
if hasattr(interchange.config, 'provider'):
logger.debug("Strategy bounds-> init:{}, min:{}, max:{}".format(
interchange.config.provider.init_blocks,
interchange.config.provider.min_blocks,
interchange.config.provider.max_blocks))
self._thread.start()
    def strategize(self, *args, **kwargs):
        """ Strategize is called every time the threshold or the interval is hit
"""
logger.debug("Strategize called with {} {}".format(args, kwargs))
def _wake_up_timer(self, kill_event):
"""Internal. This is the function that the thread will execute.
waits on an event so that the thread can make a quick exit when close() is called
Args:
- kill_event (threading.Event) : Event to wait on
"""
while True:
prev = self._wake_up_time
# Waiting for the event returns True only when the event
# is set, usually by the parent thread
time_to_die = kill_event.wait(float(max(prev - time.time(), 0)))
if time_to_die:
return
if prev == self._wake_up_time:
self.make_callback(kind='timer')
else:
print("Sleeping a bit more")
def notify(self, event_id):
"""Let the FlowControl system know that there is an event.
This method is to be called from the Interchange to notify the flowcontrol
"""
self._event_buffer.extend([event_id])
self._event_count += 1
if self._event_count >= self.threshold:
logger.debug("Eventcount >= threshold")
self.make_callback(kind="event")
def make_callback(self, kind=None):
"""Makes the callback and resets the timer.
KWargs:
- kind (str): Default=None, used to pass information on what
triggered the callback
"""
self._wake_up_time = time.time() + self.interval
self.callback(tasks=self._event_buffer, kind=kind)
self._event_buffer = []
def close(self):
"""Merge the threads and terminate."""
self._kill_event.set()
self._thread.join()
class Timer(object):
"""This timer is a simplified version of the FlowControl timer.
This timer does not employ notify events.
This is based on the following logic :
.. code-block:: none
BEGIN (INTERVAL, THRESHOLD, callback) :
start = current_time()
while (current_time()-start < INTERVAL) :
wait()
break
callback()
"""
def __init__(self, callback, *args, interval=5):
"""Initialize the flowcontrol object
We start the timer thread here
        Args:
            - callback (callable) : function invoked each time the timer fires
            - *args : positional arguments passed through to the callback
        KWargs:
            - interval (int) : seconds after which the timer expires
"""
self.interval = interval
self.cb_args = args
self.callback = callback
self._wake_up_time = time.time() + 1
self._kill_event = threading.Event()
self._thread = threading.Thread(target=self._wake_up_timer, args=(self._kill_event,))
self._thread.daemon = True
self._thread.start()
def _wake_up_timer(self, kill_event):
"""Internal. This is the function that the thread will execute.
waits on an event so that the thread can make a quick exit when close() is called
Args:
- kill_event (threading.Event) : Event to wait on
"""
# Sleep till time to wake up
while True:
prev = self._wake_up_time
# Waiting for the event returns True only when the event
# is set, usually by the parent thread
time_to_die = kill_event.wait(float(max(prev - time.time(), 0)))
if time_to_die:
return
if prev == self._wake_up_time:
self.make_callback(kind='timer')
else:
print("Sleeping a bit more")
def make_callback(self, kind=None):
"""Makes the callback and resets the timer.
"""
self._wake_up_time = time.time() + self.interval
self.callback(*self.cb_args)
def close(self):
"""Merge the threads and terminate.
"""
self._kill_event.set()
self._thread.join()
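

# --- Editor's note: minimal usage sketch, not part of the original module. ---
# Timer fires `callback(*args)` every `interval` seconds until close() is called.
if __name__ == "__main__":
    demo_timer = Timer(lambda: print("tick"), interval=1)
    time.sleep(3.5)   # expect roughly three ticks
    demo_timer.close()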
| [
"logging.getLogger",
"threading.Thread",
"threading.Event",
"time.time"
] | [((65, 111), 'logging.getLogger', 'logging.getLogger', (['"""interchange.strategy.base"""'], {}), "('interchange.strategy.base')\n", (82, 111), False, 'import logging\n'), ((1985, 2002), 'threading.Event', 'threading.Event', ([], {}), '()\n', (2000, 2002), False, 'import threading\n'), ((2026, 2096), 'threading.Thread', 'threading.Thread', ([], {'target': 'self._wake_up_timer', 'args': '(self._kill_event,)'}), '(target=self._wake_up_timer, args=(self._kill_event,))\n', (2042, 2096), False, 'import threading\n'), ((5642, 5659), 'threading.Event', 'threading.Event', ([], {}), '()\n', (5657, 5659), False, 'import threading\n'), ((5683, 5753), 'threading.Thread', 'threading.Thread', ([], {'target': 'self._wake_up_timer', 'args': '(self._kill_event,)'}), '(target=self._wake_up_timer, args=(self._kill_event,))\n', (5699, 5753), False, 'import threading\n'), ((1942, 1953), 'time.time', 'time.time', ([], {}), '()\n', (1951, 1953), False, 'import time\n'), ((4402, 4413), 'time.time', 'time.time', ([], {}), '()\n', (4411, 4413), False, 'import time\n'), ((5598, 5609), 'time.time', 'time.time', ([], {}), '()\n', (5607, 5609), False, 'import time\n'), ((6748, 6759), 'time.time', 'time.time', ([], {}), '()\n', (6757, 6759), False, 'import time\n'), ((3491, 3502), 'time.time', 'time.time', ([], {}), '()\n', (3500, 3502), False, 'import time\n'), ((6388, 6399), 'time.time', 'time.time', ([], {}), '()\n', (6397, 6399), False, 'import time\n')] |
import torch
import torchvision
import matplotlib
import matplotlib.pyplot as plt
from PIL import Image
from captum.attr import GuidedGradCam, GuidedBackprop
from captum.attr import LayerActivation, LayerConductance, LayerGradCam
from data_utils import *
from image_utils import *
from captum_utils import *
import numpy as np
from visualizers import GradCam
plt.rcParams['figure.figsize'] = (10.0, 8.0) # set default size of plots
plt.rcParams['image.interpolation'] = 'nearest'
plt.rcParams['image.cmap'] = 'gray'
X, y, class_names = load_imagenet_val(num=5)
# FOR THIS SECTION ONLY, we need to use gradients. We introduce a new model we will use explicitly for GradCAM for this.
gc_model = torchvision.models.squeezenet1_1(pretrained=True)
gc = GradCam()
X_tensor = torch.cat([preprocess(Image.fromarray(x)) for x in X], dim=0).requires_grad_(True)
y_tensor = torch.LongTensor(y)
# Guided Back-Propagation
gbp_result = gc.guided_backprop(X_tensor,y_tensor, gc_model)
plt.figure(figsize=(24, 24))
for i in range(gbp_result.shape[0]):
plt.subplot(1, 5, i + 1)
img = gbp_result[i]
img = rescale(img)
plt.imshow(img)
plt.title(class_names[y[i]])
plt.axis('off')
plt.gcf().tight_layout()
plt.savefig('visualization/guided_backprop.png')
# GradCam
# GradCAM. We have given you the module (=layer) that we need to capture gradients from; you can see it in the conv_module variable below
gc_model = torchvision.models.squeezenet1_1(pretrained=True)
for param in gc_model.parameters():
param.requires_grad = True
X_tensor = torch.cat([preprocess(Image.fromarray(x)) for x in X], dim=0).requires_grad_(True)
y_tensor = torch.LongTensor(y)
gradcam_result = gc.grad_cam(X_tensor, y_tensor, gc_model)
plt.figure(figsize=(24, 24))
for i in range(gradcam_result.shape[0]):
gradcam_val = gradcam_result[i]
img = X[i] + (matplotlib.cm.jet(gradcam_val)[:,:,:3]*255)
img = img / np.max(img)
plt.subplot(1, 5, i + 1)
plt.imshow(img)
plt.title(class_names[y[i]])
plt.axis('off')
plt.gcf().tight_layout()
plt.savefig('visualization/gradcam.png')
# As a final step, we can combine GradCam and Guided Backprop to get Guided GradCam.
X_tensor = torch.cat([preprocess(Image.fromarray(x)) for x in X], dim=0).requires_grad_(True)
y_tensor = torch.LongTensor(y)
gradcam_result = gc.grad_cam(X_tensor, y_tensor, gc_model)
gbp_result = gc.guided_backprop(X_tensor, y_tensor, gc_model)
plt.figure(figsize=(24, 24))
for i in range(gradcam_result.shape[0]):
gbp_val = gbp_result[i]
gradcam_val = np.expand_dims(gradcam_result[i], axis=2)
# Pointwise multiplication and normalization of the gradcam and guided backprop results (2 lines)
img = gradcam_val * gbp_val
img = np.expand_dims(img.transpose(2, 0, 1), axis=0)
img = np.float32(img)
img = torch.from_numpy(img)
img = deprocess(img)
plt.subplot(1, 5, i + 1)
plt.imshow(img)
plt.title(class_names[y[i]])
plt.axis('off')
plt.gcf().tight_layout()
plt.savefig('visualization/guided_gradcam.png')
# **************************************************************************************** #
# Captum
model = torchvision.models.squeezenet1_1(pretrained=True)
# We don't want to train the model, so tell PyTorch not to compute gradients
# with respect to model parameters.
for param in model.parameters():
param.requires_grad = False
# Convert X and y from numpy arrays to Torch Tensors
X_tensor = torch.cat([preprocess(Image.fromarray(x)) for x in X], dim=0)
y_tensor = torch.LongTensor(y)
conv_module = model.features[12]
##############################################################################
# TODO: Compute/Visualize GuidedBackprop and Guided GradCAM as well. #
# visualize_attr_maps function from captum_utils.py is useful for #
# visualizing captum outputs #
# Use conv_module as the convolution layer for gradcam #
##############################################################################
# Computing Guided GradCam
ggc = GuidedGradCam(model, conv_module)
attribution_gcc = compute_attributions(ggc, X_tensor, target = y_tensor)
# print(X_tensor.shape, y_tensor.shape, attribution_gcc.shape)
visualize_attr_maps('visualization/GuidedGradCam.png', X, y, class_names, [attribution_gcc], ['Guided_Grad_Cam'])
# Computing Guided BackProp
gbp = GuidedBackprop(model)
attribution_gbp = compute_attributions(gbp, X_tensor, target = y_tensor)
visualize_attr_maps('visualization/GuidedBackpropCam.png', X, y, class_names, [attribution_gbp], ['Guided_Backprop_Cam'])
##############################################################################
# END OF YOUR CODE #
##############################################################################
# Try out different layers and see observe how the attributions change
layer = model.features[3]
# Example visualization for using layer visualizations
# layer_act = LayerActivation(model, layer)
# layer_act_attr = compute_attributions(layer_act, X_tensor)
# layer_act_attr_sum = layer_act_attr.mean(axis=1, keepdim=True)
##############################################################################
# TODO: Visualize Individual Layer Gradcam and Layer Conductance (similar #
# to what we did for the other captum sections, using our helper methods), #
# but with some preprocessing calculations. #
# #
# You can refer to the LayerActivation example above and you should be #
# using 'layer' given above for this section #
# #
# Also note that, you would need to customize your 'attr_preprocess' #
# parameter that you send along to 'visualize_attr_maps' as the default #
# 'attr_preprocess' is written to only to handle multi channel attributions. #
# #
# For layer gradcam look at the usage of the parameter relu_attributions #
##############################################################################
# Layer gradcam aggregates across all channels
from captum.attr import LayerAttribution
N, C, H, W = X_tensor.shape
LC = LayerConductance(model, layer)
LC_attr = compute_attributions(LC, X_tensor, target = y_tensor)
LC_attr_sum = LC_attr.mean(axis = 1, keepdim = True)
LC_attr_int = LayerAttribution.interpolate(LC_attr_sum, (H,W) )
LC_attr_int = LC_attr_int.repeat(1, 3, 1, 1)
visualize_attr_maps('visualization/LayerConductance.png', X, y, class_names, [LC_attr_int], ['LayerConductance'])
LGC = LayerGradCam(model, layer)
LGC_attr = compute_attributions(LGC, X_tensor, target = y_tensor)
LGC_attr_sum = LGC_attr.mean(axis = 1, keepdim = True)
LGC_attr_int = LayerAttribution.interpolate(LGC_attr_sum, (H,W))
LGC_attr_int = LGC_attr_int.repeat(1, 3, 1, 1)
visualize_attr_maps('visualization/LayerGradCam.png', X, y, class_names, [LGC_attr_int], ['LayerGradCam'])
##############################################################################
# END OF YOUR CODE #
##############################################################################
| [
"torch.LongTensor",
"torch.from_numpy",
"matplotlib.cm.jet",
"visualizers.GradCam",
"matplotlib.pyplot.imshow",
"numpy.max",
"matplotlib.pyplot.axis",
"captum.attr.GuidedGradCam",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.gcf",
"matplotlib.pyplot.title",
"captum.attr.LayerAttribution.interpolate",
"PIL.Image.fromarray",
"captum.attr.GuidedBackprop",
"captum.attr.LayerGradCam",
"matplotlib.pyplot.figure",
"captum.attr.LayerConductance",
"numpy.expand_dims",
"torchvision.models.squeezenet1_1",
"matplotlib.pyplot.subplot",
"numpy.float32"
] | [((699, 748), 'torchvision.models.squeezenet1_1', 'torchvision.models.squeezenet1_1', ([], {'pretrained': '(True)'}), '(pretrained=True)\n', (731, 748), False, 'import torchvision\n'), ((754, 763), 'visualizers.GradCam', 'GradCam', ([], {}), '()\n', (761, 763), False, 'from visualizers import GradCam\n'), ((870, 889), 'torch.LongTensor', 'torch.LongTensor', (['y'], {}), '(y)\n', (886, 889), False, 'import torch\n'), ((979, 1007), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(24, 24)'}), '(figsize=(24, 24))\n', (989, 1007), True, 'import matplotlib.pyplot as plt\n'), ((1219, 1267), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""visualization/guided_backprop.png"""'], {}), "('visualization/guided_backprop.png')\n", (1230, 1267), True, 'import matplotlib.pyplot as plt\n'), ((1430, 1479), 'torchvision.models.squeezenet1_1', 'torchvision.models.squeezenet1_1', ([], {'pretrained': '(True)'}), '(pretrained=True)\n', (1462, 1479), False, 'import torchvision\n'), ((1653, 1672), 'torch.LongTensor', 'torch.LongTensor', (['y'], {}), '(y)\n', (1669, 1672), False, 'import torch\n'), ((1733, 1761), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(24, 24)'}), '(figsize=(24, 24))\n', (1743, 1761), True, 'import matplotlib.pyplot as plt\n'), ((2056, 2096), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""visualization/gradcam.png"""'], {}), "('visualization/gradcam.png')\n", (2067, 2096), True, 'import matplotlib.pyplot as plt\n'), ((2289, 2308), 'torch.LongTensor', 'torch.LongTensor', (['y'], {}), '(y)\n', (2305, 2308), False, 'import torch\n'), ((2431, 2459), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(24, 24)'}), '(figsize=(24, 24))\n', (2441, 2459), True, 'import matplotlib.pyplot as plt\n'), ((2992, 3039), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""visualization/guided_gradcam.png"""'], {}), "('visualization/guided_gradcam.png')\n", (3003, 3039), True, 'import matplotlib.pyplot as plt\n'), ((3152, 3201), 'torchvision.models.squeezenet1_1', 'torchvision.models.squeezenet1_1', ([], {'pretrained': '(True)'}), '(pretrained=True)\n', (3184, 3201), False, 'import torchvision\n'), ((3519, 3538), 'torch.LongTensor', 'torch.LongTensor', (['y'], {}), '(y)\n', (3535, 3538), False, 'import torch\n'), ((4082, 4115), 'captum.attr.GuidedGradCam', 'GuidedGradCam', (['model', 'conv_module'], {}), '(model, conv_module)\n', (4095, 4115), False, 'from captum.attr import GuidedGradCam, GuidedBackprop\n'), ((4401, 4422), 'captum.attr.GuidedBackprop', 'GuidedBackprop', (['model'], {}), '(model)\n', (4415, 4422), False, 'from captum.attr import GuidedGradCam, GuidedBackprop\n'), ((6412, 6442), 'captum.attr.LayerConductance', 'LayerConductance', (['model', 'layer'], {}), '(model, layer)\n', (6428, 6442), False, 'from captum.attr import LayerActivation, LayerConductance, LayerGradCam\n'), ((6574, 6623), 'captum.attr.LayerAttribution.interpolate', 'LayerAttribution.interpolate', (['LC_attr_sum', '(H, W)'], {}), '(LC_attr_sum, (H, W))\n', (6602, 6623), False, 'from captum.attr import LayerAttribution\n'), ((6797, 6823), 'captum.attr.LayerGradCam', 'LayerGradCam', (['model', 'layer'], {}), '(model, layer)\n', (6809, 6823), False, 'from captum.attr import LayerActivation, LayerConductance, LayerGradCam\n'), ((6964, 7014), 'captum.attr.LayerAttribution.interpolate', 'LayerAttribution.interpolate', (['LGC_attr_sum', '(H, W)'], {}), '(LGC_attr_sum, (H, W))\n', (6992, 7014), False, 'from captum.attr import LayerAttribution\n'), ((1049, 1073), 'matplotlib.pyplot.subplot', 
'plt.subplot', (['(1)', '(5)', '(i + 1)'], {}), '(1, 5, i + 1)\n', (1060, 1073), True, 'import matplotlib.pyplot as plt\n'), ((1125, 1140), 'matplotlib.pyplot.imshow', 'plt.imshow', (['img'], {}), '(img)\n', (1135, 1140), True, 'import matplotlib.pyplot as plt\n'), ((1145, 1173), 'matplotlib.pyplot.title', 'plt.title', (['class_names[y[i]]'], {}), '(class_names[y[i]])\n', (1154, 1173), True, 'import matplotlib.pyplot as plt\n'), ((1178, 1193), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (1186, 1193), True, 'import matplotlib.pyplot as plt\n'), ((1933, 1957), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(5)', '(i + 1)'], {}), '(1, 5, i + 1)\n', (1944, 1957), True, 'import matplotlib.pyplot as plt\n'), ((1962, 1977), 'matplotlib.pyplot.imshow', 'plt.imshow', (['img'], {}), '(img)\n', (1972, 1977), True, 'import matplotlib.pyplot as plt\n'), ((1982, 2010), 'matplotlib.pyplot.title', 'plt.title', (['class_names[y[i]]'], {}), '(class_names[y[i]])\n', (1991, 2010), True, 'import matplotlib.pyplot as plt\n'), ((2015, 2030), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (2023, 2030), True, 'import matplotlib.pyplot as plt\n'), ((2547, 2588), 'numpy.expand_dims', 'np.expand_dims', (['gradcam_result[i]'], {'axis': '(2)'}), '(gradcam_result[i], axis=2)\n', (2561, 2588), True, 'import numpy as np\n'), ((2792, 2807), 'numpy.float32', 'np.float32', (['img'], {}), '(img)\n', (2802, 2807), True, 'import numpy as np\n'), ((2818, 2839), 'torch.from_numpy', 'torch.from_numpy', (['img'], {}), '(img)\n', (2834, 2839), False, 'import torch\n'), ((2869, 2893), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(5)', '(i + 1)'], {}), '(1, 5, i + 1)\n', (2880, 2893), True, 'import matplotlib.pyplot as plt\n'), ((2898, 2913), 'matplotlib.pyplot.imshow', 'plt.imshow', (['img'], {}), '(img)\n', (2908, 2913), True, 'import matplotlib.pyplot as plt\n'), ((2918, 2946), 'matplotlib.pyplot.title', 'plt.title', (['class_names[y[i]]'], {}), '(class_names[y[i]])\n', (2927, 2946), True, 'import matplotlib.pyplot as plt\n'), ((2951, 2966), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (2959, 2966), True, 'import matplotlib.pyplot as plt\n'), ((1194, 1203), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (1201, 1203), True, 'import matplotlib.pyplot as plt\n'), ((1917, 1928), 'numpy.max', 'np.max', (['img'], {}), '(img)\n', (1923, 1928), True, 'import numpy as np\n'), ((2031, 2040), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (2038, 2040), True, 'import matplotlib.pyplot as plt\n'), ((2967, 2976), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (2974, 2976), True, 'import matplotlib.pyplot as plt\n'), ((3468, 3486), 'PIL.Image.fromarray', 'Image.fromarray', (['x'], {}), '(x)\n', (3483, 3486), False, 'from PIL import Image\n'), ((1857, 1887), 'matplotlib.cm.jet', 'matplotlib.cm.jet', (['gradcam_val'], {}), '(gradcam_val)\n', (1874, 1887), False, 'import matplotlib\n'), ((798, 816), 'PIL.Image.fromarray', 'Image.fromarray', (['x'], {}), '(x)\n', (813, 816), False, 'from PIL import Image\n'), ((1581, 1599), 'PIL.Image.fromarray', 'Image.fromarray', (['x'], {}), '(x)\n', (1596, 1599), False, 'from PIL import Image\n'), ((2217, 2235), 'PIL.Image.fromarray', 'Image.fromarray', (['x'], {}), '(x)\n', (2232, 2235), False, 'from PIL import Image\n')] |
from itertools import product
import numpy as np
import pytest
from alibi_detect.utils.discretizer import Discretizer
x = np.random.rand(10, 4)
n_features = x.shape[1]
feature_names = [str(_) for _ in range(n_features)]
categorical_features = [[], [1, 3]]
percentiles = [list(np.arange(25, 100, 25)), list(np.arange(10, 100, 10))]
tests = list(product(categorical_features, percentiles))
n_tests = len(tests)
@pytest.fixture
def cats_and_percentiles(request):
cat, perc = tests[request.param]
return cat, perc
@pytest.mark.parametrize('cats_and_percentiles', list(range(n_tests)), indirect=True)
def test_discretizer(cats_and_percentiles):
cat, perc = cats_and_percentiles
disc = Discretizer(x, cat, feature_names, perc)
to_disc = list(disc.names.keys())
assert len(to_disc) == (x.shape[1] - len(cat))
x_disc = disc.discretize(x)
for k, v in disc.names.items():
assert len(v) <= len(perc) + 1
assert callable(disc.lambdas[k])
assert (x_disc[:, k].min() == 0).all()
assert (x_disc[:, k].max() == len(perc)).all()
for i in range(x.shape[1]):
if i not in to_disc:
assert (x_disc[:, i] == x[:, i]).all()
| [
"alibi_detect.utils.discretizer.Discretizer",
"itertools.product",
"numpy.random.rand",
"numpy.arange"
] | [((123, 144), 'numpy.random.rand', 'np.random.rand', (['(10)', '(4)'], {}), '(10, 4)\n', (137, 144), True, 'import numpy as np\n'), ((346, 388), 'itertools.product', 'product', (['categorical_features', 'percentiles'], {}), '(categorical_features, percentiles)\n', (353, 388), False, 'from itertools import product\n'), ((702, 742), 'alibi_detect.utils.discretizer.Discretizer', 'Discretizer', (['x', 'cat', 'feature_names', 'perc'], {}), '(x, cat, feature_names, perc)\n', (713, 742), False, 'from alibi_detect.utils.discretizer import Discretizer\n'), ((278, 300), 'numpy.arange', 'np.arange', (['(25)', '(100)', '(25)'], {}), '(25, 100, 25)\n', (287, 300), True, 'import numpy as np\n'), ((308, 330), 'numpy.arange', 'np.arange', (['(10)', '(100)', '(10)'], {}), '(10, 100, 10)\n', (317, 330), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
Created on Mon Jun 14 11:49:43 2021
@author: Andres
"""
import sys,time
import unittest
from tinc import *
class ParameterSpaceTest(unittest.TestCase):
def test_parameter(self):
p1 = Parameter("param1")
p2 = Parameter("param2")
ps = ParameterSpace("ps")
ps.register_parameters([p1, p2])
def test_process(self):
p1 = Parameter("param1")
p1.values = [0, 1,2,3,4]
p2 = Parameter("param2")
p2.values = [-0.3,-0.2, -0.1, 0]
ps = ParameterSpace("ps")
ps.register_parameters([p1, p2])
def func(param1, param2):
return param1 * param2
result = ps.run_process(func)
self.assertAlmostEqual(result, p1.value * p2.value)
p1.value = 3
p2.value = -0.1
result = ps.run_process(func)
self.assertAlmostEqual(result, p1.value * p2.value)
p1.value = 3
p2.value = -0.1
def test_sweep_cache(self):
p1 = Parameter("param1")
p1.values = [0, 1,2,3,4]
p2 = Parameter("param2")
p2.values = [-0.3,-0.2, -0.1, 0]
ps = ParameterSpace("ps")
ps.register_parameters([p1, p2])
ps.enable_cache("ps_test")
def func(param1, param2):
return param1 * param2
ps.sweep(func)
def test_data_directories(self):
dim1 = Parameter("dim1")
dim1.values = [0.1,0.2,0.3,0.4, 0.5]
dim2 = Parameter("dim2")
dim2.set_space_representation_type(parameter_space_representation_types.INDEX)
dim2.values = [0.1,0.2,0.3,0.4, 0.5]
dim3 = Parameter("dim3")
dim3.set_space_representation_type(parameter_space_representation_types.ID)
        dim3.values = [0.1,0.2,0.3,0.4, 0.5]
ps = ParameterSpace("ps")
ps.register_parameters([dim1, dim2, dim3])
ps.set_current_path_template("file_%%dim1%%_%%dim2:INDEX%%")
dim1.value=0.2
dim2.value=0.2
self.assertEqual(ps.get_current_relative_path(), 'file_0.2_1')
# TODO ML complete tests see C++ tests for parameter space
def test_common_id(self):
dim1 = Parameter("dim1")
dim1.values = [0.1, 0.1, 0.2, 0.2, 0.3, 0.3]
dim1.ids = ["0.1_1" ,"0.1_2","0.2_1" ,"0.2_2", "0.3_1" ,"0.3_2"]
dim2 = Parameter("dim2")
dim2.set_space_representation_type(parameter_space_representation_types.INDEX)
dim2.values = [1,1,1,2,2,2]
dim2.ids = ["0.1_1", "0.2_1", "0.3_1", "0.1_2", "0.2_2", "0.3_2"]
ps = ParameterSpace("ps")
ps.register_parameters([dim1, dim2])
dim1.value = 0.1
dim2.value = 1
self.assertEqual(ps.get_common_id([dim1, dim2]), "0.1_1")
dim1.value = 0.2
dim2.value = 1
self.assertEqual(ps.get_common_id([dim1, dim2]), "0.2_1")
dim1.value = 0.1
dim2.value = 2
self.assertEqual(ps.get_common_id([dim1, dim2]), "0.1_2")
dim1.value = 0.2
dim2.value = 2
self.assertEqual(ps.get_common_id([dim1, dim2]), "0.2_2")
dim1.value = 0.3
dim2.value = 2
self.assertEqual(ps.get_common_id([dim1, dim2]), "0.3_2")
if __name__ == '__main__':
unittest.main()
| [
"unittest.main"
] | [((3345, 3360), 'unittest.main', 'unittest.main', ([], {}), '()\n', (3358, 3360), False, 'import unittest\n')] |
# Created by <NAME> on 8/28/19
import gym
import numpy as np
import torch
from interpretable_ddts.agents.ddt_agent import DDTAgent
from interpretable_ddts.agents.mlp_agent import MLPAgent
from interpretable_ddts.opt_helpers.replay_buffer import discount_reward
import torch.multiprocessing as mp
import argparse
import copy
import random
def run_episode(q, agent_in, ENV_NAME, seed=0):
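    """Roll out one episode with a copy of the agent in the given environment.
    Returns (episode_reward, replay_buffer_state) and also puts the result on
    q when a queue is provided."""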
agent = agent_in.duplicate()
if ENV_NAME == 'lunar':
env = gym.make('LunarLander-v2')
elif ENV_NAME == 'cart':
env = gym.make('CartPole-v1')
else:
raise Exception('No valid environment selected')
done = False
torch.manual_seed(seed)
env.seed(seed)
np.random.seed(seed)
env.action_space.seed(seed)
random.seed(seed)
state = env.reset() # Reset environment and record the starting state
while not done:
action = agent.get_action(state)
# Step through environment using chosen action
state, reward, done, _ = env.step(action)
# env.render()
# Save reward
agent.save_reward(reward)
if done:
break
reward_sum = np.sum(agent.replay_buffer.rewards_list)
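    # discount the episode rewards and compute advantage estimates from the stored (shallow and deeper) value lists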
rewards_list, advantage_list, deeper_advantage_list = discount_reward(agent.replay_buffer.rewards_list,
agent.replay_buffer.value_list,
agent.replay_buffer.deeper_value_list)
agent.replay_buffer.rewards_list = rewards_list
agent.replay_buffer.advantage_list = advantage_list
agent.replay_buffer.deeper_advantage_list = deeper_advantage_list
to_return = [reward_sum, copy.deepcopy(agent.replay_buffer.__getstate__())]
if q is not None:
try:
q.put(to_return)
except RuntimeError as e:
print(e)
return to_return
return to_return
def main(episodes, agent, ENV_NAME):
running_reward_array = []
for episode in range(episodes):
reward = 0
returned_object = run_episode(None, agent_in=agent, ENV_NAME=ENV_NAME)
reward += returned_object[0]
running_reward_array.append(returned_object[0])
agent.replay_buffer.extend(returned_object[1])
if reward >= 499:
agent.save('../models/'+str(episode)+'th')
agent.end_episode(reward)
running_reward = sum(running_reward_array[-100:]) / float(min(100.0, len(running_reward_array)))
if episode % 50 == 0:
print(f'Episode {episode} Last Reward: {reward} Average Reward: {running_reward}')
if episode % 500 == 0:
agent.save('../models/'+str(episode)+'th')
return running_reward_array
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("-a", "--agent_type", help="architecture of agent to run", type=str, default='ddt')
parser.add_argument("-e", "--episodes", help="how many episodes", type=int, default=2000)
parser.add_argument("-l", "--num_leaves", help="number of leaves for DDT/DRL ", type=int, default=8)
parser.add_argument("-n", "--num_hidden", help="number of hidden layers for MLP ", type=int, default=0)
parser.add_argument("-env", "--env_type", help="environment to run on", type=str, default='cart')
parser.add_argument("-gpu", help="run on GPU?", action='store_true')
args = parser.parse_args()
AGENT_TYPE = args.agent_type # 'ddt', 'mlp'
    NUM_EPS = args.episodes     # num episodes         Default 2000
ENV_TYPE = args.env_type # 'cart' or 'lunar' Default 'cart'
USE_GPU = args.gpu # Applies for 'prolo' only. use gpu? Default false
if ENV_TYPE == 'lunar':
init_env = gym.make('LunarLander-v2')
dim_in = init_env.observation_space.shape[0]
dim_out = init_env.action_space.n
elif ENV_TYPE == 'cart':
init_env = gym.make('CartPole-v1')
dim_in = init_env.observation_space.shape[0]
dim_out = init_env.action_space.n
else:
raise Exception('No valid environment selected')
print(f"Agent {AGENT_TYPE} on {ENV_TYPE} ")
# mp.set_start_method('spawn')
mp.set_sharing_strategy('file_system')
for i in range(5):
bot_name = AGENT_TYPE + ENV_TYPE
if USE_GPU:
bot_name += 'GPU'
if AGENT_TYPE == 'ddt':
policy_agent = DDTAgent(bot_name=bot_name,
input_dim=dim_in,
output_dim=dim_out,
rule_list=False,
num_rules=args.num_leaves)
elif AGENT_TYPE == 'mlp':
policy_agent = MLPAgent(input_dim=dim_in,
bot_name=bot_name,
output_dim=dim_out,
num_hidden=args.num_hidden)
else:
raise Exception('No valid network selected')
reward_array = main(NUM_EPS, policy_agent, ENV_TYPE)
| [
"torch.manual_seed",
"argparse.ArgumentParser",
"interpretable_ddts.agents.mlp_agent.MLPAgent",
"interpretable_ddts.opt_helpers.replay_buffer.discount_reward",
"random.seed",
"numpy.sum",
"numpy.random.seed",
"interpretable_ddts.agents.ddt_agent.DDTAgent",
"torch.multiprocessing.set_sharing_strategy",
"gym.make"
] | [((645, 668), 'torch.manual_seed', 'torch.manual_seed', (['seed'], {}), '(seed)\n', (662, 668), False, 'import torch\n'), ((692, 712), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (706, 712), True, 'import numpy as np\n'), ((749, 766), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (760, 766), False, 'import random\n'), ((1140, 1180), 'numpy.sum', 'np.sum', (['agent.replay_buffer.rewards_list'], {}), '(agent.replay_buffer.rewards_list)\n', (1146, 1180), True, 'import numpy as np\n'), ((1239, 1364), 'interpretable_ddts.opt_helpers.replay_buffer.discount_reward', 'discount_reward', (['agent.replay_buffer.rewards_list', 'agent.replay_buffer.value_list', 'agent.replay_buffer.deeper_value_list'], {}), '(agent.replay_buffer.rewards_list, agent.replay_buffer.\n value_list, agent.replay_buffer.deeper_value_list)\n', (1254, 1364), False, 'from interpretable_ddts.opt_helpers.replay_buffer import discount_reward\n'), ((2796, 2821), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (2819, 2821), False, 'import argparse\n'), ((4181, 4219), 'torch.multiprocessing.set_sharing_strategy', 'mp.set_sharing_strategy', (['"""file_system"""'], {}), "('file_system')\n", (4204, 4219), True, 'import torch.multiprocessing as mp\n'), ((463, 489), 'gym.make', 'gym.make', (['"""LunarLander-v2"""'], {}), "('LunarLander-v2')\n", (471, 489), False, 'import gym\n'), ((3737, 3763), 'gym.make', 'gym.make', (['"""LunarLander-v2"""'], {}), "('LunarLander-v2')\n", (3745, 3763), False, 'import gym\n'), ((533, 556), 'gym.make', 'gym.make', (['"""CartPole-v1"""'], {}), "('CartPole-v1')\n", (541, 556), False, 'import gym\n'), ((3907, 3930), 'gym.make', 'gym.make', (['"""CartPole-v1"""'], {}), "('CartPole-v1')\n", (3915, 3930), False, 'import gym\n'), ((4393, 4507), 'interpretable_ddts.agents.ddt_agent.DDTAgent', 'DDTAgent', ([], {'bot_name': 'bot_name', 'input_dim': 'dim_in', 'output_dim': 'dim_out', 'rule_list': '(False)', 'num_rules': 'args.num_leaves'}), '(bot_name=bot_name, input_dim=dim_in, output_dim=dim_out, rule_list\n =False, num_rules=args.num_leaves)\n', (4401, 4507), False, 'from interpretable_ddts.agents.ddt_agent import DDTAgent\n'), ((4708, 4805), 'interpretable_ddts.agents.mlp_agent.MLPAgent', 'MLPAgent', ([], {'input_dim': 'dim_in', 'bot_name': 'bot_name', 'output_dim': 'dim_out', 'num_hidden': 'args.num_hidden'}), '(input_dim=dim_in, bot_name=bot_name, output_dim=dim_out,\n num_hidden=args.num_hidden)\n', (4716, 4805), False, 'from interpretable_ddts.agents.mlp_agent import MLPAgent\n')] |
import hashlib
from typing import TypeVar, Union
import redis
from openff.toolkit.topology import Molecule
from openff.bespokefit.executor.services.qcgenerator import worker
from openff.bespokefit.schema.tasks import HessianTask, OptimizationTask, Torsion1DTask
from openff.bespokefit.utilities.molecule import canonical_order_atoms
_T = TypeVar("_T", HessianTask, OptimizationTask, Torsion1DTask)
def _canonicalize_task(task: _T) -> _T:
task = task.copy(deep=True)
# Ensure the SMILES has a canonical ordering to help ensure cache hits.
canonical_molecule = canonical_order_atoms(
Molecule.from_smiles(task.smiles, allow_undefined_stereo=True)
)
if isinstance(task, Torsion1DTask):
map_to_atom_index = {
j: i for i, j in canonical_molecule.properties["atom_map"].items()
}
central_atom_indices = sorted(
map_to_atom_index[task.central_bond[i]] for i in (0, 1)
)
canonical_molecule.properties["atom_map"] = {
atom_index: (i + 1) for i, atom_index in enumerate(central_atom_indices)
}
canonical_smiles = canonical_molecule.to_smiles(
isomeric=True, explicit_hydrogens=True, mapped=True
)
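        # the atom map set above labels the two central-bond atoms as 1 and 2 in
        # the mapped SMILES, so the bond is re-expressed in those map indices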
task.central_bond = (1, 2)
else:
canonical_smiles = canonical_molecule.to_smiles(
isomeric=True, explicit_hydrogens=True, mapped=False
)
task.smiles = canonical_smiles
return task
def cached_compute_task(
task: Union[HessianTask, OptimizationTask, Torsion1DTask],
redis_connection: redis.Redis,
) -> str:
"""Checks to see if a QC task has already been executed and if not send it to a
worker.
"""
if isinstance(task, Torsion1DTask):
compute = worker.compute_torsion_drive
elif isinstance(task, OptimizationTask):
compute = worker.compute_optimization
elif isinstance(task, HessianTask):
compute = worker.compute_hessian
else:
raise NotImplementedError()
# Canonicalize the task to improve the cache hit rate.
task = _canonicalize_task(task)
task_hash = hashlib.sha512(task.json().encode()).hexdigest()
task_id = redis_connection.hget("qcgenerator:task-ids", task_hash)
if task_id is not None:
return task_id.decode()
task_id = compute.delay(task_json=task.json()).id
redis_connection.hset("qcgenerator:types", task_id, task.type)
# Make sure to only set the hash after the type is set in case the connection
# goes down before this information is entered and subsequently discarded.
redis_connection.hset("qcgenerator:task-ids", task_hash, task_id)
return task_id
| [
"openff.toolkit.topology.Molecule.from_smiles",
"typing.TypeVar"
] | [((341, 400), 'typing.TypeVar', 'TypeVar', (['"""_T"""', 'HessianTask', 'OptimizationTask', 'Torsion1DTask'], {}), "('_T', HessianTask, OptimizationTask, Torsion1DTask)\n", (348, 400), False, 'from typing import TypeVar, Union\n'), ((609, 671), 'openff.toolkit.topology.Molecule.from_smiles', 'Molecule.from_smiles', (['task.smiles'], {'allow_undefined_stereo': '(True)'}), '(task.smiles, allow_undefined_stereo=True)\n', (629, 671), False, 'from openff.toolkit.topology import Molecule\n')] |
'''Copyright Gigaspaces, 2017, All Rights Reserved'''
from cloudify.plugins import lifecycle
OP_START = 'hacker.interfaces.lifecycle.start'
OP_STOP = 'hacker.interfaces.lifecycle.stop'
OP_SS_C = 'hacker.interfaces.lifecycle.create_snapshots'
OP_SS_D = 'hacker.interfaces.lifecycle.delete_snapshots'
REQUIRED_OPS = set([OP_START, OP_SS_C, OP_SS_D, OP_STOP])
def build_instance_sequence(instance, operation,
state_start=None, state_end=None):
'''
Builds sequenced subgraph tasks for an instance
.. note::
The sequence will not be built if the instance provided
does not have a node with an operation defined in the
operation parameter.
:param `CloudifyWorkflowNodeInstance` instance:
Node instance to execute tasks against
:param str operation:
Node (lifecycle) operation to execute
:param str state_start:
Verb to describe operation start
    :param str state_end:
Verb to describe operation finish
'''
tasks = list()
# Only build the sequence if the node operation exists
if operation not in instance.node.operations:
return tasks
# Add task starting state
if state_start:
tasks.append(instance.send_event('%s host' % state_start))
tasks.append(instance.set_state(state_start.lower()))
# Add task operation
tasks.append(instance.execute_operation(operation))
# Add task ended state
if state_end:
tasks.append(instance.send_event('%s host' % state_end))
tasks.append(instance.set_state(state_end.lower()))
return tasks
def build_instance_subgraph(instance, graph):
'''
Builds a subgraph for an instance
:param `CloudifyWorkflowNodeInstance` instance:
Node instance to execute tasks against
:param `TaskDependencyGraph` graph:
Task graph to create sequences from
'''
# Init a "stop instance" subgraph
sg_stop = graph.subgraph('stop_subgraph')
seq_stop = sg_stop.sequence()
seq_stop.add(*build_instance_sequence(
instance, OP_STOP, 'Stopping', 'Stopped'))
# Init a "recreate snapshots" subgraph
sg_snap = graph.subgraph('snapshot_subgraph')
seq_snap = sg_snap.sequence()
if OP_SS_D in instance.node.operations:
seq_snap.add(*build_instance_sequence(instance, OP_SS_D))
if OP_SS_C in instance.node.operations:
seq_snap.add(*build_instance_sequence(instance, OP_SS_C))
# Init a "start instance" subgraph
    sg_start = graph.subgraph('start_subgraph')
seq_start = sg_start.sequence()
seq_start.add(*build_instance_sequence(
instance, OP_START, 'Starting', 'Started'))
# Create subgraph dependencies
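    # (snapshot subgraph depends on stop, start depends on snapshot, so each instance stops, re-snapshots, then starts)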
graph.add_dependency(sg_snap, sg_stop)
graph.add_dependency(sg_start, sg_snap)
def refresh_snapshots(ctx, **_):
'''
Executes a complex, graph-based set of lifecycle events
to stop all host (compute) instances, delete all
existing instance snapshots, take new snapshots
of all attached volumes, and start the instances
back up when complete.
'''
graph = ctx.graph_mode()
# Find all compute hosts and build a sequence graph
for node in ctx.nodes:
if not REQUIRED_OPS.issubset(node.operations):
ctx.logger.warn(
'Skipping refresh_snapshots workflow for node "%s" because '
'it does not have all required operations defined' % node.id)
continue
# Iterate over each node instance
for instance in node.instances:
if not lifecycle.is_host_node(instance):
ctx.logger.warn(
'Skipping refresh_snapshots workflow for node instance '
'"%s" because it is not a compute host' % instance.id)
continue
build_instance_subgraph(instance, graph)
# Execute the sequences
return graph.execute()
| [
"cloudify.plugins.lifecycle.is_host_node"
] | [((3599, 3631), 'cloudify.plugins.lifecycle.is_host_node', 'lifecycle.is_host_node', (['instance'], {}), '(instance)\n', (3621, 3631), False, 'from cloudify.plugins import lifecycle\n')] |
# coding=utf-8
# Copyright <NAME>, <NAME>, <NAME> and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from transformers import LEDConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from .test_configuration_common import ConfigTester
from .test_modeling_tf_common import TFModelTesterMixin, ids_tensor
if is_tf_available():
import tensorflow as tf
from transformers import TFLEDForConditionalGeneration, TFLEDModel
@require_tf
class TFLEDModelTester:
config_cls = LEDConfig
config_updates = {}
hidden_act = "gelu"
def __init__(
self,
parent,
batch_size=13,
seq_length=7,
is_training=True,
use_labels=False,
vocab_size=99,
hidden_size=32,
num_hidden_layers=5,
num_attention_heads=4,
intermediate_size=37,
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
max_position_embeddings=20,
eos_token_id=2,
pad_token_id=1,
bos_token_id=0,
attention_window=4,
):
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.is_training = is_training
self.use_labels = use_labels
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.eos_token_id = eos_token_id
self.pad_token_id = pad_token_id
self.bos_token_id = bos_token_id
self.attention_window = attention_window
# `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size
# [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention
# returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1]
# because its local attention only attends to `self.attention_window` and one before and one after
self.key_length = self.attention_window + 1
# because of padding `encoder_seq_length`, is different from `seq_length`. Relevant for
# the `test_attention_outputs` and `test_hidden_states_output` tests
self.encoder_seq_length = (
self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window
)
def prepare_config_and_inputs_for_common(self):
input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
input_ids = tf.concat([input_ids, eos_tensor], axis=1)
decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
config = self.config_cls(
vocab_size=self.vocab_size,
d_model=self.hidden_size,
encoder_layers=self.num_hidden_layers,
decoder_layers=self.num_hidden_layers,
encoder_attention_heads=self.num_attention_heads,
decoder_attention_heads=self.num_attention_heads,
encoder_ffn_dim=self.intermediate_size,
decoder_ffn_dim=self.intermediate_size,
dropout=self.hidden_dropout_prob,
attention_dropout=self.attention_probs_dropout_prob,
max_position_embeddings=self.max_position_embeddings,
eos_token_ids=[2],
bos_token_id=self.bos_token_id,
pad_token_id=self.pad_token_id,
decoder_start_token_id=self.pad_token_id,
attention_window=self.attention_window,
**self.config_updates,
)
inputs_dict = prepare_led_inputs_dict(config, input_ids, decoder_input_ids)
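        # put global attention on the final (EOS) token only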
global_attention_mask = tf.concat(
[tf.zeros_like(input_ids)[:, :-1], tf.ones_like(input_ids)[:, -1:]],
axis=-1,
)
inputs_dict["global_attention_mask"] = global_attention_mask
return config, inputs_dict
def check_decoder_model_past_large_inputs(self, config, inputs_dict):
model = TFLEDModel(config=config).get_decoder()
input_ids = inputs_dict["input_ids"]
input_ids = input_ids[:1, :]
attention_mask = inputs_dict["attention_mask"][:1, :]
self.batch_size = 1
# first forward pass
outputs = model(input_ids, attention_mask=attention_mask, use_cache=True)
output, past_key_values = outputs.to_tuple()
past_key_values = past_key_values[1]
# create hypothetical next token and extent to next_input_ids
next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)
        # append to next input_ids and attention mask
next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)
output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]
self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])
# select random slice
random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
output_from_past_slice = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3)
def prepare_led_inputs_dict(
config,
input_ids,
decoder_input_ids,
attention_mask=None,
decoder_attention_mask=None,
):
if attention_mask is None:
attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
if decoder_attention_mask is None:
decoder_attention_mask = tf.cast(tf.math.not_equal(decoder_input_ids, config.pad_token_id), tf.int8)
return {
"input_ids": input_ids,
"attention_mask": attention_mask,
"decoder_input_ids": decoder_input_ids,
"decoder_attention_mask": decoder_attention_mask,
}
@require_tf
class TFLEDModelTest(TFModelTesterMixin, unittest.TestCase):
all_model_classes = (TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else ()
all_generative_model_classes = (TFLEDForConditionalGeneration,) if is_tf_available() else ()
is_encoder_decoder = True
test_pruning = False
def setUp(self):
self.model_tester = TFLEDModelTester(self)
self.config_tester = ConfigTester(self, config_class=LEDConfig)
def test_config(self):
self.config_tester.run_common_tests()
def test_decoder_model_past_large_inputs(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
def test_model_common_attributes(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
assert isinstance(model.get_input_embeddings(), tf.keras.layers.Layer)
x = model.get_output_layer_with_bias()
assert x is None
name = model.get_prefix_bias_name()
assert name is None
def test_attention_outputs(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
inputs_dict["global_attention_mask"] = tf.zeros_like(inputs_dict["attention_mask"])
num_global_attn_indices = 2
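        # mark the first num_global_attn_indices tokens as global-attention tokens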
inputs_dict["global_attention_mask"] = tf.where(
tf.range(self.model_tester.seq_length)[None, :] < num_global_attn_indices,
1,
inputs_dict["global_attention_mask"],
)
config.return_dict = True
seq_length = self.model_tester.seq_length
encoder_seq_length = self.model_tester.encoder_seq_length
def check_decoder_attentions_output(outputs):
decoder_attentions = outputs.decoder_attentions
self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
self.assertListEqual(
list(decoder_attentions[0].shape[-3:]),
[self.model_tester.num_attention_heads, seq_length, seq_length],
)
def check_encoder_attentions_output(outputs):
attentions = [t.numpy() for t in outputs.encoder_attentions]
global_attentions = [t.numpy() for t in outputs.encoder_global_attentions]
self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
self.assertEqual(len(global_attentions), self.model_tester.num_hidden_layers)
self.assertListEqual(
list(attentions[0].shape[-3:]),
[self.model_tester.num_attention_heads, encoder_seq_length, seq_length],
)
self.assertListEqual(
list(global_attentions[0].shape[-3:]),
[self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices],
)
for model_class in self.all_model_classes:
inputs_dict["output_attentions"] = True
inputs_dict["use_cache"] = False
config.output_hidden_states = False
model = model_class(config)
outputs = model(self._prepare_for_class(inputs_dict, model_class))
out_len = len(outputs)
self.assertEqual(config.output_hidden_states, False)
check_encoder_attentions_output(outputs)
if self.is_encoder_decoder:
model = model_class(config)
outputs = model(self._prepare_for_class(inputs_dict, model_class))
self.assertEqual(config.output_hidden_states, False)
check_decoder_attentions_output(outputs)
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
config.output_attentions = True
model = model_class(config)
outputs = model(self._prepare_for_class(inputs_dict, model_class))
self.assertEqual(config.output_hidden_states, False)
check_encoder_attentions_output(outputs)
# Check attention is always last and order is fine
inputs_dict["output_attentions"] = True
config.output_hidden_states = True
model = model_class(config)
outputs = model(self._prepare_for_class(inputs_dict, model_class))
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1), len(outputs))
self.assertEqual(model.config.output_hidden_states, True)
check_encoder_attentions_output(outputs)
@slow
def test_saved_model_with_attentions_output(self):
# longformer has special attentions which are not
# compatible in graph mode
pass
@slow
def test_saved_model_with_hidden_states_output(self):
# TODO(JPLU, PVP) this test should pass!!! PVP:
# IMO there is a problem with the signature check.
# Test passes for TFLEDModel, but not for TFLEDForConditionalGeneration
# IMO the reason is that the tensor variable name cannot be changed
# from decoder_input_ids -> input_ids, which poses a BIG restrictions
pass
def _assert_tensors_equal(a, b, atol=1e-12, prefix=""):
"""If tensors not close, or a and b arent both tensors, raise a nice Assertion error."""
if a is None and b is None:
return True
try:
if tf.debugging.assert_near(a, b, atol=atol):
return True
raise
except Exception:
msg = "{} != {}".format(a, b)
if prefix:
msg = prefix + ": " + msg
raise AssertionError(msg)
def _long_tensor(tok_lst):
return tf.constant(tok_lst, dtype=tf.int32)
TOLERANCE = 1e-4
@slow
@require_tf
class TFLEDModelIntegrationTest(unittest.TestCase):
def test_inference_no_head(self):
model = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384").led
# change to intended input here
input_ids = _long_tensor([512 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
decoder_input_ids = _long_tensor([128 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
inputs_dict = prepare_led_inputs_dict(model.config, input_ids, decoder_input_ids)
output = model(**inputs_dict)[0]
expected_shape = (1, 1024, 768)
self.assertEqual(output.shape, expected_shape)
# change to expected output here
expected_slice = tf.convert_to_tensor(
[[2.3050, 2.8279, 0.6531], [-1.8457, -0.1455, -3.5661], [-1.0186, 0.4586, -2.2043]],
)
tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=TOLERANCE)
def test_inference_with_head(self):
model = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384")
# change to intended input here
input_ids = _long_tensor([512 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
decoder_input_ids = _long_tensor([128 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
inputs_dict = prepare_led_inputs_dict(model.config, input_ids, decoder_input_ids)
output = model(**inputs_dict)[0]
expected_shape = (1, 1024, model.config.vocab_size)
self.assertEqual(output.shape, expected_shape)
# change to expected output here
expected_slice = tf.convert_to_tensor(
[[33.6507, 6.4572, 16.8089], [5.8739, -2.4238, 11.2902], [-3.2139, -4.3149, 4.2783]],
)
tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=TOLERANCE)
| [
"tensorflow.convert_to_tensor",
"transformers.TFLEDForConditionalGeneration.from_pretrained",
"tensorflow.range",
"tensorflow.concat",
"tensorflow.constant",
"tensorflow.ones_like",
"tensorflow.zeros_like",
"tensorflow.debugging.assert_near",
"transformers.is_tf_available",
"transformers.TFLEDModel",
"tensorflow.math.not_equal"
] | [((901, 918), 'transformers.is_tf_available', 'is_tf_available', ([], {}), '()\n', (916, 918), False, 'from transformers import LEDConfig, is_tf_available\n'), ((12875, 12911), 'tensorflow.constant', 'tf.constant', (['tok_lst'], {'dtype': 'tf.int32'}), '(tok_lst, dtype=tf.int32)\n', (12886, 12911), True, 'import tensorflow as tf\n'), ((3468, 3510), 'tensorflow.concat', 'tf.concat', (['[input_ids, eos_tensor]'], {'axis': '(1)'}), '([input_ids, eos_tensor], axis=1)\n', (3477, 3510), True, 'import tensorflow as tf\n'), ((5641, 5685), 'tensorflow.concat', 'tf.concat', (['[input_ids, next_tokens]'], {'axis': '(-1)'}), '([input_ids, next_tokens], axis=-1)\n', (5650, 5685), True, 'import tensorflow as tf\n'), ((5716, 5768), 'tensorflow.concat', 'tf.concat', (['[attention_mask, next_attn_mask]'], {'axis': '(-1)'}), '([attention_mask, next_attn_mask], axis=-1)\n', (5725, 5768), True, 'import tensorflow as tf\n'), ((6382, 6473), 'tensorflow.debugging.assert_near', 'tf.debugging.assert_near', (['output_from_past_slice', 'output_from_no_past_slice'], {'rtol': '(0.001)'}), '(output_from_past_slice, output_from_no_past_slice,\n rtol=0.001)\n', (6406, 6473), True, 'import tensorflow as tf\n'), ((7228, 7245), 'transformers.is_tf_available', 'is_tf_available', ([], {}), '()\n', (7243, 7245), False, 'from transformers import LEDConfig, is_tf_available\n'), ((7325, 7342), 'transformers.is_tf_available', 'is_tf_available', ([], {}), '()\n', (7340, 7342), False, 'from transformers import LEDConfig, is_tf_available\n'), ((8487, 8531), 'tensorflow.zeros_like', 'tf.zeros_like', (["inputs_dict['attention_mask']"], {}), "(inputs_dict['attention_mask'])\n", (8500, 8531), True, 'import tensorflow as tf\n'), ((12603, 12644), 'tensorflow.debugging.assert_near', 'tf.debugging.assert_near', (['a', 'b'], {'atol': 'atol'}), '(a, b, atol=atol)\n', (12627, 12644), True, 'import tensorflow as tf\n'), ((13644, 13752), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['[[2.305, 2.8279, 0.6531], [-1.8457, -0.1455, -3.5661], [-1.0186, 0.4586, -\n 2.2043]]'], {}), '([[2.305, 2.8279, 0.6531], [-1.8457, -0.1455, -3.5661],\n [-1.0186, 0.4586, -2.2043]])\n', (13664, 13752), True, 'import tensorflow as tf\n'), ((13781, 13856), 'tensorflow.debugging.assert_near', 'tf.debugging.assert_near', (['output[:, :3, :3]', 'expected_slice'], {'atol': 'TOLERANCE'}), '(output[:, :3, :3], expected_slice, atol=TOLERANCE)\n', (13805, 13856), True, 'import tensorflow as tf\n'), ((13914, 13985), 'transformers.TFLEDForConditionalGeneration.from_pretrained', 'TFLEDForConditionalGeneration.from_pretrained', (['"""allenai/led-base-16384"""'], {}), "('allenai/led-base-16384')\n", (13959, 13985), False, 'from transformers import TFLEDForConditionalGeneration, TFLEDModel\n'), ((14517, 14628), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['[[33.6507, 6.4572, 16.8089], [5.8739, -2.4238, 11.2902], [-3.2139, -4.3149,\n 4.2783]]'], {}), '([[33.6507, 6.4572, 16.8089], [5.8739, -2.4238, 11.2902\n ], [-3.2139, -4.3149, 4.2783]])\n', (14537, 14628), True, 'import tensorflow as tf\n'), ((14655, 14730), 'tensorflow.debugging.assert_near', 'tf.debugging.assert_near', (['output[:, :3, :3]', 'expected_slice'], {'atol': 'TOLERANCE'}), '(output[:, :3, :3], expected_slice, atol=TOLERANCE)\n', (14679, 14730), True, 'import tensorflow as tf\n'), ((3393, 3443), 'tensorflow.constant', 'tf.constant', (['([self.eos_token_id] * self.batch_size)'], {}), '([self.eos_token_id] * self.batch_size)\n', (3404, 3443), True, 'import tensorflow as tf\n'), ((6675, 6724), 
'tensorflow.math.not_equal', 'tf.math.not_equal', (['input_ids', 'config.pad_token_id'], {}), '(input_ids, config.pad_token_id)\n', (6692, 6724), True, 'import tensorflow as tf\n'), ((6815, 6872), 'tensorflow.math.not_equal', 'tf.math.not_equal', (['decoder_input_ids', 'config.pad_token_id'], {}), '(decoder_input_ids, config.pad_token_id)\n', (6832, 6872), True, 'import tensorflow as tf\n'), ((13057, 13128), 'transformers.TFLEDForConditionalGeneration.from_pretrained', 'TFLEDForConditionalGeneration.from_pretrained', (['"""allenai/led-base-16384"""'], {}), "('allenai/led-base-16384')\n", (13102, 13128), False, 'from transformers import TFLEDForConditionalGeneration, TFLEDModel\n'), ((4928, 4953), 'transformers.TFLEDModel', 'TFLEDModel', ([], {'config': 'config'}), '(config=config)\n', (4938, 4953), False, 'from transformers import TFLEDForConditionalGeneration, TFLEDModel\n'), ((4634, 4658), 'tensorflow.zeros_like', 'tf.zeros_like', (['input_ids'], {}), '(input_ids)\n', (4647, 4658), True, 'import tensorflow as tf\n'), ((4668, 4691), 'tensorflow.ones_like', 'tf.ones_like', (['input_ids'], {}), '(input_ids)\n', (4680, 4691), True, 'import tensorflow as tf\n'), ((8637, 8675), 'tensorflow.range', 'tf.range', (['self.model_tester.seq_length'], {}), '(self.model_tester.seq_length)\n', (8645, 8675), True, 'import tensorflow as tf\n')] |
import contextlib
from datetime import date
from datetime import datetime
from datetime import timezone
from functools import wraps
from io import BytesIO
from itertools import count
from typing import Any
from typing import Dict
from typing import Sequence
import pytest
from dateutil.parser import parse as parse_date
from dateutil.relativedelta import relativedelta
from django import forms
from django.core.exceptions import ValidationError
from django.template.loader import render_to_string
from django.urls import reverse
from freezegun import freeze_time
from lxml import etree
from common.models.records import TrackedModel
from common.renderers import counter_generator
from common.serializers import validate_taric_xml_record_order
from common.util import TaricDateRange
from common.util import get_accessor
from common.util import get_field_tuple
INTERDEPENDENT_IMPORT_IMPLEMENTED = True
UPDATE_IMPORTER_IMPLEMENTED = True
EXPORT_REFUND_NOMENCLATURE_IMPLEMENTED = False
COMMODITIES_IMPLEMENTED = True
MEURSING_TABLES_IMPLEMENTED = False
PARTIAL_TEMPORARY_STOP_IMPLEMENTED = False
UTC = timezone.utc
requires_commodities = pytest.mark.skipif(
not COMMODITIES_IMPLEMENTED,
reason="Commodities not implemented",
)
requires_export_refund_nomenclature = pytest.mark.skipif(
not EXPORT_REFUND_NOMENCLATURE_IMPLEMENTED,
reason="Export refund nomenclature not implemented",
)
requires_meursing_tables = pytest.mark.skipif(
not MEURSING_TABLES_IMPLEMENTED,
reason="Meursing tables not implemented",
)
requires_partial_temporary_stop = pytest.mark.skipif(
not PARTIAL_TEMPORARY_STOP_IMPLEMENTED,
reason="Partial temporary stop not implemented",
)
requires_interdependent_import = pytest.mark.skipif(
not INTERDEPENDENT_IMPORT_IMPLEMENTED,
reason="Interdependent imports not implemented",
)
requires_update_importer = pytest.mark.skipif(
not UPDATE_IMPORTER_IMPLEMENTED,
reason="Requires Updating importers to be implemented",
)
@contextlib.contextmanager
def raises_if(exception, expected):
try:
yield
except exception:
if not expected:
raise
else:
if expected:
pytest.fail(f"Did not raise {exception}")
def check_validator(validate, value, expected_valid):
try:
validate(value)
except ValidationError:
if expected_valid:
pytest.fail(f'Unexpected validation error for value "{value}"')
except Exception:
raise
else:
if not expected_valid:
pytest.fail(f'Expected validation error for value "{value}"')
def make_duplicate_record(factory, identifying_fields=None):
"""Creates two records using the passed factory that are duplicates of each
other and returns the record created last."""
existing = factory.create()
# allow overriding identifying_fields
if identifying_fields is None:
identifying_fields = list(factory._meta.model.identifying_fields)
return factory.create(
**dict(get_field_tuple(existing, field) for field in identifying_fields)
)
def make_non_duplicate_record(factory, identifying_fields=None):
"""Creates two records using the passed factory that are not duplicates of
each other and returns the record created last."""
existing = factory.create()
not_duplicate = factory.create()
if identifying_fields is None:
identifying_fields = list(factory._meta.model.identifying_fields)
assert any(
get_field_tuple(existing, f) != get_field_tuple(not_duplicate, f)
for f in identifying_fields
)
return not_duplicate
def get_checkable_data(model: TrackedModel, ignore=frozenset()):
"""
Returns a dict representing the model's data ignoring any automatically set
fields and fields with names passed to `ignore`.
The returned data will contain the identifying fields for any linked
models rather than internal PKs.
For example:
get_checkable_data(FootnoteDescriptionFactory(), ignore={"sid"})
# {
# "description": "My sample footnote text",
# "described_footnote": {
# "footnote_type__footnote_type_id": "FN"
# "footnote_id": "123",
# },
# }
"""
checked_field_names = {f.name for f in model.copyable_fields} - ignore
data = {
name: getattr(model, get_accessor(model._meta.get_field(name)))
for name in checked_field_names
}
identifying_fields = {
name: data[name].get_identifying_fields()
for name in checked_field_names
if hasattr(data[name], "get_identifying_fields")
}
data.update(identifying_fields)
return data
def assert_records_match(
expected: TrackedModel,
imported: TrackedModel,
ignore=frozenset(),
):
"""
Asserts that every value for every field in the imported model is the same
as the data in the expected model.
System fields that will change from model to model are not checked. Any
field names given to `ignore` will also not be checked.
"""
expected_data = get_checkable_data(expected, ignore=ignore)
imported_data = get_checkable_data(imported, ignore=ignore)
assert expected_data == imported_data
def assert_many_records_match(
expected: Sequence[TrackedModel],
imported: Sequence[TrackedModel],
ignore=frozenset(),
):
"""
Asserts that every value for every field in the imported models is the same
as the data in the expected models, and that the count of both is equal.
System fields that will change from model to model are not checked. Any
field names given to `ignore` will also not be checked.
"""
expected_data = [get_checkable_data(e, ignore=ignore) for e in expected]
imported_data = [get_checkable_data(i, ignore=ignore) for i in imported]
assert expected_data == imported_data
_transaction_counter = count(start=1)
def generate_test_import_xml(obj: dict) -> BytesIO:
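    """Render a single serialized object into a TARIC transaction XML envelope and return it as a BytesIO."""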
xml = render_to_string(
template_name="workbaskets/taric/transaction_detail.xml",
context={
"envelope_id": next(_transaction_counter),
"tracked_models": [obj],
"transaction_id": next(_transaction_counter),
"message_counter": counter_generator(),
"counter_generator": counter_generator,
},
)
return BytesIO(xml.encode())
def taric_xml_record_codes(xml):
"""Yields tuples of (record_code, subrecord_code)"""
records = xml.xpath(".//*[local-name() = 'record']")
codes = etree.XPath(
".//*[local-name()='record.code' or local-name()='subrecord.code']/text()",
)
return [tuple(codes(record)) for record in records]
def validate_taric_xml(
factory=None,
instance=None,
factory_kwargs=None,
check_order=True,
):
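    """Test decorator: creates an instance from ``factory`` (or uses ``instance``),
    renders the approved workbasket as TARIC XML, validates it against the schema
    (and optionally the record order) and passes the parsed ``xml`` to the test."""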
def decorator(func):
def wraps(
api_client,
taric_schema,
approved_transaction,
valid_user,
*args,
**kwargs,
):
if not factory and not instance:
raise AssertionError(
"Either a factory or an object instance need to be provided",
)
if factory and instance:
raise AssertionError(
"Either a factory or an object instance need to be provided - not both.",
)
current_instance = instance or factory.create(
transaction=approved_transaction, **factory_kwargs or {}
)
api_client.force_login(user=valid_user)
response = api_client.get(
reverse(
"workbaskets:workbasket-detail",
kwargs={"pk": approved_transaction.workbasket.pk},
),
{"format": "xml"},
)
assert response.status_code == 200
content = response.content
xml = etree.XML(content)
taric_schema.validate(xml)
assert not taric_schema.error_log, f"XML errors: {taric_schema.error_log}"
if check_order:
validate_taric_xml_record_order(xml)
kwargs = {"xml": xml, **kwargs}
func(
*args,
**kwargs,
)
return wraps
return decorator
class Dates:
deltas = {
"normal": (relativedelta(), relativedelta(months=+1)),
"earlier": (relativedelta(years=-1), relativedelta(years=-1, months=+1)),
"later": (
relativedelta(years=+1, months=+1, days=+1),
relativedelta(years=+1, months=+2),
),
"big": (relativedelta(years=-2), relativedelta(years=+2, days=+1)),
"adjacent": (relativedelta(days=+1), relativedelta(months=+1)),
"adjacent_earlier": (relativedelta(months=-1), relativedelta(days=-1)),
"adjacent_later": (relativedelta(months=+1, days=+1), relativedelta(months=+2)),
"adjacent_no_end": (relativedelta(months=+1, days=+1), None),
"adjacent_even_later": (
relativedelta(months=+2, days=+1),
relativedelta(months=+3),
),
"adjacent_earlier_big": (
relativedelta(years=-2, months=-2),
relativedelta(years=-2),
),
"adjacent_later_big": (
relativedelta(months=+1, days=+1),
relativedelta(years=+2, months=+2),
),
"overlap_normal": (
relativedelta(days=+15),
relativedelta(days=+14, months=+1, years=+1),
),
"overlap_normal_earlier": (
relativedelta(months=-1, days=+14),
relativedelta(days=+14),
),
"overlap_normal_same_year": (
relativedelta(days=+15),
relativedelta(days=+14, months=+1),
),
"overlap_big": (relativedelta(years=+1), relativedelta(years=+3, days=+2)),
"after_big": (
relativedelta(years=+3, months=+1),
relativedelta(years=+3, months=+2),
),
"backwards": (relativedelta(months=+1), relativedelta(days=+1)),
"starts_with_normal": (relativedelta(), relativedelta(days=+14)),
"ends_with_normal": (relativedelta(days=+14), relativedelta(months=+1)),
"current": (relativedelta(weeks=-4), relativedelta(weeks=+4)),
"future": (relativedelta(weeks=+10), relativedelta(weeks=+20)),
"no_end": (relativedelta(), None),
"normal_first_half": (relativedelta(), relativedelta(days=+14)),
}
@property
def now(self):
return self.datetime_now.date()
@property
def datetime_now(self):
return datetime.now(tz=UTC).replace(hour=0, minute=0, second=0, microsecond=0)
def __getattr__(self, name):
if name in self.deltas:
start, end = self.deltas[name]
start = self.now + start
if end is not None:
end = self.now + end
return TaricDateRange(start, end)
raise AttributeError(name)
@classmethod
def short_before(cls, dt):
return TaricDateRange(
dt + relativedelta(months=-1),
dt + relativedelta(days=-14),
)
@classmethod
def medium_before(cls, dt):
return TaricDateRange(
dt + relativedelta(months=-1),
dt + relativedelta(days=-1),
)
@classmethod
def short_after(cls, dt):
return TaricDateRange(
dt + relativedelta(days=+14),
dt + relativedelta(months=+1),
)
@classmethod
def short_overlap(cls, dt):
return TaricDateRange(
dt + relativedelta(months=-1),
dt + relativedelta(months=+1),
)
@classmethod
def no_end_before(cls, dt):
return TaricDateRange(
dt + relativedelta(months=-1),
None,
)
def only_applicable_after(cutoff):
"""
Decorator which asserts that a test fails after a specified cutoff date.
:param cutoff: A date string, or datetime object before which the test should fail.
"""
cutoff = parse_date(cutoff)
def decorator(fn):
@wraps(fn)
def do_test(*args, **kwargs):
# test should pass normally
fn(*args, **kwargs)
# test should fail before cutoff
with freeze_time(cutoff + relativedelta(days=-1)):
try:
fn(*args, **kwargs)
except pytest.fail.Exception:
pass
except Exception:
raise
else:
pytest.fail(f"Rule applied before {cutoff:%Y-%m-%d}")
return True
return do_test
return decorator
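# Illustrative usage (hypothetical test, not part of the original suite): the
# wrapped test must pass at today's date, and is expected to raise pytest.fail
# when the clock is frozen to the day before the cutoff.
#
#   @only_applicable_after("2021-01-01")
#   def test_rule_enforced_only_from_2021():
#       check_business_rule()  # assumed to call pytest.fail while the rule does not yet apply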
def validity_period_post_data(start: date, end: date) -> Dict[str, int]:
"""
Construct a POST data fragment for the validity period start and end dates
of a ValidityPeriodForm from the given date objects, eg:
>>> validity_period_post_data(
    ...     datetime.date(2021, 1, 2),
    ...     datetime.date(2022, 3, 4),
    ... )
    {
        "start_date_0": 2,
        "start_date_1": 1,
"start_date_2": 2021,
"end_date_0": 4,
"end_date_1": 3,
"end_date_2": 2022,
}
"""
return {
f"{name}_{i}": part
for name, date in (("start_date", start), ("end_date", end))
for i, part in enumerate([date.day, date.month, date.year])
}
def get_form_data(form: forms.ModelForm) -> Dict[str, Any]:
"""Returns a dictionary of the fields that the form will put onto a page and
their current values, taking account of any fields that have sub-fields and
hence result in multiple HTML <input> objects."""
data = {**form.initial}
for field in form.rendered_fields:
value = data[field] if field in data else form.fields[field].initial
if hasattr(form.fields[field].widget, "decompress"):
# If the widget can be decompressed, then it is not just a simple
# value and has some internal structure. So we need to generate one
# form item per decompressed value and append the name with _0, _1,
# etc. This mirrors the MultiValueWidget in django/forms/widgets.py.
if field in data:
del data[field]
value = form.fields[field].widget.decompress(value)
data.update(
**{f"{field}_{i}": v for i, v in enumerate(value) if v is not None}
)
elif value is not None:
data.setdefault(field, value)
return data
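# Illustrative sketch (hypothetical form class, not part of the original module):
# plain fields pass through under their own names, while fields whose widgets
# support decompress() are split into the numbered inputs Django renders, e.g.
# a date field "start_date" becomes "start_date_0", "start_date_1", "start_date_2".
#
#   form = MeasureForm(instance=measure)   # hypothetical ModelForm
#   post_data = get_form_data(form)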
| [
"dateutil.parser.parse",
"lxml.etree.XPath",
"dateutil.relativedelta.relativedelta",
"common.renderers.counter_generator",
"django.urls.reverse",
"common.util.TaricDateRange",
"lxml.etree.XML",
"functools.wraps",
"common.util.get_field_tuple",
"pytest.fail",
"datetime.datetime.now",
"itertools.count",
"pytest.mark.skipif",
"common.serializers.validate_taric_xml_record_order"
] | [((1138, 1228), 'pytest.mark.skipif', 'pytest.mark.skipif', (['(not COMMODITIES_IMPLEMENTED)'], {'reason': '"""Commodities not implemented"""'}), "(not COMMODITIES_IMPLEMENTED, reason=\n 'Commodities not implemented')\n", (1156, 1228), False, 'import pytest\n'), ((1274, 1394), 'pytest.mark.skipif', 'pytest.mark.skipif', (['(not EXPORT_REFUND_NOMENCLATURE_IMPLEMENTED)'], {'reason': '"""Export refund nomenclature not implemented"""'}), "(not EXPORT_REFUND_NOMENCLATURE_IMPLEMENTED, reason=\n 'Export refund nomenclature not implemented')\n", (1292, 1394), False, 'import pytest\n'), ((1429, 1527), 'pytest.mark.skipif', 'pytest.mark.skipif', (['(not MEURSING_TABLES_IMPLEMENTED)'], {'reason': '"""Meursing tables not implemented"""'}), "(not MEURSING_TABLES_IMPLEMENTED, reason=\n 'Meursing tables not implemented')\n", (1447, 1527), False, 'import pytest\n'), ((1569, 1681), 'pytest.mark.skipif', 'pytest.mark.skipif', (['(not PARTIAL_TEMPORARY_STOP_IMPLEMENTED)'], {'reason': '"""Partial temporary stop not implemented"""'}), "(not PARTIAL_TEMPORARY_STOP_IMPLEMENTED, reason=\n 'Partial temporary stop not implemented')\n", (1587, 1681), False, 'import pytest\n'), ((1722, 1833), 'pytest.mark.skipif', 'pytest.mark.skipif', (['(not INTERDEPENDENT_IMPORT_IMPLEMENTED)'], {'reason': '"""Interdependent imports not implemented"""'}), "(not INTERDEPENDENT_IMPORT_IMPLEMENTED, reason=\n 'Interdependent imports not implemented')\n", (1740, 1833), False, 'import pytest\n'), ((1868, 1980), 'pytest.mark.skipif', 'pytest.mark.skipif', (['(not UPDATE_IMPORTER_IMPLEMENTED)'], {'reason': '"""Requires Updating importers to be implemented"""'}), "(not UPDATE_IMPORTER_IMPLEMENTED, reason=\n 'Requires Updating importers to be implemented')\n", (1886, 1980), False, 'import pytest\n'), ((5923, 5937), 'itertools.count', 'count', ([], {'start': '(1)'}), '(start=1)\n', (5928, 5937), False, 'from itertools import count\n'), ((6571, 6663), 'lxml.etree.XPath', 'etree.XPath', (['""".//*[local-name()=\'record.code\' or local-name()=\'subrecord.code\']/text()"""'], {}), '(\n ".//*[local-name()=\'record.code\' or local-name()=\'subrecord.code\']/text()")\n', (6582, 6663), False, 'from lxml import etree\n'), ((12168, 12186), 'dateutil.parser.parse', 'parse_date', (['cutoff'], {}), '(cutoff)\n', (12178, 12186), True, 'from dateutil.parser import parse as parse_date\n'), ((12220, 12229), 'functools.wraps', 'wraps', (['fn'], {}), '(fn)\n', (12225, 12229), False, 'from functools import wraps\n'), ((2183, 2224), 'pytest.fail', 'pytest.fail', (['f"""Did not raise {exception}"""'], {}), "(f'Did not raise {exception}')\n", (2194, 2224), False, 'import pytest\n'), ((2534, 2595), 'pytest.fail', 'pytest.fail', (['f"""Expected validation error for value "{value}\\""""'], {}), '(f\'Expected validation error for value "{value}"\')\n', (2545, 2595), False, 'import pytest\n'), ((7981, 7999), 'lxml.etree.XML', 'etree.XML', (['content'], {}), '(content)\n', (7990, 7999), False, 'from lxml import etree\n'), ((8430, 8445), 'dateutil.relativedelta.relativedelta', 'relativedelta', ([], {}), '()\n', (8443, 8445), False, 'from dateutil.relativedelta import relativedelta\n'), ((8447, 8471), 'dateutil.relativedelta.relativedelta', 'relativedelta', ([], {'months': '(+1)'}), '(months=+1)\n', (8460, 8471), False, 'from dateutil.relativedelta import relativedelta\n'), ((8494, 8517), 'dateutil.relativedelta.relativedelta', 'relativedelta', ([], {'years': '(-1)'}), '(years=-1)\n', (8507, 8517), False, 'from dateutil.relativedelta import relativedelta\n'), ((8519, 
8553), 'dateutil.relativedelta.relativedelta', 'relativedelta', ([], {'years': '(-1)', 'months': '(+1)'}), '(years=-1, months=+1)\n', (8532, 8553), False, 'from dateutil.relativedelta import relativedelta\n'), ((8587, 8630), 'dateutil.relativedelta.relativedelta', 'relativedelta', ([], {'years': '(+1)', 'months': '(+1)', 'days': '(+1)'}), '(years=+1, months=+1, days=+1)\n', (8600, 8630), False, 'from dateutil.relativedelta import relativedelta\n'), ((8644, 8678), 'dateutil.relativedelta.relativedelta', 'relativedelta', ([], {'years': '(+1)', 'months': '(+2)'}), '(years=+1, months=+2)\n', (8657, 8678), False, 'from dateutil.relativedelta import relativedelta\n'), ((8707, 8730), 'dateutil.relativedelta.relativedelta', 'relativedelta', ([], {'years': '(-2)'}), '(years=-2)\n', (8720, 8730), False, 'from dateutil.relativedelta import relativedelta\n'), ((8732, 8764), 'dateutil.relativedelta.relativedelta', 'relativedelta', ([], {'years': '(+2)', 'days': '(+1)'}), '(years=+2, days=+1)\n', (8745, 8764), False, 'from dateutil.relativedelta import relativedelta\n'), ((8788, 8810), 'dateutil.relativedelta.relativedelta', 'relativedelta', ([], {'days': '(+1)'}), '(days=+1)\n', (8801, 8810), False, 'from dateutil.relativedelta import relativedelta\n'), ((8812, 8836), 'dateutil.relativedelta.relativedelta', 'relativedelta', ([], {'months': '(+1)'}), '(months=+1)\n', (8825, 8836), False, 'from dateutil.relativedelta import relativedelta\n'), ((8868, 8892), 'dateutil.relativedelta.relativedelta', 'relativedelta', ([], {'months': '(-1)'}), '(months=-1)\n', (8881, 8892), False, 'from dateutil.relativedelta import relativedelta\n'), ((8894, 8916), 'dateutil.relativedelta.relativedelta', 'relativedelta', ([], {'days': '(-1)'}), '(days=-1)\n', (8907, 8916), False, 'from dateutil.relativedelta import relativedelta\n'), ((8946, 8979), 'dateutil.relativedelta.relativedelta', 'relativedelta', ([], {'months': '(+1)', 'days': '(+1)'}), '(months=+1, days=+1)\n', (8959, 8979), False, 'from dateutil.relativedelta import relativedelta\n'), ((8981, 9005), 'dateutil.relativedelta.relativedelta', 'relativedelta', ([], {'months': '(+2)'}), '(months=+2)\n', (8994, 9005), False, 'from dateutil.relativedelta import relativedelta\n'), ((9036, 9069), 'dateutil.relativedelta.relativedelta', 'relativedelta', ([], {'months': '(+1)', 'days': '(+1)'}), '(months=+1, days=+1)\n', (9049, 9069), False, 'from dateutil.relativedelta import relativedelta\n'), ((9123, 9156), 'dateutil.relativedelta.relativedelta', 'relativedelta', ([], {'months': '(+2)', 'days': '(+1)'}), '(months=+2, days=+1)\n', (9136, 9156), False, 'from dateutil.relativedelta import relativedelta\n'), ((9170, 9194), 'dateutil.relativedelta.relativedelta', 'relativedelta', ([], {'months': '(+3)'}), '(months=+3)\n', (9183, 9194), False, 'from dateutil.relativedelta import relativedelta\n'), ((9253, 9287), 'dateutil.relativedelta.relativedelta', 'relativedelta', ([], {'years': '(-2)', 'months': '(-2)'}), '(years=-2, months=-2)\n', (9266, 9287), False, 'from dateutil.relativedelta import relativedelta\n'), ((9301, 9324), 'dateutil.relativedelta.relativedelta', 'relativedelta', ([], {'years': '(-2)'}), '(years=-2)\n', (9314, 9324), False, 'from dateutil.relativedelta import relativedelta\n'), ((9381, 9414), 'dateutil.relativedelta.relativedelta', 'relativedelta', ([], {'months': '(+1)', 'days': '(+1)'}), '(months=+1, days=+1)\n', (9394, 9414), False, 'from dateutil.relativedelta import relativedelta\n'), ((9428, 9462), 'dateutil.relativedelta.relativedelta', 'relativedelta', 
([], {'years': '(+2)', 'months': '(+2)'}), '(years=+2, months=+2)\n', (9441, 9462), False, 'from dateutil.relativedelta import relativedelta\n'), ((9515, 9538), 'dateutil.relativedelta.relativedelta', 'relativedelta', ([], {'days': '(+15)'}), '(days=+15)\n', (9528, 9538), False, 'from dateutil.relativedelta import relativedelta\n'), ((9552, 9596), 'dateutil.relativedelta.relativedelta', 'relativedelta', ([], {'days': '(+14)', 'months': '(+1)', 'years': '(+1)'}), '(days=+14, months=+1, years=+1)\n', (9565, 9596), False, 'from dateutil.relativedelta import relativedelta\n'), ((9657, 9691), 'dateutil.relativedelta.relativedelta', 'relativedelta', ([], {'months': '(-1)', 'days': '(+14)'}), '(months=-1, days=+14)\n', (9670, 9691), False, 'from dateutil.relativedelta import relativedelta\n'), ((9705, 9728), 'dateutil.relativedelta.relativedelta', 'relativedelta', ([], {'days': '(+14)'}), '(days=+14)\n', (9718, 9728), False, 'from dateutil.relativedelta import relativedelta\n'), ((9791, 9814), 'dateutil.relativedelta.relativedelta', 'relativedelta', ([], {'days': '(+15)'}), '(days=+15)\n', (9804, 9814), False, 'from dateutil.relativedelta import relativedelta\n'), ((9828, 9862), 'dateutil.relativedelta.relativedelta', 'relativedelta', ([], {'days': '(+14)', 'months': '(+1)'}), '(days=+14, months=+1)\n', (9841, 9862), False, 'from dateutil.relativedelta import relativedelta\n'), ((9899, 9922), 'dateutil.relativedelta.relativedelta', 'relativedelta', ([], {'years': '(+1)'}), '(years=+1)\n', (9912, 9922), False, 'from dateutil.relativedelta import relativedelta\n'), ((9924, 9956), 'dateutil.relativedelta.relativedelta', 'relativedelta', ([], {'years': '(+3)', 'days': '(+2)'}), '(years=+3, days=+2)\n', (9937, 9956), False, 'from dateutil.relativedelta import relativedelta\n'), ((9994, 10028), 'dateutil.relativedelta.relativedelta', 'relativedelta', ([], {'years': '(+3)', 'months': '(+1)'}), '(years=+3, months=+1)\n', (10007, 10028), False, 'from dateutil.relativedelta import relativedelta\n'), ((10042, 10076), 'dateutil.relativedelta.relativedelta', 'relativedelta', ([], {'years': '(+3)', 'months': '(+2)'}), '(years=+3, months=+2)\n', (10055, 10076), False, 'from dateutil.relativedelta import relativedelta\n'), ((10111, 10135), 'dateutil.relativedelta.relativedelta', 'relativedelta', ([], {'months': '(+1)'}), '(months=+1)\n', (10124, 10135), False, 'from dateutil.relativedelta import relativedelta\n'), ((10137, 10159), 'dateutil.relativedelta.relativedelta', 'relativedelta', ([], {'days': '(+1)'}), '(days=+1)\n', (10150, 10159), False, 'from dateutil.relativedelta import relativedelta\n'), ((10193, 10208), 'dateutil.relativedelta.relativedelta', 'relativedelta', ([], {}), '()\n', (10206, 10208), False, 'from dateutil.relativedelta import relativedelta\n'), ((10210, 10233), 'dateutil.relativedelta.relativedelta', 'relativedelta', ([], {'days': '(+14)'}), '(days=+14)\n', (10223, 10233), False, 'from dateutil.relativedelta import relativedelta\n'), ((10265, 10288), 'dateutil.relativedelta.relativedelta', 'relativedelta', ([], {'days': '(+14)'}), '(days=+14)\n', (10278, 10288), False, 'from dateutil.relativedelta import relativedelta\n'), ((10290, 10314), 'dateutil.relativedelta.relativedelta', 'relativedelta', ([], {'months': '(+1)'}), '(months=+1)\n', (10303, 10314), False, 'from dateutil.relativedelta import relativedelta\n'), ((10337, 10360), 'dateutil.relativedelta.relativedelta', 'relativedelta', ([], {'weeks': '(-4)'}), '(weeks=-4)\n', (10350, 10360), False, 'from dateutil.relativedelta import 
relativedelta\n'), ((10362, 10385), 'dateutil.relativedelta.relativedelta', 'relativedelta', ([], {'weeks': '(+4)'}), '(weeks=+4)\n', (10375, 10385), False, 'from dateutil.relativedelta import relativedelta\n'), ((10407, 10431), 'dateutil.relativedelta.relativedelta', 'relativedelta', ([], {'weeks': '(+10)'}), '(weeks=+10)\n', (10420, 10431), False, 'from dateutil.relativedelta import relativedelta\n'), ((10433, 10457), 'dateutil.relativedelta.relativedelta', 'relativedelta', ([], {'weeks': '(+20)'}), '(weeks=+20)\n', (10446, 10457), False, 'from dateutil.relativedelta import relativedelta\n'), ((10479, 10494), 'dateutil.relativedelta.relativedelta', 'relativedelta', ([], {}), '()\n', (10492, 10494), False, 'from dateutil.relativedelta import relativedelta\n'), ((10533, 10548), 'dateutil.relativedelta.relativedelta', 'relativedelta', ([], {}), '()\n', (10546, 10548), False, 'from dateutil.relativedelta import relativedelta\n'), ((10550, 10573), 'dateutil.relativedelta.relativedelta', 'relativedelta', ([], {'days': '(+14)'}), '(days=+14)\n', (10563, 10573), False, 'from dateutil.relativedelta import relativedelta\n'), ((11020, 11046), 'common.util.TaricDateRange', 'TaricDateRange', (['start', 'end'], {}), '(start, end)\n', (11034, 11046), False, 'from common.util import TaricDateRange\n'), ((2381, 2444), 'pytest.fail', 'pytest.fail', (['f"""Unexpected validation error for value "{value}\\""""'], {}), '(f\'Unexpected validation error for value "{value}"\')\n', (2392, 2444), False, 'import pytest\n'), ((3493, 3521), 'common.util.get_field_tuple', 'get_field_tuple', (['existing', 'f'], {}), '(existing, f)\n', (3508, 3521), False, 'from common.util import get_field_tuple\n'), ((3525, 3558), 'common.util.get_field_tuple', 'get_field_tuple', (['not_duplicate', 'f'], {}), '(not_duplicate, f)\n', (3540, 3558), False, 'from common.util import get_field_tuple\n'), ((6286, 6305), 'common.renderers.counter_generator', 'counter_generator', ([], {}), '()\n', (6303, 6305), False, 'from common.renderers import counter_generator\n'), ((7673, 7769), 'django.urls.reverse', 'reverse', (['"""workbaskets:workbasket-detail"""'], {'kwargs': "{'pk': approved_transaction.workbasket.pk}"}), "('workbaskets:workbasket-detail', kwargs={'pk': approved_transaction\n .workbasket.pk})\n", (7680, 7769), False, 'from django.urls import reverse\n'), ((8173, 8209), 'common.serializers.validate_taric_xml_record_order', 'validate_taric_xml_record_order', (['xml'], {}), '(xml)\n', (8204, 8209), False, 'from common.serializers import validate_taric_xml_record_order\n'), ((10714, 10734), 'datetime.datetime.now', 'datetime.now', ([], {'tz': 'UTC'}), '(tz=UTC)\n', (10726, 10734), False, 'from datetime import datetime\n'), ((11179, 11203), 'dateutil.relativedelta.relativedelta', 'relativedelta', ([], {'months': '(-1)'}), '(months=-1)\n', (11192, 11203), False, 'from dateutil.relativedelta import relativedelta\n'), ((11222, 11245), 'dateutil.relativedelta.relativedelta', 'relativedelta', ([], {'days': '(-14)'}), '(days=-14)\n', (11235, 11245), False, 'from dateutil.relativedelta import relativedelta\n'), ((11355, 11379), 'dateutil.relativedelta.relativedelta', 'relativedelta', ([], {'months': '(-1)'}), '(months=-1)\n', (11368, 11379), False, 'from dateutil.relativedelta import relativedelta\n'), ((11398, 11420), 'dateutil.relativedelta.relativedelta', 'relativedelta', ([], {'days': '(-1)'}), '(days=-1)\n', (11411, 11420), False, 'from dateutil.relativedelta import relativedelta\n'), ((11528, 11551), 
'dateutil.relativedelta.relativedelta', 'relativedelta', ([], {'days': '(+14)'}), '(days=+14)\n', (11541, 11551), False, 'from dateutil.relativedelta import relativedelta\n'), ((11570, 11594), 'dateutil.relativedelta.relativedelta', 'relativedelta', ([], {'months': '(+1)'}), '(months=+1)\n', (11583, 11594), False, 'from dateutil.relativedelta import relativedelta\n'), ((11704, 11728), 'dateutil.relativedelta.relativedelta', 'relativedelta', ([], {'months': '(-1)'}), '(months=-1)\n', (11717, 11728), False, 'from dateutil.relativedelta import relativedelta\n'), ((11747, 11771), 'dateutil.relativedelta.relativedelta', 'relativedelta', ([], {'months': '(+1)'}), '(months=+1)\n', (11760, 11771), False, 'from dateutil.relativedelta import relativedelta\n'), ((11881, 11905), 'dateutil.relativedelta.relativedelta', 'relativedelta', ([], {'months': '(-1)'}), '(months=-1)\n', (11894, 11905), False, 'from dateutil.relativedelta import relativedelta\n'), ((3016, 3048), 'common.util.get_field_tuple', 'get_field_tuple', (['existing', 'field'], {}), '(existing, field)\n', (3031, 3048), False, 'from common.util import get_field_tuple\n'), ((12686, 12739), 'pytest.fail', 'pytest.fail', (['f"""Rule applied before {cutoff:%Y-%m-%d}"""'], {}), "(f'Rule applied before {cutoff:%Y-%m-%d}')\n", (12697, 12739), False, 'import pytest\n'), ((12424, 12446), 'dateutil.relativedelta.relativedelta', 'relativedelta', ([], {'days': '(-1)'}), '(days=-1)\n', (12437, 12446), False, 'from dateutil.relativedelta import relativedelta\n')] |
import logging
import unittest
from pyinstrument import Profiler
from nuplan.planning.scenario_builder.nuplan_db.test.nuplan_scenario_test_utils import get_test_nuplan_scenario
from nuplan.planning.simulation.history.simulation_history_buffer import SimulationHistoryBuffer
from nuplan.planning.simulation.observation.idm_agents import IDMAgents
from nuplan.planning.simulation.simulation_time_controller.simulation_iteration import SimulationIteration
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
class TestProfileIDM(unittest.TestCase):
"""
Profiling test for IDM agents.
"""
def setUp(self) -> None:
"""
Inherited, see super class.
"""
self.n_repeat_trials = 1
self.display_results = True
self.scenario = get_test_nuplan_scenario()
def test_profile_idm_agent_observation(self) -> None:
"""Profile IDMAgents."""
profiler = Profiler(interval=0.0001)
profiler.start()
# How many times to repeat runtime test
for _ in range(self.n_repeat_trials):
observation = IDMAgents(
target_velocity=10,
min_gap_to_lead_agent=0.5,
headway_time=1.5,
accel_max=1.0,
decel_max=2.0,
scenario=self.scenario,
)
for step in range(self.scenario.get_number_of_iterations() - 1):
iteration = SimulationIteration(time_point=self.scenario.get_time_point(step), index=step)
next_iteration = SimulationIteration(time_point=self.scenario.get_time_point(step + 1), index=step + 1)
buffer = SimulationHistoryBuffer.initialize_from_list(
1,
[self.scenario.get_ego_state_at_iteration(step)],
[self.scenario.get_tracked_objects_at_iteration(step)],
next_iteration.time_point.time_s - iteration.time_point.time_s,
)
observation.update_observation(iteration, next_iteration, buffer)
profiler.stop()
if self.display_results:
logger.info(profiler.output_text(unicode=True, color=True))
if __name__ == "__main__":
unittest.main()
| [
"logging.getLogger",
"logging.basicConfig",
"nuplan.planning.scenario_builder.nuplan_db.test.nuplan_scenario_test_utils.get_test_nuplan_scenario",
"pyinstrument.Profiler",
"nuplan.planning.simulation.observation.idm_agents.IDMAgents",
"unittest.main"
] | [((465, 492), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (482, 492), False, 'import logging\n'), ((493, 532), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO'}), '(level=logging.INFO)\n', (512, 532), False, 'import logging\n'), ((2252, 2267), 'unittest.main', 'unittest.main', ([], {}), '()\n', (2265, 2267), False, 'import unittest\n'), ((810, 836), 'nuplan.planning.scenario_builder.nuplan_db.test.nuplan_scenario_test_utils.get_test_nuplan_scenario', 'get_test_nuplan_scenario', ([], {}), '()\n', (834, 836), False, 'from nuplan.planning.scenario_builder.nuplan_db.test.nuplan_scenario_test_utils import get_test_nuplan_scenario\n'), ((948, 973), 'pyinstrument.Profiler', 'Profiler', ([], {'interval': '(0.0001)'}), '(interval=0.0001)\n', (956, 973), False, 'from pyinstrument import Profiler\n'), ((1120, 1252), 'nuplan.planning.simulation.observation.idm_agents.IDMAgents', 'IDMAgents', ([], {'target_velocity': '(10)', 'min_gap_to_lead_agent': '(0.5)', 'headway_time': '(1.5)', 'accel_max': '(1.0)', 'decel_max': '(2.0)', 'scenario': 'self.scenario'}), '(target_velocity=10, min_gap_to_lead_agent=0.5, headway_time=1.5,\n accel_max=1.0, decel_max=2.0, scenario=self.scenario)\n', (1129, 1252), False, 'from nuplan.planning.simulation.observation.idm_agents import IDMAgents\n')] |
"""
YTArray class.
"""
from __future__ import print_function
#-----------------------------------------------------------------------------
# Copyright (c) 2013, yt Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
import copy
import numpy as np
from distutils.version import LooseVersion
from functools import wraps
from numpy import \
add, subtract, multiply, divide, logaddexp, logaddexp2, true_divide, \
floor_divide, negative, power, remainder, mod, absolute, rint, \
sign, conj, exp, exp2, log, log2, log10, expm1, log1p, sqrt, square, \
reciprocal, sin, cos, tan, arcsin, arccos, arctan, arctan2, \
hypot, sinh, cosh, tanh, arcsinh, arccosh, arctanh, deg2rad, rad2deg, \
bitwise_and, bitwise_or, bitwise_xor, invert, left_shift, right_shift, \
greater, greater_equal, less, less_equal, not_equal, equal, logical_and, \
logical_or, logical_xor, logical_not, maximum, minimum, fmax, fmin, \
isreal, iscomplex, isfinite, isinf, isnan, signbit, copysign, nextafter, \
modf, ldexp, frexp, fmod, floor, ceil, trunc, fabs, spacing
try:
# numpy 1.13 or newer
from numpy import positive, divmod as divmod_, isnat, heaviside
except ImportError:
positive, divmod_, isnat, heaviside = (None,)*4
from yt.units.unit_object import Unit, UnitParseError
from yt.units.unit_registry import UnitRegistry
from yt.units.dimensions import \
angle, \
current_mks, \
dimensionless, \
em_dimensions
from yt.utilities.exceptions import \
YTUnitOperationError, YTUnitConversionError, \
YTUfuncUnitError, YTIterableUnitCoercionError, \
YTInvalidUnitEquivalence, YTEquivalentDimsError
from yt.utilities.lru_cache import lru_cache
from numbers import Number as numeric_type
from yt.utilities.on_demand_imports import _astropy
from sympy import Rational
from yt.units.unit_lookup_table import \
default_unit_symbol_lut
from yt.units.equivalencies import equivalence_registry
from yt.utilities.logger import ytLogger as mylog
from .pint_conversions import convert_pint_units
NULL_UNIT = Unit()
POWER_SIGN_MAPPING = {multiply: 1, divide: -1}
# redefine this here to avoid a circular import from yt.funcs
def iterable(obj):
try: len(obj)
except: return False
return True
def return_arr(func):
@wraps(func)
def wrapped(*args, **kwargs):
ret, units = func(*args, **kwargs)
if ret.shape == ():
return YTQuantity(ret, units)
else:
# This could be a subclass, so don't call YTArray directly.
return type(args[0])(ret, units)
return wrapped
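# Minimal sketch of what the decorator achieves (not part of the original
# module): a decorated reduction keeps its units, and a scalar result comes back
# as a YTQuantity instead of a 0-d array, e.g.:
#
#   a = YTArray([1.0, 2.0, 3.0], 'cm')
#   a.sum()   # -> YTQuantity(6.0, 'cm'), via the decorated sum() further below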
@lru_cache(maxsize=128, typed=False)
def sqrt_unit(unit):
return unit**0.5
@lru_cache(maxsize=128, typed=False)
def multiply_units(unit1, unit2):
return unit1 * unit2
def preserve_units(unit1, unit2=None):
return unit1
@lru_cache(maxsize=128, typed=False)
def power_unit(unit, power):
return unit**power
@lru_cache(maxsize=128, typed=False)
def square_unit(unit):
return unit*unit
@lru_cache(maxsize=128, typed=False)
def divide_units(unit1, unit2):
return unit1/unit2
@lru_cache(maxsize=128, typed=False)
def reciprocal_unit(unit):
return unit**-1
def passthrough_unit(unit, unit2=None):
return unit
def return_without_unit(unit, unit2=None):
return None
def arctan2_unit(unit1, unit2):
return NULL_UNIT
def comparison_unit(unit1, unit2=None):
return None
def invert_units(unit):
raise TypeError(
"Bit-twiddling operators are not defined for YTArray instances")
def bitop_units(unit1, unit2):
raise TypeError(
"Bit-twiddling operators are not defined for YTArray instances")
def get_inp_u_unary(ufunc, inputs, out_arr=None):
inp = inputs[0]
u = getattr(inp, 'units', None)
if u is None:
u = NULL_UNIT
if u.dimensions is angle and ufunc in trigonometric_operators:
inp = inp.in_units('radian').v
if out_arr is not None:
out_arr = ufunc(inp).view(np.ndarray)
return out_arr, inp, u
def get_inp_u_binary(ufunc, inputs):
inp1 = coerce_iterable_units(inputs[0])
inp2 = coerce_iterable_units(inputs[1])
unit1 = getattr(inp1, 'units', None)
unit2 = getattr(inp2, 'units', None)
ret_class = get_binary_op_return_class(type(inp1), type(inp2))
if unit1 is None:
unit1 = Unit(registry=getattr(unit2, 'registry', None))
if unit2 is None and ufunc is not power:
unit2 = Unit(registry=getattr(unit1, 'registry', None))
elif ufunc is power:
unit2 = inp2
if isinstance(unit2, np.ndarray):
if isinstance(unit2, YTArray):
if unit2.units.is_dimensionless:
pass
else:
raise YTUnitOperationError(ufunc, unit1, unit2)
unit2 = 1.0
return (inp1, inp2), (unit1, unit2), ret_class
def handle_preserve_units(inps, units, ufunc, ret_class):
if units[0] != units[1]:
any_nonzero = [np.any(inps[0]), np.any(inps[1])]
if any_nonzero[0] == np.bool_(False):
units = (units[1], units[1])
elif any_nonzero[1] == np.bool_(False):
units = (units[0], units[0])
else:
if not units[0].same_dimensions_as(units[1]):
raise YTUnitOperationError(ufunc, *units)
inps = (inps[0], ret_class(inps[1]).to(
ret_class(inps[0]).units))
return inps, units
def handle_comparison_units(inps, units, ufunc, ret_class, raise_error=False):
if units[0] != units[1]:
u1d = units[0].is_dimensionless
u2d = units[1].is_dimensionless
any_nonzero = [np.any(inps[0]), np.any(inps[1])]
if any_nonzero[0] == np.bool_(False):
units = (units[1], units[1])
elif any_nonzero[1] == np.bool_(False):
units = (units[0], units[0])
elif not any([u1d, u2d]):
if not units[0].same_dimensions_as(units[1]):
raise YTUnitOperationError(ufunc, *units)
else:
if raise_error:
raise YTUfuncUnitError(ufunc, *units)
inps = (inps[0], ret_class(inps[1]).to(
ret_class(inps[0]).units))
return inps, units
def handle_multiply_divide_units(unit, units, out, out_arr):
if unit.is_dimensionless and unit.base_value != 1.0:
if not units[0].is_dimensionless:
if units[0].dimensions == units[1].dimensions:
out_arr = np.multiply(out_arr.view(np.ndarray),
unit.base_value, out=out)
unit = Unit(registry=unit.registry)
return out, out_arr, unit
def coerce_iterable_units(input_object):
if isinstance(input_object, np.ndarray):
return input_object
if iterable(input_object):
if any([isinstance(o, YTArray) for o in input_object]):
ff = getattr(input_object[0], 'units', NULL_UNIT, )
if any([ff != getattr(_, 'units', NULL_UNIT) for _ in input_object]):
raise YTIterableUnitCoercionError(input_object)
# This will create a copy of the data in the iterable.
return YTArray(input_object)
return input_object
else:
return input_object
def sanitize_units_mul(this_object, other_object):
inp = coerce_iterable_units(this_object)
ret = coerce_iterable_units(other_object)
# If the other object is a YTArray and has the same dimensions as the object
# under consideration, convert so we don't mix units with the same
# dimensions.
if isinstance(ret, YTArray):
if inp.units.same_dimensions_as(ret.units):
ret.in_units(inp.units)
return ret
def sanitize_units_add(this_object, other_object, op_string):
inp = coerce_iterable_units(this_object)
ret = coerce_iterable_units(other_object)
# Make sure the other object is a YTArray before we use the `units`
# attribute.
if isinstance(ret, YTArray):
if not inp.units.same_dimensions_as(ret.units):
# handle special case of adding or subtracting with zero or
# array filled with zero
if not np.any(other_object):
return ret.view(np.ndarray)
elif not np.any(this_object):
return ret
raise YTUnitOperationError(op_string, inp.units, ret.units)
ret = ret.in_units(inp.units)
else:
# If the other object is not a YTArray, then one of the arrays must be
# dimensionless or filled with zeros
if not inp.units.is_dimensionless and np.any(ret):
raise YTUnitOperationError(op_string, inp.units, dimensionless)
return ret
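# Illustrative behaviour (not part of the original module): addition with a bare
# zero (or an all-zero array) is allowed even though it carries no units, while
# adding nonzero quantities of incompatible dimensions raises
# YTUnitOperationError, e.g.:
#
#   YTArray([1.0, 2.0], 'cm') + 0.0                        # allowed
#   YTArray([1.0, 2.0], 'cm') + YTArray([1.0, 2.0], 'g')   # YTUnitOperationError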
def validate_comparison_units(this, other, op_string):
# Check that other is a YTArray.
if hasattr(other, 'units'):
if this.units.expr is other.units.expr:
if this.units.base_value == other.units.base_value:
return other
if not this.units.same_dimensions_as(other.units):
raise YTUnitOperationError(op_string, this.units, other.units)
return other.in_units(this.units)
return other
@lru_cache(maxsize=128, typed=False)
def _unit_repr_check_same(my_units, other_units):
"""
Takes a Unit object, or string of known unit symbol, and check that it
is compatible with this quantity. Returns Unit object.
"""
# let Unit() handle units arg if it's not already a Unit obj.
if not isinstance(other_units, Unit):
other_units = Unit(other_units, registry=my_units.registry)
equiv_dims = em_dimensions.get(my_units.dimensions, None)
if equiv_dims == other_units.dimensions:
if current_mks in equiv_dims.free_symbols:
base = "SI"
else:
base = "CGS"
raise YTEquivalentDimsError(my_units, other_units, base)
if not my_units.same_dimensions_as(other_units):
raise YTUnitConversionError(
my_units, my_units.dimensions, other_units, other_units.dimensions)
return other_units
unary_operators = (
negative, absolute, rint, sign, conj, exp, exp2, log, log2,
log10, expm1, log1p, sqrt, square, reciprocal, sin, cos, tan, arcsin,
arccos, arctan, sinh, cosh, tanh, arcsinh, arccosh, arctanh, deg2rad,
rad2deg, invert, logical_not, isreal, iscomplex, isfinite, isinf, isnan,
signbit, floor, ceil, trunc, modf, frexp, fabs, spacing, positive, isnat,
)
binary_operators = (
add, subtract, multiply, divide, logaddexp, logaddexp2, true_divide, power,
remainder, mod, arctan2, hypot, bitwise_and, bitwise_or, bitwise_xor,
left_shift, right_shift, greater, greater_equal, less, less_equal,
not_equal, equal, logical_and, logical_or, logical_xor, maximum, minimum,
fmax, fmin, copysign, nextafter, ldexp, fmod, divmod_, heaviside
)
trigonometric_operators = (
sin, cos, tan,
)
class YTArray(np.ndarray):
"""
An ndarray subclass that attaches a symbolic unit object to the array data.
Parameters
----------
input_array : :obj:`!iterable`
A tuple, list, or array to attach units to
input_units : String unit specification, unit symbol object, or astropy units
The units of the array. Powers must be specified using python
syntax (cm**3, not cm^3).
registry : ~yt.units.unit_registry.UnitRegistry
The registry to create units from. If input_units is already associated
with a unit registry and this is specified, this will be used instead of
the registry associated with the unit object.
dtype : data-type
The dtype of the array data. Defaults to the dtype of the input data,
or, if none is found, uses np.float64
bypass_validation : boolean
If True, all input validation is skipped. Using this option may produce
        corrupted, invalid units or array data, but can lead to significant
        speedups in cases where the input validation logic adds significant
        overhead. If set, input_units *must* be a valid unit object. Defaults
        to False.
Examples
--------
>>> from yt import YTArray
>>> a = YTArray([1, 2, 3], 'cm')
>>> b = YTArray([4, 5, 6], 'm')
>>> a + b
YTArray([ 401., 502., 603.]) cm
>>> b + a
YTArray([ 4.01, 5.02, 6.03]) m
NumPy ufuncs will pass through units where appropriate.
>>> import numpy as np
>>> a = YTArray(np.arange(8) - 4, 'g/cm**3')
>>> np.abs(a)
YTArray([4, 3, 2, 1, 0, 1, 2, 3]) g/cm**3
and strip them when it would be annoying to deal with them.
>>> np.log10(a)
array([ -inf, 0. , 0.30103 , 0.47712125, 0.60205999,
0.69897 , 0.77815125, 0.84509804])
YTArray is tightly integrated with yt datasets:
>>> import yt
>>> ds = yt.load('IsolatedGalaxy/galaxy0030/galaxy0030')
>>> a = ds.arr(np.ones(5), 'code_length')
>>> a.in_cgs()
YTArray([ 3.08600000e+24, 3.08600000e+24, 3.08600000e+24,
3.08600000e+24, 3.08600000e+24]) cm
This is equivalent to:
>>> b = YTArray(np.ones(5), 'code_length', registry=ds.unit_registry)
>>> np.all(a == b)
True
"""
_ufunc_registry = {
add: preserve_units,
subtract: preserve_units,
multiply: multiply_units,
divide: divide_units,
logaddexp: return_without_unit,
logaddexp2: return_without_unit,
true_divide: divide_units,
floor_divide: divide_units,
negative: passthrough_unit,
power: power_unit,
remainder: preserve_units,
mod: preserve_units,
fmod: preserve_units,
absolute: passthrough_unit,
fabs: passthrough_unit,
rint: return_without_unit,
sign: return_without_unit,
conj: passthrough_unit,
exp: return_without_unit,
exp2: return_without_unit,
log: return_without_unit,
log2: return_without_unit,
log10: return_without_unit,
expm1: return_without_unit,
log1p: return_without_unit,
sqrt: sqrt_unit,
square: square_unit,
reciprocal: reciprocal_unit,
sin: return_without_unit,
cos: return_without_unit,
tan: return_without_unit,
sinh: return_without_unit,
cosh: return_without_unit,
tanh: return_without_unit,
arcsin: return_without_unit,
arccos: return_without_unit,
arctan: return_without_unit,
arctan2: arctan2_unit,
arcsinh: return_without_unit,
arccosh: return_without_unit,
arctanh: return_without_unit,
hypot: preserve_units,
deg2rad: return_without_unit,
rad2deg: return_without_unit,
bitwise_and: bitop_units,
bitwise_or: bitop_units,
bitwise_xor: bitop_units,
invert: invert_units,
left_shift: bitop_units,
right_shift: bitop_units,
greater: comparison_unit,
greater_equal: comparison_unit,
less: comparison_unit,
less_equal: comparison_unit,
not_equal: comparison_unit,
equal: comparison_unit,
logical_and: comparison_unit,
logical_or: comparison_unit,
logical_xor: comparison_unit,
logical_not: return_without_unit,
maximum: preserve_units,
minimum: preserve_units,
fmax: preserve_units,
fmin: preserve_units,
isreal: return_without_unit,
iscomplex: return_without_unit,
isfinite: return_without_unit,
isinf: return_without_unit,
isnan: return_without_unit,
signbit: return_without_unit,
copysign: passthrough_unit,
nextafter: preserve_units,
modf: passthrough_unit,
ldexp: bitop_units,
frexp: return_without_unit,
floor: passthrough_unit,
ceil: passthrough_unit,
trunc: passthrough_unit,
spacing: passthrough_unit,
positive: passthrough_unit,
divmod_: passthrough_unit,
isnat: return_without_unit,
heaviside: preserve_units,
}
__array_priority__ = 2.0
def __new__(cls, input_array, input_units=None, registry=None, dtype=None,
bypass_validation=False):
if dtype is None:
dtype = getattr(input_array, 'dtype', np.float64)
if bypass_validation is True:
obj = np.asarray(input_array, dtype=dtype).view(cls)
obj.units = input_units
if registry is not None:
obj.units.registry = registry
return obj
if input_array is NotImplemented:
return input_array.view(cls)
if registry is None and isinstance(input_units, (str, bytes)):
if input_units.startswith('code_'):
raise UnitParseError(
"Code units used without referring to a dataset. \n"
"Perhaps you meant to do something like this instead: \n"
"ds.arr(%s, \"%s\")" % (input_array, input_units)
)
if isinstance(input_array, YTArray):
ret = input_array.view(cls)
if input_units is None:
if registry is None:
ret.units = input_array.units
else:
units = Unit(str(input_array.units), registry=registry)
ret.units = units
elif isinstance(input_units, Unit):
ret.units = input_units
else:
ret.units = Unit(input_units, registry=registry)
return ret
elif isinstance(input_array, np.ndarray):
pass
elif iterable(input_array) and input_array:
if isinstance(input_array[0], YTArray):
return YTArray(np.array(input_array, dtype=dtype),
input_array[0].units, registry=registry)
# Input array is an already formed ndarray instance
# We first cast to be our class type
obj = np.asarray(input_array, dtype=dtype).view(cls)
# Check units type
if input_units is None:
# Nothing provided. Make dimensionless...
units = Unit()
elif isinstance(input_units, Unit):
if registry and registry is not input_units.registry:
units = Unit(str(input_units), registry=registry)
else:
units = input_units
else:
# units kwarg set, but it's not a Unit object.
# don't handle all the cases here, let the Unit class handle if
# it's a str.
units = Unit(input_units, registry=registry)
# Attach the units
obj.units = units
return obj
def __repr__(self):
"""
"""
return super(YTArray, self).__repr__()+' '+self.units.__repr__()
def __str__(self):
"""
"""
return str(self.view(np.ndarray)) + ' ' + str(self.units)
#
# Start unit conversion methods
#
def convert_to_units(self, units):
"""
Convert the array and units to the given units.
Parameters
----------
units : Unit object or str
The units you want to convert to.
"""
new_units = _unit_repr_check_same(self.units, units)
(conversion_factor, offset) = self.units.get_conversion_factor(new_units)
self.units = new_units
values = self.d
values *= conversion_factor
if offset:
np.subtract(self, offset*self.uq, self)
return self
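    # Illustrative sketch (not part of the original class): the conversion is
    # done in place and the same array is returned, e.g.:
    #
    #   a = YTArray([1.0, 2.0], 'km')
    #   a.convert_to_units('m')   # a now holds [1000., 2000.] in metres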
def convert_to_base(self, unit_system="cgs"):
"""
Convert the array and units to the equivalent base units in
the specified unit system.
Parameters
----------
unit_system : string, optional
The unit system to be used in the conversion. If not specified,
the default base units of cgs are used.
Examples
--------
>>> E = YTQuantity(2.5, "erg/s")
>>> E.convert_to_base(unit_system="galactic")
"""
return self.convert_to_units(self.units.get_base_equivalent(unit_system))
def convert_to_cgs(self):
"""
Convert the array and units to the equivalent cgs units.
"""
return self.convert_to_units(self.units.get_cgs_equivalent())
def convert_to_mks(self):
"""
Convert the array and units to the equivalent mks units.
"""
return self.convert_to_units(self.units.get_mks_equivalent())
def in_units(self, units, equivalence=None, **kwargs):
"""
Creates a copy of this array with the data in the supplied
units, and returns it.
Optionally, an equivalence can be specified to convert to an
equivalent quantity which is not in the same dimensions.
.. note::
All additional keyword arguments are passed to the
equivalency, which should be used if that particular
equivalency requires them.
Parameters
----------
units : Unit object or string
The units you want to get a new quantity in.
equivalence : string, optional
The equivalence you wish to use. To see which
equivalencies are supported for this unitful
quantity, try the :meth:`list_equivalencies`
method. Default: None
Returns
-------
YTArray
"""
if equivalence is None:
new_units = _unit_repr_check_same(self.units, units)
(conversion_factor, offset) = self.units.get_conversion_factor(new_units)
new_array = type(self)(self.ndview * conversion_factor, new_units)
if offset:
np.subtract(new_array, offset*new_array.uq, new_array)
return new_array
else:
return self.to_equivalent(units, equivalence, **kwargs)
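    # Illustrative sketch (not part of the original class): with an equivalence,
    # the target units may live in different dimensions, e.g.:
    #
    #   T = YTQuantity(1.0e7, 'K')
    #   T.in_units('keV', equivalence='thermal')   # roughly 0.86 keV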
def to(self, units, equivalence=None, **kwargs):
"""
An alias for YTArray.in_units().
See the docstrings of that function for details.
"""
return self.in_units(units, equivalence=equivalence, **kwargs)
def to_value(self, units=None, equivalence=None, **kwargs):
"""
Creates a copy of this array with the data in the supplied
units, and returns it without units. Output is therefore a
bare NumPy array.
Optionally, an equivalence can be specified to convert to an
equivalent quantity which is not in the same dimensions.
.. note::
All additional keyword arguments are passed to the
equivalency, which should be used if that particular
equivalency requires them.
Parameters
----------
units : Unit object or string, optional
The units you want to get the bare quantity in. If not
specified, the value will be returned in the current units.
equivalence : string, optional
The equivalence you wish to use. To see which
equivalencies are supported for this unitful
quantity, try the :meth:`list_equivalencies`
method. Default: None
Returns
-------
NumPy array
"""
if units is None:
v = self.value
else:
v = self.in_units(units, equivalence=equivalence, **kwargs).value
if isinstance(self, YTQuantity):
return float(v)
else:
return v
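    # Illustrative sketch (not part of the original class): strips the units
    # after an optional conversion, returning a plain ndarray (or a float for a
    # YTQuantity), e.g.:
    #
    #   a = YTArray([1.0, 2.0, 3.0], 'cm')
    #   a.to_value('m')   # -> array([0.01, 0.02, 0.03])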
def in_base(self, unit_system="cgs"):
"""
Creates a copy of this array with the data in the specified unit system,
and returns it in that system's base units.
Parameters
----------
unit_system : string, optional
The unit system to be used in the conversion. If not specified,
the default base units of cgs are used.
Examples
--------
>>> E = YTQuantity(2.5, "erg/s")
>>> E_new = E.in_base(unit_system="galactic")
"""
return self.in_units(self.units.get_base_equivalent(unit_system))
def in_cgs(self):
"""
Creates a copy of this array with the data in the equivalent cgs units,
and returns it.
Returns
-------
Quantity object with data converted to cgs units.
"""
return self.in_units(self.units.get_cgs_equivalent())
def in_mks(self):
"""
Creates a copy of this array with the data in the equivalent mks units,
and returns it.
Returns
-------
Quantity object with data converted to mks units.
"""
return self.in_units(self.units.get_mks_equivalent())
def to_equivalent(self, unit, equiv, **kwargs):
"""
Convert a YTArray or YTQuantity to an equivalent, e.g., something that is
related by only a constant factor but not in the same units.
Parameters
----------
unit : string
The unit that you wish to convert to.
equiv : string
The equivalence you wish to use. To see which equivalencies are
supported for this unitful quantity, try the
:meth:`list_equivalencies` method.
Examples
--------
>>> a = yt.YTArray(1.0e7,"K")
>>> a.to_equivalent("keV", "thermal")
"""
conv_unit = Unit(unit, registry=self.units.registry)
if self.units.same_dimensions_as(conv_unit):
return self.in_units(conv_unit)
this_equiv = equivalence_registry[equiv]()
oneway_or_equivalent = (
conv_unit.has_equivalent(equiv) or this_equiv._one_way)
if self.has_equivalent(equiv) and oneway_or_equivalent:
new_arr = this_equiv.convert(
self, conv_unit.dimensions, **kwargs)
if isinstance(new_arr, tuple):
try:
return type(self)(new_arr[0], new_arr[1]).in_units(unit)
except YTUnitConversionError:
raise YTInvalidUnitEquivalence(equiv, self.units, unit)
else:
return new_arr.in_units(unit)
else:
raise YTInvalidUnitEquivalence(equiv, self.units, unit)
def list_equivalencies(self):
"""
Lists the possible equivalencies associated with this YTArray or
YTQuantity.
"""
self.units.list_equivalencies()
def has_equivalent(self, equiv):
"""
Check to see if this YTArray or YTQuantity has an equivalent unit in
*equiv*.
"""
return self.units.has_equivalent(equiv)
def ndarray_view(self):
"""
Returns a view into the array, but as an ndarray rather than ytarray.
Returns
-------
View of this array's data.
"""
return self.view(np.ndarray)
def to_ndarray(self):
"""
Creates a copy of this array with the unit information stripped
"""
return np.array(self)
@classmethod
def from_astropy(cls, arr, unit_registry=None):
"""
Convert an AstroPy "Quantity" to a YTArray or YTQuantity.
Parameters
----------
arr : AstroPy Quantity
The Quantity to convert from.
unit_registry : yt UnitRegistry, optional
A yt unit registry to use in the conversion. If one is not
supplied, the default one will be used.
"""
# Converting from AstroPy Quantity
u = arr.unit
ap_units = []
for base, exponent in zip(u.bases, u.powers):
unit_str = base.to_string()
# we have to do this because AstroPy is silly and defines
# hour as "h"
if unit_str == "h": unit_str = "hr"
ap_units.append("%s**(%s)" % (unit_str, Rational(exponent)))
ap_units = "*".join(ap_units)
if isinstance(arr.value, np.ndarray):
return YTArray(arr.value, ap_units, registry=unit_registry)
else:
return YTQuantity(arr.value, ap_units, registry=unit_registry)
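    # Illustrative sketch (assumes AstroPy is installed; not part of the
    # original class):
    #
    #   from astropy.units import Quantity
    #   q = Quantity([1.0, 2.0, 3.0], 'cm')
    #   YTArray.from_astropy(q)   # -> YTArray([ 1., 2., 3.]) cm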
def to_astropy(self, **kwargs):
"""
Creates a new AstroPy quantity with the same unit information.
"""
if _astropy.units is None:
raise ImportError("You don't have AstroPy installed, so you can't convert to " +
"an AstroPy quantity.")
return self.value*_astropy.units.Unit(str(self.units), **kwargs)
@classmethod
def from_pint(cls, arr, unit_registry=None):
"""
Convert a Pint "Quantity" to a YTArray or YTQuantity.
Parameters
----------
arr : Pint Quantity
The Quantity to convert from.
unit_registry : yt UnitRegistry, optional
A yt unit registry to use in the conversion. If one is not
supplied, the default one will be used.
Examples
--------
>>> from pint import UnitRegistry
>>> import numpy as np
>>> ureg = UnitRegistry()
>>> a = np.random.random(10)
>>> b = ureg.Quantity(a, "erg/cm**3")
>>> c = yt.YTArray.from_pint(b)
"""
p_units = []
for base, exponent in arr._units.items():
bs = convert_pint_units(base)
p_units.append("%s**(%s)" % (bs, Rational(exponent)))
p_units = "*".join(p_units)
if isinstance(arr.magnitude, np.ndarray):
return YTArray(arr.magnitude, p_units, registry=unit_registry)
else:
return YTQuantity(arr.magnitude, p_units, registry=unit_registry)
def to_pint(self, unit_registry=None):
"""
Convert a YTArray or YTQuantity to a Pint Quantity.
Parameters
----------
arr : YTArray or YTQuantity
The unitful quantity to convert from.
unit_registry : Pint UnitRegistry, optional
The Pint UnitRegistry to use in the conversion. If one is not
supplied, the default one will be used. NOTE: This is not
the same as a yt UnitRegistry object.
Examples
--------
>>> a = YTQuantity(4.0, "cm**2/s")
>>> b = a.to_pint()
"""
from pint import UnitRegistry
if unit_registry is None:
unit_registry = UnitRegistry()
powers_dict = self.units.expr.as_powers_dict()
units = []
for unit, pow in powers_dict.items():
# we have to do this because Pint doesn't recognize
# "yr" as "year"
if str(unit).endswith("yr") and len(str(unit)) in [2,3]:
unit = str(unit).replace("yr","year")
units.append("%s**(%s)" % (unit, Rational(pow)))
units = "*".join(units)
return unit_registry.Quantity(self.value, units)
#
# End unit conversion methods
#
def write_hdf5(self, filename, dataset_name=None, info=None, group_name=None):
r"""Writes a YTArray to hdf5 file.
Parameters
----------
filename: string
The filename to create and write a dataset to
dataset_name: string
The name of the dataset to create in the file.
info: dictionary
A dictionary of supplementary info to write to append as attributes
to the dataset.
group_name: string
An optional group to write the arrays to. If not specified, the arrays
are datasets at the top level by default.
Examples
--------
>>> a = YTArray([1,2,3], 'cm')
>>> myinfo = {'field':'dinosaurs', 'type':'field_data'}
>>> a.write_hdf5('test_array_data.h5', dataset_name='dinosaurs',
... info=myinfo)
"""
from yt.utilities.on_demand_imports import _h5py as h5py
from yt.extern.six.moves import cPickle as pickle
if info is None:
info = {}
info['units'] = str(self.units)
info['unit_registry'] = np.void(pickle.dumps(self.units.registry.lut))
if dataset_name is None:
dataset_name = 'array_data'
f = h5py.File(filename)
if group_name is not None:
if group_name in f:
g = f[group_name]
else:
g = f.create_group(group_name)
else:
g = f
if dataset_name in g.keys():
d = g[dataset_name]
# Overwrite without deleting if we can get away with it.
if d.shape == self.shape and d.dtype == self.dtype:
d[...] = self
for k in d.attrs.keys():
del d.attrs[k]
else:
del f[dataset_name]
d = g.create_dataset(dataset_name, data=self)
else:
d = g.create_dataset(dataset_name, data=self)
for k, v in info.items():
d.attrs[k] = v
f.close()
@classmethod
def from_hdf5(cls, filename, dataset_name=None, group_name=None):
r"""Attempts read in and convert a dataset in an hdf5 file into a
YTArray.
Parameters
----------
filename: string
The filename to of the hdf5 file.
dataset_name: string
The name of the dataset to read from. If the dataset has a units
attribute, attempt to infer units as well.
group_name: string
An optional group to read the arrays from. If not specified, the
arrays are datasets at the top level by default.
"""
import h5py
from yt.extern.six.moves import cPickle as pickle
if dataset_name is None:
dataset_name = 'array_data'
f = h5py.File(filename)
if group_name is not None:
g = f[group_name]
else:
g = f
dataset = g[dataset_name]
data = dataset[:]
units = dataset.attrs.get('units', '')
if 'unit_registry' in dataset.attrs.keys():
unit_lut = pickle.loads(dataset.attrs['unit_registry'].tostring())
else:
unit_lut = None
f.close()
registry = UnitRegistry(lut=unit_lut, add_default_symbols=False)
return cls(data, units, registry=registry)
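    # Illustrative round trip (hypothetical file and dataset names, mirroring the
    # write_hdf5 example above; not part of the original class):
    #
    #   a = YTArray([1, 2, 3], 'cm')
    #   a.write_hdf5('test_array_data.h5', dataset_name='dinosaurs')
    #   b = YTArray.from_hdf5('test_array_data.h5', dataset_name='dinosaurs')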
#
# Start convenience methods
#
@property
def value(self):
"""Get a copy of the array data as a numpy ndarray"""
return np.array(self)
v = value
@property
def ndview(self):
"""Get a view of the array data."""
return self.ndarray_view()
d = ndview
@property
def unit_quantity(self):
"""Get a YTQuantity with the same unit as this array and a value of
1.0"""
return YTQuantity(1.0, self.units)
uq = unit_quantity
@property
def unit_array(self):
"""Get a YTArray filled with ones with the same unit and shape as this
array"""
return np.ones_like(self)
ua = unit_array
def __getitem__(self, item):
ret = super(YTArray, self).__getitem__(item)
if ret.shape == ():
return YTQuantity(ret, self.units, bypass_validation=True)
else:
if hasattr(self, 'units'):
ret.units = self.units
return ret
#
# Start operation methods
#
if LooseVersion(np.__version__) < LooseVersion('1.13.0'):
def __add__(self, right_object):
"""
Add this ytarray to the object on the right of the `+` operator.
Must check for the correct (same dimension) units.
"""
ro = sanitize_units_add(self, right_object, "addition")
return super(YTArray, self).__add__(ro)
def __radd__(self, left_object):
""" See __add__. """
lo = sanitize_units_add(self, left_object, "addition")
return super(YTArray, self).__radd__(lo)
def __iadd__(self, other):
""" See __add__. """
oth = sanitize_units_add(self, other, "addition")
np.add(self, oth, out=self)
return self
def __sub__(self, right_object):
"""
Subtract the object on the right of the `-` from this ytarray. Must
check for the correct (same dimension) units.
"""
ro = sanitize_units_add(self, right_object, "subtraction")
return super(YTArray, self).__sub__(ro)
def __rsub__(self, left_object):
""" See __sub__. """
lo = sanitize_units_add(self, left_object, "subtraction")
return super(YTArray, self).__rsub__(lo)
def __isub__(self, other):
""" See __sub__. """
oth = sanitize_units_add(self, other, "subtraction")
np.subtract(self, oth, out=self)
return self
def __neg__(self):
""" Negate the data. """
return super(YTArray, self).__neg__()
def __mul__(self, right_object):
"""
Multiply this YTArray by the object on the right of the `*`
operator. The unit objects handle being multiplied.
"""
ro = sanitize_units_mul(self, right_object)
return super(YTArray, self).__mul__(ro)
def __rmul__(self, left_object):
""" See __mul__. """
lo = sanitize_units_mul(self, left_object)
return super(YTArray, self).__rmul__(lo)
def __imul__(self, other):
""" See __mul__. """
oth = sanitize_units_mul(self, other)
np.multiply(self, oth, out=self)
return self
def __div__(self, right_object):
"""
Divide this YTArray by the object on the right of the `/` operator.
"""
ro = sanitize_units_mul(self, right_object)
return super(YTArray, self).__div__(ro)
def __rdiv__(self, left_object):
""" See __div__. """
lo = sanitize_units_mul(self, left_object)
return super(YTArray, self).__rdiv__(lo)
def __idiv__(self, other):
""" See __div__. """
oth = sanitize_units_mul(self, other)
np.divide(self, oth, out=self)
return self
def __truediv__(self, right_object):
ro = sanitize_units_mul(self, right_object)
return super(YTArray, self).__truediv__(ro)
def __rtruediv__(self, left_object):
""" See __div__. """
lo = sanitize_units_mul(self, left_object)
return super(YTArray, self).__rtruediv__(lo)
def __itruediv__(self, other):
""" See __div__. """
oth = sanitize_units_mul(self, other)
np.true_divide(self, oth, out=self)
return self
def __floordiv__(self, right_object):
ro = sanitize_units_mul(self, right_object)
return super(YTArray, self).__floordiv__(ro)
def __rfloordiv__(self, left_object):
""" See __div__. """
lo = sanitize_units_mul(self, left_object)
return super(YTArray, self).__rfloordiv__(lo)
def __ifloordiv__(self, other):
""" See __div__. """
oth = sanitize_units_mul(self, other)
np.floor_divide(self, oth, out=self)
return self
def __or__(self, right_object):
return super(YTArray, self).__or__(right_object)
def __ror__(self, left_object):
return super(YTArray, self).__ror__(left_object)
def __ior__(self, other):
np.bitwise_or(self, other, out=self)
return self
def __xor__(self, right_object):
return super(YTArray, self).__xor__(right_object)
def __rxor__(self, left_object):
return super(YTArray, self).__rxor__(left_object)
def __ixor__(self, other):
np.bitwise_xor(self, other, out=self)
return self
def __and__(self, right_object):
return super(YTArray, self).__and__(right_object)
def __rand__(self, left_object):
return super(YTArray, self).__rand__(left_object)
def __iand__(self, other):
np.bitwise_and(self, other, out=self)
return self
def __pow__(self, power):
"""
Raise this YTArray to some power.
Parameters
----------
power : float or dimensionless YTArray.
The pow value.
"""
if isinstance(power, YTArray):
if not power.units.is_dimensionless:
                    raise YTUnitOperationError('power', power.units)
# Work around a sympy issue (I think?)
#
# If I don't do this, super(YTArray, self).__pow__ returns a YTArray
# with a unit attribute set to the sympy expression 1/1 rather than
# a dimensionless Unit object.
if self.units.is_dimensionless and power == -1:
ret = super(YTArray, self).__pow__(power)
return type(self)(ret, input_units='')
return super(YTArray, self).__pow__(power)
def __abs__(self):
""" Return a YTArray with the abs of the data. """
return super(YTArray, self).__abs__()
#
# Start comparison operators.
#
def __lt__(self, other):
""" Test if this is less than the object on the right. """
# converts if possible
oth = validate_comparison_units(self, other, 'less_than')
return super(YTArray, self).__lt__(oth)
def __le__(self, other):
"""Test if this is less than or equal to the object on the right.
"""
oth = validate_comparison_units(self, other, 'less_than or equal')
return super(YTArray, self).__le__(oth)
def __eq__(self, other):
""" Test if this is equal to the object on the right. """
# Check that other is a YTArray.
if other is None:
# self is a YTArray, so it can't be None.
return False
oth = validate_comparison_units(self, other, 'equal')
return super(YTArray, self).__eq__(oth)
def __ne__(self, other):
""" Test if this is not equal to the object on the right. """
# Check that the other is a YTArray.
if other is None:
return True
oth = validate_comparison_units(self, other, 'not equal')
return super(YTArray, self).__ne__(oth)
def __ge__(self, other):
""" Test if this is greater than or equal to other. """
# Check that the other is a YTArray.
oth = validate_comparison_units(
self, other, 'greater than or equal')
return super(YTArray, self).__ge__(oth)
def __gt__(self, other):
""" Test if this is greater than the object on the right. """
# Check that the other is a YTArray.
oth = validate_comparison_units(self, other, 'greater than')
return super(YTArray, self).__gt__(oth)
#
# End comparison operators
#
#
# Begin reduction operators
#
@return_arr
def prod(self, axis=None, dtype=None, out=None):
if axis is not None:
units = self.units**self.shape[axis]
else:
units = self.units**self.size
return super(YTArray, self).prod(axis, dtype, out), units
@return_arr
def mean(self, axis=None, dtype=None, out=None):
return super(YTArray, self).mean(axis, dtype, out), self.units
@return_arr
def sum(self, axis=None, dtype=None, out=None):
return super(YTArray, self).sum(axis, dtype, out), self.units
@return_arr
def std(self, axis=None, dtype=None, out=None, ddof=0):
return super(YTArray, self).std(axis, dtype, out, ddof), self.units
def __array_wrap__(self, out_arr, context=None):
ret = super(YTArray, self).__array_wrap__(out_arr, context)
if isinstance(ret, YTQuantity) and ret.shape != ():
ret = ret.view(YTArray)
if context is None:
if ret.shape == ():
return ret[()]
else:
return ret
ufunc = context[0]
inputs = context[1]
if ufunc in unary_operators:
out_arr, inp, u = get_inp_u_unary(ufunc, inputs, out_arr)
unit = self._ufunc_registry[context[0]](u)
ret_class = type(self)
elif ufunc in binary_operators:
unit_operator = self._ufunc_registry[context[0]]
inps, units, ret_class = get_inp_u_binary(ufunc, inputs)
if unit_operator in (preserve_units, comparison_unit,
arctan2_unit):
inps, units = handle_comparison_units(
inps, units, ufunc, ret_class, raise_error=True)
unit = unit_operator(*units)
if unit_operator in (multiply_units, divide_units):
out_arr, out_arr, unit = handle_multiply_divide_units(
unit, units, out_arr, out_arr)
else:
raise RuntimeError(
"Support for the %s ufunc has not been added "
"to YTArray." % str(context[0]))
if unit is None:
out_arr = np.array(out_arr, copy=False)
return out_arr
out_arr.units = unit
if out_arr.size == 1:
return YTQuantity(np.array(out_arr), unit)
else:
if ret_class is YTQuantity:
# This happens if you do ndarray * YTQuantity. Explicitly
# casting to YTArray avoids creating a YTQuantity with
# size > 1
return YTArray(np.array(out_arr), unit)
return ret_class(np.array(out_arr, copy=False), unit)
else: # numpy version equal to or newer than 1.13
def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
func = getattr(ufunc, method)
if 'out' in kwargs:
out_orig = kwargs.pop('out')
out = np.asarray(out_orig[0])
else:
out = None
if len(inputs) == 1:
_, inp, u = get_inp_u_unary(ufunc, inputs)
out_arr = func(np.asarray(inp), out=out, **kwargs)
if ufunc in (multiply, divide) and method == 'reduce':
power_sign = POWER_SIGN_MAPPING[ufunc]
if 'axis' in kwargs and kwargs['axis'] is not None:
unit = u**(power_sign*inp.shape[kwargs['axis']])
else:
unit = u**(power_sign*inp.size)
else:
unit = self._ufunc_registry[ufunc](u)
ret_class = type(self)
elif len(inputs) == 2:
unit_operator = self._ufunc_registry[ufunc]
inps, units, ret_class = get_inp_u_binary(ufunc, inputs)
if unit_operator in (comparison_unit, arctan2_unit):
inps, units = handle_comparison_units(
inps, units, ufunc, ret_class)
elif unit_operator is preserve_units:
inps, units = handle_preserve_units(
inps, units, ufunc, ret_class)
unit = unit_operator(*units)
out_arr = func(np.asarray(inps[0]), np.asarray(inps[1]),
out=out, **kwargs)
if unit_operator in (multiply_units, divide_units):
out, out_arr, unit = handle_multiply_divide_units(
unit, units, out, out_arr)
else:
raise RuntimeError(
"Support for the %s ufunc with %i inputs has not been"
"added to YTArray." % (str(ufunc), len(inputs)))
if unit is None:
out_arr = np.array(out_arr, copy=False)
elif ufunc in (modf, divmod_):
out_arr = tuple((ret_class(o, unit) for o in out_arr))
elif out_arr.size == 1:
out_arr = YTQuantity(np.asarray(out_arr), unit)
else:
if ret_class is YTQuantity:
# This happens if you do ndarray * YTQuantity. Explicitly
# casting to YTArray avoids creating a YTQuantity with
# size > 1
out_arr = YTArray(np.asarray(out_arr), unit)
else:
out_arr = ret_class(np.asarray(out_arr), unit)
if out is not None:
out_orig[0].flat[:] = out.flat[:]
if isinstance(out_orig[0], YTArray):
out_orig[0].units = unit
return out_arr
def copy(self, order='C'):
return type(self)(np.copy(np.asarray(self)), self.units)
def __array_finalize__(self, obj):
if obj is None and hasattr(self, 'units'):
return
self.units = getattr(obj, 'units', NULL_UNIT)
def __pos__(self):
""" Posify the data. """
# this needs to be defined for all numpy versions, see
# numpy issue #9081
return type(self)(super(YTArray, self).__pos__(), self.units)
@return_arr
def dot(self, b, out=None):
return super(YTArray, self).dot(b), self.units*b.units
def __reduce__(self):
"""Pickle reduction method
See the documentation for the standard library pickle module:
http://docs.python.org/2/library/pickle.html
        Unit metadata is encoded in the zeroth element of the third element of
        the returned tuple, which is itself a tuple used to restore the state
        of the ndarray.
This is always defined for numpy arrays.
"""
np_ret = super(YTArray, self).__reduce__()
obj_state = np_ret[2]
unit_state = (((str(self.units), self.units.registry.lut),) + obj_state[:],)
new_ret = np_ret[:2] + unit_state + np_ret[3:]
return new_ret
def __setstate__(self, state):
"""Pickle setstate method
        This is called inside pickle.load() and restores the unit data from the
metadata extracted in __reduce__ and then serialized by pickle.
"""
super(YTArray, self).__setstate__(state[1:])
try:
unit, lut = state[0]
except TypeError:
# this case happens when we try to load an old pickle file
# created before we serialized the unit symbol lookup table
# into the pickle file
unit, lut = str(state[0]), default_unit_symbol_lut.copy()
# need to fix up the lut if the pickle was saved prior to PR #1728
# when the pickle format changed
if len(lut['m']) == 2:
lut.update(default_unit_symbol_lut)
for k, v in [(k, v) for k, v in lut.items() if len(v) == 2]:
lut[k] = v + (0.0, r'\rm{' + k.replace('_', '\ ') + '}')
registry = UnitRegistry(lut=lut, add_default_symbols=False)
self.units = Unit(unit, registry=registry)
def __deepcopy__(self, memodict=None):
"""copy.deepcopy implementation
This is necessary for stdlib deepcopy of arrays and quantities.
"""
if memodict is None:
memodict = {}
ret = super(YTArray, self).__deepcopy__(memodict)
return type(self)(ret, copy.deepcopy(self.units))
class YTQuantity(YTArray):
"""
A scalar associated with a unit.
Parameters
----------
input_scalar : an integer or floating point scalar
The scalar to attach units to
input_units : String unit specification, unit symbol object, or astropy units
The units of the quantity. Powers must be specified using python syntax
(cm**3, not cm^3).
registry : A UnitRegistry object
The registry to create units from. If input_units is already associated
with a unit registry and this is specified, this will be used instead of
the registry associated with the unit object.
dtype : data-type
The dtype of the array data.
Examples
--------
>>> from yt import YTQuantity
>>> a = YTQuantity(1, 'cm')
>>> b = YTQuantity(2, 'm')
>>> a + b
201.0 cm
>>> b + a
2.01 m
NumPy ufuncs will pass through units where appropriate.
>>> import numpy as np
>>> a = YTQuantity(12, 'g/cm**3')
>>> np.abs(a)
12 g/cm**3
and strip them when it would be annoying to deal with them.
>>> print(np.log10(a))
1.07918124605
YTQuantity is tightly integrated with yt datasets:
>>> import yt
>>> ds = yt.load('IsolatedGalaxy/galaxy0030/galaxy0030')
>>> a = ds.quan(5, 'code_length')
>>> a.in_cgs()
1.543e+25 cm
This is equivalent to:
>>> b = YTQuantity(5, 'code_length', registry=ds.unit_registry)
>>> np.all(a == b)
True
"""
def __new__(cls, input_scalar, input_units=None, registry=None,
dtype=np.float64, bypass_validation=False):
if not isinstance(input_scalar, (numeric_type, np.number, np.ndarray)):
raise RuntimeError("YTQuantity values must be numeric")
ret = YTArray.__new__(cls, input_scalar, input_units, registry,
dtype=dtype, bypass_validation=bypass_validation)
if ret.size > 1:
raise RuntimeError("YTQuantity instances must be scalars")
return ret
def __repr__(self):
return str(self)
def validate_numpy_wrapper_units(v, arrs):
if not any(isinstance(a, YTArray) for a in arrs):
return v
if not all(isinstance(a, YTArray) for a in arrs):
raise RuntimeError("Not all of your arrays are YTArrays.")
a1 = arrs[0]
if not all(a.units == a1.units for a in arrs[1:]):
raise RuntimeError("Your arrays must have identical units.")
v.units = a1.units
return v
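# Note: this helper backs the u* wrappers below. It passes results through
# untouched when no YTArrays are involved, requires that all YTArray inputs
# share a single unit, and stamps that unit onto the wrapped numpy result.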
def uconcatenate(arrs, axis=0):
"""Concatenate a sequence of arrays.
This wrapper around numpy.concatenate preserves units. All input arrays must
have the same units. See the documentation of numpy.concatenate for full
details.
Examples
--------
>>> A = yt.YTArray([1, 2, 3], 'cm')
>>> B = yt.YTArray([2, 3, 4], 'cm')
>>> uconcatenate((A, B))
YTArray([ 1., 2., 3., 2., 3., 4.]) cm
"""
v = np.concatenate(arrs, axis=axis)
v = validate_numpy_wrapper_units(v, arrs)
return v
def ucross(arr1, arr2, registry=None, axisa=-1, axisb=-1, axisc=-1, axis=None):
"""Applies the cross product to two YT arrays.
This wrapper around numpy.cross preserves units.
See the documentation of numpy.cross for full
details.
"""
v = np.cross(arr1, arr2, axisa=axisa, axisb=axisb, axisc=axisc, axis=axis)
units = arr1.units * arr2.units
arr = YTArray(v, units, registry=registry)
return arr
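# A small usage sketch, assuming yt is importable; the result carries the
# product of the input units (symbol order in the printed unit may vary):
#   >>> a = YTArray([1, 0, 0], 'cm')
#   >>> b = YTArray([0, 1, 0], 'g')
#   >>> ucross(a, b)
#   YTArray([ 0.,  0.,  1.]) cm*g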
def uintersect1d(arr1, arr2, assume_unique=False):
"""Find the sorted unique elements of the two input arrays.
A wrapper around numpy.intersect1d that preserves units. All input arrays
must have the same units. See the documentation of numpy.intersect1d for
full details.
Examples
--------
>>> A = yt.YTArray([1, 2, 3], 'cm')
>>> B = yt.YTArray([2, 3, 4], 'cm')
>>> uintersect1d(A, B)
YTArray([ 2., 3.]) cm
"""
v = np.intersect1d(arr1, arr2, assume_unique=assume_unique)
v = validate_numpy_wrapper_units(v, [arr1, arr2])
return v
def uunion1d(arr1, arr2):
"""Find the union of two arrays.
    A wrapper around numpy.union1d that preserves units. All input arrays
    must have the same units. See the documentation of numpy.union1d for
full details.
Examples
--------
>>> A = yt.YTArray([1, 2, 3], 'cm')
>>> B = yt.YTArray([2, 3, 4], 'cm')
>>> uunion1d(A, B)
YTArray([ 1., 2., 3., 4.]) cm
"""
v = np.union1d(arr1, arr2)
v = validate_numpy_wrapper_units(v, [arr1, arr2])
return v
def unorm(data, ord=None, axis=None, keepdims=False):
"""Matrix or vector norm that preserves units
This is a wrapper around np.linalg.norm that preserves units. See
the documentation for that function for descriptions of the keyword
arguments.
The keepdims argument is ignored if the version of numpy installed is
older than numpy 1.10.0.
"""
if LooseVersion(np.__version__) < LooseVersion('1.10.0'):
norm = np.linalg.norm(data, ord=ord, axis=axis)
else:
norm = np.linalg.norm(data, ord=ord, axis=axis, keepdims=keepdims)
if norm.shape == ():
return YTQuantity(norm, data.units)
return YTArray(norm, data.units)
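# A quick sketch, assuming yt is importable: the norm keeps the input units
# and collapses to a YTQuantity for scalar results.
#   >>> v = YTArray([3.0, 4.0], 'cm')
#   >>> unorm(v)
#   5.0 cm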
def udot(op1, op2):
"""Matrix or vector dot product that preserves units
This is a wrapper around np.dot that preserves units.
"""
dot = np.dot(op1.d, op2.d)
units = op1.units*op2.units
if dot.shape == ():
return YTQuantity(dot, units)
return YTArray(dot, units)
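# A quick sketch, assuming yt is importable; the result carries the product
# of the operand units (symbol order in the printed unit may vary):
#   >>> a = YTArray([1.0, 2.0], 'cm')
#   >>> b = YTArray([3.0, 4.0], 'g')
#   >>> udot(a, b)
#   11.0 cm*g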
def uvstack(arrs):
"""Stack arrays in sequence vertically (row wise) while preserving units
This is a wrapper around np.vstack that preserves units.
"""
v = np.vstack(arrs)
v = validate_numpy_wrapper_units(v, arrs)
return v
def uhstack(arrs):
"""Stack arrays in sequence horizontally (column wise) while preserving units
This is a wrapper around np.hstack that preserves units.
"""
v = np.hstack(arrs)
v = validate_numpy_wrapper_units(v, arrs)
return v
def ustack(arrs, axis=0):
"""Join a sequence of arrays along a new axis while preserving units
The axis parameter specifies the index of the new axis in the
dimensions of the result. For example, if ``axis=0`` it will be the
first dimension and if ``axis=-1`` it will be the last dimension.
This is a wrapper around np.stack that preserves units.
"""
    v = np.stack(arrs, axis=axis)
v = validate_numpy_wrapper_units(v, arrs)
return v
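# A shape-only sketch of the three stacking helpers, assuming equal units:
#   >>> A = YTArray([1, 2], 'cm'); B = YTArray([3, 4], 'cm')
#   >>> uvstack([A, B]).shape, uhstack([A, B]).shape, ustack([A, B]).shape
#   ((2, 2), (4,), (2, 2))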
def array_like_field(data, x, field):
field = data._determine_fields(field)[0]
if isinstance(field, tuple):
finfo = data.ds._get_field_info(field[0],field[1])
else:
finfo = data.ds._get_field_info(field)
if finfo.sampling_type == 'particle':
units = finfo.output_units
else:
units = finfo.units
if isinstance(x, YTArray):
arr = copy.deepcopy(x)
arr.convert_to_units(units)
return arr
if isinstance(x, np.ndarray):
return data.ds.arr(x, units)
else:
return data.ds.quan(x, units)
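# array_like_field returns ``x`` tagged with the units yt associates with
# ``field``: YTArray input is deep-copied and converted, plain ndarrays are
# wrapped with ds.arr, and scalars with ds.quan.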
def get_binary_op_return_class(cls1, cls2):
if cls1 is cls2:
return cls1
if cls1 in (np.ndarray, np.matrix, np.ma.masked_array) or issubclass(cls1, (numeric_type, np.number, list, tuple)):
return cls2
if cls2 in (np.ndarray, np.matrix, np.ma.masked_array) or issubclass(cls2, (numeric_type, np.number, list, tuple)):
return cls1
if issubclass(cls1, YTQuantity):
return cls2
if issubclass(cls2, YTQuantity):
return cls1
if issubclass(cls1, cls2):
return cls1
if issubclass(cls2, cls1):
return cls2
else:
raise RuntimeError("Undefined operation for a YTArray subclass. "
"Received operand types (%s) and (%s)" % (cls1, cls2))
def loadtxt(fname, dtype='float', delimiter='\t', usecols=None, comments='#'):
r"""
Load YTArrays with unit information from a text file. Each row in the
text file must have the same number of values.
Parameters
----------
fname : str
Filename to read.
dtype : data-type, optional
Data-type of the resulting array; default: float.
delimiter : str, optional
The string used to separate values. By default, this is any
whitespace.
usecols : sequence, optional
Which columns to read, with 0 being the first. For example,
``usecols = (1,4,5)`` will extract the 2nd, 5th and 6th columns.
The default, None, results in all columns being read.
comments : str, optional
The character used to indicate the start of a comment;
default: '#'.
Examples
--------
>>> temp, velx = yt.loadtxt("sphere.dat", usecols=(1,2), delimiter="\t")
"""
f = open(fname, 'r')
next_one = False
units = []
num_cols = -1
for line in f.readlines():
words = line.strip().split()
if len(words) == 0:
continue
if line[0] == comments:
if next_one:
units = words[1:]
if len(words) == 2 and words[1] == "Units":
next_one = True
else:
# Here we catch the first line of numbers
try:
col_words = line.strip().split(delimiter)
for word in col_words:
float(word)
num_cols = len(col_words)
break
except ValueError:
mylog.warning("Unrecognized character at beginning of line: \"%s\"." % line[0])
f.close()
if len(units) != num_cols:
mylog.warning("Malformed or incomplete units header. Arrays will be "
"dimensionless!")
units = ["dimensionless"]*num_cols
arrays = np.loadtxt(fname, dtype=dtype, comments=comments,
delimiter=delimiter, converters=None,
unpack=True, usecols=usecols, ndmin=0)
if usecols is not None:
units = [units[col] for col in usecols]
mylog.info("Array units: %s" % ", ".join(units))
return tuple([YTArray(arr, unit) for arr, unit in zip(arrays, units)])
def savetxt(fname, arrays, fmt='%.18e', delimiter='\t', header='',
footer='', comments='#'):
r"""
Write YTArrays with unit information to a text file.
Parameters
----------
fname : str
The file to write the YTArrays to.
arrays : list of YTArrays or single YTArray
The array(s) to write to the file.
fmt : str or sequence of strs, optional
A single format (%10.5f), or a sequence of formats.
delimiter : str, optional
String or character separating columns.
header : str, optional
String that will be written at the beginning of the file, before the
unit header.
footer : str, optional
String that will be written at the end of the file.
comments : str, optional
String that will be prepended to the ``header`` and ``footer`` strings,
        to mark them as comments. Default: '#', as expected by e.g.
``yt.loadtxt``.
Examples
--------
>>> sp = ds.sphere("c", (100,"kpc"))
>>> a = sp["density"]
>>> b = sp["temperature"]
>>> c = sp["velocity_x"]
>>> yt.savetxt("sphere.dat", [a,b,c], header='My sphere stuff', delimiter="\t")
"""
if not isinstance(arrays, list):
arrays = [arrays]
units = []
for array in arrays:
if hasattr(array, "units"):
units.append(str(array.units))
else:
units.append("dimensionless")
if header != '':
header += '\n'
header += " Units\n " + '\t'.join(units)
np.savetxt(fname, np.transpose(arrays), header=header,
fmt=fmt, delimiter=delimiter, footer=footer,
newline='\n', comments=comments)
| [
"numpy.bitwise_or",
"numpy.union1d",
"yt.units.dimensions.em_dimensions.get",
"numpy.hstack",
"yt.units.unit_object.UnitParseError",
"yt.units.unit_lookup_table.default_unit_symbol_lut.copy",
"yt.utilities.exceptions.YTInvalidUnitEquivalence",
"numpy.array",
"numpy.linalg.norm",
"copy.deepcopy",
"pint.UnitRegistry",
"numpy.divide",
"numpy.multiply",
"yt.utilities.exceptions.YTUfuncUnitError",
"numpy.cross",
"yt.utilities.exceptions.YTIterableUnitCoercionError",
"numpy.bitwise_xor",
"numpy.asarray",
"yt.extern.six.moves.cPickle.dumps",
"functools.wraps",
"numpy.subtract",
"numpy.stack",
"numpy.dot",
"numpy.vstack",
"numpy.concatenate",
"yt.utilities.exceptions.YTUnitOperationError",
"numpy.add",
"yt.utilities.lru_cache.lru_cache",
"yt.utilities.logger.ytLogger.warning",
"numpy.any",
"h5py.File",
"yt.units.unit_object.Unit",
"numpy.transpose",
"numpy.intersect1d",
"numpy.ones_like",
"numpy.floor_divide",
"yt.utilities.exceptions.YTEquivalentDimsError",
"numpy.bitwise_and",
"yt.utilities.exceptions.YTUnitConversionError",
"numpy.bool_",
"numpy.true_divide",
"distutils.version.LooseVersion",
"numpy.loadtxt",
"sympy.Rational"
] | [((2243, 2249), 'yt.units.unit_object.Unit', 'Unit', ([], {}), '()\n', (2247, 2249), False, 'from yt.units.unit_object import Unit, UnitParseError\n'), ((2777, 2812), 'yt.utilities.lru_cache.lru_cache', 'lru_cache', ([], {'maxsize': '(128)', 'typed': '(False)'}), '(maxsize=128, typed=False)\n', (2786, 2812), False, 'from yt.utilities.lru_cache import lru_cache\n'), ((2857, 2892), 'yt.utilities.lru_cache.lru_cache', 'lru_cache', ([], {'maxsize': '(128)', 'typed': '(False)'}), '(maxsize=128, typed=False)\n', (2866, 2892), False, 'from yt.utilities.lru_cache import lru_cache\n'), ((3011, 3046), 'yt.utilities.lru_cache.lru_cache', 'lru_cache', ([], {'maxsize': '(128)', 'typed': '(False)'}), '(maxsize=128, typed=False)\n', (3020, 3046), False, 'from yt.utilities.lru_cache import lru_cache\n'), ((3101, 3136), 'yt.utilities.lru_cache.lru_cache', 'lru_cache', ([], {'maxsize': '(128)', 'typed': '(False)'}), '(maxsize=128, typed=False)\n', (3110, 3136), False, 'from yt.utilities.lru_cache import lru_cache\n'), ((3183, 3218), 'yt.utilities.lru_cache.lru_cache', 'lru_cache', ([], {'maxsize': '(128)', 'typed': '(False)'}), '(maxsize=128, typed=False)\n', (3192, 3218), False, 'from yt.utilities.lru_cache import lru_cache\n'), ((3276, 3311), 'yt.utilities.lru_cache.lru_cache', 'lru_cache', ([], {'maxsize': '(128)', 'typed': '(False)'}), '(maxsize=128, typed=False)\n', (3285, 3311), False, 'from yt.utilities.lru_cache import lru_cache\n'), ((9333, 9368), 'yt.utilities.lru_cache.lru_cache', 'lru_cache', ([], {'maxsize': '(128)', 'typed': '(False)'}), '(maxsize=128, typed=False)\n', (9342, 9368), False, 'from yt.utilities.lru_cache import lru_cache\n'), ((2466, 2477), 'functools.wraps', 'wraps', (['func'], {}), '(func)\n', (2471, 2477), False, 'from functools import wraps\n'), ((9764, 9808), 'yt.units.dimensions.em_dimensions.get', 'em_dimensions.get', (['my_units.dimensions', 'None'], {}), '(my_units.dimensions, None)\n', (9781, 9808), False, 'from yt.units.dimensions import angle, current_mks, dimensionless, em_dimensions\n'), ((55134, 55165), 'numpy.concatenate', 'np.concatenate', (['arrs'], {'axis': 'axis'}), '(arrs, axis=axis)\n', (55148, 55165), True, 'import numpy as np\n'), ((55490, 55560), 'numpy.cross', 'np.cross', (['arr1', 'arr2'], {'axisa': 'axisa', 'axisb': 'axisb', 'axisc': 'axisc', 'axis': 'axis'}), '(arr1, arr2, axisa=axisa, axisb=axisb, axisc=axisc, axis=axis)\n', (55498, 55560), True, 'import numpy as np\n'), ((56128, 56183), 'numpy.intersect1d', 'np.intersect1d', (['arr1', 'arr2'], {'assume_unique': 'assume_unique'}), '(arr1, arr2, assume_unique=assume_unique)\n', (56142, 56183), True, 'import numpy as np\n'), ((56672, 56694), 'numpy.union1d', 'np.union1d', (['arr1', 'arr2'], {}), '(arr1, arr2)\n', (56682, 56694), True, 'import numpy as np\n'), ((57601, 57621), 'numpy.dot', 'np.dot', (['op1.d', 'op2.d'], {}), '(op1.d, op2.d)\n', (57607, 57621), True, 'import numpy as np\n'), ((57922, 57937), 'numpy.vstack', 'np.vstack', (['arrs'], {}), '(arrs)\n', (57931, 57937), True, 'import numpy as np\n'), ((58177, 58192), 'numpy.hstack', 'np.hstack', (['arrs'], {}), '(arrs)\n', (58186, 58192), True, 'import numpy as np\n'), ((58639, 58653), 'numpy.stack', 'np.stack', (['arrs'], {}), '(arrs)\n', (58647, 58653), True, 'import numpy as np\n'), ((62003, 62133), 'numpy.loadtxt', 'np.loadtxt', (['fname'], {'dtype': 'dtype', 'comments': 'comments', 'delimiter': 'delimiter', 'converters': 'None', 'unpack': '(True)', 'usecols': 'usecols', 'ndmin': '(0)'}), '(fname, dtype=dtype, comments=comments, 
delimiter=delimiter,\n converters=None, unpack=True, usecols=usecols, ndmin=0)\n', (62013, 62133), True, 'import numpy as np\n'), ((9700, 9745), 'yt.units.unit_object.Unit', 'Unit', (['other_units'], {'registry': 'my_units.registry'}), '(other_units, registry=my_units.registry)\n', (9704, 9745), False, 'from yt.units.unit_object import Unit, UnitParseError\n'), ((9982, 10032), 'yt.utilities.exceptions.YTEquivalentDimsError', 'YTEquivalentDimsError', (['my_units', 'other_units', 'base'], {}), '(my_units, other_units, base)\n', (10003, 10032), False, 'from yt.utilities.exceptions import YTUnitOperationError, YTUnitConversionError, YTUfuncUnitError, YTIterableUnitCoercionError, YTInvalidUnitEquivalence, YTEquivalentDimsError\n'), ((10101, 10194), 'yt.utilities.exceptions.YTUnitConversionError', 'YTUnitConversionError', (['my_units', 'my_units.dimensions', 'other_units', 'other_units.dimensions'], {}), '(my_units, my_units.dimensions, other_units,\n other_units.dimensions)\n', (10122, 10194), False, 'from yt.utilities.exceptions import YTUnitOperationError, YTUnitConversionError, YTUfuncUnitError, YTIterableUnitCoercionError, YTInvalidUnitEquivalence, YTEquivalentDimsError\n'), ((25654, 25694), 'yt.units.unit_object.Unit', 'Unit', (['unit'], {'registry': 'self.units.registry'}), '(unit, registry=self.units.registry)\n', (25658, 25694), False, 'from yt.units.unit_object import Unit, UnitParseError\n'), ((27284, 27298), 'numpy.array', 'np.array', (['self'], {}), '(self)\n', (27292, 27298), True, 'import numpy as np\n'), ((32434, 32453), 'h5py.File', 'h5py.File', (['filename'], {}), '(filename)\n', (32443, 32453), False, 'import h5py\n'), ((34022, 34041), 'h5py.File', 'h5py.File', (['filename'], {}), '(filename)\n', (34031, 34041), False, 'import h5py\n'), ((34456, 34509), 'pint.UnitRegistry', 'UnitRegistry', ([], {'lut': 'unit_lut', 'add_default_symbols': '(False)'}), '(lut=unit_lut, add_default_symbols=False)\n', (34468, 34509), False, 'from pint import UnitRegistry\n'), ((34719, 34733), 'numpy.array', 'np.array', (['self'], {}), '(self)\n', (34727, 34733), True, 'import numpy as np\n'), ((35237, 35255), 'numpy.ones_like', 'np.ones_like', (['self'], {}), '(self)\n', (35249, 35255), True, 'import numpy as np\n'), ((35629, 35657), 'distutils.version.LooseVersion', 'LooseVersion', (['np.__version__'], {}), '(np.__version__)\n', (35641, 35657), False, 'from distutils.version import LooseVersion\n'), ((35660, 35682), 'distutils.version.LooseVersion', 'LooseVersion', (['"""1.13.0"""'], {}), "('1.13.0')\n", (35672, 35682), False, 'from distutils.version import LooseVersion\n'), ((51752, 51800), 'pint.UnitRegistry', 'UnitRegistry', ([], {'lut': 'lut', 'add_default_symbols': '(False)'}), '(lut=lut, add_default_symbols=False)\n', (51764, 51800), False, 'from pint import UnitRegistry\n'), ((51822, 51851), 'yt.units.unit_object.Unit', 'Unit', (['unit'], {'registry': 'registry'}), '(unit, registry=registry)\n', (51826, 51851), False, 'from yt.units.unit_object import Unit, UnitParseError\n'), ((57144, 57172), 'distutils.version.LooseVersion', 'LooseVersion', (['np.__version__'], {}), '(np.__version__)\n', (57156, 57172), False, 'from distutils.version import LooseVersion\n'), ((57175, 57197), 'distutils.version.LooseVersion', 'LooseVersion', (['"""1.10.0"""'], {}), "('1.10.0')\n", (57187, 57197), False, 'from distutils.version import LooseVersion\n'), ((57214, 57254), 'numpy.linalg.norm', 'np.linalg.norm', (['data'], {'ord': 'ord', 'axis': 'axis'}), '(data, ord=ord, axis=axis)\n', (57228, 57254), True, 
'import numpy as np\n'), ((57280, 57339), 'numpy.linalg.norm', 'np.linalg.norm', (['data'], {'ord': 'ord', 'axis': 'axis', 'keepdims': 'keepdims'}), '(data, ord=ord, axis=axis, keepdims=keepdims)\n', (57294, 57339), True, 'import numpy as np\n'), ((59106, 59122), 'copy.deepcopy', 'copy.deepcopy', (['x'], {}), '(x)\n', (59119, 59122), False, 'import copy\n'), ((61837, 61926), 'yt.utilities.logger.ytLogger.warning', 'mylog.warning', (['"""Malformed or incomplete units header. Arrays will be dimensionless!"""'], {}), "(\n 'Malformed or incomplete units header. Arrays will be dimensionless!')\n", (61850, 61926), True, 'from yt.utilities.logger import ytLogger as mylog\n'), ((63925, 63945), 'numpy.transpose', 'np.transpose', (['arrays'], {}), '(arrays)\n', (63937, 63945), True, 'import numpy as np\n'), ((5145, 5160), 'numpy.any', 'np.any', (['inps[0]'], {}), '(inps[0])\n', (5151, 5160), True, 'import numpy as np\n'), ((5162, 5177), 'numpy.any', 'np.any', (['inps[1]'], {}), '(inps[1])\n', (5168, 5177), True, 'import numpy as np\n'), ((5208, 5223), 'numpy.bool_', 'np.bool_', (['(False)'], {}), '(False)\n', (5216, 5223), True, 'import numpy as np\n'), ((5815, 5830), 'numpy.any', 'np.any', (['inps[0]'], {}), '(inps[0])\n', (5821, 5830), True, 'import numpy as np\n'), ((5832, 5847), 'numpy.any', 'np.any', (['inps[1]'], {}), '(inps[1])\n', (5838, 5847), True, 'import numpy as np\n'), ((5878, 5893), 'numpy.bool_', 'np.bool_', (['(False)'], {}), '(False)\n', (5886, 5893), True, 'import numpy as np\n'), ((8495, 8548), 'yt.utilities.exceptions.YTUnitOperationError', 'YTUnitOperationError', (['op_string', 'inp.units', 'ret.units'], {}), '(op_string, inp.units, ret.units)\n', (8515, 8548), False, 'from yt.utilities.exceptions import YTUnitOperationError, YTUnitConversionError, YTUfuncUnitError, YTIterableUnitCoercionError, YTInvalidUnitEquivalence, YTEquivalentDimsError\n'), ((8767, 8778), 'numpy.any', 'np.any', (['ret'], {}), '(ret)\n', (8773, 8778), True, 'import numpy as np\n'), ((8798, 8855), 'yt.utilities.exceptions.YTUnitOperationError', 'YTUnitOperationError', (['op_string', 'inp.units', 'dimensionless'], {}), '(op_string, inp.units, dimensionless)\n', (8818, 8855), False, 'from yt.utilities.exceptions import YTUnitOperationError, YTUnitConversionError, YTUfuncUnitError, YTIterableUnitCoercionError, YTInvalidUnitEquivalence, YTEquivalentDimsError\n'), ((9214, 9270), 'yt.utilities.exceptions.YTUnitOperationError', 'YTUnitOperationError', (['op_string', 'this.units', 'other.units'], {}), '(op_string, this.units, other.units)\n', (9234, 9270), False, 'from yt.utilities.exceptions import YTUnitOperationError, YTUnitConversionError, YTUfuncUnitError, YTIterableUnitCoercionError, YTInvalidUnitEquivalence, YTEquivalentDimsError\n'), ((18365, 18371), 'yt.units.unit_object.Unit', 'Unit', ([], {}), '()\n', (18369, 18371), False, 'from yt.units.unit_object import Unit, UnitParseError\n'), ((19703, 19744), 'numpy.subtract', 'np.subtract', (['self', '(offset * self.uq)', 'self'], {}), '(self, offset * self.uq, self)\n', (19714, 19744), True, 'import numpy as np\n'), ((26463, 26512), 'yt.utilities.exceptions.YTInvalidUnitEquivalence', 'YTInvalidUnitEquivalence', (['equiv', 'self.units', 'unit'], {}), '(equiv, self.units, unit)\n', (26487, 26512), False, 'from yt.utilities.exceptions import YTUnitOperationError, YTUnitConversionError, YTUfuncUnitError, YTIterableUnitCoercionError, YTInvalidUnitEquivalence, YTEquivalentDimsError\n'), ((30611, 30625), 'pint.UnitRegistry', 'UnitRegistry', ([], {}), '()\n', (30623, 
30625), False, 'from pint import UnitRegistry\n'), ((32308, 32345), 'yt.extern.six.moves.cPickle.dumps', 'pickle.dumps', (['self.units.registry.lut'], {}), '(self.units.registry.lut)\n', (32320, 32345), True, 'from yt.extern.six.moves import cPickle as pickle\n'), ((36358, 36385), 'numpy.add', 'np.add', (['self', 'oth'], {'out': 'self'}), '(self, oth, out=self)\n', (36364, 36385), True, 'import numpy as np\n'), ((37090, 37122), 'numpy.subtract', 'np.subtract', (['self', 'oth'], {'out': 'self'}), '(self, oth, out=self)\n', (37101, 37122), True, 'import numpy as np\n'), ((37896, 37928), 'numpy.multiply', 'np.multiply', (['self', 'oth'], {'out': 'self'}), '(self, oth, out=self)\n', (37907, 37928), True, 'import numpy as np\n'), ((38530, 38560), 'numpy.divide', 'np.divide', (['self', 'oth'], {'out': 'self'}), '(self, oth, out=self)\n', (38539, 38560), True, 'import numpy as np\n'), ((39069, 39104), 'numpy.true_divide', 'np.true_divide', (['self', 'oth'], {'out': 'self'}), '(self, oth, out=self)\n', (39083, 39104), True, 'import numpy as np\n'), ((39618, 39654), 'numpy.floor_divide', 'np.floor_divide', (['self', 'oth'], {'out': 'self'}), '(self, oth, out=self)\n', (39633, 39654), True, 'import numpy as np\n'), ((39930, 39966), 'numpy.bitwise_or', 'np.bitwise_or', (['self', 'other'], {'out': 'self'}), '(self, other, out=self)\n', (39943, 39966), True, 'import numpy as np\n'), ((40247, 40284), 'numpy.bitwise_xor', 'np.bitwise_xor', (['self', 'other'], {'out': 'self'}), '(self, other, out=self)\n', (40261, 40284), True, 'import numpy as np\n'), ((40565, 40602), 'numpy.bitwise_and', 'np.bitwise_and', (['self', 'other'], {'out': 'self'}), '(self, other, out=self)\n', (40579, 40602), True, 'import numpy as np\n'), ((52165, 52190), 'copy.deepcopy', 'copy.deepcopy', (['self.units'], {}), '(self.units)\n', (52178, 52190), False, 'import copy\n'), ((5297, 5312), 'numpy.bool_', 'np.bool_', (['(False)'], {}), '(False)\n', (5305, 5312), True, 'import numpy as np\n'), ((5967, 5982), 'numpy.bool_', 'np.bool_', (['(False)'], {}), '(False)\n', (5975, 5982), True, 'import numpy as np\n'), ((6780, 6808), 'yt.units.unit_object.Unit', 'Unit', ([], {'registry': 'unit.registry'}), '(registry=unit.registry)\n', (6784, 6808), False, 'from yt.units.unit_object import Unit, UnitParseError\n'), ((7217, 7258), 'yt.utilities.exceptions.YTIterableUnitCoercionError', 'YTIterableUnitCoercionError', (['input_object'], {}), '(input_object)\n', (7244, 7258), False, 'from yt.utilities.exceptions import YTUnitOperationError, YTUnitConversionError, YTUfuncUnitError, YTIterableUnitCoercionError, YTInvalidUnitEquivalence, YTEquivalentDimsError\n'), ((8342, 8362), 'numpy.any', 'np.any', (['other_object'], {}), '(other_object)\n', (8348, 8362), True, 'import numpy as np\n'), ((16956, 17136), 'yt.units.unit_object.UnitParseError', 'UnitParseError', (['("""Code units used without referring to a dataset. \nPerhaps you meant to do something like this instead: \nds.arr(%s, "%s")"""\n % (input_array, input_units))'], {}), '(\n """Code units used without referring to a dataset. 
\nPerhaps you meant to do something like this instead: \nds.arr(%s, "%s")"""\n % (input_array, input_units))\n', (16970, 17136), False, 'from yt.units.unit_object import Unit, UnitParseError\n'), ((18184, 18220), 'numpy.asarray', 'np.asarray', (['input_array'], {'dtype': 'dtype'}), '(input_array, dtype=dtype)\n', (18194, 18220), True, 'import numpy as np\n'), ((18797, 18833), 'yt.units.unit_object.Unit', 'Unit', (['input_units'], {'registry': 'registry'}), '(input_units, registry=registry)\n', (18801, 18833), False, 'from yt.units.unit_object import Unit, UnitParseError\n'), ((21985, 22041), 'numpy.subtract', 'np.subtract', (['new_array', '(offset * new_array.uq)', 'new_array'], {}), '(new_array, offset * new_array.uq, new_array)\n', (21996, 22041), True, 'import numpy as np\n'), ((46015, 46044), 'numpy.array', 'np.array', (['out_arr'], {'copy': '(False)'}), '(out_arr, copy=False)\n', (46023, 46044), True, 'import numpy as np\n'), ((46845, 46868), 'numpy.asarray', 'np.asarray', (['out_orig[0]'], {}), '(out_orig[0])\n', (46855, 46868), True, 'import numpy as np\n'), ((48678, 48707), 'numpy.array', 'np.array', (['out_arr'], {'copy': '(False)'}), '(out_arr, copy=False)\n', (48686, 48707), True, 'import numpy as np\n'), ((5449, 5484), 'yt.utilities.exceptions.YTUnitOperationError', 'YTUnitOperationError', (['ufunc', '*units'], {}), '(ufunc, *units)\n', (5469, 5484), False, 'from yt.utilities.exceptions import YTUnitOperationError, YTUnitConversionError, YTUfuncUnitError, YTIterableUnitCoercionError, YTInvalidUnitEquivalence, YTEquivalentDimsError\n'), ((8429, 8448), 'numpy.any', 'np.any', (['this_object'], {}), '(this_object)\n', (8435, 8448), True, 'import numpy as np\n'), ((16543, 16579), 'numpy.asarray', 'np.asarray', (['input_array'], {'dtype': 'dtype'}), '(input_array, dtype=dtype)\n', (16553, 16579), True, 'import numpy as np\n'), ((17693, 17729), 'yt.units.unit_object.Unit', 'Unit', (['input_units'], {'registry': 'registry'}), '(input_units, registry=registry)\n', (17697, 17729), False, 'from yt.units.unit_object import Unit, UnitParseError\n'), ((40993, 41034), 'yt.utilities.exceptions.YTUnitOperationError', 'YTUnitOperationError', (['"""power"""', 'power.unit'], {}), "('power', power.unit)\n", (41013, 41034), False, 'from yt.utilities.exceptions import YTUnitOperationError, YTUnitConversionError, YTUfuncUnitError, YTIterableUnitCoercionError, YTInvalidUnitEquivalence, YTEquivalentDimsError\n'), ((46177, 46194), 'numpy.array', 'np.array', (['out_arr'], {}), '(out_arr)\n', (46185, 46194), True, 'import numpy as np\n'), ((46541, 46570), 'numpy.array', 'np.array', (['out_arr'], {'copy': '(False)'}), '(out_arr, copy=False)\n', (46549, 46570), True, 'import numpy as np\n'), ((47037, 47052), 'numpy.asarray', 'np.asarray', (['inp'], {}), '(inp)\n', (47047, 47052), True, 'import numpy as np\n'), ((49603, 49619), 'numpy.asarray', 'np.asarray', (['self'], {}), '(self)\n', (49613, 49619), True, 'import numpy as np\n'), ((51361, 51391), 'yt.units.unit_lookup_table.default_unit_symbol_lut.copy', 'default_unit_symbol_lut.copy', ([], {}), '()\n', (51389, 51391), False, 'from yt.units.unit_lookup_table import default_unit_symbol_lut\n'), ((61704, 61781), 'yt.utilities.logger.ytLogger.warning', 'mylog.warning', (['(\'Unrecognized character at beginning of line: "%s".\' % line[0])'], {}), '(\'Unrecognized character at beginning of line: "%s".\' % line[0])\n', (61717, 61781), True, 'from yt.utilities.logger import ytLogger as mylog\n'), ((4917, 4958), 'yt.utilities.exceptions.YTUnitOperationError', 
'YTUnitOperationError', (['ufunc', 'unit1', 'unit2'], {}), '(ufunc, unit1, unit2)\n', (4937, 4958), False, 'from yt.utilities.exceptions import YTUnitOperationError, YTUnitConversionError, YTUfuncUnitError, YTIterableUnitCoercionError, YTInvalidUnitEquivalence, YTEquivalentDimsError\n'), ((6139, 6174), 'yt.utilities.exceptions.YTUnitOperationError', 'YTUnitOperationError', (['ufunc', '*units'], {}), '(ufunc, *units)\n', (6159, 6174), False, 'from yt.utilities.exceptions import YTUnitOperationError, YTUnitConversionError, YTUfuncUnitError, YTIterableUnitCoercionError, YTInvalidUnitEquivalence, YTEquivalentDimsError\n'), ((26317, 26366), 'yt.utilities.exceptions.YTInvalidUnitEquivalence', 'YTInvalidUnitEquivalence', (['equiv', 'self.units', 'unit'], {}), '(equiv, self.units, unit)\n', (26341, 26366), False, 'from yt.utilities.exceptions import YTUnitOperationError, YTUnitConversionError, YTUfuncUnitError, YTIterableUnitCoercionError, YTInvalidUnitEquivalence, YTEquivalentDimsError\n'), ((28120, 28138), 'sympy.Rational', 'Rational', (['exponent'], {}), '(exponent)\n', (28128, 28138), False, 'from sympy import Rational\n'), ((29632, 29650), 'sympy.Rational', 'Rational', (['exponent'], {}), '(exponent)\n', (29640, 29650), False, 'from sympy import Rational\n'), ((31007, 31020), 'sympy.Rational', 'Rational', (['pow'], {}), '(pow)\n', (31015, 31020), False, 'from sympy import Rational\n'), ((46483, 46500), 'numpy.array', 'np.array', (['out_arr'], {}), '(out_arr)\n', (46491, 46500), True, 'import numpy as np\n'), ((48143, 48162), 'numpy.asarray', 'np.asarray', (['inps[0]'], {}), '(inps[0])\n', (48153, 48162), True, 'import numpy as np\n'), ((48164, 48183), 'numpy.asarray', 'np.asarray', (['inps[1]'], {}), '(inps[1])\n', (48174, 48183), True, 'import numpy as np\n'), ((6251, 6282), 'yt.utilities.exceptions.YTUfuncUnitError', 'YTUfuncUnitError', (['ufunc', '*units'], {}), '(ufunc, *units)\n', (6267, 6282), False, 'from yt.utilities.exceptions import YTUnitOperationError, YTUnitConversionError, YTUfuncUnitError, YTIterableUnitCoercionError, YTInvalidUnitEquivalence, YTEquivalentDimsError\n'), ((17955, 17989), 'numpy.array', 'np.array', (['input_array'], {'dtype': 'dtype'}), '(input_array, dtype=dtype)\n', (17963, 17989), True, 'import numpy as np\n'), ((48895, 48914), 'numpy.asarray', 'np.asarray', (['out_arr'], {}), '(out_arr)\n', (48905, 48914), True, 'import numpy as np\n'), ((49206, 49225), 'numpy.asarray', 'np.asarray', (['out_arr'], {}), '(out_arr)\n', (49216, 49225), True, 'import numpy as np\n'), ((49295, 49314), 'numpy.asarray', 'np.asarray', (['out_arr'], {}), '(out_arr)\n', (49305, 49314), True, 'import numpy as np\n')] |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=missing-function-docstring,missing-module-docstring
import pytest
import tvm
from tvm import tir
from tvm.script import ty
# pylint: disable=no-member,invalid-name,unused-variable
@tvm.script.tir
def elementwise(a: ty.handle, c: ty.handle) -> None:
A = tir.match_buffer(a, (128, 128))
B = tir.alloc_buffer((128, 128))
C = tir.match_buffer(c, (128, 128))
with tir.block([128, 128], "B") as [vi, vj]:
B[vi, vj] = A[vi, vj] * 2.0
with tir.block([128, 128], "C") as [vi, vj]:
C[vi, vj] = B[vi, vj] + 1.0
@tvm.script.tir
def elementwise_multi_producer_consumer(a: ty.handle, c: ty.handle, d: ty.handle) -> None:
A = tir.match_buffer(a, (128, 128))
B = tir.alloc_buffer((128, 128))
C = tir.match_buffer(c, (128, 128))
D = tir.match_buffer(d, (128, 128))
with tir.block([128, 128], "B") as [vi, vj]:
B[vi, vj] = A[vi, vj] * 2.0 # B has two consumers
with tir.block([128, 128], "C") as [vi, vj]:
C[vi, vj] = B[vi, vj] + 1.0
with tir.block([128, 128], "D") as [vi, vj]:
D[vi, vj] = B[vi, vj] + 2.0 + C[vi, vj] # D has two producers
@tvm.script.tir
def elementwise_multi_consumer_inlined(a: ty.handle, c: ty.handle, d: ty.handle) -> None:
A = tir.match_buffer(a, (128, 128))
C = tir.match_buffer(c, (128, 128))
D = tir.match_buffer(d, (128, 128))
with tir.block([128, 128], "C") as [vi, vj]:
C[vi, vj] = A[vi, vj] * 2.0 + 1.0
with tir.block([128, 128], "D") as [vi, vj]:
D[vi, vj] = A[vi, vj] * 2.0 + 2.0 + C[vi, vj]
@tvm.script.tir
def elementwise_standalone(a: ty.handle, c: ty.handle) -> None:
A = tir.match_buffer(a, (128, 128))
B = tir.alloc_buffer((128, 128))
C = tir.match_buffer(c, (128, 128))
with tir.block([128, 128], "B") as [vi, vj]:
B[vi, vj] = A[vi, vj] * 2.0
with tir.block([128, 128], "C") as [vi, vj]:
C[vi, vj] = A[vi, vj] + 1.0
@tvm.script.tir
def elementwise_standalone_dce(a: ty.handle, c: ty.handle) -> None:
A = tir.match_buffer(a, (128, 128))
C = tir.match_buffer(c, (128, 128))
with tir.block([128, 128], "C") as [vi, vj]:
C[vi, vj] = A[vi, vj] + 1.0
@tvm.script.tir
def elementwise_under_loop(a: ty.handle, c: ty.handle) -> None:
A = tir.match_buffer(a, (128, 128))
C = tir.match_buffer(c, (128, 128))
B = tir.alloc_buffer((128, 128))
for i in tir.serial(0, 128):
for j in tir.serial(0, 128):
with tir.block([128, 128], "B") as [vi, vj]:
tir.bind(vi, i)
tir.bind(vj, j)
B[vi, vj] = A[vi, vj] * 2.0
for j in tir.serial(0, 128):
with tir.block([128, 128], "C") as [vi, vj]:
tir.bind(vi, i)
tir.bind(vj, j)
C[vi, vj] = B[vi, vj] + 1.0
@tvm.script.tir
def elementwise_inlined(a: ty.handle, c: ty.handle) -> None:
A = tir.match_buffer(a, (128, 128))
C = tir.match_buffer(c, (128, 128))
with tir.block([128, 128], "C") as [vi, vj]:
C[vi, vj] = A[vi, vj] * 2.0 + 1.0
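# The function above is the hand-written result the tests expect after block
# "B" of `elementwise` / `elementwise_under_loop` has been inlined into its
# consumer "C", or after block "C" has been reverse-inlined into its producer.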
@tvm.script.tir
def fail_multi_reader_writer(a: ty.handle, d: ty.handle) -> None:
A = tir.match_buffer(a, (128, 128))
B = tir.alloc_buffer((128, 128))
C = tir.alloc_buffer((128, 128))
D = tir.match_buffer(d, (128, 128))
with tir.block([128, 128], "B") as [vi, vj]:
B[vi, vj] = A[vi, vj] * 2.0
C[vi, vj] = A[vi, vj] + 2.0
with tir.block([128, 128], "C") as [vi, vj]:
D[vi, vj] = B[vi, vj] + C[vi, vj]
@tvm.script.tir
def elementwise_multi_reverse_loads(a: ty.handle, c: ty.handle) -> None:
A = tir.match_buffer(a, (128, 128))
B = tir.alloc_buffer((128, 128))
C = tir.match_buffer(c, (128, 128))
with tir.block([128, 128], "B") as [vi, vj]:
B[vi, vj] = A[vi, vj] * 2.0
with tir.block([128, 128], "C") as [vi, vj]:
C[vi, vj] = (B[vi, vj] + 1.0) * (B[vi, vj] * 2.0) + 3.0
@tvm.script.tir
def elementwise_multi_reverse_loads_inlined(a: ty.handle, c: ty.handle) -> None:
A = tir.match_buffer(a, (128, 128))
C = tir.match_buffer(c, (128, 128))
with tir.block([128, 128], "B") as [vi, vj]:
C[vi, vj] = (A[vi, vj] * 2.0 + 1.0) * (A[vi, vj] * 2.0 * 2.0) + 3.0
@tvm.script.tir
def opaque_access_load(a: ty.handle, c: ty.handle) -> None:
A = tir.match_buffer(a, (128, 128))
B = tir.alloc_buffer((128, 128))
C = tir.match_buffer(c, (128, 128))
with tir.block([128, 128], "B") as [vi, vj]:
B[vi, vj] = A[vi, vj] * 2.0
with tir.block([128, 128], "C") as [vi, vj]:
tir.reads(B[0:128, 0:128])
tir.writes(C[0:128, 0:128])
C[vi, vj] = tir.load("float32", B.data, vi * 128 + vj) + 1.0
@tvm.script.tir
def opaque_access_store(a: ty.handle, c: ty.handle) -> None:
A = tir.match_buffer(a, (128, 128))
B = tir.alloc_buffer((128, 128))
C = tir.match_buffer(c, (128, 128))
with tir.block([128, 128], "B") as [vi, vj]:
B[vi, vj] = A[vi, vj] * 2.0
with tir.block([128, 128], "C") as [vi, vj]:
tir.reads(B[0:128, 0:128])
tir.writes(C[0:128, 0:128])
tir.store(C.data, vi * 128 + vj, B[vi, vj] + 1.0)
C[vi, vj] = tir.load("float32", B.data, vi * 16 + vj) + 1.0
@tvm.script.tir
def buffer_matched(a: ty.handle, c: ty.handle) -> None:
A = tir.match_buffer(a, (128, 128))
B = tir.alloc_buffer((128, 128))
C = tir.match_buffer(c, (128, 128))
with tir.block([128, 128], "B") as [vi, vj]:
B[vi, vj] = A[vi, vj] * 2.0
with tir.block([128, 128], "C") as [vi, vj]:
Bb = tir.match_buffer(B[vi : vi + 1, vj], (1, 1))
C[vi, vj] = Bb[0, 0] + 1.0
@tvm.script.tir
def elementwise_predicate(a: ty.handle, c: ty.handle) -> None:
A = tir.match_buffer(a, (128, 128))
B = tir.alloc_buffer((128, 128))
C = tir.match_buffer(c, (128, 128))
with tir.block([128, 128], "B") as [vi, vj]:
B[vi, vj] = A[vi, vj] * 2.0
for i, j in tir.grid(128, 128):
with tir.block([128, 128], "C") as [vi, vj]:
tir.where(B[i, j] < 10.0)
C[vi, vj] = B[vi, vj] + 1.0
@tvm.script.tir
def elementwise_predicate_inlined(a: ty.handle, c: ty.handle) -> None:
A = tir.match_buffer(a, (128, 128))
C = tir.match_buffer(c, (128, 128))
for i, j in tir.grid(128, 128):
with tir.block([128, 128], "C") as [vi, vj]:
tir.where(A[i, j] * 2.0 < 10.0)
C[vi, vj] = A[vi, vj] * 2.0 + 1.0
@tvm.script.tir
def elementwise_multi_loads(a: ty.handle, c: ty.handle) -> None:
A = tir.match_buffer(a, (128, 128))
B = tir.alloc_buffer((128, 128))
C = tir.match_buffer(c, (128, 128))
with tir.block([128, 128], "B") as [vi, vj]:
B[vi, vj] = A[vi, vj] * 2.0
with tir.block([128, 126], "C") as [vi, vj]:
C[vi, vj] = B[vi, vj] + B[vi, vj + 1] + B[vi, vj + 2]
@tvm.script.tir
def elementwise_multi_loads_inlined(a: ty.handle, c: ty.handle) -> None:
A = tir.match_buffer(a, (128, 128))
C = tir.match_buffer(c, (128, 128))
with tir.block([128, 126], "C") as [vi, vj]:
C[vi, vj] = A[vi, vj] * 2.0 + A[vi, vj + 1] * 2.0 + A[vi, vj + 2] * 2.0
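# The TIR functions above come in input / expected-output pairs. The tests
# below build a tir.Schedule over an input, apply compute_inline or
# reverse_compute_inline, and compare against the expected TIR with
# tvm.ir.assert_structural_equal, or expect tvm.tir.ScheduleError for the
# illegal cases.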
# pylint: enable=no-member,invalid-name,unused-variable
def test_compute_inline_elementwise():
sch = tir.Schedule(elementwise, debug_mode=True)
block_b = sch.get_block("B")
block_c = sch.get_block("C")
sch.compute_inline(block_b)
tvm.ir.assert_structural_equal(elementwise_inlined, sch.mod["main"])
assert sch.get(block_c).name_hint == "C"
def test_compute_inline_under_loop():
sch = tir.Schedule(elementwise_under_loop, debug_mode=True)
block_b = sch.get_block("B")
block_c = sch.get_block("C")
sch.compute_inline(block_b)
tvm.ir.assert_structural_equal(elementwise_inlined, sch.mod["main"])
assert sch.get(block_c).name_hint == "C"
def test_compute_inline_as_dce():
sch = tir.Schedule(elementwise_standalone, debug_mode=True)
block_b = sch.get_block("B")
block_c = sch.get_block("C")
sch.compute_inline(block_b)
tvm.ir.assert_structural_equal(elementwise_standalone_dce, sch.mod["main"])
assert sch.get(block_c).name_hint == "C"
def test_compute_inline_multi_consumer():
sch = tir.Schedule(elementwise_multi_producer_consumer, debug_mode=True)
block_b = sch.get_block("B")
block_c = sch.get_block("C")
block_d = sch.get_block("D")
sch.compute_inline(block_b)
tvm.ir.assert_structural_equal(elementwise_multi_consumer_inlined, sch.mod["main"])
assert sch.get(block_c).name_hint == "C"
assert sch.get(block_d).name_hint == "D"
def test_compute_inline_fail_multi_writer():
sch = tir.Schedule(fail_multi_reader_writer, debug_mode=True, error_render_level="detail")
block_b = sch.get_block("B")
with pytest.raises(tvm.tir.ScheduleError):
sch.compute_inline(block_b)
def test_reverse_compute_inline_elementwise():
sch = tir.Schedule(elementwise, debug_mode=True)
block_b = sch.get_block("B")
block_c = sch.get_block("C")
sch.reverse_compute_inline(block_c)
tvm.ir.assert_structural_equal(elementwise_inlined, sch.mod["main"])
assert sch.get(block_b).name_hint == "B"
def test_reverse_compute_inline_under_loop():
sch = tir.Schedule(elementwise_under_loop, debug_mode=True)
block_b = sch.get_block("B")
block_c = sch.get_block("C")
sch.reverse_compute_inline(block_c)
tvm.ir.assert_structural_equal(elementwise_inlined, sch.mod["main"])
assert sch.get(block_b).name_hint == "B"
def test_reverse_compute_inline_fail_as_dce():
sch = tir.Schedule(elementwise_standalone, debug_mode=True)
block_b = sch.get_block("B")
with pytest.raises(tvm.tir.ScheduleError):
sch.reverse_compute_inline(block_b)
def test_reverse_compute_inline_fail_multi_producer():
sch = tir.Schedule(elementwise_multi_producer_consumer, debug_mode=True)
block_d = sch.get_block("D")
with pytest.raises(tvm.tir.ScheduleError):
sch.reverse_compute_inline(block_d)
def test_reverse_compute_inline_fail_multi_reader():
sch = tir.Schedule(fail_multi_reader_writer, debug_mode=True)
block_c = sch.get_block("C")
with pytest.raises(tvm.tir.ScheduleError):
sch.reverse_compute_inline(block_c)
def test_reverse_compute_multi_reverse_loads():
sch = tir.Schedule(elementwise_multi_reverse_loads, debug_mode=True)
block_c = sch.get_block("C")
sch.reverse_compute_inline(block_c)
tvm.ir.assert_structural_equal(elementwise_multi_reverse_loads_inlined, sch.mod["main"])
def test_reverse_compute_fail_multi_reverse_loads():
sch = tir.Schedule(elementwise_multi_loads, debug_mode=True)
block_c = sch.get_block("C")
with pytest.raises(tvm.tir.ScheduleError):
sch.reverse_compute_inline(block_c)
def test_opaque_access_load():
sch = tir.Schedule(opaque_access_load, debug_mode=True)
block_b = sch.get_block("B")
with pytest.raises(tvm.tir.ScheduleError):
sch.compute_inline(block_b)
def test_opaque_access_store():
sch = tir.Schedule(opaque_access_store, debug_mode=True)
block_b = sch.get_block("B")
with pytest.raises(tvm.tir.ScheduleError):
sch.compute_inline(block_b)
def test_buffer_matched():
sch = tir.Schedule(buffer_matched, debug_mode=True)
block_b = sch.get_block("B")
with pytest.raises(tvm.tir.ScheduleError):
sch.compute_inline(block_b)
def test_compute_inline_predicate():
sch = tir.Schedule(elementwise_predicate, debug_mode=True)
block_b = sch.get_block("B")
sch.compute_inline(block_b)
tvm.ir.assert_structural_equal(elementwise_predicate_inlined, sch.mod["main"])
def test_compute_inline_multi_loads():
sch = tir.Schedule(elementwise_multi_loads, debug_mode=True)
block_b = sch.get_block("B")
sch.compute_inline(block_b)
tvm.ir.assert_structural_equal(elementwise_multi_loads_inlined, sch.mod["main"])
if __name__ == "__main__":
test_compute_inline_elementwise()
test_compute_inline_under_loop()
test_compute_inline_as_dce()
test_compute_inline_multi_consumer()
test_compute_inline_fail_multi_writer()
test_reverse_compute_inline_elementwise()
test_reverse_compute_inline_under_loop()
test_reverse_compute_inline_fail_as_dce()
test_reverse_compute_inline_fail_multi_producer()
test_reverse_compute_inline_fail_multi_reader()
test_reverse_compute_multi_reverse_loads()
test_reverse_compute_fail_multi_reverse_loads()
test_opaque_access_load()
test_opaque_access_store()
test_buffer_matched()
test_compute_inline_predicate()
test_compute_inline_multi_loads()
| [
"tvm.tir.serial",
"tvm.tir.Schedule",
"tvm.tir.block",
"tvm.tir.store",
"tvm.tir.match_buffer",
"tvm.tir.alloc_buffer",
"tvm.tir.bind",
"tvm.tir.grid",
"tvm.tir.writes",
"tvm.tir.reads",
"pytest.raises",
"tvm.tir.load",
"tvm.ir.assert_structural_equal",
"tvm.tir.where"
] | [((1063, 1094), 'tvm.tir.match_buffer', 'tir.match_buffer', (['a', '(128, 128)'], {}), '(a, (128, 128))\n', (1079, 1094), False, 'from tvm import tir\n'), ((1103, 1131), 'tvm.tir.alloc_buffer', 'tir.alloc_buffer', (['(128, 128)'], {}), '((128, 128))\n', (1119, 1131), False, 'from tvm import tir\n'), ((1140, 1171), 'tvm.tir.match_buffer', 'tir.match_buffer', (['c', '(128, 128)'], {}), '(c, (128, 128))\n', (1156, 1171), False, 'from tvm import tir\n'), ((1459, 1490), 'tvm.tir.match_buffer', 'tir.match_buffer', (['a', '(128, 128)'], {}), '(a, (128, 128))\n', (1475, 1490), False, 'from tvm import tir\n'), ((1499, 1527), 'tvm.tir.alloc_buffer', 'tir.alloc_buffer', (['(128, 128)'], {}), '((128, 128))\n', (1515, 1527), False, 'from tvm import tir\n'), ((1536, 1567), 'tvm.tir.match_buffer', 'tir.match_buffer', (['c', '(128, 128)'], {}), '(c, (128, 128))\n', (1552, 1567), False, 'from tvm import tir\n'), ((1576, 1607), 'tvm.tir.match_buffer', 'tir.match_buffer', (['d', '(128, 128)'], {}), '(d, (128, 128))\n', (1592, 1607), False, 'from tvm import tir\n'), ((2037, 2068), 'tvm.tir.match_buffer', 'tir.match_buffer', (['a', '(128, 128)'], {}), '(a, (128, 128))\n', (2053, 2068), False, 'from tvm import tir\n'), ((2077, 2108), 'tvm.tir.match_buffer', 'tir.match_buffer', (['c', '(128, 128)'], {}), '(c, (128, 128))\n', (2093, 2108), False, 'from tvm import tir\n'), ((2117, 2148), 'tvm.tir.match_buffer', 'tir.match_buffer', (['d', '(128, 128)'], {}), '(d, (128, 128))\n', (2133, 2148), False, 'from tvm import tir\n'), ((2433, 2464), 'tvm.tir.match_buffer', 'tir.match_buffer', (['a', '(128, 128)'], {}), '(a, (128, 128))\n', (2449, 2464), False, 'from tvm import tir\n'), ((2473, 2501), 'tvm.tir.alloc_buffer', 'tir.alloc_buffer', (['(128, 128)'], {}), '((128, 128))\n', (2489, 2501), False, 'from tvm import tir\n'), ((2510, 2541), 'tvm.tir.match_buffer', 'tir.match_buffer', (['c', '(128, 128)'], {}), '(c, (128, 128))\n', (2526, 2541), False, 'from tvm import tir\n'), ((2806, 2837), 'tvm.tir.match_buffer', 'tir.match_buffer', (['a', '(128, 128)'], {}), '(a, (128, 128))\n', (2822, 2837), False, 'from tvm import tir\n'), ((2846, 2877), 'tvm.tir.match_buffer', 'tir.match_buffer', (['c', '(128, 128)'], {}), '(c, (128, 128))\n', (2862, 2877), False, 'from tvm import tir\n'), ((3053, 3084), 'tvm.tir.match_buffer', 'tir.match_buffer', (['a', '(128, 128)'], {}), '(a, (128, 128))\n', (3069, 3084), False, 'from tvm import tir\n'), ((3093, 3124), 'tvm.tir.match_buffer', 'tir.match_buffer', (['c', '(128, 128)'], {}), '(c, (128, 128))\n', (3109, 3124), False, 'from tvm import tir\n'), ((3133, 3161), 'tvm.tir.alloc_buffer', 'tir.alloc_buffer', (['(128, 128)'], {}), '((128, 128))\n', (3149, 3161), False, 'from tvm import tir\n'), ((3175, 3193), 'tvm.tir.serial', 'tir.serial', (['(0)', '(128)'], {}), '(0, 128)\n', (3185, 3193), False, 'from tvm import tir\n'), ((3686, 3717), 'tvm.tir.match_buffer', 'tir.match_buffer', (['a', '(128, 128)'], {}), '(a, (128, 128))\n', (3702, 3717), False, 'from tvm import tir\n'), ((3726, 3757), 'tvm.tir.match_buffer', 'tir.match_buffer', (['c', '(128, 128)'], {}), '(c, (128, 128))\n', (3742, 3757), False, 'from tvm import tir\n'), ((3941, 3972), 'tvm.tir.match_buffer', 'tir.match_buffer', (['a', '(128, 128)'], {}), '(a, (128, 128))\n', (3957, 3972), False, 'from tvm import tir\n'), ((3981, 4009), 'tvm.tir.alloc_buffer', 'tir.alloc_buffer', (['(128, 128)'], {}), '((128, 128))\n', (3997, 4009), False, 'from tvm import tir\n'), ((4018, 4046), 'tvm.tir.alloc_buffer', 'tir.alloc_buffer', (['(128, 
128)'], {}), '((128, 128))\n', (4034, 4046), False, 'from tvm import tir\n'), ((4055, 4086), 'tvm.tir.match_buffer', 'tir.match_buffer', (['d', '(128, 128)'], {}), '(d, (128, 128))\n', (4071, 4086), False, 'from tvm import tir\n'), ((4398, 4429), 'tvm.tir.match_buffer', 'tir.match_buffer', (['a', '(128, 128)'], {}), '(a, (128, 128))\n', (4414, 4429), False, 'from tvm import tir\n'), ((4438, 4466), 'tvm.tir.alloc_buffer', 'tir.alloc_buffer', (['(128, 128)'], {}), '((128, 128))\n', (4454, 4466), False, 'from tvm import tir\n'), ((4475, 4506), 'tvm.tir.match_buffer', 'tir.match_buffer', (['c', '(128, 128)'], {}), '(c, (128, 128))\n', (4491, 4506), False, 'from tvm import tir\n'), ((4812, 4843), 'tvm.tir.match_buffer', 'tir.match_buffer', (['a', '(128, 128)'], {}), '(a, (128, 128))\n', (4828, 4843), False, 'from tvm import tir\n'), ((4852, 4883), 'tvm.tir.match_buffer', 'tir.match_buffer', (['c', '(128, 128)'], {}), '(c, (128, 128))\n', (4868, 4883), False, 'from tvm import tir\n'), ((5095, 5126), 'tvm.tir.match_buffer', 'tir.match_buffer', (['a', '(128, 128)'], {}), '(a, (128, 128))\n', (5111, 5126), False, 'from tvm import tir\n'), ((5135, 5163), 'tvm.tir.alloc_buffer', 'tir.alloc_buffer', (['(128, 128)'], {}), '((128, 128))\n', (5151, 5163), False, 'from tvm import tir\n'), ((5172, 5203), 'tvm.tir.match_buffer', 'tir.match_buffer', (['c', '(128, 128)'], {}), '(c, (128, 128))\n', (5188, 5203), False, 'from tvm import tir\n'), ((5565, 5596), 'tvm.tir.match_buffer', 'tir.match_buffer', (['a', '(128, 128)'], {}), '(a, (128, 128))\n', (5581, 5596), False, 'from tvm import tir\n'), ((5605, 5633), 'tvm.tir.alloc_buffer', 'tir.alloc_buffer', (['(128, 128)'], {}), '((128, 128))\n', (5621, 5633), False, 'from tvm import tir\n'), ((5642, 5673), 'tvm.tir.match_buffer', 'tir.match_buffer', (['c', '(128, 128)'], {}), '(c, (128, 128))\n', (5658, 5673), False, 'from tvm import tir\n'), ((6087, 6118), 'tvm.tir.match_buffer', 'tir.match_buffer', (['a', '(128, 128)'], {}), '(a, (128, 128))\n', (6103, 6118), False, 'from tvm import tir\n'), ((6127, 6155), 'tvm.tir.alloc_buffer', 'tir.alloc_buffer', (['(128, 128)'], {}), '((128, 128))\n', (6143, 6155), False, 'from tvm import tir\n'), ((6164, 6195), 'tvm.tir.match_buffer', 'tir.match_buffer', (['c', '(128, 128)'], {}), '(c, (128, 128))\n', (6180, 6195), False, 'from tvm import tir\n'), ((6512, 6543), 'tvm.tir.match_buffer', 'tir.match_buffer', (['a', '(128, 128)'], {}), '(a, (128, 128))\n', (6528, 6543), False, 'from tvm import tir\n'), ((6552, 6580), 'tvm.tir.alloc_buffer', 'tir.alloc_buffer', (['(128, 128)'], {}), '((128, 128))\n', (6568, 6580), False, 'from tvm import tir\n'), ((6589, 6620), 'tvm.tir.match_buffer', 'tir.match_buffer', (['c', '(128, 128)'], {}), '(c, (128, 128))\n', (6605, 6620), False, 'from tvm import tir\n'), ((6722, 6740), 'tvm.tir.grid', 'tir.grid', (['(128)', '(128)'], {}), '(128, 128)\n', (6730, 6740), False, 'from tvm import tir\n'), ((6970, 7001), 'tvm.tir.match_buffer', 'tir.match_buffer', (['a', '(128, 128)'], {}), '(a, (128, 128))\n', (6986, 7001), False, 'from tvm import tir\n'), ((7010, 7041), 'tvm.tir.match_buffer', 'tir.match_buffer', (['c', '(128, 128)'], {}), '(c, (128, 128))\n', (7026, 7041), False, 'from tvm import tir\n'), ((7058, 7076), 'tvm.tir.grid', 'tir.grid', (['(128)', '(128)'], {}), '(128, 128)\n', (7066, 7076), False, 'from tvm import tir\n'), ((7312, 7343), 'tvm.tir.match_buffer', 'tir.match_buffer', (['a', '(128, 128)'], {}), '(a, (128, 128))\n', (7328, 7343), False, 'from tvm import tir\n'), ((7352, 7380), 
'tvm.tir.alloc_buffer', 'tir.alloc_buffer', (['(128, 128)'], {}), '((128, 128))\n', (7368, 7380), False, 'from tvm import tir\n'), ((7389, 7420), 'tvm.tir.match_buffer', 'tir.match_buffer', (['c', '(128, 128)'], {}), '(c, (128, 128))\n', (7405, 7420), False, 'from tvm import tir\n'), ((7716, 7747), 'tvm.tir.match_buffer', 'tir.match_buffer', (['a', '(128, 128)'], {}), '(a, (128, 128))\n', (7732, 7747), False, 'from tvm import tir\n'), ((7756, 7787), 'tvm.tir.match_buffer', 'tir.match_buffer', (['c', '(128, 128)'], {}), '(c, (128, 128))\n', (7772, 7787), False, 'from tvm import tir\n'), ((8026, 8068), 'tvm.tir.Schedule', 'tir.Schedule', (['elementwise'], {'debug_mode': '(True)'}), '(elementwise, debug_mode=True)\n', (8038, 8068), False, 'from tvm import tir\n'), ((8171, 8239), 'tvm.ir.assert_structural_equal', 'tvm.ir.assert_structural_equal', (['elementwise_inlined', "sch.mod['main']"], {}), "(elementwise_inlined, sch.mod['main'])\n", (8201, 8239), False, 'import tvm\n'), ((8335, 8388), 'tvm.tir.Schedule', 'tir.Schedule', (['elementwise_under_loop'], {'debug_mode': '(True)'}), '(elementwise_under_loop, debug_mode=True)\n', (8347, 8388), False, 'from tvm import tir\n'), ((8491, 8559), 'tvm.ir.assert_structural_equal', 'tvm.ir.assert_structural_equal', (['elementwise_inlined', "sch.mod['main']"], {}), "(elementwise_inlined, sch.mod['main'])\n", (8521, 8559), False, 'import tvm\n'), ((8651, 8704), 'tvm.tir.Schedule', 'tir.Schedule', (['elementwise_standalone'], {'debug_mode': '(True)'}), '(elementwise_standalone, debug_mode=True)\n', (8663, 8704), False, 'from tvm import tir\n'), ((8807, 8882), 'tvm.ir.assert_structural_equal', 'tvm.ir.assert_structural_equal', (['elementwise_standalone_dce', "sch.mod['main']"], {}), "(elementwise_standalone_dce, sch.mod['main'])\n", (8837, 8882), False, 'import tvm\n'), ((8982, 9048), 'tvm.tir.Schedule', 'tir.Schedule', (['elementwise_multi_producer_consumer'], {'debug_mode': '(True)'}), '(elementwise_multi_producer_consumer, debug_mode=True)\n', (8994, 9048), False, 'from tvm import tir\n'), ((9184, 9272), 'tvm.ir.assert_structural_equal', 'tvm.ir.assert_structural_equal', (['elementwise_multi_consumer_inlined', "sch.mod['main']"], {}), "(elementwise_multi_consumer_inlined, sch.mod[\n 'main'])\n", (9214, 9272), False, 'import tvm\n'), ((9415, 9504), 'tvm.tir.Schedule', 'tir.Schedule', (['fail_multi_reader_writer'], {'debug_mode': '(True)', 'error_render_level': '"""detail"""'}), "(fail_multi_reader_writer, debug_mode=True, error_render_level=\n 'detail')\n", (9427, 9504), False, 'from tvm import tir\n'), ((9675, 9717), 'tvm.tir.Schedule', 'tir.Schedule', (['elementwise'], {'debug_mode': '(True)'}), '(elementwise, debug_mode=True)\n', (9687, 9717), False, 'from tvm import tir\n'), ((9828, 9896), 'tvm.ir.assert_structural_equal', 'tvm.ir.assert_structural_equal', (['elementwise_inlined', "sch.mod['main']"], {}), "(elementwise_inlined, sch.mod['main'])\n", (9858, 9896), False, 'import tvm\n'), ((10000, 10053), 'tvm.tir.Schedule', 'tir.Schedule', (['elementwise_under_loop'], {'debug_mode': '(True)'}), '(elementwise_under_loop, debug_mode=True)\n', (10012, 10053), False, 'from tvm import tir\n'), ((10164, 10232), 'tvm.ir.assert_structural_equal', 'tvm.ir.assert_structural_equal', (['elementwise_inlined', "sch.mod['main']"], {}), "(elementwise_inlined, sch.mod['main'])\n", (10194, 10232), False, 'import tvm\n'), ((10337, 10390), 'tvm.tir.Schedule', 'tir.Schedule', (['elementwise_standalone'], {'debug_mode': '(True)'}), '(elementwise_standalone, 
debug_mode=True)\n', (10349, 10390), False, 'from tvm import tir\n'), ((10582, 10648), 'tvm.tir.Schedule', 'tir.Schedule', (['elementwise_multi_producer_consumer'], {'debug_mode': '(True)'}), '(elementwise_multi_producer_consumer, debug_mode=True)\n', (10594, 10648), False, 'from tvm import tir\n'), ((10838, 10893), 'tvm.tir.Schedule', 'tir.Schedule', (['fail_multi_reader_writer'], {'debug_mode': '(True)'}), '(fail_multi_reader_writer, debug_mode=True)\n', (10850, 10893), False, 'from tvm import tir\n'), ((11078, 11140), 'tvm.tir.Schedule', 'tir.Schedule', (['elementwise_multi_reverse_loads'], {'debug_mode': '(True)'}), '(elementwise_multi_reverse_loads, debug_mode=True)\n', (11090, 11140), False, 'from tvm import tir\n'), ((11218, 11311), 'tvm.ir.assert_structural_equal', 'tvm.ir.assert_structural_equal', (['elementwise_multi_reverse_loads_inlined', "sch.mod['main']"], {}), "(elementwise_multi_reverse_loads_inlined, sch\n .mod['main'])\n", (11248, 11311), False, 'import tvm\n'), ((11372, 11426), 'tvm.tir.Schedule', 'tir.Schedule', (['elementwise_multi_loads'], {'debug_mode': '(True)'}), '(elementwise_multi_loads, debug_mode=True)\n', (11384, 11426), False, 'from tvm import tir\n'), ((11594, 11643), 'tvm.tir.Schedule', 'tir.Schedule', (['opaque_access_load'], {'debug_mode': '(True)'}), '(opaque_access_load, debug_mode=True)\n', (11606, 11643), False, 'from tvm import tir\n'), ((11804, 11854), 'tvm.tir.Schedule', 'tir.Schedule', (['opaque_access_store'], {'debug_mode': '(True)'}), '(opaque_access_store, debug_mode=True)\n', (11816, 11854), False, 'from tvm import tir\n'), ((12010, 12055), 'tvm.tir.Schedule', 'tir.Schedule', (['buffer_matched'], {'debug_mode': '(True)'}), '(buffer_matched, debug_mode=True)\n', (12022, 12055), False, 'from tvm import tir\n'), ((12221, 12273), 'tvm.tir.Schedule', 'tir.Schedule', (['elementwise_predicate'], {'debug_mode': '(True)'}), '(elementwise_predicate, debug_mode=True)\n', (12233, 12273), False, 'from tvm import tir\n'), ((12343, 12421), 'tvm.ir.assert_structural_equal', 'tvm.ir.assert_structural_equal', (['elementwise_predicate_inlined', "sch.mod['main']"], {}), "(elementwise_predicate_inlined, sch.mod['main'])\n", (12373, 12421), False, 'import tvm\n'), ((12473, 12527), 'tvm.tir.Schedule', 'tir.Schedule', (['elementwise_multi_loads'], {'debug_mode': '(True)'}), '(elementwise_multi_loads, debug_mode=True)\n', (12485, 12527), False, 'from tvm import tir\n'), ((12597, 12682), 'tvm.ir.assert_structural_equal', 'tvm.ir.assert_structural_equal', (['elementwise_multi_loads_inlined', "sch.mod['main']"], {}), "(elementwise_multi_loads_inlined, sch.mod['main']\n )\n", (12627, 12682), False, 'import tvm\n'), ((1181, 1207), 'tvm.tir.block', 'tir.block', (['[128, 128]', '"""B"""'], {}), "([128, 128], 'B')\n", (1190, 1207), False, 'from tvm import tir\n'), ((1266, 1292), 'tvm.tir.block', 'tir.block', (['[128, 128]', '"""C"""'], {}), "([128, 128], 'C')\n", (1275, 1292), False, 'from tvm import tir\n'), ((1617, 1643), 'tvm.tir.block', 'tir.block', (['[128, 128]', '"""B"""'], {}), "([128, 128], 'B')\n", (1626, 1643), False, 'from tvm import tir\n'), ((1725, 1751), 'tvm.tir.block', 'tir.block', (['[128, 128]', '"""C"""'], {}), "([128, 128], 'C')\n", (1734, 1751), False, 'from tvm import tir\n'), ((1810, 1836), 'tvm.tir.block', 'tir.block', (['[128, 128]', '"""D"""'], {}), "([128, 128], 'D')\n", (1819, 1836), False, 'from tvm import tir\n'), ((2158, 2184), 'tvm.tir.block', 'tir.block', (['[128, 128]', '"""C"""'], {}), "([128, 128], 'C')\n", (2167, 2184), False, 'from tvm 
import tir\n'), ((2249, 2275), 'tvm.tir.block', 'tir.block', (['[128, 128]', '"""D"""'], {}), "([128, 128], 'D')\n", (2258, 2275), False, 'from tvm import tir\n'), ((2551, 2577), 'tvm.tir.block', 'tir.block', (['[128, 128]', '"""B"""'], {}), "([128, 128], 'B')\n", (2560, 2577), False, 'from tvm import tir\n'), ((2636, 2662), 'tvm.tir.block', 'tir.block', (['[128, 128]', '"""C"""'], {}), "([128, 128], 'C')\n", (2645, 2662), False, 'from tvm import tir\n'), ((2887, 2913), 'tvm.tir.block', 'tir.block', (['[128, 128]', '"""C"""'], {}), "([128, 128], 'C')\n", (2896, 2913), False, 'from tvm import tir\n'), ((3212, 3230), 'tvm.tir.serial', 'tir.serial', (['(0)', '(128)'], {}), '(0, 128)\n', (3222, 3230), False, 'from tvm import tir\n'), ((3414, 3432), 'tvm.tir.serial', 'tir.serial', (['(0)', '(128)'], {}), '(0, 128)\n', (3424, 3432), False, 'from tvm import tir\n'), ((3767, 3793), 'tvm.tir.block', 'tir.block', (['[128, 128]', '"""C"""'], {}), "([128, 128], 'C')\n", (3776, 3793), False, 'from tvm import tir\n'), ((4096, 4122), 'tvm.tir.block', 'tir.block', (['[128, 128]', '"""B"""'], {}), "([128, 128], 'B')\n", (4105, 4122), False, 'from tvm import tir\n'), ((4217, 4243), 'tvm.tir.block', 'tir.block', (['[128, 128]', '"""C"""'], {}), "([128, 128], 'C')\n", (4226, 4243), False, 'from tvm import tir\n'), ((4516, 4542), 'tvm.tir.block', 'tir.block', (['[128, 128]', '"""B"""'], {}), "([128, 128], 'B')\n", (4525, 4542), False, 'from tvm import tir\n'), ((4601, 4627), 'tvm.tir.block', 'tir.block', (['[128, 128]', '"""C"""'], {}), "([128, 128], 'C')\n", (4610, 4627), False, 'from tvm import tir\n'), ((4893, 4919), 'tvm.tir.block', 'tir.block', (['[128, 128]', '"""B"""'], {}), "([128, 128], 'B')\n", (4902, 4919), False, 'from tvm import tir\n'), ((5213, 5239), 'tvm.tir.block', 'tir.block', (['[128, 128]', '"""B"""'], {}), "([128, 128], 'B')\n", (5222, 5239), False, 'from tvm import tir\n'), ((5298, 5324), 'tvm.tir.block', 'tir.block', (['[128, 128]', '"""C"""'], {}), "([128, 128], 'C')\n", (5307, 5324), False, 'from tvm import tir\n'), ((5346, 5372), 'tvm.tir.reads', 'tir.reads', (['B[0:128, 0:128]'], {}), '(B[0:128, 0:128])\n', (5355, 5372), False, 'from tvm import tir\n'), ((5381, 5408), 'tvm.tir.writes', 'tir.writes', (['C[0:128, 0:128]'], {}), '(C[0:128, 0:128])\n', (5391, 5408), False, 'from tvm import tir\n'), ((5683, 5709), 'tvm.tir.block', 'tir.block', (['[128, 128]', '"""B"""'], {}), "([128, 128], 'B')\n", (5692, 5709), False, 'from tvm import tir\n'), ((5768, 5794), 'tvm.tir.block', 'tir.block', (['[128, 128]', '"""C"""'], {}), "([128, 128], 'C')\n", (5777, 5794), False, 'from tvm import tir\n'), ((5816, 5842), 'tvm.tir.reads', 'tir.reads', (['B[0:128, 0:128]'], {}), '(B[0:128, 0:128])\n', (5825, 5842), False, 'from tvm import tir\n'), ((5851, 5878), 'tvm.tir.writes', 'tir.writes', (['C[0:128, 0:128]'], {}), '(C[0:128, 0:128])\n', (5861, 5878), False, 'from tvm import tir\n'), ((5887, 5936), 'tvm.tir.store', 'tir.store', (['C.data', '(vi * 128 + vj)', '(B[vi, vj] + 1.0)'], {}), '(C.data, vi * 128 + vj, B[vi, vj] + 1.0)\n', (5896, 5936), False, 'from tvm import tir\n'), ((6205, 6231), 'tvm.tir.block', 'tir.block', (['[128, 128]', '"""B"""'], {}), "([128, 128], 'B')\n", (6214, 6231), False, 'from tvm import tir\n'), ((6290, 6316), 'tvm.tir.block', 'tir.block', (['[128, 128]', '"""C"""'], {}), "([128, 128], 'C')\n", (6299, 6316), False, 'from tvm import tir\n'), ((6343, 6385), 'tvm.tir.match_buffer', 'tir.match_buffer', (['B[vi:vi + 1, vj]', '(1, 1)'], {}), '(B[vi:vi + 1, vj], (1, 1))\n', (6359, 
6385), False, 'from tvm import tir\n'), ((6630, 6656), 'tvm.tir.block', 'tir.block', (['[128, 128]', '"""B"""'], {}), "([128, 128], 'B')\n", (6639, 6656), False, 'from tvm import tir\n'), ((7430, 7456), 'tvm.tir.block', 'tir.block', (['[128, 128]', '"""B"""'], {}), "([128, 128], 'B')\n", (7439, 7456), False, 'from tvm import tir\n'), ((7515, 7541), 'tvm.tir.block', 'tir.block', (['[128, 126]', '"""C"""'], {}), "([128, 126], 'C')\n", (7524, 7541), False, 'from tvm import tir\n'), ((7797, 7823), 'tvm.tir.block', 'tir.block', (['[128, 126]', '"""C"""'], {}), "([128, 126], 'C')\n", (7806, 7823), False, 'from tvm import tir\n'), ((9542, 9578), 'pytest.raises', 'pytest.raises', (['tvm.tir.ScheduleError'], {}), '(tvm.tir.ScheduleError)\n', (9555, 9578), False, 'import pytest\n'), ((10433, 10469), 'pytest.raises', 'pytest.raises', (['tvm.tir.ScheduleError'], {}), '(tvm.tir.ScheduleError)\n', (10446, 10469), False, 'import pytest\n'), ((10691, 10727), 'pytest.raises', 'pytest.raises', (['tvm.tir.ScheduleError'], {}), '(tvm.tir.ScheduleError)\n', (10704, 10727), False, 'import pytest\n'), ((10936, 10972), 'pytest.raises', 'pytest.raises', (['tvm.tir.ScheduleError'], {}), '(tvm.tir.ScheduleError)\n', (10949, 10972), False, 'import pytest\n'), ((11469, 11505), 'pytest.raises', 'pytest.raises', (['tvm.tir.ScheduleError'], {}), '(tvm.tir.ScheduleError)\n', (11482, 11505), False, 'import pytest\n'), ((11686, 11722), 'pytest.raises', 'pytest.raises', (['tvm.tir.ScheduleError'], {}), '(tvm.tir.ScheduleError)\n', (11699, 11722), False, 'import pytest\n'), ((11897, 11933), 'pytest.raises', 'pytest.raises', (['tvm.tir.ScheduleError'], {}), '(tvm.tir.ScheduleError)\n', (11910, 11933), False, 'import pytest\n'), ((12098, 12134), 'pytest.raises', 'pytest.raises', (['tvm.tir.ScheduleError'], {}), '(tvm.tir.ScheduleError)\n', (12111, 12134), False, 'import pytest\n'), ((5429, 5471), 'tvm.tir.load', 'tir.load', (['"""float32"""', 'B.data', '(vi * 128 + vj)'], {}), "('float32', B.data, vi * 128 + vj)\n", (5437, 5471), False, 'from tvm import tir\n'), ((5957, 5998), 'tvm.tir.load', 'tir.load', (['"""float32"""', 'B.data', '(vi * 16 + vj)'], {}), "('float32', B.data, vi * 16 + vj)\n", (5965, 5998), False, 'from tvm import tir\n'), ((6755, 6781), 'tvm.tir.block', 'tir.block', (['[128, 128]', '"""C"""'], {}), "([128, 128], 'C')\n", (6764, 6781), False, 'from tvm import tir\n'), ((6807, 6832), 'tvm.tir.where', 'tir.where', (['(B[i, j] < 10.0)'], {}), '(B[i, j] < 10.0)\n', (6816, 6832), False, 'from tvm import tir\n'), ((7091, 7117), 'tvm.tir.block', 'tir.block', (['[128, 128]', '"""C"""'], {}), "([128, 128], 'C')\n", (7100, 7117), False, 'from tvm import tir\n'), ((7143, 7174), 'tvm.tir.where', 'tir.where', (['(A[i, j] * 2.0 < 10.0)'], {}), '(A[i, j] * 2.0 < 10.0)\n', (7152, 7174), False, 'from tvm import tir\n'), ((3249, 3275), 'tvm.tir.block', 'tir.block', (['[128, 128]', '"""B"""'], {}), "([128, 128], 'B')\n", (3258, 3275), False, 'from tvm import tir\n'), ((3305, 3320), 'tvm.tir.bind', 'tir.bind', (['vi', 'i'], {}), '(vi, i)\n', (3313, 3320), False, 'from tvm import tir\n'), ((3337, 3352), 'tvm.tir.bind', 'tir.bind', (['vj', 'j'], {}), '(vj, j)\n', (3345, 3352), False, 'from tvm import tir\n'), ((3451, 3477), 'tvm.tir.block', 'tir.block', (['[128, 128]', '"""C"""'], {}), "([128, 128], 'C')\n", (3460, 3477), False, 'from tvm import tir\n'), ((3507, 3522), 'tvm.tir.bind', 'tir.bind', (['vi', 'i'], {}), '(vi, i)\n', (3515, 3522), False, 'from tvm import tir\n'), ((3539, 3554), 'tvm.tir.bind', 'tir.bind', (['vj', 'j'], 
{}), '(vj, j)\n', (3547, 3554), False, 'from tvm import tir\n')] |
from flask import Flask, Response, request, redirect
import subprocess
import tempfile
import json
import yaml
import signal
import threading
import time
import copy
app = Flask(__name__)
jobs_lock = threading.Lock()
jobs = []
class Job(threading.Thread):
def __init__(self, jobid, path, inputobj):
super(Job, self).__init__()
self.jobid = jobid
self.path = path
self.inputobj = inputobj
self.updatelock = threading.Lock()
self.begin()
def begin(self):
loghandle, self.logname = tempfile.mkstemp()
with self.updatelock:
self.outdir = tempfile.mkdtemp()
self.proc = subprocess.Popen(["cwl-runner", self.path, "-"],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=loghandle,
close_fds=True,
cwd=self.outdir)
self.status = {
"id": "%sjobs/%i" % (request.url_root, self.jobid),
"log": "%sjobs/%i/log" % (request.url_root, self.jobid),
"run": self.path,
"state": "Running",
"input": json.loads(self.inputobj),
"output": None}
def run(self):
self.stdoutdata, self.stderrdata = self.proc.communicate(self.inputobj)
if self.proc.returncode == 0:
outobj = yaml.load(self.stdoutdata, Loader=yaml.FullLoader)
with self.updatelock:
self.status["state"] = "Success"
self.status["output"] = outobj
else:
with self.updatelock:
self.status["state"] = "Failed"
def getstatus(self):
with self.updatelock:
return self.status.copy()
def cancel(self):
if self.status["state"] == "Running":
self.proc.send_signal(signal.SIGQUIT)
with self.updatelock:
self.status["state"] = "Canceled"
def pause(self):
if self.status["state"] == "Running":
self.proc.send_signal(signal.SIGTSTP)
with self.updatelock:
self.status["state"] = "Paused"
def resume(self):
if self.status["state"] == "Paused":
self.proc.send_signal(signal.SIGCONT)
with self.updatelock:
self.status["state"] = "Running"
@app.route("/run", methods=['POST'])
def runworkflow():
path = request.args["wf"]
with jobs_lock:
jobid = len(jobs)
job = Job(jobid, path, request.stream.read())
job.start()
jobs.append(job)
return redirect("/jobs/%i" % jobid, code=303)
@app.route("/jobs/<int:jobid>", methods=['GET', 'POST'])
def jobcontrol(jobid):
with jobs_lock:
job = jobs[jobid]
if request.method == 'POST':
action = request.args.get("action")
if action:
if action == "cancel":
job.cancel()
elif action == "pause":
job.pause()
elif action == "resume":
job.resume()
status = job.getstatus()
return json.dumps(status, indent=4), 200, ""
def logspooler(job):
with open(job.logname, "r") as f:
while True:
r = f.read(4096)
if r:
yield r
else:
with job.updatelock:
if job.status["state"] != "Running":
break
time.sleep(1)
@app.route("/jobs/<int:jobid>/log", methods=['GET'])
def getlog(jobid):
with jobs_lock:
job = jobs[jobid]
return Response(logspooler(job))
@app.route("/jobs", methods=['GET'])
def getjobs():
with jobs_lock:
jobscopy = copy.copy(jobs)
def spool(jc):
yield "["
first = True
for j in jc:
if first:
yield json.dumps(j.getstatus(), indent=4)
first = False
else:
yield ", " + json.dumps(j.getstatus(), indent=4)
yield "]"
return Response(spool(jobscopy))
if __name__ == "__main__":
# app.debug = True
app.run()
| [
"flask.request.args.get",
"json.loads",
"flask.Flask",
"flask.request.stream.read",
"threading.Lock",
"subprocess.Popen",
"json.dumps",
"yaml.load",
"time.sleep",
"flask.redirect",
"tempfile.mkdtemp",
"copy.copy",
"tempfile.mkstemp"
] | [((173, 188), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (178, 188), False, 'from flask import Flask, Response, request, redirect\n'), ((202, 218), 'threading.Lock', 'threading.Lock', ([], {}), '()\n', (216, 218), False, 'import threading\n'), ((2719, 2757), 'flask.redirect', 'redirect', (["('/jobs/%i' % jobid)"], {'code': '(303)'}), "('/jobs/%i' % jobid, code=303)\n", (2727, 2757), False, 'from flask import Flask, Response, request, redirect\n'), ((454, 470), 'threading.Lock', 'threading.Lock', ([], {}), '()\n', (468, 470), False, 'import threading\n'), ((548, 566), 'tempfile.mkstemp', 'tempfile.mkstemp', ([], {}), '()\n', (564, 566), False, 'import tempfile\n'), ((2936, 2962), 'flask.request.args.get', 'request.args.get', (['"""action"""'], {}), "('action')\n", (2952, 2962), False, 'from flask import Flask, Response, request, redirect\n'), ((3217, 3245), 'json.dumps', 'json.dumps', (['status'], {'indent': '(4)'}), '(status, indent=4)\n', (3227, 3245), False, 'import json\n'), ((3829, 3844), 'copy.copy', 'copy.copy', (['jobs'], {}), '(jobs)\n', (3838, 3844), False, 'import copy\n'), ((623, 641), 'tempfile.mkdtemp', 'tempfile.mkdtemp', ([], {}), '()\n', (639, 641), False, 'import tempfile\n'), ((666, 816), 'subprocess.Popen', 'subprocess.Popen', (["['cwl-runner', self.path, '-']"], {'stdin': 'subprocess.PIPE', 'stdout': 'subprocess.PIPE', 'stderr': 'loghandle', 'close_fds': '(True)', 'cwd': 'self.outdir'}), "(['cwl-runner', self.path, '-'], stdin=subprocess.PIPE,\n stdout=subprocess.PIPE, stderr=loghandle, close_fds=True, cwd=self.outdir)\n", (682, 816), False, 'import subprocess\n'), ((1500, 1550), 'yaml.load', 'yaml.load', (['self.stdoutdata'], {'Loader': 'yaml.FullLoader'}), '(self.stdoutdata, Loader=yaml.FullLoader)\n', (1509, 1550), False, 'import yaml\n'), ((2640, 2661), 'flask.request.stream.read', 'request.stream.read', ([], {}), '()\n', (2659, 2661), False, 'from flask import Flask, Response, request, redirect\n'), ((1282, 1307), 'json.loads', 'json.loads', (['self.inputobj'], {}), '(self.inputobj)\n', (1292, 1307), False, 'import json\n'), ((3565, 3578), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (3575, 3578), False, 'import time\n')] |
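A hedged client sketch for the job service above, assuming it is running locally on Flask's default port 5000 and that the requests package is installed; the workflow path and input document are placeholders.
# Illustrative client only; endpoint names match the routes defined above.
import json
import requests  # assumed available; not used by the service itself
base = "http://localhost:5000"
# Submit a run: the CWL path goes in the "wf" query arg, the JSON input object in the body
# (the service reads it via request.stream.read()).
inputs = json.dumps({"message": "hello"})
resp = requests.post(base + "/run", params={"wf": "echo.cwl"}, data=inputs)
print(resp.url)            # requests follows the 303 redirect to /jobs/<id>
status = resp.json()       # status document produced by Job.getstatus()
job_url = status["id"]
print(requests.get(job_url).json()["state"])
print(requests.get(job_url + "/log").text)   # streams until the job leaves the Running state
requests.post(job_url, params={"action": "cancel"})      # cancel it if still running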
from sys import argv
from PyPDF2 import PdfFileReader, PdfFileWriter
import re
range_pattern = re.compile(r'(\d+)(\.\.|-)(\d+)')
comma_pattern = re.compile(r'\d+(,\d+)*')
def pages_args_to_array(pages_str):
groups = range_pattern.search(pages_str)
if groups:
start = int(groups.group(1))
end = int(groups.group(3))
return list(range(start, end + 1))
elif comma_pattern.search(pages_str):
return [int(d) for d in pages_str.split(',')]
else:
raise Exception('pages should be like 1,2,3 or 1-3, but was {}'
.format(pages_str))
if __name__ == '__main__':
    assert(len(argv) > 1), "usage example:\npython3 selective_merge_pdf.py file1.pdf 1-3 file2.pdf 3,4,10 file1.pdf 50"
assert(len(argv) % 2 == 1), "invalid arguments; supply page numbers after each pdf name"
files_names = argv[1::2]
pages_args = argv[2::2]
pdf_writer = PdfFileWriter()
for file_name, pages in zip(files_names, pages_args):
pdf_reader = PdfFileReader(file_name)
last_page_index = pdf_reader.getNumPages()
pages = pages_args_to_array(pages)
        # page numbers are 1-based, so drop anything below 1 or above the page count
        pages_to_add = list(filter(lambda i: 1 <= i <= last_page_index, pages))
for page in pages_to_add:
pdf_writer.addPage(pdf_reader.getPage(page - 1))
with open("merged.pdf", 'wb') as out:
pdf_writer.write(out)
| [
"PyPDF2.PdfFileWriter",
"PyPDF2.PdfFileReader",
"re.compile"
] | [((97, 133), 're.compile', 're.compile', (['"""(\\\\d+)(\\\\.\\\\.|-)(\\\\d+)"""'], {}), "('(\\\\d+)(\\\\.\\\\.|-)(\\\\d+)')\n", (107, 133), False, 'import re\n'), ((147, 173), 're.compile', 're.compile', (['"""\\\\d+(,\\\\d+)*"""'], {}), "('\\\\d+(,\\\\d+)*')\n", (157, 173), False, 'import re\n'), ((845, 860), 'PyPDF2.PdfFileWriter', 'PdfFileWriter', ([], {}), '()\n', (858, 860), False, 'from PyPDF2 import PdfFileReader, PdfFileWriter\n'), ((931, 955), 'PyPDF2.PdfFileReader', 'PdfFileReader', (['file_name'], {}), '(file_name)\n', (944, 955), False, 'from PyPDF2 import PdfFileReader, PdfFileWriter\n')] |
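A quick check of the page-range parser above; the module name is taken from the script's own usage string, and the sample file names are placeholders.
# Importing is safe: the asserts and merging sit under the __main__ guard.
from selective_merge_pdf import pages_args_to_array
print(pages_args_to_array("1-3"))      # [1, 2, 3]
print(pages_args_to_array("1..3"))     # [1, 2, 3]  -- range_pattern also accepts ".."
print(pages_args_to_array("3,4,10"))   # [3, 4, 10]
print(pages_args_to_array("50"))       # [50]
# Typical invocation (alternating <pdf> <pages> pairs), writing merged.pdf:
#   python3 selective_merge_pdf.py file1.pdf 1-3 file2.pdf 3,4,10 file1.pdf 50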
import math
from vp import geom_tools
def horizon_error(ground_truth_horizon, detected_horizon, image_dims):
"""Calculates error in a detected horizon.
This measures the max distance between the detected horizon line and
the ground truth horizon line, within the image's x-axis, and
normalized by image height.
Args:
ground_truth_horizon: Tuple with (slope, intercept) for the GT horizon line.
detected_horizon: Tuple with (slope, intercept) for the detected horizon line.
image_dims: Tuple of integers, (width, height) of the image, in pixels.
Returns:
Float, or None if a horizon is missing altogether.
"""
if ground_truth_horizon is None or detected_horizon is None:
return None
def gt(x):
return ground_truth_horizon[0] * x + ground_truth_horizon[1]
def dt(x):
return detected_horizon[0] * x + detected_horizon[1]
width, height = image_dims
return max(abs(gt(0) - dt(0)), abs(gt(width) - dt(width))) / height
def vp_direction_error(ground_truth_vps, detected_vps, image_dims):
"""Measures error in direction from center of detected vanishing points.
Each detected VP is matched with its closest unclaimed ground truth VP.
Args:
ground_truth_vps: List of ground truth VP point tuples.
detected_vps: List of detected VP point tuples.
image_dims: Tuple of integers, (width, height) of the image, in pixels.
Returns:
List with float degrees of error for each ground truth VP.
Error is None for missing VPs.
"""
principal_point = (image_dims[0] // 2, image_dims[1] // 2)
point_pair_dists = []
for gt_vp in ground_truth_vps:
for dt_vp in detected_vps:
gt_angle = geom_tools.get_line_angle((
principal_point[0], principal_point[1], gt_vp[0], gt_vp[1]))
dt_angle = geom_tools.get_line_angle((
principal_point[0], principal_point[1], dt_vp[0], dt_vp[1]))
angle_diff = 180 - abs(abs(gt_angle - dt_angle) - 180)
point_pair_dists.append((angle_diff, gt_vp, dt_vp))
point_pair_dists = sorted(point_pair_dists, key=lambda k: k[0])
gt_vp_to_error = {}
seen_dt_vps = set()
for distance, gt_vp, dt_vp in point_pair_dists:
if gt_vp in gt_vp_to_error or dt_vp in seen_dt_vps:
continue
gt_vp_to_error[gt_vp] = distance
seen_dt_vps.add(dt_vp)
return [gt_vp_to_error.get(gt, None) for gt in ground_truth_vps]
def location_accuracy_error(ground_truth_vps, detected_vps):
"""Measures average error in the location of detected vanishing points.
"Missed" or "extra" VPs do not count against the score.
Based on log distance of detected vp from ground truth vp.
Args:
ground_truth_vps: List of ground truth VP point tuples.
detected_vps: List of detected VP point tuples.
Returns:
Float, error.
"""
if len(ground_truth_vps) == 0 or len(detected_vps) == 0:
return 0
point_pair_dists = []
for gt_vp in ground_truth_vps:
for dt_vp in detected_vps:
distance = geom_tools.point_to_point_dist(gt_vp, dt_vp)
point_pair_dists.append((distance, gt_vp, dt_vp))
    # keep the sorted result: sorted() returns a new list, and the greedy matching below relies on it
    point_pair_dists = sorted(point_pair_dists, key=lambda k: k[0])
seen_gt_vps = set()
seen_dt_vps = set()
total_error = 0
for distance, gt_vp, dt_vp in point_pair_dists:
if gt_vp in seen_gt_vps or dt_vp in seen_dt_vps:
continue
seen_gt_vps.add(gt_vp)
seen_dt_vps.add(dt_vp)
if distance > 0:
total_error += math.log(distance)
return total_error / min(len(detected_vps), len(ground_truth_vps))
def num_model_detection_error(ground_truth_vps, detected_vps):
"""Measures error in the number of detected vanishing points.
Returns:
Integer, positive when there are too many VPs, negative
when there are too few.
"""
return len(detected_vps) - len(ground_truth_vps)
| [
"vp.geom_tools.get_line_angle",
"vp.geom_tools.point_to_point_dist",
"math.log"
] | [((1771, 1862), 'vp.geom_tools.get_line_angle', 'geom_tools.get_line_angle', (['(principal_point[0], principal_point[1], gt_vp[0], gt_vp[1])'], {}), '((principal_point[0], principal_point[1], gt_vp[0],\n gt_vp[1]))\n', (1796, 1862), False, 'from vp import geom_tools\n'), ((1899, 1990), 'vp.geom_tools.get_line_angle', 'geom_tools.get_line_angle', (['(principal_point[0], principal_point[1], dt_vp[0], dt_vp[1])'], {}), '((principal_point[0], principal_point[1], dt_vp[0],\n dt_vp[1]))\n', (1924, 1990), False, 'from vp import geom_tools\n'), ((3164, 3208), 'vp.geom_tools.point_to_point_dist', 'geom_tools.point_to_point_dist', (['gt_vp', 'dt_vp'], {}), '(gt_vp, dt_vp)\n', (3194, 3208), False, 'from vp import geom_tools\n'), ((3634, 3652), 'math.log', 'math.log', (['distance'], {}), '(distance)\n', (3642, 3652), False, 'import math\n')] |
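A small sanity check of the horizon metric above with hand-picked numbers, assuming the functions above are in scope.
image_dims = (640, 480)          # (width, height) in pixels
gt_horizon = (0.1, 100.0)        # y = 0.1*x + 100
dt_horizon = (0.1, 120.0)        # same slope, shifted 20 px down
# The endpoint distance is 20 px at both x=0 and x=640, normalized by the 480 px height:
print(horizon_error(gt_horizon, dt_horizon, image_dims))   # 0.041666...
print(horizon_error(gt_horizon, None, image_dims))         # None (missing detection)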
import numpy as np
import argparse
import composition
import os
import json
import torch
from spinup.algos.pytorch.ppo.core import MLPActorCritic
from spinup.algos.pytorch.ppo.ppo import ppo
from spinup.utils.run_utils import setup_logger_kwargs
from spinup.utils.mpi_tools import proc_id, num_procs
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('--data-dir', default='spinningup_training/logs')
parser.add_argument('--load-dir', default=None)
parser.add_argument('--gridsearch-id', type=int, default=-1)
parser.add_argument('--task-id', type=int, default=-1)
parser.add_argument('--hid', type=int, default=256)
parser.add_argument('--l', type=int, default=2)
parser.add_argument('--gamma', type=float, default=0.99)
parser.add_argument('--seed', '-s', type=int, default=4)
parser.add_argument('--cpu', type=int, default=4)
parser.add_argument('--steps', type=int, default=16000)
parser.add_argument('--epochs', type=int, default=625)
parser.add_argument('--exp-name', type=str, default='ppo')
parser.add_argument('--clip', type=float, default=0.2)
parser.add_argument('--pi-lr', type=float, default=1e-4)
parser.add_argument('--vf-lr', type=float, default=1e-4)
parser.add_argument('--pi-iters', type=int, default=128)
parser.add_argument('--vf-iters', type=int, default=128)
parser.add_argument('--target-kl', type=float, default=0.02)
parser.add_argument('--ent-coef', type=float, default=0.02)
parser.add_argument('--log-std-init', type=float, default=0.)
parser.add_argument('--controller', type=str, default="joint")
parser.add_argument('--robot', type=str, default="IIWA")
parser.add_argument('--object', type=str, default="Hollowbox")
parser.add_argument('--obstacle', type=str, default=None)
parser.add_argument('--task', type=str, default="PickPlace")
parser.add_argument('--horizon', type=int, default=500)
args = parser.parse_args()
np.random.seed(args.seed)
task_list = np.random.choice(256, num_procs(), replace=False)
args.task_id = int(task_list[proc_id()])
_robots = ["IIWA", "Jaco", "Kinova3", "Panda"]
_objects = ["Box", "Dumbbell", "Plate", "Hollowbox"]
_objectives = ["PickPlace", "Push", "Shelf", "Trashcan"]
_obstacles = ["None", "GoalWall", "ObjectDoor", "ObjectWall"]
idx = np.unravel_index(args.task_id, (len(_robots), len(_objects), len(_objectives), len(_obstacles)))
args.robot = _robots[idx[0]]
args.object = _objects[idx[1]]
args.task = _objectives[idx[2]]
args.obstacle = _obstacles[idx[3]]
# args.exp_name = "t:" + str(args.task_id) + "_name:" + args.exp_name + "_robot:" + str(args.robot) + "_task:" + str(args.task) + "_object:" + str(args.object) + "_obstacle:" + str(args.obstacle)
args.exp_name = 'MTL_{}'.format(len(task_list))
return args
def main():
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
torch.set_num_threads(1)
args = parse_args()
os.makedirs(os.path.join(args.data_dir, args.exp_name), exist_ok=True)
with open(os.path.join(args.data_dir, args.exp_name, 'args_{}.json'.format(proc_id())), 'w') as f:
json.dump(args.__dict__, f, indent=2)
logger_kwargs = setup_logger_kwargs(
args.exp_name, data_dir=args.data_dir)
checkpoint = None
if args.load_dir is not None:
checkpoint = torch.load(os.path.join(args.load_dir, 'pyt_save', 'state_dicts.pt'))
ppo(lambda: composition.make(
args.robot, args.object, args.obstacle, args.task, args.controller, args.horizon, use_task_id_obs=True), actor_critic=MLPActorCritic,
ac_kwargs=dict(hidden_sizes=[args.hid]*args.l, log_std_init=args.log_std_init), seed=args.seed, gamma=args.gamma, steps_per_epoch=args.steps, epochs=args.epochs, clip_ratio=args.clip,
pi_lr=args.pi_lr, vf_lr=args.vf_lr, train_pi_iters=args.pi_iters, train_v_iters=args.vf_iters, target_kl=args.target_kl,
logger_kwargs=logger_kwargs, max_ep_len=args.horizon, ent_coef=args.ent_coef, log_per_proc=True, checkpoint=checkpoint)
if __name__ == '__main__':
main()
| [
"argparse.ArgumentParser",
"spinup.utils.run_utils.setup_logger_kwargs",
"spinup.utils.mpi_tools.num_procs",
"os.path.join",
"torch.set_num_threads",
"numpy.random.seed",
"composition.make",
"spinup.utils.mpi_tools.proc_id",
"json.dump"
] | [((335, 360), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (358, 360), False, 'import argparse\n'), ((1997, 2022), 'numpy.random.seed', 'np.random.seed', (['args.seed'], {}), '(args.seed)\n', (2011, 2022), True, 'import numpy as np\n'), ((2998, 3022), 'torch.set_num_threads', 'torch.set_num_threads', (['(1)'], {}), '(1)\n', (3019, 3022), False, 'import torch\n'), ((3293, 3351), 'spinup.utils.run_utils.setup_logger_kwargs', 'setup_logger_kwargs', (['args.exp_name'], {'data_dir': 'args.data_dir'}), '(args.exp_name, data_dir=args.data_dir)\n', (3312, 3351), False, 'from spinup.utils.run_utils import setup_logger_kwargs\n'), ((2061, 2072), 'spinup.utils.mpi_tools.num_procs', 'num_procs', ([], {}), '()\n', (2070, 2072), False, 'from spinup.utils.mpi_tools import proc_id, num_procs\n'), ((3064, 3106), 'os.path.join', 'os.path.join', (['args.data_dir', 'args.exp_name'], {}), '(args.data_dir, args.exp_name)\n', (3076, 3106), False, 'import os\n'), ((3234, 3271), 'json.dump', 'json.dump', (['args.__dict__', 'f'], {'indent': '(2)'}), '(args.__dict__, f, indent=2)\n', (3243, 3271), False, 'import json\n'), ((2123, 2132), 'spinup.utils.mpi_tools.proc_id', 'proc_id', ([], {}), '()\n', (2130, 2132), False, 'from spinup.utils.mpi_tools import proc_id, num_procs\n'), ((3450, 3507), 'os.path.join', 'os.path.join', (['args.load_dir', '"""pyt_save"""', '"""state_dicts.pt"""'], {}), "(args.load_dir, 'pyt_save', 'state_dicts.pt')\n", (3462, 3507), False, 'import os\n'), ((3526, 3651), 'composition.make', 'composition.make', (['args.robot', 'args.object', 'args.obstacle', 'args.task', 'args.controller', 'args.horizon'], {'use_task_id_obs': '(True)'}), '(args.robot, args.object, args.obstacle, args.task, args.\n controller, args.horizon, use_task_id_obs=True)\n', (3542, 3651), False, 'import composition\n'), ((3202, 3211), 'spinup.utils.mpi_tools.proc_id', 'proc_id', ([], {}), '()\n', (3209, 3211), False, 'from spinup.utils.mpi_tools import proc_id, num_procs\n')] |
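The task-id decoding above is just an index into a 4x4x4x4 grid; a standalone numpy-only sketch of that mapping:
import numpy as np
_robots = ["IIWA", "Jaco", "Kinova3", "Panda"]
_objects = ["Box", "Dumbbell", "Plate", "Hollowbox"]
_objectives = ["PickPlace", "Push", "Shelf", "Trashcan"]
_obstacles = ["None", "GoalWall", "ObjectDoor", "ObjectWall"]
task_id = 137                                  # any value in [0, 255]
idx = np.unravel_index(task_id, (4, 4, 4, 4))
print(_robots[idx[0]], _objects[idx[1]], _objectives[idx[2]], _obstacles[idx[3]])
# -> Kinova3 Box Shelf GoalWall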
"""igvm - The command line interface
Copyright (c) 2017 InnoGames GmbH
"""
from __future__ import print_function
from argparse import ArgumentParser, _SubParsersAction
from logging import StreamHandler, root as root_logger
import time
from fabric.network import disconnect_all
from igvm.commands import (
change_address,
disk_set,
evacuate,
host_info,
mem_set,
vcpu_set,
vm_build,
vm_delete,
vm_migrate,
vm_rename,
vm_restart,
vm_start,
vm_stop,
vm_sync, vm_define,
)
from igvm.libvirt import close_virtconns
class ColorFormatters():
BOLD = '\033[1m{}\033[0m'
WARNING = '\033[1;33m{}\033[0m'
ERROR = '\033[1;31m{}\033[0m'
CRITICAL = '\033[1;41m{}\033[0m'
class IGVMArgumentParser(ArgumentParser):
def format_help(self):
if not any(isinstance(a, _SubParsersAction) for a in self._actions):
return super(IGVMArgumentParser, self).format_help()
out = []
out.append(ColorFormatters.BOLD.format(__doc__))
out.append('Available commands:\n')
subparsers_actions = [
action for action in self._actions
if isinstance(action, _SubParsersAction)
]
# There will probably only be one subparser_action, but better safe
# than sorry.
for subparsers_action in subparsers_actions:
# Get all subparsers and print help
for choice, subparser in subparsers_action.choices.items():
out.append(ColorFormatters.BOLD.format(choice))
if subparser.get_default('func').__doc__:
out.append('\n'.join(
'\t{}'.format(l.strip()) for l in subparser
.get_default('func').__doc__.strip().splitlines()
))
out.append('\n\t{}'.format(subparser.format_usage()))
return '\n'.join(out)
class IGVMLogHandler(StreamHandler):
"""Extend StreamHandler to format messages short-cutting Formatters"""
def __init__(self, *args, **kwargs):
super(IGVMLogHandler, self).__init__(*args, **kwargs)
self.isatty = self.stream.isatty()
def format(self, record):
level = record.levelname
msg = '{}: {}: {}'.format(level, record.name, record.getMessage())
if self.isatty and level in vars(ColorFormatters):
msg = getattr(ColorFormatters, level).format(msg)
return msg
def parse_args():
top_parser = IGVMArgumentParser('igvm')
top_parser.add_argument('--silent', '-s', action='count', default=0)
top_parser.add_argument('--verbose', '-v', action='count', default=0)
subparsers = top_parser.add_subparsers(help='Actions')
subparser = subparsers.add_parser(
'build',
description=vm_build.__doc__,
)
subparser.set_defaults(func=vm_build)
subparser.add_argument(
'vm_hostname',
help='Hostname of the guest system',
)
subparser.add_argument(
'--postboot',
metavar='postboot_script',
help='Run postboot_script on the guest after first boot',
)
subparser.add_argument(
'--skip-puppet',
action='store_false',
dest='run_puppet',
help='Skip running puppet in chroot before powering up',
)
subparser.add_argument(
'--debug-puppet',
action='store_true',
help='Run puppet in debug mode',
)
subparser.add_argument(
'--ignore-reserved',
dest='allow_reserved_hv',
action='store_true',
help='Allow building on a Host which has the state online_reserved',
)
subparser.add_argument(
'--rebuild',
dest='rebuild',
action='store_true',
help='Rebuild already defined VM or build it if not defined',
)
subparser.add_argument(
'--soft-preferences',
dest='soft_preferences',
action='store_true',
help='Overrules all preferences so that Hypervisors are not excluded. '
'Use this if igvm fails to find a matching Hypervisor, but you '
'are in urgent need to do it anyway. Hint: If igvm fails to find '
'a matching Hypervisor something might be really wrong. Run igvm '
'with --verbose to check why it fails finding a Hypervisor.',
)
subparser = subparsers.add_parser(
'migrate',
description=vm_migrate.__doc__,
)
subparser.set_defaults(func=vm_migrate)
subparser.add_argument(
'vm_hostname',
help='Hostname of the guest system',
)
subparser.add_argument(
'hypervisor_hostname',
nargs='?',
default=None,
help='Hostname of destination hypervisor',
)
subparser.add_argument(
'--run-puppet',
action='store_true',
help='Run puppet in chroot before powering up',
)
subparser.add_argument(
'--debug-puppet',
action='store_true',
help='Run puppet in debug mode',
)
subparser.add_argument(
'--offline',
action='store_true',
help='Force offline migration',
)
subparser.add_argument(
'--ignore-reserved',
dest='allow_reserved_hv',
action='store_true',
help='Allow migration to a Host which has the state online_reserved',
)
subparser.add_argument(
'--offline-transport',
default='drbd',
choices=('drbd', 'netcat', 'xfs'),
help=(
'Specify drbd (default), netcat or xfs transport to migrate '
'disk image'
),
)
subparser.add_argument(
'--no-shutdown',
action='store_true',
help=(
'Don\'t shutdown VM during offline migration, igvm will wait for'
' operator to shut down VM for 24h.'
),
)
subparser.add_argument(
'--enforce-vm-env',
dest='enforce_vm_env',
action='store_true',
help='Build or migrate VM only to a HV with the same environment of VM'
)
subparser.add_argument(
'--disk-size',
dest='disk_size',
type=int,
help='Resize disk of migrated VM. Expects new size in GiB. '
'Works only with --offline --offline-transport=xfs',
)
subparser.add_argument(
'--soft-preferences',
dest='soft_preferences',
action='store_true',
help='Overrules all preferences so that Hypervisors are not excluded. '
'Use this if igvm fails to find a matching Hypervisor, but you '
'are in urgent need to do it anyway. Hint: If igvm fails to find '
'a matching Hypervisor something might be really wrong. Run igvm '
'with --verbose to check why it fails finding a Hypervisor.',
)
subparser = subparsers.add_parser(
'change-address',
description=disk_set.__doc__,
)
subparser.set_defaults(func=change_address)
subparser.add_argument(
'vm_hostname',
help='Hostname of the guest system',
)
subparser.add_argument(
'new_address',
help=(
'New IPv4 address of VM'
)
)
subparser.add_argument(
'--offline',
action='store_true',
help='Perform IP address change offline',
)
subparser.add_argument(
'--migrate',
action='store_true',
help='Migrate VM to new HV while changing IP address',
)
subparser.add_argument(
'--ignore-reserved',
dest='allow_reserved_hv',
action='store_true',
help='Allow migration to a Host which has the state online_reserved',
)
subparser.add_argument(
'--offline-transport',
default='drbd',
help=(
'Specify drbd (default) or netcat transport to migrate disk image'
),
)
subparser = subparsers.add_parser(
'disk-set',
description=disk_set.__doc__,
)
subparser.set_defaults(func=disk_set)
subparser.add_argument(
'vm_hostname',
help='Hostname of the guest system',
)
subparser.add_argument(
'size',
help=(
'New disk size with an optional unit (default GiB). '
'Can be specified relative with "+". Only integers are allowed'
)
)
subparser = subparsers.add_parser(
'mem-set',
description=mem_set.__doc__,
)
subparser.set_defaults(func=mem_set)
subparser.add_argument(
'vm_hostname',
help='Hostname of the guest system',
)
subparser.add_argument(
'size',
help=(
'New memory size with optional unit (default is MiB).'
'Only integers are allowed.'
),
)
subparser.add_argument(
'--offline',
action='store_true',
help='Shutdown VM, change memory, and restart VM',
)
subparser = subparsers.add_parser(
'vcpu-set',
description=vcpu_set.__doc__,
)
subparser.set_defaults(func=vcpu_set)
subparser.add_argument(
'vm_hostname',
help='Hostname of the guest system',
)
subparser.add_argument(
'count',
type=int,
help='New number of CPUs',
)
subparser.add_argument(
'--offline',
action='store_true',
help='Shutdown VM, change CPUs, and restart VM',
)
subparser = subparsers.add_parser(
'start',
description=vm_start.__doc__,
)
subparser.set_defaults(func=vm_start)
subparser.add_argument(
'vm_hostname',
help='Hostname of the guest system',
)
subparser.add_argument(
'--unretire',
nargs='?',
const='maintenance',
help='Unretire a VM, set it to given state, maintenance by default',
)
subparser = subparsers.add_parser(
'stop',
description=vm_stop.__doc__,
)
subparser.set_defaults(func=vm_stop)
subparser.add_argument(
'vm_hostname',
help='Hostname of the guest system',
)
subparser.add_argument(
'--force',
action='store_true',
help='Do not wait for guest to shutdown gracefully',
)
subparser.add_argument(
'--retire',
action='store_true',
help='Retire VM after stopping it',
)
subparser = subparsers.add_parser(
'restart',
description=vm_restart.__doc__,
)
subparser.set_defaults(func=vm_restart)
subparser.add_argument(
'vm_hostname',
help='Hostname of the guest system',
)
subparser.add_argument(
'--force',
action='store_true',
help='Do not wait for guest to shutdown gracefully',
)
subparser.add_argument(
'--no-redefine',
action='store_true',
help='Do not redefine the domain to use latest hypervisor settings',
)
subparser = subparsers.add_parser(
'delete',
description=vm_delete.__doc__,
)
subparser.set_defaults(func=vm_delete)
subparser.add_argument(
'vm_hostname',
help='Hostname of the guest system',
)
subparser.add_argument(
'--retire',
action='store_true',
help='Set VM state to "retired" on Serveradmin instead of deleting',
)
subparser = subparsers.add_parser(
'info',
description=host_info.__doc__,
)
subparser.set_defaults(func=host_info)
subparser.add_argument(
'vm_hostname',
help='Hostname of the guest system',
)
subparser = subparsers.add_parser(
'sync',
description=vm_sync.__doc__,
)
subparser.set_defaults(func=vm_sync)
subparser.add_argument(
'vm_hostname',
help='Hostname of the guest system',
)
subparser = subparsers.add_parser(
'rename',
description=vm_rename.__doc__,
)
subparser.set_defaults(func=vm_rename)
subparser.add_argument(
'vm_hostname',
help='Hostname of the guest system',
)
subparser.add_argument(
'new_hostname',
help='New hostname',
)
subparser.add_argument(
'--offline',
action='store_true',
help='Shutdown VM, if running',
)
subparser = subparsers.add_parser(
'evacuate',
description=evacuate.__doc__,
)
subparser.set_defaults(func=evacuate)
subparser.add_argument(
'hv_hostname',
help='Hostname of the hypervisor',
)
subparser.add_argument(
'dst_hv_hostname',
nargs='?',
default=None,
help='Hostname of destination hypervisor',
)
subparser.add_argument(
'--dry-run',
action='store_true',
help='Do not migrate but just print what would be done'
)
subparser.add_argument(
'--offline',
nargs='*',
help='Migrate VMs matching the given serveradmin function offline',
)
subparser.add_argument(
'--ignore-reserved',
dest='allow_reserved_hv',
action='store_true',
help='Allow migrating to a host which has the state online_reserved',
)
subparser.add_argument(
'--soft-preferences',
dest='soft_preferences',
action='store_true',
help='Overrules all preferences so that Hypervisors are not excluded. '
'Use this if igvm fails to find a matching Hypervisor, but you '
'are in urgent need to do it anyway. Hint: If igvm fails to find '
'a matching Hypervisor something might be really wrong. Run igvm '
'with --verbose to check why it fails finding a Hypervisor.',
)
subparser = subparsers.add_parser(
'define',
description=vm_define.__doc__,
)
subparser.set_defaults(func=vm_define)
subparser.add_argument('vm_hostname', help='Hostname of the guest system')
return vars(top_parser.parse_args())
def main():
args = parse_args()
configure_root_logger(args.pop('silent'), args.pop('verbose'))
try:
args.pop('func')(**args)
finally:
# Fabric requires the disconnect function to be called after every
# use. We are also taking our chance to disconnect from
# the hypervisors.
disconnect_all()
close_virtconns()
# The underlying library of Fabric, Paramiko, raises an error, on
# destruction right after the disconnect function is called. We are
# sleeping for a little while to avoid this.
time.sleep(0.1)
def configure_root_logger(silent, verbose):
root_logger.addHandler(IGVMLogHandler())
# We are summing up the silent and verbose arguments in here. It
# is not really meaningful to use them both, but giving an error is not
# better. See Python logging library documentation [1] for the levels.
# Paramiko is overly verbose. We configure it for one level higher.
#
# [1] https://docs.python.org/library/logging.html#logging-levels
level = 20 + (silent - verbose) * 10
root_logger.setLevel(level)
root_logger.getChild('paramiko').setLevel(level + 10)
| [
"igvm.libvirt.close_virtconns",
"fabric.network.disconnect_all",
"time.sleep",
"logging.root.getChild",
"logging.root.setLevel"
] | [((15035, 15062), 'logging.root.setLevel', 'root_logger.setLevel', (['level'], {}), '(level)\n', (15055, 15062), True, 'from logging import StreamHandler, root as root_logger\n'), ((14255, 14271), 'fabric.network.disconnect_all', 'disconnect_all', ([], {}), '()\n', (14269, 14271), False, 'from fabric.network import disconnect_all\n'), ((14280, 14297), 'igvm.libvirt.close_virtconns', 'close_virtconns', ([], {}), '()\n', (14295, 14297), False, 'from igvm.libvirt import close_virtconns\n'), ((14511, 14526), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (14521, 14526), False, 'import time\n'), ((15067, 15099), 'logging.root.getChild', 'root_logger.getChild', (['"""paramiko"""'], {}), "('paramiko')\n", (15087, 15099), True, 'from logging import StreamHandler, root as root_logger\n')] |
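A minimal standalone sketch of the set_defaults(func=...) dispatch pattern that main() relies on; the handler here is a stand-in, not the real igvm command.
from argparse import ArgumentParser
def start_handler(vm_hostname, unretire=None):   # stand-in for igvm.commands.vm_start
    print('would start', vm_hostname, 'unretire state:', unretire)
parser = ArgumentParser('demo')
subparsers = parser.add_subparsers(help='Actions')
subparser = subparsers.add_parser('start')
subparser.set_defaults(func=start_handler)
subparser.add_argument('vm_hostname')
subparser.add_argument('--unretire', nargs='?', const='maintenance')
args = vars(parser.parse_args(['start', 'vm42.example.com', '--unretire']))
args.pop('func')(**args)   # prints: would start vm42.example.com unretire state: maintenance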
import torch
import torchvision.transforms as transforms
from torch.utils.data import Dataset
import glob
from PIL import Image
import random
class SUN397EncodableDataset(Dataset):
"""SUN397 encodable dataset class"""
def __init__(self, train=True):
super().__init__()
path = 'data/SUN397/train/*/*.jpg' if train else 'data/SUN397/test/*/*.jpg'
self.data = list(glob.glob(path))
random.shuffle(self.data)
cats = list(set([path.split("/")[3] for path in self.data]))
cats.sort()
self.labels = torch.LongTensor([cats.index(path.split("/")[3]) for path in self.data])
self.preprocessor = transforms.Compose([
transforms.Resize((224, 224)),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
])
        self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
        # filled in externally with pre-computed features; while empty, raw images are served
        self.encoded_data = []
def __getitem__(self, idx):
if torch.is_tensor(idx):
idx = idx.tolist()
if len(self.encoded_data) == 0:
return self.preprocessor(Image.open(self.data[idx]).convert('RGB')), self.labels[idx]
return self.encoded_data[idx], self.labels[idx]
def __len__(self):
return len(self.labels)
def num_classes(self):
return int(max(self.labels) + 1)
| [
"PIL.Image.open",
"random.shuffle",
"torch.is_tensor",
"torch.cuda.is_available",
"torchvision.transforms.Normalize",
"torchvision.transforms.Resize",
"torchvision.transforms.ToTensor",
"glob.glob"
] | [((422, 447), 'random.shuffle', 'random.shuffle', (['self.data'], {}), '(self.data)\n', (436, 447), False, 'import random\n'), ((987, 1007), 'torch.is_tensor', 'torch.is_tensor', (['idx'], {}), '(idx)\n', (1002, 1007), False, 'import torch\n'), ((397, 412), 'glob.glob', 'glob.glob', (['path'], {}), '(path)\n', (406, 412), False, 'import glob\n'), ((693, 722), 'torchvision.transforms.Resize', 'transforms.Resize', (['(224, 224)'], {}), '((224, 224))\n', (710, 722), True, 'import torchvision.transforms as transforms\n'), ((736, 757), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (755, 757), True, 'import torchvision.transforms as transforms\n'), ((771, 846), 'torchvision.transforms.Normalize', 'transforms.Normalize', ([], {'mean': '[0.485, 0.456, 0.406]', 'std': '[0.229, 0.224, 0.225]'}), '(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\n', (791, 846), True, 'import torchvision.transforms as transforms\n'), ((905, 930), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (928, 930), False, 'import torch\n'), ((1118, 1144), 'PIL.Image.open', 'Image.open', (['self.data[idx]'], {}), '(self.data[idx])\n', (1128, 1144), False, 'from PIL import Image\n')] |
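A hedged usage sketch, assuming the SUN397 images are laid out under data/SUN397/{train,test}/<category>/*.jpg as the glob in __init__ expects.
from torch.utils.data import DataLoader
train_set = SUN397EncodableDataset(train=True)
print(len(train_set), 'images,', train_set.num_classes(), 'classes')
loader = DataLoader(train_set, batch_size=32, shuffle=True)
images, labels = next(iter(loader))
print(images.shape, labels.shape)   # torch.Size([32, 3, 224, 224]) torch.Size([32])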
import tensorflow
from tensorflow import keras
Model = keras.models.Model
Dense = keras.layers.Dense
Activation = keras.layers.Activation
Flatten = keras.layers.Flatten
BatchNormalization = keras.layers.BatchNormalization
Conv2D = tensorflow.keras.layers.Conv2D
AveragePooling2D = keras.layers.AveragePooling2D
Input = keras.layers.Input
l2 = keras.regularizers.l2
from tensorflow.keras import backend
def resnet_layer(inputs,
num_filters=16,
kernel_size=3,
strides=1,
activation='relu',
batch_normalization=True,
conv_first=True):
"""2D Convolution-Batch Normalization-Activation stack builder
# Arguments
inputs (tensor): input tensor from input image or previous layer
num_filters (int): Conv2D number of filters
kernel_size (int): Conv2D square kernel dimensions
strides (int): Conv2D square stride dimensions
activation (string): activation name
batch_normalization (bool): whether to include batch normalization
conv_first (bool): conv-bn-activation (True) or
bn-activation-conv (False)
# Returns
x (tensor): tensor as input to the next layer
"""
conv = Conv2D(
num_filters,
kernel_size=kernel_size,
strides=strides,
padding='same',
kernel_initializer='he_normal',
kernel_regularizer=l2(1e-4))
x = inputs
if conv_first:
x = conv(x)
if batch_normalization:
x = BatchNormalization()(x)
if activation is not None:
x = Activation(activation)(x)
else:
if batch_normalization:
x = BatchNormalization()(x)
if activation is not None:
x = Activation(activation)(x)
x = conv(x)
return x
def resnet_v2(input, complexityParameter, num_classes=10, dataset='cifar10'):
depth = complexityParameter * 9 + 2
if (depth - 2) % 9 != 0:
raise ValueError('depth should be 9n+2 (eg 56 or 110 in [b])')
# Start model definition.
num_filters_in = 16
num_res_blocks = int((depth - 2) / 9)
inputs = input
x = resnet_layer(inputs=inputs,
num_filters=num_filters_in,
conv_first=True)
# Instantiate the stack of residual units
for stage in range(3):
for res_block in range(num_res_blocks):
activation = 'relu'
batch_normalization = True
strides = 1
if stage == 0:
num_filters_out = num_filters_in * 4
if res_block == 0: # first layer and first stage
activation = None
batch_normalization = False
else:
num_filters_out = num_filters_in * 2
if res_block == 0: # first layer but not first stage
strides = 2 # downsample
# bottleneck residual unit
y = resnet_layer(inputs=x,
num_filters=num_filters_in,
kernel_size=1,
strides=strides,
activation=activation,
batch_normalization=batch_normalization,
conv_first=False)
y = resnet_layer(inputs=y,
num_filters=num_filters_in,
conv_first=False)
y = resnet_layer(inputs=y,
num_filters=num_filters_out,
kernel_size=1,
conv_first=False)
if res_block == 0:
# linear projection residual shortcut connection to match
# changed dims
x = resnet_layer(inputs=x,
num_filters=num_filters_out,
kernel_size=1,
strides=strides,
activation=None,
batch_normalization=False)
x = tensorflow.keras.layers.add([x, y])
num_filters_in = num_filters_out
# Add classifier on top.
# v2 has BN-ReLU before Pooling
x = BatchNormalization()(x)
x = Activation('relu')(x)
x = AveragePooling2D(pool_size=8)(x)
final_features = Flatten()(x)
logits = Dense(num_classes, kernel_initializer='he_normal')(final_features)
outputs = Activation('softmax')(logits)
# Instantiate model.
model = Model(inputs=inputs, outputs=outputs)
    return model, inputs, outputs, logits, final_features
| [
"tensorflow.keras.layers.add"
] | [((4292, 4327), 'tensorflow.keras.layers.add', 'tensorflow.keras.layers.add', (['[x, y]'], {}), '([x, y])\n', (4319, 4327), False, 'import tensorflow\n')] |
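A brief construction sketch for the builder above, assuming the definitions in this file are in scope; a CIFAR-10-sized input and complexityParameter=3 give the depth-29 member of the 9n+2 family.
inp = Input(shape=(32, 32, 3))
model, model_inputs, outputs, logits, features = resnet_v2(inp, complexityParameter=3, num_classes=10)
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
model.summary()   # depth = 3*9 + 2 = 29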
#!/usr/bin/env python3
import sys
import json
import rdflib
import rdflib.plugins.sparql as sparql
RELS_TO_DRAW = ['isWifeOf', 'isMotherOf', 'isFatherOf', 'isHusbandOf', 'isSpouseOf']
RELS_TO_INFER = ['hasGrandParent', 'isGrandParentOf', 'hasGreatGrandParent',
'isGreatGrandParentOf', 'isUncleOf', 'hasUncle',
'isGreatUncleOf', 'hasGreatUncle', 'isAuntOf', 'hasAunt',
'isGreatAuntOf', 'hasGreatAunt',
'isBrotherOf', 'isSisterOf', 'isSiblingOf',
'isFirstCousinOf', 'isSecondCousinOf', 'isThirdCousinOf']
RELS_OF_INTEREST = RELS_TO_DRAW + RELS_TO_INFER
try:
workpath = sys.argv[1]
except IndexError:
sys.exit("No path defined!")
try:
recursion_limit = int(sys.argv[2])
except IndexError:
recursion_limit = 0
if recursion_limit > 0:
sys.setrecursionlimit(recursion_limit)
g = rdflib.Graph()
g.parse(workpath, format="turtle")
fhkb_str = "http://www.example.com/genealogy.owl#"
schema_str = "https://schema.org/"
FHKB = rdflib.Namespace(fhkb_str)
SCHEMA_ORG = rdflib.Namespace(schema_str)
def dump(uriref):
    if '#' in uriref:
return uriref.split('#')[-1]
return uriref.split('/')[-1]
graph = {}
graph['nodes'] = []
graph['edges'] = []
nodes = {}
q = sparql.prepareQuery(
"""PREFIX fhkb:<http://www.example.com/genealogy.owl#>
SELECT ?person ?pred ?obj
WHERE {
?person a fhkb:Person ;
?pred ?obj .
}
ORDER BY ?person""")
for rel in RELS_OF_INTEREST:
pred = rdflib.URIRef("{}{}".format(fhkb_str, rel))
relation_query_results = g.query(q, initBindings={'pred': pred})
for (subj, pred, obj) in relation_query_results:
graph['edges'].append(
{
'data': {
'group': 'edges',
'id': f'{dump(subj)}-{dump(pred)}-{dump(obj)}',
'source': dump(subj),
'target': dump(obj),
'type': dump(pred)
}
})
q_details = sparql.prepareQuery(
"""PREFIX fhkb:<http://www.example.com/genealogy.owl#>
SELECT ?person ?pred ?obj
WHERE {
?person a fhkb:Person ;
?pred ?obj .
FILTER NOT EXISTS {
?person ?testPred ?obj .
VALUES ?testPred {
fhkb:isWifeOf
fhkb:isMotherOf
fhkb:isFatherOf
fhkb:isHusbandOf
fhkb:isSpouseOf
fhkb:hasGrandParent
fhkb:isGrandParentOf
fhkb:hasGreatGrandParent
fhkb:isGreatGrandParentOf
fhkb:isUncleOf
fhkb:hasUncle
fhkb:isGreatUncleOf
fhkb:hasGreatUncle
fhkb:isAuntOf
fhkb:hasAunt
fhkb:isGreatAuntOf
fhkb:hasGreatAunt
fhkb:isBrotherOf
fhkb:isSisterOf
fhkb:isSiblingOf
fhkb:isFirstCousinOf
fhkb:isSecondCousinOf
fhkb:isThirdCousinOf
fhkb:hasRelation
fhkb:isPartnerIn
fhkb:isMalePartnerIn
fhkb:isFemalePartnerIn
fhkb:isBloodrelationOf
}
}
}
ORDER BY ?person"""
)
person_query_results = g.query(q_details)
for (subj, pred, obj) in person_query_results:
node = nodes.get(dump(subj), {
'data': {
'label': '',
'degree': 0,
'size': 10,
'alternateNames': [],
'honorificPrefixes': [],
'honorificSuffixes': [],
'images': [],
'id': dump(subj),
}})
if pred == FHKB.Sex:
node['data'][dump(pred)] = dump(obj)
elif pred.startswith(SCHEMA_ORG):
if dump(pred) == 'honorificSuffix':
node['data']['honorificSuffixes'].append(obj)
elif dump(pred) == 'honorificPrefix':
node['data']['honorificPrefixes'].append(obj)
elif dump(pred) == 'alternateName':
node['data']['alternateNames'].append(obj)
elif dump(pred) == 'image':
node['data']['images'].append(obj)
else:
node['data'][dump(pred)] = obj
elif pred == rdflib.RDFS.label:
node['data']['label'] = obj
else:
continue
nodes[dump(subj)] = node
graph['nodes'] = list(nodes.values())
print(json.dumps(graph, indent=0))
sys.exit(0)
| [
"sys.setrecursionlimit",
"json.dumps",
"rdflib.Graph",
"rdflib.plugins.sparql.prepareQuery",
"sys.exit",
"rdflib.Namespace"
] | [((884, 898), 'rdflib.Graph', 'rdflib.Graph', ([], {}), '()\n', (896, 898), False, 'import rdflib\n'), ((1029, 1055), 'rdflib.Namespace', 'rdflib.Namespace', (['fhkb_str'], {}), '(fhkb_str)\n', (1045, 1055), False, 'import rdflib\n'), ((1069, 1097), 'rdflib.Namespace', 'rdflib.Namespace', (['schema_str'], {}), '(schema_str)\n', (1085, 1097), False, 'import rdflib\n'), ((1289, 1499), 'rdflib.plugins.sparql.prepareQuery', 'sparql.prepareQuery', (['"""PREFIX fhkb:<http://www.example.com/genealogy.owl#> \n SELECT ?person ?pred ?obj\n WHERE { \n ?person a fhkb:Person ; \n ?pred ?obj .\n } \n ORDER BY ?person"""'], {}), '(\n """PREFIX fhkb:<http://www.example.com/genealogy.owl#> \n SELECT ?person ?pred ?obj\n WHERE { \n ?person a fhkb:Person ; \n ?pred ?obj .\n } \n ORDER BY ?person"""\n )\n', (1308, 1499), True, 'import rdflib.plugins.sparql as sparql\n'), ((2043, 3171), 'rdflib.plugins.sparql.prepareQuery', 'sparql.prepareQuery', (['"""PREFIX fhkb:<http://www.example.com/genealogy.owl#>\n SELECT ?person ?pred ?obj\n WHERE { \n ?person a fhkb:Person ; \n ?pred ?obj . \n FILTER NOT EXISTS {\n ?person ?testPred ?obj .\n VALUES ?testPred {\n fhkb:isWifeOf\n fhkb:isMotherOf\n fhkb:isFatherOf\n fhkb:isHusbandOf\n fhkb:isSpouseOf\n fhkb:hasGrandParent\n fhkb:isGrandParentOf\n fhkb:hasGreatGrandParent\n fhkb:isGreatGrandParentOf\n fhkb:isUncleOf\n fhkb:hasUncle\n fhkb:isGreatUncleOf\n fhkb:hasGreatUncle\n fhkb:isAuntOf\n fhkb:hasAunt\n fhkb:isGreatAuntOf\n fhkb:hasGreatAunt\n fhkb:isBrotherOf\n fhkb:isSisterOf\n fhkb:isSiblingOf\n fhkb:isFirstCousinOf\n fhkb:isSecondCousinOf\n fhkb:isThirdCousinOf\n\n fhkb:hasRelation\n fhkb:isPartnerIn\n fhkb:isMalePartnerIn\n fhkb:isFemalePartnerIn\n fhkb:isBloodrelationOf\n }\n }\n} \nORDER BY ?person"""'], {}), '(\n """PREFIX fhkb:<http://www.example.com/genealogy.owl#>\n SELECT ?person ?pred ?obj\n WHERE { \n ?person a fhkb:Person ; \n ?pred ?obj . \n FILTER NOT EXISTS {\n ?person ?testPred ?obj .\n VALUES ?testPred {\n fhkb:isWifeOf\n fhkb:isMotherOf\n fhkb:isFatherOf\n fhkb:isHusbandOf\n fhkb:isSpouseOf\n fhkb:hasGrandParent\n fhkb:isGrandParentOf\n fhkb:hasGreatGrandParent\n fhkb:isGreatGrandParentOf\n fhkb:isUncleOf\n fhkb:hasUncle\n fhkb:isGreatUncleOf\n fhkb:hasGreatUncle\n fhkb:isAuntOf\n fhkb:hasAunt\n fhkb:isGreatAuntOf\n fhkb:hasGreatAunt\n fhkb:isBrotherOf\n fhkb:isSisterOf\n fhkb:isSiblingOf\n fhkb:isFirstCousinOf\n fhkb:isSecondCousinOf\n fhkb:isThirdCousinOf\n\n fhkb:hasRelation\n fhkb:isPartnerIn\n fhkb:isMalePartnerIn\n fhkb:isFemalePartnerIn\n fhkb:isBloodrelationOf\n }\n }\n} \nORDER BY ?person"""\n )\n', (2062, 3171), True, 'import rdflib.plugins.sparql as sparql\n'), ((4409, 4420), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (4417, 4420), False, 'import sys\n'), ((840, 878), 'sys.setrecursionlimit', 'sys.setrecursionlimit', (['recursion_limit'], {}), '(recursion_limit)\n', (861, 878), False, 'import sys\n'), ((4380, 4407), 'json.dumps', 'json.dumps', (['graph'], {'indent': '(0)'}), '(graph, indent=0)\n', (4390, 4407), False, 'import json\n'), ((695, 723), 'sys.exit', 'sys.exit', (['"""No path defined!"""'], {}), "('No path defined!')\n", (703, 723), False, 'import sys\n')] |
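A usage sketch with the definitions above in scope; the script and output file names are placeholders.
# The script takes a Turtle file path and an optional recursion limit, and prints a
# Cytoscape-style {"nodes": [...], "edges": [...]} JSON document on stdout:
#   python3 <this_script>.py family.ttl > graph.json
# The dump() helper simply strips the namespace portion of a URI:
import rdflib
print(dump(rdflib.URIRef("http://www.example.com/genealogy.owl#isWifeOf")))   # isWifeOf
print(dump(rdflib.URIRef("https://schema.org/honorificPrefix")))            # honorificPrefix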
from os import environ
import psycopg2
from datetime import timedelta
from dotenv import load_dotenv
load_dotenv()
class Config(object):
""" app configuration class """
TESTING = False
CSRF_ENABLED = True
SECRET_KEY = environ.get('SECRET_KEY')
USER = environ.get('DB_USER')
PASSWORD = environ.get('DB_PASSWORD')
DB_NAME = environ.get('DB_NAME')
HOST = environ.get('DB_HOST')
SQLALCHEMY_DATABASE_URI = f"postgresql://{USER}:{PASSWORD}@{HOST}/{DB_NAME}"
SQLALCHEMY_TRACK_MODIFICATIONS = False
    # JWT configuration for the user auth API
JWT_SECRET_KEY = environ.get('SECRET_KEY')
JWT_ACCESS_TOKEN_EXPIRES = timedelta(days=1)
# pagination
NUM_OF_ITEMS_PER_PAGE = 18
class DevelopmentConfig(Config):
""" app development configuration class """
ENV = "development"
DEBUG = True
SQLALCHEMY_TRACK_MODIFICATIONS = True
class ProductionConfig(Config):
DEBUG = False
SQLALCHEMY_TRACK_MODIFICATIONS = False
| [
"datetime.timedelta",
"os.environ.get",
"dotenv.load_dotenv"
] | [((101, 114), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (112, 114), False, 'from dotenv import load_dotenv\n'), ((235, 260), 'os.environ.get', 'environ.get', (['"""SECRET_KEY"""'], {}), "('SECRET_KEY')\n", (246, 260), False, 'from os import environ\n'), ((273, 295), 'os.environ.get', 'environ.get', (['"""DB_USER"""'], {}), "('DB_USER')\n", (284, 295), False, 'from os import environ\n'), ((311, 337), 'os.environ.get', 'environ.get', (['"""DB_PASSWORD"""'], {}), "('DB_PASSWORD')\n", (322, 337), False, 'from os import environ\n'), ((352, 374), 'os.environ.get', 'environ.get', (['"""DB_NAME"""'], {}), "('DB_NAME')\n", (363, 374), False, 'from os import environ\n'), ((386, 408), 'os.environ.get', 'environ.get', (['"""DB_HOST"""'], {}), "('DB_HOST')\n", (397, 408), False, 'from os import environ\n'), ((603, 628), 'os.environ.get', 'environ.get', (['"""SECRET_KEY"""'], {}), "('SECRET_KEY')\n", (614, 628), False, 'from os import environ\n'), ((660, 677), 'datetime.timedelta', 'timedelta', ([], {'days': '(1)'}), '(days=1)\n', (669, 677), False, 'from datetime import timedelta\n')] |
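A hedged sketch of how these config classes are typically consumed from a Flask app factory; the factory function and its argument are illustrative, not part of the module above.
# Assumes the .env file supplies SECRET_KEY, DB_USER, DB_PASSWORD, DB_NAME and DB_HOST as read above.
from flask import Flask
def create_app(env_name='development'):
    app = Flask(__name__)
    config_class = ProductionConfig if env_name == 'production' else DevelopmentConfig
    app.config.from_object(config_class)
    return app
app = create_app()
print(app.config['SQLALCHEMY_DATABASE_URI'])   # postgresql://<user>:<password>@<host>/<db>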
from __future__ import division
from math import sqrt
from itertools import product as product
import torch
import numpy as np
import cv2
from lib.utils.visualize_utils import TBWriter
def vis(func):
"""tensorboard visualization if has writer as input"""
def wrapper(*args, **kw):
return func(*args, **kw) if kw['tb_writer'] is not None else None
return wrapper
class PriorBoxBase(object):
"""Compute priorbox coordinates in center-offset form for each source
feature map.
"""
def __init__(self, cfg):
super(PriorBoxBase, self).__init__()
self.image_size = cfg.MODEL.IMAGE_SIZE
self._steps = cfg.MODEL.STEPS
self._cfg_list = []
self._prior_cfg = {}
self._clip = cfg.MODEL.CLIP
self._variance = cfg.MODEL.VARIANCE
for v in self._variance:
if v <= 0:
raise ValueError('Variances must be greater than 0')
def _setup(self, cfg):
num_feat = len(self._steps)
for item in self._cfg_list:
if item not in cfg.MODEL:
raise Exception("wrong anchor config!")
if len(cfg.MODEL[item]) != num_feat and len(cfg.MODEL[item]) != 0:
raise Exception("config {} length does not match step length!".format(item))
self._prior_cfg[item] = cfg.MODEL[item]
@property
def num_priors(self):
"""allow prior num calculation before knowing feature map size"""
assert self._prior_cfg is not {}
return [int(len(self._create_prior(0, 0, k)) / 4) for k in range(len(self._steps))]
def _create_prior(self, cx, cy, k):
raise NotImplementedError
@vis
def _image_proc(self, image=None, tb_writer=None):
# TODO test with image
if isinstance(image, type(None)):
image = np.ones((self.image_size[1], self.image_size[0], 3))
elif isinstance(image, str):
image = cv2.imread(image, -1)
image = cv2.resize(image, (self.image_size[1], self.image_size[0]))
return image
@vis
def _prior_vis(self, anchor, image_ori, feat_idx, tb_writer=None):
# TODO add output path to the signature
writer = tb_writer.writer
prior_num = self.num_priors[feat_idx]
# transform coordinates
scale = [self.image_size[1], self.image_size[0], self.image_size[1], self.image_size[0]]
bboxs = np.array(anchor).reshape((-1, 4))
box_centers = bboxs[:, :2] * scale[:2] # [x, y]
# bboxs: [xmin, ymin, xmax, ymax]
bboxs = np.hstack((bboxs[:, :2] - bboxs[:, 2:4] / 2, bboxs[:, :2] + bboxs[:, 2:4] / 2)) * scale
box_centers = box_centers.astype(np.int32)
bboxs = bboxs.astype(np.int32)
# visualize each anchor box on a feature map
for prior_idx in range(prior_num):
image = image_ori.copy()
bboxs_ = bboxs[prior_idx::prior_num, :]
box_centers_ = box_centers[4 * prior_idx::prior_num, :]
for archor, bbox in zip(box_centers_, bboxs_):
cv2.circle(image, (archor[0], archor[1]), 1, (0, 0, 255), -1)
                if archor[0] == archor[1]:  # only show diagonal anchors
cv2.rectangle(image, (bbox[0], bbox[1]), (bbox[2], bbox[3]), (0, 255, 0), 1)
image = image[..., ::-1]
image = image.transpose((2,0,1))
writer.add_image('base/feature_map_{}_{}'.format(feat_idx, prior_idx), image, 2)
def forward(self, layer_dims, tb_writer=None, image=None):
priors = []
image = self._image_proc(image=image, tb_writer=tb_writer)
for k in range(len(layer_dims)):
prior = []
for i, j in product(range(layer_dims[k][0]), range(layer_dims[k][1])):
steps_x = self.image_size[1] / self._steps[k]
steps_y = self.image_size[0] / self._steps[k]
cx = (j + 0.5) / steps_x # unit center x,y
cy = (i + 0.5) / steps_y
prior += self._create_prior(cx, cy, k)
priors += prior
self._prior_vis(prior, image, k, tb_writer=tb_writer)
output = torch.Tensor(priors).view(-1, 4)
        # TODO this clip is meaningless; it should clip on [xmin, ymin, xmax, ymax]
if self._clip:
output.clamp_(max=1, min=0)
return output
class PriorBoxSSD(PriorBoxBase):
def __init__(self, cfg):
super(PriorBoxSSD, self).__init__(cfg)
# self.image_size = cfg['image_size']
self._cfg_list = ['MIN_SIZES', 'MAX_SIZES', 'ASPECT_RATIOS']
self._flip = cfg.MODEL.FLIP
self._setup(cfg)
def _create_prior(self, cx, cy, k):
# as the original paper do
prior = []
min_sizes = self._prior_cfg['MIN_SIZES'][k]
min_sizes = [min_sizes] if not isinstance(min_sizes, list) else min_sizes
for ms in min_sizes:
# min square
s_i = ms / self.image_size[0]
s_j = ms / self.image_size[1]
prior += [cx, cy, s_j, s_i]
# min max square
if len(self._prior_cfg['MAX_SIZES']) != 0:
assert type(self._prior_cfg['MAX_SIZES'][k]) is not list # one max size per layer
s_i_prime = sqrt(s_i * (self._prior_cfg['MAX_SIZES'][k] / self.image_size[0]))
s_j_prime = sqrt(s_j * (self._prior_cfg['MAX_SIZES'][k] / self.image_size[1]))
prior += [cx, cy, s_j_prime, s_i_prime]
# rectangles by min and aspect ratio
for ar in self._prior_cfg['ASPECT_RATIOS'][k]:
prior += [cx, cy, s_j * sqrt(ar), s_i / sqrt(ar)] # a vertical box
if self._flip:
prior += [cx, cy, s_j / sqrt(ar), s_i * sqrt(ar)]
return prior
# PriorBox = PriorBoxSSD
def test_no_vis(cfg, tb_writer):
cfg = copy.deepcopy(cfg)
cfg['feature_maps'] = [38, 19, 10, 5, 3, 1]
cfg['min_sizes'] = [[30], [60], 111, 162, 213, 264]
cfg['flip'] = True
feat_dim = [list(a) for a in zip(cfg['feature_maps'], cfg['feature_maps'])]
p = PriorBoxSSD(cfg)
print(p.num_priors)
p1 = p.forward(feat_dim)
print(p1)
def test_filp(cfg, tb_writer):
cfg = copy.deepcopy(cfg)
cfg['feature_maps'] = [38, 19, 10, 5, 3, 1]
cfg['flip'] = True
feat_dim = [list(a) for a in zip(cfg['feature_maps'], cfg['feature_maps'])]
p = PriorBoxSSD(cfg)
p1 = p.forward(feat_dim, tb_writer=tb_writer)
cfg['flip'] = False
cfg['aspect_ratios'] = [[2, 1 / 2], [2, 1 / 2, 3, 1 / 3], [2, 1 / 2, 3, 1 / 3],
[2, 1 / 2, 3, 1 / 3], [2, 1 / 2], [2, 1 / 2]]
    p = PriorBoxSSD(cfg)  # the PriorBox alias above is commented out, so use PriorBoxSSD directly
p2 = p.forward(feat_dim, tb_writer=tb_writer)
# print(p2)
assert (p2 - p1).sum() < 1e-8
def test_rectangle(cfg, tb_writer):
cfg = copy.deepcopy(cfg)
cfg['feature_maps'] = [38, 19, 10, 5, 3, 1]
cfg['min_sizes'] = [30, 60, 111, 162, 213, 264]
cfg['flip'] = True
# feat_dim = [list(a) for a in zip(cfg['feature_maps'], cfg['feature_maps'])]
# cfg['image_size'] = [300, 300]
# feat_dim = [list(a) for a in zip(cfg['feature_maps'], [item * 2 for item in cfg['feature_maps']])]
# cfg['image_size'] = [300, 600]
feat_dim = [list(a) for a in zip([item * 2 for item in cfg['feature_maps']], cfg['feature_maps'])]
cfg['image_size'] = [600, 300]
p = PriorBoxSSD(cfg)
p1 = p.forward(feat_dim, tb_writer=tb_writer)
print(p1.shape)
if __name__ == '__main__':
import copy
# from lib.datasets.config import ssd_voc_vgg as cfg
# from lib.utils.visualize_utils import TBWriter
# tb_writer = TBWriter(log_dir, {'epoch': 50})
#
# test_no_vis(cfg, tb_writer)
# test_filp(cfg, tb_writer)
# test_rectangle(cfg, tb_writer)
print('haha')
from lib.utils.config import cfg
print(cfg)
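# Illustrative driving sketch (kept as comments; the feature-map sizes below are assumptions,
# and cfg must expose the MODEL.* fields read in PriorBoxBase.__init__):
#
#   layer_dims = [[38, 38], [19, 19], [10, 10], [5, 5], [3, 3], [1, 1]]
#   priors = PriorBoxSSD(cfg).forward(layer_dims)
#   print(priors.shape)   # -> (total_num_priors, 4), rows are [cx, cy, w, h] in relative units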
| [
"cv2.rectangle",
"numpy.ones",
"numpy.hstack",
"torch.Tensor",
"math.sqrt",
"numpy.array",
"cv2.circle",
"copy.deepcopy",
"cv2.resize",
"cv2.imread"
] | [((5886, 5904), 'copy.deepcopy', 'copy.deepcopy', (['cfg'], {}), '(cfg)\n', (5899, 5904), False, 'import copy\n'), ((6247, 6265), 'copy.deepcopy', 'copy.deepcopy', (['cfg'], {}), '(cfg)\n', (6260, 6265), False, 'import copy\n'), ((6845, 6863), 'copy.deepcopy', 'copy.deepcopy', (['cfg'], {}), '(cfg)\n', (6858, 6863), False, 'import copy\n'), ((1993, 2052), 'cv2.resize', 'cv2.resize', (['image', '(self.image_size[1], self.image_size[0])'], {}), '(image, (self.image_size[1], self.image_size[0]))\n', (2003, 2052), False, 'import cv2\n'), ((1845, 1897), 'numpy.ones', 'np.ones', (['(self.image_size[1], self.image_size[0], 3)'], {}), '((self.image_size[1], self.image_size[0], 3))\n', (1852, 1897), True, 'import numpy as np\n'), ((2578, 2657), 'numpy.hstack', 'np.hstack', (['(bboxs[:, :2] - bboxs[:, 2:4] / 2, bboxs[:, :2] + bboxs[:, 2:4] / 2)'], {}), '((bboxs[:, :2] - bboxs[:, 2:4] / 2, bboxs[:, :2] + bboxs[:, 2:4] / 2))\n', (2587, 2657), True, 'import numpy as np\n'), ((1955, 1976), 'cv2.imread', 'cv2.imread', (['image', '(-1)'], {}), '(image, -1)\n', (1965, 1976), False, 'import cv2\n'), ((2429, 2445), 'numpy.array', 'np.array', (['anchor'], {}), '(anchor)\n', (2437, 2445), True, 'import numpy as np\n'), ((3084, 3145), 'cv2.circle', 'cv2.circle', (['image', '(archor[0], archor[1])', '(1)', '(0, 0, 255)', '(-1)'], {}), '(image, (archor[0], archor[1]), 1, (0, 0, 255), -1)\n', (3094, 3145), False, 'import cv2\n'), ((4181, 4201), 'torch.Tensor', 'torch.Tensor', (['priors'], {}), '(priors)\n', (4193, 4201), False, 'import torch\n'), ((5282, 5348), 'math.sqrt', 'sqrt', (["(s_i * (self._prior_cfg['MAX_SIZES'][k] / self.image_size[0]))"], {}), "(s_i * (self._prior_cfg['MAX_SIZES'][k] / self.image_size[0]))\n", (5286, 5348), True, 'from math import sqrt as sqrt\n'), ((5377, 5443), 'math.sqrt', 'sqrt', (["(s_j * (self._prior_cfg['MAX_SIZES'][k] / self.image_size[1]))"], {}), "(s_j * (self._prior_cfg['MAX_SIZES'][k] / self.image_size[1]))\n", (5381, 5443), True, 'from math import sqrt as sqrt\n'), ((3238, 3314), 'cv2.rectangle', 'cv2.rectangle', (['image', '(bbox[0], bbox[1])', '(bbox[2], bbox[3])', '(0, 255, 0)', '(1)'], {}), '(image, (bbox[0], bbox[1]), (bbox[2], bbox[3]), (0, 255, 0), 1)\n', (3251, 3314), False, 'import cv2\n'), ((5648, 5656), 'math.sqrt', 'sqrt', (['ar'], {}), '(ar)\n', (5652, 5656), True, 'from math import sqrt as sqrt\n'), ((5664, 5672), 'math.sqrt', 'sqrt', (['ar'], {}), '(ar)\n', (5668, 5672), True, 'from math import sqrt as sqrt\n'), ((5767, 5775), 'math.sqrt', 'sqrt', (['ar'], {}), '(ar)\n', (5771, 5775), True, 'from math import sqrt as sqrt\n'), ((5783, 5791), 'math.sqrt', 'sqrt', (['ar'], {}), '(ar)\n', (5787, 5791), True, 'from math import sqrt as sqrt\n')] |
##########################################################################
#
# Copyright (c) 2012-2013, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of <NAME> nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import IECore
import Gaffer
class SequencePath( Gaffer.Path ) :
def __init__( self, path, root="/", minSequenceSize=1, filter=None ) :
if not isinstance( path, Gaffer.Path ) :
path = Gaffer.FileSystemPath( path, root )
Gaffer.Path.__init__( self, path[:], path.root(), filter=filter )
# we use the seed for creating base paths whenever we need them
self.__basePathSeed = path
self.__minSequenceSize = minSequenceSize
def isValid( self ) :
for p in self.__basePaths() :
if not p.isValid() :
return False
return True
def isLeaf( self ) :
for p in self.__basePaths() :
if not p.isLeaf() :
return False
return True
def info( self ) :
result = Gaffer.Path.info( self )
if result is None :
return None
def average( values ) :
return sum( values ) / len( values )
def mostCommon( values ) :
counter = {}
for value in values :
if value in counter :
counter[value] += 1
else :
counter[value] = 1
maxCount = 0
mostCommonValue = None
for value, count in counter.items() :
if count > maxCount :
mostCommonValue = value
maxCount = count
return mostCommonValue
combiners = {
"fileSystem:owner" : mostCommon,
"fileSystem:group" : mostCommon,
"fileSystem:modificationTime" : max,
"fileSystem:accessTime" : max,
"fileSystem:size" : sum,
}
infos = [ path.info() for path in self.__basePaths() ]
if len( infos ) :
for key, exampleValue in infos[0].items() :
if key in result :
continue
combiner = combiners.get( key, None )
if combiner is None :
if isinstance( exampleValue, ( int, float ) ) :
combiner = average
elif isinstance( exampleValue, basestring ) :
combiner = mostCommon
if combiner is not None :
values = [ i[key] for i in infos ]
result[key] = combiner( values )
return result
def _children( self ) :
p = self.__basePath( self )
children = p.children()
nonLeafPaths = []
leafPathStrings = []
for child in children :
if child.isLeaf() :
leafPathStrings.append( str( child ) )
else :
nonLeafPaths.append( child )
sequences = IECore.findSequences( leafPathStrings, self.__minSequenceSize )
result = []
for path in sequences + nonLeafPaths :
result.append( SequencePath( self.__basePath( str( path ) ), minSequenceSize=self.__minSequenceSize, filter = self.getFilter() ) )
return result
def copy( self ) :
result = SequencePath( self.__basePathSeed, minSequenceSize = self.__minSequenceSize, filter = self.getFilter() )
result.setFromPath( self )
return result
def __basePath( self, path ) :
result = self.__basePathSeed.copy()
if isinstance( path, basestring ) :
result.setFromString( path )
else :
result.setFromPath( path )
return result
def __basePaths( self ) :
sequence = None
with IECore.IgnoredExceptions( Exception ) :
sequence = IECore.FileSequence( str( self ) )
result = []
if sequence :
for f in sequence.fileNames() :
result.append( self.__basePath( f ) )
else :
result.append( self.__basePath( self ) )
return result
def __isSequence( self ) :
s = str( self )
if IECore.FileSequence.fileNameValidator().match( s ) :
return True
return False
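# Illustrative usage sketch (the directory path and minSequenceSize are assumptions, not taken
# from this file):
#
#   p = SequencePath( Gaffer.FileSystemPath( "/jobs/renders" ), minSequenceSize = 2 )
#   for child in p.children() :
#       print( str( child ) )   # leaf files are grouped into IECore file-sequence names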
| [
"IECore.IgnoredExceptions",
"Gaffer.Path.info",
"Gaffer.FileSystemPath",
"IECore.FileSequence.fileNameValidator",
"IECore.findSequences"
] | [((2497, 2519), 'Gaffer.Path.info', 'Gaffer.Path.info', (['self'], {}), '(self)\n', (2513, 2519), False, 'import Gaffer\n'), ((3958, 4019), 'IECore.findSequences', 'IECore.findSequences', (['leafPathStrings', 'self.__minSequenceSize'], {}), '(leafPathStrings, self.__minSequenceSize)\n', (3978, 4019), False, 'import IECore\n'), ((1996, 2029), 'Gaffer.FileSystemPath', 'Gaffer.FileSystemPath', (['path', 'root'], {}), '(path, root)\n', (2017, 2029), False, 'import Gaffer\n'), ((4664, 4699), 'IECore.IgnoredExceptions', 'IECore.IgnoredExceptions', (['Exception'], {}), '(Exception)\n', (4688, 4699), False, 'import IECore\n'), ((4984, 5023), 'IECore.FileSequence.fileNameValidator', 'IECore.FileSequence.fileNameValidator', ([], {}), '()\n', (5021, 5023), False, 'import IECore\n')] |
from PhysicsTools.Heppy.analyzers.core.Analyzer import Analyzer
from PhysicsTools.Heppy.analyzers.core.AutoHandle import AutoHandle
from PhysicsTools.Heppy.physicsobjects.Tau import Tau
from PhysicsTools.HeppyCore.utils.deltar import deltaR, matchObjectCollection3
import PhysicsTools.HeppyCore.framework.config as cfg
class TauAnalyzer( Analyzer ):
def __init__(self, cfg_ana, cfg_comp, looperName ):
super(TauAnalyzer,self).__init__(cfg_ana,cfg_comp,looperName)
#----------------------------------------
# DECLARATION OF HANDLES OF LEPTONS STUFF
#----------------------------------------
def declareHandles(self):
super(TauAnalyzer, self).declareHandles()
self.handles['taus'] = AutoHandle( ('slimmedTaus',''),'std::vector<pat::Tau>')
def beginLoop(self, setup):
super(TauAnalyzer,self).beginLoop(setup)
self.counters.addCounter('events')
count = self.counters.counter('events')
count.register('all events')
count.register('has >=1 tau at preselection')
count.register('has >=1 selected taus')
count.register('has >=1 other taus')
#------------------
# MAKE LEPTON LISTS
#------------------
def makeTaus(self, event):
event.inclusiveTaus = []
event.selectedTaus = []
event.otherTaus = []
#get all
alltaus = map( Tau, self.handles['taus'].product() )
#make inclusive taus
for tau in alltaus:
tau.associatedVertex = event.goodVertices[0] if len(event.goodVertices)>0 else event.vertices[0]
tau.lepVeto = False
tau.idDecayMode = tau.tauID("decayModeFinding")
tau.idDecayModeNewDMs = tau.tauID("decayModeFindingNewDMs")
if hasattr(self.cfg_ana, 'inclusive_decayModeID') and self.cfg_ana.inclusive_decayModeID and not tau.tauID(self.cfg_ana.inclusive_decayModeID):
continue
tau.inclusive_lepVeto = False
if self.cfg_ana.inclusive_vetoLeptons:
for lep in event.selectedLeptons:
if deltaR(lep.eta(), lep.phi(), tau.eta(), tau.phi()) < self.cfg_ana.inclusive_leptonVetoDR:
tau.inclusive_lepVeto = True
if tau.inclusive_lepVeto: continue
if self.cfg_ana.inclusive_vetoLeptonsPOG:
if not tau.tauID(self.cfg_ana.inclusive_tauAntiMuonID):
tau.inclusive_lepVeto = True
if not tau.tauID(self.cfg_ana.inclusive_tauAntiElectronID):
tau.inclusive_lepVeto = True
if tau.inclusive_lepVeto: continue
if tau.pt() < self.cfg_ana.inclusive_ptMin: continue
if abs(tau.eta()) > self.cfg_ana.inclusive_etaMax: continue
if abs(tau.dxy()) > self.cfg_ana.inclusive_dxyMax or abs(tau.dz()) > self.cfg_ana.inclusive_dzMax: continue
def id3(tau,X):
"""Create an integer equal to 1-2-3 for (loose,medium,tight)"""
return tau.tauID(X%"Loose") + tau.tauID(X%"Medium") + tau.tauID(X%"Tight")
def id5(tau,X):
"""Create an integer equal to 1-2-3-4-5 for (very loose,
loose, medium, tight, very tight)"""
return id3(tau, X) + tau.tauID(X%"VLoose") + tau.tauID(X%"VTight")
def id6(tau,X):
"""Create an integer equal to 1-2-3-4-5-6 for (very loose,
loose, medium, tight, very tight, very very tight)"""
return id5(tau, X) + tau.tauID(X%"VVTight")
tau.idMVA = id6(tau, "by%sIsolationMVArun2v1DBoldDMwLT")
tau.idMVANewDM = id6(tau, "by%sIsolationMVArun2v1DBnewDMwLT")
tau.idCI3hit = id3(tau, "by%sCombinedIsolationDeltaBetaCorr3Hits")
tau.idAntiMu = tau.tauID("againstMuonLoose3") + tau.tauID("againstMuonTight3")
tau.idAntiE = id5(tau, "againstElectron%sMVA6")
#print "Tau pt %5.1f: idMVA2 %d, idCI3hit %d, %s, %s" % (tau.pt(), tau.idMVA2, tau.idCI3hit, tau.tauID(self.cfg_ana.tauID), tau.tauID(self.cfg_ana.tauLooseID))
if tau.tauID(self.cfg_ana.inclusive_tauID):
event.inclusiveTaus.append(tau)
for tau in event.inclusiveTaus:
tau.loose_lepVeto = False
if self.cfg_ana.loose_vetoLeptons:
for lep in event.selectedLeptons:
if deltaR(lep.eta(), lep.phi(), tau.eta(), tau.phi()) < self.cfg_ana.loose_leptonVetoDR:
tau.loose_lepVeto = True
if self.cfg_ana.loose_vetoLeptonsPOG:
if not tau.tauID(self.cfg_ana.loose_tauAntiMuonID):
tau.loose_lepVeto = True
if not tau.tauID(self.cfg_ana.loose_tauAntiElectronID):
tau.loose_lepVeto = True
if tau.tauID(self.cfg_ana.loose_decayModeID) and \
tau.pt() > self.cfg_ana.loose_ptMin and abs(tau.eta()) < self.cfg_ana.loose_etaMax and \
abs(tau.dxy()) < self.cfg_ana.loose_dxyMax and abs(tau.dz()) < self.cfg_ana.loose_dzMax and \
tau.tauID(self.cfg_ana.loose_tauID) and not tau.loose_lepVeto:
event.selectedTaus.append(tau)
else:
event.otherTaus.append(tau)
event.inclusiveTaus.sort(key = lambda l : l.pt(), reverse = True)
event.selectedTaus.sort(key = lambda l : l.pt(), reverse = True)
event.otherTaus.sort(key = lambda l : l.pt(), reverse = True)
self.counters.counter('events').inc('all events')
if len(event.inclusiveTaus): self.counters.counter('events').inc('has >=1 tau at preselection')
if len(event.selectedTaus): self.counters.counter('events').inc('has >=1 selected taus')
if len(event.otherTaus): self.counters.counter('events').inc('has >=1 other taus')
def matchTaus(self, event):
match = matchObjectCollection3(event.inclusiveTaus, event.gentaus, deltaRMax = 0.5)
for lep in event.inclusiveTaus:
gen = match[lep]
lep.mcMatchId = 1 if gen else 0
lep.genp = gen
def process(self, event):
self.readCollections( event.input )
self.makeTaus(event)
if not self.cfg_comp.isMC:
return True
if hasattr(event, 'gentaus'):
self.matchTaus(event)
return True
# Find the definitions of the tau ID strings here:
# http://cmslxr.fnal.gov/lxr/source/PhysicsTools/PatAlgos/python/producersLayer1/tauProducer_cfi.py
setattr(TauAnalyzer,"defaultConfig",cfg.Analyzer(
class_object = TauAnalyzer,
# inclusive very loose hadronic tau selection
inclusive_ptMin = 18,
inclusive_etaMax = 9999,
inclusive_dxyMax = 1000.,
inclusive_dzMax = 0.4,
inclusive_vetoLeptons = False,
inclusive_leptonVetoDR = 0.4,
inclusive_decayModeID = "decayModeFindingNewDMs", # ignored if not set or ""
inclusive_tauID = "decayModeFindingNewDMs",
inclusive_vetoLeptonsPOG = False, # If True, the following two IDs are required
inclusive_tauAntiMuonID = "",
inclusive_tauAntiElectronID = "",
# loose hadronic tau selection
loose_ptMin = 18,
loose_etaMax = 9999,
loose_dxyMax = 1000.,
loose_dzMax = 0.2,
loose_vetoLeptons = True,
loose_leptonVetoDR = 0.4,
loose_decayModeID = "decayModeFindingNewDMs", # ignored if not set or ""
loose_tauID = "byLooseCombinedIsolationDeltaBetaCorr3Hits",
loose_vetoLeptonsPOG = False, # If True, the following two IDs are required
loose_tauAntiMuonID = "againstMuonLoose3",
loose_tauAntiElectronID = "againstElectronLooseMVA5"
)
)
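# Illustrative scheduling sketch: in a heppy job this analyzer is normally placed in a
# cfg.Sequence together with the analyzers that fill event.goodVertices / event.selectedLeptons
# first (component and service wiring omitted; treat this as an assumption, not the official config):
#
#   sequence = cfg.Sequence([TauAnalyzer.defaultConfig])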
| [
"PhysicsTools.HeppyCore.framework.config.Analyzer",
"PhysicsTools.HeppyCore.utils.deltar.matchObjectCollection3",
"PhysicsTools.Heppy.analyzers.core.AutoHandle.AutoHandle"
] | [((6681, 7442), 'PhysicsTools.HeppyCore.framework.config.Analyzer', 'cfg.Analyzer', ([], {'class_object': 'TauAnalyzer', 'inclusive_ptMin': '(18)', 'inclusive_etaMax': '(9999)', 'inclusive_dxyMax': '(1000.0)', 'inclusive_dzMax': '(0.4)', 'inclusive_vetoLeptons': '(False)', 'inclusive_leptonVetoDR': '(0.4)', 'inclusive_decayModeID': '"""decayModeFindingNewDMs"""', 'inclusive_tauID': '"""decayModeFindingNewDMs"""', 'inclusive_vetoLeptonsPOG': '(False)', 'inclusive_tauAntiMuonID': '""""""', 'inclusive_tauAntiElectronID': '""""""', 'loose_ptMin': '(18)', 'loose_etaMax': '(9999)', 'loose_dxyMax': '(1000.0)', 'loose_dzMax': '(0.2)', 'loose_vetoLeptons': '(True)', 'loose_leptonVetoDR': '(0.4)', 'loose_decayModeID': '"""decayModeFindingNewDMs"""', 'loose_tauID': '"""byLooseCombinedIsolationDeltaBetaCorr3Hits"""', 'loose_vetoLeptonsPOG': '(False)', 'loose_tauAntiMuonID': '"""againstMuonLoose3"""', 'loose_tauAntiElectronID': '"""againstElectronLooseMVA5"""'}), "(class_object=TauAnalyzer, inclusive_ptMin=18, inclusive_etaMax\n =9999, inclusive_dxyMax=1000.0, inclusive_dzMax=0.4,\n inclusive_vetoLeptons=False, inclusive_leptonVetoDR=0.4,\n inclusive_decayModeID='decayModeFindingNewDMs', inclusive_tauID=\n 'decayModeFindingNewDMs', inclusive_vetoLeptonsPOG=False,\n inclusive_tauAntiMuonID='', inclusive_tauAntiElectronID='', loose_ptMin\n =18, loose_etaMax=9999, loose_dxyMax=1000.0, loose_dzMax=0.2,\n loose_vetoLeptons=True, loose_leptonVetoDR=0.4, loose_decayModeID=\n 'decayModeFindingNewDMs', loose_tauID=\n 'byLooseCombinedIsolationDeltaBetaCorr3Hits', loose_vetoLeptonsPOG=\n False, loose_tauAntiMuonID='againstMuonLoose3', loose_tauAntiElectronID\n ='againstElectronLooseMVA5')\n", (6693, 7442), True, 'import PhysicsTools.HeppyCore.framework.config as cfg\n'), ((740, 796), 'PhysicsTools.Heppy.analyzers.core.AutoHandle.AutoHandle', 'AutoHandle', (["('slimmedTaus', '')", '"""std::vector<pat::Tau>"""'], {}), "(('slimmedTaus', ''), 'std::vector<pat::Tau>')\n", (750, 796), False, 'from PhysicsTools.Heppy.analyzers.core.AutoHandle import AutoHandle\n'), ((6009, 6082), 'PhysicsTools.HeppyCore.utils.deltar.matchObjectCollection3', 'matchObjectCollection3', (['event.inclusiveTaus', 'event.gentaus'], {'deltaRMax': '(0.5)'}), '(event.inclusiveTaus, event.gentaus, deltaRMax=0.5)\n', (6031, 6082), False, 'from PhysicsTools.HeppyCore.utils.deltar import deltaR, matchObjectCollection3\n')] |
# <NAME> 170401038
import math
import random
r = 3271
def egcd(a,b):
if(a == 0):
return(b,0,1)
else:
c,d,e = egcd(b % a, a)
return(c, e - (b // a) * d, d)
def modInvert(a,b):
c,d,e = egcd(a,b)
if c != 1:
raise Exception('moduler ters bulunamadi')
else:
return d % b
def randomInteger(n):
return random.randrange(2 ** (n-1), 2 ** n) | 1
def RabinMiller(f):
s = 5
if(f == 2):
return 1
if not (f & 1):
return 0
p = f-1
u = 0
r = f-1
while (r%2 == 0):
r >>= 1
u+=1
def Control(a):
z = pow(a, r, f)
if z == 1:
return 0
for i in range(u):
            z = pow(a, (2**i) * r, f)  # reduce modulo f itself; a value taken mod f-1 can never equal f-1
if z == p:
return 0
return 1
for i in range(s):
a = random.randrange(2, p-2)
if Control(a):
return 0
return 1
def Keygen(n):
while True:
p = randomInteger(n//2)
if (p - 1) % r == 0 and RabinMiller(p) and math.gcd(r, int((p - 1) / r)) == 1:
break
while True:
q = randomInteger(n//2)
if RabinMiller(q) and math.gcd(r, int(q - 1)) == 1:
break
N = p * q
phi = (p - 1) * (q - 1)
while True:
y = random.randrange(1, N)
if math.gcd(y, N) == 1:
x = pow(y, phi * modInvert(r, N) % N, N)
if x != 1:
break
publicKeyFile = open("publickey.txt", "w+")
publicKeyFile.write(str(N) + "\n" + str(y))
publicKeyFile.close()
privateKeyFile = open("privatekey.txt", "w+")
privateKeyFile.write(str(phi) + "\n" + str(x) + "\n" + str(N))
privateKeyFile.close()
def encrypt(plaintext, publickeytxt):
try:
open(publickeytxt, "r")
except FileNotFoundError:
print("Anahtar çiftleri oluşturulmadan şifrelme işlemi yapılamaz. Lütfen önce Keygen fonksiyonunu çalıştırın.")
else:
publicKeyFile = open(publickeytxt, "r")
N, y = publicKeyFile.read().split("\n")
N = int(N)
y = int(y)
publicKeyFile.close()
plainTextFile = open(plaintext, "r")
plainCopy = int(plainTextFile.read().split("\n")[0])
plainTextFile.close()
while True:
u = random.randrange(1, int(N))
if math.gcd(y, N) == 1:
break
cipherText = pow(y, plainCopy, N) * pow(u, r, N) % N
cipherTextFile = open("ciphertext.txt", "w+")
cipherTextFile.write(str(cipherText))
cipherTextFile.close()
def decrypt(ciphertext, privatekeytxt):
try:
open(privatekeytxt, "r")
except FileNotFoundError:
print("Anahtar çiftleri oluşturulmadan deşifreleme işlemi yapılamz. Lütfen önce Keygen fonksiyonunu çalıştırın.")
else:
privateKeyFile = open(privatekeytxt, "r")
phi, x, N = privateKeyFile.read().split("\n")
phi, x, N = int(phi), int(x), int(N)
privateKeyFile.close()
cipherTextFile = open(ciphertext, "r")
cipherCopy = int(cipherTextFile.read())
a = pow(cipherCopy, (phi * modInvert(r, N)) % N, N)
for i in range(r -1):
if(pow(x, i, N) == a):
break
plainText2File = open("plaintext2.txt", "w+")
plainText2File.write(str(i))
plainText2File.close()
plain2File = open("plaintext2.txt", "r")
plain1File = open("plaintext.txt", "r")
plain1 = plain1File.read().split("\n")[0]
plain2 = plain2File.read().split("\n")[0]
if plain1 == plain2:
print("Dosyalar Özdeştir..")
else:
print("Dosyalar özdeş değildir..")
n = int(input("Oluşturulmak istenen anahtar çiftlerinin bit uzunluğunu girin: "))
Keygen(n)
encrypt("plaintext.txt","publickey.txt")
decrypt("ciphertext.txt", "privatekey.txt")
| [
"math.gcd",
"random.randrange"
] | [((388, 426), 'random.randrange', 'random.randrange', (['(2 ** (n - 1))', '(2 ** n)'], {}), '(2 ** (n - 1), 2 ** n)\n', (404, 426), False, 'import random\n'), ((897, 923), 'random.randrange', 'random.randrange', (['(2)', '(p - 2)'], {}), '(2, p - 2)\n', (913, 923), False, 'import random\n'), ((1369, 1391), 'random.randrange', 'random.randrange', (['(1)', 'N'], {}), '(1, N)\n', (1385, 1391), False, 'import random\n'), ((1404, 1418), 'math.gcd', 'math.gcd', (['y', 'N'], {}), '(y, N)\n', (1412, 1418), False, 'import math\n'), ((2443, 2457), 'math.gcd', 'math.gcd', (['y', 'N'], {}), '(y, N)\n', (2451, 2457), False, 'import math\n')] |
import pytest
import time
import subprocess
from subprocess import run,Popen
from seldon_utils import *
from k8s_utils import *
def wait_for_shutdown(deploymentName):
ret = run("kubectl get deploy/"+deploymentName, shell=True)
while ret.returncode == 0:
time.sleep(1)
ret = run("kubectl get deploy/"+deploymentName, shell=True)
def wait_for_rollout(deploymentName):
ret = run("kubectl rollout status deploy/"+deploymentName, shell=True)
while ret.returncode > 0:
time.sleep(1)
ret = run("kubectl rollout status deploy/"+deploymentName, shell=True)
def initial_rest_request():
r = rest_request_api_gateway("oauth-key","oauth-secret",None,API_GATEWAY_REST)
if not r.status_code == 200:
time.sleep(1)
r = rest_request_api_gateway("oauth-key","oauth-secret",None,API_GATEWAY_REST)
if not r.status_code == 200:
time.sleep(5)
r = rest_request_api_gateway("oauth-key","oauth-secret",None,API_GATEWAY_REST)
return r
@pytest.mark.usefixtures("seldon_java_images")
@pytest.mark.usefixtures("single_namespace_seldon_ksonnet")
class TestSingleNamespace(object):
# Test singe model helm script with 4 API methods
def test_single_model(self):
run('cd my-model && ks delete default && ks component rm mymodel', shell=True)
run('kubectl delete sdep --all', shell=True)
run('cd my-model && ks generate seldon-serve-simple-v1alpha2 mymodel --image seldonio/mock_classifier:1.0 --oauthKey=oauth-key --oauthSecret=oauth-secret && ks apply default -c mymodel', shell=True, check=True)
wait_for_rollout("mymodel-mymodel-025d03d")
r = initial_rest_request()
r = rest_request_api_gateway("oauth-key","oauth-secret",None,API_GATEWAY_REST)
res = r.json()
print(res)
assert r.status_code == 200
assert len(r.json()["data"]["tensor"]["values"]) == 1
r = rest_request_ambassador("mymodel",None,API_AMBASSADOR)
res = r.json()
print(res)
assert r.status_code == 200
assert len(r.json()["data"]["tensor"]["values"]) == 1
r = grpc_request_ambassador2("mymodel",None,API_AMBASSADOR)
print(r)
r = grpc_request_api_gateway2("oauth-key","oauth-secret",None,rest_endpoint=API_GATEWAY_REST,grpc_endpoint=API_GATEWAY_GRPC)
print(r)
run('cd my-model && ks delete default -c mymodel && ks component rm mymodel', shell=True)
# Test AB Test model helm script with 4 API methods
def test_abtest_model(self):
run('cd my-model && ks delete default && ks component rm mymodel', shell=True)
run('kubectl delete sdep --all', shell=True)
run('cd my-model && ks generate seldon-abtest-v1alpha2 myabtest --imageA seldonio/mock_classifier:1.0 --imageB seldonio/mock_classifier:1.0 --oauthKey=oauth-key --oauthSecret=oauth-secret && ks apply default -c myabtest', shell=True)
wait_for_rollout("myabtest-myabtest-41de5b8")
wait_for_rollout("myabtest-myabtest-df66c5c")
r = initial_rest_request()
r = rest_request_api_gateway("oauth-key","oauth-secret",None,API_GATEWAY_REST)
res = r.json()
print(res)
assert r.status_code == 200
assert len(r.json()["data"]["tensor"]["values"]) == 1
r = rest_request_ambassador("myabtest",None,API_AMBASSADOR)
res = r.json()
print(res)
assert r.status_code == 200
assert len(r.json()["data"]["tensor"]["values"]) == 1
r = grpc_request_ambassador2("myabtest",None,API_AMBASSADOR)
print(r)
r = grpc_request_api_gateway2("oauth-key","oauth-secret",None,rest_endpoint=API_GATEWAY_REST,grpc_endpoint=API_GATEWAY_GRPC)
print(r)
run('cd my-model && ks delete default -c myabtest && ks component rm myabtest', shell=True)
# Test MAB Test model helm script with 4 API methods
def test_mab_model(self):
run('cd my-model && ks delete default && ks component rm mymab', shell=True)
run('kubectl delete sdep --all', shell=True)
run('cd my-model && ks generate seldon-mab-v1alpha2 mymab --imageA seldonio/mock_classifier:1.0 --imageB seldonio/mock_classifier:1.0 --oauthKey=oauth-key --oauthSecret=oauth-secret && ks apply default -c mymab', shell=True)
wait_for_rollout("mymab-mymab-41de5b8")
wait_for_rollout("mymab-mymab-b8038b2")
wait_for_rollout("mymab-mymab-df66c5c")
r = initial_rest_request()
r = rest_request_api_gateway("oauth-key","oauth-secret",None,API_GATEWAY_REST)
res = r.json()
print(res)
assert r.status_code == 200
assert len(r.json()["data"]["tensor"]["values"]) == 1
r = rest_request_ambassador("mymab",None,API_AMBASSADOR)
res = r.json()
print(res)
assert r.status_code == 200
assert len(r.json()["data"]["tensor"]["values"]) == 1
r = grpc_request_ambassador2("mymab",None,API_AMBASSADOR)
print(r)
r = grpc_request_api_gateway2("oauth-key","oauth-secret",None,rest_endpoint=API_GATEWAY_REST,grpc_endpoint=API_GATEWAY_GRPC)
print(r)
run('cd my-model && ks delete default && ks component rm mymab', shell=True)
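# Illustrative invocation sketch (assumes the fixtures referenced above live in a conftest.py
# and that a cluster with the "my-model" ksonnet app is reachable; the file name is whatever
# this module is saved as):
#
#   pytest -q -k "test_single_model" <this_module>.py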
| [
"subprocess.run",
"pytest.mark.usefixtures",
"time.sleep"
] | [((1033, 1078), 'pytest.mark.usefixtures', 'pytest.mark.usefixtures', (['"""seldon_java_images"""'], {}), "('seldon_java_images')\n", (1056, 1078), False, 'import pytest\n'), ((1080, 1138), 'pytest.mark.usefixtures', 'pytest.mark.usefixtures', (['"""single_namespace_seldon_ksonnet"""'], {}), "('single_namespace_seldon_ksonnet')\n", (1103, 1138), False, 'import pytest\n'), ((178, 233), 'subprocess.run', 'run', (["('kubectl get deploy/' + deploymentName)"], {'shell': '(True)'}), "('kubectl get deploy/' + deploymentName, shell=True)\n", (181, 233), False, 'from subprocess import run, Popen\n'), ((402, 468), 'subprocess.run', 'run', (["('kubectl rollout status deploy/' + deploymentName)"], {'shell': '(True)'}), "('kubectl rollout status deploy/' + deploymentName, shell=True)\n", (405, 468), False, 'from subprocess import run, Popen\n'), ((271, 284), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (281, 284), False, 'import time\n'), ((299, 354), 'subprocess.run', 'run', (["('kubectl get deploy/' + deploymentName)"], {'shell': '(True)'}), "('kubectl get deploy/' + deploymentName, shell=True)\n", (302, 354), False, 'from subprocess import run, Popen\n'), ((505, 518), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (515, 518), False, 'import time\n'), ((533, 599), 'subprocess.run', 'run', (["('kubectl rollout status deploy/' + deploymentName)"], {'shell': '(True)'}), "('kubectl rollout status deploy/' + deploymentName, shell=True)\n", (536, 599), False, 'from subprocess import run, Popen\n'), ((763, 776), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (773, 776), False, 'import time\n'), ((1274, 1352), 'subprocess.run', 'run', (['"""cd my-model && ks delete default && ks component rm mymodel"""'], {'shell': '(True)'}), "('cd my-model && ks delete default && ks component rm mymodel', shell=True)\n", (1277, 1352), False, 'from subprocess import run, Popen\n'), ((1361, 1405), 'subprocess.run', 'run', (['"""kubectl delete sdep --all"""'], {'shell': '(True)'}), "('kubectl delete sdep --all', shell=True)\n", (1364, 1405), False, 'from subprocess import run, Popen\n'), ((1414, 1629), 'subprocess.run', 'run', (['"""cd my-model && ks generate seldon-serve-simple-v1alpha2 mymodel --image seldonio/mock_classifier:1.0 --oauthKey=oauth-key --oauthSecret=oauth-secret && ks apply default -c mymodel"""'], {'shell': '(True)', 'check': '(True)'}), "('cd my-model && ks generate seldon-serve-simple-v1alpha2 mymodel --image seldonio/mock_classifier:1.0 --oauthKey=oauth-key --oauthSecret=oauth-secret && ks apply default -c mymodel'\n , shell=True, check=True)\n", (1417, 1629), False, 'from subprocess import run, Popen\n'), ((2404, 2497), 'subprocess.run', 'run', (['"""cd my-model && ks delete default -c mymodel && ks component rm mymodel"""'], {'shell': '(True)'}), "('cd my-model && ks delete default -c mymodel && ks component rm mymodel',\n shell=True)\n", (2407, 2497), False, 'from subprocess import run, Popen\n'), ((2600, 2678), 'subprocess.run', 'run', (['"""cd my-model && ks delete default && ks component rm mymodel"""'], {'shell': '(True)'}), "('cd my-model && ks delete default && ks component rm mymodel', shell=True)\n", (2603, 2678), False, 'from subprocess import run, Popen\n'), ((2687, 2731), 'subprocess.run', 'run', (['"""kubectl delete sdep --all"""'], {'shell': '(True)'}), "('kubectl delete sdep --all', shell=True)\n", (2690, 2731), False, 'from subprocess import run, Popen\n'), ((2740, 2982), 'subprocess.run', 'run', (['"""cd my-model && ks generate seldon-abtest-v1alpha2 myabtest 
--imageA seldonio/mock_classifier:1.0 --imageB seldonio/mock_classifier:1.0 --oauthKey=oauth-key --oauthSecret=oauth-secret && ks apply default -c myabtest"""'], {'shell': '(True)'}), "('cd my-model && ks generate seldon-abtest-v1alpha2 myabtest --imageA seldonio/mock_classifier:1.0 --imageB seldonio/mock_classifier:1.0 --oauthKey=oauth-key --oauthSecret=oauth-secret && ks apply default -c myabtest'\n , shell=True)\n", (2743, 2982), False, 'from subprocess import run, Popen\n'), ((3816, 3917), 'subprocess.run', 'run', (['"""cd my-model && ks delete default -c myabtest && ks component rm myabtest"""'], {'shell': '(True)'}), "('cd my-model && ks delete default -c myabtest && ks component rm myabtest'\n , shell=True)\n", (3819, 3917), False, 'from subprocess import run, Popen\n'), ((4009, 4085), 'subprocess.run', 'run', (['"""cd my-model && ks delete default && ks component rm mymab"""'], {'shell': '(True)'}), "('cd my-model && ks delete default && ks component rm mymab', shell=True)\n", (4012, 4085), False, 'from subprocess import run, Popen\n'), ((4094, 4138), 'subprocess.run', 'run', (['"""kubectl delete sdep --all"""'], {'shell': '(True)'}), "('kubectl delete sdep --all', shell=True)\n", (4097, 4138), False, 'from subprocess import run, Popen\n'), ((4147, 4384), 'subprocess.run', 'run', (['"""cd my-model && ks generate seldon-mab-v1alpha2 mymab --imageA seldonio/mock_classifier:1.0 --imageB seldonio/mock_classifier:1.0 --oauthKey=oauth-key --oauthSecret=oauth-secret && ks apply default -c mymab"""'], {'shell': '(True)'}), "('cd my-model && ks generate seldon-mab-v1alpha2 mymab --imageA seldonio/mock_classifier:1.0 --imageB seldonio/mock_classifier:1.0 --oauthKey=oauth-key --oauthSecret=oauth-secret && ks apply default -c mymab'\n , shell=True)\n", (4150, 4384), False, 'from subprocess import run, Popen\n'), ((5257, 5333), 'subprocess.run', 'run', (['"""cd my-model && ks delete default && ks component rm mymab"""'], {'shell': '(True)'}), "('cd my-model && ks delete default && ks component rm mymab', shell=True)\n", (5260, 5333), False, 'from subprocess import run, Popen\n'), ((913, 926), 'time.sleep', 'time.sleep', (['(5)'], {}), '(5)\n', (923, 926), False, 'import time\n')] |
from django.db import models
from .query import BookQuerySet
class Book(models.Model):
objects = BookQuerySet.as_manager()
title = models.CharField(max_length=50)
publication_date = models.DateTimeField()
author = models.ForeignKey('Author')
genres = models.ManyToManyField('Genre')
class Author(models.Model):
name = models.CharField(max_length=50)
nationality = models.ForeignKey('Nation', null=True)
class Genre(models.Model):
name = models.CharField(max_length=50)
class Nation(models.Model):
name = models.CharField(max_length=50)
demonym = models.CharField(max_length=50)
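# Illustrative ORM usage sketch (BookQuerySet lives in the sibling query module and is not
# shown here; the lookups below rely only on the fields declared above):
#
#   Book.objects.filter(author__nationality__name="France")
#   Book.objects.filter(genres__name="Fantasy").select_related("author")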
| [
"django.db.models.DateTimeField",
"django.db.models.ManyToManyField",
"django.db.models.CharField",
"django.db.models.ForeignKey"
] | [((143, 174), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(50)'}), '(max_length=50)\n', (159, 174), False, 'from django.db import models\n'), ((198, 220), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {}), '()\n', (218, 220), False, 'from django.db import models\n'), ((234, 261), 'django.db.models.ForeignKey', 'models.ForeignKey', (['"""Author"""'], {}), "('Author')\n", (251, 261), False, 'from django.db import models\n'), ((275, 306), 'django.db.models.ManyToManyField', 'models.ManyToManyField', (['"""Genre"""'], {}), "('Genre')\n", (297, 306), False, 'from django.db import models\n'), ((348, 379), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(50)'}), '(max_length=50)\n', (364, 379), False, 'from django.db import models\n'), ((398, 436), 'django.db.models.ForeignKey', 'models.ForeignKey', (['"""Nation"""'], {'null': '(True)'}), "('Nation', null=True)\n", (415, 436), False, 'from django.db import models\n'), ((477, 508), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(50)'}), '(max_length=50)\n', (493, 508), False, 'from django.db import models\n'), ((550, 581), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(50)'}), '(max_length=50)\n', (566, 581), False, 'from django.db import models\n'), ((596, 627), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(50)'}), '(max_length=50)\n', (612, 627), False, 'from django.db import models\n')] |
from matplotlib.pyplot import title
import streamlit as st
import pandas as pd
import altair as alt
import pydeck as pdk
import os
import glob
from wordcloud import WordCloud
import streamlit_analytics
path = os.path.dirname(__file__)
streamlit_analytics.start_tracking()
@st.cache
def load_gnd_top_daten(typ):
gnd_top_df = pd.DataFrame()
for file in glob.glob(f'{path}/../stats/title_gnd_{typ}_*.csv'):
gnd_top_df = gnd_top_df.append(pd.read_csv(file, index_col=None))
return gnd_top_df
def sachbegriff_cloud():
#wordcloud der top 100 sachbegriffe eines auszuwählenden tages der letzten 10 werktage
st.header('TOP 100 Sachbegriffe pro Tag')
st.write('Wählen Sie ein Datum aus den letzten 10 Werktagen vor der letzten Aktualisierung der Daten des Dashboards und sehen Sie eine Wordcloud der 100 meistverwendeten GND-Sachbegriffe dieses Tages. Die Größe des Begriffes entspricht der Häufigkeit des Sachbegriffs.')
files = glob.glob(f'{path}/../stats/*Ts-count.csv')
daten = [x[-23:-13] for x in files]
daten.sort()
daten_filter = st.select_slider('Wählen Sie ein Datum', options=daten, value=daten[-1])
df = pd.read_csv(f'{path}/../stats/{daten_filter}-Ts-count.csv')
dict = df.to_dict(orient='records')
worte = {}
for record in dict:
worte.update({record['sachbegriff']:record['count']})
wc = WordCloud(background_color="white", max_words=100, width=2000, height=800, colormap='tab20')
wc.generate_from_frequencies(worte)
return st.image(wc.to_array())
def wirkungsorte():
#ranking und karte der meistverwendeten wirkungsorte aller personen in der gnd
df = pd.read_csv(f'{path}/wirkungsorte-top50.csv')
df.drop(columns=['id'], inplace=True)
df.rename(columns={'name': 'Name', 'count': 'Anzahl'}, inplace=True)
st.header('TOP Wirkungsorte von GND-Personen')
st.markdown('Von allen Personensätzen (Tp) weisen 782.682 Angaben zum Wirkungsort der jeweiligen Person auf.')
#Balkendiagramm
orte_filt = st.slider('Zeige Top …', min_value=3, max_value=len(df), value=10, step=1)
graph_count = alt.Chart(df.nlargest(orte_filt, 'Anzahl', keep='all')).mark_bar().encode(
alt.X('Name:N', sort='y'),
alt.Y('Anzahl'),
alt.Color('Name:N', legend=alt.Legend(columns=2)),
tooltip=[alt.Tooltip('Name:N', title='Ort'), alt.Tooltip('Anzahl:Q', title='Anzahl')]
)
st.altair_chart(graph_count, use_container_width=True)
#Karte
INITIAL_VIEW_STATE = pdk.ViewState(
latitude=50.67877877706058,
longitude=8.129981238464392,
zoom=4.5,
max_zoom=16,
bearing=0
)
scatterplotlayer = pdk.Layer(
"ScatterplotLayer",
df,
pickable=True,
opacity=0.5,
stroked=True,
filled=True,
radius_min_pixels=1,
radius_max_pixels=100,
line_width_min_pixels=1,
get_position='[lon, lat]',
get_radius="Anzahl",
get_fill_color=[255, 140, 0],
get_line_color=[0, 0, 0]
)
st.pydeck_chart(pdk.Deck(
scatterplotlayer,
initial_view_state=INITIAL_VIEW_STATE,
map_style=pdk.map_styles.LIGHT,
tooltip={"html": "<b>{Name}</b><br \>Wirkungsort von {Anzahl} Personen"}))
def wirkungsorte_musik():
    #nach jahrzehnten zwischen 1400 und 2010 gefilterte auswertung der GND-Musikwerke, Musik-Personen und Wirkungsorte und daraus abgeleitete Zentren der Musikkultur, dargestellt auf einer Karte
musiker_orte = pd.read_csv(f'{path}/musiker_orte.csv', sep='\t', index_col='idn')
st.header('Wirkungszentren der Musik 1400–2010')
st.write('Eine Auswertung der veröffentlichten Titel von Musikern und deren Wirkungszeiten erlaubt Rückschlüsse auf die musikalischen Zentren, wie sie im Bestand der DNB repräsentiert sind.')
limiter = st.slider('Jahresfilter', min_value=1400, max_value=int(musiker_orte['jahrzehnt'].max()), value=(1900), step=10)
musik_filt= musiker_orte.loc[(musiker_orte['jahrzehnt'] == limiter)]
musik_filt['norm']=(musik_filt['count']-musik_filt['count'].min())/(musik_filt['count'].max()-musik_filt['count'].min())
#Karte
INITIAL_VIEW_STATE = pdk.ViewState(
latitude=50.67877877706058,
longitude=8.129981238464392,
zoom=4.5,
max_zoom=16,
bearing=0
)
musiker_scatter = pdk.Layer(
"ScatterplotLayer",
musik_filt,
opacity=0.8,
get_position='[lon, lat]',
pickable=True,
stroked=True,
filled=True,
radius_min_pixels=1,
radius_max_pixels=100,
radiusscale=100,
line_width_min_pixels=1,
get_radius="norm*50000",
get_fill_color=[50, 168, 92],
get_line_color=[39, 71, 51]
)
st.pydeck_chart(pdk.Deck(
musiker_scatter,
initial_view_state=INITIAL_VIEW_STATE,
map_style=pdk.map_styles.LIGHT,
tooltip={"html": "<b>{name}</b>"}))
st.subheader(f'TOP 10 Wirkungszentren der {limiter}er')
col1, col2 = st.beta_columns(2)
i = 1
for index, row in musik_filt.nlargest(10, 'norm').iterrows():
if i <= 5:
with col1:
st.write(f'{i}. {row["name"]}')
elif i > 5:
with col2:
st.write(f'{i}. {row["name"]}')
i += 1
def gesamt_entity_count():
#Gesamtzahl der GND-Entitäten
with open(f"{path}/../stats/gnd_entity_count.csv", "r") as f:
entities = f'{int(f.read()):,}'
return st.write(f"GND-Entitäten gesamt: {entities.replace(',','.')}")
def relationen():
#Top 10 der GND-Relationierungscodes
rels = pd.read_csv(f'{path}/../stats/gnd_codes_all.csv', index_col=False)
st.subheader('Relationen')
st.write('GND-Datensätze können mit anderen Datensätzen verlinkt (»relationiert«) werden. Die Art der Verlinkung wird über einen Relationierungscode beschrieben. Hier sind die am häufigsten verwendeten Relationierungscodes zu sehen. Die Auflösung der wichtigsten Codes gibt es [hier](https://wiki.dnb.de/download/attachments/51283696/Codeliste_ABCnachCode_Webseite_2012-07.pdf).')
rels_filt = st.slider('Zeige Top ...', 5, len(rels), 10, 1)
relation_count = alt.Chart(rels.nlargest(rels_filt, 'count', keep='all')).mark_bar().encode(
alt.X('code', title='Relationierungs-Code', sort='-y'),
alt.Y('count', title='Anzahl'),
alt.Color('code', sort='-y', title='Relationierungscode'),
tooltip=[alt.Tooltip('count', title='Anzahl'), alt.Tooltip('code', title='Code')]
)
st.altair_chart(relation_count, use_container_width=True)
with open(f"{path}/../stats/gnd_relation_count.csv", "r") as f:
relations = f'{int(f.read()):,}'
st.write(f"Relationen zwischen Entitäten gesamt: {relations.replace(',','.')}")
def systematik():
#Ranking der meistverwendeten GND-Systematik-Notationen
classification = pd.read_csv(f'{path}/../stats/gnd_classification_all.csv', index_col=False)
st.subheader('Systematik')
st.write('Die Entitäten der GND können in eine Systematik eingeordnet werden. Die Liste der möglichen Notationen gibt es [hier](http://www.dnb.de/gndsyst).')
class_filt = st.slider('Zeige Top …', 5, len(classification), 10, 1)
classification_count = alt.Chart(classification.nlargest(class_filt, 'count', keep='all')).mark_bar().encode(
alt.X('id', title='Notation', sort='-y'),
alt.Y('count', title='Anzahl'),
alt.Color('name', sort='-y', title="Bezeichnung"),
tooltip=[alt.Tooltip('id', title='Notation'), alt.Tooltip('name', title='Bezeichnung'), alt.Tooltip('count', title='Anzahl')]
)
return st.altair_chart(classification_count, use_container_width=True)
def systematik_ts():
#Ranking der Systematik von Ts-Sätzen
classification_ts = pd.read_csv(f'{path}/../stats/gnd_classification_Ts_all.csv', index_col=False)
st.subheader('Systematik der Sachbegriffe')
st.write('Die Entitäten der GND können in eine Systematik eingeordnet werden. Hier sind die Systematik-Notationen der Sachbegriffe (Ts) aufgetragen. Die Liste der möglichen Notationen gibt es [hier](http://www.dnb.de/gndsyst).')
class_ts_filt = st.slider('Zeige TOP …', min_value=5, max_value=len(classification_ts), value=10, step=1)
classification_ts_count = alt.Chart(classification_ts.nlargest(class_ts_filt, 'count', keep='all')).mark_bar().encode(
alt.X('id:N', title='Notation', sort='-y'),
alt.Y('count:Q', title='Anzahl'),
alt.Color('name:N', sort='-y', title='Bezeichnung'),
tooltip = [alt.Tooltip('id', title='Notation'), alt.Tooltip('name', title='Bezeichnung'), alt.Tooltip('count', title='Anzahl')]
)
return st.altair_chart(classification_ts_count, use_container_width=True)
def zeitverlauf():
#zeitverlauf der erstellung der GND-Sätze ab Januar 1972
created_at = pd.read_csv(f'{path}/../stats/gnd_created_at.csv', index_col='created_at', parse_dates=True, header=0, names=['created_at', 'count'])
st.subheader('Zeitverlauf der GND-Datensatzerstellung')
st.write('Auf einer Zeitleiste wird die Anzahl der monatlich erstellten GND-Sätze aufgetragen. Die ersten Sätze stammen aus dem Januar 1972')
created_filt = st.slider('Zeitraum', 1972, 2021, (1972,2021), 1)
created = alt.Chart(created_at[f'{created_filt[0]}':f'{created_filt[1]}'].reset_index()).mark_line().encode(
alt.X('created_at:T', title='Erstelldatum'),
alt.Y('count:Q', title='Sätze pro Monat'),
tooltip=['count']
)
return st.altair_chart(created, use_container_width=True)
def entities():
#GND-Entitäten nach Satzart und Katalogisierungslevel
df = pd.read_csv(f'{path}/../stats/gnd_entity_types.csv', index_col=False, names=['entity','count'])
df['level'] = df.entity.str[2:3]
df.entity = df.entity.str[:2]
if satzart == 'alle':
entity_count = alt.Chart(df).mark_bar().encode(
alt.X('sum(count)', title='Datensätze pro Katalogisierungslevel'),
alt.Y('entity', title='Satzart'),
alt.Color('level', title='Katalogisierungslevel'),
tooltip=[alt.Tooltip('entity', title='Satzart'), alt.Tooltip( 'level', title='Katalogisierungslevel'), alt.Tooltip('count', title='Anzahl')]
)
st.subheader('Entitäten und Katalogisierungslevel')
else:
entity_count = alt.Chart(df.loc[df['entity'].str.startswith(satzart[:2])]).mark_bar().encode(
alt.X('sum(count)', title='Datensätze pro Katalogisierungslevel'),
alt.Y('entity', title='Satzart'),
alt.Color('level', title='Katalogisierungslevel'),
tooltip=[alt.Tooltip( 'level', title='Katalogisierungslevel'), alt.Tooltip('count', title='Anzahl')]
)
st.subheader(f'Katalogisierungslevel in Satzart {satzart}')
st.write('Alle GND-Entitäten können in verschiedenen Katalogisierungsleveln (1-7) angelegt werden. Je niedriger das Katalogisierungslevel, desto verlässlicher die Daten, weil Sie dann von qualifizierten Personen erstellt bzw. überprüft wurden.')
return st.altair_chart(entity_count, use_container_width=True)
def newcomer():
#TOP 10 der Entitäten, die in den letzten 365 Tagen erstellt wurden
if satzart == 'alle':
st.subheader(f'TOP 10 GND-Newcomer')
st.write('TOP 10 der GND-Entitäten, die in den letzten 365 Tagen angelegt wurden.')
newcomer_daten = pd.read_csv(f'{path}/../stats/title_gnd_newcomer_top10.csv', index_col=None)
newcomer = alt.Chart(newcomer_daten).mark_bar().encode(
alt.X('gnd_id', title='Entitäten', sort='-y'),
alt.Y('count', title='Anzahl'),
alt.Color('name', sort='-y', title='Entität'),
tooltip=[alt.Tooltip('name:N', title='Entität'), alt.Tooltip('bbg:N', title='Satzart'), alt.Tooltip('gnd_id:N', title='IDN'), alt.Tooltip('count:Q', title='Anzahl')]
)
else:
st.subheader(f'TOP 10 {satzart} GND-Newcomer')
st.write(f'TOP 10 der {satzart} Sätze, die in den letzten 365 Tagen angelegt wurden.')
newcomer_daten = load_gnd_top_daten('newcomer_top10')
newcomer = alt.Chart(newcomer_daten.loc[newcomer_daten['bbg'].str.startswith(satzart[:2], na=False)]).mark_bar().encode(
alt.X('gnd_id:O', title='Entitäten', sort='-y'),
alt.Y('count', title='Anzahl'),
alt.Color('name', sort='-y', title='Entität'),
tooltip=[alt.Tooltip('name:N', title='Entität'), alt.Tooltip('gnd_id:N', title='IDN'), alt.Tooltip('count:Q', title='Anzahl')]
)
st.altair_chart(newcomer, use_container_width=True)
def gnd_top():
#TOP 10 GND-Entitäten in DNB-Titeldaten, nach Satzart gefiltert
if satzart == 'alle':
st.subheader(f'TOP 10 GND-Entitäten in DNB-Titeldaten')
top_daten = pd.read_csv(f'{path}/../stats/title_gnd_top10.csv', index_col=None)
gnd_top = alt.Chart(top_daten).mark_bar().encode(
alt.X('gnd_id:N', title='Entitäten', sort='-y'),
alt.Y('count:Q', title='Anzahl'),
alt.Color('name:N', sort='-y', title='Entität'),
tooltip=[alt.Tooltip('name:N', title='Entität'), alt.Tooltip('gnd_id:N', title='IDN'), alt.Tooltip('bbg:N', title='Satzart'), alt.Tooltip('count:Q', title='Anzahl')]
)
else:
st.subheader(f'TOP 10 {satzart} in DNB-Titeldaten')
top_daten = load_gnd_top_daten('top10')
gnd_top = alt.Chart(top_daten.loc[top_daten['bbg'].str.startswith(satzart[:2], na=False)]).mark_bar().encode(
alt.X('gnd_id:N', title='Entitäten', sort='-y'),
alt.Y('count:Q', title='Anzahl'),
alt.Color('name:N', sort='-y', title='Entität'),
tooltip=[alt.Tooltip('name:N', title='Entität'), alt.Tooltip('gnd_id:N', title='IDN'), alt.Tooltip('count:Q', title='Anzahl')]
)
st.write('Verknüpfungen, die maschinell erzeugt wurden, aus Fremddaten stammen oder verwaist sind, wurden nicht in die Auswertung einbezogen. Eine detaillierte Auflistung der ausgewerteten Felder ist im [GitHub-Repository](https://git.io/JG5vN) dieses Dashboards dokumentiert.')
st.altair_chart(gnd_top, use_container_width=True)
def dnb_links():
#GND-Verknüpfungen in DNB Titeldaten
if satzart == 'alle':
#Anzahl GND-Verknüpfungen in DNB-Titeldaten
with open(f"{path}/../stats/title_gnd_links.csv", "r") as f:
links = f'{int(f.read()):,}'
#GND-Entitäten maschinell verknüpft
with open(f"{path}/../stats/title_gnd_links_auto.csv", "r") as f:
auto_entites = int(f.read())
#GND-Entitäten aus Fremddaten
with open(f"{path}/../stats/title_gnd_links_ext.csv", "r") as f:
fremd_entities = int(f.read())
        #Anzahl der intellektuell verknüpften GND-Entitäten in DNB-Titeldaten
with open(f"{path}/../stats/title_gnd_links_unique.csv", "r") as f:
uniques = int(f.read())
uniques_str = f'{uniques:,}'
#Durchschnittliche Anzahl an GND-Verknüpfungen pro DNB-Titeldatensatz
with open(f"{path}/../stats/title_gnd_mean.csv", "r") as f:
mean = str(round(float(f.read()),2)).replace('.',',')
st.write(f"{links.replace(',','.')} intellektuell vergebene Verknüpfungen zu {uniques_str.replace(',','.')} GND-Entitäten in den DNB-Titeldaten. Durchschnittlich {mean} GND-Verknüpfungen pro DNB-Titeldatensatz")
entity_df = pd.DataFrame.from_dict({"intellektuell verknüpfte Entitäten": uniques, "Entitäten aus automatischen Prozessen": auto_entites, "Entitäten aus Fremddaten": fremd_entities}, orient = "index").reset_index()
entity_df = entity_df.rename(columns={"index":"Datenart", 0:"Anzahl"})
st.subheader('Datenherkunft der GND-Entitäten in DNB-Titeldaten')
st.write('Weniger als ein Drittel der GND-Entitäten in DNB-Titeldaten wurde in intellektuellen Erschließungsprozessen vergeben. Jeweils ca. ein weiteres Drittel wurde in maschinellen Erschließungsprozessen vergeben, ca. ein Drittel stammt aus Fremddaten.')
entities = alt.Chart(entity_df).mark_bar().encode(
alt.X('sum(Datenart):N', title='Datenart'),
alt.Y('sum(Anzahl):Q', title='Anzahl'),
color='Datenart',
tooltip='Anzahl:N'
)
st.altair_chart(entities, use_container_width=True)
else:
with open(f"{path}/../stats/title_gnd_mean_{satzart[:2]}.csv", "r") as f:
mean = str(round(float(f.read()),2)).replace('.',',')
st.write(f'Durchschnittlich {mean} Verknüpfungen zu {satzart}-Sätzen pro DNB-Titeldatensatz')
#main
st.title('GND-Dashboard')
#infoebereich oben
with st.beta_container():
st.info('Hier finden Sie statistische Auswertungen der GND und ihrer Verknüpfungen mit den Titeldaten der Deutschen Nationalbibliothek (Stand der Daten: Juli 2021). Wählen Sie links die Satzart, die Sie interessiert, und Sie erhalten die verfügbaren Auswertungen und Statstiken. Verwenden Sie einen auf Chromium basierenden Browser.')
with st.beta_expander("Methodik und Datenherkunft"):
st.markdown('''
Datengrundlage ist ein Gesamtabzug der Daten der Gemeinsamen Normadatei (GND) sowie der Titeldaten der Deutschen Nationalbibliothek (DNB) inkl. Zeitschriftendatenbank (ZDB), sofern sich Exemplare der Zeitschrift im Bestand der DNB befinden. In den Titeldaten ist auch der Tonträger- und Notenbestand des Deutschen Musikarchivs (DMA) sowie der Buch- und Objektbestand des Deutschen Buch- und Schriftmuseums (DBSM) nachgewiesen.
Der Gesamtabzug liegt im OCLC-Format PICA+ vor. Die Daten werden mithilfe des Pica-Parsers [pica.rs](https://github.com/deutsche-nationalbibliothek/pica-rs) gefiltert. Dieses Tool produziert aus dem sehr großen Gesamtabzug (~ 31 GB) kleinere CSV-Dateien, die mit Python weiterverarbeitet werden.
Das Dashboard ist mit dem Python-Framework [Streamlit](https://streamlit.io/) geschrieben. Die Skripte sowie die gefilterten CSV-Rohdaten sind auf [Github](https://github.com/buchmuseum/GND_Dashboard) zu finden. Die Diagramme wurden mit [Altair](https://altair-viz.github.io/index.html) erstellt, die Karten mit [Deck GL](https://deck.gl/) (via [Pydeck](https://deckgl.readthedocs.io/en/latest/#)), die Wordcloud mit [wordcloud](https://amueller.github.io/word_cloud/index.html).
Für grundlegende Zugriffsstatistik verwenden wir [streamlit-analytics](https://pypi.org/project/streamlit-analytics/). Dabei werden keine personenbezogenen Daten gespeichert.
Alle Skripte und Daten stehen unter CC0 Lizenz und können frei weitergenutzt werden.
Die Daten werden monatlich aktualisiert.
''')
#sidebar mit satzartenfilter
st.sidebar.header("Satzart wählen")
satzart = st.sidebar.selectbox(
"Über welche GND-Satzart möchten Sie etwas erfahren?",
('alle', "Tp - Personen", "Tb - Körperschaften", "Tg - Geografika", "Ts - Sachbegriffe", "Tu - Werke", "Tf - Veranstaltungen")
)
st.sidebar.info('Diese Widgets haben die GitHub-User [niko2342](https://github.com/niko2342/), [ramonvoges](https://github.com/ramonvoges), [a-wendler](https://github.com/a-wendler/) sowie <NAME> geschrieben. Sie gehören zur Python Community der Deutschen Nationalbibliothek.')
gnd_allgemein = st.beta_container()
with gnd_allgemein:
st.header('GND Statistik allgemein')
    # general statistics depending on the selected record type
if satzart == 'alle':
gesamt_entity_count()
entities()
newcomer()
zeitverlauf()
relationen()
systematik()
else:
entities()
newcomer()
        # special widgets for individual record types
if satzart == "Tp - Personen":
wirkungsorte()
elif satzart == "Tg - Geografika":
wirkungsorte_musik()
wirkungsorte()
elif satzart == "Ts - Sachbegriffe":
sachbegriff_cloud()
systematik_ts()
dnb = st.beta_container()
with dnb:
st.header('GND in der Deutschen Nationalbibliothek')
gnd_top()
dnb_links()
streamlit_analytics.stop_tracking() | [
"pydeck.Layer",
"pandas.read_csv",
"altair.Chart",
"pydeck.Deck",
"pydeck.ViewState",
"altair.X",
"altair.Y",
"altair.Legend",
"streamlit.header",
"streamlit.title",
"streamlit.sidebar.info",
"streamlit_analytics.stop_tracking",
"streamlit.sidebar.header",
"pandas.DataFrame.from_dict",
"streamlit.select_slider",
"streamlit_analytics.start_tracking",
"pandas.DataFrame",
"streamlit.beta_container",
"glob.glob",
"streamlit.markdown",
"streamlit.altair_chart",
"streamlit.beta_columns",
"streamlit.write",
"os.path.dirname",
"streamlit.subheader",
"altair.Color",
"wordcloud.WordCloud",
"streamlit.sidebar.selectbox",
"altair.Tooltip",
"streamlit.info",
"streamlit.slider",
"streamlit.beta_expander"
] | [((210, 235), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (225, 235), False, 'import os\n'), ((237, 273), 'streamlit_analytics.start_tracking', 'streamlit_analytics.start_tracking', ([], {}), '()\n', (271, 273), False, 'import streamlit_analytics\n'), ((16582, 16607), 'streamlit.title', 'st.title', (['"""GND-Dashboard"""'], {}), "('GND-Dashboard')\n", (16590, 16607), True, 'import streamlit as st\n'), ((18618, 18653), 'streamlit.sidebar.header', 'st.sidebar.header', (['"""Satzart wählen"""'], {}), "('Satzart wählen')\n", (18635, 18653), True, 'import streamlit as st\n'), ((18664, 18875), 'streamlit.sidebar.selectbox', 'st.sidebar.selectbox', (['"""Über welche GND-Satzart möchten Sie etwas erfahren?"""', "('alle', 'Tp - Personen', 'Tb - Körperschaften', 'Tg - Geografika',\n 'Ts - Sachbegriffe', 'Tu - Werke', 'Tf - Veranstaltungen')"], {}), "('Über welche GND-Satzart möchten Sie etwas erfahren?',\n ('alle', 'Tp - Personen', 'Tb - Körperschaften', 'Tg - Geografika',\n 'Ts - Sachbegriffe', 'Tu - Werke', 'Tf - Veranstaltungen'))\n", (18684, 18875), True, 'import streamlit as st\n'), ((18878, 19165), 'streamlit.sidebar.info', 'st.sidebar.info', (['"""Diese Widgets haben die GitHub-User [niko2342](https://github.com/niko2342/), [ramonvoges](https://github.com/ramonvoges), [a-wendler](https://github.com/a-wendler/) sowie <NAME> geschrieben. Sie gehören zur Python Community der Deutschen Nationalbibliothek."""'], {}), "(\n 'Diese Widgets haben die GitHub-User [niko2342](https://github.com/niko2342/), [ramonvoges](https://github.com/ramonvoges), [a-wendler](https://github.com/a-wendler/) sowie <NAME> geschrieben. Sie gehören zur Python Community der Deutschen Nationalbibliothek.'\n )\n", (18893, 19165), True, 'import streamlit as st\n'), ((19173, 19192), 'streamlit.beta_container', 'st.beta_container', ([], {}), '()\n', (19190, 19192), True, 'import streamlit as st\n'), ((19816, 19835), 'streamlit.beta_container', 'st.beta_container', ([], {}), '()\n', (19833, 19835), True, 'import streamlit as st\n'), ((19934, 19969), 'streamlit_analytics.stop_tracking', 'streamlit_analytics.stop_tracking', ([], {}), '()\n', (19967, 19969), False, 'import streamlit_analytics\n'), ((331, 345), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (343, 345), True, 'import pandas as pd\n'), ((362, 413), 'glob.glob', 'glob.glob', (['f"""{path}/../stats/title_gnd_{typ}_*.csv"""'], {}), "(f'{path}/../stats/title_gnd_{typ}_*.csv')\n", (371, 413), False, 'import glob\n'), ((632, 673), 'streamlit.header', 'st.header', (['"""TOP 100 Sachbegriffe pro Tag"""'], {}), "('TOP 100 Sachbegriffe pro Tag')\n", (641, 673), True, 'import streamlit as st\n'), ((678, 958), 'streamlit.write', 'st.write', (['"""Wählen Sie ein Datum aus den letzten 10 Werktagen vor der letzten Aktualisierung der Daten des Dashboards und sehen Sie eine Wordcloud der 100 meistverwendeten GND-Sachbegriffe dieses Tages. Die Größe des Begriffes entspricht der Häufigkeit des Sachbegriffs."""'], {}), "(\n 'Wählen Sie ein Datum aus den letzten 10 Werktagen vor der letzten Aktualisierung der Daten des Dashboards und sehen Sie eine Wordcloud der 100 meistverwendeten GND-Sachbegriffe dieses Tages. 
Die Größe des Begriffes entspricht der Häufigkeit des Sachbegriffs.'\n )\n", (686, 958), True, 'import streamlit as st\n'), ((961, 1004), 'glob.glob', 'glob.glob', (['f"""{path}/../stats/*Ts-count.csv"""'], {}), "(f'{path}/../stats/*Ts-count.csv')\n", (970, 1004), False, 'import glob\n'), ((1081, 1153), 'streamlit.select_slider', 'st.select_slider', (['"""Wählen Sie ein Datum"""'], {'options': 'daten', 'value': 'daten[-1]'}), "('Wählen Sie ein Datum', options=daten, value=daten[-1])\n", (1097, 1153), True, 'import streamlit as st\n'), ((1164, 1223), 'pandas.read_csv', 'pd.read_csv', (['f"""{path}/../stats/{daten_filter}-Ts-count.csv"""'], {}), "(f'{path}/../stats/{daten_filter}-Ts-count.csv')\n", (1175, 1223), True, 'import pandas as pd\n'), ((1380, 1476), 'wordcloud.WordCloud', 'WordCloud', ([], {'background_color': '"""white"""', 'max_words': '(100)', 'width': '(2000)', 'height': '(800)', 'colormap': '"""tab20"""'}), "(background_color='white', max_words=100, width=2000, height=800,\n colormap='tab20')\n", (1389, 1476), False, 'from wordcloud import WordCloud\n'), ((1661, 1706), 'pandas.read_csv', 'pd.read_csv', (['f"""{path}/wirkungsorte-top50.csv"""'], {}), "(f'{path}/wirkungsorte-top50.csv')\n", (1672, 1706), True, 'import pandas as pd\n'), ((1827, 1873), 'streamlit.header', 'st.header', (['"""TOP Wirkungsorte von GND-Personen"""'], {}), "('TOP Wirkungsorte von GND-Personen')\n", (1836, 1873), True, 'import streamlit as st\n'), ((1878, 1998), 'streamlit.markdown', 'st.markdown', (['"""Von allen Personensätzen (Tp) weisen 782.682 Angaben zum Wirkungsort der jeweiligen Person auf."""'], {}), "(\n 'Von allen Personensätzen (Tp) weisen 782.682 Angaben zum Wirkungsort der jeweiligen Person auf.'\n )\n", (1889, 1998), True, 'import streamlit as st\n'), ((2417, 2471), 'streamlit.altair_chart', 'st.altair_chart', (['graph_count'], {'use_container_width': '(True)'}), '(graph_count, use_container_width=True)\n', (2432, 2471), True, 'import streamlit as st\n'), ((2509, 2618), 'pydeck.ViewState', 'pdk.ViewState', ([], {'latitude': '(50.67877877706058)', 'longitude': '(8.129981238464392)', 'zoom': '(4.5)', 'max_zoom': '(16)', 'bearing': '(0)'}), '(latitude=50.67877877706058, longitude=8.129981238464392, zoom\n =4.5, max_zoom=16, bearing=0)\n', (2522, 2618), True, 'import pydeck as pdk\n'), ((2664, 2937), 'pydeck.Layer', 'pdk.Layer', (['"""ScatterplotLayer"""', 'df'], {'pickable': '(True)', 'opacity': '(0.5)', 'stroked': '(True)', 'filled': '(True)', 'radius_min_pixels': '(1)', 'radius_max_pixels': '(100)', 'line_width_min_pixels': '(1)', 'get_position': '"""[lon, lat]"""', 'get_radius': '"""Anzahl"""', 'get_fill_color': '[255, 140, 0]', 'get_line_color': '[0, 0, 0]'}), "('ScatterplotLayer', df, pickable=True, opacity=0.5, stroked=True,\n filled=True, radius_min_pixels=1, radius_max_pixels=100,\n line_width_min_pixels=1, get_position='[lon, lat]', get_radius='Anzahl',\n get_fill_color=[255, 140, 0], get_line_color=[0, 0, 0])\n", (2673, 2937), True, 'import pydeck as pdk\n'), ((3432, 3498), 'pandas.read_csv', 'pd.read_csv', (['f"""{path}/musiker_orte.csv"""'], {'sep': '"""\t"""', 'index_col': '"""idn"""'}), "(f'{path}/musiker_orte.csv', sep='\\t', index_col='idn')\n", (3443, 3498), True, 'import pandas as pd\n'), ((3503, 3551), 'streamlit.header', 'st.header', (['"""Wirkungszentren der Musik 1400–2010"""'], {}), "('Wirkungszentren der Musik 1400–2010')\n", (3512, 3551), True, 'import streamlit as st\n'), ((3556, 3757), 'streamlit.write', 'st.write', (['"""Eine Auswertung der veröffentlichten Titel 
von Musikern und deren Wirkungszeiten erlaubt Rückschlüsse auf die musikalischen Zentren, wie sie im Bestand der DNB repräsentiert sind."""'], {}), "(\n 'Eine Auswertung der veröffentlichten Titel von Musikern und deren Wirkungszeiten erlaubt Rückschlüsse auf die musikalischen Zentren, wie sie im Bestand der DNB repräsentiert sind.'\n )\n", (3564, 3757), True, 'import streamlit as st\n'), ((4109, 4218), 'pydeck.ViewState', 'pdk.ViewState', ([], {'latitude': '(50.67877877706058)', 'longitude': '(8.129981238464392)', 'zoom': '(4.5)', 'max_zoom': '(16)', 'bearing': '(0)'}), '(latitude=50.67877877706058, longitude=8.129981238464392, zoom\n =4.5, max_zoom=16, bearing=0)\n', (4122, 4218), True, 'import pydeck as pdk\n'), ((4283, 4594), 'pydeck.Layer', 'pdk.Layer', (['"""ScatterplotLayer"""', 'musik_filt'], {'opacity': '(0.8)', 'get_position': '"""[lon, lat]"""', 'pickable': '(True)', 'stroked': '(True)', 'filled': '(True)', 'radius_min_pixels': '(1)', 'radius_max_pixels': '(100)', 'radiusscale': '(100)', 'line_width_min_pixels': '(1)', 'get_radius': '"""norm*50000"""', 'get_fill_color': '[50, 168, 92]', 'get_line_color': '[39, 71, 51]'}), "('ScatterplotLayer', musik_filt, opacity=0.8, get_position=\n '[lon, lat]', pickable=True, stroked=True, filled=True,\n radius_min_pixels=1, radius_max_pixels=100, radiusscale=100,\n line_width_min_pixels=1, get_radius='norm*50000', get_fill_color=[50, \n 168, 92], get_line_color=[39, 71, 51])\n", (4292, 4594), True, 'import pydeck as pdk\n'), ((4870, 4925), 'streamlit.subheader', 'st.subheader', (['f"""TOP 10 Wirkungszentren der {limiter}er"""'], {}), "(f'TOP 10 Wirkungszentren der {limiter}er')\n", (4882, 4925), True, 'import streamlit as st\n'), ((4943, 4961), 'streamlit.beta_columns', 'st.beta_columns', (['(2)'], {}), '(2)\n', (4958, 4961), True, 'import streamlit as st\n'), ((5547, 5613), 'pandas.read_csv', 'pd.read_csv', (['f"""{path}/../stats/gnd_codes_all.csv"""'], {'index_col': '(False)'}), "(f'{path}/../stats/gnd_codes_all.csv', index_col=False)\n", (5558, 5613), True, 'import pandas as pd\n'), ((5618, 5644), 'streamlit.subheader', 'st.subheader', (['"""Relationen"""'], {}), "('Relationen')\n", (5630, 5644), True, 'import streamlit as st\n'), ((5649, 6039), 'streamlit.write', 'st.write', (['"""GND-Datensätze können mit anderen Datensätzen verlinkt (»relationiert«) werden. Die Art der Verlinkung wird über einen Relationierungscode beschrieben. Hier sind die am häufigsten verwendeten Relationierungscodes zu sehen. Die Auflösung der wichtigsten Codes gibt es [hier](https://wiki.dnb.de/download/attachments/51283696/Codeliste_ABCnachCode_Webseite_2012-07.pdf)."""'], {}), "(\n 'GND-Datensätze können mit anderen Datensätzen verlinkt (»relationiert«) werden. Die Art der Verlinkung wird über einen Relationierungscode beschrieben. Hier sind die am häufigsten verwendeten Relationierungscodes zu sehen. 
Die Auflösung der wichtigsten Codes gibt es [hier](https://wiki.dnb.de/download/attachments/51283696/Codeliste_ABCnachCode_Webseite_2012-07.pdf).'\n )\n", (5657, 6039), True, 'import streamlit as st\n'), ((6462, 6519), 'streamlit.altair_chart', 'st.altair_chart', (['relation_count'], {'use_container_width': '(True)'}), '(relation_count, use_container_width=True)\n', (6477, 6519), True, 'import streamlit as st\n'), ((6814, 6889), 'pandas.read_csv', 'pd.read_csv', (['f"""{path}/../stats/gnd_classification_all.csv"""'], {'index_col': '(False)'}), "(f'{path}/../stats/gnd_classification_all.csv', index_col=False)\n", (6825, 6889), True, 'import pandas as pd\n'), ((6894, 6920), 'streamlit.subheader', 'st.subheader', (['"""Systematik"""'], {}), "('Systematik')\n", (6906, 6920), True, 'import streamlit as st\n'), ((6925, 7092), 'streamlit.write', 'st.write', (['"""Die Entitäten der GND können in eine Systematik eingeordnet werden. Die Liste der möglichen Notationen gibt es [hier](http://www.dnb.de/gndsyst)."""'], {}), "(\n 'Die Entitäten der GND können in eine Systematik eingeordnet werden. Die Liste der möglichen Notationen gibt es [hier](http://www.dnb.de/gndsyst).'\n )\n", (6933, 7092), True, 'import streamlit as st\n'), ((7570, 7633), 'streamlit.altair_chart', 'st.altair_chart', (['classification_count'], {'use_container_width': '(True)'}), '(classification_count, use_container_width=True)\n', (7585, 7633), True, 'import streamlit as st\n'), ((7722, 7800), 'pandas.read_csv', 'pd.read_csv', (['f"""{path}/../stats/gnd_classification_Ts_all.csv"""'], {'index_col': '(False)'}), "(f'{path}/../stats/gnd_classification_Ts_all.csv', index_col=False)\n", (7733, 7800), True, 'import pandas as pd\n'), ((7805, 7848), 'streamlit.subheader', 'st.subheader', (['"""Systematik der Sachbegriffe"""'], {}), "('Systematik der Sachbegriffe')\n", (7817, 7848), True, 'import streamlit as st\n'), ((7853, 8091), 'streamlit.write', 'st.write', (['"""Die Entitäten der GND können in eine Systematik eingeordnet werden. Hier sind die Systematik-Notationen der Sachbegriffe (Ts) aufgetragen. Die Liste der möglichen Notationen gibt es [hier](http://www.dnb.de/gndsyst)."""'], {}), "(\n 'Die Entitäten der GND können in eine Systematik eingeordnet werden. Hier sind die Systematik-Notationen der Sachbegriffe (Ts) aufgetragen. Die Liste der möglichen Notationen gibt es [hier](http://www.dnb.de/gndsyst).'\n )\n", (7861, 8091), True, 'import streamlit as st\n'), ((8623, 8689), 'streamlit.altair_chart', 'st.altair_chart', (['classification_ts_count'], {'use_container_width': '(True)'}), '(classification_ts_count, use_container_width=True)\n', (8638, 8689), True, 'import streamlit as st\n'), ((8788, 8925), 'pandas.read_csv', 'pd.read_csv', (['f"""{path}/../stats/gnd_created_at.csv"""'], {'index_col': '"""created_at"""', 'parse_dates': '(True)', 'header': '(0)', 'names': "['created_at', 'count']"}), "(f'{path}/../stats/gnd_created_at.csv', index_col='created_at',\n parse_dates=True, header=0, names=['created_at', 'count'])\n", (8799, 8925), True, 'import pandas as pd\n'), ((8931, 8986), 'streamlit.subheader', 'st.subheader', (['"""Zeitverlauf der GND-Datensatzerstellung"""'], {}), "('Zeitverlauf der GND-Datensatzerstellung')\n", (8943, 8986), True, 'import streamlit as st\n'), ((8991, 9142), 'streamlit.write', 'st.write', (['"""Auf einer Zeitleiste wird die Anzahl der monatlich erstellten GND-Sätze aufgetragen. 
Die ersten Sätze stammen aus dem Januar 1972"""'], {}), "(\n 'Auf einer Zeitleiste wird die Anzahl der monatlich erstellten GND-Sätze aufgetragen. Die ersten Sätze stammen aus dem Januar 1972'\n )\n", (8999, 9142), True, 'import streamlit as st\n'), ((9152, 9202), 'streamlit.slider', 'st.slider', (['"""Zeitraum"""', '(1972)', '(2021)', '(1972, 2021)', '(1)'], {}), "('Zeitraum', 1972, 2021, (1972, 2021), 1)\n", (9161, 9202), True, 'import streamlit as st\n'), ((9462, 9512), 'streamlit.altair_chart', 'st.altair_chart', (['created'], {'use_container_width': '(True)'}), '(created, use_container_width=True)\n', (9477, 9512), True, 'import streamlit as st\n'), ((9597, 9698), 'pandas.read_csv', 'pd.read_csv', (['f"""{path}/../stats/gnd_entity_types.csv"""'], {'index_col': '(False)', 'names': "['entity', 'count']"}), "(f'{path}/../stats/gnd_entity_types.csv', index_col=False, names\n =['entity', 'count'])\n", (9608, 9698), True, 'import pandas as pd\n'), ((10756, 11011), 'streamlit.write', 'st.write', (['"""Alle GND-Entitäten können in verschiedenen Katalogisierungsleveln (1-7) angelegt werden. Je niedriger das Katalogisierungslevel, desto verlässlicher die Daten, weil Sie dann von qualifizierten Personen erstellt bzw. überprüft wurden."""'], {}), "(\n 'Alle GND-Entitäten können in verschiedenen Katalogisierungsleveln (1-7) angelegt werden. Je niedriger das Katalogisierungslevel, desto verlässlicher die Daten, weil Sie dann von qualifizierten Personen erstellt bzw. überprüft wurden.'\n )\n", (10764, 11011), True, 'import streamlit as st\n'), ((11013, 11068), 'streamlit.altair_chart', 'st.altair_chart', (['entity_count'], {'use_container_width': '(True)'}), '(entity_count, use_container_width=True)\n', (11028, 11068), True, 'import streamlit as st\n'), ((12508, 12559), 'streamlit.altair_chart', 'st.altair_chart', (['newcomer'], {'use_container_width': '(True)'}), '(newcomer, use_container_width=True)\n', (12523, 12559), True, 'import streamlit as st\n'), ((13796, 14084), 'streamlit.write', 'st.write', (['"""Verknüpfungen, die maschinell erzeugt wurden, aus Fremddaten stammen oder verwaist sind, wurden nicht in die Auswertung einbezogen. Eine detaillierte Auflistung der ausgewerteten Felder ist im [GitHub-Repository](https://git.io/JG5vN) dieses Dashboards dokumentiert."""'], {}), "(\n 'Verknüpfungen, die maschinell erzeugt wurden, aus Fremddaten stammen oder verwaist sind, wurden nicht in die Auswertung einbezogen. Eine detaillierte Auflistung der ausgewerteten Felder ist im [GitHub-Repository](https://git.io/JG5vN) dieses Dashboards dokumentiert.'\n )\n", (13804, 14084), True, 'import streamlit as st\n'), ((14079, 14129), 'streamlit.altair_chart', 'st.altair_chart', (['gnd_top'], {'use_container_width': '(True)'}), '(gnd_top, use_container_width=True)\n', (14094, 14129), True, 'import streamlit as st\n'), ((16633, 16652), 'streamlit.beta_container', 'st.beta_container', ([], {}), '()\n', (16650, 16652), True, 'import streamlit as st\n'), ((16658, 17002), 'streamlit.info', 'st.info', (['"""Hier finden Sie statistische Auswertungen der GND und ihrer Verknüpfungen mit den Titeldaten der Deutschen Nationalbibliothek (Stand der Daten: Juli 2021). Wählen Sie links die Satzart, die Sie interessiert, und Sie erhalten die verfügbaren Auswertungen und Statstiken. Verwenden Sie einen auf Chromium basierenden Browser."""'], {}), "(\n 'Hier finden Sie statistische Auswertungen der GND und ihrer Verknüpfungen mit den Titeldaten der Deutschen Nationalbibliothek (Stand der Daten: Juli 2021). 
Wählen Sie links die Satzart, die Sie interessiert, und Sie erhalten die verfügbaren Auswertungen und Statstiken. Verwenden Sie einen auf Chromium basierenden Browser.'\n )\n", (16665, 17002), True, 'import streamlit as st\n'), ((19217, 19253), 'streamlit.header', 'st.header', (['"""GND Statistik allgemein"""'], {}), "('GND Statistik allgemein')\n", (19226, 19253), True, 'import streamlit as st\n'), ((19850, 19902), 'streamlit.header', 'st.header', (['"""GND in der Deutschen Nationalbibliothek"""'], {}), "('GND in der Deutschen Nationalbibliothek')\n", (19859, 19902), True, 'import streamlit as st\n'), ((2202, 2227), 'altair.X', 'alt.X', (['"""Name:N"""'], {'sort': '"""y"""'}), "('Name:N', sort='y')\n", (2207, 2227), True, 'import altair as alt\n'), ((2237, 2252), 'altair.Y', 'alt.Y', (['"""Anzahl"""'], {}), "('Anzahl')\n", (2242, 2252), True, 'import altair as alt\n'), ((3001, 3182), 'pydeck.Deck', 'pdk.Deck', (['scatterplotlayer'], {'initial_view_state': 'INITIAL_VIEW_STATE', 'map_style': 'pdk.map_styles.LIGHT', 'tooltip': "{'html': '<b>{Name}</b><br \\\\>Wirkungsort von {Anzahl} Personen'}"}), "(scatterplotlayer, initial_view_state=INITIAL_VIEW_STATE, map_style\n =pdk.map_styles.LIGHT, tooltip={'html':\n '<b>{Name}</b><br \\\\>Wirkungsort von {Anzahl} Personen'})\n", (3009, 3182), True, 'import pydeck as pdk\n'), ((4716, 4852), 'pydeck.Deck', 'pdk.Deck', (['musiker_scatter'], {'initial_view_state': 'INITIAL_VIEW_STATE', 'map_style': 'pdk.map_styles.LIGHT', 'tooltip': "{'html': '<b>{name}</b>'}"}), "(musiker_scatter, initial_view_state=INITIAL_VIEW_STATE, map_style=\n pdk.map_styles.LIGHT, tooltip={'html': '<b>{name}</b>'})\n", (4724, 4852), True, 'import pydeck as pdk\n'), ((6199, 6253), 'altair.X', 'alt.X', (['"""code"""'], {'title': '"""Relationierungs-Code"""', 'sort': '"""-y"""'}), "('code', title='Relationierungs-Code', sort='-y')\n", (6204, 6253), True, 'import altair as alt\n'), ((6263, 6293), 'altair.Y', 'alt.Y', (['"""count"""'], {'title': '"""Anzahl"""'}), "('count', title='Anzahl')\n", (6268, 6293), True, 'import altair as alt\n'), ((6303, 6360), 'altair.Color', 'alt.Color', (['"""code"""'], {'sort': '"""-y"""', 'title': '"""Relationierungscode"""'}), "('code', sort='-y', title='Relationierungscode')\n", (6312, 6360), True, 'import altair as alt\n'), ((7278, 7318), 'altair.X', 'alt.X', (['"""id"""'], {'title': '"""Notation"""', 'sort': '"""-y"""'}), "('id', title='Notation', sort='-y')\n", (7283, 7318), True, 'import altair as alt\n'), ((7328, 7358), 'altair.Y', 'alt.Y', (['"""count"""'], {'title': '"""Anzahl"""'}), "('count', title='Anzahl')\n", (7333, 7358), True, 'import altair as alt\n'), ((7368, 7417), 'altair.Color', 'alt.Color', (['"""name"""'], {'sort': '"""-y"""', 'title': '"""Bezeichnung"""'}), "('name', sort='-y', title='Bezeichnung')\n", (7377, 7417), True, 'import altair as alt\n'), ((8323, 8365), 'altair.X', 'alt.X', (['"""id:N"""'], {'title': '"""Notation"""', 'sort': '"""-y"""'}), "('id:N', title='Notation', sort='-y')\n", (8328, 8365), True, 'import altair as alt\n'), ((8375, 8407), 'altair.Y', 'alt.Y', (['"""count:Q"""'], {'title': '"""Anzahl"""'}), "('count:Q', title='Anzahl')\n", (8380, 8407), True, 'import altair as alt\n'), ((8417, 8468), 'altair.Color', 'alt.Color', (['"""name:N"""'], {'sort': '"""-y"""', 'title': '"""Bezeichnung"""'}), "('name:N', sort='-y', title='Bezeichnung')\n", (8426, 8468), True, 'import altair as alt\n'), ((9323, 9366), 'altair.X', 'alt.X', (['"""created_at:T"""'], {'title': '"""Erstelldatum"""'}), "('created_at:T', 
title='Erstelldatum')\n", (9328, 9366), True, 'import altair as alt\n'), ((9376, 9417), 'altair.Y', 'alt.Y', (['"""count:Q"""'], {'title': '"""Sätze pro Monat"""'}), "('count:Q', title='Sätze pro Monat')\n", (9381, 9417), True, 'import altair as alt\n'), ((10207, 10258), 'streamlit.subheader', 'st.subheader', (['"""Entitäten und Katalogisierungslevel"""'], {}), "('Entitäten und Katalogisierungslevel')\n", (10219, 10258), True, 'import streamlit as st\n'), ((10692, 10751), 'streamlit.subheader', 'st.subheader', (['f"""Katalogisierungslevel in Satzart {satzart}"""'], {}), "(f'Katalogisierungslevel in Satzart {satzart}')\n", (10704, 10751), True, 'import streamlit as st\n'), ((11192, 11228), 'streamlit.subheader', 'st.subheader', (['f"""TOP 10 GND-Newcomer"""'], {}), "(f'TOP 10 GND-Newcomer')\n", (11204, 11228), True, 'import streamlit as st\n'), ((11237, 11325), 'streamlit.write', 'st.write', (['"""TOP 10 der GND-Entitäten, die in den letzten 365 Tagen angelegt wurden."""'], {}), "(\n 'TOP 10 der GND-Entitäten, die in den letzten 365 Tagen angelegt wurden.')\n", (11245, 11325), True, 'import streamlit as st\n'), ((11346, 11422), 'pandas.read_csv', 'pd.read_csv', (['f"""{path}/../stats/title_gnd_newcomer_top10.csv"""'], {'index_col': 'None'}), "(f'{path}/../stats/title_gnd_newcomer_top10.csv', index_col=None)\n", (11357, 11422), True, 'import pandas as pd\n'), ((11857, 11903), 'streamlit.subheader', 'st.subheader', (['f"""TOP 10 {satzart} GND-Newcomer"""'], {}), "(f'TOP 10 {satzart} GND-Newcomer')\n", (11869, 11903), True, 'import streamlit as st\n'), ((11912, 12008), 'streamlit.write', 'st.write', (['f"""TOP 10 der {satzart} Sätze, die in den letzten 365 Tagen angelegt wurden."""'], {}), "(\n f'TOP 10 der {satzart} Sätze, die in den letzten 365 Tagen angelegt wurden.'\n )\n", (11920, 12008), True, 'import streamlit as st\n'), ((12678, 12733), 'streamlit.subheader', 'st.subheader', (['f"""TOP 10 GND-Entitäten in DNB-Titeldaten"""'], {}), "(f'TOP 10 GND-Entitäten in DNB-Titeldaten')\n", (12690, 12733), True, 'import streamlit as st\n'), ((12754, 12821), 'pandas.read_csv', 'pd.read_csv', (['f"""{path}/../stats/title_gnd_top10.csv"""'], {'index_col': 'None'}), "(f'{path}/../stats/title_gnd_top10.csv', index_col=None)\n", (12765, 12821), True, 'import pandas as pd\n'), ((13256, 13307), 'streamlit.subheader', 'st.subheader', (['f"""TOP 10 {satzart} in DNB-Titeldaten"""'], {}), "(f'TOP 10 {satzart} in DNB-Titeldaten')\n", (13268, 13307), True, 'import streamlit as st\n'), ((15676, 15741), 'streamlit.subheader', 'st.subheader', (['"""Datenherkunft der GND-Entitäten in DNB-Titeldaten"""'], {}), "('Datenherkunft der GND-Entitäten in DNB-Titeldaten')\n", (15688, 15741), True, 'import streamlit as st\n'), ((15750, 16016), 'streamlit.write', 'st.write', (['"""Weniger als ein Drittel der GND-Entitäten in DNB-Titeldaten wurde in intellektuellen Erschließungsprozessen vergeben. Jeweils ca. ein weiteres Drittel wurde in maschinellen Erschließungsprozessen vergeben, ca. ein Drittel stammt aus Fremddaten."""'], {}), "(\n 'Weniger als ein Drittel der GND-Entitäten in DNB-Titeldaten wurde in intellektuellen Erschließungsprozessen vergeben. Jeweils ca. ein weiteres Drittel wurde in maschinellen Erschließungsprozessen vergeben, ca. 
ein Drittel stammt aus Fremddaten.'\n )\n", (15758, 16016), True, 'import streamlit as st\n'), ((16253, 16304), 'streamlit.altair_chart', 'st.altair_chart', (['entities'], {'use_container_width': '(True)'}), '(entities, use_container_width=True)\n', (16268, 16304), True, 'import streamlit as st\n'), ((16481, 16584), 'streamlit.write', 'st.write', (['f"""Durchschnittlich {mean} Verknüpfungen zu {satzart}-Sätzen pro DNB-Titeldatensatz"""'], {}), "(\n f'Durchschnittlich {mean} Verknüpfungen zu {satzart}-Sätzen pro DNB-Titeldatensatz'\n )\n", (16489, 16584), True, 'import streamlit as st\n'), ((17002, 17048), 'streamlit.beta_expander', 'st.beta_expander', (['"""Methodik und Datenherkunft"""'], {}), "('Methodik und Datenherkunft')\n", (17018, 17048), True, 'import streamlit as st\n'), ((17058, 18597), 'streamlit.markdown', 'st.markdown', (['"""\nDatengrundlage ist ein Gesamtabzug der Daten der Gemeinsamen Normadatei (GND) sowie der Titeldaten der Deutschen Nationalbibliothek (DNB) inkl. Zeitschriftendatenbank (ZDB), sofern sich Exemplare der Zeitschrift im Bestand der DNB befinden. In den Titeldaten ist auch der Tonträger- und Notenbestand des Deutschen Musikarchivs (DMA) sowie der Buch- und Objektbestand des Deutschen Buch- und Schriftmuseums (DBSM) nachgewiesen.\n\nDer Gesamtabzug liegt im OCLC-Format PICA+ vor. Die Daten werden mithilfe des Pica-Parsers [pica.rs](https://github.com/deutsche-nationalbibliothek/pica-rs) gefiltert. Dieses Tool produziert aus dem sehr großen Gesamtabzug (~ 31 GB) kleinere CSV-Dateien, die mit Python weiterverarbeitet werden.\n\nDas Dashboard ist mit dem Python-Framework [Streamlit](https://streamlit.io/) geschrieben. Die Skripte sowie die gefilterten CSV-Rohdaten sind auf [Github](https://github.com/buchmuseum/GND_Dashboard) zu finden. Die Diagramme wurden mit [Altair](https://altair-viz.github.io/index.html) erstellt, die Karten mit [Deck GL](https://deck.gl/) (via [Pydeck](https://deckgl.readthedocs.io/en/latest/#)), die Wordcloud mit [wordcloud](https://amueller.github.io/word_cloud/index.html).\n\nFür grundlegende Zugriffsstatistik verwenden wir [streamlit-analytics](https://pypi.org/project/streamlit-analytics/). Dabei werden keine personenbezogenen Daten gespeichert.\n\nAlle Skripte und Daten stehen unter CC0 Lizenz und können frei weitergenutzt werden.\n\nDie Daten werden monatlich aktualisiert.\n"""'], {}), '(\n """\nDatengrundlage ist ein Gesamtabzug der Daten der Gemeinsamen Normadatei (GND) sowie der Titeldaten der Deutschen Nationalbibliothek (DNB) inkl. Zeitschriftendatenbank (ZDB), sofern sich Exemplare der Zeitschrift im Bestand der DNB befinden. In den Titeldaten ist auch der Tonträger- und Notenbestand des Deutschen Musikarchivs (DMA) sowie der Buch- und Objektbestand des Deutschen Buch- und Schriftmuseums (DBSM) nachgewiesen.\n\nDer Gesamtabzug liegt im OCLC-Format PICA+ vor. Die Daten werden mithilfe des Pica-Parsers [pica.rs](https://github.com/deutsche-nationalbibliothek/pica-rs) gefiltert. Dieses Tool produziert aus dem sehr großen Gesamtabzug (~ 31 GB) kleinere CSV-Dateien, die mit Python weiterverarbeitet werden.\n\nDas Dashboard ist mit dem Python-Framework [Streamlit](https://streamlit.io/) geschrieben. Die Skripte sowie die gefilterten CSV-Rohdaten sind auf [Github](https://github.com/buchmuseum/GND_Dashboard) zu finden. 
Die Diagramme wurden mit [Altair](https://altair-viz.github.io/index.html) erstellt, die Karten mit [Deck GL](https://deck.gl/) (via [Pydeck](https://deckgl.readthedocs.io/en/latest/#)), die Wordcloud mit [wordcloud](https://amueller.github.io/word_cloud/index.html).\n\nFür grundlegende Zugriffsstatistik verwenden wir [streamlit-analytics](https://pypi.org/project/streamlit-analytics/). Dabei werden keine personenbezogenen Daten gespeichert.\n\nAlle Skripte und Daten stehen unter CC0 Lizenz und können frei weitergenutzt werden.\n\nDie Daten werden monatlich aktualisiert.\n"""\n )\n', (17069, 18597), True, 'import streamlit as st\n'), ((454, 487), 'pandas.read_csv', 'pd.read_csv', (['file'], {'index_col': 'None'}), '(file, index_col=None)\n', (465, 487), True, 'import pandas as pd\n'), ((9860, 9925), 'altair.X', 'alt.X', (['"""sum(count)"""'], {'title': '"""Datensätze pro Katalogisierungslevel"""'}), "('sum(count)', title='Datensätze pro Katalogisierungslevel')\n", (9865, 9925), True, 'import altair as alt\n'), ((9939, 9971), 'altair.Y', 'alt.Y', (['"""entity"""'], {'title': '"""Satzart"""'}), "('entity', title='Satzart')\n", (9944, 9971), True, 'import altair as alt\n'), ((9985, 10034), 'altair.Color', 'alt.Color', (['"""level"""'], {'title': '"""Katalogisierungslevel"""'}), "('level', title='Katalogisierungslevel')\n", (9994, 10034), True, 'import altair as alt\n'), ((10385, 10450), 'altair.X', 'alt.X', (['"""sum(count)"""'], {'title': '"""Datensätze pro Katalogisierungslevel"""'}), "('sum(count)', title='Datensätze pro Katalogisierungslevel')\n", (10390, 10450), True, 'import altair as alt\n'), ((10464, 10496), 'altair.Y', 'alt.Y', (['"""entity"""'], {'title': '"""Satzart"""'}), "('entity', title='Satzart')\n", (10469, 10496), True, 'import altair as alt\n'), ((10510, 10559), 'altair.Color', 'alt.Color', (['"""level"""'], {'title': '"""Katalogisierungslevel"""'}), "('level', title='Katalogisierungslevel')\n", (10519, 10559), True, 'import altair as alt\n'), ((11500, 11545), 'altair.X', 'alt.X', (['"""gnd_id"""'], {'title': '"""Entitäten"""', 'sort': '"""-y"""'}), "('gnd_id', title='Entitäten', sort='-y')\n", (11505, 11545), True, 'import altair as alt\n'), ((11559, 11589), 'altair.Y', 'alt.Y', (['"""count"""'], {'title': '"""Anzahl"""'}), "('count', title='Anzahl')\n", (11564, 11589), True, 'import altair as alt\n'), ((11603, 11648), 'altair.Color', 'alt.Color', (['"""name"""'], {'sort': '"""-y"""', 'title': '"""Entität"""'}), "('name', sort='-y', title='Entität')\n", (11612, 11648), True, 'import altair as alt\n'), ((12203, 12250), 'altair.X', 'alt.X', (['"""gnd_id:O"""'], {'title': '"""Entitäten"""', 'sort': '"""-y"""'}), "('gnd_id:O', title='Entitäten', sort='-y')\n", (12208, 12250), True, 'import altair as alt\n'), ((12264, 12294), 'altair.Y', 'alt.Y', (['"""count"""'], {'title': '"""Anzahl"""'}), "('count', title='Anzahl')\n", (12269, 12294), True, 'import altair as alt\n'), ((12308, 12353), 'altair.Color', 'alt.Color', (['"""name"""'], {'sort': '"""-y"""', 'title': '"""Entität"""'}), "('name', sort='-y', title='Entität')\n", (12317, 12353), True, 'import altair as alt\n'), ((12893, 12940), 'altair.X', 'alt.X', (['"""gnd_id:N"""'], {'title': '"""Entitäten"""', 'sort': '"""-y"""'}), "('gnd_id:N', title='Entitäten', sort='-y')\n", (12898, 12940), True, 'import altair as alt\n'), ((12954, 12986), 'altair.Y', 'alt.Y', (['"""count:Q"""'], {'title': '"""Anzahl"""'}), "('count:Q', title='Anzahl')\n", (12959, 12986), True, 'import altair as alt\n'), ((13000, 13047), 'altair.Color', 
'alt.Color', (['"""name:N"""'], {'sort': '"""-y"""', 'title': '"""Entität"""'}), "('name:N', sort='-y', title='Entität')\n", (13009, 13047), True, 'import altair as alt\n'), ((13487, 13534), 'altair.X', 'alt.X', (['"""gnd_id:N"""'], {'title': '"""Entitäten"""', 'sort': '"""-y"""'}), "('gnd_id:N', title='Entitäten', sort='-y')\n", (13492, 13534), True, 'import altair as alt\n'), ((13548, 13580), 'altair.Y', 'alt.Y', (['"""count:Q"""'], {'title': '"""Anzahl"""'}), "('count:Q', title='Anzahl')\n", (13553, 13580), True, 'import altair as alt\n'), ((13594, 13641), 'altair.Color', 'alt.Color', (['"""name:N"""'], {'sort': '"""-y"""', 'title': '"""Entität"""'}), "('name:N', sort='-y', title='Entität')\n", (13603, 13641), True, 'import altair as alt\n'), ((16078, 16120), 'altair.X', 'alt.X', (['"""sum(Datenart):N"""'], {'title': '"""Datenart"""'}), "('sum(Datenart):N', title='Datenart')\n", (16083, 16120), True, 'import altair as alt\n'), ((16134, 16172), 'altair.Y', 'alt.Y', (['"""sum(Anzahl):Q"""'], {'title': '"""Anzahl"""'}), "('sum(Anzahl):Q', title='Anzahl')\n", (16139, 16172), True, 'import altair as alt\n'), ((2289, 2310), 'altair.Legend', 'alt.Legend', ([], {'columns': '(2)'}), '(columns=2)\n', (2299, 2310), True, 'import altair as alt\n'), ((2330, 2364), 'altair.Tooltip', 'alt.Tooltip', (['"""Name:N"""'], {'title': '"""Ort"""'}), "('Name:N', title='Ort')\n", (2341, 2364), True, 'import altair as alt\n'), ((2366, 2405), 'altair.Tooltip', 'alt.Tooltip', (['"""Anzahl:Q"""'], {'title': '"""Anzahl"""'}), "('Anzahl:Q', title='Anzahl')\n", (2377, 2405), True, 'import altair as alt\n'), ((5096, 5127), 'streamlit.write', 'st.write', (['f"""{i}. {row[\'name\']}"""'], {}), '(f"{i}. {row[\'name\']}")\n', (5104, 5127), True, 'import streamlit as st\n'), ((6379, 6415), 'altair.Tooltip', 'alt.Tooltip', (['"""count"""'], {'title': '"""Anzahl"""'}), "('count', title='Anzahl')\n", (6390, 6415), True, 'import altair as alt\n'), ((6417, 6450), 'altair.Tooltip', 'alt.Tooltip', (['"""code"""'], {'title': '"""Code"""'}), "('code', title='Code')\n", (6428, 6450), True, 'import altair as alt\n'), ((7436, 7471), 'altair.Tooltip', 'alt.Tooltip', (['"""id"""'], {'title': '"""Notation"""'}), "('id', title='Notation')\n", (7447, 7471), True, 'import altair as alt\n'), ((7473, 7513), 'altair.Tooltip', 'alt.Tooltip', (['"""name"""'], {'title': '"""Bezeichnung"""'}), "('name', title='Bezeichnung')\n", (7484, 7513), True, 'import altair as alt\n'), ((7515, 7551), 'altair.Tooltip', 'alt.Tooltip', (['"""count"""'], {'title': '"""Anzahl"""'}), "('count', title='Anzahl')\n", (7526, 7551), True, 'import altair as alt\n'), ((8489, 8524), 'altair.Tooltip', 'alt.Tooltip', (['"""id"""'], {'title': '"""Notation"""'}), "('id', title='Notation')\n", (8500, 8524), True, 'import altair as alt\n'), ((8526, 8566), 'altair.Tooltip', 'alt.Tooltip', (['"""name"""'], {'title': '"""Bezeichnung"""'}), "('name', title='Bezeichnung')\n", (8537, 8566), True, 'import altair as alt\n'), ((8568, 8604), 'altair.Tooltip', 'alt.Tooltip', (['"""count"""'], {'title': '"""Anzahl"""'}), "('count', title='Anzahl')\n", (8579, 8604), True, 'import altair as alt\n'), ((15386, 15580), 'pandas.DataFrame.from_dict', 'pd.DataFrame.from_dict', (["{'intellektuell verknüpfte Entitäten': uniques,\n 'Entitäten aus automatischen Prozessen': auto_entites,\n 'Entitäten aus Fremddaten': fremd_entities}"], {'orient': '"""index"""'}), "({'intellektuell verknüpfte Entitäten': uniques,\n 'Entitäten aus automatischen Prozessen': auto_entites,\n 'Entitäten aus Fremddaten': 
fremd_entities}, orient='index')\n", (15408, 15580), True, 'import pandas as pd\n'), ((5187, 5218), 'streamlit.write', 'st.write', (['f"""{i}. {row[\'name\']}"""'], {}), '(f"{i}. {row[\'name\']}")\n', (5195, 5218), True, 'import streamlit as st\n'), ((10057, 10095), 'altair.Tooltip', 'alt.Tooltip', (['"""entity"""'], {'title': '"""Satzart"""'}), "('entity', title='Satzart')\n", (10068, 10095), True, 'import altair as alt\n'), ((10097, 10148), 'altair.Tooltip', 'alt.Tooltip', (['"""level"""'], {'title': '"""Katalogisierungslevel"""'}), "('level', title='Katalogisierungslevel')\n", (10108, 10148), True, 'import altair as alt\n'), ((10151, 10187), 'altair.Tooltip', 'alt.Tooltip', (['"""count"""'], {'title': '"""Anzahl"""'}), "('count', title='Anzahl')\n", (10162, 10187), True, 'import altair as alt\n'), ((10582, 10633), 'altair.Tooltip', 'alt.Tooltip', (['"""level"""'], {'title': '"""Katalogisierungslevel"""'}), "('level', title='Katalogisierungslevel')\n", (10593, 10633), True, 'import altair as alt\n'), ((10636, 10672), 'altair.Tooltip', 'alt.Tooltip', (['"""count"""'], {'title': '"""Anzahl"""'}), "('count', title='Anzahl')\n", (10647, 10672), True, 'import altair as alt\n'), ((11671, 11709), 'altair.Tooltip', 'alt.Tooltip', (['"""name:N"""'], {'title': '"""Entität"""'}), "('name:N', title='Entität')\n", (11682, 11709), True, 'import altair as alt\n'), ((11712, 11749), 'altair.Tooltip', 'alt.Tooltip', (['"""bbg:N"""'], {'title': '"""Satzart"""'}), "('bbg:N', title='Satzart')\n", (11723, 11749), True, 'import altair as alt\n'), ((11751, 11787), 'altair.Tooltip', 'alt.Tooltip', (['"""gnd_id:N"""'], {'title': '"""IDN"""'}), "('gnd_id:N', title='IDN')\n", (11762, 11787), True, 'import altair as alt\n'), ((11789, 11827), 'altair.Tooltip', 'alt.Tooltip', (['"""count:Q"""'], {'title': '"""Anzahl"""'}), "('count:Q', title='Anzahl')\n", (11800, 11827), True, 'import altair as alt\n'), ((12376, 12414), 'altair.Tooltip', 'alt.Tooltip', (['"""name:N"""'], {'title': '"""Entität"""'}), "('name:N', title='Entität')\n", (12387, 12414), True, 'import altair as alt\n'), ((12417, 12453), 'altair.Tooltip', 'alt.Tooltip', (['"""gnd_id:N"""'], {'title': '"""IDN"""'}), "('gnd_id:N', title='IDN')\n", (12428, 12453), True, 'import altair as alt\n'), ((12455, 12493), 'altair.Tooltip', 'alt.Tooltip', (['"""count:Q"""'], {'title': '"""Anzahl"""'}), "('count:Q', title='Anzahl')\n", (12466, 12493), True, 'import altair as alt\n'), ((13070, 13108), 'altair.Tooltip', 'alt.Tooltip', (['"""name:N"""'], {'title': '"""Entität"""'}), "('name:N', title='Entität')\n", (13081, 13108), True, 'import altair as alt\n'), ((13111, 13147), 'altair.Tooltip', 'alt.Tooltip', (['"""gnd_id:N"""'], {'title': '"""IDN"""'}), "('gnd_id:N', title='IDN')\n", (13122, 13147), True, 'import altair as alt\n'), ((13149, 13186), 'altair.Tooltip', 'alt.Tooltip', (['"""bbg:N"""'], {'title': '"""Satzart"""'}), "('bbg:N', title='Satzart')\n", (13160, 13186), True, 'import altair as alt\n'), ((13188, 13226), 'altair.Tooltip', 'alt.Tooltip', (['"""count:Q"""'], {'title': '"""Anzahl"""'}), "('count:Q', title='Anzahl')\n", (13199, 13226), True, 'import altair as alt\n'), ((13664, 13702), 'altair.Tooltip', 'alt.Tooltip', (['"""name:N"""'], {'title': '"""Entität"""'}), "('name:N', title='Entität')\n", (13675, 13702), True, 'import altair as alt\n'), ((13705, 13741), 'altair.Tooltip', 'alt.Tooltip', (['"""gnd_id:N"""'], {'title': '"""IDN"""'}), "('gnd_id:N', title='IDN')\n", (13716, 13741), True, 'import altair as alt\n'), ((13743, 13781), 'altair.Tooltip', 
'alt.Tooltip', (['"""count:Q"""'], {'title': '"""Anzahl"""'}), "('count:Q', title='Anzahl')\n", (13754, 13781), True, 'import altair as alt\n'), ((9815, 9828), 'altair.Chart', 'alt.Chart', (['df'], {}), '(df)\n', (9824, 9828), True, 'import altair as alt\n'), ((11443, 11468), 'altair.Chart', 'alt.Chart', (['newcomer_daten'], {}), '(newcomer_daten)\n', (11452, 11468), True, 'import altair as alt\n'), ((12841, 12861), 'altair.Chart', 'alt.Chart', (['top_daten'], {}), '(top_daten)\n', (12850, 12861), True, 'import altair as alt\n'), ((16026, 16046), 'altair.Chart', 'alt.Chart', (['entity_df'], {}), '(entity_df)\n', (16035, 16046), True, 'import altair as alt\n')] |
#<NAME> 150401052
import os
import sys
import time
from socket import *
from os import system, name
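# Note (added): this script appears to implement a tiny UDP file server. It waits for one
# client command per run -- "listeleme yap" (list files), "put yap" (receive a file from the
# client), "get yap" (send a file to the client) or "bitir" (shut down) -- and answers the
# client over the same socket.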
ip = '127.0.0.1'
port = 42
s_soket = socket(AF_INET, SOCK_DGRAM)
s_soket.bind((ip, port))
print("\nSunucu Hazir\n")
kontrol, istemciAdres = s_soket.recvfrom(4096)
s_soket.sendto(bytes("Sunucu hazir", encoding='utf-8'), istemciAdres)
i, istemciAdres = s_soket.recvfrom(4096)
if(i.decode("utf-8") == "listeleme yap"):
dosyalar = "\n".join(os.listdir())
s_soket.sendto(bytes(dosyalar, encoding='utf-8'), istemciAdres)
sys.exit()
elif(i.decode("utf-8") == "put yap"):
cevap = s_soket.recvfrom(4096)
if(cevap[0].decode("utf-8") == "mevcut"):
dosyaIsmi, istemciAdres = s_soket.recvfrom(4096)
dosyaIcerigi = s_soket.recvfrom(4096)
if(os.path.exists(dosyaIsmi.decode("utf-8")) == True):
s_soket.sendto(bytes("aynisi mevcut", encoding='utf-8'), istemciAdres)
karar = s_soket.recvfrom(4096)
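            # karar ("decision") from the client: "1" means keep both files, so the upload
            # is written to "<name> (kopya).txt" instead of overwriting the existing file.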
if(karar[0].decode("utf-8") == "1"):
yeniAd = dosyaIsmi.decode("utf-8")[:-4] + " (kopya)" + ".txt"
dosyaYeni = open(yeniAd, "wb")
dosyaYeni.write(dosyaIcerigi[0])
dosyaYeni.close()
print("\nPUT islemi basariyla gerceklesti..")
else:
dosyaYeni = open(dosyaIsmi, "wb")
dosyaYeni.write(dosyaIcerigi[0])
dosyaYeni.close()
s_soket.sendto(bytes("tamam", encoding='utf-8'), istemciAdres)
print("\nPUT islemi basariyla gerceklesti..")
else:
print("\nGirilen adda bir dosya istemcide bulunamadi..")
elif(i.decode("utf-8") == "get yap"):
dosyaIsmi, istemciAdres = s_soket.recvfrom(4096)
if (os.path.exists(dosyaIsmi.decode("utf-8")) == True):
dosya = open(dosyaIsmi.decode("utf-8"), "rb")
s_soket.sendto(bytes("dosya mevcut", encoding='utf-8'), istemciAdres)
dosyaIcerik = dosya.read()
dosya.close()
s_soket.sendto(dosyaIcerik, istemciAdres)
kontrol = s_soket.recvfrom(4096)
print("\nGET islemi basariyla gerceklesti..")
sys.exit()
else:
print("\n! Bu isimde bir dosya sunucuda mevcut değil")
sys.exit()
elif(i.decode("utf-8") == "bitir"):
s_soket.close()
print("\nSunucu kapandi")
sys.exit() | [
"os.listdir",
"sys.exit"
] | [((702, 712), 'sys.exit', 'sys.exit', ([], {}), '()\n', (710, 712), False, 'import sys\n'), ((561, 573), 'os.listdir', 'os.listdir', ([], {}), '()\n', (571, 573), False, 'import os\n'), ((2831, 2841), 'sys.exit', 'sys.exit', ([], {}), '()\n', (2839, 2841), False, 'import sys\n'), ((3005, 3015), 'sys.exit', 'sys.exit', ([], {}), '()\n', (3013, 3015), False, 'import sys\n'), ((3174, 3184), 'sys.exit', 'sys.exit', ([], {}), '()\n', (3182, 3184), False, 'import sys\n')] |
"""
Scatter plot with panning and zooming
Shows a scatter plot of a set of random points,
with basic Chaco panning and zooming.
Interacting with the plot:
- Left-mouse-drag pans the plot.
- Mouse wheel up and down zooms the plot in and out.
- Pressing "z" brings up the Zoom Box, and you can click-drag a rectangular
region to zoom. If you use a sequence of zoom boxes, pressing alt-left-arrow
and alt-right-arrow moves you forwards and backwards through the "zoom
history".
"""
# Major library imports
from numpy import sort
from numpy.random import random
# Enthought library imports
from enable.api import Component, ComponentEditor
from traits.api import HasTraits, Instance
from traitsui.api import Item, Group, View
# Chaco imports
from chaco.api import ArrayPlotData, Plot
from chaco.tools.api import PanTool, ZoomTool
#===============================================================================
# # Create the Chaco plot.
#===============================================================================
def _create_plot_component():
# Create some data
numpts = 5000
x = sort(random(numpts))
y = random(numpts)
    # Create a plot data object and give it this data
pd = ArrayPlotData()
pd.set_data("index", x)
pd.set_data("value", y)
# Create the plot
plot = Plot(pd)
plot.plot(("index", "value"),
type="scatter",
marker="circle",
index_sort="ascending",
color="orange",
marker_size=3,
bgcolor="white")
# Tweak some of the plot properties
plot.title = "Scatter Plot"
plot.line_width = 0.5
plot.padding = 50
# Attach some tools to the plot
plot.tools.append(PanTool(plot, constrain_key="shift"))
zoom = ZoomTool(component=plot, tool_mode="box", always_on=False)
plot.overlays.append(zoom)
return plot
#===============================================================================
# Attributes to use for the plot view.
size = (650, 650)
title = "Basic scatter plot"
bg_color="lightgray"
#===============================================================================
# # Demo class that is used by the demo.py application.
#===============================================================================
class Demo(HasTraits):
plot = Instance(Component)
traits_view = View(
Group(
Item('plot', editor=ComponentEditor(size=size,
bgcolor=bg_color),
show_label=False),
orientation = "vertical"),
resizable=True, title=title
)
def _plot_default(self):
return _create_plot_component()
demo = Demo()
if __name__ == "__main__":
demo.configure_traits()
#--EOF---
| [
"traits.api.Instance",
"chaco.api.ArrayPlotData",
"chaco.tools.api.PanTool",
"chaco.tools.api.ZoomTool",
"numpy.random.random",
"chaco.api.Plot",
"enable.api.ComponentEditor"
] | [((1150, 1164), 'numpy.random.random', 'random', (['numpts'], {}), '(numpts)\n', (1156, 1164), False, 'from numpy.random import random\n'), ((1228, 1243), 'chaco.api.ArrayPlotData', 'ArrayPlotData', ([], {}), '()\n', (1241, 1243), False, 'from chaco.api import ArrayPlotData, Plot\n'), ((1334, 1342), 'chaco.api.Plot', 'Plot', (['pd'], {}), '(pd)\n', (1338, 1342), False, 'from chaco.api import ArrayPlotData, Plot\n'), ((1795, 1853), 'chaco.tools.api.ZoomTool', 'ZoomTool', ([], {'component': 'plot', 'tool_mode': '"""box"""', 'always_on': '(False)'}), "(component=plot, tool_mode='box', always_on=False)\n", (1803, 1853), False, 'from chaco.tools.api import PanTool, ZoomTool\n'), ((2344, 2363), 'traits.api.Instance', 'Instance', (['Component'], {}), '(Component)\n', (2352, 2363), False, 'from traits.api import HasTraits, Instance\n'), ((1126, 1140), 'numpy.random.random', 'random', (['numpts'], {}), '(numpts)\n', (1132, 1140), False, 'from numpy.random import random\n'), ((1746, 1782), 'chaco.tools.api.PanTool', 'PanTool', (['plot'], {'constrain_key': '"""shift"""'}), "(plot, constrain_key='shift')\n", (1753, 1782), False, 'from chaco.tools.api import PanTool, ZoomTool\n'), ((2460, 2504), 'enable.api.ComponentEditor', 'ComponentEditor', ([], {'size': 'size', 'bgcolor': 'bg_color'}), '(size=size, bgcolor=bg_color)\n', (2475, 2504), False, 'from enable.api import Component, ComponentEditor\n')] |
import pandas as pd
import argparse
import json
try:
from graphviz import Digraph
except ImportError:
print("Note: Optional graphviz not installed")
def generate_graph(df, graph_format='pdf'):
g = Digraph('ModelFlow', filename='modelflow.gv', engine='neato', format=graph_format)
g.attr(overlap='false')
g.attr(splines='true')
column_names = df.columns
states = []
g.attr('node', shape='ellipse')
for column_name in column_names:
if column_name[:6] == 'state_':
states.append((column_name[6:], column_name))
g.node(column_name[6:])
models = []
g.attr('node', shape='box')
for column_name in column_names:
if column_name[:6] != 'state_':
models.append((column_name.split('_')[0], column_name))
g.node(column_name.split('_')[0])
for column_name in column_names:
if column_name[:6] != 'state_':
parts = column_name.split('_')
state = '_'.join(parts[1:])[6:-7]
print(parts[0], state, df[column_name].min(),
df[column_name].max())
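            # Note (added): the sign of the recorded flow decides the edge direction --
            # values that are never positive flow from the state into the model, values that
            # are never negative flow from the model into the state, and mixed signs get
            # edges in both directions.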
if df[column_name].min() < 0 and df[column_name].max() <= 0:
g.edge(state, parts[0])
elif df[column_name].min() >= 0 and df[column_name].max() > 0:
g.edge(parts[0], state)
else:
g.edge(parts[0], state)
g.edge(state, parts[0])
if graph_format == 'json':
# TODO: THIS DOES NOT WORK FOR MULTIPLE MODELFLOWS
with open('modelflow.gv.json', 'r') as f:
return json.load(f)
else:
g.view()
def generate_react_flow_chart(outputs):
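    # Note (inferred from the accesses below): `outputs` is expected to be a dict with an
    # 'output_states' mapping of column name -> {'data': <list of values>}.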
df = pd.DataFrame()
for key, value in outputs['output_states'].items():
df[key] = value['data']
return generate_react_flow_chart_from_df(df)
def generate_react_flow_chart_from_df(df):
column_names = df.columns
nodes = {}
# Elipses
for column_name in column_names:
if column_name[:6] == 'state_':
nodes[column_name[6:]] = dict(name=column_name[6:], kind='elipse')
# Boxes
for column_name in column_names:
if column_name[:6] != 'state_':
nodes[column_name.split('_')[0]] = dict(name=column_name.split('_')[0], kind='box')
edges = []
for column_name in column_names:
if column_name[:6] != 'state_':
parts = column_name.split('_')
name1 = parts[0]
state = '_'.join(parts[1:])[6:-7]
# print(name1, state, df[column_name].min(),
# df[column_name].max())
if df[column_name].min() < 0 and df[column_name].max() <= 0:
edges.append([state, name1, 'one_way'])
elif df[column_name].min() >= 0 and df[column_name].max() > 0:
edges.append([name1, state, 'one_way'])
else:
edges.append([name1, state, 'both'])
return dict(nodes=list(nodes.values()), edges=edges)
def main(args):
df = pd.read_csv(args.output_file)
# generate_graph(df)
generate_react_flow_chart_from_df(df)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Generate Graph Viz')
parser.add_argument('-f', '--output_file', type=str,
help='The output file to generate a graph of', required=True)
args = parser.parse_args()
main(args)
| [
"argparse.ArgumentParser",
"pandas.DataFrame",
"pandas.read_csv",
"json.load",
"graphviz.Digraph"
] | [((199, 286), 'graphviz.Digraph', 'Digraph', (['"""ModelFlow"""'], {'filename': '"""modelflow.gv"""', 'engine': '"""neato"""', 'format': 'graph_format'}), "('ModelFlow', filename='modelflow.gv', engine='neato', format=\n graph_format)\n", (206, 286), False, 'from graphviz import Digraph\n'), ((1681, 1695), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (1693, 1695), True, 'import pandas as pd\n'), ((3012, 3041), 'pandas.read_csv', 'pd.read_csv', (['args.output_file'], {}), '(args.output_file)\n', (3023, 3041), True, 'import pandas as pd\n'), ((3152, 3209), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Generate Graph Viz"""'}), "(description='Generate Graph Viz')\n", (3175, 3209), False, 'import argparse\n'), ((1582, 1594), 'json.load', 'json.load', (['f'], {}), '(f)\n', (1591, 1594), False, 'import json\n')] |
import discord
import os
import json
import datetime
import pandas as pd
from dateutil.relativedelta import relativedelta
from pprint import pprint
import base.ColorPrint as CPrint
import command.voice_log.Config_Main as CSetting
def most_old_Month() :
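	# Note (added): walk backwards month by month until a monthly log file is missing and
	# return how many months of logs exist, their "YYYY/MM" labels, and the file paths.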
old_month = 1
labels = []
fileNameList = []
while True :
filetime = datetime.datetime.today() - relativedelta(months=old_month)
m_month = datetime.datetime.strftime(filetime,'%m')
m_year = datetime.datetime.strftime(filetime,'%Y')
filename = CSetting.baseLogFolder + CSetting.JSONPATH_row + m_year + m_month + ".json"
if not os.path.exists( filename ) :
			old_month -= 1 # the counter was advanced one month past the last existing file, so the real value is old_month - 1
break
labels.append( m_year + "/" + m_month )
fileNameList.append( filename )
old_month += 1
return old_month , labels , fileNameList
async def makeOldTimeList( client: discord.Client, MonthFileList:list[str] , IndexLabel:list[str], RoleList: list[int] = CSetting.OneMonthOutput_RoleID ):
all_df = None
for fileName in MonthFileList :
df = await makeTimeList( client, Datafile_path=fileName , RoleList=RoleList)
#print( "test1" )
pprint( df )
if df is None :
break
labelname = IndexLabel[MonthFileList.index(fileName)]
df = df.rename(columns={'time': labelname })
if MonthFileList.index(fileName) == 0 :
all_df = df
else :
df = df.drop(columns=['name'])
all_df = pd.merge(all_df, df , left_index=True, right_index=True)
#all_df = pd.merge(all_df, df , left_index=True)
#df.loc[:,[labelname]]
#pprint(all_df)
return all_df
async def UserRoleMember( client: discord.Client, RoleList: list[int] ) :
"""
	[VC] Collect the members that belong to the specified roles.
	Args:
		client (discord.Client): client
		RoleList (list[int]): role IDs
	return:
		list[discord.Member]: members that belong to the specified roles
"""
data = []
for guild_item in client.guilds :
		# refresh the guild's member cache
await guild_item.chunk()
		# if no role restriction is given, take every member
if len(RoleList) == 0 :
data += guild_item.members
continue
		# otherwise, collect only the members of the matching roles
for role_item in guild_item.roles :
if role_item.id in RoleList :
data += role_item.members
return data
async def makeTimeList( client: discord.Client, Datafile_path: str , RoleList: list[int]):
"""
	[VC] Aggregate the raw voice log data and return it as a table.
	Args:
		client (discord.Client): client
		Datafile_path (str): path to the raw JSON log file
		RoleList (list[int]): role IDs
	return:
		pd.DataFrame: aggregated data
"""
	# fetch the user list
members = await UserRoleMember(client, RoleList)
	# extract the IDs (and display names)
def getID(members: list[discord.Member]):
IDlist = []
Namelist = []
for member in members :
IDlist.append( member.id )
Namelist.append( member.name + "#" + member.discriminator )
return IDlist , Namelist
members_IDlist , members_Namelist = getID(members=members)
if members_IDlist is None or members_IDlist == [] :
return None
	# load the JSON log
orig_TimeData : dict
try :
with open( Datafile_path ) as f:
orig_TimeData = json.load(f)
except :
CPrint.error_print("JSONではありません")
import traceback
traceback.print_exc()
return None
if orig_TimeData is None :
return None
#df = pd.DataFrame({
# 'start': [None, None],
# 'end': [None, None],
# 'time': [13, 23]},
# index=['ONE', 'TWO']
#)
df_dict = {
'name': members_Namelist,
'start': [None] * len(members),
'exit': [None] * len(members),
'time': [0.0] * len(members),
}
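	# Note (inferred from the field accesses below): each raw log record is expected to look
	# like {"member.id": <int>, "Flag": "entry" | "exit", "time": "YYYY/MM/DD HH:MM:SS"}.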
	# accumulate the time per user
for item in orig_TimeData :
try :
indexNum = members_IDlist.index(item["member.id"])
except ValueError as error :
			# skip users who no longer exist in the current guild
continue
if item["Flag"] == "entry" :
df_dict["start"][indexNum] = item["time"]
if item["Flag"] == "exit" :
			# an exit record without a preceding entry record
if df_dict["start"][indexNum] is None :
				# for now, treat the user as having entered at the start of the month
				# (other approaches are under consideration, e.g. treating them as never having entered)
				tmp_startTime = datetime.datetime.now().strftime("%Y/%m/01 00:00:00")
df_dict["start"][indexNum] = tmp_startTime
# --
df_dict["exit"][indexNum] = item["time"]
			# compute the session duration
a_time = datetime.datetime.strptime( df_dict["start"][indexNum] , '%Y/%m/%d %H:%M:%S')
b_time = datetime.datetime.strptime( df_dict["exit"][indexNum] , '%Y/%m/%d %H:%M:%S')
time : float = (b_time - a_time).total_seconds()
#print( "time : " + str(time) )
if time < 0.0 :
df_dict["time"][indexNum] += 0.0
else :
df_dict["time"][indexNum] += time
	# convert to a DataFrame
df = pd.DataFrame(df_dict,
index=members_IDlist
)
	# drop the helper "start" and "exit" columns
df = df.drop(columns=['start','exit'])
	# convert seconds to hours
df["time"] = df["time"] / 60 / 60
#pprint(df)
return df
| [
"os.path.exists",
"datetime.now",
"dateutil.relativedelta.relativedelta",
"datetime.datetime.strptime",
"base.ColorPrint.error_print",
"pandas.merge",
"json.load",
"datetime.datetime.today",
"traceback.print_exc",
"pandas.DataFrame",
"datetime.datetime.strftime",
"pprint.pprint"
] | [((4326, 4369), 'pandas.DataFrame', 'pd.DataFrame', (['df_dict'], {'index': 'members_IDlist'}), '(df_dict, index=members_IDlist)\n', (4338, 4369), True, 'import pandas as pd\n'), ((404, 446), 'datetime.datetime.strftime', 'datetime.datetime.strftime', (['filetime', '"""%m"""'], {}), "(filetime, '%m')\n", (430, 446), False, 'import datetime\n'), ((457, 499), 'datetime.datetime.strftime', 'datetime.datetime.strftime', (['filetime', '"""%Y"""'], {}), "(filetime, '%Y')\n", (483, 499), False, 'import datetime\n'), ((1146, 1156), 'pprint.pprint', 'pprint', (['df'], {}), '(df)\n', (1152, 1156), False, 'from pprint import pprint\n'), ((332, 357), 'datetime.datetime.today', 'datetime.datetime.today', ([], {}), '()\n', (355, 357), False, 'import datetime\n'), ((360, 391), 'dateutil.relativedelta.relativedelta', 'relativedelta', ([], {'months': 'old_month'}), '(months=old_month)\n', (373, 391), False, 'from dateutil.relativedelta import relativedelta\n'), ((597, 621), 'os.path.exists', 'os.path.exists', (['filename'], {}), '(filename)\n', (611, 621), False, 'import os\n'), ((1403, 1458), 'pandas.merge', 'pd.merge', (['all_df', 'df'], {'left_index': '(True)', 'right_index': '(True)'}), '(all_df, df, left_index=True, right_index=True)\n', (1411, 1458), True, 'import pandas as pd\n'), ((2933, 2945), 'json.load', 'json.load', (['f'], {}), '(f)\n', (2942, 2945), False, 'import json\n'), ((2958, 2991), 'base.ColorPrint.error_print', 'CPrint.error_print', (['"""JSONではありません"""'], {}), "('JSONではありません')\n", (2976, 2991), True, 'import base.ColorPrint as CPrint\n'), ((3013, 3034), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (3032, 3034), False, 'import traceback\n'), ((3943, 4018), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (["df_dict['start'][indexNum]", '"""%Y/%m/%d %H:%M:%S"""'], {}), "(df_dict['start'][indexNum], '%Y/%m/%d %H:%M:%S')\n", (3969, 4018), False, 'import datetime\n'), ((4033, 4107), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (["df_dict['exit'][indexNum]", '"""%Y/%m/%d %H:%M:%S"""'], {}), "(df_dict['exit'][indexNum], '%Y/%m/%d %H:%M:%S')\n", (4059, 4107), False, 'import datetime\n'), ((3775, 3789), 'datetime.now', 'datetime.now', ([], {}), '()\n', (3787, 3789), False, 'import datetime\n')] |
from abc import ABCMeta, abstractmethod
import os
from vmaf.tools.misc import make_absolute_path, run_process
from vmaf.tools.stats import ListStats
__copyright__ = "Copyright 2016-2018, Netflix, Inc."
__license__ = "Apache, Version 2.0"
import re
import numpy as np
import ast
from vmaf import ExternalProgramCaller, to_list
from vmaf.config import VmafConfig, VmafExternalConfig
from vmaf.core.executor import Executor
from vmaf.core.result import Result
from vmaf.tools.reader import YuvReader
class FeatureExtractor(Executor):
"""
FeatureExtractor takes in a list of assets, and run feature extraction on
them, and return a list of corresponding results. A FeatureExtractor must
specify a unique type and version combination (by the TYPE and VERSION
attribute), so that the Result generated by it can be identified.
A derived class of FeatureExtractor must:
1) Override TYPE and VERSION
2) Override _generate_result(self, asset), which call a
command-line executable and generate feature scores in a log file.
3) Override _get_feature_scores(self, asset), which read the feature
scores from the log file, and return the scores in a dictionary format.
For an example, follow VmafFeatureExtractor.
"""
__metaclass__ = ABCMeta
@property
@abstractmethod
def ATOM_FEATURES(self):
raise NotImplementedError
def _read_result(self, asset):
result = {}
result.update(self._get_feature_scores(asset))
executor_id = self.executor_id
return Result(asset, executor_id, result)
@classmethod
def get_scores_key(cls, atom_feature):
return "{type}_{atom_feature}_scores".format(
type=cls.TYPE, atom_feature=atom_feature)
@classmethod
def get_score_key(cls, atom_feature):
return "{type}_{atom_feature}_score".format(
type=cls.TYPE, atom_feature=atom_feature)
def _get_feature_scores(self, asset):
# routine to read the feature scores from the log file, and return
# the scores in a dictionary format.
log_file_path = self._get_log_file_path(asset)
atom_feature_scores_dict = {}
atom_feature_idx_dict = {}
for atom_feature in self.ATOM_FEATURES:
atom_feature_scores_dict[atom_feature] = []
atom_feature_idx_dict[atom_feature] = 0
with open(log_file_path, 'rt') as log_file:
for line in log_file.readlines():
for atom_feature in self.ATOM_FEATURES:
re_template = "{af}: ([0-9]+) ([a-zA-Z0-9.-]+)".format(af=atom_feature)
mo = re.match(re_template, line)
if mo:
cur_idx = int(mo.group(1))
assert cur_idx == atom_feature_idx_dict[atom_feature]
# parse value, allowing NaN and inf
val = float(mo.group(2))
if np.isnan(val) or np.isinf(val):
val = None
atom_feature_scores_dict[atom_feature].append(val)
atom_feature_idx_dict[atom_feature] += 1
continue
len_score = len(atom_feature_scores_dict[self.ATOM_FEATURES[0]])
assert len_score != 0
for atom_feature in self.ATOM_FEATURES[1:]:
assert len_score == len(atom_feature_scores_dict[atom_feature]), \
"Feature data possibly corrupt. Run cleanup script and try again."
feature_result = {}
for atom_feature in self.ATOM_FEATURES:
scores_key = self.get_scores_key(atom_feature)
feature_result[scores_key] = atom_feature_scores_dict[atom_feature]
return feature_result
class VmafFeatureExtractor(FeatureExtractor):
TYPE = "VMAF_feature"
# VERSION = '0.1' # vmaf_study; Anush's VIF fix
# VERSION = '0.2' # expose vif_num, vif_den, adm_num, adm_den, anpsnr
# VERSION = '0.2.1' # expose vif num/den of each scale
# VERSION = '0.2.2' # adm abs-->fabs, corrected border handling, uniform reading with option of offset for input YUV, updated VIF corner case
# VERSION = '0.2.2b' # expose adm_den/num_scalex
# VERSION = '0.2.3' # AVX for VMAF convolution; update adm features by folding noise floor into per coef
# VERSION = '0.2.4' # Fix a bug in adm feature passing scale into dwt_quant_step
# VERSION = '0.2.4b' # Modify by adding ADM noise floor outside cube root; add derived feature motion2
VERSION = '0.2.4c' # Modify by moving motion2 to c code
ATOM_FEATURES = ['vif', 'adm', 'ansnr', 'motion', 'motion2',
'vif_num', 'vif_den', 'adm_num', 'adm_den', 'anpsnr',
'vif_num_scale0', 'vif_den_scale0',
'vif_num_scale1', 'vif_den_scale1',
'vif_num_scale2', 'vif_den_scale2',
'vif_num_scale3', 'vif_den_scale3',
'adm_num_scale0', 'adm_den_scale0',
'adm_num_scale1', 'adm_den_scale1',
'adm_num_scale2', 'adm_den_scale2',
'adm_num_scale3', 'adm_den_scale3',
]
DERIVED_ATOM_FEATURES = ['vif_scale0', 'vif_scale1', 'vif_scale2', 'vif_scale3',
'vif2', 'adm2', 'adm3',
'adm_scale0', 'adm_scale1', 'adm_scale2', 'adm_scale3',
]
ADM2_CONSTANT = 0
ADM_SCALE_CONSTANT = 0
def _generate_result(self, asset):
# routine to call the command-line executable and generate feature
# scores in the log file.
quality_width, quality_height = asset.quality_width_height
log_file_path = self._get_log_file_path(asset)
yuv_type=self._get_workfile_yuv_type(asset)
ref_path=asset.ref_workfile_path
dis_path=asset.dis_workfile_path
w=quality_width
h=quality_height
logger = self.logger
ExternalProgramCaller.call_vmaf_feature(yuv_type, ref_path, dis_path, w, h, log_file_path, logger)
@classmethod
def _post_process_result(cls, result):
# override Executor._post_process_result
result = super(VmafFeatureExtractor, cls)._post_process_result(result)
# adm2 =
# (adm_num + ADM2_CONSTANT) / (adm_den + ADM2_CONSTANT)
adm2_scores_key = cls.get_scores_key('adm2')
adm_num_scores_key = cls.get_scores_key('adm_num')
adm_den_scores_key = cls.get_scores_key('adm_den')
result.result_dict[adm2_scores_key] = list(
(np.array(result.result_dict[adm_num_scores_key]) + cls.ADM2_CONSTANT) /
(np.array(result.result_dict[adm_den_scores_key]) + cls.ADM2_CONSTANT)
)
# vif_scalei = vif_num_scalei / vif_den_scalei, i = 0, 1, 2, 3
vif_num_scale0_scores_key = cls.get_scores_key('vif_num_scale0')
vif_den_scale0_scores_key = cls.get_scores_key('vif_den_scale0')
vif_num_scale1_scores_key = cls.get_scores_key('vif_num_scale1')
vif_den_scale1_scores_key = cls.get_scores_key('vif_den_scale1')
vif_num_scale2_scores_key = cls.get_scores_key('vif_num_scale2')
vif_den_scale2_scores_key = cls.get_scores_key('vif_den_scale2')
vif_num_scale3_scores_key = cls.get_scores_key('vif_num_scale3')
vif_den_scale3_scores_key = cls.get_scores_key('vif_den_scale3')
vif_scale0_scores_key = cls.get_scores_key('vif_scale0')
vif_scale1_scores_key = cls.get_scores_key('vif_scale1')
vif_scale2_scores_key = cls.get_scores_key('vif_scale2')
vif_scale3_scores_key = cls.get_scores_key('vif_scale3')
result.result_dict[vif_scale0_scores_key] = list(
(np.array(result.result_dict[vif_num_scale0_scores_key])
/ np.array(result.result_dict[vif_den_scale0_scores_key]))
)
result.result_dict[vif_scale1_scores_key] = list(
(np.array(result.result_dict[vif_num_scale1_scores_key])
/ np.array(result.result_dict[vif_den_scale1_scores_key]))
)
result.result_dict[vif_scale2_scores_key] = list(
(np.array(result.result_dict[vif_num_scale2_scores_key])
/ np.array(result.result_dict[vif_den_scale2_scores_key]))
)
result.result_dict[vif_scale3_scores_key] = list(
(np.array(result.result_dict[vif_num_scale3_scores_key])
/ np.array(result.result_dict[vif_den_scale3_scores_key]))
)
# vif2 =
# ((vif_num_scale0 / vif_den_scale0) + (vif_num_scale1 / vif_den_scale1) +
# (vif_num_scale2 / vif_den_scale2) + (vif_num_scale3 / vif_den_scale3)) / 4.0
vif_scores_key = cls.get_scores_key('vif2')
result.result_dict[vif_scores_key] = list(
(
(np.array(result.result_dict[vif_num_scale0_scores_key])
/ np.array(result.result_dict[vif_den_scale0_scores_key])) +
(np.array(result.result_dict[vif_num_scale1_scores_key])
/ np.array(result.result_dict[vif_den_scale1_scores_key])) +
(np.array(result.result_dict[vif_num_scale2_scores_key])
/ np.array(result.result_dict[vif_den_scale2_scores_key])) +
(np.array(result.result_dict[vif_num_scale3_scores_key])
/ np.array(result.result_dict[vif_den_scale3_scores_key]))
) / 4.0
)
# adm_scalei = adm_num_scalei / adm_den_scalei, i = 0, 1, 2, 3
adm_num_scale0_scores_key = cls.get_scores_key('adm_num_scale0')
adm_den_scale0_scores_key = cls.get_scores_key('adm_den_scale0')
adm_num_scale1_scores_key = cls.get_scores_key('adm_num_scale1')
adm_den_scale1_scores_key = cls.get_scores_key('adm_den_scale1')
adm_num_scale2_scores_key = cls.get_scores_key('adm_num_scale2')
adm_den_scale2_scores_key = cls.get_scores_key('adm_den_scale2')
adm_num_scale3_scores_key = cls.get_scores_key('adm_num_scale3')
adm_den_scale3_scores_key = cls.get_scores_key('adm_den_scale3')
adm_scale0_scores_key = cls.get_scores_key('adm_scale0')
adm_scale1_scores_key = cls.get_scores_key('adm_scale1')
adm_scale2_scores_key = cls.get_scores_key('adm_scale2')
adm_scale3_scores_key = cls.get_scores_key('adm_scale3')
result.result_dict[adm_scale0_scores_key] = list(
(np.array(result.result_dict[adm_num_scale0_scores_key]) + cls.ADM_SCALE_CONSTANT)
/ (np.array(result.result_dict[adm_den_scale0_scores_key]) + cls.ADM_SCALE_CONSTANT)
)
result.result_dict[adm_scale1_scores_key] = list(
(np.array(result.result_dict[adm_num_scale1_scores_key]) + cls.ADM_SCALE_CONSTANT)
/ (np.array(result.result_dict[adm_den_scale1_scores_key]) + cls.ADM_SCALE_CONSTANT)
)
result.result_dict[adm_scale2_scores_key] = list(
(np.array(result.result_dict[adm_num_scale2_scores_key]) + cls.ADM_SCALE_CONSTANT)
/ (np.array(result.result_dict[adm_den_scale2_scores_key]) + cls.ADM_SCALE_CONSTANT)
)
result.result_dict[adm_scale3_scores_key] = list(
(np.array(result.result_dict[adm_num_scale3_scores_key]) + cls.ADM_SCALE_CONSTANT)
/ (np.array(result.result_dict[adm_den_scale3_scores_key]) + cls.ADM_SCALE_CONSTANT)
)
# adm3 = \
# (((adm_num_scale0 + ADM_SCALE_CONSTANT) / (adm_den_scale0 + ADM_SCALE_CONSTANT))
# + ((adm_num_scale1 + ADM_SCALE_CONSTANT) / (adm_den_scale1 + ADM_SCALE_CONSTANT))
# + ((adm_num_scale2 + ADM_SCALE_CONSTANT) / (adm_den_scale2 + ADM_SCALE_CONSTANT))
# + ((adm_num_scale3 + ADM_SCALE_CONSTANT) / (adm_den_scale3 + ADM_SCALE_CONSTANT))) / 4.0
adm3_scores_key = cls.get_scores_key('adm3')
result.result_dict[adm3_scores_key] = list(
(
((np.array(result.result_dict[adm_num_scale0_scores_key]) + cls.ADM_SCALE_CONSTANT)
/ (np.array(result.result_dict[adm_den_scale0_scores_key]) + cls.ADM_SCALE_CONSTANT)) +
((np.array(result.result_dict[adm_num_scale1_scores_key]) + cls.ADM_SCALE_CONSTANT)
/ (np.array(result.result_dict[adm_den_scale1_scores_key]) + cls.ADM_SCALE_CONSTANT)) +
((np.array(result.result_dict[adm_num_scale2_scores_key]) + cls.ADM_SCALE_CONSTANT)
/ (np.array(result.result_dict[adm_den_scale2_scores_key]) + cls.ADM_SCALE_CONSTANT)) +
((np.array(result.result_dict[adm_num_scale3_scores_key]) + cls.ADM_SCALE_CONSTANT)
/ (np.array(result.result_dict[adm_den_scale3_scores_key]) + cls.ADM_SCALE_CONSTANT))
) / 4.0
)
# validate
for feature in cls.DERIVED_ATOM_FEATURES:
assert cls.get_scores_key(feature) in result.result_dict
return result
class VifFrameDifferenceFeatureExtractor(FeatureExtractor):
TYPE = "VifDiff_feature"
VERSION = '0.1'
ATOM_FEATURES = ['vifdiff',
'vifdiff_num', 'vifdiff_den',
'vifdiff_num_scale0', 'vifdiff_den_scale0',
'vifdiff_num_scale1', 'vifdiff_den_scale1',
'vifdiff_num_scale2', 'vifdiff_den_scale2',
'vifdiff_num_scale3', 'vifdiff_den_scale3',
]
DERIVED_ATOM_FEATURES = ['vifdiff_scale0', 'vifdiff_scale1', 'vifdiff_scale2', 'vifdiff_scale3',
]
ADM2_CONSTANT = 0
ADM_SCALE_CONSTANT = 0
def _generate_result(self, asset):
# routine to call the command-line executable and generate feature
# scores in the log file.
quality_width, quality_height = asset.quality_width_height
log_file_path = self._get_log_file_path(asset)
yuv_type=self._get_workfile_yuv_type(asset)
ref_path=asset.ref_workfile_path
dis_path=asset.dis_workfile_path
w=quality_width
h=quality_height
logger = self.logger
ExternalProgramCaller.call_vifdiff_feature(yuv_type, ref_path, dis_path, w, h, log_file_path, logger)
@classmethod
def _post_process_result(cls, result):
# override Executor._post_process_result
result = super(VifFrameDifferenceFeatureExtractor, cls)._post_process_result(result)
# vifdiff_scalei = vifdiff_num_scalei / vifdiff_den_scalei, i = 0, 1, 2, 3
vifdiff_num_scale0_scores_key = cls.get_scores_key('vifdiff_num_scale0')
vifdiff_den_scale0_scores_key = cls.get_scores_key('vifdiff_den_scale0')
vifdiff_num_scale1_scores_key = cls.get_scores_key('vifdiff_num_scale1')
vifdiff_den_scale1_scores_key = cls.get_scores_key('vifdiff_den_scale1')
vifdiff_num_scale2_scores_key = cls.get_scores_key('vifdiff_num_scale2')
vifdiff_den_scale2_scores_key = cls.get_scores_key('vifdiff_den_scale2')
vifdiff_num_scale3_scores_key = cls.get_scores_key('vifdiff_num_scale3')
vifdiff_den_scale3_scores_key = cls.get_scores_key('vifdiff_den_scale3')
vifdiff_scale0_scores_key = cls.get_scores_key('vifdiff_scale0')
vifdiff_scale1_scores_key = cls.get_scores_key('vifdiff_scale1')
vifdiff_scale2_scores_key = cls.get_scores_key('vifdiff_scale2')
vifdiff_scale3_scores_key = cls.get_scores_key('vifdiff_scale3')
result.result_dict[vifdiff_scale0_scores_key] = list(
(np.array(result.result_dict[vifdiff_num_scale0_scores_key])
/ np.array(result.result_dict[vifdiff_den_scale0_scores_key]))
)
result.result_dict[vifdiff_scale1_scores_key] = list(
(np.array(result.result_dict[vifdiff_num_scale1_scores_key])
/ np.array(result.result_dict[vifdiff_den_scale1_scores_key]))
)
result.result_dict[vifdiff_scale2_scores_key] = list(
(np.array(result.result_dict[vifdiff_num_scale2_scores_key])
/ np.array(result.result_dict[vifdiff_den_scale2_scores_key]))
)
result.result_dict[vifdiff_scale3_scores_key] = list(
(np.array(result.result_dict[vifdiff_num_scale3_scores_key])
/ np.array(result.result_dict[vifdiff_den_scale3_scores_key]))
)
# validate
for feature in cls.DERIVED_ATOM_FEATURES:
assert cls.get_scores_key(feature) in result.result_dict
return result
class PsnrFeatureExtractor(FeatureExtractor):
TYPE = "PSNR_feature"
VERSION = "1.0"
ATOM_FEATURES = ['psnr']
def _generate_result(self, asset):
# routine to call the command-line executable and generate quality
# scores in the log file.
quality_width, quality_height = asset.quality_width_height
log_file_path = self._get_log_file_path(asset)
yuv_type=self._get_workfile_yuv_type(asset)
ref_path=asset.ref_workfile_path
dis_path=asset.dis_workfile_path
w=quality_width
h=quality_height
logger = self.logger
ExternalProgramCaller.call_psnr(yuv_type, ref_path, dis_path, w, h, log_file_path, logger)
class MomentFeatureExtractor(FeatureExtractor):
TYPE = "Moment_feature"
# VERSION = "1.0" # call executable
VERSION = "1.1" # python only
ATOM_FEATURES = ['ref1st', 'ref2nd', 'dis1st', 'dis2nd', ]
DERIVED_ATOM_FEATURES = ['refvar', 'disvar', ]
def _generate_result(self, asset):
# routine to call the command-line executable and generate feature
# scores in the log file.
quality_w, quality_h = asset.quality_width_height
ref_scores_mtx = None
with YuvReader(filepath=asset.ref_workfile_path, width=quality_w, height=quality_h,
yuv_type=self._get_workfile_yuv_type(asset)) as ref_yuv_reader:
scores_mtx_list = []
i = 0
for ref_yuv in ref_yuv_reader:
ref_y = ref_yuv[0]
firstm = ref_y.mean()
secondm = ref_y.var() + firstm**2
scores_mtx_list.append(np.hstack(([firstm], [secondm])))
i += 1
ref_scores_mtx = np.vstack(scores_mtx_list)
dis_scores_mtx = None
with YuvReader(filepath=asset.dis_workfile_path, width=quality_w, height=quality_h,
yuv_type=self._get_workfile_yuv_type(asset)) as dis_yuv_reader:
scores_mtx_list = []
i = 0
for dis_yuv in dis_yuv_reader:
dis_y = dis_yuv[0]
firstm = dis_y.mean()
secondm = dis_y.var() + firstm**2
scores_mtx_list.append(np.hstack(([firstm], [secondm])))
i += 1
dis_scores_mtx = np.vstack(scores_mtx_list)
assert ref_scores_mtx is not None and dis_scores_mtx is not None
log_dict = {'ref_scores_mtx': ref_scores_mtx.tolist(),
'dis_scores_mtx': dis_scores_mtx.tolist()}
log_file_path = self._get_log_file_path(asset)
with open(log_file_path, 'wt') as log_file:
log_file.write(str(log_dict))
def _get_feature_scores(self, asset):
# routine to read the feature scores from the log file, and return
# the scores in a dictionary format.
log_file_path = self._get_log_file_path(asset)
with open(log_file_path, 'rt') as log_file:
log_str = log_file.read()
log_dict = ast.literal_eval(log_str)
ref_scores_mtx = np.array(log_dict['ref_scores_mtx'])
dis_scores_mtx = np.array(log_dict['dis_scores_mtx'])
_, num_ref_features = ref_scores_mtx.shape
assert num_ref_features == 2 # ref1st, ref2nd
_, num_dis_features = dis_scores_mtx.shape
assert num_dis_features == 2 # dis1st, dis2nd
feature_result = {}
feature_result[self.get_scores_key('ref1st')] = list(ref_scores_mtx[:, 0])
feature_result[self.get_scores_key('ref2nd')] = list(ref_scores_mtx[:, 1])
feature_result[self.get_scores_key('dis1st')] = list(dis_scores_mtx[:, 0])
feature_result[self.get_scores_key('dis2nd')] = list(dis_scores_mtx[:, 1])
return feature_result
@classmethod
def _post_process_result(cls, result):
# override Executor._post_process_result
result = super(MomentFeatureExtractor, cls)._post_process_result(result)
# calculate refvar and disvar from ref1st, ref2nd, dis1st, dis2nd
refvar_scores_key = cls.get_scores_key('refvar')
ref1st_scores_key = cls.get_scores_key('ref1st')
ref2nd_scores_key = cls.get_scores_key('ref2nd')
disvar_scores_key = cls.get_scores_key('disvar')
dis1st_scores_key = cls.get_scores_key('dis1st')
dis2nd_scores_key = cls.get_scores_key('dis2nd')
get_var = lambda m: m[1] - m[0] * m[0]
result.result_dict[refvar_scores_key] = \
to_list(map(get_var, zip(result.result_dict[ref1st_scores_key],
result.result_dict[ref2nd_scores_key])))
result.result_dict[disvar_scores_key] = \
to_list(map(get_var, zip(result.result_dict[dis1st_scores_key],
result.result_dict[dis2nd_scores_key])))
# validate
for feature in cls.DERIVED_ATOM_FEATURES:
assert cls.get_scores_key(feature) in result.result_dict
return result
class SsimFeatureExtractor(FeatureExtractor):
TYPE = "SSIM_feature"
# VERSION = "1.0"
VERSION = "1.1" # fix OPT_RANGE_PIXEL_OFFSET = 0
ATOM_FEATURES = ['ssim', 'ssim_l', 'ssim_c', 'ssim_s']
def _generate_result(self, asset):
# routine to call the command-line executable and generate quality
# scores in the log file.
quality_width, quality_height = asset.quality_width_height
log_file_path = self._get_log_file_path(asset)
yuv_type=self._get_workfile_yuv_type(asset)
ref_path=asset.ref_workfile_path
dis_path=asset.dis_workfile_path
w=quality_width
h=quality_height
logger = self.logger
ExternalProgramCaller.call_ssim(yuv_type, ref_path, dis_path, w, h, log_file_path, logger)
class MsSsimFeatureExtractor(FeatureExtractor):
TYPE = "MS_SSIM_feature"
# VERSION = "1.0"
VERSION = "1.1" # fix OPT_RANGE_PIXEL_OFFSET = 0
ATOM_FEATURES = ['ms_ssim',
'ms_ssim_l_scale0', 'ms_ssim_c_scale0', 'ms_ssim_s_scale0',
'ms_ssim_l_scale1', 'ms_ssim_c_scale1', 'ms_ssim_s_scale1',
'ms_ssim_l_scale2', 'ms_ssim_c_scale2', 'ms_ssim_s_scale2',
'ms_ssim_l_scale3', 'ms_ssim_c_scale3', 'ms_ssim_s_scale3',
'ms_ssim_l_scale4', 'ms_ssim_c_scale4', 'ms_ssim_s_scale4',
]
def _generate_result(self, asset):
# routine to call the command-line executable and generate quality
# scores in the log file.
quality_width, quality_height = asset.quality_width_height
log_file_path = self._get_log_file_path(asset)
yuv_type=self._get_workfile_yuv_type(asset)
ref_path=asset.ref_workfile_path
dis_path=asset.dis_workfile_path
w=quality_width
h=quality_height
logger = self.logger
ExternalProgramCaller.call_ms_ssim(yuv_type, ref_path, dis_path, w, h, log_file_path, logger)
| [
"vmaf.ExternalProgramCaller.call_vmaf_feature",
"vmaf.core.result.Result",
"vmaf.ExternalProgramCaller.call_ssim",
"numpy.hstack",
"re.match",
"ast.literal_eval",
"numpy.array",
"vmaf.ExternalProgramCaller.call_psnr",
"numpy.isnan",
"numpy.vstack",
"vmaf.ExternalProgramCaller.call_ms_ssim",
"numpy.isinf",
"vmaf.ExternalProgramCaller.call_vifdiff_feature"
] | [((1573, 1607), 'vmaf.core.result.Result', 'Result', (['asset', 'executor_id', 'result'], {}), '(asset, executor_id, result)\n', (1579, 1607), False, 'from vmaf.core.result import Result\n'), ((6044, 6146), 'vmaf.ExternalProgramCaller.call_vmaf_feature', 'ExternalProgramCaller.call_vmaf_feature', (['yuv_type', 'ref_path', 'dis_path', 'w', 'h', 'log_file_path', 'logger'], {}), '(yuv_type, ref_path, dis_path, w, h,\n log_file_path, logger)\n', (6083, 6146), False, 'from vmaf import ExternalProgramCaller, to_list\n'), ((14143, 14248), 'vmaf.ExternalProgramCaller.call_vifdiff_feature', 'ExternalProgramCaller.call_vifdiff_feature', (['yuv_type', 'ref_path', 'dis_path', 'w', 'h', 'log_file_path', 'logger'], {}), '(yuv_type, ref_path, dis_path, w,\n h, log_file_path, logger)\n', (14185, 14248), False, 'from vmaf import ExternalProgramCaller, to_list\n'), ((17137, 17231), 'vmaf.ExternalProgramCaller.call_psnr', 'ExternalProgramCaller.call_psnr', (['yuv_type', 'ref_path', 'dis_path', 'w', 'h', 'log_file_path', 'logger'], {}), '(yuv_type, ref_path, dis_path, w, h,\n log_file_path, logger)\n', (17168, 17231), False, 'from vmaf import ExternalProgramCaller, to_list\n'), ((19599, 19635), 'numpy.array', 'np.array', (["log_dict['ref_scores_mtx']"], {}), "(log_dict['ref_scores_mtx'])\n", (19607, 19635), True, 'import numpy as np\n'), ((19661, 19697), 'numpy.array', 'np.array', (["log_dict['dis_scores_mtx']"], {}), "(log_dict['dis_scores_mtx'])\n", (19669, 19697), True, 'import numpy as np\n'), ((22214, 22308), 'vmaf.ExternalProgramCaller.call_ssim', 'ExternalProgramCaller.call_ssim', (['yuv_type', 'ref_path', 'dis_path', 'w', 'h', 'log_file_path', 'logger'], {}), '(yuv_type, ref_path, dis_path, w, h,\n log_file_path, logger)\n', (22245, 22308), False, 'from vmaf import ExternalProgramCaller, to_list\n'), ((23415, 23512), 'vmaf.ExternalProgramCaller.call_ms_ssim', 'ExternalProgramCaller.call_ms_ssim', (['yuv_type', 'ref_path', 'dis_path', 'w', 'h', 'log_file_path', 'logger'], {}), '(yuv_type, ref_path, dis_path, w, h,\n log_file_path, logger)\n', (23449, 23512), False, 'from vmaf import ExternalProgramCaller, to_list\n'), ((18258, 18284), 'numpy.vstack', 'np.vstack', (['scores_mtx_list'], {}), '(scores_mtx_list)\n', (18267, 18284), True, 'import numpy as np\n'), ((18837, 18863), 'numpy.vstack', 'np.vstack', (['scores_mtx_list'], {}), '(scores_mtx_list)\n', (18846, 18863), True, 'import numpy as np\n'), ((19548, 19573), 'ast.literal_eval', 'ast.literal_eval', (['log_str'], {}), '(log_str)\n', (19564, 19573), False, 'import ast\n'), ((7803, 7858), 'numpy.array', 'np.array', (['result.result_dict[vif_num_scale0_scores_key]'], {}), '(result.result_dict[vif_num_scale0_scores_key])\n', (7811, 7858), True, 'import numpy as np\n'), ((7874, 7929), 'numpy.array', 'np.array', (['result.result_dict[vif_den_scale0_scores_key]'], {}), '(result.result_dict[vif_den_scale0_scores_key])\n', (7882, 7929), True, 'import numpy as np\n'), ((8012, 8067), 'numpy.array', 'np.array', (['result.result_dict[vif_num_scale1_scores_key]'], {}), '(result.result_dict[vif_num_scale1_scores_key])\n', (8020, 8067), True, 'import numpy as np\n'), ((8083, 8138), 'numpy.array', 'np.array', (['result.result_dict[vif_den_scale1_scores_key]'], {}), '(result.result_dict[vif_den_scale1_scores_key])\n', (8091, 8138), True, 'import numpy as np\n'), ((8221, 8276), 'numpy.array', 'np.array', (['result.result_dict[vif_num_scale2_scores_key]'], {}), '(result.result_dict[vif_num_scale2_scores_key])\n', (8229, 8276), True, 'import numpy as np\n'), 
((8292, 8347), 'numpy.array', 'np.array', (['result.result_dict[vif_den_scale2_scores_key]'], {}), '(result.result_dict[vif_den_scale2_scores_key])\n', (8300, 8347), True, 'import numpy as np\n'), ((8430, 8485), 'numpy.array', 'np.array', (['result.result_dict[vif_num_scale3_scores_key]'], {}), '(result.result_dict[vif_num_scale3_scores_key])\n', (8438, 8485), True, 'import numpy as np\n'), ((8501, 8556), 'numpy.array', 'np.array', (['result.result_dict[vif_den_scale3_scores_key]'], {}), '(result.result_dict[vif_den_scale3_scores_key])\n', (8509, 8556), True, 'import numpy as np\n'), ((15548, 15607), 'numpy.array', 'np.array', (['result.result_dict[vifdiff_num_scale0_scores_key]'], {}), '(result.result_dict[vifdiff_num_scale0_scores_key])\n', (15556, 15607), True, 'import numpy as np\n'), ((15623, 15682), 'numpy.array', 'np.array', (['result.result_dict[vifdiff_den_scale0_scores_key]'], {}), '(result.result_dict[vifdiff_den_scale0_scores_key])\n', (15631, 15682), True, 'import numpy as np\n'), ((15769, 15828), 'numpy.array', 'np.array', (['result.result_dict[vifdiff_num_scale1_scores_key]'], {}), '(result.result_dict[vifdiff_num_scale1_scores_key])\n', (15777, 15828), True, 'import numpy as np\n'), ((15844, 15903), 'numpy.array', 'np.array', (['result.result_dict[vifdiff_den_scale1_scores_key]'], {}), '(result.result_dict[vifdiff_den_scale1_scores_key])\n', (15852, 15903), True, 'import numpy as np\n'), ((15990, 16049), 'numpy.array', 'np.array', (['result.result_dict[vifdiff_num_scale2_scores_key]'], {}), '(result.result_dict[vifdiff_num_scale2_scores_key])\n', (15998, 16049), True, 'import numpy as np\n'), ((16065, 16124), 'numpy.array', 'np.array', (['result.result_dict[vifdiff_den_scale2_scores_key]'], {}), '(result.result_dict[vifdiff_den_scale2_scores_key])\n', (16073, 16124), True, 'import numpy as np\n'), ((16211, 16270), 'numpy.array', 'np.array', (['result.result_dict[vifdiff_num_scale3_scores_key]'], {}), '(result.result_dict[vifdiff_num_scale3_scores_key])\n', (16219, 16270), True, 'import numpy as np\n'), ((16286, 16345), 'numpy.array', 'np.array', (['result.result_dict[vifdiff_den_scale3_scores_key]'], {}), '(result.result_dict[vifdiff_den_scale3_scores_key])\n', (16294, 16345), True, 'import numpy as np\n'), ((2665, 2692), 're.match', 're.match', (['re_template', 'line'], {}), '(re_template, line)\n', (2673, 2692), False, 'import re\n'), ((6651, 6699), 'numpy.array', 'np.array', (['result.result_dict[adm_num_scores_key]'], {}), '(result.result_dict[adm_num_scores_key])\n', (6659, 6699), True, 'import numpy as np\n'), ((6736, 6784), 'numpy.array', 'np.array', (['result.result_dict[adm_den_scores_key]'], {}), '(result.result_dict[adm_den_scores_key])\n', (6744, 6784), True, 'import numpy as np\n'), ((10492, 10547), 'numpy.array', 'np.array', (['result.result_dict[adm_num_scale0_scores_key]'], {}), '(result.result_dict[adm_num_scale0_scores_key])\n', (10500, 10547), True, 'import numpy as np\n'), ((10589, 10644), 'numpy.array', 'np.array', (['result.result_dict[adm_den_scale0_scores_key]'], {}), '(result.result_dict[adm_den_scale0_scores_key])\n', (10597, 10644), True, 'import numpy as np\n'), ((10752, 10807), 'numpy.array', 'np.array', (['result.result_dict[adm_num_scale1_scores_key]'], {}), '(result.result_dict[adm_num_scale1_scores_key])\n', (10760, 10807), True, 'import numpy as np\n'), ((10849, 10904), 'numpy.array', 'np.array', (['result.result_dict[adm_den_scale1_scores_key]'], {}), '(result.result_dict[adm_den_scale1_scores_key])\n', (10857, 10904), True, 'import numpy 
as np\n'), ((11012, 11067), 'numpy.array', 'np.array', (['result.result_dict[adm_num_scale2_scores_key]'], {}), '(result.result_dict[adm_num_scale2_scores_key])\n', (11020, 11067), True, 'import numpy as np\n'), ((11109, 11164), 'numpy.array', 'np.array', (['result.result_dict[adm_den_scale2_scores_key]'], {}), '(result.result_dict[adm_den_scale2_scores_key])\n', (11117, 11164), True, 'import numpy as np\n'), ((11272, 11327), 'numpy.array', 'np.array', (['result.result_dict[adm_num_scale3_scores_key]'], {}), '(result.result_dict[adm_num_scale3_scores_key])\n', (11280, 11327), True, 'import numpy as np\n'), ((11369, 11424), 'numpy.array', 'np.array', (['result.result_dict[adm_den_scale3_scores_key]'], {}), '(result.result_dict[adm_den_scale3_scores_key])\n', (11377, 11424), True, 'import numpy as np\n'), ((18172, 18204), 'numpy.hstack', 'np.hstack', (['([firstm], [secondm])'], {}), '(([firstm], [secondm]))\n', (18181, 18204), True, 'import numpy as np\n'), ((18751, 18783), 'numpy.hstack', 'np.hstack', (['([firstm], [secondm])'], {}), '(([firstm], [secondm]))\n', (18760, 18783), True, 'import numpy as np\n'), ((9343, 9398), 'numpy.array', 'np.array', (['result.result_dict[vif_num_scale3_scores_key]'], {}), '(result.result_dict[vif_num_scale3_scores_key])\n', (9351, 9398), True, 'import numpy as np\n'), ((9418, 9473), 'numpy.array', 'np.array', (['result.result_dict[vif_den_scale3_scores_key]'], {}), '(result.result_dict[vif_den_scale3_scores_key])\n', (9426, 9473), True, 'import numpy as np\n'), ((2987, 3000), 'numpy.isnan', 'np.isnan', (['val'], {}), '(val)\n', (2995, 3000), True, 'import numpy as np\n'), ((3004, 3017), 'numpy.isinf', 'np.isinf', (['val'], {}), '(val)\n', (3012, 3017), True, 'import numpy as np\n'), ((9192, 9247), 'numpy.array', 'np.array', (['result.result_dict[vif_num_scale2_scores_key]'], {}), '(result.result_dict[vif_num_scale2_scores_key])\n', (9200, 9247), True, 'import numpy as np\n'), ((9267, 9322), 'numpy.array', 'np.array', (['result.result_dict[vif_den_scale2_scores_key]'], {}), '(result.result_dict[vif_den_scale2_scores_key])\n', (9275, 9322), True, 'import numpy as np\n'), ((12610, 12665), 'numpy.array', 'np.array', (['result.result_dict[adm_num_scale3_scores_key]'], {}), '(result.result_dict[adm_num_scale3_scores_key])\n', (12618, 12665), True, 'import numpy as np\n'), ((12712, 12767), 'numpy.array', 'np.array', (['result.result_dict[adm_den_scale3_scores_key]'], {}), '(result.result_dict[adm_den_scale3_scores_key])\n', (12720, 12767), True, 'import numpy as np\n'), ((8890, 8945), 'numpy.array', 'np.array', (['result.result_dict[vif_num_scale0_scores_key]'], {}), '(result.result_dict[vif_num_scale0_scores_key])\n', (8898, 8945), True, 'import numpy as np\n'), ((8965, 9020), 'numpy.array', 'np.array', (['result.result_dict[vif_den_scale0_scores_key]'], {}), '(result.result_dict[vif_den_scale0_scores_key])\n', (8973, 9020), True, 'import numpy as np\n'), ((9041, 9096), 'numpy.array', 'np.array', (['result.result_dict[vif_num_scale1_scores_key]'], {}), '(result.result_dict[vif_num_scale1_scores_key])\n', (9049, 9096), True, 'import numpy as np\n'), ((9116, 9171), 'numpy.array', 'np.array', (['result.result_dict[vif_den_scale1_scores_key]'], {}), '(result.result_dict[vif_den_scale1_scores_key])\n', (9124, 9171), True, 'import numpy as np\n'), ((12405, 12460), 'numpy.array', 'np.array', (['result.result_dict[adm_num_scale2_scores_key]'], {}), '(result.result_dict[adm_num_scale2_scores_key])\n', (12413, 12460), True, 'import numpy as np\n'), ((12507, 12562), 
'numpy.array', 'np.array', (['result.result_dict[adm_den_scale2_scores_key]'], {}), '(result.result_dict[adm_den_scale2_scores_key])\n', (12515, 12562), True, 'import numpy as np\n'), ((11995, 12050), 'numpy.array', 'np.array', (['result.result_dict[adm_num_scale0_scores_key]'], {}), '(result.result_dict[adm_num_scale0_scores_key])\n', (12003, 12050), True, 'import numpy as np\n'), ((12097, 12152), 'numpy.array', 'np.array', (['result.result_dict[adm_den_scale0_scores_key]'], {}), '(result.result_dict[adm_den_scale0_scores_key])\n', (12105, 12152), True, 'import numpy as np\n'), ((12200, 12255), 'numpy.array', 'np.array', (['result.result_dict[adm_num_scale1_scores_key]'], {}), '(result.result_dict[adm_num_scale1_scores_key])\n', (12208, 12255), True, 'import numpy as np\n'), ((12302, 12357), 'numpy.array', 'np.array', (['result.result_dict[adm_den_scale1_scores_key]'], {}), '(result.result_dict[adm_den_scale1_scores_key])\n', (12310, 12357), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
Created by susy at 2019/11/8
"""
from dao.dao import DataDao
import pytz
from dao.models import PanAccounts
from cfg import PAN_SERVICE, MASTER_ACCOUNT_ID
class BaseService:
def __init__(self):
self.default_tz = pytz.timezone('Asia/Chongqing')
# self.pan_acc: PanAccounts = DataDao.pan_account_list(MASTER_ACCOUNT_ID, False)
| [
"pytz.timezone"
] | [((255, 286), 'pytz.timezone', 'pytz.timezone', (['"""Asia/Chongqing"""'], {}), "('Asia/Chongqing')\n", (268, 286), False, 'import pytz\n')] |
import os, time, argparse
from datetime import datetime
from pm4py.objects.log.importer.csv import factory as csv_importer
from pm4py.objects.log.exporter.xes import factory as xes_exporter
from pm4py.objects.log.importer.xes import factory as xes_importer
from pm4py.objects.petri.importer import pnml as pnml_importer
from pm4py.evaluation.replay_fitness import factory as replay_factory
from pm4py.evaluation.precision import factory as precision_factory
from conf.settings import DATA_PATH
WORK_PATH = os.path.abspath(os.getcwd())
def readFile(f_name1, f_name2, unique=False):
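    # Read whitespace-separated traces from the training file and the generated file,
    # optionally keeping only unique traces, and return them as lists of event tokens.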
traces = []
skipped = 0
with open(f_name1) as file:
file_contents = file.read()
file_contents = file_contents.split("\n")
print("Number of train traces are:", str(len(file_contents)))
for row in file_contents:
if unique:
if row not in traces:
traces.append(row)
else:
skipped += 1
else:
traces.append(row)
with open(f_name2) as file:
file_contents = file.read()
file_contents = file_contents.split("\n")
print("Number of generated traces are:", str(len(file_contents)))
for row in file_contents:
if unique:
if row not in traces:
traces.append(row)
else:
skipped += 1
else:
traces.append(row)
f_traces = []
for trace in traces:
f_trace = []
t = trace.split(" ")
for i in t:
if i != "" and "<" not in i:
f_trace.append(i)
if len(f_trace) > 0:
f_traces.append(f_trace)
print("Number of traces are:", str(len(f_traces)))
print("Number of skipped traces are:", str(skipped))
return f_traces
def writeToFile(file, lst):
with open(file, 'w') as outfile:
for entry in lst:
outfile.write(str(entry) + "\n")
def convertToCsv(traces, to_path):
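    # Write the traces as a pm4py-importable CSV event log, assigning one case id per
    # trace and strictly increasing synthetic timestamps per event.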
lines = []
case = 0
timestamp = 0
line = "concept:name,case:concept:name,time:timestamp"
lines.append(line)
for trace in traces:
for event in trace:
timestamp = timestamp + 1
dt_object = datetime.fromtimestamp(timestamp)
line = str(event) + "_" + "," + str(case) + "," + str(dt_object)
lines.append(line)
case = case + 1
writeToFile(str(to_path), lines)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('-s', '--system', help='Which system (e.g. pb_system_5_3)', required=True)
parser.add_argument('-sfx', '--suffix', help='Suffix (chosen epoch, e.g. 1981)', required=True)
parser.add_argument('-j', '--job', help='Job (0/1)', required=True)
parser.add_argument('-pn', '--pn', help='Petri net file to evaluate', required=True)
parser.add_argument('-strategy', '--strategy', help='naive/mh', required=True)
args = parser.parse_args()
system = args.system
suffix = int(args.suffix)
job = args.job
pn = args.pn
strategy = args.strategy
if DATA_PATH is None:
train_file = os.path.join(WORK_PATH, "data", "variants", system + "_train.txt")
gen_file = os.path.join(WORK_PATH, "data", "avatar", "variants", system + "_relgan_" + str(suffix) + "_j" + str(job) + "_" + strategy + ".txt")
csv_file = os.path.join(WORK_PATH, "data", "avatar", "variants", system + "_relgan_" + str(suffix) + "_j" + str(job) + "_" + strategy + "_generalization.csv")
xes_file = os.path.join(WORK_PATH, "data", "avatar", "variants", system + "_relgan_" + str(suffix) + "_j" + str(job) + "_" + strategy + "_generalization.xes")
pn_file = os.path.join(WORK_PATH, "data", "pns", system, pn)
else:
train_file = os.path.join(DATA_PATH, "variants", system + "_train.txt")
gen_file = os.path.join(DATA_PATH, "avatar", "variants", system + "_relgan_" + str(suffix) + "_j" + str(job) + "_" + strategy + ".txt")
csv_file = os.path.join(DATA_PATH, "avatar", "variants", system + "_relgan_" + str(suffix) + "_j" + str(job) + "_" + strategy + "_generalization.csv")
xes_file = os.path.join(DATA_PATH, "avatar", "variants", system + "_relgan_" + str(suffix) + "_j" + str(job) + "_" + strategy + "_generalization.xes")
pn_file = os.path.join(DATA_PATH, "pns", system, pn)
""" READ FILES AND CONVERT TO XES """
traces = readFile(train_file,gen_file, unique=True)
convertToCsv(traces=traces, to_path=csv_file)
time.sleep(1)
log = csv_importer.import_event_log(csv_file)
xes_exporter.export_log(log, xes_file)
time.sleep(1)
""" PERFORM MEASUREMENT ON PN AND XES"""
log = xes_importer.import_log(xes_file)
net, initial_marking, final_marking = pnml_importer.import_net(pn_file)
fitness = replay_factory.apply(log, net, initial_marking, final_marking)
print("Fitness=", fitness)
precision = precision_factory.apply(log, net, initial_marking, final_marking)
print("Precision=", precision)
fitness = fitness["log_fitness"]
generalization = 2 * ((fitness * precision) / (fitness + precision))
if strategy == "mh":
print("**** ", str(system), " Job ", str(job), " on PN ", str(pn_file), " using MH SAMPLING on suffix ", str(suffix)," ***")
elif strategy == "naive":
print("**** ", str(system), " Job ", str(job), " on PN ", str(pn_file), " using NAIVE SAMPLING on suffix ", str(suffix), " ***")
else:
raise ValueError("Unknown strategy.")
print("AVATAR Generalization=", generalization) | [
"datetime.datetime.fromtimestamp",
"argparse.ArgumentParser",
"pm4py.objects.log.importer.xes.factory.import_log",
"pm4py.evaluation.precision.factory.apply",
"os.path.join",
"pm4py.objects.log.exporter.xes.factory.export_log",
"time.sleep",
"pm4py.objects.log.importer.csv.factory.import_event_log",
"os.getcwd",
"pm4py.objects.petri.importer.pnml.import_net",
"pm4py.evaluation.replay_fitness.factory.apply"
] | [((526, 537), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (535, 537), False, 'import os, time, argparse\n'), ((2537, 2562), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (2560, 2562), False, 'import os, time, argparse\n'), ((4595, 4608), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (4605, 4608), False, 'import os, time, argparse\n'), ((4620, 4659), 'pm4py.objects.log.importer.csv.factory.import_event_log', 'csv_importer.import_event_log', (['csv_file'], {}), '(csv_file)\n', (4649, 4659), True, 'from pm4py.objects.log.importer.csv import factory as csv_importer\n'), ((4664, 4702), 'pm4py.objects.log.exporter.xes.factory.export_log', 'xes_exporter.export_log', (['log', 'xes_file'], {}), '(log, xes_file)\n', (4687, 4702), True, 'from pm4py.objects.log.exporter.xes import factory as xes_exporter\n'), ((4707, 4720), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (4717, 4720), False, 'import os, time, argparse\n'), ((4777, 4810), 'pm4py.objects.log.importer.xes.factory.import_log', 'xes_importer.import_log', (['xes_file'], {}), '(xes_file)\n', (4800, 4810), True, 'from pm4py.objects.log.importer.xes import factory as xes_importer\n'), ((4853, 4886), 'pm4py.objects.petri.importer.pnml.import_net', 'pnml_importer.import_net', (['pn_file'], {}), '(pn_file)\n', (4877, 4886), True, 'from pm4py.objects.petri.importer import pnml as pnml_importer\n'), ((4902, 4964), 'pm4py.evaluation.replay_fitness.factory.apply', 'replay_factory.apply', (['log', 'net', 'initial_marking', 'final_marking'], {}), '(log, net, initial_marking, final_marking)\n', (4922, 4964), True, 'from pm4py.evaluation.replay_fitness import factory as replay_factory\n'), ((5013, 5078), 'pm4py.evaluation.precision.factory.apply', 'precision_factory.apply', (['log', 'net', 'initial_marking', 'final_marking'], {}), '(log, net, initial_marking, final_marking)\n', (5036, 5078), True, 'from pm4py.evaluation.precision import factory as precision_factory\n'), ((3206, 3272), 'os.path.join', 'os.path.join', (['WORK_PATH', '"""data"""', '"""variants"""', "(system + '_train.txt')"], {}), "(WORK_PATH, 'data', 'variants', system + '_train.txt')\n", (3218, 3272), False, 'import os, time, argparse\n'), ((3777, 3827), 'os.path.join', 'os.path.join', (['WORK_PATH', '"""data"""', '"""pns"""', 'system', 'pn'], {}), "(WORK_PATH, 'data', 'pns', system, pn)\n", (3789, 3827), False, 'import os, time, argparse\n'), ((3859, 3917), 'os.path.join', 'os.path.join', (['DATA_PATH', '"""variants"""', "(system + '_train.txt')"], {}), "(DATA_PATH, 'variants', system + '_train.txt')\n", (3871, 3917), False, 'import os, time, argparse\n'), ((4398, 4440), 'os.path.join', 'os.path.join', (['DATA_PATH', '"""pns"""', 'system', 'pn'], {}), "(DATA_PATH, 'pns', system, pn)\n", (4410, 4440), False, 'import os, time, argparse\n'), ((2290, 2323), 'datetime.datetime.fromtimestamp', 'datetime.fromtimestamp', (['timestamp'], {}), '(timestamp)\n', (2312, 2323), False, 'from datetime import datetime\n')] |
# $ python embed.py
from ctypes import cdll
lib = cdll.LoadLibrary("../target/release/libembed.dylib") #=> for Mac
#lib = cdll.LoadLibrary("../target/release/libembed.so") #=> for Linux
lib.process()
print("done!")
| [
"ctypes.cdll.LoadLibrary"
] | [((52, 104), 'ctypes.cdll.LoadLibrary', 'cdll.LoadLibrary', (['"""../target/release/libembed.dylib"""'], {}), "('../target/release/libembed.dylib')\n", (68, 104), False, 'from ctypes import cdll\n')] |
#!/usr/bin/env python
import unittest
from rdflib.graph import ConjunctiveGraph
from rdflib.term import URIRef, Literal
from rdflib.graph import Graph
class TestTrixSerialize(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def testSerialize(self):
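      # Round-trip two named graphs through the TriX format and check that every
      # parsed quad ends up in the graph it was originally asserted in.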
s1 = URIRef('store:1')
r1 = URIRef('resource:1')
r2 = URIRef('resource:2')
label = URIRef('predicate:label')
g1 = Graph(identifier = s1)
g1.add((r1, label, Literal("label 1", lang="en")))
g1.add((r1, label, Literal("label 2")))
s2 = URIRef('store:2')
g2 = Graph(identifier = s2)
g2.add((r2, label, Literal("label 3")))
g = ConjunctiveGraph()
for s,p,o in g1.triples((None, None, None)):
g.addN([(s,p,o,g1)])
for s,p,o in g2.triples((None, None, None)):
g.addN([(s,p,o,g2)])
r3 = URIRef('resource:3')
g.add((r3, label, Literal(4)))
r = g.serialize(format='trix')
g3 = ConjunctiveGraph()
from StringIO import StringIO
g3.parse(StringIO(r), format='trix')
for q in g3.quads((None,None,None)):
# TODO: Fix once getGraph/getContext is in conjunctive graph
if isinstance(q[3].identifier, URIRef):
tg=Graph(store=g.store, identifier=q[3].identifier)
else:
# BNode, this is a bit ugly
# we cannot match the bnode to the right graph automagically
# here I know there is only one anonymous graph,
# and that is the default one, but this is not always the case
tg=g.default_context
self.assertTrue(q[0:3] in tg)
if __name__=='__main__':
unittest.main()
| [
"StringIO.StringIO",
"rdflib.term.URIRef",
"rdflib.term.Literal",
"rdflib.graph.Graph",
"rdflib.graph.ConjunctiveGraph",
"unittest.main"
] | [((1684, 1699), 'unittest.main', 'unittest.main', ([], {}), '()\n', (1697, 1699), False, 'import unittest\n'), ((314, 331), 'rdflib.term.URIRef', 'URIRef', (['"""store:1"""'], {}), "('store:1')\n", (320, 331), False, 'from rdflib.term import URIRef, Literal\n'), ((343, 363), 'rdflib.term.URIRef', 'URIRef', (['"""resource:1"""'], {}), "('resource:1')\n", (349, 363), False, 'from rdflib.term import URIRef, Literal\n'), ((375, 395), 'rdflib.term.URIRef', 'URIRef', (['"""resource:2"""'], {}), "('resource:2')\n", (381, 395), False, 'from rdflib.term import URIRef, Literal\n'), ((411, 436), 'rdflib.term.URIRef', 'URIRef', (['"""predicate:label"""'], {}), "('predicate:label')\n", (417, 436), False, 'from rdflib.term import URIRef, Literal\n'), ((449, 469), 'rdflib.graph.Graph', 'Graph', ([], {'identifier': 's1'}), '(identifier=s1)\n', (454, 469), False, 'from rdflib.graph import Graph\n'), ((587, 604), 'rdflib.term.URIRef', 'URIRef', (['"""store:2"""'], {}), "('store:2')\n", (593, 604), False, 'from rdflib.term import URIRef, Literal\n'), ((616, 636), 'rdflib.graph.Graph', 'Graph', ([], {'identifier': 's2'}), '(identifier=s2)\n', (621, 636), False, 'from rdflib.graph import Graph\n'), ((696, 714), 'rdflib.graph.ConjunctiveGraph', 'ConjunctiveGraph', ([], {}), '()\n', (712, 714), False, 'from rdflib.graph import ConjunctiveGraph\n'), ((886, 906), 'rdflib.term.URIRef', 'URIRef', (['"""resource:3"""'], {}), "('resource:3')\n", (892, 906), False, 'from rdflib.term import URIRef, Literal\n'), ((1006, 1024), 'rdflib.graph.ConjunctiveGraph', 'ConjunctiveGraph', ([], {}), '()\n', (1022, 1024), False, 'from rdflib.graph import ConjunctiveGraph\n'), ((1077, 1088), 'StringIO.StringIO', 'StringIO', (['r'], {}), '(r)\n', (1085, 1088), False, 'from StringIO import StringIO\n'), ((497, 526), 'rdflib.term.Literal', 'Literal', (['"""label 1"""'], {'lang': '"""en"""'}), "('label 1', lang='en')\n", (504, 526), False, 'from rdflib.term import URIRef, Literal\n'), ((554, 572), 'rdflib.term.Literal', 'Literal', (['"""label 2"""'], {}), "('label 2')\n", (561, 572), False, 'from rdflib.term import URIRef, Literal\n'), ((664, 682), 'rdflib.term.Literal', 'Literal', (['"""label 3"""'], {}), "('label 3')\n", (671, 682), False, 'from rdflib.term import URIRef, Literal\n'), ((931, 941), 'rdflib.term.Literal', 'Literal', (['(4)'], {}), '(4)\n', (938, 941), False, 'from rdflib.term import URIRef, Literal\n'), ((1280, 1328), 'rdflib.graph.Graph', 'Graph', ([], {'store': 'g.store', 'identifier': 'q[3].identifier'}), '(store=g.store, identifier=q[3].identifier)\n', (1285, 1328), False, 'from rdflib.graph import Graph\n')] |
from sklearn.linear_model import LogisticRegression
from fightchurn.listings.chap8.listing_8_2_logistic_regression import prepare_data, save_regression_model
from fightchurn.listings.chap8.listing_8_2_logistic_regression import save_regression_summary, save_dataset_predictions
def regression_cparam(data_set_path, C_param):
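    # Fit an L1-regularized logistic regression at the given inverse-regularization
    # strength C, then save the coefficient summary, the model, and its predictions.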
X,y = prepare_data(data_set_path)
retain_reg = LogisticRegression( C=C_param, penalty='l1', solver='liblinear', fit_intercept=True)
retain_reg.fit(X, y)
c_ext = '_c{:.3f}'.format(C_param)
save_regression_summary(data_set_path,retain_reg,ext=c_ext)
save_regression_model(data_set_path,retain_reg,ext=c_ext)
save_dataset_predictions(data_set_path,retain_reg,X,ext=c_ext)
| [
"fightchurn.listings.chap8.listing_8_2_logistic_regression.prepare_data",
"sklearn.linear_model.LogisticRegression",
"fightchurn.listings.chap8.listing_8_2_logistic_regression.save_regression_summary",
"fightchurn.listings.chap8.listing_8_2_logistic_regression.save_dataset_predictions",
"fightchurn.listings.chap8.listing_8_2_logistic_regression.save_regression_model"
] | [((336, 363), 'fightchurn.listings.chap8.listing_8_2_logistic_regression.prepare_data', 'prepare_data', (['data_set_path'], {}), '(data_set_path)\n', (348, 363), False, 'from fightchurn.listings.chap8.listing_8_2_logistic_regression import prepare_data, save_regression_model\n'), ((381, 468), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {'C': 'C_param', 'penalty': '"""l1"""', 'solver': '"""liblinear"""', 'fit_intercept': '(True)'}), "(C=C_param, penalty='l1', solver='liblinear',\n fit_intercept=True)\n", (399, 468), False, 'from sklearn.linear_model import LogisticRegression\n'), ((534, 595), 'fightchurn.listings.chap8.listing_8_2_logistic_regression.save_regression_summary', 'save_regression_summary', (['data_set_path', 'retain_reg'], {'ext': 'c_ext'}), '(data_set_path, retain_reg, ext=c_ext)\n', (557, 595), False, 'from fightchurn.listings.chap8.listing_8_2_logistic_regression import save_regression_summary, save_dataset_predictions\n'), ((598, 657), 'fightchurn.listings.chap8.listing_8_2_logistic_regression.save_regression_model', 'save_regression_model', (['data_set_path', 'retain_reg'], {'ext': 'c_ext'}), '(data_set_path, retain_reg, ext=c_ext)\n', (619, 657), False, 'from fightchurn.listings.chap8.listing_8_2_logistic_regression import prepare_data, save_regression_model\n'), ((660, 725), 'fightchurn.listings.chap8.listing_8_2_logistic_regression.save_dataset_predictions', 'save_dataset_predictions', (['data_set_path', 'retain_reg', 'X'], {'ext': 'c_ext'}), '(data_set_path, retain_reg, X, ext=c_ext)\n', (684, 725), False, 'from fightchurn.listings.chap8.listing_8_2_logistic_regression import save_regression_summary, save_dataset_predictions\n')] |
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# -----------------------------------------------------------
# created 02.02.2021, tkaulke
# <NAME>, <EMAIL>
# https://github.com/kaulketh
# -----------------------------------------------------------
__author__ = "<NAME>"
__email__ = "<EMAIL>"
import errno
import logging
import os
from logging.config import fileConfig
# runtime location
this_folder = os.path.dirname(os.path.abspath(__file__))
# define log folder related to location
log_folder = os.path.join(this_folder, '../logs')
# define ini and log files
ini_file = 'debug.ini'
info_log_file = log_folder + '/info.log'
error_log_file = log_folder + '/error.log'
# check if exists or create log folder
try:
os.makedirs(log_folder, exist_ok=True) # Python>3.2
except TypeError:
try:
os.makedirs(log_folder)
except OSError as exc: # Python >2.5
if exc.errno == errno.EEXIST and os.path.isdir(log_folder):
pass
else:
raise
# setup configuration
config_file = os.path.join(this_folder, ini_file)
fileConfig(config_file, disable_existing_loggers=True)
# create handlers
handler_info = logging.FileHandler(os.path.join(this_folder, info_log_file))
handler_error = logging.FileHandler(os.path.join(this_folder, error_log_file))
# set levels
handler_info.setLevel(logging.INFO)
handler_error.setLevel(logging.ERROR)
# create formatters and add to handlers
format_info = \
logging.Formatter('%(asctime)s %(levelname)s '
'[ %(module)s.%(funcName)s linenr.%(lineno)s ] '
'%(message).180s', datefmt='%Y-%m-%d %H:%M:%S')
format_error = \
logging.Formatter(
'%(asctime)s %(levelname)s '
'[ %(module)s.%(funcName)s linenr.%(lineno)s ] '
'[ thread: %(threadName)s ] %(message)s')
handler_info.setFormatter(format_info)
handler_error.setFormatter(format_error)
def get_logger(name: str = __name__):
logger = logging.getLogger(name)
# add handler
logger.addHandler(handler_info)
logger.addHandler(handler_error)
return logger
if __name__ == '__main__':
pass
| [
"logging.getLogger",
"os.makedirs",
"logging.Formatter",
"os.path.join",
"os.path.isdir",
"logging.config.fileConfig",
"os.path.abspath"
] | [((498, 534), 'os.path.join', 'os.path.join', (['this_folder', '"""../logs"""'], {}), "(this_folder, '../logs')\n", (510, 534), False, 'import os\n'), ((1027, 1062), 'os.path.join', 'os.path.join', (['this_folder', 'ini_file'], {}), '(this_folder, ini_file)\n', (1039, 1062), False, 'import os\n'), ((1063, 1117), 'logging.config.fileConfig', 'fileConfig', (['config_file'], {'disable_existing_loggers': '(True)'}), '(config_file, disable_existing_loggers=True)\n', (1073, 1117), False, 'from logging.config import fileConfig\n'), ((1441, 1590), 'logging.Formatter', 'logging.Formatter', (['"""%(asctime)s %(levelname)s [ %(module)s.%(funcName)s linenr.%(lineno)s ] %(message).180s"""'], {'datefmt': '"""%Y-%m-%d %H:%M:%S"""'}), "(\n '%(asctime)s %(levelname)s [ %(module)s.%(funcName)s linenr.%(lineno)s ] %(message).180s'\n , datefmt='%Y-%m-%d %H:%M:%S')\n", (1458, 1590), False, 'import logging\n'), ((1652, 1795), 'logging.Formatter', 'logging.Formatter', (['"""%(asctime)s %(levelname)s [ %(module)s.%(funcName)s linenr.%(lineno)s ] [ thread: %(threadName)s ] %(message)s"""'], {}), "(\n '%(asctime)s %(levelname)s [ %(module)s.%(funcName)s linenr.%(lineno)s ] [ thread: %(threadName)s ] %(message)s'\n )\n", (1669, 1795), False, 'import logging\n'), ((418, 443), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (433, 443), False, 'import os\n'), ((719, 757), 'os.makedirs', 'os.makedirs', (['log_folder'], {'exist_ok': '(True)'}), '(log_folder, exist_ok=True)\n', (730, 757), False, 'import os\n'), ((1172, 1212), 'os.path.join', 'os.path.join', (['this_folder', 'info_log_file'], {}), '(this_folder, info_log_file)\n', (1184, 1212), False, 'import os\n'), ((1250, 1291), 'os.path.join', 'os.path.join', (['this_folder', 'error_log_file'], {}), '(this_folder, error_log_file)\n', (1262, 1291), False, 'import os\n'), ((1950, 1973), 'logging.getLogger', 'logging.getLogger', (['name'], {}), '(name)\n', (1967, 1973), False, 'import logging\n'), ((807, 830), 'os.makedirs', 'os.makedirs', (['log_folder'], {}), '(log_folder)\n', (818, 830), False, 'import os\n'), ((914, 939), 'os.path.isdir', 'os.path.isdir', (['log_folder'], {}), '(log_folder)\n', (927, 939), False, 'import os\n')] |
"""
"""
import unittest
from example_module import COLORS, increment
class ExampleTest(unittest.TestCase):
"""
#TODO
"""
def test_increment(self):
x0 = 0
y0 = increment(x0) #y0 == 1
self.assertEqual(y0, 1)
x1 = 100
y1 = increment(x1) #y1 == 101
self.assertEqual(y1, 101)
| [
"example_module.increment"
] | [((195, 208), 'example_module.increment', 'increment', (['x0'], {}), '(x0)\n', (204, 208), False, 'from example_module import COLORS, increment\n'), ((285, 298), 'example_module.increment', 'increment', (['x1'], {}), '(x1)\n', (294, 298), False, 'from example_module import COLORS, increment\n')] |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import json
from pathlib import Path
import numpy as np
import torch
from PIL import Image
from panopticapi.utils import rgb2id
# from util.box_ops import masks_to_boxes
from .construction import make_construction_transforms
import logging
def box_xywh_to_xyxy(x):
xs, ys, w, h = x.unbind(-1)
b = [xs, ys, (xs + w), (ys + h)]
return torch.stack(b, dim=-1)
def masks_to_boxes(segments):
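    # Collect boxes, labels, iscrowd flags, and areas from COCO-style segment
    # annotations; segments without a 4-element bbox fall back to a dummy 2x2 box.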
boxes = []
labels = []
iscrowd = []
area = []
for ann in segments:
if len(ann["bbox"]) == 4:
boxes.append(ann["bbox"])
area.append(ann['area'])
else:
boxes.append([0, 0, 2, 2])
area.append(4)
labels.append(ann["category_id"])
iscrowd.append(ann['iscrowd'])
if len(boxes) == 0 and len(labels) == 0:
boxes.append([0, 0, 2, 2])
labels.append(1)
area.append(4)
iscrowd.append(0)
boxes = torch.tensor(boxes, dtype=torch.int64)
labels = torch.tensor(labels, dtype=torch.int64)
iscrowd = torch.tensor(iscrowd)
area = torch.tensor(area)
boxes = box_xywh_to_xyxy(boxes)
return boxes, labels, iscrowd, area
class ConstructionPanoptic:
def __init__(self, img_folder, ann_folder, ann_file, transforms=None, return_masks=True):
with open(ann_file, "r") as f:
self.coco = json.load(f)
# sort 'images' field so that they are aligned with 'annotations'
# i.e., in alphabetical order
self.coco["images"] = sorted(self.coco["images"], key=lambda x: x["id"])
# sanity check
if "annotations" in self.coco:
for img, ann in zip(self.coco["images"], self.coco["annotations"]):
assert img["file_name"][:-4] == ann["file_name"][:-4]
self.img_folder = img_folder
self.ann_folder = ann_folder
self.ann_file = ann_file
self.transforms = transforms
self.return_masks = return_masks
def __getitem__(self, idx):
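        # Load the image and its panoptic annotation PNG, decode segment ids from RGB,
        # build per-segment binary masks, and assemble the DETR-style target dict.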
try:
ann_info = (
self.coco["annotations"][idx]
if "annotations" in self.coco
else self.coco["images"][idx]
)
img_path = Path(self.img_folder) / ann_info["file_name"].replace(".png", ".jpg")
ann_path = Path(self.ann_folder) / ann_info["file_name"]
img = Image.open(img_path).convert("RGB")
w, h = img.size
if "segments_info" in ann_info:
masks = np.asarray(Image.open(ann_path), dtype=np.uint32)
masks = rgb2id(masks)
ids = np.array([ann["id"] for ann in ann_info["segments_info"]])
masks = masks == ids[:, None, None]
masks = torch.as_tensor(masks, dtype=torch.uint8)
# labels = torch.tensor(
# [ann["category_id"] for ann in ann_info["segments_info"]],
# dtype=torch.int64,
# )
target = {}
target['image_id'] = torch.tensor([ann_info['image_id'] if "image_id" in ann_info else ann_info["id"]])
if self.return_masks:
target['masks'] = masks
boxes, labels, iscrowd, area = masks_to_boxes(ann_info["segments_info"])
target['labels'] = labels
# Instead of finding boxes, just take the one from json info available
# target["boxes"] = masks_to_boxes(ann_info["segments_info"])
target["boxes"] = boxes
target['size'] = torch.as_tensor([int(h), int(w)])
target['orig_size'] = torch.as_tensor([int(h), int(w)])
target['iscrowd'] = iscrowd
target['area'] = area
# if "segments_info" in ann_info:
# for name in ['iscrowd', 'area']:
# target[name] = torch.tensor([ann[name] for ann in ann_info['segments_info']])
if self.transforms is not None:
img, target = self.transforms(img, target)
return img, target
except Exception as e:
logging.error(ann_info)
raise e
def __len__(self):
return len(self.coco['images'])
def get_height_and_width(self, idx):
img_info = self.coco['images'][idx]
height = img_info['height']
width = img_info['width']
return height, width
def build(image_set, args):
root = Path(args.data_path)
assert (
root.exists()
), f"provided Panoptic path {root} does not exist"
mode = "panoptic"
PATHS = {
"train": ("images", f"{mode}", f"{mode}.json"),
"val": ("images", f"val_{mode}", f"val_{mode}.json"),
}
img_folder, ann_folder, ann_file = PATHS[image_set]
img_folder_path = root / img_folder
ann_folder_path = root / ann_folder
ann_file = root / ann_file
dataset = ConstructionPanoptic(
img_folder_path,
ann_folder_path,
ann_file,
transforms=make_construction_transforms(image_set),
return_masks=args.masks,
)
return dataset
| [
"torch.as_tensor",
"PIL.Image.open",
"pathlib.Path",
"torch.stack",
"panopticapi.utils.rgb2id",
"torch.tensor",
"numpy.array",
"json.load",
"logging.error"
] | [((422, 444), 'torch.stack', 'torch.stack', (['b'], {'dim': '(-1)'}), '(b, dim=-1)\n', (433, 444), False, 'import torch\n'), ((1007, 1045), 'torch.tensor', 'torch.tensor', (['boxes'], {'dtype': 'torch.int64'}), '(boxes, dtype=torch.int64)\n', (1019, 1045), False, 'import torch\n'), ((1059, 1098), 'torch.tensor', 'torch.tensor', (['labels'], {'dtype': 'torch.int64'}), '(labels, dtype=torch.int64)\n', (1071, 1098), False, 'import torch\n'), ((1113, 1134), 'torch.tensor', 'torch.tensor', (['iscrowd'], {}), '(iscrowd)\n', (1125, 1134), False, 'import torch\n'), ((1146, 1164), 'torch.tensor', 'torch.tensor', (['area'], {}), '(area)\n', (1158, 1164), False, 'import torch\n'), ((4513, 4533), 'pathlib.Path', 'Path', (['args.data_path'], {}), '(args.data_path)\n', (4517, 4533), False, 'from pathlib import Path\n'), ((1427, 1439), 'json.load', 'json.load', (['f'], {}), '(f)\n', (1436, 1439), False, 'import json\n'), ((3116, 3203), 'torch.tensor', 'torch.tensor', (["[ann_info['image_id'] if 'image_id' in ann_info else ann_info['id']]"], {}), "([ann_info['image_id'] if 'image_id' in ann_info else ann_info[\n 'id']])\n", (3128, 3203), False, 'import torch\n'), ((2278, 2299), 'pathlib.Path', 'Path', (['self.img_folder'], {}), '(self.img_folder)\n', (2282, 2299), False, 'from pathlib import Path\n'), ((2371, 2392), 'pathlib.Path', 'Path', (['self.ann_folder'], {}), '(self.ann_folder)\n', (2375, 2392), False, 'from pathlib import Path\n'), ((2642, 2655), 'panopticapi.utils.rgb2id', 'rgb2id', (['masks'], {}), '(masks)\n', (2648, 2655), False, 'from panopticapi.utils import rgb2id\n'), ((2679, 2737), 'numpy.array', 'np.array', (["[ann['id'] for ann in ann_info['segments_info']]"], {}), "([ann['id'] for ann in ann_info['segments_info']])\n", (2687, 2737), True, 'import numpy as np\n'), ((2815, 2856), 'torch.as_tensor', 'torch.as_tensor', (['masks'], {'dtype': 'torch.uint8'}), '(masks, dtype=torch.uint8)\n', (2830, 2856), False, 'import torch\n'), ((4178, 4201), 'logging.error', 'logging.error', (['ann_info'], {}), '(ann_info)\n', (4191, 4201), False, 'import logging\n'), ((2436, 2456), 'PIL.Image.open', 'Image.open', (['img_path'], {}), '(img_path)\n', (2446, 2456), False, 'from PIL import Image\n'), ((2579, 2599), 'PIL.Image.open', 'Image.open', (['ann_path'], {}), '(ann_path)\n', (2589, 2599), False, 'from PIL import Image\n')] |
#!/usr/bin/python
'''
memory class
stored in a SQLite database
holds raw input and memories in parse-tagged columns
'''
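# Illustrative usage sketch (assumes the sqlite tables used below already exist
# in robbie_memory.sqlite; the concept/location values are placeholders):
#   mem = memoryClass()
#   mem.add_memory('keys', 'kitchen table')
#   mem.search_memory('keys')   # -> ('keys', 'kitchen table', '3sg')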
import sys
import re
import sqlite3
import os
from datetime import date, datetime
from pattern.en import parse
from pattern.en import pprint
from pattern.en import parsetree
from pattern.en import wordnet
from pattern.en import pluralize, singularize
from pattern.en import conjugate, lemma, lexeme
#dir = os.path.dirname(os.path.abspath(__file__))
dir = '/home/erni/catkin_ws/src/max_ros/max_ai/src/max_ai/'
RM = sqlite3.connect(dir +'robbie_memory.sqlite')
#RM = sqlite3.connect(dir + '/data/robbie_memory.db')
cursor = RM.cursor()
# Information about a single concept
class conceptClass:
def __init__(self, state='none', locality='none'):
self.state = state # what/how is 'concept'
self.reference = 'none' # unused
self.locality = locality # where is 'concept'
self.person = '3sg' # e.g. a thing is 3rd-person, singular
self.isProperNoun = False # True if proper noun: e.g. Robert
self.properties = {} # Dict of custom properties, e.g. 'age' = 39, 'color' = 'blue'
# Robbie memory class. Collection of concepts
class memoryClass():
def __init__(self):
self.concepts = {}
self.person = {'I': '1sg',
'you': '2sg'
}
self.posessivePronouns = {'1sg': 'my',
'2sg': 'your',
'3sg': 'its'
}
# Add a concept to memory
def add(self, c):
# add oncept to raw_input table in robbie_memory
# x=
# dt = datetime.now()
# RM.execute("insert into RAW_INPUT (RAW, DATE) values (?, ?)",(c, dt))
# RM.commit()
self.concepts[c] = conceptClass()
if c in self.person:
self.concepts[c].person = self.person[c]
else:
self.concepts[c].person = '3sg'
# Return True if concept 'c' (string) is in memory
def known(self, c):
cursor.execute('''SELECT concept, location FROM memory WHERE concept =?''', (c,))
user = cursor.fetchone()
# if user == 'None':
return user
def add_memory(self, a, b):
c = '3sg'
dt = datetime.now()
RM.execute("insert into memory (concept, location, person,DATE) values (?, ?, ?, ?)", (a, b, c, dt))
RM.commit()
def update_memory(self, a, b):
cursor.execute('''UPDATE memory SET location = ? WHERE concept = ? ''', (b, a))
RM.commit()
def search_memory(self, a):
cursor.execute('''SELECT concept,location, person FROM memory WHERE concept =?''', (a,))
user = cursor.fetchone()
return user
def search_profile(self, a):
cursor.execute('''SELECT value FROM profile WHERE item =?''', (a,))
user = cursor.fetchone()
return user
def Dump(self):
        # self.concepts maps concept names to conceptClass objects, so return
        # their states (the dict itself has no .state attribute).
        return {name: concept.state for name, concept in self.concepts.items()}
| [
"datetime.datetime.now",
"sqlite3.connect"
] | [((537, 582), 'sqlite3.connect', 'sqlite3.connect', (["(dir + 'robbie_memory.sqlite')"], {}), "(dir + 'robbie_memory.sqlite')\n", (552, 582), False, 'import sqlite3\n'), ((2301, 2315), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (2313, 2315), False, 'from datetime import date, datetime\n')] |
from datetime import datetime
from difflib import unified_diff
from logging import basicConfig, getLogger, INFO
import os
from pathlib import Path
import shutil
import subprocess
import sys
import yaml
from urllib.parse import urlparse
from notebook import notebookapp
from IPython.core.display import HTML
WORKDIR = 'edit'
META_YML = '.vcp-meta.yml'
MOODLE_DIR = '/opt/moodle'
CONF_RELATIVE = '/etc'
ENV_INHERIT = ['VAULT_ADDR', 'VAULT_TOKEN', 'PATH', 'REQUESTS_CA_BUNDLE']
logger = getLogger(__name__)
basicConfig(level=INFO, format='%(message)s')
def generate_local_path(host, conf_path, version=None):
ret = Path(WORKDIR).absolute() / host
if version is None:
ret /= datetime.now().strftime("%Y%m%d%H%M%S%f")
else:
ret /= version
ret /= Path(conf_path).name
return ret
def generate_remote_path(container, conf_path, relative_to=CONF_RELATIVE):
return (Path(MOODLE_DIR) / container / 'conf' /
Path(conf_path).relative_to(relative_to))
def get_local_path(host, container, conf_path, version=None):
if version is None:
version = find_latest_version(host, container, conf_path)
return generate_local_path(host, conf_path, version)
def _match_metainfo(parent, container, conf_path):
p = parent / META_YML
if not p.exists():
return False
with p.open() as f:
params = yaml.safe_load(f)
return (
isinstance(params, dict) and
'container' in params and
'container_path' in params and
params['container'] == container and
params['container_path'] == conf_path)
def _match_metainfo_by_remote_path(parent, remote_path):
p = parent / META_YML
if not p.exists():
return False
with p.open() as f:
params = yaml.safe_load(f)
return (
isinstance(params, dict) and
'remote_path' in params and
params['remote_path'] == remote_path)
def get_versions(host, *args, match=_match_metainfo):
pdir = Path(WORKDIR).absolute() / host
return sorted([
x.name for x in pdir.glob('*')
if x.is_dir() and match(x, *args)])
def find_latest_version(host, container, conf_path):
return get_versions(host, container, conf_path)[-1]
def find_latest_version_by_remote_path(host, remote_path):
return get_versions(
host, remote_path, match=_match_metainfo_by_remote_path)[-1]
def download_file(host, remote_path, conf_path=None):
if conf_path is None:
conf_path = Path(remote_path).name
dest = generate_local_path(host, conf_path)
ansible_arg = f'src={remote_path} dest={dest} flat=yes'
out = subprocess.check_output(
['ansible', host, '-m', 'fetch', '-a', ansible_arg])
host_1 = out.decode('utf-8').split("\n")[0].split()[0]
logger.info(f'Downloading {remote_path} from {host_1} to {dest}')
return dest
def download_conf_file(host, container, conf_path, relative_to=CONF_RELATIVE):
src = generate_remote_path(container, conf_path, relative_to)
return download_file(host, src, conf_path)
def create_conf_file(host, conf_path):
dest = generate_local_path(host, conf_path)
dest.parent.mkdir(parents=True, exist_ok=True)
dest.touch()
return dest
def _to_backup(conf):
return conf.parent / (conf.name + '.orig')
def make_backup(conf, quiet=False):
org = _to_backup(conf)
if not quiet:
logger.info(f'Copy {conf} {org}')
shutil.copy2(conf, org)
def make_metainfo(local_path, container, conf_path, relative_to=CONF_RELATIVE):
params = {
'container': container,
'container_path': conf_path,
'remote_path':
str(generate_remote_path(container, conf_path, relative_to)),
'version': list(local_path.parts)[-2],
}
with (local_path.parent / META_YML).open(mode='w') as f:
yaml.safe_dump(params, stream=f, default_flow_style=False)
def make_simple_metainfo(local_path, remote_path):
params = {
'remote_path': remote_path,
'version': list(local_path.parts)[-2],
}
with (local_path.parent / META_YML).open(mode='w') as f:
yaml.safe_dump(params, stream=f, default_flow_style=False)
def generate_edit_link(conf):
nb_conf = list(notebookapp.list_running_servers())[0]
p = (Path(nb_conf['base_url']) / 'edit' /
conf.absolute().relative_to(nb_conf['notebook_dir']))
return HTML(f'<a href={p} target="_blank">{p.name}</a>')
def show_diff(path_a, path_b):
lines_a = []
lines_b = []
with path_a.open() as f:
lines_a = f.readlines()
with path_b.open() as f:
lines_b = f.readlines()
diff = list(unified_diff(
lines_a, lines_b, fromfile=path_a.name, tofile=path_b.name))
sys.stdout.writelines(diff)
return len(diff)
def upload_conf_file(src, host, container, conf_path,
relative_to=CONF_RELATIVE):
dest = generate_remote_path(container, conf_path, relative_to)
ansible_arg = f'mkdir -p {dest.parent}'
subprocess.run(
['ansible', host, '-a', ansible_arg])
ansible_arg = f'dest={dest} src={src} backup=yes'
out = subprocess.check_output(
['ansible', host, '-m', 'copy', '-b', '-a', ansible_arg])
host_1 = out.decode('utf-8').split("\n")[0].split()[0]
logger.info(f'Uploading {dest} from {src} to {host_1}')
def restart_container(host, container):
cmd = f'chdir={MOODLE_DIR} docker-compose restart {container}'
logger.info(f'Restart container {container}')
subprocess.check_call(['ansible', host, '-a', cmd])
def fetch_conf(host, container, conf_path,
relative_to=CONF_RELATIVE, create=False):
local_path = download_conf_file(host, container, conf_path, relative_to)
make_backup(local_path)
make_metainfo(local_path, container, conf_path, relative_to)
return generate_edit_link(local_path)
def create_conf(host, container, conf_path,
relative_to=CONF_RELATIVE, create=False):
local_path = create_conf_file(host, conf_path)
make_backup(local_path, quiet=True)
make_metainfo(local_path, container, conf_path, relative_to)
return generate_edit_link(local_path)
def apply_conf(host, container, conf_path,
relative_to=CONF_RELATIVE, version=None, restart=True):
diff = show_local_conf_diff(host, container, conf_path, version)
local_path = get_local_path(host, container, conf_path, version)
upload_conf_file(local_path, host, container, conf_path, relative_to)
if restart:
restart_container(host, container)
def revert_conf(host, container, conf_path,
relative_to=CONF_RELATIVE, version=None):
local_path = get_local_path(host, container, conf_path, version)
backup_path = _to_backup(local_path)
show_diff(local_path, backup_path)
upload_conf_file(backup_path, host, container, conf_path, relative_to)
restart_container(host, container)
local_path.rename(local_path.parent / (local_path.name + '.revert'))
def show_local_conf(host, container, conf_path,
relative_to=CONF_RELATIVE, version=None):
conf = get_local_path(host, container, conf_path, version)
with conf.open() as f:
print(f.read())
def edit_local_conf(host, container, conf_path,
relative_to=CONF_RELATIVE, version=None):
conf = get_local_path(host, container, conf_path, version)
return generate_edit_link(conf)
def show_local_conf_diff(host, container, conf_path, version=None):
local_path = get_local_path(host, container, conf_path, version)
show_diff(_to_backup(local_path), local_path)
def save_shibboleth_part(conf_path):
with conf_path.open() as f:
data = yaml.safe_load(f)
params = {}
if 'shibboleth' in data['services']:
params['shibboleth_container'] = yaml.safe_dump(
data['services']['shibboleth'])
vars_path = conf_path.parent / 'extra_vars.yml'
with vars_path.open(mode='w') as f:
yaml.safe_dump(params, f)
return vars_path
def init_shibboleth_part(conf_dir, hostname, volumes):
shibboleth_volumes = ['/sys/fs/cgroup:/sys/fs/cgroup']
shibboleth_volumes.extend(volumes)
params = {
'shibboleth_container': yaml.safe_dump({
'image': 'harbor.vcloud.nii.ac.jp/vcp/moodle:shibboleth-3.0.4',
'privileged': True,
'ports': ['443:443'],
'volumes': shibboleth_volumes,
'container_name': 'shibboleth',
'hostname': hostname,
}),
}
vars_path = conf_dir / 'shibboleth.yml'
with vars_path.open(mode='w') as f:
yaml.safe_dump(params, f)
return vars_path
def setup_shibboleth_part(local_path, **params):
if params is None or len(params) == 0:
return save_shibboleth_part(local_path)
else:
return init_shibboleth_part(local_path.parent, **params)
def generate_docker_compose(host, conf_path, extra_vars, extra_vars_file):
template = 'template/docker/compose/docker-compose.yml'
ansible_arg = f'src={template} dest={conf_path.parent}/'
env = dict([(x, os.environ[x]) for x in ENV_INHERIT])
args = ['ansible', host, '-m', 'template', '-c', 'local',
'-a', ansible_arg]
for k, v in extra_vars.items():
args.extend(['-e', f'{k}={v}'])
for x in extra_vars_file:
args.extend(['-e', f'@{str(x)}'])
subprocess.run(args=args, env=env, check=True)
def update_docker_compose(host, extra_vars={}, shibboleth_params={}):
remote_path = MOODLE_DIR + '/docker-compose.yml'
local_path = download_file(host, remote_path)
make_backup(local_path)
make_simple_metainfo(local_path, remote_path)
shibboleth_vars = setup_shibboleth_part(local_path, **shibboleth_params)
generate_docker_compose(host, local_path, extra_vars, [shibboleth_vars])
show_diff(_to_backup(local_path), local_path)
return generate_edit_link(local_path)
def append_shibboleth_container(host, moodle_url, volumes=[], extra_vars={}):
hostname = urlparse(moodle_url).netloc
return update_docker_compose(
host, extra_vars,
shibboleth_params={'hostname': hostname, 'volumes': volumes},
)
def upload_docker_compose(host, version=None, apply=False):
remote_path = MOODLE_DIR + '/docker-compose.yml'
if version is None:
version = find_latest_version_by_remote_path(host, remote_path)
local_path = (
Path(WORKDIR).absolute() / host / version / 'docker-compose.yml')
ansible_arg = f'dest={remote_path} src={local_path} backup=yes'
out = subprocess.check_output(
['ansible', host, '-m', 'copy', '-b', '-a', ansible_arg])
host_1 = out.decode('utf-8').split("\n")[0].split()[0]
logger.info(f'Uploading {remote_path} from {local_path} to {host_1}')
if not apply:
return
    ansible_arg = f'chdir={MOODLE_DIR} docker-compose up -d --remove-orphans'
args = ['ansible', host, '-a', ansible_arg]
logger.info('Apply the changes in docker-compose.yml.')
subprocess.run(args=args, check=True)
def generate_proxy_conf(host, conf_path, extra_vars):
template = 'template/docker/compose/moodle-proxy.conf.template'
ansible_arg = f'src={template} dest={conf_path.parent}/moodle-proxy.conf'
env = dict([(x, os.environ[x]) for x in ENV_INHERIT])
args = [
'ansible', host, '-m', 'template', '-c', 'local', '-a', ansible_arg]
for k, v in extra_vars.items():
args.extend(['-e', f'{k}={v}'])
subprocess.run(args=args, env=env, check=True)
def update_proxy_conf(host, extra_vars={}):
conf_path = Path('/usr/local/apache2/conf/moodle-proxy.conf')
container = 'proxy'
link = fetch_conf(host, container, str(conf_path), str(conf_path.parent))
version = find_latest_version(host, container, str(conf_path))
local_path = generate_local_path(host, conf_path, version)
generate_proxy_conf(host, local_path, extra_vars)
show_local_conf_diff(host, container, conf_path, version)
return link
def apply_proxy_conf(host, version=None, restart=True):
conf_path = Path('/usr/local/apache2/conf/moodle-proxy.conf')
apply_conf(host, 'proxy', str(conf_path), str(conf_path.parent),
version, restart)
| [
"logging.getLogger",
"subprocess.check_output",
"logging.basicConfig",
"yaml.safe_dump",
"urllib.parse.urlparse",
"IPython.core.display.HTML",
"shutil.copy2",
"subprocess.check_call",
"pathlib.Path",
"subprocess.run",
"notebook.notebookapp.list_running_servers",
"difflib.unified_diff",
"yaml.safe_load",
"datetime.datetime.now",
"sys.stdout.writelines"
] | [((487, 506), 'logging.getLogger', 'getLogger', (['__name__'], {}), '(__name__)\n', (496, 506), False, 'from logging import basicConfig, getLogger, INFO\n'), ((507, 552), 'logging.basicConfig', 'basicConfig', ([], {'level': 'INFO', 'format': '"""%(message)s"""'}), "(level=INFO, format='%(message)s')\n", (518, 552), False, 'from logging import basicConfig, getLogger, INFO\n'), ((2636, 2712), 'subprocess.check_output', 'subprocess.check_output', (["['ansible', host, '-m', 'fetch', '-a', ansible_arg]"], {}), "(['ansible', host, '-m', 'fetch', '-a', ansible_arg])\n", (2659, 2712), False, 'import subprocess\n'), ((3434, 3457), 'shutil.copy2', 'shutil.copy2', (['conf', 'org'], {}), '(conf, org)\n', (3446, 3457), False, 'import shutil\n'), ((4397, 4446), 'IPython.core.display.HTML', 'HTML', (['f"""<a href={p} target="_blank">{p.name}</a>"""'], {}), '(f\'<a href={p} target="_blank">{p.name}</a>\')\n', (4401, 4446), False, 'from IPython.core.display import HTML\n'), ((4739, 4766), 'sys.stdout.writelines', 'sys.stdout.writelines', (['diff'], {}), '(diff)\n', (4760, 4766), False, 'import sys\n'), ((5008, 5060), 'subprocess.run', 'subprocess.run', (["['ansible', host, '-a', ansible_arg]"], {}), "(['ansible', host, '-a', ansible_arg])\n", (5022, 5060), False, 'import subprocess\n'), ((5134, 5219), 'subprocess.check_output', 'subprocess.check_output', (["['ansible', host, '-m', 'copy', '-b', '-a', ansible_arg]"], {}), "(['ansible', host, '-m', 'copy', '-b', '-a',\n ansible_arg])\n", (5157, 5219), False, 'import subprocess\n'), ((5507, 5558), 'subprocess.check_call', 'subprocess.check_call', (["['ansible', host, '-a', cmd]"], {}), "(['ansible', host, '-a', cmd])\n", (5528, 5558), False, 'import subprocess\n'), ((9394, 9440), 'subprocess.run', 'subprocess.run', ([], {'args': 'args', 'env': 'env', 'check': '(True)'}), '(args=args, env=env, check=True)\n', (9408, 9440), False, 'import subprocess\n'), ((10582, 10667), 'subprocess.check_output', 'subprocess.check_output', (["['ansible', host, '-m', 'copy', '-b', '-a', ansible_arg]"], {}), "(['ansible', host, '-m', 'copy', '-b', '-a',\n ansible_arg])\n", (10605, 10667), False, 'import subprocess\n'), ((11028, 11065), 'subprocess.run', 'subprocess.run', ([], {'args': 'args', 'check': '(True)'}), '(args=args, check=True)\n', (11042, 11065), False, 'import subprocess\n'), ((11497, 11543), 'subprocess.run', 'subprocess.run', ([], {'args': 'args', 'env': 'env', 'check': '(True)'}), '(args=args, env=env, check=True)\n', (11511, 11543), False, 'import subprocess\n'), ((11606, 11655), 'pathlib.Path', 'Path', (['"""/usr/local/apache2/conf/moodle-proxy.conf"""'], {}), "('/usr/local/apache2/conf/moodle-proxy.conf')\n", (11610, 11655), False, 'from pathlib import Path\n'), ((12094, 12143), 'pathlib.Path', 'Path', (['"""/usr/local/apache2/conf/moodle-proxy.conf"""'], {}), "('/usr/local/apache2/conf/moodle-proxy.conf')\n", (12098, 12143), False, 'from pathlib import Path\n'), ((778, 793), 'pathlib.Path', 'Path', (['conf_path'], {}), '(conf_path)\n', (782, 793), False, 'from pathlib import Path\n'), ((1372, 1389), 'yaml.safe_load', 'yaml.safe_load', (['f'], {}), '(f)\n', (1386, 1389), False, 'import yaml\n'), ((1775, 1792), 'yaml.safe_load', 'yaml.safe_load', (['f'], {}), '(f)\n', (1789, 1792), False, 'import yaml\n'), ((3843, 3901), 'yaml.safe_dump', 'yaml.safe_dump', (['params'], {'stream': 'f', 'default_flow_style': '(False)'}), '(params, stream=f, default_flow_style=False)\n', (3857, 3901), False, 'import yaml\n'), ((4128, 4186), 'yaml.safe_dump', 'yaml.safe_dump', 
(['params'], {'stream': 'f', 'default_flow_style': '(False)'}), '(params, stream=f, default_flow_style=False)\n', (4142, 4186), False, 'import yaml\n'), ((4652, 4724), 'difflib.unified_diff', 'unified_diff', (['lines_a', 'lines_b'], {'fromfile': 'path_a.name', 'tofile': 'path_b.name'}), '(lines_a, lines_b, fromfile=path_a.name, tofile=path_b.name)\n', (4664, 4724), False, 'from difflib import unified_diff\n'), ((7714, 7731), 'yaml.safe_load', 'yaml.safe_load', (['f'], {}), '(f)\n', (7728, 7731), False, 'import yaml\n'), ((7830, 7876), 'yaml.safe_dump', 'yaml.safe_dump', (["data['services']['shibboleth']"], {}), "(data['services']['shibboleth'])\n", (7844, 7876), False, 'import yaml\n'), ((7990, 8015), 'yaml.safe_dump', 'yaml.safe_dump', (['params', 'f'], {}), '(params, f)\n', (8004, 8015), False, 'import yaml\n'), ((8239, 8459), 'yaml.safe_dump', 'yaml.safe_dump', (["{'image': 'harbor.vcloud.nii.ac.jp/vcp/moodle:shibboleth-3.0.4',\n 'privileged': True, 'ports': ['443:443'], 'volumes': shibboleth_volumes,\n 'container_name': 'shibboleth', 'hostname': hostname}"], {}), "({'image':\n 'harbor.vcloud.nii.ac.jp/vcp/moodle:shibboleth-3.0.4', 'privileged': \n True, 'ports': ['443:443'], 'volumes': shibboleth_volumes,\n 'container_name': 'shibboleth', 'hostname': hostname})\n", (8253, 8459), False, 'import yaml\n'), ((8629, 8654), 'yaml.safe_dump', 'yaml.safe_dump', (['params', 'f'], {}), '(params, f)\n', (8643, 8654), False, 'import yaml\n'), ((10035, 10055), 'urllib.parse.urlparse', 'urlparse', (['moodle_url'], {}), '(moodle_url)\n', (10043, 10055), False, 'from urllib.parse import urlparse\n'), ((2495, 2512), 'pathlib.Path', 'Path', (['remote_path'], {}), '(remote_path)\n', (2499, 2512), False, 'from pathlib import Path\n'), ((4238, 4272), 'notebook.notebookapp.list_running_servers', 'notebookapp.list_running_servers', ([], {}), '()\n', (4270, 4272), False, 'from notebook import notebookapp\n'), ((4286, 4311), 'pathlib.Path', 'Path', (["nb_conf['base_url']"], {}), "(nb_conf['base_url'])\n", (4290, 4311), False, 'from pathlib import Path\n'), ((621, 634), 'pathlib.Path', 'Path', (['WORKDIR'], {}), '(WORKDIR)\n', (625, 634), False, 'from pathlib import Path\n'), ((692, 706), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (704, 706), False, 'from datetime import datetime\n'), ((903, 919), 'pathlib.Path', 'Path', (['MOODLE_DIR'], {}), '(MOODLE_DIR)\n', (907, 919), False, 'from pathlib import Path\n'), ((955, 970), 'pathlib.Path', 'Path', (['conf_path'], {}), '(conf_path)\n', (959, 970), False, 'from pathlib import Path\n'), ((1992, 2005), 'pathlib.Path', 'Path', (['WORKDIR'], {}), '(WORKDIR)\n', (1996, 2005), False, 'from pathlib import Path\n'), ((10437, 10450), 'pathlib.Path', 'Path', (['WORKDIR'], {}), '(WORKDIR)\n', (10441, 10450), False, 'from pathlib import Path\n')] |
from operator import attrgetter
import logging
import os
import shutil
import subprocess
import pyfastaq
import pymummer
from cluster_vcf_records import vcf_record
from varifier import utils
# We only want the .snps file from the dnadiff script from MUMmer. From reading
# the docs inspecting that script, we need to run these commands:
#
# nucmer --maxmatch --delta out.delta ref.fasta query.fasta
# delta-filter -1 out.delta > out.1delta
# show-snps -rlTHC out.1delta > out.snps
#
# This is instead of just running show-snps, which runs several other commands
# in addition to making the snps file.
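# A minimal sketch of driving the same pipeline from Python (file names are
# placeholders, not files shipped with this package):
#   _run_dnadiff_one_split("truth.fasta", "ref.fasta", "out.snps")
#   _snps_file_to_vcf("out.snps", "ref.fasta", "out.vcf")
# i.e. run the nucmer/delta-filter/show-snps chain listed above, then convert
# the resulting .snps records into VCF records on the query's coordinates.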
def _run_dnadiff_one_split(ref_fasta, query_fasta, outfile, threads=1, maxmatch=True):
delta = f"{outfile}.tmp.delta"
delta_1 = f"{outfile}.tmp.1delta"
subprocess.check_output(f"rm -f {delta} {delta_1}", shell=True)
maxmatch_opt = "--maxmatch" if maxmatch else ""
commands = [
f"nucmer --threads {threads} {maxmatch_opt} --delta {delta} {ref_fasta} {query_fasta}",
f"delta-filter -1 {delta} > {delta_1}",
f"show-snps -rlTHC {delta_1} > {outfile}",
]
for command in commands:
logging.info("Start run command: " + command)
subprocess.check_output(command, shell=True)
logging.info("Finish run command: " + command)
os.unlink(delta)
os.unlink(delta_1)
def _run_dnadiff(
ref_fasta,
query_fasta,
outfile,
split_query=False,
debug=False,
threads=1,
maxmatch=True,
):
if not split_query:
_run_dnadiff_one_split(
ref_fasta, query_fasta, outfile, threads=threads, maxmatch=maxmatch
)
else:
tmp_snp_files = []
seq_reader = pyfastaq.sequences.file_reader(query_fasta)
for seq in seq_reader:
prefix = f"{outfile}.tmp.split.{len(tmp_snp_files)}"
tmp_fasta = f"{prefix}.fasta"
with open(tmp_fasta, "w") as f:
print(seq, file=f)
snp_file = f"{prefix}.snps"
_run_dnadiff_one_split(
ref_fasta, tmp_fasta, snp_file, threads=threads, maxmatch=maxmatch
)
os.unlink(tmp_fasta)
tmp_snp_files.append(snp_file)
with open(outfile, "wb") as f_out:
for snp_file in tmp_snp_files:
with open(snp_file, "rb") as f_in:
shutil.copyfileobj(f_in, f_out)
if not debug:
os.unlink(snp_file)
def _snps_file_to_vcf(snps_file, query_fasta, outfile):
"""Loads the .snps file made by dnadiff.
query_fasta = fasta file of query sequences.
    Writes a new VCF file of unmerged records."""
vcf_records = {}
variants = pymummer.snp_file.get_all_variants(snps_file)
query_seqs = utils.file_to_dict_of_seqs(query_fasta)
for variant in variants:
# If the variant is reversed, it means that either the ref or query had to be
# reverse complemented when aligned by mummer. Need to do the appropriate
# reverse (complement) fixes so the VCF has the correct REF and ALT sequences
if variant.reverse:
qry_seq = pyfastaq.sequences.Fasta("x", variant.qry_base)
qry_seq.revcomp()
variant.qry_base = "".join(reversed(qry_seq.seq))
ref_seq = pyfastaq.sequences.Fasta("x", variant.ref_base)
ref_seq.revcomp()
variant.ref_base = ref_seq.seq
if variant.var_type == pymummer.variant.SNP:
new_record = vcf_record.VcfRecord(
"\t".join(
[
variant.qry_name,
str(variant.qry_start + 1),
".",
variant.qry_base,
variant.ref_base,
".",
".",
"SVTYPE=DNADIFF_SNP",
"GT",
"1/1",
]
)
)
elif variant.var_type == pymummer.variant.DEL:
# The query has sequence missing, compared to the
# reference. We're making VCF records w.r.t. the
# query, so this is an insertion. So need to
# get the nucleotide before the insertion as well.
new_record = vcf_record.VcfRecord(
"\t".join(
[
variant.qry_name,
str(variant.qry_start + 1),
".",
query_seqs[variant.qry_name][variant.qry_start],
query_seqs[variant.qry_name][variant.qry_start]
+ variant.ref_base,
".",
".",
"SVTYPE=DNADIFF_INS",
"GT",
"1/1",
]
)
)
elif variant.var_type == pymummer.variant.INS:
# The ref has sequence missing, compared to the
# query. We're making VCF records w.r.t. the
# query, so this is a deletion. So need to
# get the nucleotide before the deletion as well.
new_record = vcf_record.VcfRecord(
"\t".join(
[
variant.qry_name,
str(variant.qry_start),
".",
query_seqs[variant.qry_name][variant.qry_start - 1]
+ variant.qry_base,
query_seqs[variant.qry_name][variant.qry_start - 1],
".",
".",
"SVTYPE=DNADIFF_DEL",
"GT",
"1/1",
]
)
)
else:
raise Exception("Unknown variant type: " + str(variant))
assert (
new_record.REF
== query_seqs[new_record.CHROM][
new_record.POS : new_record.POS + len(new_record.REF)
]
)
if new_record.CHROM not in vcf_records:
vcf_records[new_record.CHROM] = []
vcf_records[new_record.CHROM].append(new_record)
for vcf_list in vcf_records.values():
vcf_list.sort(key=attrgetter("POS"))
with open(outfile, "w") as f:
print("##fileformat=VCFv4.2", file=f)
for seq in query_seqs.values():
print(f"##contig=<ID={seq.id},length={len(seq)}>", file=f)
print("#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO\tFORMAT\tsample", file=f)
for key, vcf_list in sorted(vcf_records.items()):
for record in vcf_list:
print(record, file=f)
def make_truth_vcf(
ref_fasta,
truth_fasta,
outfile,
debug=False,
split_ref=False,
threads=1,
maxmatch=True,
):
snps_file = f"{outfile}.tmp.snps"
_run_dnadiff(
truth_fasta,
ref_fasta,
snps_file,
split_query=split_ref,
debug=debug,
threads=threads,
maxmatch=maxmatch,
)
_snps_file_to_vcf(snps_file, ref_fasta, outfile)
if not debug:
os.unlink(snps_file)
| [
"subprocess.check_output",
"operator.attrgetter",
"pyfastaq.sequences.Fasta",
"shutil.copyfileobj",
"varifier.utils.file_to_dict_of_seqs",
"pymummer.snp_file.get_all_variants",
"os.unlink",
"pyfastaq.sequences.file_reader",
"logging.info"
] | [((768, 831), 'subprocess.check_output', 'subprocess.check_output', (['f"""rm -f {delta} {delta_1}"""'], {'shell': '(True)'}), "(f'rm -f {delta} {delta_1}', shell=True)\n", (791, 831), False, 'import subprocess\n'), ((1299, 1315), 'os.unlink', 'os.unlink', (['delta'], {}), '(delta)\n', (1308, 1315), False, 'import os\n'), ((1320, 1338), 'os.unlink', 'os.unlink', (['delta_1'], {}), '(delta_1)\n', (1329, 1338), False, 'import os\n'), ((2690, 2735), 'pymummer.snp_file.get_all_variants', 'pymummer.snp_file.get_all_variants', (['snps_file'], {}), '(snps_file)\n', (2724, 2735), False, 'import pymummer\n'), ((2753, 2792), 'varifier.utils.file_to_dict_of_seqs', 'utils.file_to_dict_of_seqs', (['query_fasta'], {}), '(query_fasta)\n', (2779, 2792), False, 'from varifier import utils\n'), ((1140, 1185), 'logging.info', 'logging.info', (["('Start run command: ' + command)"], {}), "('Start run command: ' + command)\n", (1152, 1185), False, 'import logging\n'), ((1194, 1238), 'subprocess.check_output', 'subprocess.check_output', (['command'], {'shell': '(True)'}), '(command, shell=True)\n', (1217, 1238), False, 'import subprocess\n'), ((1247, 1293), 'logging.info', 'logging.info', (["('Finish run command: ' + command)"], {}), "('Finish run command: ' + command)\n", (1259, 1293), False, 'import logging\n'), ((1685, 1728), 'pyfastaq.sequences.file_reader', 'pyfastaq.sequences.file_reader', (['query_fasta'], {}), '(query_fasta)\n', (1715, 1728), False, 'import pyfastaq\n'), ((7198, 7218), 'os.unlink', 'os.unlink', (['snps_file'], {}), '(snps_file)\n', (7207, 7218), False, 'import os\n'), ((2131, 2151), 'os.unlink', 'os.unlink', (['tmp_fasta'], {}), '(tmp_fasta)\n', (2140, 2151), False, 'import os\n'), ((3127, 3174), 'pyfastaq.sequences.Fasta', 'pyfastaq.sequences.Fasta', (['"""x"""', 'variant.qry_base'], {}), "('x', variant.qry_base)\n", (3151, 3174), False, 'import pyfastaq\n'), ((3289, 3336), 'pyfastaq.sequences.Fasta', 'pyfastaq.sequences.Fasta', (['"""x"""', 'variant.ref_base'], {}), "('x', variant.ref_base)\n", (3313, 3336), False, 'import pyfastaq\n'), ((6321, 6338), 'operator.attrgetter', 'attrgetter', (['"""POS"""'], {}), "('POS')\n", (6331, 6338), False, 'from operator import attrgetter\n'), ((2353, 2384), 'shutil.copyfileobj', 'shutil.copyfileobj', (['f_in', 'f_out'], {}), '(f_in, f_out)\n', (2371, 2384), False, 'import shutil\n'), ((2435, 2454), 'os.unlink', 'os.unlink', (['snp_file'], {}), '(snp_file)\n', (2444, 2454), False, 'import os\n')] |
from __future__ import absolute_import
import datetime
from dateutil import parser
import pytz
from .base import FieldFilter, DictFilterMixin, DjangoQueryFilterMixin
from .queryfilter import QueryFilter
WHOLE_DAY = datetime.timedelta(days=1)
ONE_SECOND = datetime.timedelta(seconds=1)
@QueryFilter.register_type_condition('datetime')
class DatetimeRangeFilter(DjangoQueryFilterMixin, DictFilterMixin,
FieldFilter):
@property
def start(self):
return get_start(self.filter_args.get("start"))
@property
def end(self):
end_datetime = get_end(self.filter_args.get("end"))
if not end_datetime:
return None
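        # A date-only "end" bound is treated as inclusive of that whole day,
        # so push it forward to the last second of the day before comparing.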
if _has_no_time_info(end_datetime):
end_datetime = end_datetime + WHOLE_DAY - ONE_SECOND
return end_datetime
def on_dicts(self, dicts):
def in_range(datum):
datetime_string = self.get(datum, self.field_name)
if isinstance(datetime_string, datetime.datetime):
to_compare = datetime_string
else:
to_compare = parse(datetime_string)
if not self.start and not self.end:
return False
if self.start and (to_compare < self.start):
return False
if self.end and (self.end < to_compare):
return False
return True
return list(filter(in_range, dicts))
@property
def query_params(self):
if not any((self.start, self.end)):
return None
query_params = dict()
if self.start:
query_params["{}__gte".format(self.field_name)] = self.start
if self.end:
query_params["{}__lte".format(self.field_name)] = self.end
return query_params
def _do_django_query(self, queryset):
query_params = self.query_params
if query_params:
return queryset.filter(**query_params)
else:
return queryset.none()
min_datetime = datetime.datetime.min.replace(tzinfo=pytz.utc)
max_datetime = datetime.datetime.max.replace(tzinfo=pytz.utc)
def get_start(start_date_str):
if not start_date_str:
return None
return parse(start_date_str)
def get_end(end_date_str):
if not end_date_str:
return None
return parse(end_date_str)
def parse(datetime_string):
return make_time_aware(parser.parse(datetime_string))
def make_time_aware(datetime_data):
if not datetime_data.tzinfo:
datetime_data = datetime_data.replace(tzinfo=pytz.utc)
return datetime_data
def _has_no_time_info(value):
return value.hour == 0 and \
value.minute == 0 and \
value.second == 0 and \
value.microsecond == 0
| [
"datetime.datetime.max.replace",
"dateutil.parser.parse",
"datetime.timedelta",
"datetime.datetime.min.replace"
] | [((220, 246), 'datetime.timedelta', 'datetime.timedelta', ([], {'days': '(1)'}), '(days=1)\n', (238, 246), False, 'import datetime\n'), ((260, 289), 'datetime.timedelta', 'datetime.timedelta', ([], {'seconds': '(1)'}), '(seconds=1)\n', (278, 289), False, 'import datetime\n'), ((2032, 2078), 'datetime.datetime.min.replace', 'datetime.datetime.min.replace', ([], {'tzinfo': 'pytz.utc'}), '(tzinfo=pytz.utc)\n', (2061, 2078), False, 'import datetime\n'), ((2094, 2140), 'datetime.datetime.max.replace', 'datetime.datetime.max.replace', ([], {'tzinfo': 'pytz.utc'}), '(tzinfo=pytz.utc)\n', (2123, 2140), False, 'import datetime\n'), ((2416, 2445), 'dateutil.parser.parse', 'parser.parse', (['datetime_string'], {}), '(datetime_string)\n', (2428, 2445), False, 'from dateutil import parser\n')] |
"""
This module implements the Request class which is used to represent HTTP
requests in Scrapy.
See documentation in docs/topics/request-response.rst
"""
from w3lib.url import safe_url_string
from scrapy.http.headers import Headers
from scrapy.utils.python import to_bytes
from scrapy.utils.trackref import object_ref
from scrapy.utils.url import escape_ajax
from scrapy.http.common import obsolete_setter
from scrapy.utils.curl import curl_to_request_kwargs
class Request(object_ref):
def __init__(self, url, callback=None, method='GET', headers=None, body=None,
cookies=None, meta=None, encoding='utf-8', priority=0,
dont_filter=False, errback=None, flags=None, cb_kwargs=None):
self._encoding = encoding # this one has to be set first
self.method = str(method).upper()
self._set_url(url)
self._set_body(body)
assert isinstance(priority, int), "Request priority not an integer: %r" % priority
self.priority = priority
if callback is not None and not callable(callback):
raise TypeError('callback must be a callable, got %s' % type(callback).__name__)
if errback is not None and not callable(errback):
raise TypeError('errback must be a callable, got %s' % type(errback).__name__)
self.callback = callback
self.errback = errback
self.cookies = cookies or {}
self.headers = Headers(headers or {}, encoding=encoding)
self.dont_filter = dont_filter
self._meta = dict(meta) if meta else None
self._cb_kwargs = dict(cb_kwargs) if cb_kwargs else None
self.flags = [] if flags is None else list(flags)
@property
def cb_kwargs(self):
if self._cb_kwargs is None:
self._cb_kwargs = {}
return self._cb_kwargs
@property
def meta(self):
if self._meta is None:
self._meta = {}
return self._meta
def _get_url(self):
return self._url
def _set_url(self, url):
if not isinstance(url, str):
raise TypeError('Request url must be str or unicode, got %s:' % type(url).__name__)
s = safe_url_string(url, self.encoding)
self._url = escape_ajax(s)
if ('://' not in self._url) and (not self._url.startswith('data:')):
raise ValueError('Missing scheme in request url: %s' % self._url)
url = property(_get_url, obsolete_setter(_set_url, 'url'))
def _get_body(self):
return self._body
def _set_body(self, body):
if body is None:
self._body = b''
else:
self._body = to_bytes(body, self.encoding)
body = property(_get_body, obsolete_setter(_set_body, 'body'))
@property
def encoding(self):
return self._encoding
def __str__(self):
return "<%s %s>" % (self.method, self.url)
__repr__ = __str__
def copy(self):
"""Return a copy of this Request"""
return self.replace()
def replace(self, *args, **kwargs):
"""Create a new Request with the same attributes except for those
given new values.
"""
for x in ['url', 'method', 'headers', 'body', 'cookies', 'meta', 'flags',
'encoding', 'priority', 'dont_filter', 'callback', 'errback', 'cb_kwargs']:
kwargs.setdefault(x, getattr(self, x))
cls = kwargs.pop('cls', self.__class__)
return cls(*args, **kwargs)
@classmethod
def from_curl(cls, curl_command, ignore_unknown_options=True, **kwargs):
"""Create a Request object from a string containing a `cURL
<https://curl.haxx.se/>`_ command. It populates the HTTP method, the
URL, the headers, the cookies and the body. It accepts the same
arguments as the :class:`Request` class, taking preference and
overriding the values of the same arguments contained in the cURL
command.
Unrecognized options are ignored by default. To raise an error when
finding unknown options call this method by passing
``ignore_unknown_options=False``.
.. caution:: Using :meth:`from_curl` from :class:`~scrapy.http.Request`
subclasses, such as :class:`~scrapy.http.JSONRequest`, or
:class:`~scrapy.http.XmlRpcRequest`, as well as having
:ref:`downloader middlewares <topics-downloader-middleware>`
and
:ref:`spider middlewares <topics-spider-middleware>`
enabled, such as
:class:`~scrapy.downloadermiddlewares.defaultheaders.DefaultHeadersMiddleware`,
:class:`~scrapy.downloadermiddlewares.useragent.UserAgentMiddleware`,
or
:class:`~scrapy.downloadermiddlewares.httpcompression.HttpCompressionMiddleware`,
may modify the :class:`~scrapy.http.Request` object.
To translate a cURL command into a Scrapy request,
you may use `curl2scrapy <https://michael-shub.github.io/curl2scrapy/>`_.
"""
request_kwargs = curl_to_request_kwargs(curl_command, ignore_unknown_options)
request_kwargs.update(kwargs)
return cls(**request_kwargs)
| [
"scrapy.utils.python.to_bytes",
"scrapy.utils.url.escape_ajax",
"scrapy.http.headers.Headers",
"scrapy.utils.curl.curl_to_request_kwargs",
"scrapy.http.common.obsolete_setter",
"w3lib.url.safe_url_string"
] | [((1442, 1483), 'scrapy.http.headers.Headers', 'Headers', (['(headers or {})'], {'encoding': 'encoding'}), '(headers or {}, encoding=encoding)\n', (1449, 1483), False, 'from scrapy.http.headers import Headers\n'), ((2183, 2218), 'w3lib.url.safe_url_string', 'safe_url_string', (['url', 'self.encoding'], {}), '(url, self.encoding)\n', (2198, 2218), False, 'from w3lib.url import safe_url_string\n'), ((2239, 2253), 'scrapy.utils.url.escape_ajax', 'escape_ajax', (['s'], {}), '(s)\n', (2250, 2253), False, 'from scrapy.utils.url import escape_ajax\n'), ((2440, 2472), 'scrapy.http.common.obsolete_setter', 'obsolete_setter', (['_set_url', '"""url"""'], {}), "(_set_url, 'url')\n", (2455, 2472), False, 'from scrapy.http.common import obsolete_setter\n'), ((2713, 2747), 'scrapy.http.common.obsolete_setter', 'obsolete_setter', (['_set_body', '"""body"""'], {}), "(_set_body, 'body')\n", (2728, 2747), False, 'from scrapy.http.common import obsolete_setter\n'), ((5156, 5216), 'scrapy.utils.curl.curl_to_request_kwargs', 'curl_to_request_kwargs', (['curl_command', 'ignore_unknown_options'], {}), '(curl_command, ignore_unknown_options)\n', (5178, 5216), False, 'from scrapy.utils.curl import curl_to_request_kwargs\n'), ((2651, 2680), 'scrapy.utils.python.to_bytes', 'to_bytes', (['body', 'self.encoding'], {}), '(body, self.encoding)\n', (2659, 2680), False, 'from scrapy.utils.python import to_bytes\n')] |
# HomeAssistant Status Output
# Publishes the provided sensor key and value pair to a HomeAssistant instance
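# Expected config fragment (illustrative values; the keys mirror the .get()
# calls in __init__ below):
#   "status": {
#       "HASS": {
#           "enabled": true,
#           "serverIP": "192.168.1.1",
#           "serverPort": "8123",
#           "useHttps": false,
#           "apiKey": "<long-lived HomeAssistant access token>"
#       }
#   }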
import logging
import time
from ww import f
logger = logging.getLogger(__name__.rsplit(".")[-1])
class HASSStatus:
import threading
import requests
apiKey = None
config = None
configConfig = None
configHASS = None
master = None
msgRateInSeconds = 60
resendRateInSeconds = 3600
retryRateInSeconds = 60
msgQueue = {}
status = False
serverIP = None
serverPort = 8123
useHttps = False
timeout = 2
backgroundTasksLock = threading.Lock()
backgroundTasksThread = None
def __init__(self, master):
self.config = master.config
self.master = master
try:
self.configConfig = self.config["config"]
except KeyError:
self.configConfig = {}
try:
self.configHASS = self.config["status"]["HASS"]
except KeyError:
self.configHASS = {}
self.status = self.configHASS.get("enabled", False)
self.serverIP = self.configHASS.get("serverIP", None)
self.serverPort = self.configHASS.get("serverPort", 8123)
self.useHttps = self.configHASS.get("useHttps", False)
self.apiKey = self.configHASS.get("apiKey", None)
self.msgRateInSeconds = self.configHASS.get("msgRateInSeconds", 60)
self.resendRateInSeconds = self.configHASS.get("resendRateInSeconds", 3600)
self.retryRateInSeconds = self.configHASS.get("retryRateInSeconds", 60)
# Unload if this module is disabled or misconfigured
if (
(not self.status)
or (not self.serverIP)
or (int(self.serverPort) < 1)
or (not self.apiKey)
):
self.master.releaseModule("lib.TWCManager.Status", "HASSStatus")
else:
self.backgroundTasksThread = self.threading.Thread(
target=self.background_task_thread, args=()
)
self.backgroundTasksThread.daemon = True
self.backgroundTasksThread.start()
def getTwident(self, twcid):
# Format TWCID nicely
if len(twcid) == 2:
return "%02X%02X" % (twcid[0], twcid[1])
else:
return str(twcid.decode("utf-8"))
def background_task_thread(self):
while True:
time.sleep(self.msgRateInSeconds)
self.backgroundTasksLock.acquire()
for msgKey in self.msgQueue:
msg = self.msgQueue[msgKey]
if msg.elapsingTime < time.time():
self.sendingStatusToHASS(msg)
self.backgroundTasksLock.release()
def getSensorName(self, twcid, key_underscore):
return "sensor.twcmanager_" + str(self.getTwident(twcid)) + "_" + key_underscore
def setStatus(self, twcid, key_underscore, key_camelcase, value, unit):
self.backgroundTasksLock.acquire()
sensor = self.getSensorName(twcid, key_underscore)
if (sensor not in self.msgQueue) or (self.msgQueue[sensor].value != value):
self.msgQueue[sensor] = HASSMessage(
time.time(),
sensor,
twcid,
key_underscore,
key_camelcase,
value,
unit,
)
self.backgroundTasksLock.release()
def sendingStatusToHASS(self, msg):
http = "http://" if not (self.useHttps) else "https://"
        # str() the port: the default (8123) is an int and would otherwise
        # break this string concatenation.
        url = http + self.serverIP + ":" + str(self.serverPort)
url = url + "/api/states/" + msg.sensor
headers = {
"Authorization": "Bearer " + self.apiKey,
"content-type": "application/json",
}
try:
logger.log(
logging.INFO8,
f(
"Sending POST request to HomeAssistant for sensor {msg.sensor} (value {msg.value})."
),
)
devclass = ""
if str.upper(msg.unit) in ["W", "A", "V", "KWH"]:
devclass = "power"
if len(msg.unit) > 0:
self.requests.post(
url,
json={
"state": msg.value,
"attributes": {
"unit_of_measurement": msg.unit,
"device_class": devclass,
"friendly_name": "TWC "
+ str(self.getTwident(msg.twcid))
+ " "
+ msg.key_camelcase,
},
},
timeout=self.timeout,
headers=headers,
)
else:
self.requests.post(
url,
json={
"state": msg.value,
"attributes": {
"friendly_name": "TWC "
+ str(self.getTwident(msg.twcid))
+ " "
+ msg.key_camelcase
},
},
timeout=self.timeout,
headers=headers,
)
# Setting elapsing time to now + resendRateInSeconds
self.msgQueue[msg.sensor].elapsingTime = (
time.time() + self.resendRateInSeconds
)
except self.requests.exceptions.ConnectionError as e:
logger.log(
logging.INFO4,
"Error connecting to HomeAssistant to publish sensor values",
)
logger.debug(str(e))
self.settingRetryRate(msg)
return False
except self.requests.exceptions.ReadTimeout as e:
logger.log(
logging.INFO4,
"Error connecting to HomeAssistant to publish sensor values",
)
logger.debug(str(e))
self.settingRetryRate(msg)
return False
except Exception as e:
logger.log(
logging.INFO4, "Error during publishing HomeAssistant sensor values"
)
logger.debug(str(e))
self.settingRetryRate(msg)
return False
def settingRetryRate(self, msg):
# Setting elapsing time to now + retryRateInSeconds
self.msgQueue[msg.sensor].elapsingTime = (
time.time() + self.retryRateInSeconds
)
class HASSMessage:
elapsingTime = 0
sensor = ""
twcid = ""
key_underscore = ""
key_camelcase = ""
value = None
unit = ""
def __init__(
self, elapsingTime, sensor, twcid, key_underscore, key_camelcase, value, unit
):
self.elapsingTime = elapsingTime
self.sensor = sensor
self.twcid = twcid
self.key_underscore = key_underscore
self.key_camelcase = key_camelcase
self.value = value
self.unit = unit
| [
"ww.f",
"threading.Lock",
"time.time",
"time.sleep"
] | [((599, 615), 'threading.Lock', 'threading.Lock', ([], {}), '()\n', (613, 615), False, 'import threading\n'), ((2385, 2418), 'time.sleep', 'time.sleep', (['self.msgRateInSeconds'], {}), '(self.msgRateInSeconds)\n', (2395, 2418), False, 'import time\n'), ((6495, 6506), 'time.time', 'time.time', ([], {}), '()\n', (6504, 6506), False, 'import time\n'), ((3169, 3180), 'time.time', 'time.time', ([], {}), '()\n', (3178, 3180), False, 'import time\n'), ((3822, 3914), 'ww.f', 'f', (['"""Sending POST request to HomeAssistant for sensor {msg.sensor} (value {msg.value})."""'], {}), "('Sending POST request to HomeAssistant for sensor {msg.sensor} (value {msg.value}).'\n )\n", (3823, 3914), False, 'from ww import f\n'), ((5422, 5433), 'time.time', 'time.time', ([], {}), '()\n', (5431, 5433), False, 'import time\n'), ((2589, 2600), 'time.time', 'time.time', ([], {}), '()\n', (2598, 2600), False, 'import time\n')] |
from flask import Blueprint, jsonify, request, render_template
# DB and User are referenced by the routes below; assuming they live in a
# sibling models module (adjust the import to match the project layout).
from .models import DB, User
home_routes = Blueprint("home_routes", __name__)
@home_routes.route("/")
def index():
users = User.query.all()
return render_template('base.html', title='Home',
users=users)
@home_routes.route("/about")
def about():
return "About Me"
@home_routes.route('/reset')
def reset():
DB.drop_all()
DB.create_all()
return render_template('base.html', title='Reset', users=[])
# # Add config for database
# app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///db.sqlite3'
# # stop tracking modifications on sqlalchemy config
# app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
# # ? app.config["TWITTER_API_CLIENT"] = twitter
# # Have the database know about the app
# DB.init_app(app) | [
"flask.render_template",
"flask.Blueprint"
] | [((79, 113), 'flask.Blueprint', 'Blueprint', (['"""home_routes"""', '__name__'], {}), "('home_routes', __name__)\n", (88, 113), False, 'from flask import Blueprint, jsonify, request, render_template\n'), ((197, 252), 'flask.render_template', 'render_template', (['"""base.html"""'], {'title': '"""Home"""', 'users': 'users'}), "('base.html', title='Home', users=users)\n", (212, 252), False, 'from flask import Blueprint, jsonify, request, render_template\n'), ((442, 495), 'flask.render_template', 'render_template', (['"""base.html"""'], {'title': '"""Reset"""', 'users': '[]'}), "('base.html', title='Reset', users=[])\n", (457, 495), False, 'from flask import Blueprint, jsonify, request, render_template\n')] |
import copy
import functools
import itertools
import numbers
import warnings
from collections import defaultdict
from datetime import timedelta
from distutils.version import LooseVersion
from typing import (
Any,
Dict,
Hashable,
Mapping,
Optional,
Sequence,
Tuple,
TypeVar,
Union,
)
import numpy as np
import pandas as pd
import xarray as xr # only for Dataset and DataArray
from . import arithmetic, common, dtypes, duck_array_ops, indexing, nputils, ops, utils
from .indexing import (
BasicIndexer,
OuterIndexer,
PandasIndexAdapter,
VectorizedIndexer,
as_indexable,
)
from .npcompat import IS_NEP18_ACTIVE
from .options import _get_keep_attrs
from .pycompat import (
cupy_array_type,
dask_array_type,
integer_types,
is_duck_dask_array,
)
from .utils import (
OrderedSet,
_default,
decode_numpy_dict_values,
drop_dims_from_indexers,
either_dict_or_kwargs,
ensure_us_time_resolution,
infix_dims,
is_duck_array,
)
NON_NUMPY_SUPPORTED_ARRAY_TYPES = (
(
indexing.ExplicitlyIndexed,
pd.Index,
)
+ dask_array_type
+ cupy_array_type
)
# https://github.com/python/mypy/issues/224
BASIC_INDEXING_TYPES = integer_types + (slice,) # type: ignore
VariableType = TypeVar("VariableType", bound="Variable")
"""Type annotation to be used when methods of Variable return self or a copy of self.
When called from an instance of a subclass, e.g. IndexVariable, mypy identifies the
output as an instance of the subclass.
Usage::
class Variable:
def f(self: VariableType, ...) -> VariableType:
...
"""
class MissingDimensionsError(ValueError):
"""Error class used when we can't safely guess a dimension name."""
# inherits from ValueError for backward compatibility
# TODO: move this to an xarray.exceptions module?
def as_variable(obj, name=None) -> "Union[Variable, IndexVariable]":
"""Convert an object into a Variable.
Parameters
----------
obj : object
Object to convert into a Variable.
- If the object is already a Variable, return a shallow copy.
- Otherwise, if the object has 'dims' and 'data' attributes, convert
it into a new Variable.
- If all else fails, attempt to convert the object into a Variable by
unpacking it into the arguments for creating a new Variable.
name : str, optional
If provided:
- `obj` can be a 1D array, which is assumed to label coordinate values
along a dimension of this given name.
- Variables with name matching one of their dimensions are converted
into `IndexVariable` objects.
Returns
-------
var : Variable
The newly created variable.
"""
from .dataarray import DataArray
    # TODO: consider extending this method to automatically handle Iris and
    # pandas objects.
if isinstance(obj, DataArray):
# extract the primary Variable from DataArrays
obj = obj.variable
if isinstance(obj, Variable):
obj = obj.copy(deep=False)
elif isinstance(obj, tuple):
try:
obj = Variable(*obj)
except (TypeError, ValueError) as error:
# use .format() instead of % because it handles tuples consistently
raise error.__class__(
"Could not convert tuple of form "
"(dims, data[, attrs, encoding]): "
"{} to Variable.".format(obj)
)
elif utils.is_scalar(obj):
obj = Variable([], obj)
elif isinstance(obj, (pd.Index, IndexVariable)) and obj.name is not None:
obj = Variable(obj.name, obj)
elif isinstance(obj, (set, dict)):
raise TypeError("variable {!r} has invalid type {!r}".format(name, type(obj)))
elif name is not None:
data = as_compatible_data(obj)
if data.ndim != 1:
raise MissingDimensionsError(
"cannot set variable %r with %r-dimensional data "
"without explicit dimension names. Pass a tuple of "
"(dims, data) instead." % (name, data.ndim)
)
obj = Variable(name, data, fastpath=True)
else:
raise TypeError(
"unable to convert object into a variable without an "
"explicit list of dimensions: %r" % obj
)
if name is not None and name in obj.dims:
# convert the Variable into an Index
if obj.ndim != 1:
raise MissingDimensionsError(
"%r has more than 1-dimension and the same name as one of its "
"dimensions %r. xarray disallows such variables because they "
"conflict with the coordinates used to label "
"dimensions." % (name, obj.dims)
)
obj = obj.to_index_variable()
return obj
def _maybe_wrap_data(data):
"""
Put pandas.Index and numpy.ndarray arguments in adapter objects to ensure
they can be indexed properly.
NumpyArrayAdapter, PandasIndexAdapter and LazilyOuterIndexedArray should
all pass through unmodified.
"""
if isinstance(data, pd.Index):
return PandasIndexAdapter(data)
return data
def _possibly_convert_objects(values):
"""Convert arrays of datetime.datetime and datetime.timedelta objects into
datetime64 and timedelta64, according to the pandas convention. Also used for
validating that datetime64 and timedelta64 objects are within the valid date
range for ns precision, as pandas will raise an error if they are not.
"""
return np.asarray(pd.Series(values.ravel())).reshape(values.shape)
def as_compatible_data(data, fastpath=False):
"""Prepare and wrap data to put in a Variable.
- If data does not have the necessary attributes, convert it to ndarray.
- If data has dtype=datetime64, ensure that it has ns precision. If it's a
pandas.Timestamp, convert it to datetime64.
- If data is already a pandas or xarray object (other than an Index), just
use the values.
Finally, wrap it up with an adapter if necessary.
"""
if fastpath and getattr(data, "ndim", 0) > 0:
# can't use fastpath (yet) for scalars
return _maybe_wrap_data(data)
if isinstance(data, Variable):
return data.data
if isinstance(data, NON_NUMPY_SUPPORTED_ARRAY_TYPES):
return _maybe_wrap_data(data)
if isinstance(data, tuple):
data = utils.to_0d_object_array(data)
if isinstance(data, pd.Timestamp):
# TODO: convert, handle datetime objects, too
data = np.datetime64(data.value, "ns")
if isinstance(data, timedelta):
data = np.timedelta64(getattr(data, "value", data), "ns")
# we don't want nested self-described arrays
data = getattr(data, "values", data)
if isinstance(data, np.ma.MaskedArray):
mask = np.ma.getmaskarray(data)
if mask.any():
dtype, fill_value = dtypes.maybe_promote(data.dtype)
data = np.asarray(data, dtype=dtype)
data[mask] = fill_value
else:
data = np.asarray(data)
if not isinstance(data, np.ndarray):
if hasattr(data, "__array_function__"):
if IS_NEP18_ACTIVE:
return data
else:
raise TypeError(
"Got an NumPy-like array type providing the "
"__array_function__ protocol but NEP18 is not enabled. "
"Check that numpy >= v1.16 and that the environment "
'variable "NUMPY_EXPERIMENTAL_ARRAY_FUNCTION" is set to '
'"1"'
)
# validate whether the data is valid data types.
data = np.asarray(data)
if isinstance(data, np.ndarray):
if data.dtype.kind == "O":
data = _possibly_convert_objects(data)
elif data.dtype.kind == "M":
data = _possibly_convert_objects(data)
elif data.dtype.kind == "m":
data = _possibly_convert_objects(data)
return _maybe_wrap_data(data)
def _as_array_or_item(data):
"""Return the given values as a numpy array, or as an individual item if
it's a 0d datetime64 or timedelta64 array.
Importantly, this function does not copy data if it is already an ndarray -
otherwise, it will not be possible to update Variable values in place.
This function mostly exists because 0-dimensional ndarrays with
dtype=datetime64 are broken :(
https://github.com/numpy/numpy/issues/4337
https://github.com/numpy/numpy/issues/7619
TODO: remove this (replace with np.asarray) once these issues are fixed
"""
if isinstance(data, cupy_array_type):
data = data.get()
else:
data = np.asarray(data)
if data.ndim == 0:
if data.dtype.kind == "M":
data = np.datetime64(data, "ns")
elif data.dtype.kind == "m":
data = np.timedelta64(data, "ns")
return data
class Variable(
common.AbstractArray, arithmetic.SupportsArithmetic, utils.NdimSizeLenMixin
):
"""A netcdf-like variable consisting of dimensions, data and attributes
which describe a single Array. A single Variable object is not fully
described outside the context of its parent Dataset (if you want such a
fully described object, use a DataArray instead).
The main functional difference between Variables and numpy arrays is that
numerical operations on Variables implement array broadcasting by dimension
name. For example, adding an Variable with dimensions `('time',)` to
another Variable with dimensions `('space',)` results in a new Variable
with dimensions `('time', 'space')`. Furthermore, numpy reduce operations
like ``mean`` or ``sum`` are overwritten to take a "dimension" argument
instead of an "axis".
Variables are light-weight objects used as the building block for datasets.
They are more primitive objects, so operations with them provide marginally
higher performance than using DataArrays. However, manipulating data in the
form of a Dataset or DataArray should almost always be preferred, because
they can use more complete metadata in context of coordinate labels.
"""
__slots__ = ("_dims", "_data", "_attrs", "_encoding")
def __init__(self, dims, data, attrs=None, encoding=None, fastpath=False):
"""
Parameters
----------
dims : str or sequence of str
Name(s) of the the data dimension(s). Must be either a string (only
for 1D data) or a sequence of strings with length equal to the
number of dimensions.
data : array_like
Data array which supports numpy-like data access.
attrs : dict_like or None, optional
Attributes to assign to the new variable. If None (default), an
empty attribute dictionary is initialized.
encoding : dict_like or None, optional
Dictionary specifying how to encode this array's data into a
serialized format like netCDF4. Currently used keys (for netCDF)
include '_FillValue', 'scale_factor', 'add_offset' and 'dtype'.
Well-behaved code to serialize a Variable should ignore
unrecognized encoding items.
"""
self._data = as_compatible_data(data, fastpath=fastpath)
self._dims = self._parse_dimensions(dims)
self._attrs = None
self._encoding = None
if attrs is not None:
self.attrs = attrs
if encoding is not None:
self.encoding = encoding
@property
def dtype(self):
return self._data.dtype
@property
def shape(self):
return self._data.shape
@property
def nbytes(self):
return self.size * self.dtype.itemsize
@property
def _in_memory(self):
return isinstance(self._data, (np.ndarray, np.number, PandasIndexAdapter)) or (
isinstance(self._data, indexing.MemoryCachedArray)
and isinstance(self._data.array, indexing.NumpyIndexingAdapter)
)
@property
def data(self):
if is_duck_array(self._data):
return self._data
else:
return self.values
@data.setter
def data(self, data):
data = as_compatible_data(data)
if data.shape != self.shape:
raise ValueError(
f"replacement data must match the Variable's shape. "
f"replacement data has shape {data.shape}; Variable has shape {self.shape}"
)
self._data = data
def astype(
self: VariableType,
dtype,
*,
order=None,
casting=None,
subok=None,
copy=None,
keep_attrs=True,
) -> VariableType:
"""
Copy of the Variable object, with data cast to a specified type.
Parameters
----------
dtype : str or dtype
Typecode or data-type to which the array is cast.
order : {'C', 'F', 'A', 'K'}, optional
Controls the memory layout order of the result. ‘C’ means C order,
‘F’ means Fortran order, ‘A’ means ‘F’ order if all the arrays are
Fortran contiguous, ‘C’ order otherwise, and ‘K’ means as close to
the order the array elements appear in memory as possible.
casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional
Controls what kind of data casting may occur.
* 'no' means the data types should not be cast at all.
* 'equiv' means only byte-order changes are allowed.
* 'safe' means only casts which can preserve values are allowed.
* 'same_kind' means only safe casts or casts within a kind,
like float64 to float32, are allowed.
* 'unsafe' means any data conversions may be done.
subok : bool, optional
If True, then sub-classes will be passed-through, otherwise the
returned array will be forced to be a base-class array.
copy : bool, optional
By default, astype always returns a newly allocated array. If this
is set to False and the `dtype` requirement is satisfied, the input
array is returned instead of a copy.
keep_attrs : bool, optional
By default, astype keeps attributes. Set to False to remove
attributes in the returned object.
Returns
-------
out : same as object
New object with data cast to the specified type.
Notes
-----
The ``order``, ``casting``, ``subok`` and ``copy`` arguments are only passed
through to the ``astype`` method of the underlying array when a value
different than ``None`` is supplied.
Make sure to only supply these arguments if the underlying array class
supports them.
See also
--------
numpy.ndarray.astype
dask.array.Array.astype
sparse.COO.astype
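        Examples
        --------
        A small illustrative example with an in-memory, numpy-backed variable:
        >>> var = Variable("x", [1, 2, 3])
        >>> var.astype(float)
        <xarray.Variable (x: 3)>
        array([1., 2., 3.])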
"""
from .computation import apply_ufunc
kwargs = dict(order=order, casting=casting, subok=subok, copy=copy)
kwargs = {k: v for k, v in kwargs.items() if v is not None}
return apply_ufunc(
duck_array_ops.astype,
self,
dtype,
kwargs=kwargs,
keep_attrs=keep_attrs,
dask="allowed",
)
def load(self, **kwargs):
"""Manually trigger loading of this variable's data from disk or a
remote source into memory and return this variable.
Normally, it should not be necessary to call this method in user code,
because all xarray functions should either work on deferred data or
load data automatically.
Parameters
----------
**kwargs : dict
Additional keyword arguments passed on to ``dask.array.compute``.
See Also
--------
dask.array.compute
"""
if is_duck_dask_array(self._data):
self._data = as_compatible_data(self._data.compute(**kwargs))
elif not is_duck_array(self._data):
self._data = np.asarray(self._data)
return self
def compute(self, **kwargs):
"""Manually trigger loading of this variable's data from disk or a
remote source into memory and return a new variable. The original is
left unaltered.
Normally, it should not be necessary to call this method in user code,
because all xarray functions should either work on deferred data or
load data automatically.
Parameters
----------
**kwargs : dict
Additional keyword arguments passed on to ``dask.array.compute``.
See Also
--------
dask.array.compute
"""
new = self.copy(deep=False)
return new.load(**kwargs)
def __dask_tokenize__(self):
# Use v.data, instead of v._data, in order to cope with the wrappers
# around NetCDF and the like
from dask.base import normalize_token
return normalize_token((type(self), self._dims, self.data, self._attrs))
def __dask_graph__(self):
if is_duck_dask_array(self._data):
return self._data.__dask_graph__()
else:
return None
def __dask_keys__(self):
return self._data.__dask_keys__()
def __dask_layers__(self):
return self._data.__dask_layers__()
@property
def __dask_optimize__(self):
return self._data.__dask_optimize__
@property
def __dask_scheduler__(self):
return self._data.__dask_scheduler__
def __dask_postcompute__(self):
array_func, array_args = self._data.__dask_postcompute__()
return (
self._dask_finalize,
(array_func, array_args, self._dims, self._attrs, self._encoding),
)
def __dask_postpersist__(self):
array_func, array_args = self._data.__dask_postpersist__()
return (
self._dask_finalize,
(array_func, array_args, self._dims, self._attrs, self._encoding),
)
@staticmethod
def _dask_finalize(results, array_func, array_args, dims, attrs, encoding):
data = array_func(results, *array_args)
return Variable(dims, data, attrs=attrs, encoding=encoding)
@property
def values(self):
"""The variable's data as a numpy.ndarray"""
return _as_array_or_item(self._data)
@values.setter
def values(self, values):
self.data = values
def to_base_variable(self):
"""Return this variable as a base xarray.Variable"""
return Variable(
self.dims, self._data, self._attrs, encoding=self._encoding, fastpath=True
)
to_variable = utils.alias(to_base_variable, "to_variable")
def to_index_variable(self):
"""Return this variable as an xarray.IndexVariable"""
return IndexVariable(
self.dims, self._data, self._attrs, encoding=self._encoding, fastpath=True
)
to_coord = utils.alias(to_index_variable, "to_coord")
def to_index(self):
"""Convert this variable to a pandas.Index"""
return self.to_index_variable().to_index()
def to_dict(self, data=True):
"""Dictionary representation of variable."""
item = {"dims": self.dims, "attrs": decode_numpy_dict_values(self.attrs)}
if data:
item["data"] = ensure_us_time_resolution(self.values).tolist()
else:
item.update({"dtype": str(self.dtype), "shape": self.shape})
return item
@property
def dims(self):
"""Tuple of dimension names with which this variable is associated."""
return self._dims
@dims.setter
def dims(self, value):
self._dims = self._parse_dimensions(value)
def _parse_dimensions(self, dims):
if isinstance(dims, str):
dims = (dims,)
dims = tuple(dims)
if len(dims) != self.ndim:
raise ValueError(
"dimensions %s must have the same length as the "
"number of data dimensions, ndim=%s" % (dims, self.ndim)
)
return dims
def _item_key_to_tuple(self, key):
if utils.is_dict_like(key):
return tuple(key.get(dim, slice(None)) for dim in self.dims)
else:
return key
def _broadcast_indexes(self, key):
"""Prepare an indexing key for an indexing operation.
Parameters
        ----------
        key : int, slice, array-like, dict or tuple of integer, slice and array-like
Any valid input for indexing.
Returns
-------
dims : tuple
Dimension of the resultant variable.
indexers : IndexingTuple subclass
Tuple of integer, array-like, or slices to use when indexing
self._data. The type of this argument indicates the type of
indexing to perform, either basic, outer or vectorized.
new_order : Optional[Sequence[int]]
Optional reordering to do on the result of indexing. If not None,
the first len(new_order) indexing should be moved to these
positions.
"""
key = self._item_key_to_tuple(key) # key is a tuple
# key is a tuple of full size
key = indexing.expanded_indexer(key, self.ndim)
# Convert a scalar Variable to an integer
key = tuple(
k.data.item() if isinstance(k, Variable) and k.ndim == 0 else k for k in key
)
# Convert a 0d-array to an integer
key = tuple(
k.item() if isinstance(k, np.ndarray) and k.ndim == 0 else k for k in key
)
if all(isinstance(k, BASIC_INDEXING_TYPES) for k in key):
return self._broadcast_indexes_basic(key)
self._validate_indexers(key)
# Detect it can be mapped as an outer indexer
# If all key is unlabeled, or
# key can be mapped as an OuterIndexer.
if all(not isinstance(k, Variable) for k in key):
return self._broadcast_indexes_outer(key)
# If all key is 1-dimensional and there are no duplicate labels,
# key can be mapped as an OuterIndexer.
dims = []
for k, d in zip(key, self.dims):
if isinstance(k, Variable):
if len(k.dims) > 1:
return self._broadcast_indexes_vectorized(key)
dims.append(k.dims[0])
elif not isinstance(k, integer_types):
dims.append(d)
if len(set(dims)) == len(dims):
return self._broadcast_indexes_outer(key)
return self._broadcast_indexes_vectorized(key)
def _broadcast_indexes_basic(self, key):
dims = tuple(
dim for k, dim in zip(key, self.dims) if not isinstance(k, integer_types)
)
return dims, BasicIndexer(key), None
def _validate_indexers(self, key):
""" Make sanity checks """
for dim, k in zip(self.dims, key):
if isinstance(k, BASIC_INDEXING_TYPES):
pass
else:
if not isinstance(k, Variable):
k = np.asarray(k)
if k.ndim > 1:
raise IndexError(
"Unlabeled multi-dimensional array cannot be "
"used for indexing: {}".format(k)
)
if k.dtype.kind == "b":
if self.shape[self.get_axis_num(dim)] != len(k):
raise IndexError(
"Boolean array size {:d} is used to index array "
"with shape {:s}.".format(len(k), str(self.shape))
)
if k.ndim > 1:
raise IndexError(
"{}-dimensional boolean indexing is "
"not supported. ".format(k.ndim)
)
if getattr(k, "dims", (dim,)) != (dim,):
raise IndexError(
"Boolean indexer should be unlabeled or on the "
"same dimension to the indexed array. Indexer is "
"on {:s} but the target dimension is {:s}.".format(
str(k.dims), dim
)
)
def _broadcast_indexes_outer(self, key):
dims = tuple(
k.dims[0] if isinstance(k, Variable) else dim
for k, dim in zip(key, self.dims)
if not isinstance(k, integer_types)
)
new_key = []
for k in key:
if isinstance(k, Variable):
k = k.data
if not isinstance(k, BASIC_INDEXING_TYPES):
k = np.asarray(k)
if k.size == 0:
# Slice by empty list; numpy could not infer the dtype
k = k.astype(int)
elif k.dtype.kind == "b":
(k,) = np.nonzero(k)
new_key.append(k)
return dims, OuterIndexer(tuple(new_key)), None
def _nonzero(self):
""" Equivalent numpy's nonzero but returns a tuple of Varibles. """
# TODO we should replace dask's native nonzero
# after https://github.com/dask/dask/issues/1076 is implemented.
nonzeros = np.nonzero(self.data)
return tuple(Variable((dim), nz) for nz, dim in zip(nonzeros, self.dims))
def _broadcast_indexes_vectorized(self, key):
variables = []
out_dims_set = OrderedSet()
for dim, value in zip(self.dims, key):
if isinstance(value, slice):
out_dims_set.add(dim)
else:
variable = (
value
if isinstance(value, Variable)
else as_variable(value, name=dim)
)
if variable.dtype.kind == "b": # boolean indexing case
(variable,) = variable._nonzero()
variables.append(variable)
out_dims_set.update(variable.dims)
variable_dims = set()
for variable in variables:
variable_dims.update(variable.dims)
slices = []
for i, (dim, value) in enumerate(zip(self.dims, key)):
if isinstance(value, slice):
if dim in variable_dims:
# We only convert slice objects to variables if they share
# a dimension with at least one other variable. Otherwise,
                    # we can equivalently leave them as slices and transpose
# the result. This is significantly faster/more efficient
# for most array backends.
values = np.arange(*value.indices(self.sizes[dim]))
variables.insert(i - len(slices), Variable((dim,), values))
else:
slices.append((i, value))
try:
variables = _broadcast_compat_variables(*variables)
except ValueError:
raise IndexError(f"Dimensions of indexers mismatch: {key}")
out_key = [variable.data for variable in variables]
out_dims = tuple(out_dims_set)
slice_positions = set()
for i, value in slices:
out_key.insert(i, value)
new_position = out_dims.index(self.dims[i])
slice_positions.add(new_position)
if slice_positions:
new_order = [i for i in range(len(out_dims)) if i not in slice_positions]
else:
new_order = None
return out_dims, VectorizedIndexer(tuple(out_key)), new_order
def __getitem__(self: VariableType, key) -> VariableType:
"""Return a new Variable object whose contents are consistent with
getting the provided key from the underlying data.
NB. __getitem__ and __setitem__ implement xarray-style indexing,
where if keys are unlabeled arrays, we index the array orthogonally
with them. If keys are labeled array (such as Variables), they are
broadcasted with our usual scheme and then the array is indexed with
the broadcasted key, like numpy's fancy indexing.
If you really want to do indexing like `x[x > 0]`, manipulate the numpy
array `x.values` directly.
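        Examples
        --------
        A small illustrative example of orthogonal (outer) indexing with
        unlabeled array keys:
        >>> v = Variable(("x", "y"), [[1, 2, 3], [4, 5, 6]])
        >>> v[[0, 1], [0, 2]]
        <xarray.Variable (x: 2, y: 2)>
        array([[1, 3],
               [4, 6]])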
"""
dims, indexer, new_order = self._broadcast_indexes(key)
data = as_indexable(self._data)[indexer]
if new_order:
data = duck_array_ops.moveaxis(data, range(len(new_order)), new_order)
return self._finalize_indexing_result(dims, data)
def _finalize_indexing_result(self: VariableType, dims, data) -> VariableType:
"""Used by IndexVariable to return IndexVariable objects when possible."""
return type(self)(dims, data, self._attrs, self._encoding, fastpath=True)
def _getitem_with_mask(self, key, fill_value=dtypes.NA):
"""Index this Variable with -1 remapped to fill_value."""
# TODO(shoyer): expose this method in public API somewhere (isel?) and
# use it for reindex.
# TODO(shoyer): add a sanity check that all other integers are
# non-negative
# TODO(shoyer): add an optimization, remapping -1 to an adjacent value
# that is actually indexed rather than mapping it to the last value
# along each axis.
if fill_value is dtypes.NA:
fill_value = dtypes.get_fill_value(self.dtype)
dims, indexer, new_order = self._broadcast_indexes(key)
if self.size:
if is_duck_dask_array(self._data):
# dask's indexing is faster this way; also vindex does not
# support negative indices yet:
# https://github.com/dask/dask/pull/2967
actual_indexer = indexing.posify_mask_indexer(indexer)
else:
actual_indexer = indexer
data = as_indexable(self._data)[actual_indexer]
mask = indexing.create_mask(indexer, self.shape, data)
# we need to invert the mask in order to pass data first. This helps
# pint to choose the correct unit
# TODO: revert after https://github.com/hgrecco/pint/issues/1019 is fixed
data = duck_array_ops.where(np.logical_not(mask), data, fill_value)
else:
# array cannot be indexed along dimensions of size 0, so just
# build the mask directly instead.
mask = indexing.create_mask(indexer, self.shape)
data = np.broadcast_to(fill_value, getattr(mask, "shape", ()))
if new_order:
data = duck_array_ops.moveaxis(data, range(len(new_order)), new_order)
return self._finalize_indexing_result(dims, data)
def __setitem__(self, key, value):
"""__setitem__ is overloaded to access the underlying numpy values with
orthogonal indexing.
See __getitem__ for more details.
"""
dims, index_tuple, new_order = self._broadcast_indexes(key)
if not isinstance(value, Variable):
value = as_compatible_data(value)
if value.ndim > len(dims):
raise ValueError(
"shape mismatch: value array of shape %s could not be "
"broadcast to indexing result with %s dimensions"
% (value.shape, len(dims))
)
if value.ndim == 0:
value = Variable((), value)
else:
value = Variable(dims[-value.ndim :], value)
# broadcast to become assignable
value = value.set_dims(dims).data
if new_order:
value = duck_array_ops.asarray(value)
value = value[(len(dims) - value.ndim) * (np.newaxis,) + (Ellipsis,)]
value = duck_array_ops.moveaxis(value, new_order, range(len(new_order)))
indexable = as_indexable(self._data)
indexable[index_tuple] = value
@property
def attrs(self) -> Dict[Hashable, Any]:
"""Dictionary of local attributes on this variable."""
if self._attrs is None:
self._attrs = {}
return self._attrs
@attrs.setter
def attrs(self, value: Mapping[Hashable, Any]) -> None:
self._attrs = dict(value)
@property
def encoding(self):
"""Dictionary of encodings on this variable."""
if self._encoding is None:
self._encoding = {}
return self._encoding
@encoding.setter
def encoding(self, value):
try:
self._encoding = dict(value)
except ValueError:
raise ValueError("encoding must be castable to a dictionary")
def copy(self, deep=True, data=None):
"""Returns a copy of this object.
If `deep=True`, the data array is loaded into memory and copied onto
the new object. Dimensions, attributes and encodings are always copied.
Use `data` to create a new object with the same structure as
original but entirely new data.
Parameters
----------
deep : bool, optional
Whether the data array is loaded into memory and copied onto
the new object. Default is True.
data : array_like, optional
Data to use in the new object. Must have same shape as original.
When `data` is used, `deep` is ignored.
Returns
-------
object : Variable
New object with dimensions, attributes, encodings, and optionally
data copied from original.
Examples
--------
Shallow copy versus deep copy
>>> var = xr.Variable(data=[1, 2, 3], dims="x")
>>> var.copy()
<xarray.Variable (x: 3)>
array([1, 2, 3])
>>> var_0 = var.copy(deep=False)
>>> var_0[0] = 7
>>> var_0
<xarray.Variable (x: 3)>
array([7, 2, 3])
>>> var
<xarray.Variable (x: 3)>
array([7, 2, 3])
Changing the data using the ``data`` argument maintains the
structure of the original object, but with the new data. Original
object is unaffected.
>>> var.copy(data=[0.1, 0.2, 0.3])
<xarray.Variable (x: 3)>
array([0.1, 0.2, 0.3])
>>> var
<xarray.Variable (x: 3)>
array([7, 2, 3])
See Also
--------
pandas.DataFrame.copy
"""
if data is None:
data = self._data
if isinstance(data, indexing.MemoryCachedArray):
# don't share caching between copies
data = indexing.MemoryCachedArray(data.array)
if deep:
data = copy.deepcopy(data)
else:
data = as_compatible_data(data)
if self.shape != data.shape:
raise ValueError(
"Data shape {} must match shape of object {}".format(
data.shape, self.shape
)
)
# note:
# dims is already an immutable tuple
# attributes and encoding will be copied when the new Array is created
return self._replace(data=data)
def _replace(
self, dims=_default, data=_default, attrs=_default, encoding=_default
) -> "Variable":
if dims is _default:
dims = copy.copy(self._dims)
if data is _default:
data = copy.copy(self.data)
if attrs is _default:
attrs = copy.copy(self._attrs)
if encoding is _default:
encoding = copy.copy(self._encoding)
return type(self)(dims, data, attrs, encoding, fastpath=True)
def __copy__(self):
return self.copy(deep=False)
def __deepcopy__(self, memo=None):
# memo does nothing but is required for compatibility with
# copy.deepcopy
return self.copy(deep=True)
# mutable objects should not be hashable
# https://github.com/python/mypy/issues/4266
__hash__ = None # type: ignore
@property
def chunks(self):
"""Block dimensions for this array's data or None if it's not a dask
array.
"""
return getattr(self._data, "chunks", None)
_array_counter = itertools.count()
def chunk(self, chunks={}, name=None, lock=False):
"""Coerce this array's data into a dask arrays with the given chunks.
If this variable is a non-dask array, it will be converted to dask
array. If it's a dask array, it will be rechunked to the given chunk
sizes.
        If chunks is not provided for one or more dimensions, chunk sizes
        along those dimensions will not be updated; non-dask arrays will be
converted into dask arrays with a single block.
Parameters
----------
chunks : int, tuple or dict, optional
Chunk sizes along each dimension, e.g., ``5``, ``(5, 5)`` or
``{'x': 5, 'y': 5}``.
name : str, optional
Used to generate the name for this array in the internal dask
            graph. Does not need to be unique.
lock : optional
Passed on to :py:func:`dask.array.from_array`, if the array is not
already as dask array.
Returns
-------
chunked : xarray.Variable
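        Examples
        --------
        An illustrative sketch, assuming dask is installed:
        >>> v = Variable("x", np.arange(6))
        >>> v.chunk({"x": 3}).chunks
        ((3, 3),)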
"""
import dask
import dask.array as da
if chunks is None:
warnings.warn(
"None value for 'chunks' is deprecated. "
"It will raise an error in the future. Use instead '{}'",
category=FutureWarning,
)
chunks = {}
if utils.is_dict_like(chunks):
chunks = {self.get_axis_num(dim): chunk for dim, chunk in chunks.items()}
data = self._data
if is_duck_dask_array(data):
data = data.rechunk(chunks)
else:
if isinstance(data, indexing.ExplicitlyIndexed):
# Unambiguously handle array storage backends (like NetCDF4 and h5py)
# that can't handle general array indexing. For example, in netCDF4 you
# can do "outer" indexing along two dimensions independent, which works
# differently from how NumPy handles it.
# da.from_array works by using lazy indexing with a tuple of slices.
# Using OuterIndexer is a pragmatic choice: dask does not yet handle
# different indexing types in an explicit way:
# https://github.com/dask/dask/issues/2883
data = indexing.ImplicitToExplicitIndexingAdapter(
data, indexing.OuterIndexer
)
if LooseVersion(dask.__version__) < "2.0.0":
kwargs = {}
else:
# All of our lazily loaded backend array classes should use NumPy
# array operations.
kwargs = {"meta": np.ndarray}
else:
kwargs = {}
if utils.is_dict_like(chunks):
chunks = tuple(chunks.get(n, s) for n, s in enumerate(self.shape))
data = da.from_array(data, chunks, name=name, lock=lock, **kwargs)
return type(self)(self.dims, data, self._attrs, self._encoding, fastpath=True)
def _as_sparse(self, sparse_format=_default, fill_value=dtypes.NA):
"""
use sparse-array as backend.
"""
import sparse
# TODO: what to do if dask-backended?
if fill_value is dtypes.NA:
dtype, fill_value = dtypes.maybe_promote(self.dtype)
else:
dtype = dtypes.result_type(self.dtype, fill_value)
if sparse_format is _default:
sparse_format = "coo"
try:
as_sparse = getattr(sparse, f"as_{sparse_format.lower()}")
except AttributeError:
raise ValueError(f"{sparse_format} is not a valid sparse format")
data = as_sparse(self.data.astype(dtype), fill_value=fill_value)
return self._replace(data=data)
def _to_dense(self):
"""
Change backend from sparse to np.array
"""
if hasattr(self._data, "todense"):
return self._replace(data=self._data.todense())
return self.copy(deep=False)
def isel(
self: VariableType,
indexers: Mapping[Hashable, Any] = None,
missing_dims: str = "raise",
**indexers_kwargs: Any,
) -> VariableType:
"""Return a new array indexed along the specified dimension(s).
Parameters
----------
**indexers : {dim: indexer, ...}
Keyword arguments with names matching dimensions and values given
by integers, slice objects or arrays.
missing_dims : {"raise", "warn", "ignore"}, default: "raise"
What to do if dimensions that should be selected from are not present in the
DataArray:
- "raise": raise an exception
- "warning": raise a warning, and ignore the missing dimensions
- "ignore": ignore the missing dimensions
Returns
-------
obj : Array object
A new Array with the selected data and dimensions. In general,
the new variable's data will be a view of this variable's data,
unless numpy fancy indexing was triggered by using an array
indexer, in which case the data will be a copy.
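        Examples
        --------
        A small illustrative example with an in-memory variable:
        >>> v = Variable(("x", "y"), [[1, 2, 3], [4, 5, 6]])
        >>> v.isel(y=0)
        <xarray.Variable (x: 2)>
        array([1, 4])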
"""
indexers = either_dict_or_kwargs(indexers, indexers_kwargs, "isel")
indexers = drop_dims_from_indexers(indexers, self.dims, missing_dims)
key = tuple(indexers.get(dim, slice(None)) for dim in self.dims)
return self[key]
def squeeze(self, dim=None):
"""Return a new object with squeezed data.
Parameters
----------
dim : None or str or tuple of str, optional
Selects a subset of the length one dimensions. If a dimension is
selected with length greater than one, an error is raised. If
None, all length one dimensions are squeezed.
Returns
-------
squeezed : same type as caller
            This object, but with all or a subset of the dimensions of
length 1 removed.
See Also
--------
numpy.squeeze
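        Examples
        --------
        A small illustrative example:
        >>> v = Variable(("x", "y"), [[1, 2, 3]])
        >>> v.squeeze()
        <xarray.Variable (y: 3)>
        array([1, 2, 3])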
"""
dims = common.get_squeeze_dims(self, dim)
return self.isel({d: 0 for d in dims})
def _shift_one_dim(self, dim, count, fill_value=dtypes.NA):
axis = self.get_axis_num(dim)
if count > 0:
keep = slice(None, -count)
elif count < 0:
keep = slice(-count, None)
else:
keep = slice(None)
trimmed_data = self[(slice(None),) * axis + (keep,)].data
if fill_value is dtypes.NA:
dtype, fill_value = dtypes.maybe_promote(self.dtype)
else:
dtype = self.dtype
width = min(abs(count), self.shape[axis])
dim_pad = (width, 0) if count >= 0 else (0, width)
pads = [(0, 0) if d != dim else dim_pad for d in self.dims]
data = duck_array_ops.pad(
trimmed_data.astype(dtype),
pads,
mode="constant",
constant_values=fill_value,
)
if is_duck_dask_array(data):
# chunked data should come out with the same chunks; this makes
# it feasible to combine shifted and unshifted data
# TODO: remove this once dask.array automatically aligns chunks
data = data.rechunk(self.data.chunks)
return type(self)(self.dims, data, self._attrs, fastpath=True)
def shift(self, shifts=None, fill_value=dtypes.NA, **shifts_kwargs):
"""
Return a new Variable with shifted data.
Parameters
----------
shifts : mapping of the form {dim: offset}
Integer offset to shift along each of the given dimensions.
Positive offsets shift to the right; negative offsets shift to the
left.
fill_value: scalar, optional
Value to use for newly missing values
**shifts_kwargs
The keyword arguments form of ``shifts``.
One of shifts or shifts_kwargs must be provided.
Returns
-------
shifted : Variable
Variable with the same dimensions and attributes but shifted data.
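        Examples
        --------
        A small illustrative example; integer data is promoted to float so the
        newly missing values can be filled with NaN:
        >>> v = Variable("x", [1, 2, 3])
        >>> v.shift(x=1)
        <xarray.Variable (x: 3)>
        array([nan,  1.,  2.])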
"""
shifts = either_dict_or_kwargs(shifts, shifts_kwargs, "shift")
result = self
for dim, count in shifts.items():
result = result._shift_one_dim(dim, count, fill_value=fill_value)
return result
def _pad_options_dim_to_index(
self,
pad_option: Mapping[Hashable, Union[int, Tuple[int, int]]],
fill_with_shape=False,
):
if fill_with_shape:
return [
(n, n) if d not in pad_option else pad_option[d]
for d, n in zip(self.dims, self.data.shape)
]
return [(0, 0) if d not in pad_option else pad_option[d] for d in self.dims]
def pad(
self,
pad_width: Mapping[Hashable, Union[int, Tuple[int, int]]] = None,
mode: str = "constant",
stat_length: Union[
int, Tuple[int, int], Mapping[Hashable, Tuple[int, int]]
] = None,
constant_values: Union[
int, Tuple[int, int], Mapping[Hashable, Tuple[int, int]]
] = None,
end_values: Union[
int, Tuple[int, int], Mapping[Hashable, Tuple[int, int]]
] = None,
reflect_type: str = None,
**pad_width_kwargs: Any,
):
"""
Return a new Variable with padded data.
Parameters
----------
pad_width : mapping of hashable to tuple of int
Mapping with the form of {dim: (pad_before, pad_after)}
describing the number of values padded along each dimension.
{dim: pad} is a shortcut for pad_before = pad_after = pad
mode : str, default: "constant"
See numpy / Dask docs
stat_length : int, tuple or mapping of hashable to tuple
Used in 'maximum', 'mean', 'median', and 'minimum'. Number of
values at edge of each axis used to calculate the statistic value.
constant_values : scalar, tuple or mapping of hashable to tuple
Used in 'constant'. The values to set the padded values for each
axis.
end_values : scalar, tuple or mapping of hashable to tuple
Used in 'linear_ramp'. The values used for the ending value of the
linear_ramp and that will form the edge of the padded array.
reflect_type : {"even", "odd"}, optional
Used in "reflect", and "symmetric". The "even" style is the
default with an unaltered reflection around the edge value. For
the "odd" style, the extended part of the array is created by
subtracting the reflected values from two times the edge value.
**pad_width_kwargs
One of pad_width or pad_width_kwargs must be provided.
Returns
-------
padded : Variable
Variable with the same dimensions and attributes but padded data.
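        Examples
        --------
        A small illustrative example using the default constant padding:
        >>> v = Variable("x", [1, 2, 3])
        >>> v.pad(x=1)
        <xarray.Variable (x: 5)>
        array([nan,  1.,  2.,  3., nan])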
"""
pad_width = either_dict_or_kwargs(pad_width, pad_width_kwargs, "pad")
# change default behaviour of pad with mode constant
if mode == "constant" and (
constant_values is None or constant_values is dtypes.NA
):
dtype, constant_values = dtypes.maybe_promote(self.dtype)
else:
dtype = self.dtype
# create pad_options_kwargs, numpy requires only relevant kwargs to be nonempty
if isinstance(stat_length, dict):
stat_length = self._pad_options_dim_to_index(
stat_length, fill_with_shape=True
)
if isinstance(constant_values, dict):
constant_values = self._pad_options_dim_to_index(constant_values)
if isinstance(end_values, dict):
end_values = self._pad_options_dim_to_index(end_values)
# workaround for bug in Dask's default value of stat_length https://github.com/dask/dask/issues/5303
if stat_length is None and mode in ["maximum", "mean", "median", "minimum"]:
stat_length = [(n, n) for n in self.data.shape] # type: ignore
# change integer values to a tuple of two of those values and change pad_width to index
for k, v in pad_width.items():
if isinstance(v, numbers.Number):
pad_width[k] = (v, v)
pad_width_by_index = self._pad_options_dim_to_index(pad_width)
# create pad_options_kwargs, numpy/dask requires only relevant kwargs to be nonempty
pad_option_kwargs = {}
if stat_length is not None:
pad_option_kwargs["stat_length"] = stat_length
if constant_values is not None:
pad_option_kwargs["constant_values"] = constant_values
if end_values is not None:
pad_option_kwargs["end_values"] = end_values
if reflect_type is not None:
pad_option_kwargs["reflect_type"] = reflect_type # type: ignore
array = duck_array_ops.pad(
self.data.astype(dtype, copy=False),
pad_width_by_index,
mode=mode,
**pad_option_kwargs,
)
return type(self)(self.dims, array)
def _roll_one_dim(self, dim, count):
axis = self.get_axis_num(dim)
count %= self.shape[axis]
if count != 0:
indices = [slice(-count, None), slice(None, -count)]
else:
indices = [slice(None)]
arrays = [self[(slice(None),) * axis + (idx,)].data for idx in indices]
data = duck_array_ops.concatenate(arrays, axis)
if is_duck_dask_array(data):
# chunked data should come out with the same chunks; this makes
# it feasible to combine shifted and unshifted data
# TODO: remove this once dask.array automatically aligns chunks
data = data.rechunk(self.data.chunks)
return type(self)(self.dims, data, self._attrs, fastpath=True)
def roll(self, shifts=None, **shifts_kwargs):
"""
        Return a new Variable with rolled data.
Parameters
----------
shifts : mapping of hashable to int
Integer offset to roll along each of the given dimensions.
Positive offsets roll to the right; negative offsets roll to the
left.
**shifts_kwargs
The keyword arguments form of ``shifts``.
One of shifts or shifts_kwargs must be provided.
Returns
-------
        rolled : Variable
Variable with the same dimensions and attributes but rolled data.
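        Examples
        --------
        A small illustrative example:
        >>> v = Variable("x", [1, 2, 3])
        >>> v.roll(x=1)
        <xarray.Variable (x: 3)>
        array([3, 1, 2])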
"""
shifts = either_dict_or_kwargs(shifts, shifts_kwargs, "roll")
result = self
for dim, count in shifts.items():
result = result._roll_one_dim(dim, count)
return result
def transpose(self, *dims) -> "Variable":
"""Return a new Variable object with transposed dimensions.
Parameters
----------
*dims : str, optional
By default, reverse the dimensions. Otherwise, reorder the
dimensions to this order.
Returns
-------
transposed : Variable
The returned object has transposed data and dimensions with the
same attributes as the original.
Notes
-----
This operation returns a view of this variable's data. It is
lazy for dask-backed Variables but not for numpy-backed Variables.
See Also
--------
numpy.transpose
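        Examples
        --------
        A small illustrative example; with no arguments the dimensions are
        reversed:
        >>> v = Variable(("x", "y"), [[1, 2, 3], [4, 5, 6]])
        >>> v.transpose()
        <xarray.Variable (y: 3, x: 2)>
        array([[1, 4],
               [2, 5],
               [3, 6]])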
"""
if len(dims) == 0:
dims = self.dims[::-1]
dims = tuple(infix_dims(dims, self.dims))
axes = self.get_axis_num(dims)
if len(dims) < 2 or dims == self.dims:
# no need to transpose if only one dimension
# or dims are in same order
return self.copy(deep=False)
data = as_indexable(self._data).transpose(axes)
return type(self)(dims, data, self._attrs, self._encoding, fastpath=True)
@property
def T(self) -> "Variable":
return self.transpose()
def set_dims(self, dims, shape=None):
"""Return a new variable with given set of dimensions.
        This method might be used to attach new dimension(s) to the variable.
When possible, this operation does not copy this variable's data.
Parameters
----------
dims : str or sequence of str or dict
Dimensions to include on the new variable. If a dict, values are
used to provide the sizes of new dimensions; otherwise, new
dimensions are inserted with length 1.
Returns
-------
Variable
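        Examples
        --------
        A small illustrative example; the new dimension is inserted with
        length 1:
        >>> v = Variable("x", [1, 2, 3])
        >>> v.set_dims(["y", "x"])
        <xarray.Variable (y: 1, x: 3)>
        array([[1, 2, 3]])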
"""
if isinstance(dims, str):
dims = [dims]
if shape is None and utils.is_dict_like(dims):
shape = dims.values()
missing_dims = set(self.dims) - set(dims)
if missing_dims:
raise ValueError(
"new dimensions %r must be a superset of "
"existing dimensions %r" % (dims, self.dims)
)
self_dims = set(self.dims)
expanded_dims = tuple(d for d in dims if d not in self_dims) + self.dims
if self.dims == expanded_dims:
# don't use broadcast_to unless necessary so the result remains
# writeable if possible
expanded_data = self.data
elif shape is not None:
dims_map = dict(zip(dims, shape))
tmp_shape = tuple(dims_map[d] for d in expanded_dims)
expanded_data = duck_array_ops.broadcast_to(self.data, tmp_shape)
else:
expanded_data = self.data[(None,) * (len(expanded_dims) - self.ndim)]
expanded_var = Variable(
expanded_dims, expanded_data, self._attrs, self._encoding, fastpath=True
)
return expanded_var.transpose(*dims)
def _stack_once(self, dims, new_dim):
if not set(dims) <= set(self.dims):
raise ValueError("invalid existing dimensions: %s" % dims)
if new_dim in self.dims:
raise ValueError(
"cannot create a new dimension with the same "
"name as an existing dimension"
)
if len(dims) == 0:
# don't stack
return self.copy(deep=False)
other_dims = [d for d in self.dims if d not in dims]
dim_order = other_dims + list(dims)
reordered = self.transpose(*dim_order)
new_shape = reordered.shape[: len(other_dims)] + (-1,)
new_data = reordered.data.reshape(new_shape)
new_dims = reordered.dims[: len(other_dims)] + (new_dim,)
return Variable(new_dims, new_data, self._attrs, self._encoding, fastpath=True)
def stack(self, dimensions=None, **dimensions_kwargs):
"""
Stack any number of existing dimensions into a single new dimension.
New dimensions will be added at the end, and the order of the data
along each new dimension will be in contiguous (C) order.
Parameters
----------
dimensions : mapping of hashable to tuple of hashable
Mapping of form new_name=(dim1, dim2, ...) describing the
names of new dimensions, and the existing dimensions that
they replace.
**dimensions_kwargs
The keyword arguments form of ``dimensions``.
One of dimensions or dimensions_kwargs must be provided.
Returns
-------
stacked : Variable
Variable with the same attributes but stacked data.
See also
--------
Variable.unstack
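        Examples
        --------
        A small illustrative example:
        >>> v = Variable(("x", "y"), [[1, 2], [3, 4]])
        >>> v.stack(z=("x", "y"))
        <xarray.Variable (z: 4)>
        array([1, 2, 3, 4])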
"""
dimensions = either_dict_or_kwargs(dimensions, dimensions_kwargs, "stack")
result = self
for new_dim, dims in dimensions.items():
result = result._stack_once(dims, new_dim)
return result
def _unstack_once(self, dims, old_dim):
new_dim_names = tuple(dims.keys())
new_dim_sizes = tuple(dims.values())
if old_dim not in self.dims:
raise ValueError("invalid existing dimension: %s" % old_dim)
if set(new_dim_names).intersection(self.dims):
raise ValueError(
"cannot create a new dimension with the same "
"name as an existing dimension"
)
if np.prod(new_dim_sizes) != self.sizes[old_dim]:
raise ValueError(
"the product of the new dimension sizes must "
"equal the size of the old dimension"
)
other_dims = [d for d in self.dims if d != old_dim]
dim_order = other_dims + [old_dim]
reordered = self.transpose(*dim_order)
new_shape = reordered.shape[: len(other_dims)] + new_dim_sizes
new_data = reordered.data.reshape(new_shape)
new_dims = reordered.dims[: len(other_dims)] + new_dim_names
return Variable(new_dims, new_data, self._attrs, self._encoding, fastpath=True)
def unstack(self, dimensions=None, **dimensions_kwargs):
"""
Unstack an existing dimension into multiple new dimensions.
New dimensions will be added at the end, and the order of the data
along each new dimension will be in contiguous (C) order.
Parameters
----------
dimensions : mapping of hashable to mapping of hashable to int
Mapping of the form old_dim={dim1: size1, ...} describing the
names of existing dimensions, and the new dimensions and sizes
that they map to.
**dimensions_kwargs
The keyword arguments form of ``dimensions``.
One of dimensions or dimensions_kwargs must be provided.
Returns
-------
unstacked : Variable
Variable with the same attributes but unstacked data.
See also
--------
Variable.stack
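        Examples
        --------
        A small illustrative example:
        >>> v = Variable("z", [1, 2, 3, 4])
        >>> v.unstack(z={"x": 2, "y": 2})
        <xarray.Variable (x: 2, y: 2)>
        array([[1, 2],
               [3, 4]])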
"""
dimensions = either_dict_or_kwargs(dimensions, dimensions_kwargs, "unstack")
result = self
for old_dim, dims in dimensions.items():
result = result._unstack_once(dims, old_dim)
return result
def fillna(self, value):
return ops.fillna(self, value)
def where(self, cond, other=dtypes.NA):
return ops.where_method(self, cond, other)
def reduce(
self,
func,
dim=None,
axis=None,
keep_attrs=None,
keepdims=False,
**kwargs,
):
"""Reduce this array by applying `func` along some dimension(s).
Parameters
----------
func : callable
Function which can be called in the form
`func(x, axis=axis, **kwargs)` to return the result of reducing an
np.ndarray over an integer valued axis.
dim : str or sequence of str, optional
Dimension(s) over which to apply `func`.
axis : int or sequence of int, optional
Axis(es) over which to apply `func`. Only one of the 'dim'
and 'axis' arguments can be supplied. If neither are supplied, then
the reduction is calculated over the flattened array (by calling
`func(x)` without an axis argument).
keep_attrs : bool, optional
If True, the variable's attributes (`attrs`) will be copied from
the original object to the new one. If False (default), the new
object will be returned without attributes.
keepdims : bool, default: False
If True, the dimensions which are reduced are left in the result
as dimensions of size one
**kwargs : dict
Additional keyword arguments passed on to `func`.
Returns
-------
reduced : Array
Array with summarized data and the indicated dimension(s)
removed.
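        Examples
        --------
        A small illustrative example reducing with a plain numpy function:
        >>> v = Variable(("x", "y"), [[1, 2, 3], [4, 5, 6]])
        >>> v.reduce(np.sum, dim="y")
        <xarray.Variable (x: 2)>
        array([ 6, 15])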
"""
if dim == ...:
dim = None
if dim is not None and axis is not None:
raise ValueError("cannot supply both 'axis' and 'dim' arguments")
if dim is not None:
axis = self.get_axis_num(dim)
with warnings.catch_warnings():
warnings.filterwarnings(
"ignore", r"Mean of empty slice", category=RuntimeWarning
)
if axis is not None:
data = func(self.data, axis=axis, **kwargs)
else:
data = func(self.data, **kwargs)
if getattr(data, "shape", ()) == self.shape:
dims = self.dims
else:
removed_axes = (
range(self.ndim) if axis is None else np.atleast_1d(axis) % self.ndim
)
if keepdims:
# Insert np.newaxis for removed dims
slices = tuple(
np.newaxis if i in removed_axes else slice(None, None)
for i in range(self.ndim)
)
if getattr(data, "shape", None) is None:
# Reduce has produced a scalar value, not an array-like
data = np.asanyarray(data)[slices]
else:
data = data[slices]
dims = self.dims
else:
dims = [
adim for n, adim in enumerate(self.dims) if n not in removed_axes
]
if keep_attrs is None:
keep_attrs = _get_keep_attrs(default=False)
attrs = self._attrs if keep_attrs else None
return Variable(dims, data, attrs=attrs)
@classmethod
def concat(cls, variables, dim="concat_dim", positions=None, shortcut=False):
"""Concatenate variables along a new or existing dimension.
Parameters
----------
variables : iterable of Variable
Arrays to stack together. Each variable is expected to have
matching dimensions and shape except for along the stacked
dimension.
dim : str or DataArray, optional
Name of the dimension to stack along. This can either be a new
dimension name, in which case it is added along axis=0, or an
existing dimension name, in which case the location of the
dimension is unchanged. Where to insert the new dimension is
determined by the first variable.
positions : None or list of array-like, optional
List of integer arrays which specifies the integer positions to
which to assign each dataset along the concatenated dimension.
If not supplied, objects are concatenated in the provided order.
shortcut : bool, optional
This option is used internally to speed-up groupby operations.
If `shortcut` is True, some checks of internal consistency between
arrays to concatenate are skipped.
Returns
-------
stacked : Variable
Concatenated Variable formed by stacking all the supplied variables
along the given dimension.
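        Examples
        --------
        A small illustrative example concatenating along an existing dimension:
        >>> v1 = Variable("x", [1, 2])
        >>> v2 = Variable("x", [3, 4])
        >>> Variable.concat([v1, v2], dim="x")
        <xarray.Variable (x: 4)>
        array([1, 2, 3, 4])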
"""
if not isinstance(dim, str):
(dim,) = dim.dims
# can't do this lazily: we need to loop through variables at least
# twice
variables = list(variables)
first_var = variables[0]
arrays = [v.data for v in variables]
if dim in first_var.dims:
axis = first_var.get_axis_num(dim)
dims = first_var.dims
data = duck_array_ops.concatenate(arrays, axis=axis)
if positions is not None:
# TODO: deprecate this option -- we don't need it for groupby
# any more.
indices = nputils.inverse_permutation(np.concatenate(positions))
data = duck_array_ops.take(data, indices, axis=axis)
else:
axis = 0
dims = (dim,) + first_var.dims
data = duck_array_ops.stack(arrays, axis=axis)
attrs = dict(first_var.attrs)
encoding = dict(first_var.encoding)
if not shortcut:
for var in variables:
if var.dims != first_var.dims:
raise ValueError(
f"Variable has dimensions {list(var.dims)} but first Variable has dimensions {list(first_var.dims)}"
)
return cls(dims, data, attrs, encoding)
def equals(self, other, equiv=duck_array_ops.array_equiv):
"""True if two Variables have the same dimensions and values;
otherwise False.
Variables can still be equal (like pandas objects) if they have NaN
values in the same locations.
This method is necessary because `v1 == v2` for Variables
does element-wise comparisons (like numpy.ndarrays).
"""
other = getattr(other, "variable", other)
try:
return self.dims == other.dims and (
self._data is other._data or equiv(self.data, other.data)
)
except (TypeError, AttributeError):
return False
def broadcast_equals(self, other, equiv=duck_array_ops.array_equiv):
"""True if two Variables have the values after being broadcast against
each other; otherwise False.
Variables can still be equal (like pandas objects) if they have NaN
values in the same locations.
"""
try:
self, other = broadcast_variables(self, other)
except (ValueError, AttributeError):
return False
return self.equals(other, equiv=equiv)
def identical(self, other, equiv=duck_array_ops.array_equiv):
"""Like equals, but also checks attributes."""
try:
return utils.dict_equiv(self.attrs, other.attrs) and self.equals(
other, equiv=equiv
)
except (TypeError, AttributeError):
return False
def no_conflicts(self, other, equiv=duck_array_ops.array_notnull_equiv):
"""True if the intersection of two Variable's non-null data is
equal; otherwise false.
Variables can thus still be equal if there are locations where either,
or both, contain NaN values.
"""
return self.broadcast_equals(other, equiv=equiv)
def quantile(
self, q, dim=None, interpolation="linear", keep_attrs=None, skipna=True
):
"""Compute the qth quantile of the data along the specified dimension.
Returns the qth quantiles(s) of the array elements.
Parameters
----------
q : float or sequence of float
Quantile to compute, which must be between 0 and 1
inclusive.
dim : str or sequence of str, optional
Dimension(s) over which to apply quantile.
interpolation : {"linear", "lower", "higher", "midpoint", "nearest"}, default: "linear"
This optional parameter specifies the interpolation method to
use when the desired quantile lies between two data points
``i < j``:
* linear: ``i + (j - i) * fraction``, where ``fraction`` is
the fractional part of the index surrounded by ``i`` and
``j``.
* lower: ``i``.
* higher: ``j``.
* nearest: ``i`` or ``j``, whichever is nearest.
* midpoint: ``(i + j) / 2``.
keep_attrs : bool, optional
If True, the variable's attributes (`attrs`) will be copied from
the original object to the new one. If False (default), the new
object will be returned without attributes.
Returns
-------
quantiles : Variable
If `q` is a single quantile, then the result
is a scalar. If multiple percentiles are given, first axis of
the result corresponds to the quantile and a quantile dimension
is added to the return array. The other dimensions are the
dimensions that remain after the reduction of the array.
See Also
--------
numpy.nanquantile, pandas.Series.quantile, Dataset.quantile,
DataArray.quantile
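        Examples
        --------
        A small illustrative example computing the median:
        >>> v = Variable("x", [0.0, 1.0, 2.0, 3.0])
        >>> v.quantile(0.5)
        <xarray.Variable ()>
        array(1.5)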
"""
from .computation import apply_ufunc
_quantile_func = np.nanquantile if skipna else np.quantile
if keep_attrs is None:
keep_attrs = _get_keep_attrs(default=False)
scalar = utils.is_scalar(q)
q = np.atleast_1d(np.asarray(q, dtype=np.float64))
if dim is None:
dim = self.dims
if utils.is_scalar(dim):
dim = [dim]
def _wrapper(npa, **kwargs):
# move quantile axis to end. required for apply_ufunc
return np.moveaxis(_quantile_func(npa, **kwargs), 0, -1)
axis = np.arange(-1, -1 * len(dim) - 1, -1)
result = apply_ufunc(
_wrapper,
self,
input_core_dims=[dim],
exclude_dims=set(dim),
output_core_dims=[["quantile"]],
output_dtypes=[np.float64],
dask_gufunc_kwargs=dict(output_sizes={"quantile": len(q)}),
dask="parallelized",
kwargs={"q": q, "axis": axis, "interpolation": interpolation},
)
# for backward compatibility
result = result.transpose("quantile", ...)
if scalar:
result = result.squeeze("quantile")
if keep_attrs:
result.attrs = self._attrs
return result
def rank(self, dim, pct=False):
"""Ranks the data.
Equal values are assigned a rank that is the average of the ranks that
would have been otherwise assigned to all of the values within that
set. Ranks begin at 1, not 0. If `pct`, computes percentage ranks.
NaNs in the input array are returned as NaNs.
The `bottleneck` library is required.
Parameters
----------
dim : str
Dimension over which to compute rank.
pct : bool, optional
If True, compute percentage ranks, otherwise compute integer ranks.
Returns
-------
ranked : Variable
See Also
--------
Dataset.rank, DataArray.rank
"""
import bottleneck as bn
data = self.data
if is_duck_dask_array(data):
raise TypeError(
"rank does not work for arrays stored as dask "
"arrays. Load the data via .compute() or .load() "
"prior to calling this method."
)
elif not isinstance(data, np.ndarray):
raise TypeError(
"rank is not implemented for {} objects.".format(type(data))
)
axis = self.get_axis_num(dim)
func = bn.nanrankdata if self.dtype.kind == "f" else bn.rankdata
ranked = func(data, axis=axis)
if pct:
count = np.sum(~np.isnan(data), axis=axis, keepdims=True)
ranked /= count
return Variable(self.dims, ranked)
def rolling_window(
self, dim, window, window_dim, center=False, fill_value=dtypes.NA
):
"""
        Make a rolling window along dim and add a new dimension at the last place.
Parameters
----------
dim : str
Dimension over which to compute rolling_window.
For nd-rolling, should be list of dimensions.
window : int
Window size of the rolling
For nd-rolling, should be list of integers.
window_dim : str
New name of the window dimension.
            For nd-rolling, should be list of strings.
center : bool, default: False
If True, pad fill_value for both ends. Otherwise, pad in the head
of the axis.
fill_value
value to be filled.
Returns
-------
        Variable that is a view of the original array with an added dimension of
size w.
The return dim: self.dims + (window_dim, )
The return shape: self.shape + (window, )
Examples
--------
>>> v = Variable(("a", "b"), np.arange(8).reshape((2, 4)))
>>> v.rolling_window("b", 3, "window_dim")
<xarray.Variable (a: 2, b: 4, window_dim: 3)>
array([[[nan, nan, 0.],
[nan, 0., 1.],
[ 0., 1., 2.],
[ 1., 2., 3.]],
<BLANKLINE>
[[nan, nan, 4.],
[nan, 4., 5.],
[ 4., 5., 6.],
[ 5., 6., 7.]]])
>>> v.rolling_window("b", 3, "window_dim", center=True)
<xarray.Variable (a: 2, b: 4, window_dim: 3)>
array([[[nan, 0., 1.],
[ 0., 1., 2.],
[ 1., 2., 3.],
[ 2., 3., nan]],
<BLANKLINE>
[[nan, 4., 5.],
[ 4., 5., 6.],
[ 5., 6., 7.],
[ 6., 7., nan]]])
"""
if fill_value is dtypes.NA: # np.nan is passed
dtype, fill_value = dtypes.maybe_promote(self.dtype)
array = self.astype(dtype, copy=False).data
else:
dtype = self.dtype
array = self.data
if isinstance(dim, list):
assert len(dim) == len(window)
assert len(dim) == len(window_dim)
assert len(dim) == len(center)
else:
dim = [dim]
window = [window]
window_dim = [window_dim]
center = [center]
axis = [self.get_axis_num(d) for d in dim]
new_dims = self.dims + tuple(window_dim)
return Variable(
new_dims,
duck_array_ops.rolling_window(
array, axis=axis, window=window, center=center, fill_value=fill_value
),
)
def coarsen(
self, windows, func, boundary="exact", side="left", keep_attrs=None, **kwargs
):
"""
Apply reduction function.
"""
windows = {k: v for k, v in windows.items() if k in self.dims}
if not windows:
return self.copy()
if keep_attrs is None:
keep_attrs = _get_keep_attrs(default=False)
if keep_attrs:
_attrs = self.attrs
else:
_attrs = None
reshaped, axes = self._coarsen_reshape(windows, boundary, side)
if isinstance(func, str):
name = func
func = getattr(duck_array_ops, name, None)
if func is None:
raise NameError(f"{name} is not a valid method.")
return self._replace(data=func(reshaped, axis=axes, **kwargs), attrs=_attrs)
def _coarsen_reshape(self, windows, boundary, side):
"""
Construct a reshaped-array for coarsen
"""
if not utils.is_dict_like(boundary):
boundary = {d: boundary for d in windows.keys()}
if not utils.is_dict_like(side):
side = {d: side for d in windows.keys()}
# remove unrelated dimensions
boundary = {k: v for k, v in boundary.items() if k in windows}
side = {k: v for k, v in side.items() if k in windows}
for d, window in windows.items():
if window <= 0:
raise ValueError(f"window must be > 0. Given {window}")
variable = self
for d, window in windows.items():
# trim or pad the object
size = variable.shape[self._get_axis_num(d)]
n = int(size / window)
if boundary[d] == "exact":
if n * window != size:
raise ValueError(
"Could not coarsen a dimension of size {} with "
"window {}".format(size, window)
)
elif boundary[d] == "trim":
if side[d] == "left":
variable = variable.isel({d: slice(0, window * n)})
else:
excess = size - window * n
variable = variable.isel({d: slice(excess, None)})
elif boundary[d] == "pad": # pad
pad = window * n - size
if pad < 0:
pad += window
if side[d] == "left":
pad_width = {d: (0, pad)}
else:
pad_width = {d: (pad, 0)}
variable = variable.pad(pad_width, mode="constant")
else:
raise TypeError(
"{} is invalid for boundary. Valid option is 'exact', "
"'trim' and 'pad'".format(boundary[d])
)
shape = []
axes = []
axis_count = 0
for i, d in enumerate(variable.dims):
if d in windows:
size = variable.shape[i]
shape.append(int(size / windows[d]))
shape.append(windows[d])
axis_count += 1
axes.append(i + axis_count)
else:
shape.append(variable.shape[i])
return variable.data.reshape(shape), tuple(axes)
def isnull(self, keep_attrs: bool = None):
"""Test each value in the array for whether it is a missing value.
Returns
-------
isnull : Variable
Same type and shape as object, but the dtype of the data is bool.
See Also
--------
pandas.isnull
Examples
--------
>>> var = xr.Variable("x", [1, np.nan, 3])
>>> var
<xarray.Variable (x: 3)>
array([ 1., nan, 3.])
>>> var.isnull()
<xarray.Variable (x: 3)>
array([False, True, False])
"""
from .computation import apply_ufunc
if keep_attrs is None:
keep_attrs = _get_keep_attrs(default=False)
return apply_ufunc(
duck_array_ops.isnull,
self,
dask="allowed",
keep_attrs=keep_attrs,
)
def notnull(self, keep_attrs: bool = None):
"""Test each value in the array for whether it is not a missing value.
Returns
-------
notnull : Variable
Same type and shape as object, but the dtype of the data is bool.
See Also
--------
pandas.notnull
Examples
--------
>>> var = xr.Variable("x", [1, np.nan, 3])
>>> var
<xarray.Variable (x: 3)>
array([ 1., nan, 3.])
>>> var.notnull()
<xarray.Variable (x: 3)>
array([ True, False, True])
"""
from .computation import apply_ufunc
if keep_attrs is None:
keep_attrs = _get_keep_attrs(default=False)
return apply_ufunc(
duck_array_ops.notnull,
self,
dask="allowed",
keep_attrs=keep_attrs,
)
@property
def real(self):
return type(self)(self.dims, self.data.real, self._attrs)
@property
def imag(self):
return type(self)(self.dims, self.data.imag, self._attrs)
def __array_wrap__(self, obj, context=None):
return Variable(self.dims, obj)
@staticmethod
def _unary_op(f):
@functools.wraps(f)
def func(self, *args, **kwargs):
keep_attrs = kwargs.pop("keep_attrs", None)
if keep_attrs is None:
keep_attrs = _get_keep_attrs(default=True)
with np.errstate(all="ignore"):
result = self.__array_wrap__(f(self.data, *args, **kwargs))
if keep_attrs:
result.attrs = self.attrs
return result
return func
@staticmethod
def _binary_op(f, reflexive=False, **ignored_kwargs):
@functools.wraps(f)
def func(self, other):
if isinstance(other, (xr.DataArray, xr.Dataset)):
return NotImplemented
self_data, other_data, dims = _broadcast_compat_data(self, other)
keep_attrs = _get_keep_attrs(default=False)
attrs = self._attrs if keep_attrs else None
with np.errstate(all="ignore"):
new_data = (
f(self_data, other_data)
if not reflexive
else f(other_data, self_data)
)
result = Variable(dims, new_data, attrs=attrs)
return result
return func
@staticmethod
def _inplace_binary_op(f):
@functools.wraps(f)
def func(self, other):
if isinstance(other, xr.Dataset):
raise TypeError("cannot add a Dataset to a Variable in-place")
self_data, other_data, dims = _broadcast_compat_data(self, other)
if dims != self.dims:
raise ValueError("dimensions cannot change for in-place operations")
with np.errstate(all="ignore"):
self.values = f(self_data, other_data)
return self
return func
def _to_numeric(self, offset=None, datetime_unit=None, dtype=float):
"""A (private) method to convert datetime array to numeric dtype
See duck_array_ops.datetime_to_numeric
"""
numeric_array = duck_array_ops.datetime_to_numeric(
self.data, offset, datetime_unit, dtype
)
return type(self)(self.dims, numeric_array, self._attrs)
def _unravel_argminmax(
self,
argminmax: str,
dim: Union[Hashable, Sequence[Hashable], None],
axis: Union[int, None],
keep_attrs: Optional[bool],
skipna: Optional[bool],
) -> Union["Variable", Dict[Hashable, "Variable"]]:
"""Apply argmin or argmax over one or more dimensions, returning the result as a
dict of DataArray that can be passed directly to isel.
"""
if dim is None and axis is None:
warnings.warn(
"Behaviour of argmin/argmax with neither dim nor axis argument will "
"change to return a dict of indices of each dimension. To get a "
"single, flat index, please use np.argmin(da.data) or "
"np.argmax(da.data) instead of da.argmin() or da.argmax().",
DeprecationWarning,
stacklevel=3,
)
argminmax_func = getattr(duck_array_ops, argminmax)
if dim is ...:
# In future, should do this also when (dim is None and axis is None)
dim = self.dims
if (
dim is None
or axis is not None
or not isinstance(dim, Sequence)
or isinstance(dim, str)
):
# Return int index if single dimension is passed, and is not part of a
# sequence
return self.reduce(
argminmax_func, dim=dim, axis=axis, keep_attrs=keep_attrs, skipna=skipna
)
# Get a name for the new dimension that does not conflict with any existing
# dimension
newdimname = "_unravel_argminmax_dim_0"
count = 1
while newdimname in self.dims:
newdimname = f"_unravel_argminmax_dim_{count}"
count += 1
stacked = self.stack({newdimname: dim})
result_dims = stacked.dims[:-1]
reduce_shape = tuple(self.sizes[d] for d in dim)
result_flat_indices = stacked.reduce(argminmax_func, axis=-1, skipna=skipna)
result_unravelled_indices = duck_array_ops.unravel_index(
result_flat_indices.data, reduce_shape
)
result = {
d: Variable(dims=result_dims, data=i)
for d, i in zip(dim, result_unravelled_indices)
}
if keep_attrs is None:
keep_attrs = _get_keep_attrs(default=False)
if keep_attrs:
for v in result.values():
v.attrs = self.attrs
return result
def argmin(
self,
dim: Union[Hashable, Sequence[Hashable]] = None,
axis: int = None,
keep_attrs: bool = None,
skipna: bool = None,
) -> Union["Variable", Dict[Hashable, "Variable"]]:
"""Index or indices of the minimum of the Variable over one or more dimensions.
If a sequence is passed to 'dim', then result returned as dict of Variables,
which can be passed directly to isel(). If a single str is passed to 'dim' then
returns a Variable with dtype int.
If there are multiple minima, the indices of the first one found will be
returned.
Parameters
----------
dim : hashable, sequence of hashable or ..., optional
The dimensions over which to find the minimum. By default, finds minimum over
all dimensions - for now returning an int for backward compatibility, but
this is deprecated, in future will return a dict with indices for all
dimensions; to return a dict with all dimensions now, pass '...'.
axis : int, optional
Axis over which to apply `argmin`. Only one of the 'dim' and 'axis' arguments
can be supplied.
keep_attrs : bool, optional
If True, the attributes (`attrs`) will be copied from the original
object to the new one. If False (default), the new object will be
returned without attributes.
skipna : bool, optional
If True, skip missing values (as marked by NaN). By default, only
skips missing values for float dtypes; other dtypes either do not
have a sentinel missing value (int) or skipna=True has not been
implemented (object, datetime64 or timedelta64).
Returns
-------
result : Variable or dict of Variable
See also
--------
DataArray.argmin, DataArray.idxmin
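        Examples
        --------
        A small illustrative example with a single dimension passed to ``dim``:
        >>> v = Variable(("x", "y"), [[3, 1], [2, 0]])
        >>> v.argmin(dim="y")
        <xarray.Variable (x: 2)>
        array([1, 1])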
"""
return self._unravel_argminmax("argmin", dim, axis, keep_attrs, skipna)
def argmax(
self,
dim: Union[Hashable, Sequence[Hashable]] = None,
axis: int = None,
keep_attrs: bool = None,
skipna: bool = None,
) -> Union["Variable", Dict[Hashable, "Variable"]]:
"""Index or indices of the maximum of the Variable over one or more dimensions.
If a sequence is passed to 'dim', then result returned as dict of Variables,
which can be passed directly to isel(). If a single str is passed to 'dim' then
returns a Variable with dtype int.
If there are multiple maxima, the indices of the first one found will be
returned.
Parameters
----------
dim : hashable, sequence of hashable or ..., optional
The dimensions over which to find the maximum. By default, finds maximum over
all dimensions - for now returning an int for backward compatibility, but
this is deprecated, in future will return a dict with indices for all
dimensions; to return a dict with all dimensions now, pass '...'.
axis : int, optional
Axis over which to apply `argmin`. Only one of the 'dim' and 'axis' arguments
can be supplied.
keep_attrs : bool, optional
If True, the attributes (`attrs`) will be copied from the original
object to the new one. If False (default), the new object will be
returned without attributes.
skipna : bool, optional
If True, skip missing values (as marked by NaN). By default, only
skips missing values for float dtypes; other dtypes either do not
have a sentinel missing value (int) or skipna=True has not been
implemented (object, datetime64 or timedelta64).
Returns
-------
result : Variable or dict of Variable
See also
--------
DataArray.argmax, DataArray.idxmax
"""
return self._unravel_argminmax("argmax", dim, axis, keep_attrs, skipna)
ops.inject_all_ops_and_reduce_methods(Variable)
class IndexVariable(Variable):
"""Wrapper for accommodating a pandas.Index in an xarray.Variable.
    IndexVariable preserves loaded values in the form of a pandas.Index instead
of a NumPy array. Hence, their values are immutable and must always be one-
dimensional.
They also have a name property, which is the name of their sole dimension
unless another name is given.
"""
__slots__ = ()
def __init__(self, dims, data, attrs=None, encoding=None, fastpath=False):
super().__init__(dims, data, attrs, encoding, fastpath)
if self.ndim != 1:
raise ValueError("%s objects must be 1-dimensional" % type(self).__name__)
# Unlike in Variable, always eagerly load values into memory
if not isinstance(self._data, PandasIndexAdapter):
self._data = PandasIndexAdapter(self._data)
def __dask_tokenize__(self):
from dask.base import normalize_token
# Don't waste time converting pd.Index to np.ndarray
return normalize_token((type(self), self._dims, self._data.array, self._attrs))
def load(self):
# data is already loaded into memory for IndexVariable
return self
# https://github.com/python/mypy/issues/1465
@Variable.data.setter # type: ignore
def data(self, data):
raise ValueError(
f"Cannot assign to the .data attribute of dimension coordinate a.k.a IndexVariable {self.name!r}. "
f"Please use DataArray.assign_coords, Dataset.assign_coords or Dataset.assign as appropriate."
)
@Variable.values.setter # type: ignore
def values(self, values):
raise ValueError(
f"Cannot assign to the .values attribute of dimension coordinate a.k.a IndexVariable {self.name!r}. "
f"Please use DataArray.assign_coords, Dataset.assign_coords or Dataset.assign as appropriate."
)
def chunk(self, chunks={}, name=None, lock=False):
# Dummy - do not chunk. This method is invoked e.g. by Dataset.chunk()
return self.copy(deep=False)
def _as_sparse(self, sparse_format=_default, fill_value=_default):
# Dummy
return self.copy(deep=False)
def _to_dense(self):
# Dummy
return self.copy(deep=False)
def _finalize_indexing_result(self, dims, data):
if getattr(data, "ndim", 0) != 1:
# returns Variable rather than IndexVariable if multi-dimensional
return Variable(dims, data, self._attrs, self._encoding)
else:
return type(self)(dims, data, self._attrs, self._encoding, fastpath=True)
def __setitem__(self, key, value):
raise TypeError("%s values cannot be modified" % type(self).__name__)
@classmethod
def concat(cls, variables, dim="concat_dim", positions=None, shortcut=False):
"""Specialized version of Variable.concat for IndexVariable objects.
This exists because we want to avoid converting Index objects to NumPy
arrays, if possible.
"""
if not isinstance(dim, str):
(dim,) = dim.dims
variables = list(variables)
first_var = variables[0]
if any(not isinstance(v, cls) for v in variables):
raise TypeError(
"IndexVariable.concat requires that all input "
"variables be IndexVariable objects"
)
indexes = [v._data.array for v in variables]
if not indexes:
data = []
else:
data = indexes[0].append(indexes[1:])
if positions is not None:
indices = nputils.inverse_permutation(np.concatenate(positions))
data = data.take(indices)
attrs = dict(first_var.attrs)
if not shortcut:
for var in variables:
if var.dims != first_var.dims:
raise ValueError("inconsistent dimensions")
utils.remove_incompatible_items(attrs, var.attrs)
return cls(first_var.dims, data, attrs)
def copy(self, deep=True, data=None):
"""Returns a copy of this object.
`deep` is ignored since data is stored in the form of
pandas.Index, which is already immutable. Dimensions, attributes
and encodings are always copied.
Use `data` to create a new object with the same structure as
original but entirely new data.
Parameters
----------
deep : bool, optional
Deep is ignored when data is given. Whether the data array is
loaded into memory and copied onto the new object. Default is True.
data : array_like, optional
Data to use in the new object. Must have same shape as original.
Returns
-------
object : Variable
New object with dimensions, attributes, encodings, and optionally
data copied from original.
"""
if data is None:
data = self._data.copy(deep=deep)
else:
data = as_compatible_data(data)
if self.shape != data.shape:
raise ValueError(
"Data shape {} must match shape of object {}".format(
data.shape, self.shape
)
)
return type(self)(self.dims, data, self._attrs, self._encoding, fastpath=True)
def equals(self, other, equiv=None):
# if equiv is specified, super up
if equiv is not None:
return super().equals(other, equiv)
# otherwise use the native index equals, rather than looking at _data
other = getattr(other, "variable", other)
try:
return self.dims == other.dims and self._data_equals(other)
except (TypeError, AttributeError):
return False
def _data_equals(self, other):
return self.to_index().equals(other.to_index())
def to_index_variable(self):
"""Return this variable as an xarray.IndexVariable"""
return self
to_coord = utils.alias(to_index_variable, "to_coord")
def to_index(self):
"""Convert this variable to a pandas.Index"""
# n.b. creating a new pandas.Index from an old pandas.Index is
# basically free as pandas.Index objects are immutable
assert self.ndim == 1
index = self._data.array
if isinstance(index, pd.MultiIndex):
# set default names for multi-index unnamed levels so that
# we can safely rename dimension / coordinate later
valid_level_names = [
name or "{}_level_{}".format(self.dims[0], i)
for i, name in enumerate(index.names)
]
index = index.set_names(valid_level_names)
else:
index = index.set_names(self.name)
return index
@property
def level_names(self):
"""Return MultiIndex level names or None if this IndexVariable has no
MultiIndex.
"""
index = self.to_index()
if isinstance(index, pd.MultiIndex):
return index.names
else:
return None
def get_level_variable(self, level):
"""Return a new IndexVariable from a given MultiIndex level."""
if self.level_names is None:
raise ValueError("IndexVariable %r has no MultiIndex" % self.name)
index = self.to_index()
return type(self)(self.dims, index.get_level_values(level))
@property
def name(self):
return self.dims[0]
@name.setter
def name(self, value):
raise AttributeError("cannot modify name of IndexVariable in-place")
# for backwards compatibility
Coordinate = utils.alias(IndexVariable, "Coordinate")
def _unified_dims(variables):
# validate dimensions
all_dims = {}
for var in variables:
var_dims = var.dims
if len(set(var_dims)) < len(var_dims):
raise ValueError(
"broadcasting cannot handle duplicate "
"dimensions: %r" % list(var_dims)
)
for d, s in zip(var_dims, var.shape):
if d not in all_dims:
all_dims[d] = s
elif all_dims[d] != s:
raise ValueError(
"operands cannot be broadcast together "
"with mismatched lengths for dimension %r: %s"
% (d, (all_dims[d], s))
)
return all_dims
def _broadcast_compat_variables(*variables):
"""Create broadcast compatible variables, with the same dimensions.
    Unlike the result of broadcast_variables(), some variables may have
    dimensions of size 1 instead of the size of the broadcast dimension.
"""
dims = tuple(_unified_dims(variables))
return tuple(var.set_dims(dims) if var.dims != dims else var for var in variables)
def broadcast_variables(*variables):
"""Given any number of variables, return variables with matching dimensions
and broadcast data.
The data on the returned variables will be a view of the data on the
corresponding original arrays, but dimensions will be reordered and
inserted so that both broadcast arrays have the same dimensions. The new
dimensions are sorted in order of appearance in the first variable's
dimensions followed by the second variable's dimensions.
"""
dims_map = _unified_dims(variables)
dims_tuple = tuple(dims_map)
return tuple(
var.set_dims(dims_map) if var.dims != dims_tuple else var for var in variables
)
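# Usage sketch for broadcast_variables (illustrative only: the helper name
# below is not part of the real module, and it assumes numpy is importable).
def _example_broadcast_variables():
    import numpy as np

    a = Variable(("x",), np.array([1, 2, 3]))
    b = Variable(("y",), np.array([10, 20]))
    a2, b2 = broadcast_variables(a, b)
    # Both results share the dimensions ("x", "y") and shape (3, 2); the data
    # are broadcast views of the originals rather than copies.
    return a2.dims, b2.dims, a2.shape, b2.shape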
def _broadcast_compat_data(self, other):
if all(hasattr(other, attr) for attr in ["dims", "data", "shape", "encoding"]):
# `other` satisfies the necessary Variable API for broadcast_variables
new_self, new_other = _broadcast_compat_variables(self, other)
self_data = new_self.data
other_data = new_other.data
dims = new_self.dims
else:
# rely on numpy broadcasting rules
self_data = self.data
other_data = other
dims = self.dims
return self_data, other_data, dims
def concat(variables, dim="concat_dim", positions=None, shortcut=False):
"""Concatenate variables along a new or existing dimension.
Parameters
----------
variables : iterable of Variable
Arrays to stack together. Each variable is expected to have
matching dimensions and shape except for along the stacked
dimension.
dim : str or DataArray, optional
Name of the dimension to stack along. This can either be a new
dimension name, in which case it is added along axis=0, or an
existing dimension name, in which case the location of the
dimension is unchanged. Where to insert the new dimension is
determined by the first variable.
positions : None or list of array-like, optional
List of integer arrays which specifies the integer positions to which
to assign each dataset along the concatenated dimension. If not
supplied, objects are concatenated in the provided order.
shortcut : bool, optional
This option is used internally to speed-up groupby operations.
If `shortcut` is True, some checks of internal consistency between
arrays to concatenate are skipped.
Returns
-------
stacked : Variable
Concatenated Variable formed by stacking all the supplied variables
along the given dimension.
"""
variables = list(variables)
if all(isinstance(v, IndexVariable) for v in variables):
return IndexVariable.concat(variables, dim, positions, shortcut)
else:
return Variable.concat(variables, dim, positions, shortcut)
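# Usage sketch for concat (illustrative only: the helper name below is not
# part of the real module, and it assumes numpy is importable).
def _example_concat():
    import numpy as np

    v1 = Variable(("x",), np.array([1, 2]))
    v2 = Variable(("x",), np.array([3, 4]))
    # Concatenating along an existing dimension extends it: shape (4,).
    along_x = concat([v1, v2], dim="x")
    # Concatenating along a new dimension name stacks along a new leading
    # axis: dims ("run", "x") and shape (2, 2).
    stacked = concat([v1, v2], dim="run")
    return along_x.shape, stacked.shape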
def assert_unique_multiindex_level_names(variables):
"""Check for uniqueness of MultiIndex level names in all given
variables.
Not public API. Used for checking consistency of DataArray and Dataset
objects.
"""
level_names = defaultdict(list)
all_level_names = set()
for var_name, var in variables.items():
if isinstance(var._data, PandasIndexAdapter):
idx_level_names = var.to_index_variable().level_names
if idx_level_names is not None:
for n in idx_level_names:
level_names[n].append(f"{n!r} ({var_name})")
if idx_level_names:
all_level_names.update(idx_level_names)
for k, v in level_names.items():
if k in variables:
v.append("(%s)" % k)
duplicate_names = [v for v in level_names.values() if len(v) > 1]
if duplicate_names:
conflict_str = "\n".join(", ".join(v) for v in duplicate_names)
raise ValueError("conflicting MultiIndex level name(s):\n%s" % conflict_str)
    # Check for conflicts between level names and dimensions GH:2299
for k, v in variables.items():
for d in v.dims:
if d in all_level_names:
raise ValueError(
"conflicting level / dimension names. {} "
"already exists as a level name.".format(d)
)
| [
"numpy.prod",
"numpy.ma.getmaskarray",
"numpy.logical_not",
"numpy.asanyarray",
"copy.deepcopy",
"copy.copy",
"numpy.asarray",
"functools.wraps",
"numpy.concatenate",
"numpy.datetime64",
"warnings.warn",
"dask.array.from_array",
"numpy.isnan",
"numpy.nonzero",
"numpy.timedelta64",
"warnings.filterwarnings",
"numpy.atleast_1d",
"warnings.catch_warnings",
"numpy.errstate",
"itertools.count",
"collections.defaultdict",
"distutils.version.LooseVersion",
"typing.TypeVar"
] | [((1294, 1335), 'typing.TypeVar', 'TypeVar', (['"""VariableType"""'], {'bound': '"""Variable"""'}), "('VariableType', bound='Variable')\n", (1301, 1335), False, 'from typing import Any, Dict, Hashable, Mapping, Optional, Sequence, Tuple, TypeVar, Union\n'), ((7749, 7765), 'numpy.asarray', 'np.asarray', (['data'], {}), '(data)\n', (7759, 7765), True, 'import numpy as np\n'), ((36561, 36578), 'itertools.count', 'itertools.count', ([], {}), '()\n', (36576, 36578), False, 'import itertools\n'), ((100638, 100655), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (100649, 100655), False, 'from collections import defaultdict\n'), ((6610, 6641), 'numpy.datetime64', 'np.datetime64', (['data.value', '"""ns"""'], {}), "(data.value, 'ns')\n", (6623, 6641), True, 'import numpy as np\n'), ((6896, 6920), 'numpy.ma.getmaskarray', 'np.ma.getmaskarray', (['data'], {}), '(data)\n', (6914, 6920), True, 'import numpy as np\n'), ((8788, 8804), 'numpy.asarray', 'np.asarray', (['data'], {}), '(data)\n', (8798, 8804), True, 'import numpy as np\n'), ((25611, 25632), 'numpy.nonzero', 'np.nonzero', (['self.data'], {}), '(self.data)\n', (25621, 25632), True, 'import numpy as np\n'), ((79843, 79861), 'functools.wraps', 'functools.wraps', (['f'], {}), '(f)\n', (79858, 79861), False, 'import functools\n'), ((80387, 80405), 'functools.wraps', 'functools.wraps', (['f'], {}), '(f)\n', (80402, 80405), False, 'import functools\n'), ((81115, 81133), 'functools.wraps', 'functools.wraps', (['f'], {}), '(f)\n', (81130, 81133), False, 'import functools\n'), ((7028, 7057), 'numpy.asarray', 'np.asarray', (['data'], {'dtype': 'dtype'}), '(data, dtype=dtype)\n', (7038, 7057), True, 'import numpy as np\n'), ((7127, 7143), 'numpy.asarray', 'np.asarray', (['data'], {}), '(data)\n', (7137, 7143), True, 'import numpy as np\n'), ((8882, 8907), 'numpy.datetime64', 'np.datetime64', (['data', '"""ns"""'], {}), "(data, 'ns')\n", (8895, 8907), True, 'import numpy as np\n'), ((35671, 35692), 'copy.copy', 'copy.copy', (['self._dims'], {}), '(self._dims)\n', (35680, 35692), False, 'import copy\n'), ((35741, 35761), 'copy.copy', 'copy.copy', (['self.data'], {}), '(self.data)\n', (35750, 35761), False, 'import copy\n'), ((35812, 35834), 'copy.copy', 'copy.copy', (['self._attrs'], {}), '(self._attrs)\n', (35821, 35834), False, 'import copy\n'), ((35891, 35916), 'copy.copy', 'copy.copy', (['self._encoding'], {}), '(self._encoding)\n', (35900, 35916), False, 'import copy\n'), ((37746, 37890), 'warnings.warn', 'warnings.warn', (['"""None value for \'chunks\' is deprecated. It will raise an error in the future. Use instead \'{}\'"""'], {'category': 'FutureWarning'}), '(\n "None value for \'chunks\' is deprecated. It will raise an error in the future. 
Use instead \'{}\'"\n , category=FutureWarning)\n', (37759, 37890), False, 'import warnings\n'), ((39504, 39563), 'dask.array.from_array', 'da.from_array', (['data', 'chunks'], {'name': 'name', 'lock': 'lock'}), '(data, chunks, name=name, lock=lock, **kwargs)\n', (39517, 39563), True, 'import dask.array as da\n'), ((56994, 57016), 'numpy.prod', 'np.prod', (['new_dim_sizes'], {}), '(new_dim_sizes)\n', (57001, 57016), True, 'import numpy as np\n'), ((60768, 60793), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {}), '()\n', (60791, 60793), False, 'import warnings\n'), ((60807, 60893), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""', '"""Mean of empty slice"""'], {'category': 'RuntimeWarning'}), "('ignore', 'Mean of empty slice', category=\n RuntimeWarning)\n", (60830, 60893), False, 'import warnings\n'), ((69068, 69099), 'numpy.asarray', 'np.asarray', (['q'], {'dtype': 'np.float64'}), '(q, dtype=np.float64)\n', (69078, 69099), True, 'import numpy as np\n'), ((82520, 82821), 'warnings.warn', 'warnings.warn', (['"""Behaviour of argmin/argmax with neither dim nor axis argument will change to return a dict of indices of each dimension. To get a single, flat index, please use np.argmin(da.data) or np.argmax(da.data) instead of da.argmin() or da.argmax()."""', 'DeprecationWarning'], {'stacklevel': '(3)'}), "(\n 'Behaviour of argmin/argmax with neither dim nor axis argument will change to return a dict of indices of each dimension. To get a single, flat index, please use np.argmin(da.data) or np.argmax(da.data) instead of da.argmin() or da.argmax().'\n , DeprecationWarning, stacklevel=3)\n", (82533, 82821), False, 'import warnings\n'), ((8964, 8990), 'numpy.timedelta64', 'np.timedelta64', (['data', '"""ns"""'], {}), "(data, 'ns')\n", (8978, 8990), True, 'import numpy as np\n'), ((16262, 16284), 'numpy.asarray', 'np.asarray', (['self._data'], {}), '(self._data)\n', (16272, 16284), True, 'import numpy as np\n'), ((25034, 25047), 'numpy.asarray', 'np.asarray', (['k'], {}), '(k)\n', (25044, 25047), True, 'import numpy as np\n'), ((30583, 30603), 'numpy.logical_not', 'np.logical_not', (['mask'], {}), '(mask)\n', (30597, 30603), True, 'import numpy as np\n'), ((35009, 35028), 'copy.deepcopy', 'copy.deepcopy', (['data'], {}), '(data)\n', (35022, 35028), False, 'import copy\n'), ((80070, 80095), 'numpy.errstate', 'np.errstate', ([], {'all': '"""ignore"""'}), "(all='ignore')\n", (80081, 80095), True, 'import numpy as np\n'), ((80744, 80769), 'numpy.errstate', 'np.errstate', ([], {'all': '"""ignore"""'}), "(all='ignore')\n", (80755, 80769), True, 'import numpy as np\n'), ((81504, 81529), 'numpy.errstate', 'np.errstate', ([], {'all': '"""ignore"""'}), "(all='ignore')\n", (81515, 81529), True, 'import numpy as np\n'), ((23355, 23368), 'numpy.asarray', 'np.asarray', (['k'], {}), '(k)\n', (23365, 23368), True, 'import numpy as np\n'), ((39039, 39069), 'distutils.version.LooseVersion', 'LooseVersion', (['dask.__version__'], {}), '(dask.__version__)\n', (39051, 39069), False, 'from distutils.version import LooseVersion\n'), ((61260, 61279), 'numpy.atleast_1d', 'np.atleast_1d', (['axis'], {}), '(axis)\n', (61273, 61279), True, 'import numpy as np\n'), ((64333, 64358), 'numpy.concatenate', 'np.concatenate', (['positions'], {}), '(positions)\n', (64347, 64358), True, 'import numpy as np\n'), ((71530, 71544), 'numpy.isnan', 'np.isnan', (['data'], {}), '(data)\n', (71538, 71544), True, 'import numpy as np\n'), ((92294, 92319), 'numpy.concatenate', 'np.concatenate', (['positions'], 
{}), '(positions)\n', (92308, 92319), True, 'import numpy as np\n'), ((25262, 25275), 'numpy.nonzero', 'np.nonzero', (['k'], {}), '(k)\n', (25272, 25275), True, 'import numpy as np\n'), ((61715, 61734), 'numpy.asanyarray', 'np.asanyarray', (['data'], {}), '(data)\n', (61728, 61734), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
#
# Copyright (c) The PyAMF Project.
# See LICENSE.txt for details.
"""
Tests for AMF utilities.
@since: 0.1.0
"""
import unittest
from datetime import datetime
from io import BytesIO
import pyamf
from pyamf import util
from pyamf.tests.util import replace_dict
PosInf = 1e300000
NegInf = -1e300000
NaN = PosInf / PosInf
def isNaN(val):
return str(float(val)) == str(NaN)
def isPosInf(val):
return str(float(val)) == str(PosInf)
def isNegInf(val):
return str(float(val)) == str(NegInf)
class TimestampTestCase(unittest.TestCase):
"""
Test UTC timestamps.
"""
def test_get_timestamp(self):
self.assertEqual(
util.get_timestamp(datetime(2007, 11, 12)),
1194825600
)
def test_get_datetime(self):
self.assertEqual(util.get_datetime(1194825600), datetime(2007, 11, 12))
def test_get_negative_datetime(self):
self.assertEqual(util.get_datetime(-31536000), datetime(1969, 1, 1))
def test_preserved_microseconds(self):
dt = datetime(2009, 3, 8, 23, 30, 47, 770122)
ts = util.get_timestamp(dt)
self.assertEqual(util.get_datetime(ts), dt)
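# Usage sketch (illustrative only; this helper is not part of the original test
# module): util.get_timestamp and util.get_datetime are inverses of each other
# for UTC datetimes.
def _example_timestamp_roundtrip():
    dt = datetime(2007, 11, 12)
    ts = util.get_timestamp(dt)        # 1194825600 seconds since the Unix epoch
    return util.get_datetime(ts) == dt  # True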
class StringIOTestCase(unittest.TestCase):
def test_create(self):
sp = util.BufferedByteStream()
self.assertEqual(sp.tell(), 0)
self.assertEqual(sp.getvalue(), b'')
self.assertEqual(len(sp), 0)
self.assertEqual(sp.getvalue(), b'')
sp = util.BufferedByteStream(None)
self.assertEqual(sp.tell(), 0)
self.assertEqual(sp.getvalue(), b'')
self.assertEqual(len(sp), 0)
sp = util.BufferedByteStream('')
self.assertEqual(sp.tell(), 0)
self.assertEqual(sp.getvalue(), b'')
self.assertEqual(len(sp), 0)
sp = util.BufferedByteStream('spam')
self.assertEqual(sp.tell(), 0)
self.assertEqual(sp.getvalue(), b'spam')
self.assertEqual(len(sp), 4)
sp = util.BufferedByteStream(BytesIO('this is a test'.encode()))
self.assertEqual(sp.tell(), 0)
self.assertEqual(sp.getvalue(), b'this is a test')
self.assertEqual(len(sp), 14)
self.assertRaises(TypeError, util.BufferedByteStream, self)
def test_getvalue(self):
sp = util.BufferedByteStream()
sp.write('asdfasdf')
self.assertEqual(sp.getvalue(), b'asdfasdf')
sp.write('spam')
self.assertEqual(sp.getvalue(), b'asdfasdfspam')
def test_read(self):
sp = util.BufferedByteStream('this is a test')
self.assertEqual(len(sp), 14)
self.assertEqual(sp.read(1), b't')
self.assertEqual(sp.getvalue(), b'this is a test')
self.assertEqual(len(sp), 14)
self.assertEqual(sp.read(10), b'his is a t')
self.assertEqual(sp.read(), b'est')
def test_seek(self):
sp = util.BufferedByteStream('abcdefghijklmnopqrstuvwxyz')
self.assertEqual(sp.getvalue(), b'abcdefghijklmnopqrstuvwxyz')
self.assertEqual(sp.tell(), 0)
# Relative to the beginning of the stream
sp.seek(0, 0)
self.assertEqual(sp.tell(), 0)
self.assertEqual(sp.getvalue(), b'abcdefghijklmnopqrstuvwxyz')
self.assertEqual(sp.read(1), b'a')
self.assertEqual(len(sp), 26)
sp.seek(10, 0)
self.assertEqual(sp.tell(), 10)
self.assertEqual(sp.getvalue(), b'abcdefghijklmnopqrstuvwxyz')
self.assertEqual(sp.read(1), b'k')
self.assertEqual(len(sp), 26)
sp.seek(-5, 1)
self.assertEqual(sp.tell(), 6)
self.assertEqual(sp.getvalue(), b'abcdefghijklmnopqrstuvwxyz')
self.assertEqual(sp.read(1), b'g')
self.assertEqual(len(sp), 26)
sp.seek(-3, 2)
self.assertEqual(sp.tell(), 23)
self.assertEqual(sp.getvalue(), b'abcdefghijklmnopqrstuvwxyz')
self.assertEqual(sp.read(1), b'x')
self.assertEqual(len(sp), 26)
def test_tell(self):
sp = util.BufferedByteStream('abcdefghijklmnopqrstuvwxyz')
self.assertEqual(sp.getvalue(), b'abcdefghijklmnopqrstuvwxyz')
self.assertEqual(len(sp), 26)
self.assertEqual(sp.tell(), 0)
sp.read(1)
self.assertEqual(sp.tell(), 1)
self.assertEqual(sp.getvalue(), b'abcdefghijklmnopqrstuvwxyz')
self.assertEqual(len(sp), 26)
sp.read(5)
self.assertEqual(sp.tell(), 6)
def test_truncate(self):
sp = util.BufferedByteStream('abcdef')
self.assertEqual(sp.getvalue(), b'abcdef')
self.assertEqual(len(sp), 6)
sp.truncate()
self.assertEqual(sp.getvalue(), b'')
self.assertEqual(len(sp), 0)
sp = util.BufferedByteStream('hello')
self.assertEqual(sp.getvalue(), b'hello')
self.assertEqual(len(sp), 5)
sp.truncate(3)
self.assertEqual(sp.getvalue(), b'hel')
self.assertEqual(len(sp), 3)
def test_write(self):
sp = util.BufferedByteStream()
self.assertEqual(sp.getvalue(), b'')
self.assertEqual(len(sp), 0)
self.assertEqual(sp.tell(), 0)
sp.write('hello')
self.assertEqual(sp.getvalue(), b'hello')
self.assertEqual(len(sp), 5)
self.assertEqual(sp.tell(), 5)
sp = util.BufferedByteStream(b'xyz')
self.assertEqual(sp.getvalue(), b'xyz')
self.assertEqual(len(sp), 3)
self.assertEqual(sp.tell(), 0)
sp.write('abc')
self.assertEqual(sp.getvalue(), b'abc')
self.assertEqual(len(sp), 3)
self.assertEqual(sp.tell(), 3)
def test_len(self):
sp = util.BufferedByteStream()
self.assertEqual(sp.getvalue(), b'')
self.assertEqual(len(sp), 0)
self.assertEqual(sp.tell(), 0)
sp.write('xyz')
self.assertEqual(len(sp), 3)
sp = util.BufferedByteStream('foo')
self.assertEqual(len(sp), 3)
sp.seek(0, 2)
sp.write('xyz')
self.assertEqual(len(sp), 6)
def test_consume(self):
sp = util.BufferedByteStream()
self.assertEqual(sp.getvalue(), b'')
self.assertEqual(sp.tell(), 0)
sp.consume()
self.assertEqual(sp.getvalue(), b'')
self.assertEqual(sp.tell(), 0)
sp = util.BufferedByteStream('foobar')
self.assertEqual(sp.getvalue(), b'foobar')
self.assertEqual(sp.tell(), 0)
sp.seek(3)
self.assertEqual(sp.tell(), 3)
sp.consume()
self.assertEqual(sp.getvalue(), b'bar')
self.assertEqual(sp.tell(), 0)
# from ticket 451 - http://pyamf.org/ticket/451
sp = util.BufferedByteStream('abcdef')
# move the stream pos to the end
sp.read()
self.assertEqual(len(sp), 6)
sp.consume()
self.assertEqual(len(sp), 0)
sp = util.BufferedByteStream('abcdef')
sp.seek(6)
sp.consume()
self.assertEqual(sp.getvalue(), b'')
class DataTypeMixInTestCase(unittest.TestCase):
endians = ('>', '<') # big, little
def _write_endian(self, obj, func, args, expected):
old_endian = obj.endian
for x in range(2):
obj.truncate()
obj.endian = self.endians[x]
func(*args)
self.assertEqual(obj.getvalue(), expected[x])
obj.endian = old_endian
def _read_endian(self, data, func, args, expected):
for x in range(2):
obj = util.BufferedByteStream(data[x])
obj.endian = self.endians[x]
result = getattr(obj, func)(*args)
self.assertEqual(result, expected)
def test_read_uchar(self):
x = util.BufferedByteStream(b'\x00\xff')
self.assertEqual(x.read_uchar(), 0)
self.assertEqual(x.read_uchar(), 255)
def test_write_uchar(self):
x = util.BufferedByteStream()
x.write_uchar(0)
self.assertEqual(x.getvalue(), b'\x00')
x.write_uchar(255)
self.assertEqual(x.getvalue(), b'\x00\xff')
self.assertRaises(OverflowError, x.write_uchar, 256)
self.assertRaises(OverflowError, x.write_uchar, -1)
self.assertRaises(TypeError, x.write_uchar, 'f')
def test_read_char(self):
x = util.BufferedByteStream(b'\x00\x7f\xff\x80')
self.assertEqual(x.read_char(), 0)
self.assertEqual(x.read_char(), 127)
self.assertEqual(x.read_char(), -1)
self.assertEqual(x.read_char(), -128)
def test_write_char(self):
x = util.BufferedByteStream()
x.write_char(0)
x.write_char(-128)
x.write_char(127)
self.assertEqual(x.getvalue(), b'\x00\x80\x7f')
self.assertRaises(OverflowError, x.write_char, 128)
self.assertRaises(OverflowError, x.write_char, -129)
self.assertRaises(TypeError, x.write_char, 'f')
def test_write_ushort(self):
x = util.BufferedByteStream()
self._write_endian(x, x.write_ushort, (0,), (b'\x00\x00', b'\x00\x00'))
self._write_endian(x, x.write_ushort, (12345,), (b'09', b'90'))
self._write_endian(
x,
x.write_ushort,
(65535,),
(b'\xff\xff', b'\xff\xff')
)
self.assertRaises(OverflowError, x.write_ushort, 65536)
self.assertRaises(OverflowError, x.write_ushort, -1)
self.assertRaises(TypeError, x.write_ushort, 'aa')
def test_read_ushort(self):
self._read_endian([b'\x00\x00', b'\x00\x00'], 'read_ushort', (), 0)
self._read_endian(['09', '90'], 'read_ushort', (), 12345)
self._read_endian([b'\xff\xff', b'\xff\xff'], 'read_ushort', (), 65535)
def test_write_short(self):
x = util.BufferedByteStream()
self._write_endian(
x,
x.write_short,
(-5673,),
(b'\xe9\xd7', b'\xd7\xe9')
)
self._write_endian(
x, x.write_short,
(32767,),
(b'\x7f\xff', b'\xff\x7f')
)
self.assertRaises(OverflowError, x.write_ushort, 65537)
self.assertRaises(OverflowError, x.write_ushort, -1)
self.assertRaises(TypeError, x.write_short, '\x00\x00')
def test_read_short(self):
self._read_endian([b'\xe9\xd7', b'\xd7\xe9'], 'read_short', (), -5673)
self._read_endian([b'\x7f\xff', b'\xff\x7f'], 'read_short', (), 32767)
def test_write_ulong(self):
x = util.BufferedByteStream()
self._write_endian(
x,
x.write_ulong,
(0,),
(b'\x00\x00\x00\x00', b'\x00\x00\x00\x00')
)
self._write_endian(
x,
x.write_ulong,
(16810049,),
(b'\x01\x00\x80A', b'A\x80\x00\x01')
)
self._write_endian(
x,
x.write_ulong,
(4294967295,),
(b'\xff\xff\xff\xff', b'\xff\xff\xff\xff')
)
self.assertRaises(OverflowError, x.write_ulong, 4294967296)
self.assertRaises(OverflowError, x.write_ulong, -1)
self.assertRaises(TypeError, x.write_ulong, '\x00\x00\x00\x00')
def test_read_ulong(self):
self._read_endian(
[b'\x00\x00\x00\x00', b'\x00\x00\x00\x00'],
'read_ulong',
(),
0
)
self._read_endian(
[b'\x01\x00\x80A', b'A\x80\x00\x01'],
'read_ulong',
(),
16810049
)
self._read_endian(
[b'\xff\xff\xff\xff', b'\xff\xff\xff\xff'],
'read_ulong',
(),
4294967295
)
def test_write_long(self):
x = util.BufferedByteStream()
self._write_endian(
x,
x.write_long,
(0,),
(b'\x00\x00\x00\x00', b'\x00\x00\x00\x00')
)
self._write_endian(
x,
x.write_long,
(16810049,),
(b'\x01\x00\x80A', b'A\x80\x00\x01')
)
self._write_endian(
x,
x.write_long,
(2147483647,),
(b'\x7f\xff\xff\xff', b'\xff\xff\xff\x7f')
)
self._write_endian(
x,
x.write_long,
(-2147483648,),
(b'\x80\x00\x00\x00', b'\x00\x00\x00\x80')
)
self.assertRaises(OverflowError, x.write_long, 2147483648)
self.assertRaises(OverflowError, x.write_long, -2147483649)
self.assertRaises(TypeError, x.write_long, '\x00\x00\x00\x00')
def test_read_long(self):
self._read_endian(
[b'\xff\xff\xcf\xc7', b'\xc7\xcf\xff\xff'],
'read_long',
(),
-12345
)
self._read_endian(
[b'\x00\x00\x00\x00', b'\x00\x00\x00\x00'],
'read_long',
(),
0
)
self._read_endian(
[b'\x01\x00\x80A', b'A\x80\x00\x01'],
'read_long',
(),
16810049
)
self._read_endian(
[b'\x7f\xff\xff\xff', b'\xff\xff\xff\x7f'],
'read_long',
(),
2147483647
)
def test_write_u24bit(self):
x = util.BufferedByteStream()
self._write_endian(
x,
x.write_24bit_uint,
(0,),
(b'\x00\x00\x00', b'\x00\x00\x00')
)
self._write_endian(
x,
x.write_24bit_uint,
(4292609,),
(b'A\x80\x01', b'\x01\x80A')
)
self._write_endian(
x,
x.write_24bit_uint,
(16777215,),
(b'\xff\xff\xff', b'\xff\xff\xff')
)
self.assertRaises(OverflowError, x.write_24bit_uint, 16777216)
self.assertRaises(OverflowError, x.write_24bit_uint, -1)
self.assertRaises(TypeError, x.write_24bit_uint, '\x00\x00\x00')
def test_read_u24bit(self):
self._read_endian(
[b'\x00\x00\x00', b'\x00\x00\x00'], 'read_24bit_uint', (), 0
)
self._read_endian(
[b'\x00\x00\x80', b'\x80\x00\x00'], 'read_24bit_uint', (), 128
)
self._read_endian(
[b'\x80\x00\x00', b'\x00\x00\x80'], 'read_24bit_uint', (), 8388608
)
self._read_endian(
[b'\xff\xff\x7f', b'\x7f\xff\xff'], 'read_24bit_uint', (), 16777087
)
self._read_endian(
[b'\x7f\xff\xff', b'\xff\xff\x7f'], 'read_24bit_uint', (), 8388607
)
def test_write_24bit(self):
x = util.BufferedByteStream()
self._write_endian(
x, x.write_24bit_int, (0,), (b'\x00\x00\x00', b'\x00\x00\x00')
)
self._write_endian(
x, x.write_24bit_int, (128,), (b'\x00\x00\x80', b'\x80\x00\x00')
)
self._write_endian(
x, x.write_24bit_int, (8388607,), (b'\x7f\xff\xff', b'\xff\xff\x7f')
)
self._write_endian(
x, x.write_24bit_int, (-1,), (b'\xff\xff\xff', b'\xff\xff\xff')
)
self._write_endian(
x, x.write_24bit_int, (-8388608,), (b'\x80\x00\x00', b'\x00\x00\x80')
)
self.assertRaises(OverflowError, x.write_24bit_int, 8388608)
self.assertRaises(OverflowError, x.write_24bit_int, -8388609)
self.assertRaises(TypeError, x.write_24bit_int, '\x00\x00\x00')
def test_read_24bit(self):
self._read_endian(
[b'\x00\x00\x00', b'\x00\x00\x00'], 'read_24bit_int', (), 0
)
self._read_endian(
[b'\x00\x00\x80', b'\x80\x00\x00'], 'read_24bit_int', (), 128
)
self._read_endian(
[b'\x80\x00\x00', b'\x00\x00\x80'], 'read_24bit_int', (), -8388608
)
self._read_endian(
[b'\xff\xff\x7f', b'\x7f\xff\xff'], 'read_24bit_int', (), -129
)
self._read_endian(
[b'\x7f\xff\xff', b'\xff\xff\x7f'], 'read_24bit_int', (), 8388607
)
def test_write_float(self):
x = util.BufferedByteStream()
self._write_endian(
x, x.write_float, (0.2,), (b'>L\xcc\xcd', b'\xcd\xccL>')
)
self.assertRaises(TypeError, x.write_float, 'foo')
def test_read_float(self):
self._read_endian(
[b'?\x00\x00\x00', b'\x00\x00\x00?'], 'read_float', (), 0.5
)
def test_write_double(self):
x = util.BufferedByteStream()
self._write_endian(
x,
x.write_double,
(0.2,),
(b'?\xc9\x99\x99\x99\x99\x99\x9a', b'\x9a\x99\x99\x99\x99\x99\xc9?')
)
self.assertRaises(TypeError, x.write_double, 'foo')
def test_read_double(self):
self._read_endian(
[b'?\xc9\x99\x99\x99\x99\x99\x9a', b'\x9a\x99\x99\x99\x99\x99\xc9?'],
'read_double',
(),
0.2
)
def test_write_utf8_string(self):
x = util.BufferedByteStream()
self._write_endian(
x,
x.write_utf8_string,
(u'ᚠᛇᚻ',),
[b'\xe1\x9a\xa0\xe1\x9b\x87\xe1\x9a\xbb'] * 2
)
self.assertRaises(TypeError, x.write_utf8_string, 1)
self.assertRaises(TypeError, x.write_utf8_string, 1.0)
self.assertRaises(TypeError, x.write_utf8_string, object())
x.write_utf8_string('\xff')
def test_read_utf8_string(self):
self._read_endian(
[b'\xe1\x9a\xa0\xe1\x9b\x87\xe1\x9a\xbb'] * 2,
'read_utf8_string',
(9,),
u'ᚠᛇᚻ'
)
def test_nan(self):
x = util.BufferedByteStream(b'\xff\xf8\x00\x00\x00\x00\x00\x00')
self.assertTrue(isNaN(x.read_double()))
x = util.BufferedByteStream(b'\xff\xf0\x00\x00\x00\x00\x00\x00')
self.assertTrue(isNegInf(x.read_double()))
x = util.BufferedByteStream(b'\x7f\xf0\x00\x00\x00\x00\x00\x00')
self.assertTrue(isPosInf(x.read_double()))
# now test little endian
x = util.BufferedByteStream(b'\x00\x00\x00\x00\x00\x00\xf8\xff')
x.endian = '<'
self.assertTrue(isNaN(x.read_double()))
x = util.BufferedByteStream(b'\x00\x00\x00\x00\x00\x00\xf0\xff')
x.endian = '<'
self.assertTrue(isNegInf(x.read_double()))
x = util.BufferedByteStream(b'\x00\x00\x00\x00\x00\x00\xf0\x7f')
x.endian = '<'
self.assertTrue(isPosInf(x.read_double()))
def test_write_infinites(self):
x = util.BufferedByteStream()
self._write_endian(x, x.write_double, (NaN,), (
b'\xff\xf8\x00\x00\x00\x00\x00\x00',
b'\x00\x00\x00\x00\x00\x00\xf8\xff'
))
self._write_endian(x, x.write_double, (PosInf,), (
b'\x7f\xf0\x00\x00\x00\x00\x00\x00',
b'\x00\x00\x00\x00\x00\x00\xf0\x7f'
))
self._write_endian(x, x.write_double, (NegInf,), (
b'\xff\xf0\x00\x00\x00\x00\x00\x00',
b'\x00\x00\x00\x00\x00\x00\xf0\xff'
))
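# Usage sketch (illustrative only; this helper is not part of the original test
# module): the same integer serialises to different byte orders depending on
# the stream's `endian` attribute.
def _example_endianness():
    big = util.BufferedByteStream()
    big.endian = '>'
    big.write_ushort(12345)        # b'09', i.e. 0x30 0x39
    little = util.BufferedByteStream()
    little.endian = '<'
    little.write_ushort(12345)     # b'90', i.e. 0x39 0x30
    return big.getvalue(), little.getvalue()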
class BufferedByteStreamTestCase(unittest.TestCase):
"""
Tests for L{BufferedByteStream<util.BufferedByteStream>}
"""
def test_create(self):
x = util.BufferedByteStream()
self.assertEqual(x.getvalue(), b'')
self.assertEqual(x.tell(), 0)
x = util.BufferedByteStream('abc')
self.assertEqual(x.getvalue(), b'abc')
self.assertEqual(x.tell(), 0)
def test_read(self):
x = util.BufferedByteStream()
self.assertEqual(x.tell(), 0)
self.assertEqual(len(x), 0)
self.assertRaises(IOError, x.read)
self.assertRaises(IOError, x.read, 10)
x.write('hello')
x.seek(0)
self.assertRaises(IOError, x.read, 10)
self.assertEqual(x.read(), b'hello')
def test_read_negative(self):
"""
@see: #799
"""
x = util.BufferedByteStream()
x.write('*' * 6000)
x.seek(100)
self.assertRaises(IOError, x.read, -345)
def test_peek(self):
x = util.BufferedByteStream('abcdefghijklmnopqrstuvwxyz')
self.assertEqual(x.tell(), 0)
self.assertEqual(x.peek(), b'a')
self.assertEqual(x.peek(5), b'abcde')
self.assertEqual(x.peek(-1), b'abcdefghijklmnopqrstuvwxyz')
x.seek(10)
self.assertEqual(x.peek(50), b'klmnopqrstuvwxyz')
def test_eof(self):
x = util.BufferedByteStream()
self.assertTrue(x.at_eof())
x.write('hello')
x.seek(0)
self.assertFalse(x.at_eof())
x.seek(0, 2)
self.assertTrue(x.at_eof())
def test_remaining(self):
x = util.BufferedByteStream('spameggs')
self.assertEqual(x.tell(), 0)
self.assertEqual(x.remaining(), 8)
x.seek(2)
self.assertEqual(x.tell(), 2)
self.assertEqual(x.remaining(), 6)
def test_add(self):
a = util.BufferedByteStream('a')
b = util.BufferedByteStream('b')
c = a + b
self.assertTrue(isinstance(c, util.BufferedByteStream))
self.assertEqual(c.getvalue(), b'ab')
self.assertEqual(c.tell(), 0)
def test_add_pos(self):
a = util.BufferedByteStream(b'abc')
b = util.BufferedByteStream(b'def')
a.seek(1)
b.seek(0, 2)
self.assertEqual(a.tell(), 1)
self.assertEqual(b.tell(), 3)
        a + b
        # adding two streams must not disturb either operand's position
        self.assertEqual(a.tell(), 1)
        self.assertEqual(b.tell(), 3)
def test_append_types(self):
# test non string types
a = util.BufferedByteStream()
self.assertRaises(TypeError, a.append, 234234)
self.assertRaises(TypeError, a.append, 234.0)
self.assertRaises(TypeError, a.append, 234234)
self.assertRaises(TypeError, a.append, [])
self.assertRaises(TypeError, a.append, {})
self.assertRaises(TypeError, a.append, lambda _: None)
self.assertRaises(TypeError, a.append, ())
self.assertRaises(TypeError, a.append, object())
def test_append_string(self):
"""
Test L{util.BufferedByteStream.append} with C{str} objects.
"""
# test empty
a = util.BufferedByteStream()
self.assertEqual(a.getvalue(), b'')
self.assertEqual(a.tell(), 0)
self.assertEqual(len(a), 0)
a.append('foo')
self.assertEqual(a.getvalue(), b'foo')
self.assertEqual(a.tell(), 0) # <-- pointer hasn't moved
self.assertEqual(len(a), 3)
# test pointer beginning, some data
a = util.BufferedByteStream('bar')
self.assertEqual(a.getvalue(), b'bar')
self.assertEqual(a.tell(), 0)
self.assertEqual(len(a), 3)
a.append('gak')
self.assertEqual(a.getvalue(), b'bargak')
self.assertEqual(a.tell(), 0) # <-- pointer hasn't moved
self.assertEqual(len(a), 6)
# test pointer middle, some data
a = util.BufferedByteStream('bar')
a.seek(2)
self.assertEqual(a.getvalue(), b'bar')
self.assertEqual(a.tell(), 2)
self.assertEqual(len(a), 3)
a.append('gak')
self.assertEqual(a.getvalue(), b'bargak')
self.assertEqual(a.tell(), 2) # <-- pointer hasn't moved
self.assertEqual(len(a), 6)
# test pointer end, some data
a = util.BufferedByteStream('bar')
a.seek(0, 2)
self.assertEqual(a.getvalue(), b'bar')
self.assertEqual(a.tell(), 3)
self.assertEqual(len(a), 3)
a.append('gak')
self.assertEqual(a.getvalue(), b'bargak')
self.assertEqual(a.tell(), 3) # <-- pointer hasn't moved
self.assertEqual(len(a), 6)
class Foo(object):
def getvalue(self):
return b'foo'
def __str__(self):
raise AttributeError()
a = util.BufferedByteStream()
self.assertEqual(a.getvalue(), b'')
self.assertEqual(a.tell(), 0)
self.assertEqual(len(a), 0)
a.append(Foo())
self.assertEqual(a.getvalue(), b'foo')
self.assertEqual(a.tell(), 0)
self.assertEqual(len(a), 3)
def test_append_unicode(self):
"""
Test L{util.BufferedByteStream.append} with C{unicode} objects.
"""
# test empty
a = util.BufferedByteStream()
self.assertEqual(a.getvalue(), b'')
self.assertEqual(a.tell(), 0)
self.assertEqual(len(a), 0)
a.append('foo')
self.assertEqual(a.getvalue(), b'foo')
self.assertEqual(a.tell(), 0) # <-- pointer hasn't moved
self.assertEqual(len(a), 3)
# test pointer beginning, some data
a = util.BufferedByteStream('bar')
self.assertEqual(a.getvalue(), b'bar')
self.assertEqual(a.tell(), 0)
self.assertEqual(len(a), 3)
a.append('gak')
self.assertEqual(a.getvalue(), b'bargak')
self.assertEqual(a.tell(), 0) # <-- pointer hasn't moved
self.assertEqual(len(a), 6)
# test pointer middle, some data
a = util.BufferedByteStream('bar')
a.seek(2)
self.assertEqual(a.getvalue(), b'bar')
self.assertEqual(a.tell(), 2)
self.assertEqual(len(a), 3)
a.append('gak')
self.assertEqual(a.getvalue(), b'bargak')
self.assertEqual(a.tell(), 2) # <-- pointer hasn't moved
self.assertEqual(len(a), 6)
# test pointer end, some data
a = util.BufferedByteStream('bar')
a.seek(0, 2)
self.assertEqual(a.getvalue(), b'bar')
self.assertEqual(a.tell(), 3)
self.assertEqual(len(a), 3)
a.append('gak')
self.assertEqual(a.getvalue(), b'bargak')
self.assertEqual(a.tell(), 3) # <-- pointer hasn't moved
self.assertEqual(len(a), 6)
class Foo(object):
def getvalue(self):
return u'foo'
def __str__(self):
raise AttributeError()
a = util.BufferedByteStream()
self.assertEqual(a.getvalue(), b'')
self.assertEqual(a.tell(), 0)
self.assertEqual(len(a), 0)
a.append(Foo())
self.assertEqual(a.getvalue(), b'foo')
self.assertEqual(a.tell(), 0)
self.assertEqual(len(a), 3)
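# Usage sketch (illustrative only; this helper is not part of the original test
# module): peek() reads ahead without moving the stream position, and consume()
# discards everything before the current position.
def _example_peek_and_consume():
    s = util.BufferedByteStream('foobar')
    head = s.peek(3)               # b'foo'; the position stays at 0
    s.seek(3)
    s.consume()                    # drops the consumed prefix
    return head, s.getvalue()      # (b'foo', b'bar')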
class DummyAlias(pyamf.ClassAlias):
pass
class AnotherDummyAlias(pyamf.ClassAlias):
pass
class YADummyAlias(pyamf.ClassAlias):
pass
class ClassAliasTestCase(unittest.TestCase):
def setUp(self):
self.old_aliases = pyamf.ALIAS_TYPES.copy()
def tearDown(self):
replace_dict(self.old_aliases, pyamf.ALIAS_TYPES)
def test_simple(self):
class A(object):
pass
pyamf.register_alias_type(DummyAlias, A)
self.assertEqual(util.get_class_alias(A), DummyAlias)
def test_nested(self):
class A(object):
pass
class B(object):
pass
class C(object):
pass
pyamf.register_alias_type(DummyAlias, A, B, C)
self.assertEqual(util.get_class_alias(B), DummyAlias)
def test_multiple(self):
class A(object):
pass
class B(object):
pass
class C(object):
pass
pyamf.register_alias_type(DummyAlias, A)
pyamf.register_alias_type(AnotherDummyAlias, B)
pyamf.register_alias_type(YADummyAlias, C)
self.assertEqual(util.get_class_alias(B), AnotherDummyAlias)
self.assertEqual(util.get_class_alias(C), YADummyAlias)
self.assertEqual(util.get_class_alias(A), DummyAlias)
def test_none_existant(self):
self.assertEqual(util.get_class_alias(self.__class__), None)
def test_subclass(self):
class A(object):
pass
class B(A):
pass
pyamf.register_alias_type(DummyAlias, A)
self.assertEqual(util.get_class_alias(B), DummyAlias)
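# Usage sketch (illustrative only; this helper and its local classes are not
# part of the original test module): get_class_alias() honours
# register_alias_type() registrations, including for subclasses.
def _example_alias_lookup():
    class Base(object):
        pass

    class Child(Base):
        pass

    saved = pyamf.ALIAS_TYPES.copy()
    try:
        pyamf.register_alias_type(DummyAlias, Base)
        return util.get_class_alias(Child)   # DummyAlias
    finally:
        replace_dict(saved, pyamf.ALIAS_TYPES)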
class IsClassSealedTestCase(unittest.TestCase):
"""
Tests for L{util.is_class_sealed}
"""
def test_new_mixed(self):
class A(object):
__slots__ = ['foo', 'bar']
class B(A):
pass
class C(B):
__slots__ = ('spam', 'eggs')
self.assertTrue(util.is_class_sealed(A))
self.assertFalse(util.is_class_sealed(B))
self.assertFalse(util.is_class_sealed(C))
def test_deep(self):
class A(object):
__slots__ = ['foo', 'bar']
class B(A):
__slots__ = ('gak',)
class C(B):
pass
self.assertTrue(util.is_class_sealed(A))
self.assertTrue(util.is_class_sealed(B))
self.assertFalse(util.is_class_sealed(C))
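# Usage sketch (illustrative only; this helper and its local classes are not
# part of the original test module): a hierarchy counts as "sealed" only while
# every class in it declares __slots__.
def _example_is_class_sealed():
    class Sealed(object):
        __slots__ = ('a', 'b')

    class Open(Sealed):
        pass    # no __slots__ here, so instances gain a __dict__ again

    return util.is_class_sealed(Sealed), util.is_class_sealed(Open)   # (True, False)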
class GetClassMetaTestCase(unittest.TestCase):
"""
Tests for L{util.get_class_meta}
"""
def test_types(self):
class A:
pass
class B(object):
pass
for t in ['', u'', 1, 1.0, 1, [], {}, object, object(), A(), B()]:
self.assertRaises(TypeError, util.get_class_meta, t)
def test_no_meta(self):
class A:
pass
class B(object):
pass
empty = {
'readonly_attrs': None,
'static_attrs': None,
'synonym_attrs': None,
'proxy_attrs': None,
'dynamic': None,
'alias': None,
'amf3': None,
'exclude_attrs': None,
'external': None
}
self.assertEqual(util.get_class_meta(A), empty)
self.assertEqual(util.get_class_meta(B), empty)
def test_alias(self):
class A:
class __amf__:
alias = 'foo.bar.Spam'
class B(object):
class __amf__:
alias = 'foo.bar.Spam'
meta = {
'readonly_attrs': None,
'static_attrs': None,
'synonym_attrs': None,
'proxy_attrs': None,
'dynamic': None,
'alias': 'foo.bar.Spam',
'amf3': None,
'exclude_attrs': None,
'external': None
}
self.assertEqual(util.get_class_meta(A), meta)
self.assertEqual(util.get_class_meta(B), meta)
def test_static(self):
class A:
class __amf__:
static = ['foo', 'bar']
class B(object):
class __amf__:
static = ['foo', 'bar']
meta = {
'readonly_attrs': None,
'static_attrs': ['foo', 'bar'],
'synonym_attrs': None,
'proxy_attrs': None,
'dynamic': None,
'alias': None,
'amf3': None,
'exclude_attrs': None,
'external': None
}
self.assertEqual(util.get_class_meta(A), meta)
self.assertEqual(util.get_class_meta(B), meta)
def test_exclude(self):
class A:
class __amf__:
exclude = ['foo', 'bar']
class B(object):
class __amf__:
exclude = ['foo', 'bar']
meta = {
'readonly_attrs': None,
'exclude_attrs': ['foo', 'bar'],
'synonym_attrs': None,
'proxy_attrs': None,
'dynamic': None,
'alias': None,
'amf3': None,
'static_attrs': None,
'external': None
}
self.assertEqual(util.get_class_meta(A), meta)
self.assertEqual(util.get_class_meta(B), meta)
def test_readonly(self):
class A:
class __amf__:
readonly = ['foo', 'bar']
class B(object):
class __amf__:
readonly = ['foo', 'bar']
meta = {
'exclude_attrs': None,
'readonly_attrs': ['foo', 'bar'],
'synonym_attrs': None,
'proxy_attrs': None,
'dynamic': None,
'alias': None,
'amf3': None,
'static_attrs': None,
'external': None,
}
self.assertEqual(util.get_class_meta(A), meta)
self.assertEqual(util.get_class_meta(B), meta)
def test_amf3(self):
class A:
class __amf__:
amf3 = True
class B(object):
class __amf__:
amf3 = True
meta = {
'exclude_attrs': None,
'proxy_attrs': None,
'synonym_attrs': None,
'readonly_attrs': None,
'dynamic': None,
'alias': None,
'amf3': True,
'static_attrs': None,
'external': None
}
self.assertEqual(util.get_class_meta(A), meta)
self.assertEqual(util.get_class_meta(B), meta)
def test_dynamic(self):
class A:
class __amf__:
dynamic = False
class B(object):
class __amf__:
dynamic = False
meta = {
'exclude_attrs': None,
'proxy_attrs': None,
'synonym_attrs': None,
'readonly_attrs': None,
'dynamic': False,
'alias': None,
'amf3': None,
'static_attrs': None,
'external': None
}
self.assertEqual(util.get_class_meta(A), meta)
self.assertEqual(util.get_class_meta(B), meta)
def test_external(self):
class A:
class __amf__:
external = True
class B(object):
class __amf__:
external = True
meta = {
'exclude_attrs': None,
'proxy_attrs': None,
'synonym_attrs': None,
'readonly_attrs': None,
'dynamic': None,
'alias': None,
'amf3': None,
'static_attrs': None,
'external': True
}
self.assertEqual(util.get_class_meta(A), meta)
self.assertEqual(util.get_class_meta(B), meta)
def test_dict(self):
meta = {
'exclude': ['foo'],
'readonly': ['bar'],
'dynamic': False,
'alias': 'spam.eggs',
'proxy_attrs': None,
'synonym_attrs': None,
'amf3': True,
'static': ['baz'],
'external': True
}
class A:
__amf__ = meta
class B(object):
__amf__ = meta
ret = {
'readonly_attrs': ['bar'],
'static_attrs': ['baz'],
'proxy_attrs': None,
'dynamic': False,
'alias': 'spam.eggs',
'amf3': True,
'exclude_attrs': ['foo'],
'synonym_attrs': None,
'external': True
}
self.assertEqual(util.get_class_meta(A), ret)
self.assertEqual(util.get_class_meta(B), ret)
def test_proxy(self):
class A:
class __amf__:
proxy = ['foo', 'bar']
class B(object):
class __amf__:
proxy = ['foo', 'bar']
meta = {
'exclude_attrs': None,
'readonly_attrs': None,
'proxy_attrs': ['foo', 'bar'],
'synonym_attrs': None,
'dynamic': None,
'alias': None,
'amf3': None,
'static_attrs': None,
'external': None
}
self.assertEqual(util.get_class_meta(A), meta)
self.assertEqual(util.get_class_meta(B), meta)
def test_synonym(self):
class A:
class __amf__:
synonym = {'foo': 'bar'}
class B(object):
class __amf__:
synonym = {'foo': 'bar'}
meta = {
'exclude_attrs': None,
'readonly_attrs': None,
'proxy_attrs': None,
'synonym_attrs': {'foo': 'bar'},
'dynamic': None,
'alias': None,
'amf3': None,
'static_attrs': None,
'external': None
}
self.assertEqual(util.get_class_meta(A), meta)
self.assertEqual(util.get_class_meta(B), meta)
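# Usage sketch (illustrative only; this helper and its class are not part of
# the original test module): get_class_meta() normalises a nested __amf__
# declaration into the full metadata dict exercised by the tests above.
def _example_get_class_meta():
    class Spam(object):
        class __amf__:
            alias = 'spam.eggs.Spam'
            static = ['foo']

    meta = util.get_class_meta(Spam)
    return meta['alias'], meta['static_attrs']   # ('spam.eggs.Spam', ['foo'])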
| [
"datetime.datetime",
"pyamf.util.get_timestamp",
"pyamf.util.BufferedByteStream",
"pyamf.util.is_class_sealed",
"pyamf.ALIAS_TYPES.copy",
"pyamf.register_alias_type",
"pyamf.util.get_class_alias",
"pyamf.tests.util.replace_dict",
"pyamf.util.get_class_meta",
"pyamf.util.get_datetime"
] | [((1062, 1102), 'datetime.datetime', 'datetime', (['(2009)', '(3)', '(8)', '(23)', '(30)', '(47)', '(770122)'], {}), '(2009, 3, 8, 23, 30, 47, 770122)\n', (1070, 1102), False, 'from datetime import datetime\n'), ((1116, 1138), 'pyamf.util.get_timestamp', 'util.get_timestamp', (['dt'], {}), '(dt)\n', (1134, 1138), False, 'from pyamf import util\n'), ((1277, 1302), 'pyamf.util.BufferedByteStream', 'util.BufferedByteStream', ([], {}), '()\n', (1300, 1302), False, 'from pyamf import util\n'), ((1484, 1513), 'pyamf.util.BufferedByteStream', 'util.BufferedByteStream', (['None'], {}), '(None)\n', (1507, 1513), False, 'from pyamf import util\n'), ((1650, 1677), 'pyamf.util.BufferedByteStream', 'util.BufferedByteStream', (['""""""'], {}), "('')\n", (1673, 1677), False, 'from pyamf import util\n'), ((1814, 1845), 'pyamf.util.BufferedByteStream', 'util.BufferedByteStream', (['"""spam"""'], {}), "('spam')\n", (1837, 1845), False, 'from pyamf import util\n'), ((2294, 2319), 'pyamf.util.BufferedByteStream', 'util.BufferedByteStream', ([], {}), '()\n', (2317, 2319), False, 'from pyamf import util\n'), ((2524, 2565), 'pyamf.util.BufferedByteStream', 'util.BufferedByteStream', (['"""this is a test"""'], {}), "('this is a test')\n", (2547, 2565), False, 'from pyamf import util\n'), ((2881, 2934), 'pyamf.util.BufferedByteStream', 'util.BufferedByteStream', (['"""abcdefghijklmnopqrstuvwxyz"""'], {}), "('abcdefghijklmnopqrstuvwxyz')\n", (2904, 2934), False, 'from pyamf import util\n'), ((3996, 4049), 'pyamf.util.BufferedByteStream', 'util.BufferedByteStream', (['"""abcdefghijklmnopqrstuvwxyz"""'], {}), "('abcdefghijklmnopqrstuvwxyz')\n", (4019, 4049), False, 'from pyamf import util\n'), ((4470, 4503), 'pyamf.util.BufferedByteStream', 'util.BufferedByteStream', (['"""abcdef"""'], {}), "('abcdef')\n", (4493, 4503), False, 'from pyamf import util\n'), ((4712, 4744), 'pyamf.util.BufferedByteStream', 'util.BufferedByteStream', (['"""hello"""'], {}), "('hello')\n", (4735, 4744), False, 'from pyamf import util\n'), ((4983, 5008), 'pyamf.util.BufferedByteStream', 'util.BufferedByteStream', ([], {}), '()\n', (5006, 5008), False, 'from pyamf import util\n'), ((5298, 5329), 'pyamf.util.BufferedByteStream', 'util.BufferedByteStream', (["b'xyz'"], {}), "(b'xyz')\n", (5321, 5329), False, 'from pyamf import util\n'), ((5642, 5667), 'pyamf.util.BufferedByteStream', 'util.BufferedByteStream', ([], {}), '()\n', (5665, 5667), False, 'from pyamf import util\n'), ((5867, 5897), 'pyamf.util.BufferedByteStream', 'util.BufferedByteStream', (['"""foo"""'], {}), "('foo')\n", (5890, 5897), False, 'from pyamf import util\n'), ((6063, 6088), 'pyamf.util.BufferedByteStream', 'util.BufferedByteStream', ([], {}), '()\n', (6086, 6088), False, 'from pyamf import util\n'), ((6295, 6328), 'pyamf.util.BufferedByteStream', 'util.BufferedByteStream', (['"""foobar"""'], {}), "('foobar')\n", (6318, 6328), False, 'from pyamf import util\n'), ((6659, 6692), 'pyamf.util.BufferedByteStream', 'util.BufferedByteStream', (['"""abcdef"""'], {}), "('abcdef')\n", (6682, 6692), False, 'from pyamf import util\n'), ((6862, 6895), 'pyamf.util.BufferedByteStream', 'util.BufferedByteStream', (['"""abcdef"""'], {}), "('abcdef')\n", (6885, 6895), False, 'from pyamf import util\n'), ((7689, 7725), 'pyamf.util.BufferedByteStream', 'util.BufferedByteStream', (["b'\\x00\\xff'"], {}), "(b'\\x00\\xff')\n", (7712, 7725), False, 'from pyamf import util\n'), ((7862, 7887), 'pyamf.util.BufferedByteStream', 'util.BufferedByteStream', ([], {}), '()\n', (7885, 7887), False, 
'from pyamf import util\n'), ((8263, 8307), 'pyamf.util.BufferedByteStream', 'util.BufferedByteStream', (["b'\\x00\\x7f\\xff\\x80'"], {}), "(b'\\x00\\x7f\\xff\\x80')\n", (8286, 8307), False, 'from pyamf import util\n'), ((8531, 8556), 'pyamf.util.BufferedByteStream', 'util.BufferedByteStream', ([], {}), '()\n', (8554, 8556), False, 'from pyamf import util\n'), ((8916, 8941), 'pyamf.util.BufferedByteStream', 'util.BufferedByteStream', ([], {}), '()\n', (8939, 8941), False, 'from pyamf import util\n'), ((9722, 9747), 'pyamf.util.BufferedByteStream', 'util.BufferedByteStream', ([], {}), '()\n', (9745, 9747), False, 'from pyamf import util\n'), ((10444, 10469), 'pyamf.util.BufferedByteStream', 'util.BufferedByteStream', ([], {}), '()\n', (10467, 10469), False, 'from pyamf import util\n'), ((11674, 11699), 'pyamf.util.BufferedByteStream', 'util.BufferedByteStream', ([], {}), '()\n', (11697, 11699), False, 'from pyamf import util\n'), ((13220, 13245), 'pyamf.util.BufferedByteStream', 'util.BufferedByteStream', ([], {}), '()\n', (13243, 13245), False, 'from pyamf import util\n'), ((14563, 14588), 'pyamf.util.BufferedByteStream', 'util.BufferedByteStream', ([], {}), '()\n', (14586, 14588), False, 'from pyamf import util\n'), ((16023, 16048), 'pyamf.util.BufferedByteStream', 'util.BufferedByteStream', ([], {}), '()\n', (16046, 16048), False, 'from pyamf import util\n'), ((16403, 16428), 'pyamf.util.BufferedByteStream', 'util.BufferedByteStream', ([], {}), '()\n', (16426, 16428), False, 'from pyamf import util\n'), ((16934, 16959), 'pyamf.util.BufferedByteStream', 'util.BufferedByteStream', ([], {}), '()\n', (16957, 16959), False, 'from pyamf import util\n'), ((17596, 17656), 'pyamf.util.BufferedByteStream', 'util.BufferedByteStream', (["b'\\xff\\xf8\\x00\\x00\\x00\\x00\\x00\\x00'"], {}), "(b'\\xff\\xf8\\x00\\x00\\x00\\x00\\x00\\x00')\n", (17619, 17656), False, 'from pyamf import util\n'), ((17718, 17778), 'pyamf.util.BufferedByteStream', 'util.BufferedByteStream', (["b'\\xff\\xf0\\x00\\x00\\x00\\x00\\x00\\x00'"], {}), "(b'\\xff\\xf0\\x00\\x00\\x00\\x00\\x00\\x00')\n", (17741, 17778), False, 'from pyamf import util\n'), ((17843, 17903), 'pyamf.util.BufferedByteStream', 'util.BufferedByteStream', (["b'\\x7f\\xf0\\x00\\x00\\x00\\x00\\x00\\x00'"], {}), "(b'\\x7f\\xf0\\x00\\x00\\x00\\x00\\x00\\x00')\n", (17866, 17903), False, 'from pyamf import util\n'), ((18001, 18061), 'pyamf.util.BufferedByteStream', 'util.BufferedByteStream', (["b'\\x00\\x00\\x00\\x00\\x00\\x00\\xf8\\xff'"], {}), "(b'\\x00\\x00\\x00\\x00\\x00\\x00\\xf8\\xff')\n", (18024, 18061), False, 'from pyamf import util\n'), ((18146, 18206), 'pyamf.util.BufferedByteStream', 'util.BufferedByteStream', (["b'\\x00\\x00\\x00\\x00\\x00\\x00\\xf0\\xff'"], {}), "(b'\\x00\\x00\\x00\\x00\\x00\\x00\\xf0\\xff')\n", (18169, 18206), False, 'from pyamf import util\n'), ((18294, 18354), 'pyamf.util.BufferedByteStream', 'util.BufferedByteStream', (["b'\\x00\\x00\\x00\\x00\\x00\\x00\\xf0\\x7f'"], {}), "(b'\\x00\\x00\\x00\\x00\\x00\\x00\\xf0\\x7f')\n", (18317, 18354), False, 'from pyamf import util\n'), ((18478, 18503), 'pyamf.util.BufferedByteStream', 'util.BufferedByteStream', ([], {}), '()\n', (18501, 18503), False, 'from pyamf import util\n'), ((19177, 19202), 'pyamf.util.BufferedByteStream', 'util.BufferedByteStream', ([], {}), '()\n', (19200, 19202), False, 'from pyamf import util\n'), ((19299, 19329), 'pyamf.util.BufferedByteStream', 'util.BufferedByteStream', (['"""abc"""'], {}), "('abc')\n", (19322, 19329), False, 'from pyamf import util\n'), ((19454, 
19479), 'pyamf.util.BufferedByteStream', 'util.BufferedByteStream', ([], {}), '()\n', (19477, 19479), False, 'from pyamf import util\n'), ((19872, 19897), 'pyamf.util.BufferedByteStream', 'util.BufferedByteStream', ([], {}), '()\n', (19895, 19897), False, 'from pyamf import util\n'), ((20034, 20087), 'pyamf.util.BufferedByteStream', 'util.BufferedByteStream', (['"""abcdefghijklmnopqrstuvwxyz"""'], {}), "('abcdefghijklmnopqrstuvwxyz')\n", (20057, 20087), False, 'from pyamf import util\n'), ((20398, 20423), 'pyamf.util.BufferedByteStream', 'util.BufferedByteStream', ([], {}), '()\n', (20421, 20423), False, 'from pyamf import util\n'), ((20641, 20676), 'pyamf.util.BufferedByteStream', 'util.BufferedByteStream', (['"""spameggs"""'], {}), "('spameggs')\n", (20664, 20676), False, 'from pyamf import util\n'), ((20896, 20924), 'pyamf.util.BufferedByteStream', 'util.BufferedByteStream', (['"""a"""'], {}), "('a')\n", (20919, 20924), False, 'from pyamf import util\n'), ((20937, 20965), 'pyamf.util.BufferedByteStream', 'util.BufferedByteStream', (['"""b"""'], {}), "('b')\n", (20960, 20965), False, 'from pyamf import util\n'), ((21175, 21206), 'pyamf.util.BufferedByteStream', 'util.BufferedByteStream', (["b'abc'"], {}), "(b'abc')\n", (21198, 21206), False, 'from pyamf import util\n'), ((21219, 21250), 'pyamf.util.BufferedByteStream', 'util.BufferedByteStream', (["b'def'"], {}), "(b'def')\n", (21242, 21250), False, 'from pyamf import util\n'), ((21523, 21548), 'pyamf.util.BufferedByteStream', 'util.BufferedByteStream', ([], {}), '()\n', (21546, 21548), False, 'from pyamf import util\n'), ((22147, 22172), 'pyamf.util.BufferedByteStream', 'util.BufferedByteStream', ([], {}), '()\n', (22170, 22172), False, 'from pyamf import util\n'), ((22525, 22555), 'pyamf.util.BufferedByteStream', 'util.BufferedByteStream', (['"""bar"""'], {}), "('bar')\n", (22548, 22555), False, 'from pyamf import util\n'), ((22911, 22941), 'pyamf.util.BufferedByteStream', 'util.BufferedByteStream', (['"""bar"""'], {}), "('bar')\n", (22934, 22941), False, 'from pyamf import util\n'), ((23312, 23342), 'pyamf.util.BufferedByteStream', 'util.BufferedByteStream', (['"""bar"""'], {}), "('bar')\n", (23335, 23342), False, 'from pyamf import util\n'), ((23838, 23863), 'pyamf.util.BufferedByteStream', 'util.BufferedByteStream', ([], {}), '()\n', (23861, 23863), False, 'from pyamf import util\n'), ((24295, 24320), 'pyamf.util.BufferedByteStream', 'util.BufferedByteStream', ([], {}), '()\n', (24318, 24320), False, 'from pyamf import util\n'), ((24673, 24703), 'pyamf.util.BufferedByteStream', 'util.BufferedByteStream', (['"""bar"""'], {}), "('bar')\n", (24696, 24703), False, 'from pyamf import util\n'), ((25059, 25089), 'pyamf.util.BufferedByteStream', 'util.BufferedByteStream', (['"""bar"""'], {}), "('bar')\n", (25082, 25089), False, 'from pyamf import util\n'), ((25460, 25490), 'pyamf.util.BufferedByteStream', 'util.BufferedByteStream', (['"""bar"""'], {}), "('bar')\n", (25483, 25490), False, 'from pyamf import util\n'), ((25986, 26011), 'pyamf.util.BufferedByteStream', 'util.BufferedByteStream', ([], {}), '()\n', (26009, 26011), False, 'from pyamf import util\n'), ((26523, 26547), 'pyamf.ALIAS_TYPES.copy', 'pyamf.ALIAS_TYPES.copy', ([], {}), '()\n', (26545, 26547), False, 'import pyamf\n'), ((26581, 26630), 'pyamf.tests.util.replace_dict', 'replace_dict', (['self.old_aliases', 'pyamf.ALIAS_TYPES'], {}), '(self.old_aliases, pyamf.ALIAS_TYPES)\n', (26593, 26630), False, 'from pyamf.tests.util import replace_dict\n'), ((26710, 26750), 
'pyamf.register_alias_type', 'pyamf.register_alias_type', (['DummyAlias', 'A'], {}), '(DummyAlias, A)\n', (26735, 26750), False, 'import pyamf\n'), ((26979, 27025), 'pyamf.register_alias_type', 'pyamf.register_alias_type', (['DummyAlias', 'A', 'B', 'C'], {}), '(DummyAlias, A, B, C)\n', (27004, 27025), False, 'import pyamf\n'), ((27256, 27296), 'pyamf.register_alias_type', 'pyamf.register_alias_type', (['DummyAlias', 'A'], {}), '(DummyAlias, A)\n', (27281, 27296), False, 'import pyamf\n'), ((27305, 27352), 'pyamf.register_alias_type', 'pyamf.register_alias_type', (['AnotherDummyAlias', 'B'], {}), '(AnotherDummyAlias, B)\n', (27330, 27352), False, 'import pyamf\n'), ((27361, 27403), 'pyamf.register_alias_type', 'pyamf.register_alias_type', (['YADummyAlias', 'C'], {}), '(YADummyAlias, C)\n', (27386, 27403), False, 'import pyamf\n'), ((27823, 27863), 'pyamf.register_alias_type', 'pyamf.register_alias_type', (['DummyAlias', 'A'], {}), '(DummyAlias, A)\n', (27848, 27863), False, 'import pyamf\n'), ((830, 859), 'pyamf.util.get_datetime', 'util.get_datetime', (['(1194825600)'], {}), '(1194825600)\n', (847, 859), False, 'from pyamf import util\n'), ((861, 883), 'datetime.datetime', 'datetime', (['(2007)', '(11)', '(12)'], {}), '(2007, 11, 12)\n', (869, 883), False, 'from datetime import datetime\n'), ((953, 981), 'pyamf.util.get_datetime', 'util.get_datetime', (['(-31536000)'], {}), '(-31536000)\n', (970, 981), False, 'from pyamf import util\n'), ((983, 1003), 'datetime.datetime', 'datetime', (['(1969)', '(1)', '(1)'], {}), '(1969, 1, 1)\n', (991, 1003), False, 'from datetime import datetime\n'), ((1164, 1185), 'pyamf.util.get_datetime', 'util.get_datetime', (['ts'], {}), '(ts)\n', (1181, 1185), False, 'from pyamf import util\n'), ((7475, 7507), 'pyamf.util.BufferedByteStream', 'util.BufferedByteStream', (['data[x]'], {}), '(data[x])\n', (7498, 7507), False, 'from pyamf import util\n'), ((26777, 26800), 'pyamf.util.get_class_alias', 'util.get_class_alias', (['A'], {}), '(A)\n', (26797, 26800), False, 'from pyamf import util\n'), ((27052, 27075), 'pyamf.util.get_class_alias', 'util.get_class_alias', (['B'], {}), '(B)\n', (27072, 27075), False, 'from pyamf import util\n'), ((27430, 27453), 'pyamf.util.get_class_alias', 'util.get_class_alias', (['B'], {}), '(B)\n', (27450, 27453), False, 'from pyamf import util\n'), ((27499, 27522), 'pyamf.util.get_class_alias', 'util.get_class_alias', (['C'], {}), '(C)\n', (27519, 27522), False, 'from pyamf import util\n'), ((27563, 27586), 'pyamf.util.get_class_alias', 'util.get_class_alias', (['A'], {}), '(A)\n', (27583, 27586), False, 'from pyamf import util\n'), ((27660, 27696), 'pyamf.util.get_class_alias', 'util.get_class_alias', (['self.__class__'], {}), '(self.__class__)\n', (27680, 27696), False, 'from pyamf import util\n'), ((27890, 27913), 'pyamf.util.get_class_alias', 'util.get_class_alias', (['B'], {}), '(B)\n', (27910, 27913), False, 'from pyamf import util\n'), ((28251, 28274), 'pyamf.util.is_class_sealed', 'util.is_class_sealed', (['A'], {}), '(A)\n', (28271, 28274), False, 'from pyamf import util\n'), ((28301, 28324), 'pyamf.util.is_class_sealed', 'util.is_class_sealed', (['B'], {}), '(B)\n', (28321, 28324), False, 'from pyamf import util\n'), ((28351, 28374), 'pyamf.util.is_class_sealed', 'util.is_class_sealed', (['C'], {}), '(C)\n', (28371, 28374), False, 'from pyamf import util\n'), ((28583, 28606), 'pyamf.util.is_class_sealed', 'util.is_class_sealed', (['A'], {}), '(A)\n', (28603, 28606), False, 'from pyamf import util\n'), ((28632, 28655), 
'pyamf.util.is_class_sealed', 'util.is_class_sealed', (['B'], {}), '(B)\n', (28652, 28655), False, 'from pyamf import util\n'), ((28682, 28705), 'pyamf.util.is_class_sealed', 'util.is_class_sealed', (['C'], {}), '(C)\n', (28702, 28705), False, 'from pyamf import util\n'), ((29532, 29554), 'pyamf.util.get_class_meta', 'util.get_class_meta', (['A'], {}), '(A)\n', (29551, 29554), False, 'from pyamf import util\n'), ((29588, 29610), 'pyamf.util.get_class_meta', 'util.get_class_meta', (['B'], {}), '(B)\n', (29607, 29610), False, 'from pyamf import util\n'), ((30202, 30224), 'pyamf.util.get_class_meta', 'util.get_class_meta', (['A'], {}), '(A)\n', (30221, 30224), False, 'from pyamf import util\n'), ((30257, 30279), 'pyamf.util.get_class_meta', 'util.get_class_meta', (['B'], {}), '(B)\n', (30276, 30279), False, 'from pyamf import util\n'), ((30840, 30862), 'pyamf.util.get_class_meta', 'util.get_class_meta', (['A'], {}), '(A)\n', (30859, 30862), False, 'from pyamf import util\n'), ((30895, 30917), 'pyamf.util.get_class_meta', 'util.get_class_meta', (['B'], {}), '(B)\n', (30914, 30917), False, 'from pyamf import util\n'), ((31514, 31536), 'pyamf.util.get_class_meta', 'util.get_class_meta', (['A'], {}), '(A)\n', (31533, 31536), False, 'from pyamf import util\n'), ((31569, 31591), 'pyamf.util.get_class_meta', 'util.get_class_meta', (['B'], {}), '(B)\n', (31588, 31591), False, 'from pyamf import util\n'), ((32192, 32214), 'pyamf.util.get_class_meta', 'util.get_class_meta', (['A'], {}), '(A)\n', (32211, 32214), False, 'from pyamf import util\n'), ((32247, 32269), 'pyamf.util.get_class_meta', 'util.get_class_meta', (['B'], {}), '(B)\n', (32266, 32269), False, 'from pyamf import util\n'), ((32827, 32849), 'pyamf.util.get_class_meta', 'util.get_class_meta', (['A'], {}), '(A)\n', (32846, 32849), False, 'from pyamf import util\n'), ((32882, 32904), 'pyamf.util.get_class_meta', 'util.get_class_meta', (['B'], {}), '(B)\n', (32901, 32904), False, 'from pyamf import util\n'), ((33474, 33496), 'pyamf.util.get_class_meta', 'util.get_class_meta', (['A'], {}), '(A)\n', (33493, 33496), False, 'from pyamf import util\n'), ((33529, 33551), 'pyamf.util.get_class_meta', 'util.get_class_meta', (['B'], {}), '(B)\n', (33548, 33551), False, 'from pyamf import util\n'), ((34121, 34143), 'pyamf.util.get_class_meta', 'util.get_class_meta', (['A'], {}), '(A)\n', (34140, 34143), False, 'from pyamf import util\n'), ((34176, 34198), 'pyamf.util.get_class_meta', 'util.get_class_meta', (['B'], {}), '(B)\n', (34195, 34198), False, 'from pyamf import util\n'), ((35027, 35049), 'pyamf.util.get_class_meta', 'util.get_class_meta', (['A'], {}), '(A)\n', (35046, 35049), False, 'from pyamf import util\n'), ((35081, 35103), 'pyamf.util.get_class_meta', 'util.get_class_meta', (['B'], {}), '(B)\n', (35100, 35103), False, 'from pyamf import util\n'), ((35660, 35682), 'pyamf.util.get_class_meta', 'util.get_class_meta', (['A'], {}), '(A)\n', (35679, 35682), False, 'from pyamf import util\n'), ((35715, 35737), 'pyamf.util.get_class_meta', 'util.get_class_meta', (['B'], {}), '(B)\n', (35734, 35737), False, 'from pyamf import util\n'), ((36301, 36323), 'pyamf.util.get_class_meta', 'util.get_class_meta', (['A'], {}), '(A)\n', (36320, 36323), False, 'from pyamf import util\n'), ((36356, 36378), 'pyamf.util.get_class_meta', 'util.get_class_meta', (['B'], {}), '(B)\n', (36375, 36378), False, 'from pyamf import util\n'), ((713, 735), 'datetime.datetime', 'datetime', (['(2007)', '(11)', '(12)'], {}), '(2007, 11, 12)\n', (721, 735), False, 'from datetime 
import datetime\n')] |
# Copyright (c) 2013 Intel, Inc.
# Copyright (c) 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import typing as ty
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import strutils
from nova import exception
from nova import objects
from nova.objects import fields
from nova.objects import pci_device_pool
from nova.pci.request import PCI_REMOTE_MANAGED_TAG
from nova.pci import utils
from nova.pci import whitelist
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
# TODO(stephenfin): We might want to use TypedDict here. Refer to
# https://mypy.readthedocs.io/en/latest/kinds_of_types.html#typeddict for
# more information.
Pool = ty.Dict[str, ty.Any]
class PciDeviceStats(object):
"""PCI devices summary information.
According to the PCI SR-IOV spec, a PCI physical function can have up to
256 PCI virtual functions, thus the number of assignable PCI functions in
a cloud can be big. The scheduler needs to know all device availability
information in order to determine which compute hosts can support a PCI
request. Passing individual virtual device information to the scheduler
does not scale, so we provide summary information.
Usually the virtual functions provided by a host PCI device have the same
value for most properties, like vendor_id, product_id and class type.
The PCI stats class summarizes this information for the scheduler.
The pci stats information is maintained exclusively by compute node
resource tracker and updated to database. The scheduler fetches the
information and selects the compute node accordingly. If a compute
node is selected, the resource tracker allocates the devices to the
instance and updates the pci stats information.
This summary information will be helpful for cloud management also.
"""
pool_keys = ['product_id', 'vendor_id', 'numa_node', 'dev_type']
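    # Illustrative sketch (values are assumed, not taken from real hardware):
    # each pool is a plain dict keyed by the attributes above plus any
    # whitelist tags, with 'count' and 'devices' bookkeeping fields, e.g.:
    #
    #     {'product_id': '1515', 'vendor_id': '8086', 'numa_node': 0,
    #      'dev_type': 'type-VF', 'count': 2, 'devices': [<PciDevice>, ...]}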
def __init__(
self,
numa_topology: 'objects.NUMATopology',
stats: 'objects.PCIDevicePoolList' = None,
dev_filter: whitelist.Whitelist = None,
) -> None:
self.numa_topology = numa_topology
self.pools = (
[pci_pool.to_dict() for pci_pool in stats] if stats else []
)
self.pools.sort(key=lambda item: len(item))
self.dev_filter = dev_filter or whitelist.Whitelist(
CONF.pci.passthrough_whitelist)
def _equal_properties(
self, dev: Pool, entry: Pool, matching_keys: ty.List[str],
) -> bool:
return all(dev.get(prop) == entry.get(prop)
for prop in matching_keys)
def _find_pool(self, dev_pool: Pool) -> ty.Optional[Pool]:
"""Return the first pool that matches dev."""
for pool in self.pools:
pool_keys = pool.copy()
del pool_keys['count']
del pool_keys['devices']
if (len(pool_keys.keys()) == len(dev_pool.keys()) and
self._equal_properties(dev_pool, pool_keys, list(dev_pool))):
return pool
return None
@staticmethod
def _ensure_remote_managed_tag(
dev: 'objects.PciDevice', pool: Pool):
"""Add a remote_managed tag depending on a device type if needed.
Network devices may be managed remotely, e.g. by a SmartNIC DPU. If
a tag has not been explicitly provided, populate it by assuming that
a device is not remote managed by default.
"""
if dev.dev_type not in (fields.PciDeviceType.SRIOV_VF,
fields.PciDeviceType.SRIOV_PF,
fields.PciDeviceType.VDPA):
return
# A tag is added here rather than at the client side to avoid an
# issue with having objects without this tag specified during an
# upgrade to the first version that supports handling this tag.
if pool.get(PCI_REMOTE_MANAGED_TAG) is None:
# NOTE: tags are compared as strings case-insensitively, see
# pci_device_prop_match in nova/pci/utils.py.
pool[PCI_REMOTE_MANAGED_TAG] = 'false'
def _create_pool_keys_from_dev(
self, dev: 'objects.PciDevice',
) -> ty.Optional[Pool]:
"""Create a stats pool dict that this dev is supposed to be part of
Note that this pool dict contains the stats pool's keys and their
values. 'count' and 'devices' are not included.
"""
# Don't add a device that doesn't have a matching device spec.
# This can happen during initial sync up with the controller
devspec = self.dev_filter.get_devspec(dev)
if not devspec:
return None
tags = devspec.get_tags()
pool = {k: getattr(dev, k) for k in self.pool_keys}
if tags:
pool.update(tags)
# NOTE(gibi): parent_ifname acts like a tag during pci claim but
# not provided as part of the whitelist spec as it is auto detected
# by the virt driver.
        # This key is used to match InstancePciRequests backed by neutron ports
        # that have a resource_request and therefore already have a resource
        # allocation in placement.
if dev.extra_info.get('parent_ifname'):
pool['parent_ifname'] = dev.extra_info['parent_ifname']
self._ensure_remote_managed_tag(dev, pool)
return pool
def _get_pool_with_device_type_mismatch(
self, dev: 'objects.PciDevice',
) -> ty.Optional[ty.Tuple[Pool, 'objects.PciDevice']]:
"""Check for device type mismatch in the pools for a given device.
Return (pool, device) if device type does not match or a single None
if the device type matches.
"""
for pool in self.pools:
for device in pool['devices']:
if device.address == dev.address:
if dev.dev_type != pool["dev_type"]:
return pool, device
return None
return None
def update_device(self, dev: 'objects.PciDevice') -> None:
"""Update a device to its matching pool."""
pool_device_info = self._get_pool_with_device_type_mismatch(dev)
if pool_device_info is None:
return None
pool, device = pool_device_info
pool['devices'].remove(device)
self._decrease_pool_count(self.pools, pool)
self.add_device(dev)
def add_device(self, dev: 'objects.PciDevice') -> None:
"""Add a device to its matching pool."""
dev_pool = self._create_pool_keys_from_dev(dev)
if dev_pool:
pool = self._find_pool(dev_pool)
if not pool:
dev_pool['count'] = 0
dev_pool['devices'] = []
self.pools.append(dev_pool)
self.pools.sort(key=lambda item: len(item))
pool = dev_pool
pool['count'] += 1
pool['devices'].append(dev)
@staticmethod
def _decrease_pool_count(
pool_list: ty.List[Pool], pool: Pool, count: int = 1,
) -> int:
"""Decrement pool's size by count.
If pool becomes empty, remove pool from pool_list.
"""
if pool['count'] > count:
pool['count'] -= count
count = 0
else:
count -= pool['count']
pool_list.remove(pool)
return count
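    # Illustrative sketch (assumed values, not part of the original code):
    #
    #     pools = [{'count': 3}]
    #     PciDeviceStats._decrease_pool_count(pools, pools[0], 1)
    #     # -> returns 0 and the pool's count drops to 2
    #     PciDeviceStats._decrease_pool_count(pools, pools[0], 5)
    #     # -> the pool is exhausted, removed from `pools`, and 3 is returned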
def remove_device(self, dev: 'objects.PciDevice') -> None:
"""Remove one device from the first pool that it matches."""
dev_pool = self._create_pool_keys_from_dev(dev)
if dev_pool:
pool = self._find_pool(dev_pool)
if not pool:
raise exception.PciDevicePoolEmpty(
compute_node_id=dev.compute_node_id, address=dev.address)
pool['devices'].remove(dev)
self._decrease_pool_count(self.pools, pool)
def get_free_devs(self) -> ty.List['objects.PciDevice']:
free_devs: ty.List[objects.PciDevice] = []
for pool in self.pools:
free_devs.extend(pool['devices'])
return free_devs
def consume_requests(
self,
pci_requests: 'objects.InstancePCIRequests',
numa_cells: ty.Optional[ty.List['objects.InstanceNUMACell']] = None,
) -> ty.Optional[ty.List['objects.PciDevice']]:
alloc_devices: ty.List[objects.PciDevice] = []
for request in pci_requests:
count = request.count
pools = self._filter_pools(self.pools, request, numa_cells)
# Failed to allocate the required number of devices. Return the
# devices already allocated during previous iterations back to
# their pools
if not pools:
LOG.error("Failed to allocate PCI devices for instance. "
"Unassigning devices back to pools. "
"This should not happen, since the scheduler "
"should have accurate information, and allocation "
"during claims is controlled via a hold "
"on the compute node semaphore.")
for d in range(len(alloc_devices)):
self.add_device(alloc_devices.pop())
return None
for pool in pools:
if pool['count'] >= count:
num_alloc = count
else:
num_alloc = pool['count']
count -= num_alloc
pool['count'] -= num_alloc
for d in range(num_alloc):
pci_dev = pool['devices'].pop()
self._handle_device_dependents(pci_dev)
pci_dev.request_id = request.request_id
alloc_devices.append(pci_dev)
if count == 0:
break
return alloc_devices
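    # Illustrative usage sketch (object names are assumed, not part of the
    # original code):
    #
    #     stats = PciDeviceStats(numa_topology)
    #     for dev in discovered_devices:        # hypothetical iterable
    #         stats.add_device(dev)
    #     devs = stats.consume_requests(pci_requests)  # list of PCI requests
    #     if devs is None:
    #         pass  # claim failed; partially claimed devices were returned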
def _handle_device_dependents(self, pci_dev: 'objects.PciDevice') -> None:
"""Remove device dependents or a parent from pools.
In case the device is a PF, all of it's dependent VFs should
be removed from pools count, if these are present.
When the device is a VF, or a VDPA device, it's parent PF
pool count should be decreased, unless it is no longer in a pool.
"""
if pci_dev.dev_type == fields.PciDeviceType.SRIOV_PF:
vfs_list = pci_dev.child_devices
if vfs_list:
free_devs = self.get_free_devs()
for vf in vfs_list:
# NOTE(gibi): do not try to remove a device that are
# already removed
if vf in free_devs:
self.remove_device(vf)
elif pci_dev.dev_type in (
fields.PciDeviceType.SRIOV_VF,
fields.PciDeviceType.VDPA,
):
try:
parent = pci_dev.parent_device
                # Make sure not to decrease PF pool count if this parent has
                # already been removed from the pools
if parent in self.get_free_devs():
self.remove_device(parent)
except exception.PciDeviceNotFound:
return
def _filter_pools_for_spec(
self, pools: ty.List[Pool], request: 'objects.InstancePCIRequest',
) -> ty.List[Pool]:
"""Filter out pools that don't match the request's device spec.
Exclude pools that do not match the specified ``vendor_id``,
``product_id`` and/or ``device_type`` field, or any of the other
arbitrary tags such as ``physical_network``, specified in the request.
:param pools: A list of PCI device pool dicts
:param request: An InstancePCIRequest object describing the type,
quantity and required NUMA affinity of device(s) we want.
:returns: A list of pools that can be used to support the request if
this is possible.
"""
request_specs = request.spec
return [
pool for pool in pools
if utils.pci_device_prop_match(pool, request_specs)
]
def _filter_pools_for_numa_cells(
self,
pools: ty.List[Pool],
request: 'objects.InstancePCIRequest',
numa_cells: ty.Optional[ty.List['objects.InstanceNUMACell']],
) -> ty.List[Pool]:
"""Filter out pools with the wrong NUMA affinity, if required.
Exclude pools that do not have *suitable* PCI NUMA affinity.
``numa_policy`` determines what *suitable* means, being one of
PREFERRED (nice-to-have), LEGACY (must-have-if-available) and REQUIRED
(must-have). We iterate through the various policies in order of
strictness. This means that even if we only *prefer* PCI-NUMA affinity,
we will still attempt to provide it if possible.
:param pools: A list of PCI device pool dicts
:param request: An InstancePCIRequest object describing the type,
quantity and required NUMA affinity of device(s) we want.
:param numa_cells: A list of InstanceNUMACell objects whose ``id``
corresponds to the ``id`` of host NUMACells.
:returns: A list of pools that can, together, provide at least
``requested_count`` PCI devices with the level of NUMA affinity
required by ``numa_policy``, else all pools that can satisfy this
policy even if it's not enough.
"""
if not numa_cells:
return pools
# we default to the 'legacy' policy for...of course...legacy reasons
requested_policy = fields.PCINUMAAffinityPolicy.LEGACY
if 'numa_policy' in request:
requested_policy = request.numa_policy or requested_policy
requested_count = request.count
numa_cell_ids = [cell.id for cell in numa_cells]
# filter out pools which numa_node is not included in numa_cell_ids
filtered_pools = [
pool for pool in pools if any(utils.pci_device_prop_match(
pool, [{'numa_node': cell}]) for cell in numa_cell_ids)]
# we can't apply a less strict policy than the one requested, so we
# need to return if we've demanded a NUMA affinity of REQUIRED.
# However, NUMA affinity is a good thing. If we can get enough devices
# with the stricter policy then we will use them.
if requested_policy == fields.PCINUMAAffinityPolicy.REQUIRED or sum(
pool['count'] for pool in filtered_pools) >= requested_count:
return filtered_pools
# the SOCKET policy is a bit of a special case. It's less strict than
# REQUIRED (so REQUIRED will automatically fulfil SOCKET, at least
# with our assumption of never having multiple sockets per NUMA node),
# but not always more strict than LEGACY: a PCI device with no NUMA
# affinity will fulfil LEGACY but not SOCKET. If we have SOCKET,
# process it here and don't continue.
if requested_policy == fields.PCINUMAAffinityPolicy.SOCKET:
return self._filter_pools_for_socket_affinity(pools, numa_cells)
# some systems don't report NUMA node info for PCI devices, in which
# case None is reported in 'pci_device.numa_node'. The LEGACY policy
# allows us to use these devices so we include None in the list of
# suitable NUMA cells.
numa_cell_ids.append(None)
# filter out pools which numa_node is not included in numa_cell_ids
filtered_pools = [
pool for pool in pools if any(utils.pci_device_prop_match(
pool, [{'numa_node': cell}]) for cell in numa_cell_ids)]
# once again, we can't apply a less strict policy than the one
# requested, so we need to return if we've demanded a NUMA affinity of
# LEGACY. Similarly, we will also return if we have enough devices to
# satisfy this somewhat strict policy.
if requested_policy == fields.PCINUMAAffinityPolicy.LEGACY or sum(
pool['count'] for pool in filtered_pools) >= requested_count:
return filtered_pools
# if we've got here, we're using the PREFERRED policy and weren't able
# to provide anything with stricter affinity. Use whatever devices you
# can, folks.
return sorted(
pools, key=lambda pool: pool.get('numa_node') not in numa_cell_ids)
def _filter_pools_for_socket_affinity(
self,
pools: ty.List[Pool],
numa_cells: ty.List['objects.InstanceNUMACell'],
) -> ty.List[Pool]:
host_cells = self.numa_topology.cells
# bail early if we don't have socket information for all host_cells.
        # This could happen if we're running on a weird older system with
# multiple sockets per NUMA node, which is a configuration that we
# explicitly chose not to support.
if any(cell.socket is None for cell in host_cells):
LOG.debug('No socket information in host NUMA cell(s).')
return []
# get a set of host sockets that the guest cells are in. Since guest
# cell IDs map to host cell IDs, we can just lookup the latter's
# socket.
socket_ids = set()
for guest_cell in numa_cells:
for host_cell in host_cells:
if guest_cell.id == host_cell.id:
socket_ids.add(host_cell.socket)
# now get a set of host NUMA nodes that are in the above sockets
allowed_numa_nodes = set()
for host_cell in host_cells:
if host_cell.socket in socket_ids:
allowed_numa_nodes.add(host_cell.id)
# filter out pools that are not in one of the correct host NUMA nodes.
return [
pool for pool in pools if any(
utils.pci_device_prop_match(pool, [{'numa_node': numa_node}])
for numa_node in allowed_numa_nodes
)
]
def _filter_pools_for_unrequested_pfs(
self, pools: ty.List[Pool], request: 'objects.InstancePCIRequest',
) -> ty.List[Pool]:
"""Filter out pools with PFs, unless these are required.
This is necessary in cases where PFs and VFs have the same product_id
        and is generally useful elsewhere.
:param pools: A list of PCI device pool dicts
:param request: An InstancePCIRequest object describing the type,
quantity and required NUMA affinity of device(s) we want.
:returns: A list of pools that can be used to support the request if
this is possible.
"""
if all(
spec.get('dev_type') != fields.PciDeviceType.SRIOV_PF
for spec in request.spec
):
pools = [
pool for pool in pools
if not pool.get('dev_type') == fields.PciDeviceType.SRIOV_PF
]
return pools
def _filter_pools_for_unrequested_vdpa_devices(
self,
pools: ty.List[Pool],
request: 'objects.InstancePCIRequest',
) -> ty.List[Pool]:
"""Filter out pools with VDPA devices, unless these are required.
This is necessary as vdpa devices require special handling and
should not be allocated to generic pci device requests.
:param pools: A list of PCI device pool dicts
:param request: An InstancePCIRequest object describing the type,
quantity and required NUMA affinity of device(s) we want.
:returns: A list of pools that can be used to support the request if
this is possible.
"""
if all(
spec.get('dev_type') != fields.PciDeviceType.VDPA
for spec in request.spec
):
pools = [
pool for pool in pools
if not pool.get('dev_type') == fields.PciDeviceType.VDPA
]
return pools
def _filter_pools_for_unrequested_remote_managed_devices(
self, pools: ty.List[Pool], request: 'objects.InstancePCIRequest',
) -> ty.List[Pool]:
"""Filter out pools with remote_managed devices, unless requested.
Remote-managed devices are not usable for legacy SR-IOV or hardware
offload scenarios and must be excluded from allocation.
:param pools: A list of PCI device pool dicts
:param request: An InstancePCIRequest object describing the type,
quantity and required NUMA affinity of device(s) we want.
:returns: A list of pools that can be used to support the request if
this is possible.
"""
if all(not strutils.bool_from_string(spec.get(PCI_REMOTE_MANAGED_TAG))
for spec in request.spec):
pools = [pool for pool in pools
if not strutils.bool_from_string(
pool.get(PCI_REMOTE_MANAGED_TAG))]
return pools
def _filter_pools(
self,
pools: ty.List[Pool],
request: 'objects.InstancePCIRequest',
numa_cells: ty.Optional[ty.List['objects.InstanceNUMACell']],
) -> ty.Optional[ty.List[Pool]]:
"""Determine if an individual PCI request can be met.
Filter pools, which are collections of devices with similar traits, to
identify those that can support the provided PCI request.
If ``numa_cells`` is provided then NUMA locality may be taken into
account, depending on the value of ``request.numa_policy``.
:param pools: A list of PCI device pool dicts
:param request: An InstancePCIRequest object describing the type,
quantity and required NUMA affinity of device(s) we want.
:param numa_cells: A list of InstanceNUMACell objects whose ``id``
corresponds to the ``id`` of host NUMACell objects.
:returns: A list of pools that can be used to support the request if
this is possible, else None.
"""
# NOTE(vladikr): This code may be open to race conditions.
        # Two concurrent requests may both succeed when support_requests is
        # called, because this method does not remove the matched devices from
        # the pools.
# Firstly, let's exclude all devices that don't match our spec (e.g.
# they've got different PCI IDs or something)
before_count = sum([pool['count'] for pool in pools])
pools = self._filter_pools_for_spec(pools, request)
after_count = sum([pool['count'] for pool in pools])
if after_count < before_count:
LOG.debug(
'Dropped %d device(s) due to mismatched PCI attribute(s)',
before_count - after_count
)
if after_count < request.count:
LOG.debug('Not enough PCI devices left to satisfy request')
return None
# Next, let's exclude all devices that aren't on the correct NUMA node
# or socket, *assuming* we have devices and care about that, as
# determined by policy
before_count = after_count
pools = self._filter_pools_for_numa_cells(pools, request, numa_cells)
after_count = sum([pool['count'] for pool in pools])
if after_count < before_count:
LOG.debug(
'Dropped %d device(s) as they are on the wrong NUMA node(s)',
before_count - after_count
)
if after_count < request.count:
LOG.debug('Not enough PCI devices left to satisfy request')
return None
# If we're not requesting PFs then we should not use these.
# Exclude them.
before_count = after_count
pools = self._filter_pools_for_unrequested_pfs(pools, request)
after_count = sum([pool['count'] for pool in pools])
if after_count < before_count:
LOG.debug(
'Dropped %d device(s) as they are PFs which we have not '
'requested',
before_count - after_count
)
if after_count < request.count:
LOG.debug('Not enough PCI devices left to satisfy request')
return None
# If we're not requesting VDPA devices then we should not use these
# either. Exclude them.
before_count = after_count
pools = self._filter_pools_for_unrequested_vdpa_devices(pools, request)
after_count = sum([pool['count'] for pool in pools])
if after_count < before_count:
LOG.debug(
'Dropped %d device(s) as they are VDPA devices which we have '
'not requested',
before_count - after_count
)
# If we're not requesting remote_managed devices then we should not
# use these either. Exclude them.
before_count = after_count
pools = self._filter_pools_for_unrequested_remote_managed_devices(
pools, request)
after_count = sum([pool['count'] for pool in pools])
if after_count < before_count:
LOG.debug(
                'Dropped %d device(s) as they are remote-managed devices '
                'which we have not requested',
before_count - after_count
)
if after_count < request.count:
LOG.debug('Not enough PCI devices left to satisfy request')
return None
return pools
def support_requests(
self,
requests: ty.List['objects.InstancePCIRequest'],
numa_cells: ty.Optional[ty.List['objects.InstanceNUMACell']] = None,
) -> bool:
"""Determine if the PCI requests can be met.
Determine, based on a compute node's PCI stats, if an instance can be
scheduled on the node. **Support does not mean real allocation**.
If ``numa_cells`` is provided then NUMA locality may be taken into
account, depending on the value of ``numa_policy``.
:param requests: A list of InstancePCIRequest object describing the
types, quantities and required NUMA affinities of devices we want.
:type requests: nova.objects.InstancePCIRequests
:param numa_cells: A list of InstanceNUMACell objects whose ``id``
corresponds to the ``id`` of host NUMACells, or None.
:returns: Whether this compute node can satisfy the given request.
"""
        # NOTE(yjiang5): this function has a high possibility of failing,
        # so no exception should be triggered, for performance reasons.
return all(
self._filter_pools(self.pools, r, numa_cells) for r in requests
)
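    # Illustrative sketch (call site is assumed): the scheduler-side check is
    # non-destructive, so it can be repeated without consuming devices:
    #
    #     if stats.support_requests(pci_requests):
    #         pass  # host remains a candidate; allocation happens at claim time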
def _apply_request(
self,
pools: ty.List[Pool],
request: 'objects.InstancePCIRequest',
numa_cells: ty.Optional[ty.List['objects.InstanceNUMACell']] = None,
) -> bool:
"""Apply an individual PCI request.
Apply a PCI request against a given set of PCI device pools, which are
collections of devices with similar traits.
If ``numa_cells`` is provided then NUMA locality may be taken into
account, depending on the value of ``request.numa_policy``.
:param pools: A list of PCI device pool dicts
:param request: An InstancePCIRequest object describing the type,
quantity and required NUMA affinity of device(s) we want.
:param numa_cells: A list of InstanceNUMACell objects whose ``id``
corresponds to the ``id`` of host NUMACell objects.
:returns: True if the request was applied against the provided pools
successfully, else False.
"""
        # NOTE(vladikr): This code may be open to race conditions.
        # Two concurrent requests may both succeed when support_requests is
        # called, because this method does not remove the matched devices from
        # the pools.
filtered_pools = self._filter_pools(pools, request, numa_cells)
if not filtered_pools:
return False
count = request.count
for pool in filtered_pools:
count = self._decrease_pool_count(pools, pool, count)
if not count:
break
return True
def apply_requests(
self,
requests: ty.List['objects.InstancePCIRequest'],
numa_cells: ty.Optional[ty.List['objects.InstanceNUMACell']] = None,
) -> None:
"""Apply PCI requests to the PCI stats.
This is used in multiple instance creation, when the scheduler has to
maintain how the resources are consumed by the instances.
If ``numa_cells`` is provided then NUMA locality may be taken into
account, depending on the value of ``numa_policy``.
:param requests: A list of InstancePCIRequest object describing the
types, quantities and required NUMA affinities of devices we want.
:type requests: nova.objects.InstancePCIRequests
:param numa_cells: A list of InstanceNUMACell objects whose ``id``
corresponds to the ``id`` of host NUMACells, or None.
:raises: exception.PciDeviceRequestFailed if this compute node cannot
satisfy the given request.
"""
if not all(
self._apply_request(self.pools, r, numa_cells) for r in requests
):
raise exception.PciDeviceRequestFailed(requests=requests)
def __iter__(self) -> ty.Iterator[Pool]:
pools: ty.List[Pool] = []
for pool in self.pools:
pool = copy.deepcopy(pool)
# 'devices' shouldn't be part of stats
if 'devices' in pool:
del pool['devices']
pools.append(pool)
return iter(pools)
def clear(self) -> None:
"""Clear all the stats maintained."""
self.pools = []
def __eq__(self, other: object) -> bool:
if not isinstance(other, PciDeviceStats):
return NotImplemented
return self.pools == other.pools
def to_device_pools_obj(self) -> 'objects.PciDevicePoolList':
"""Return the contents of the pools as a PciDevicePoolList object."""
stats = [x for x in self]
return pci_device_pool.from_pci_stats(stats)
def has_remote_managed_device_pools(self) -> bool:
"""Determine whether remote managed device pools are present on a host.
The check is pool-based, not free device-based and is NUMA cell
agnostic.
"""
dummy_req = objects.InstancePCIRequest(
count=0,
spec=[{'remote_managed': True}]
)
pools = self._filter_pools_for_spec(self.pools, dummy_req)
return bool(pools)
| [
"nova.exception.PciDevicePoolEmpty",
"nova.objects.InstancePCIRequest",
"nova.exception.PciDeviceRequestFailed",
"nova.objects.pci_device_pool.from_pci_stats",
"nova.pci.whitelist.Whitelist",
"copy.deepcopy",
"nova.pci.utils.pci_device_prop_match",
"oslo_log.log.getLogger"
] | [((1061, 1088), 'oslo_log.log.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1078, 1088), True, 'from oslo_log import log as logging\n'), ((30746, 30783), 'nova.objects.pci_device_pool.from_pci_stats', 'pci_device_pool.from_pci_stats', (['stats'], {}), '(stats)\n', (30776, 30783), False, 'from nova.objects import pci_device_pool\n'), ((31043, 31111), 'nova.objects.InstancePCIRequest', 'objects.InstancePCIRequest', ([], {'count': '(0)', 'spec': "[{'remote_managed': True}]"}), "(count=0, spec=[{'remote_managed': True}])\n", (31069, 31111), False, 'from nova import objects\n'), ((2940, 2991), 'nova.pci.whitelist.Whitelist', 'whitelist.Whitelist', (['CONF.pci.passthrough_whitelist'], {}), '(CONF.pci.passthrough_whitelist)\n', (2959, 2991), False, 'from nova.pci import whitelist\n'), ((29899, 29950), 'nova.exception.PciDeviceRequestFailed', 'exception.PciDeviceRequestFailed', ([], {'requests': 'requests'}), '(requests=requests)\n', (29931, 29950), False, 'from nova import exception\n'), ((30082, 30101), 'copy.deepcopy', 'copy.deepcopy', (['pool'], {}), '(pool)\n', (30095, 30101), False, 'import copy\n'), ((8300, 8391), 'nova.exception.PciDevicePoolEmpty', 'exception.PciDevicePoolEmpty', ([], {'compute_node_id': 'dev.compute_node_id', 'address': 'dev.address'}), '(compute_node_id=dev.compute_node_id, address=\n dev.address)\n', (8328, 8391), False, 'from nova import exception\n'), ((12685, 12733), 'nova.pci.utils.pci_device_prop_match', 'utils.pci_device_prop_match', (['pool', 'request_specs'], {}), '(pool, request_specs)\n', (12712, 12733), False, 'from nova.pci import utils\n'), ((14626, 14682), 'nova.pci.utils.pci_device_prop_match', 'utils.pci_device_prop_match', (['pool', "[{'numa_node': cell}]"], {}), "(pool, [{'numa_node': cell}])\n", (14653, 14682), False, 'from nova.pci import utils\n'), ((16218, 16274), 'nova.pci.utils.pci_device_prop_match', 'utils.pci_device_prop_match', (['pool', "[{'numa_node': cell}]"], {}), "(pool, [{'numa_node': cell}])\n", (16245, 16274), False, 'from nova.pci import utils\n'), ((18483, 18544), 'nova.pci.utils.pci_device_prop_match', 'utils.pci_device_prop_match', (['pool', "[{'numa_node': numa_node}]"], {}), "(pool, [{'numa_node': numa_node}])\n", (18510, 18544), False, 'from nova.pci import utils\n')] |
import multiple
multiple.rename("C:/Users/Username/Desktop",'new_name',33,'.exe')
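# Hedged sketch (illustrative only; the internals of `multiple.rename` are not
# shown here, so its exact behaviour is assumed): an equivalent bulk rename
# using just the standard library could look roughly like this.
#
#     import os
#
#     def bulk_rename(folder, new_name, start, extension):
#         for count, name in enumerate(sorted(os.listdir(folder)), start):
#             os.rename(os.path.join(folder, name),
#                       os.path.join(folder, "%s%d%s" % (new_name, count, extension)))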
"""this above lines renames all the files of the folder Desktop to 'new_name' and
count starts from 33 to further (we can also provide 1 to start it from 1) and
extension is given '.exe'
hence the files will be renamed like :
1. new_name33.exe
2. new_name34.exe and so on
""" | [
"multiple.rename"
] | [((17, 85), 'multiple.rename', 'multiple.rename', (['"""C:/Users/Username/Desktop"""', '"""new_name"""', '(33)', '""".exe"""'], {}), "('C:/Users/Username/Desktop', 'new_name', 33, '.exe')\n", (32, 85), False, 'import multiple\n')] |
import os
def readlinkabs(l):
"""
Return an absolute path for the destination
of a symlink
"""
if not (os.path.islink(l)):
return None
p = os.readlink(l)
if os.path.isabs(p):
return p
return os.path.join(os.path.dirname(l), p)
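# Usage sketch (the path is an assumed example, not part of the original code):
#
#     target = readlinkabs('/tmp/example-link')
#     if target is None:
#         print('not a symlink')
#     else:
#         print(target)   # absolute path of the link destination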
| [
"os.path.dirname",
"os.path.islink",
"os.readlink",
"os.path.isabs"
] | [((173, 187), 'os.readlink', 'os.readlink', (['l'], {}), '(l)\n', (184, 187), False, 'import os\n'), ((195, 211), 'os.path.isabs', 'os.path.isabs', (['p'], {}), '(p)\n', (208, 211), False, 'import os\n'), ((125, 142), 'os.path.islink', 'os.path.islink', (['l'], {}), '(l)\n', (139, 142), False, 'import os\n'), ((254, 272), 'os.path.dirname', 'os.path.dirname', (['l'], {}), '(l)\n', (269, 272), False, 'import os\n')] |
"""Generic functionality useful for all gene representations.
This module contains classes which can be used for all the different
types of patterns available for representing gene information (ie. motifs,
signatures and schemas). These are the general classes which should be
handle any of the different specific patterns.
"""
# standard library
import random
# biopython
from Bio import utils
from Bio.Seq import Seq, MutableSeq
class PatternIO:
"""Allow reading and writing of patterns to files.
    This just defines a simple persistence class for patterns, making
it easy to write them to a file and read 'em back.
"""
def __init__(self, alphabet = None):
"""Intialize the reader and writer class.
Arguments:
o alphabet - An optional argument specifying the alphabet
which patterns should follow. If an alphabet is set it'll be used
to verify that all patterns follow it.
Attributes:
o separator - A character to use in separating items in a signature
when it is written to a file and read back. This character should
not be in the possible alphabet of the sequences, or there will
be trouble.
"""
self._alphabet = alphabet
self.separator = ";"
def write(self, pattern_list, output_handle):
"""Write a list of patterns to the given handle.
"""
for pattern in pattern_list:
            # deal with signatures, concatenate them with the separator
            if isinstance(pattern, (list, tuple)):
string_pattern = self.separator.join(pattern)
# deal with the normal cases
else:
string_pattern = pattern
output_handle.write("%s\n" % string_pattern)
def write_seq(self, seq_pattern_list, output_handle):
"""Convenience function to write Seq objects to a file.
This can take Seqs and MutableSeqs, and write them to a file
as strings.
"""
# convert the seq patterns into just string patterns
all_patterns = []
for seq_pattern in seq_pattern_list:
if isinstance(seq_pattern, MutableSeq):
seq = seq_pattern.toseq()
all_patterns.append(seq.data)
elif isinstance(seq_pattern, Seq):
all_patterns.append(seq_pattern.data)
else:
raise ValueError("Unexpected pattern type %r" % seq_pattern)
self.write(all_patterns, output_handle)
def read(self, input_handle):
"""Read patterns from the specified handle.
"""
all_patterns = []
while 1:
cur_line = input_handle.readline()
if not(cur_line):
break
cur_pattern = cur_line.rstrip()
# split up signatures
if cur_pattern.find(self.separator) >= 0:
cur_pattern = tuple(cur_pattern.split(self.separator))
if self._alphabet is not None:
# make single patterns (not signatures) into lists, so we
# can check signatures and single patterns the same
                if not isinstance(cur_pattern, tuple):
test_pattern = [cur_pattern]
else:
test_pattern = cur_pattern
for pattern_item in test_pattern:
pattern_seq = Seq(pattern_item, self._alphabet)
if not(utils.verify_alphabet(pattern_seq)):
raise ValueError("Pattern %s not matching alphabet %s"
% (cur_pattern, self._alphabet))
all_patterns.append(cur_pattern)
return all_patterns
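# Usage sketch (file name and patterns are assumed; not part of the original
# module):
#
#     writer = PatternIO()                  # no alphabet verification
#     with open("patterns.txt", "w") as handle:
#         writer.write(["GATC", "TTAA"], handle)
#     with open("patterns.txt") as handle:
#         found = writer.read(handle)       # -> ['GATC', 'TTAA']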
class PatternRepository:
"""This holds a list of specific patterns found in sequences.
This is designed to be a general holder for a set of patterns and
should be subclassed for specific implementations (ie. holding Motifs
    or Signatures).
"""
def __init__(self, pattern_info):
"""Initialize a repository with patterns,
Arguments:
o pattern_info - A representation of all of the patterns found in
a *Finder search. This should be a dictionary, where the keys
are patterns, and the values are the number of times a pattern is
found.
        The patterns are represented internally as a list of two-element
        tuples, where the first element is the number of times a pattern
occurs, and the second is the pattern itself. This makes it easy
to sort the list and return the top N patterns.
"""
self._pattern_dict = pattern_info
# create the list representation
self._pattern_list = []
for pattern_name in self._pattern_dict.keys():
self._pattern_list.append((self._pattern_dict[pattern_name],
pattern_name))
self._pattern_list.sort()
self._pattern_list.reverse()
def get_all(self):
"""Retrieve all of the patterns in the repository.
"""
patterns = []
for pattern_info in self._pattern_list:
patterns.append(pattern_info[1])
return patterns
def get_random(self, num_patterns):
"""Retrieve the specified number of patterns randomly.
Randomly selects patterns from the list and returns them.
Arguments:
o num_patterns - The total number of patterns to return.
"""
all_patterns = []
while len(all_patterns) < num_patterns:
# pick a pattern, and only add it if it is not already present
new_pattern_info = random.choice(self._pattern_list)
if new_pattern_info[1] not in all_patterns:
all_patterns.append(new_pattern_info[1])
return all_patterns
def get_top_percentage(self, percent):
"""Return a percentage of the patterns.
This returns the top 'percent' percentage of the patterns in the
repository.
"""
all_patterns = self.get_all()
num_to_return = int(len(all_patterns) * percent)
return all_patterns[:num_to_return]
def get_top(self, num_patterns):
"""Return the specified number of most frequently occurring patterns
Arguments:
o num_patterns - The number of patterns to return.
"""
all_patterns = []
for pattern_info in self._pattern_list[:num_patterns]:
all_patterns.append(pattern_info[1])
return all_patterns
def get_differing(self, top_num, bottom_num):
"""Retrieve patterns that are at the extreme ranges.
This returns both patterns at the top of the list (ie. the same as
returned by get_top) and at the bottom of the list. This
is especially useful for patterns that are the differences between
two sets of patterns.
Arguments:
o top_num - The number of patterns to take from the top of the list.
o bottom_num - The number of patterns to take from the bottom of
the list.
"""
all_patterns = []
# first get from the top of the list
for pattern_info in self._pattern_list[:top_num]:
all_patterns.append(pattern_info[1])
# then from the bottom
for pattern_info in self._pattern_list[-bottom_num:]:
all_patterns.append(pattern_info[1])
return all_patterns
def remove_polyA(self, at_percentage = .9):
"""Remove patterns which are likely due to polyA tails from the lists.
        This is just a helper function to remove patterns which are likely
just due to polyA tails, and thus are not really great motifs.
This will also get rid of stuff like ATATAT, which might be a
useful motif, so use at your own discretion.
XXX Could we write a more general function, based on info content
or something like that?
Arguments:
o at_percentage - The percentage of A and T residues in a pattern
that qualifies it for being removed.
"""
remove_list = []
# find all of the really AT rich patterns
for pattern_info in self._pattern_list:
pattern_at = float(pattern_info[1].count('A') + pattern_info[1].count('T')) / len(pattern_info[1])
if pattern_at > at_percentage:
remove_list.append(pattern_info)
# now remove them from the master list
for to_remove in remove_list:
self._pattern_list.remove(to_remove)
def count(self, pattern):
"""Return the number of times the specified pattern is found.
"""
try:
return self._pattern_dict[pattern]
except KeyError:
return 0
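# Usage sketch (counts are made up; not part of the original module):
#
#     repo = PatternRepository({'GATC': 15, 'AAAA': 9, 'CCGG': 3})
#     repo.get_top(2)       # -> ['GATC', 'AAAA'] (most frequent first)
#     repo.count('CCGG')    # -> 3
#     repo.remove_polyA()   # drops the A/T-rich 'AAAA' entry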
| [
"Bio.Seq.Seq",
"random.choice",
"Bio.utils.verify_alphabet"
] | [((5776, 5809), 'random.choice', 'random.choice', (['self._pattern_list'], {}), '(self._pattern_list)\n', (5789, 5809), False, 'import random\n'), ((3497, 3530), 'Bio.Seq.Seq', 'Seq', (['pattern_item', 'self._alphabet'], {}), '(pattern_item, self._alphabet)\n', (3500, 3530), False, 'from Bio.Seq import Seq, MutableSeq\n'), ((3558, 3592), 'Bio.utils.verify_alphabet', 'utils.verify_alphabet', (['pattern_seq'], {}), '(pattern_seq)\n', (3579, 3592), False, 'from Bio import utils\n')] |
# -*- test-case-name: epsilon.test.test_juice -*-
# Copyright 2005 Divmod, Inc. See LICENSE file for details
import warnings, pprint
import keyword
import base64
import io
import six
from twisted.internet.main import CONNECTION_LOST
from twisted.internet.defer import Deferred, maybeDeferred, fail
from twisted.internet.protocol import ServerFactory, ClientFactory
from twisted.internet.ssl import Certificate
from twisted.python.failure import Failure
from twisted.python import log, filepath
from epsilon.liner import LineReceiver
from epsilon.compat import long
from epsilon import extime
ASK = '_ask'
ANSWER = '_answer'
COMMAND = '_command'
ERROR = '_error'
ERROR_CODE = '_error_code'
ERROR_DESCRIPTION = '_error_description'
LENGTH = '_length'
BODY = 'body'
debug = False
class JuiceBox(dict):
""" I am a packet in the JUICE protocol. """
def __init__(self, __body='', **kw):
self.update(kw)
if __body:
            assert isinstance(__body, str), "body must be a string: %r" % (__body,)
self['body'] = __body
def body():
def get(self):
warnings.warn("body attribute of boxes is now just a regular field",
stacklevel=2)
return self['body']
def set(self, newbody):
warnings.warn("body attribute of boxes is now just a regular field",
stacklevel=2)
self['body'] = newbody
return get,set
body = property(*body())
def copy(self):
newBox = self.__class__()
newBox.update(self)
return newBox
def serialize(self,
delimiter=b'\r\n',
escaped=b'\r\n '):
assert LENGTH not in self
delimiter = six.ensure_binary(delimiter)
escaped = six.ensure_binary(escaped)
L = []
for (k, v) in six.viewitems(self):
if k == BODY:
k = LENGTH
v = str(len(self[BODY]))
L.append(six.ensure_binary(k).replace(b'_', b'-').title())
L.append(b': ')
L.append(six.ensure_binary(v).replace(delimiter, escaped))
L.append(delimiter)
L.append(delimiter)
if BODY in self:
L.append(six.ensure_binary(self[BODY]))
return b''.join(L)
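    # Illustrative sketch (field values are assumed, not part of the original
    # code): a box such as JuiceBox(_ask='1', _command='hello') serializes to
    # roughly
    #
    #     b'-Ask: 1\r\n-Command: hello\r\n\r\n'
    #
    # with a -Length header and the raw body bytes appended when a 'body'
    # field is present.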
def sendTo(self, proto):
"""
Serialize and send this box to a Juice instance. By the time it is
being sent, several keys are required. I must have exactly ONE of::
-ask
-answer
-error
If the '-ask' header is set, then the '-command' header must also be
set.
"""
proto.sendPacket(self)
# juice.Box => JuiceBox
Box = JuiceBox
class TLSBox(JuiceBox):
def __repr__(self):
return 'TLS(**%s)' % (super(TLSBox, self).__repr__(),)
def __init__(self, __certificate, __verify=None, __sslstarted=None, **kw):
super(TLSBox, self).__init__(**kw)
self.certificate = __certificate
self.verify = __verify
self.sslstarted = __sslstarted
def sendTo(self, proto):
super(TLSBox, self).sendTo(proto)
if self.verify is None:
proto.startTLS(self.certificate)
else:
proto.startTLS(self.certificate, self.verify)
if self.sslstarted is not None:
self.sslstarted()
class QuitBox(JuiceBox):
def __repr__(self):
return 'Quit(**%s)' % (super(QuitBox, self).__repr__(),)
def sendTo(self, proto):
super(QuitBox, self).sendTo(proto)
proto.transport.loseConnection()
class _SwitchBox(JuiceBox):
def __repr__(self):
return 'Switch(**%s)' % (super(_SwitchBox, self).__repr__(),)
def __init__(self, __proto, **kw):
super(_SwitchBox, self).__init__(**kw)
self.innerProto = __proto
def sendTo(self, proto):
super(_SwitchBox, self).sendTo(proto)
proto._switchTo(self.innerProto)
class NegotiateBox(JuiceBox):
def __repr__(self):
return 'Negotiate(**%s)' % (super(NegotiateBox, self).__repr__(),)
def sendTo(self, proto):
super(NegotiateBox, self).sendTo(proto)
proto._setProtocolVersion(int(self['version']))
class JuiceError(Exception):
pass
class RemoteJuiceError(JuiceError):
"""
This error indicates that something went wrong on the remote end of the
connection, and the error was serialized and transmitted to you.
"""
def __init__(self, errorCode, description, fatal=False):
"""Create a remote error with an error code and description.
"""
Exception.__init__(self, "Remote[%s]: %s" % (errorCode, description))
self.errorCode = errorCode
self.description = description
self.fatal = fatal
class UnhandledRemoteJuiceError(RemoteJuiceError):
def __init__(self, description):
errorCode = b"UNHANDLED"
RemoteJuiceError.__init__(self, errorCode, description)
class JuiceBoxError(JuiceError):
pass
class MalformedJuiceBox(JuiceBoxError):
pass
class UnhandledCommand(JuiceError):
pass
class IncompatibleVersions(JuiceError):
pass
class _Transactor:
def __init__(self, store, callable):
self.store = store
self.callable = callable
def __call__(self, box):
return self.store.transact(self.callable, box)
def __repr__(self):
return '<Transaction in: %s of: %s>' % (self.store, self.callable)
class DispatchMixin:
baseDispatchPrefix = 'juice_'
autoDispatchPrefix = 'command_'
wrapper = None
def _auto(self, aCallable, proto, namespace=None):
if aCallable is None:
return None
command = aCallable.command
if namespace not in command.namespaces:
# if you're in the wrong namespace, you are very likely not allowed
# to invoke the command you are trying to invoke. some objects
# have commands exposed in a separate namespace for security
# reasons, since the security model is a role : namespace mapping.
log.msg('WRONG NAMESPACE: %r, %r' % (namespace, command.namespaces))
return None
def doit(box):
kw = stringsToObjects(box, command.arguments, proto)
for name, extraArg in command.extra:
kw[name] = extraArg.fromTransport(proto.transport)
# def checkIsDict(result):
# if not isinstance(result, dict):
# raise RuntimeError("%r returned %r, not dictionary" % (
# aCallable, result))
# return result
def checkKnownErrors(error):
key = error.trap(*command.allErrors)
code = command.allErrors[key]
desc = str(error.value)
return Failure(RemoteJuiceError(
code, desc, error in command.fatalErrors))
return maybeDeferred(aCallable, **kw).addCallback(
command.makeResponse, proto).addErrback(
checkKnownErrors)
return doit
def _wrap(self, aCallable):
if aCallable is None:
return None
wrap = self.wrapper
if wrap is not None:
return wrap(aCallable)
else:
return aCallable
def normalizeCommand(self, cmd):
"""Return the canonical form of a command.
"""
return cmd.upper().strip().replace('-', '_')
def lookupFunction(self, proto, name, namespace):
"""Return a callable to invoke when executing the named command.
"""
# Try to find a method to be invoked in a transaction first
# Otherwise fallback to a "regular" method
fName = self.autoDispatchPrefix + name
fObj = getattr(self, fName, None)
if fObj is not None:
# pass the namespace along
return self._auto(fObj, proto, namespace)
assert namespace is None, 'Old-style parsing'
# Fall back to simplistic command dispatching - we probably want to get
# rid of this eventually, there's no reason to do extra work and write
# fewer docs all the time.
fName = self.baseDispatchPrefix + name
return getattr(self, fName, None)
def dispatchCommand(self, proto, cmd, box, namespace=None):
fObj = self.lookupFunction(proto, self.normalizeCommand(cmd), namespace)
if fObj is None:
return fail(UnhandledCommand(cmd))
return maybeDeferred(self._wrap(fObj), box)
def normalizeKey(key):
lkey = six.ensure_str(key).lower().replace('-', '_')
if keyword.iskeyword(lkey):
return lkey.title()
return lkey
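# Examples (illustrative, not part of the original code):
#
#     normalizeKey(b'-Error-Description')   # -> '_error_description'
#     normalizeKey(b'From')                 # -> 'From'  ('from' is a Python
#                                           #    keyword, so it is title-cased)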
def parseJuiceHeaders(lines):
"""
Create a JuiceBox from a list of header lines.
@param lines: a list of lines.
@type lines: a list of L{bytes}
"""
b = JuiceBox()
key = None
for L in lines:
if L[0:1] == b' ':
# continuation
assert key is not None
b[key] += six.ensure_str(b'\r\n' + L[1:])
continue
parts = L.split(b': ', 1)
if len(parts) != 2:
raise MalformedJuiceBox("Wrong number of parts: %r" % (L,))
key, value = parts
key = normalizeKey(key)
b[key] = six.ensure_str(value)
return int(b.pop(LENGTH, 0)), b
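# Illustrative sketch (header lines are assumed, not part of the original
# code):
#
#     length, box = parseJuiceHeaders([b'-Command: hello', b'-Ask: 1'])
#     # -> length == 0, box == {'_command': 'hello', '_ask': '1'}
#
# A '-Length' header, when present, is popped from the box and returned as
# the integer length of the body that follows.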
class JuiceParserBase(DispatchMixin):
def __init__(self):
self._outstandingRequests = {}
def _puke(self, failure):
log.msg("Juice server or network failure "
"unhandled by client application:")
log.err(failure)
log.msg(
"Dropping connection! "
"To avoid, add errbacks to ALL remote commands!")
if self.transport is not None:
self.transport.loseConnection()
_counter = long(0)
def _nextTag(self):
self._counter += 1
return '%x' % (self._counter,)
def failAllOutgoing(self, reason):
OR = self._outstandingRequests.items()
self._outstandingRequests = None # we can never send another request
for key, value in OR:
value.errback(reason)
def juiceBoxReceived(self, box):
if debug:
log.msg("Juice receive: %s" % pprint.pformat(dict(six.viewitems(box))))
if ANSWER in box:
question = self._outstandingRequests.pop(box[ANSWER])
question.addErrback(self._puke)
self._wrap(question.callback)(box)
elif ERROR in box:
question = self._outstandingRequests.pop(box[ERROR])
question.addErrback(self._puke)
self._wrap(question.errback)(
Failure(RemoteJuiceError(box[ERROR_CODE],
box[ERROR_DESCRIPTION])))
elif COMMAND in box:
cmd = box[COMMAND]
def sendAnswer(answerBox):
if ASK not in box:
return
if self.transport is None:
return
answerBox[ANSWER] = box[ASK]
answerBox.sendTo(self)
def sendError(error):
if ASK not in box:
return error
if error.check(RemoteJuiceError):
code = error.value.errorCode
desc = error.value.description
if error.value.fatal:
errorBox = QuitBox()
else:
errorBox = JuiceBox()
else:
errorBox = QuitBox()
log.err(error) # here is where server-side logging happens
# if the error isn't handled
code = 'UNHANDLED'
desc = "Unhandled Remote System Exception "
errorBox[ERROR] = box[ASK]
errorBox[ERROR_DESCRIPTION] = desc
errorBox[ERROR_CODE] = code
if self.transport is not None:
errorBox.sendTo(self)
return None # intentionally stop the error here: don't log the
# traceback if it's handled, do log it (earlier) if
# it isn't
self.dispatchCommand(self, cmd, box).addCallbacks(sendAnswer, sendError
).addErrback(self._puke)
else:
raise RuntimeError(
"Empty packet received over connection-oriented juice: %r" % (box,))
def sendBoxCommand(self, command, box, requiresAnswer=True):
"""
Send a command across the wire with the given C{juice.Box}.
Returns a Deferred which fires with the response C{juice.Box} when it
is received, or fails with a C{juice.RemoteJuiceError} if an error is
received.
If the Deferred fails and the error is not handled by the caller of
this method, the failure will be logged and the connection dropped.
"""
if self._outstandingRequests is None:
return fail(CONNECTION_LOST)
box[COMMAND] = command
tag = self._nextTag()
if requiresAnswer:
box[ASK] = tag
result = self._outstandingRequests[tag] = Deferred()
else:
result = None
box.sendTo(self)
return result
class Argument:
optional = False
def __init__(self, optional=False):
self.optional = optional
def retrieve(self, d, name):
if self.optional:
value = d.get(name)
if value is not None:
del d[name]
else:
value = d.pop(name)
return value
def fromBox(self, name, strings, objects, proto):
st = self.retrieve(strings, name)
if self.optional and st is None:
objects[name] = None
else:
objects[name] = self.fromStringProto(st, proto)
def toBox(self, name, strings, objects, proto):
obj = self.retrieve(objects, name)
if self.optional and obj is None:
# strings[name] = None
return
else:
strings[name] = self.toStringProto(obj, proto)
def fromStringProto(self, inString, proto):
return self.fromString(inString)
def toStringProto(self, inObject, proto):
return self.toString(inObject)
def fromString(self, inString):
raise NotImplementedError()
def toString(self, inObject):
raise NotImplementedError()
class JuiceList(Argument):
def __init__(self, subargs):
self.subargs = subargs
def fromStringProto(self, inString, proto):
boxes = parseString(six.ensure_binary(inString))
values = [stringsToObjects(box, self.subargs, proto)
for box in boxes]
return values
def toStringProto(self, inObject, proto):
return b''.join([
objectsToStrings(objects, self.subargs, Box(), proto).serialize()
for objects in inObject
])
class ListOf(Argument):
def __init__(self, subarg, delimiter=', '):
self.subarg = subarg
self.delimiter = delimiter
def fromStringProto(self, inString, proto):
strings = inString.split(self.delimiter)
L = [self.subarg.fromStringProto(string, proto)
for string in strings]
return L
def toStringProto(self, inObject, proto):
L = []
for inSingle in inObject:
outString = self.subarg.toStringProto(inSingle, proto)
assert self.delimiter not in outString
L.append(outString)
return self.delimiter.join(L)
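# Illustrative sketch (values are assumed, not part of the original code):
#
#     arg = ListOf(Integer())
#     arg.toStringProto([1, 2, 3], proto=None)     # -> '1, 2, 3'
#     arg.fromStringProto('1, 2, 3', proto=None)   # -> [1, 2, 3]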
class Integer(Argument):
fromString = int
def toString(self, inObject):
return str(int(inObject))
class String(Argument):
def toString(self, inObject):
return inObject
def fromString(self, inString):
return inString
class EncodedString(Argument):
def __init__(self, encoding):
self.encoding = encoding
def toString(self, inObject):
return inObject.encode(self.encoding)
def fromString(self, inString):
return inString.decode(self.encoding)
# Temporary backwards compatibility for Exponent
Body = String
class Unicode(String):
def toString(self, inObject):
# assert isinstance(inObject, unicode)
return String.toString(self, inObject.encode('utf-8'))
def fromString(self, inString):
# assert isinstance(inString, str)
return String.fromString(self, inString).decode('utf-8')
class Path(Unicode):
def fromString(self, inString):
return filepath.FilePath(Unicode.fromString(self, inString))
def toString(self, inObject):
return Unicode.toString(self, inObject.path)
class Float(Argument):
fromString = float
toString = str
class Base64Binary(Argument):
    def toString(self, inObject):
        # The Python 2-only 'base64' codec is replaced with the base64 module
        # so this also works on Python 3.
        return base64.b64encode(inObject)
    def fromString(self, inString):
        return base64.b64decode(inString)
class Time(Argument):
def toString(self, inObject):
return inObject.asISO8601TimeAndDate()
def fromString(self, inString):
return extime.Time.fromISO8601TimeAndDate(inString)
class ExtraArg:
def fromTransport(self, inTransport):
raise NotImplementedError()
class Peer(ExtraArg):
def fromTransport(self, inTransport):
return inTransport.getQ2QPeer()
class PeerDomain(ExtraArg):
def fromTransport(self, inTransport):
return inTransport.getQ2QPeer().domain
class PeerUser(ExtraArg):
def fromTransport(self, inTransport):
return inTransport.getQ2QPeer().resource
class Host(ExtraArg):
def fromTransport(self, inTransport):
return inTransport.getQ2QHost()
class HostDomain(ExtraArg):
def fromTransport(self, inTransport):
return inTransport.getQ2QHost().domain
class HostUser(ExtraArg):
def fromTransport(self, inTransport):
return inTransport.getQ2QHost().resource
class Boolean(Argument):
def fromString(self, inString):
if inString == 'True':
return True
elif inString == 'False':
return False
else:
raise RuntimeError("Bad boolean value: %r" % (inString,))
def toString(self, inObject):
if inObject:
return 'True'
else:
return 'False'
class _CommandMeta(type):
def __new__(cls, name, bases, attrs):
re = attrs['reverseErrors'] = {}
er = attrs['allErrors'] = {}
for v, k in six.viewitems(attrs.get('errors',{})):
re[k] = v
er[v] = k
for v, k in six.viewitems(attrs.get('fatalErrors',{})):
re[k] = v
er[v] = k
return type.__new__(cls, name, bases, attrs)
@six.add_metaclass(_CommandMeta)
class Command:
arguments = []
response = []
extra = []
namespaces = [None] # This is set to [None] on purpose: None means
# "no namespace", not "empty list". "empty
# list" will make your command invalid in _all_
# namespaces, effectively uncallable.
errors = {}
fatalErrors = {}
commandType = Box
responseType = Box
def commandName():
def get(self):
return self.__class__.__name__
raise NotImplementedError("Missing command name")
return get,
commandName = property(*commandName())
def __init__(self, **kw):
self.structured = kw
givenArgs = [normalizeKey(k) for k in kw.keys()]
forgotten = []
for name, arg in self.arguments:
if normalizeKey(name) not in givenArgs and not arg.optional:
forgotten.append(normalizeKey(name))
# for v in kw.itervalues():
# if v is None:
# from pprint import pformat
# raise RuntimeError("ARGH: %s" % pformat(kw))
if forgotten:
if len(forgotten) == 1:
plural = 'an argument'
else:
plural = 'some arguments'
raise RuntimeError("You forgot %s to %r: %s" % (
plural, self.commandName, ', '.join(forgotten)))
forgotten = []
def makeResponse(cls, objects, proto):
try:
return objectsToStrings(objects, cls.response, cls.responseType(), proto)
except:
log.msg("Exception in %r.makeResponse" % (cls,))
raise
makeResponse = classmethod(makeResponse)
def do(self, proto, namespace=None, requiresAnswer=True):
if namespace is not None:
cmd = namespace + ":" + self.commandName
else:
cmd = self.commandName
def _massageError(error):
error.trap(RemoteJuiceError)
rje = error.value
return Failure(self.reverseErrors.get(rje.errorCode, UnhandledRemoteJuiceError)(rje.description))
d = proto.sendBoxCommand(
cmd, objectsToStrings(self.structured, self.arguments, self.commandType(),
proto),
requiresAnswer)
if requiresAnswer:
d.addCallback(stringsToObjects, self.response, proto)
d.addCallback(self.addExtra, proto.transport)
d.addErrback(_massageError)
return d
def addExtra(self, d, transport):
for name, extraArg in self.extra:
d[name] = extraArg.fromTransport(transport)
return d
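# Illustrative sketch (hypothetical command, not part of this module): a
# concrete Command subclass declares its schema through class attributes and
# is issued with do(), e.g.
#
#     class Sum(Command):
#         commandName = 'Sum'
#         arguments = [('a', Integer()), ('b', Integer())]
#         response = [('total', Integer())]
#
#     Sum(a=13, b=81).do(juiceProtocol).addCallback(lambda resp: resp['total'])
#
# do() serializes the keyword arguments into a box via objectsToStrings, sends
# it with sendBoxCommand, and (when requiresAnswer is true) parses the reply
# back into Python objects with stringsToObjects.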
class ProtocolSwitchCommand(Command):
"""Use this command to switch from something Juice-derived to a different
    protocol mid-connection. This can be useful for using Juice as the
connection-startup negotiation phase. Since TLS is a different layer
entirely, you can use Juice to negotiate the security parameters of your
connection, then switch to a different protocol, and the connection will
remain secured.
"""
def __init__(self, __protoToSwitchToFactory, **kw):
self.protoToSwitchToFactory = __protoToSwitchToFactory
super(ProtocolSwitchCommand, self).__init__(**kw)
def makeResponse(cls, innerProto, proto):
return _SwitchBox(innerProto)
makeResponse = classmethod(makeResponse)
def do(self, proto, namespace=None):
d = super(ProtocolSwitchCommand, self).do(proto)
proto._lock()
def switchNow(ign):
innerProto = self.protoToSwitchToFactory.buildProtocol(proto.transport.getPeer())
proto._switchTo(innerProto, self.protoToSwitchToFactory)
return ign
def die(ign):
proto.transport.loseConnection()
return ign
def handle(ign):
self.protoToSwitchToFactory.clientConnectionFailed(None, Failure(CONNECTION_LOST))
return ign
return d.addCallbacks(switchNow, handle).addErrback(die)
class Negotiate(Command):
commandName = 'Negotiate'
arguments = [('versions', ListOf(Integer()))]
response = [('version', Integer())]
responseType = NegotiateBox
class Juice(LineReceiver, JuiceParserBase, object):
"""
JUICE (JUice Is Concurrent Events) is a simple connection-oriented
request/response protocol. Packets, or "boxes", are collections of
RFC2822-inspired headers, plus a body. Note that this is NOT a literal
interpretation of any existing RFC, 822, 2822 or otherwise, but a simpler
version that does not do line continuations, does not specify any
particular format for header values, dispatches semantic meanings of most
headers on the -Command header rather than giving them global meaning, and
allows multiple sets of headers (messages, or JuiceBoxes) on a connection.
All headers whose names begin with a dash ('-') are reserved for use by the
protocol. All others are for application use - their meaning depends on
the value of the "-Command" header.
"""
protocolName = b'juice-base'
hostCertificate = None
MAX_LENGTH = 1024 * 1024
isServer = property(lambda self: self._issueGreeting,
doc="""
True if this is a juice server, e.g. it is going to
issue or has issued a server greeting upon
connection.
""")
isClient = property(lambda self: not self._issueGreeting,
doc="""
                        True if this is a juice client, e.g. it is not going to
issue or did not issue a server greeting upon
connection.
""")
def __init__(self, issueGreeting):
"""
@param issueGreeting: whether to issue a greeting when connected. This
should be set on server-side Juice protocols.
"""
JuiceParserBase.__init__(self)
self._issueGreeting = issueGreeting
def __repr__(self):
return '<%s %s/%s at 0x%x>' % (self.__class__.__name__, self.isClient and 'client' or 'server', self.innerProtocol, id(self))
__locked = False
def _lock(self):
""" Lock this Juice instance so that no further Juice traffic may be sent.
This is used when sending a request to switch underlying protocols.
You probably want to subclass ProtocolSwitchCommand rather than calling
this directly.
"""
self.__locked = True
innerProtocol = None
def _switchTo(self, newProto, clientFactory=None):
""" Switch this Juice instance to a new protocol. You need to do this
'simultaneously' on both ends of a connection; the easiest way to do
this is to use a subclass of ProtocolSwitchCommand.
"""
assert self.innerProtocol is None, "Protocol can only be safely switched once."
self.setRawMode()
self.innerProtocol = newProto
self.innerProtocolClientFactory = clientFactory
newProto.makeConnection(self.transport)
innerProtocolClientFactory = None
def juiceBoxReceived(self, box):
if self.__locked and COMMAND in box and ASK in box:
# This is a command which will trigger an answer, and we can no
# longer answer anything, so don't bother delivering it.
return
return super(Juice, self).juiceBoxReceived(box)
def sendPacket(self, completeBox):
"""
Send a juice.Box to my peer.
Note: transport.write is never called outside of this method.
"""
assert not self.__locked, "You cannot send juice packets when a connection is locked"
if self._startingTLSBuffer is not None:
self._startingTLSBuffer.append(completeBox)
else:
if debug:
log.msg("Juice send: %s" % pprint.pformat(dict(six.viewitems(completeBox))))
result = completeBox.serialize()
self.transport.write(result)
def sendCommand(self, command, __content='', __answer=True, **kw):
box = JuiceBox(__content, **kw)
return self.sendBoxCommand(command, box, requiresAnswer=__answer)
_outstandingRequests = None
_justStartedTLS = False
def makeConnection(self, transport):
self._transportPeer = transport.getPeer()
self._transportHost = transport.getHost()
log.msg("%s %s connection established (HOST:%s PEER:%s)" % (self.isClient and "client" or "server",
self.__class__.__name__,
self._transportHost,
self._transportPeer))
self._outstandingRequests = {}
self._requestBuffer = []
LineReceiver.makeConnection(self, transport)
_startingTLSBuffer = None
def prepareTLS(self):
self._startingTLSBuffer = []
def startTLS(self, certificate, *verifyAuthorities):
if self.hostCertificate is None:
self.hostCertificate = certificate
self._justStartedTLS = True
self.transport.startTLS(certificate.options(*verifyAuthorities))
stlsb = self._startingTLSBuffer
if stlsb is not None:
self._startingTLSBuffer = None
for box in stlsb:
self.sendPacket(box)
else:
raise RuntimeError(
"Previously authenticated connection between %s and %s "
"is trying to re-establish as %s" % (
self.hostCertificate,
Certificate.peerFromTransport(self.transport),
(certificate, verifyAuthorities)))
def dataReceived(self, data):
# If we successfully receive any data after TLS has been started, that
# means the connection was secured properly. Make a note of that fact.
if self._justStartedTLS:
self._justStartedTLS = False
return LineReceiver.dataReceived(self, data)
def connectionLost(self, reason):
log.msg("%s %s connection lost (HOST:%s PEER:%s)" % (
self.isClient and 'client' or 'server',
self.__class__.__name__,
self._transportHost,
self._transportPeer))
self.failAllOutgoing(reason)
if self.innerProtocol is not None:
self.innerProtocol.connectionLost(reason)
if self.innerProtocolClientFactory is not None:
self.innerProtocolClientFactory.clientConnectionLost(None, reason)
def lineReceived(self, line):
if line:
self._requestBuffer.append(line)
else:
buf = self._requestBuffer
self._requestBuffer = []
bodylen, b = parseJuiceHeaders(buf)
if bodylen:
self._bodyRemaining = bodylen
self._bodyBuffer = []
self._pendingBox = b
self.setRawMode()
else:
self.juiceBoxReceived(b)
def rawDataReceived(self, data):
if self.innerProtocol is not None:
self.innerProtocol.dataReceived(data)
return
self._bodyRemaining -= len(data)
if self._bodyRemaining <= 0:
if self._bodyRemaining < 0:
self._bodyBuffer.append(data[:self._bodyRemaining])
extraData = data[self._bodyRemaining:]
else:
self._bodyBuffer.append(data)
extraData = ''
self._pendingBox['body'] = six.ensure_str(b''.join(six.ensure_binary(each) for each in self._bodyBuffer))
self._bodyBuffer = None
b, self._pendingBox = self._pendingBox, None
self.juiceBoxReceived(b)
if self.innerProtocol is not None:
self.innerProtocol.makeConnection(self.transport)
if extraData:
self.innerProtocol.dataReceived(extraData)
else:
self.setLineMode(extraData)
else:
self._bodyBuffer.append(data)
protocolVersion = 0
def _setProtocolVersion(self, version):
# if we ever want to actually mangle encodings, this is the place to do
# it!
self.protocolVersion = version
return version
def renegotiateVersion(self, newVersion):
assert newVersion in VERSIONS, (
"This side of the connection doesn't support version %r"
% (newVersion,))
v = VERSIONS[:]
v.remove(newVersion)
return Negotiate(versions=[newVersion]).do(self).addCallback(
lambda ver: self._setProtocolVersion(ver['version']))
def command_NEGOTIATE(self, versions):
for version in versions:
if version in VERSIONS:
return dict(version=version)
raise IncompatibleVersions()
command_NEGOTIATE.command = Negotiate
VERSIONS = [1]
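# Version negotiation: either peer may call renegotiateVersion() with a value
# from VERSIONS; the Negotiate command asks the remote side to pick a mutually
# supported version (command_NEGOTIATE returns the first match), and the
# result is installed on the initiating side via _setProtocolVersion.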
class _ParserHelper(Juice):
def __init__(self):
Juice.__init__(self, False)
self.boxes = []
self.results = Deferred()
def getPeer(self):
return 'string'
def getHost(self):
return 'string'
disconnecting = False
def juiceBoxReceived(self, box):
self.boxes.append(box)
# Synchronous helpers
def parse(cls, fileObj):
p = cls()
p.makeConnection(p)
p.dataReceived(fileObj.read())
return p.boxes
parse = classmethod(parse)
def parseString(cls, data):
with io.BytesIO(data) as f:
return cls.parse(f)
parseString = classmethod(parseString)
parse = _ParserHelper.parse
parseString = _ParserHelper.parseString
def stringsToObjects(strings, arglist, proto):
objects = {}
myStrings = strings.copy()
for argname, argparser in arglist:
argparser.fromBox(argname, myStrings, objects, proto)
return objects
def objectsToStrings(objects, arglist, strings, proto):
myObjects = {}
for (k, v) in objects.items():
myObjects[normalizeKey(k)] = v
for argname, argparser in arglist:
argparser.toBox(argname, strings, myObjects, proto)
return strings
class JuiceServerFactory(ServerFactory):
protocol = Juice
def buildProtocol(self, addr):
prot = self.protocol(True)
prot.factory = self
return prot
class JuiceClientFactory(ClientFactory):
protocol = Juice
def buildProtocol(self, addr):
prot = self.protocol(False)
prot.factory = self
return prot
| [
"epsilon.extime.Time.fromISO8601TimeAndDate",
"six.viewitems",
"epsilon.compat.long",
"twisted.internet.ssl.Certificate.peerFromTransport",
"twisted.python.log.err",
"twisted.python.log.msg",
"twisted.internet.defer.maybeDeferred",
"six.add_metaclass",
"io.BytesIO",
"six.ensure_binary",
"six.ensure_str",
"keyword.iskeyword",
"epsilon.liner.LineReceiver.dataReceived",
"twisted.python.failure.Failure",
"warnings.warn",
"twisted.internet.defer.fail",
"twisted.internet.defer.Deferred",
"epsilon.liner.LineReceiver.makeConnection"
] | [((18926, 18957), 'six.add_metaclass', 'six.add_metaclass', (['_CommandMeta'], {}), '(_CommandMeta)\n', (18943, 18957), False, 'import six\n'), ((8656, 8679), 'keyword.iskeyword', 'keyword.iskeyword', (['lkey'], {}), '(lkey)\n', (8673, 8679), False, 'import keyword\n'), ((9859, 9866), 'epsilon.compat.long', 'long', (['(0)'], {}), '(0)\n', (9863, 9866), False, 'from epsilon.compat import long\n'), ((1743, 1771), 'six.ensure_binary', 'six.ensure_binary', (['delimiter'], {}), '(delimiter)\n', (1760, 1771), False, 'import six\n'), ((1790, 1816), 'six.ensure_binary', 'six.ensure_binary', (['escaped'], {}), '(escaped)\n', (1807, 1816), False, 'import six\n'), ((1855, 1874), 'six.viewitems', 'six.viewitems', (['self'], {}), '(self)\n', (1868, 1874), False, 'import six\n'), ((9324, 9345), 'six.ensure_str', 'six.ensure_str', (['value'], {}), '(value)\n', (9338, 9345), False, 'import six\n'), ((9524, 9599), 'twisted.python.log.msg', 'log.msg', (['"""Juice server or network failure unhandled by client application:"""'], {}), "('Juice server or network failure unhandled by client application:')\n", (9531, 9599), False, 'from twisted.python import log, filepath\n'), ((9627, 9643), 'twisted.python.log.err', 'log.err', (['failure'], {}), '(failure)\n', (9634, 9643), False, 'from twisted.python import log, filepath\n'), ((9652, 9731), 'twisted.python.log.msg', 'log.msg', (['"""Dropping connection! To avoid, add errbacks to ALL remote commands!"""'], {}), "('Dropping connection! To avoid, add errbacks to ALL remote commands!')\n", (9659, 9731), False, 'from twisted.python import log, filepath\n'), ((17264, 17308), 'epsilon.extime.Time.fromISO8601TimeAndDate', 'extime.Time.fromISO8601TimeAndDate', (['inString'], {}), '(inString)\n', (17298, 17308), False, 'from epsilon import extime\n'), ((27501, 27676), 'twisted.python.log.msg', 'log.msg', (["('%s %s connection established (HOST:%s PEER:%s)' % (self.isClient and\n 'client' or 'server', self.__class__.__name__, self._transportHost,\n self._transportPeer))"], {}), "('%s %s connection established (HOST:%s PEER:%s)' % (self.isClient and\n 'client' or 'server', self.__class__.__name__, self._transportHost,\n self._transportPeer))\n", (27508, 27676), False, 'from twisted.python import log, filepath\n'), ((27953, 27997), 'epsilon.liner.LineReceiver.makeConnection', 'LineReceiver.makeConnection', (['self', 'transport'], {}), '(self, transport)\n', (27980, 27997), False, 'from epsilon.liner import LineReceiver\n'), ((29176, 29213), 'epsilon.liner.LineReceiver.dataReceived', 'LineReceiver.dataReceived', (['self', 'data'], {}), '(self, data)\n', (29201, 29213), False, 'from epsilon.liner import LineReceiver\n'), ((29261, 29429), 'twisted.python.log.msg', 'log.msg', (["('%s %s connection lost (HOST:%s PEER:%s)' % (self.isClient and 'client' or\n 'server', self.__class__.__name__, self._transportHost, self.\n _transportPeer))"], {}), "('%s %s connection lost (HOST:%s PEER:%s)' % (self.isClient and\n 'client' or 'server', self.__class__.__name__, self._transportHost,\n self._transportPeer))\n", (29268, 29429), False, 'from twisted.python import log, filepath\n'), ((32285, 32295), 'twisted.internet.defer.Deferred', 'Deferred', ([], {}), '()\n', (32293, 32295), False, 'from twisted.internet.defer import Deferred, maybeDeferred, fail\n'), ((1104, 1190), 'warnings.warn', 'warnings.warn', (['"""body attribute of boxes is now just a regular field"""'], {'stacklevel': '(2)'}), "('body attribute of boxes is now just a regular field',\n stacklevel=2)\n", (1117, 1190), False, 
'import warnings, pprint\n'), ((1289, 1375), 'warnings.warn', 'warnings.warn', (['"""body attribute of boxes is now just a regular field"""'], {'stacklevel': '(2)'}), "('body attribute of boxes is now just a regular field',\n stacklevel=2)\n", (1302, 1375), False, 'import warnings, pprint\n'), ((6097, 6165), 'twisted.python.log.msg', 'log.msg', (["('WRONG NAMESPACE: %r, %r' % (namespace, command.namespaces))"], {}), "('WRONG NAMESPACE: %r, %r' % (namespace, command.namespaces))\n", (6104, 6165), False, 'from twisted.python import log, filepath\n'), ((9061, 9092), 'six.ensure_str', 'six.ensure_str', (["(b'\\r\\n' + L[1:])"], {}), "(b'\\r\\n' + L[1:])\n", (9075, 9092), False, 'import six\n'), ((13126, 13147), 'twisted.internet.defer.fail', 'fail', (['CONNECTION_LOST'], {}), '(CONNECTION_LOST)\n', (13130, 13147), False, 'from twisted.internet.defer import Deferred, maybeDeferred, fail\n'), ((13317, 13327), 'twisted.internet.defer.Deferred', 'Deferred', ([], {}), '()\n', (13325, 13327), False, 'from twisted.internet.defer import Deferred, maybeDeferred, fail\n'), ((14752, 14779), 'six.ensure_binary', 'six.ensure_binary', (['inString'], {}), '(inString)\n', (14769, 14779), False, 'import six\n'), ((32729, 32745), 'io.BytesIO', 'io.BytesIO', (['data'], {}), '(data)\n', (32739, 32745), False, 'import io\n'), ((2247, 2276), 'six.ensure_binary', 'six.ensure_binary', (['self[BODY]'], {}), '(self[BODY])\n', (2264, 2276), False, 'import six\n'), ((20588, 20636), 'twisted.python.log.msg', 'log.msg', (["('Exception in %r.makeResponse' % (cls,))"], {}), "('Exception in %r.makeResponse' % (cls,))\n", (20595, 20636), False, 'from twisted.python import log, filepath\n'), ((22941, 22965), 'twisted.python.failure.Failure', 'Failure', (['CONNECTION_LOST'], {}), '(CONNECTION_LOST)\n', (22948, 22965), False, 'from twisted.python.failure import Failure\n'), ((8603, 8622), 'six.ensure_str', 'six.ensure_str', (['key'], {}), '(key)\n', (8617, 8622), False, 'import six\n'), ((2090, 2110), 'six.ensure_binary', 'six.ensure_binary', (['v'], {}), '(v)\n', (2107, 2110), False, 'import six\n'), ((28791, 28836), 'twisted.internet.ssl.Certificate.peerFromTransport', 'Certificate.peerFromTransport', (['self.transport'], {}), '(self.transport)\n', (28820, 28836), False, 'from twisted.internet.ssl import Certificate\n'), ((30785, 30808), 'six.ensure_binary', 'six.ensure_binary', (['each'], {}), '(each)\n', (30802, 30808), False, 'import six\n'), ((6959, 6989), 'twisted.internet.defer.maybeDeferred', 'maybeDeferred', (['aCallable'], {}), '(aCallable, **kw)\n', (6972, 6989), False, 'from twisted.internet.defer import Deferred, maybeDeferred, fail\n'), ((10304, 10322), 'six.viewitems', 'six.viewitems', (['box'], {}), '(box)\n', (10317, 10322), False, 'import six\n'), ((11622, 11636), 'twisted.python.log.err', 'log.err', (['error'], {}), '(error)\n', (11629, 11636), False, 'from twisted.python import log, filepath\n'), ((1991, 2011), 'six.ensure_binary', 'six.ensure_binary', (['k'], {}), '(k)\n', (2008, 2011), False, 'import six\n'), ((26988, 27014), 'six.viewitems', 'six.viewitems', (['completeBox'], {}), '(completeBox)\n', (27001, 27014), False, 'import six\n')] |
'''
<NAME>
set up: 2020-1-9
integrate img and label into one file
-- fiducial1024_v1
'''
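# This script renders synthetically distorted ("perturbed") document images for
# dewarping training data.  For every source image it stacks a number of random
# fold/curve displacements plus a random perspective warp, composites the result
# over a random background, and writes out: the distorted RGB image (png/), the
# image with the fiducial grid drawn on it (fiducial_points/), and a pickled
# dict of fiducial point coordinates and grid spacing (color/*.gw).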
import argparse
import sys, os
import pickle
import random
import collections
import json
import numpy as np
import scipy.io as io
import scipy.misc as m
import matplotlib.pyplot as plt
import glob
import math
import time
import threading
import multiprocessing as mp
from multiprocessing import Pool
import re
import cv2
# sys.path.append('/lustre/home/gwxie/hope/project/dewarp/datasets/') # /lustre/home/gwxie/program/project/unwarp/perturbed_imgaes/GAN
import utils
def getDatasets(dir):
return os.listdir(dir)
class perturbed(utils.BasePerturbed):
def __init__(self, path, bg_path, save_path, save_suffix):
self.path = path
self.bg_path = bg_path
self.save_path = save_path
self.save_suffix = save_suffix
def save_img(self, m, n, fold_curve='fold', repeat_time=4, fiducial_points = 16, relativeShift_position='relativeShift_v2'):
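		# m, n                 : indices of the source image and of the synthesis run;
		#                        only used to build the output file prefix.
		# fold_curve           : 'fold' for sharp creases, 'curve' for smooth bends;
		#                        selects the displacement decay function used below.
		# repeat_time          : how many random fold/curve perturbations are stacked.
		# fiducial_points      : number of control points per side of the regular grid
		#                        whose deformed coordinates are saved as the label.
		# relativeShift_position: label encoding; 'relativeShift_v2' stores the backward
		#                        mapping relative to the pixel grid (absolute positions
		#                        are kept otherwise).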
origin_img = cv2.imread(self.path, flags=cv2.IMREAD_COLOR)
save_img_shape = [512*2, 480*2] # 320
# reduce_value = np.random.choice([2**4, 2**5, 2**6, 2**7, 2**8], p=[0.01, 0.1, 0.4, 0.39, 0.1])
reduce_value = np.random.choice([2*2, 4*2, 8*2, 16*2, 24*2, 32*2, 40*2, 48*2], p=[0.02, 0.18, 0.2, 0.3, 0.1, 0.1, 0.08, 0.02])
# reduce_value = np.random.choice([8*2, 16*2, 24*2, 32*2, 40*2, 48*2], p=[0.01, 0.02, 0.2, 0.4, 0.19, 0.18])
# reduce_value = np.random.choice([16, 24, 32, 40, 48, 64], p=[0.01, 0.1, 0.2, 0.4, 0.2, 0.09])
base_img_shrink = save_img_shape[0] - reduce_value
# enlarge_img_shrink = [1024, 768]
# enlarge_img_shrink = [896, 672] # 420
enlarge_img_shrink = [512*4, 480*4] # 420
# enlarge_img_shrink = [896*2, 768*2] # 420
# enlarge_img_shrink = [896, 768] # 420
# enlarge_img_shrink = [768, 576] # 420
# enlarge_img_shrink = [640, 480] # 420
''''''
im_lr = origin_img.shape[0]
im_ud = origin_img.shape[1]
reduce_value_v2 = np.random.choice([2*2, 4*2, 8*2, 16*2, 24*2, 28*2, 32*2, 48*2], p=[0.02, 0.18, 0.2, 0.2, 0.1, 0.1, 0.1, 0.1])
# reduce_value_v2 = np.random.choice([16, 24, 28, 32, 48, 64], p=[0.01, 0.1, 0.2, 0.3, 0.25, 0.14])
if im_lr > im_ud:
im_ud = min(int(im_ud / im_lr * base_img_shrink), save_img_shape[1] - reduce_value_v2)
im_lr = save_img_shape[0] - reduce_value
else:
base_img_shrink = save_img_shape[1] - reduce_value
im_lr = min(int(im_lr / im_ud * base_img_shrink), save_img_shape[0] - reduce_value_v2)
im_ud = base_img_shrink
if round(im_lr / im_ud, 2) < 0.5 or round(im_ud / im_lr, 2) < 0.5:
repeat_time = min(repeat_time, 8)
edge_padding = 3
im_lr -= im_lr % (fiducial_points-1) - (2*edge_padding) # im_lr % (fiducial_points-1) - 1
im_ud -= im_ud % (fiducial_points-1) - (2*edge_padding) # im_ud % (fiducial_points-1) - 1
im_hight = np.linspace(edge_padding, im_lr - edge_padding, fiducial_points, dtype=np.int64)
im_wide = np.linspace(edge_padding, im_ud - edge_padding, fiducial_points, dtype=np.int64)
# im_lr -= im_lr % (fiducial_points-1) - (1+2*edge_padding) # im_lr % (fiducial_points-1) - 1
# im_ud -= im_ud % (fiducial_points-1) - (1+2*edge_padding) # im_ud % (fiducial_points-1) - 1
# im_hight = np.linspace(edge_padding, im_lr - (1+edge_padding), fiducial_points, dtype=np.int64)
# im_wide = np.linspace(edge_padding, im_ud - (1+edge_padding), fiducial_points, dtype=np.int64)
im_x, im_y = np.meshgrid(im_hight, im_wide)
segment_x = (im_lr) // (fiducial_points-1)
segment_y = (im_ud) // (fiducial_points-1)
# plt.plot(im_x, im_y,
# color='limegreen',
# marker='.',
# linestyle='')
# plt.grid(True)
# plt.show()
self.origin_img = cv2.resize(origin_img, (im_ud, im_lr), interpolation=cv2.INTER_CUBIC)
perturbed_bg_ = getDatasets(self.bg_path)
perturbed_bg_img_ = self.bg_path+random.choice(perturbed_bg_)
perturbed_bg_img = cv2.imread(perturbed_bg_img_, flags=cv2.IMREAD_COLOR)
mesh_shape = self.origin_img.shape[:2]
self.synthesis_perturbed_img = np.full((enlarge_img_shrink[0], enlarge_img_shrink[1], 3), 256, dtype=np.float32)#np.zeros_like(perturbed_bg_img)
# self.synthesis_perturbed_img = np.full((enlarge_img_shrink[0], enlarge_img_shrink[1], 3), 0, dtype=np.int16)#np.zeros_like(perturbed_bg_img)
self.new_shape = self.synthesis_perturbed_img.shape[:2]
perturbed_bg_img = cv2.resize(perturbed_bg_img, (save_img_shape[1], save_img_shape[0]), cv2.INPAINT_TELEA)
origin_pixel_position = np.argwhere(np.zeros(mesh_shape, dtype=np.uint32) == 0).reshape(mesh_shape[0], mesh_shape[1], 2)
pixel_position = np.argwhere(np.zeros(self.new_shape, dtype=np.uint32) == 0).reshape(self.new_shape[0], self.new_shape[1], 2)
self.perturbed_xy_ = np.zeros((self.new_shape[0], self.new_shape[1], 2))
# self.perturbed_xy_ = pixel_position.copy().astype(np.float32)
# fiducial_points_grid = origin_pixel_position[im_x, im_y]
self.synthesis_perturbed_label = np.zeros((self.new_shape[0], self.new_shape[1], 2))
x_min, y_min, x_max, y_max = self.adjust_position_v2(0, 0, mesh_shape[0], mesh_shape[1], save_img_shape)
origin_pixel_position += [x_min, y_min]
x_min, y_min, x_max, y_max = self.adjust_position(0, 0, mesh_shape[0], mesh_shape[1])
x_shift = random.randint(-enlarge_img_shrink[0]//16, enlarge_img_shrink[0]//16)
y_shift = random.randint(-enlarge_img_shrink[1]//16, enlarge_img_shrink[1]//16)
x_min += x_shift
x_max += x_shift
y_min += y_shift
y_max += y_shift
'''im_x,y'''
im_x += x_min
im_y += y_min
self.synthesis_perturbed_img[x_min:x_max, y_min:y_max] = self.origin_img
self.synthesis_perturbed_label[x_min:x_max, y_min:y_max] = origin_pixel_position
synthesis_perturbed_img_map = self.synthesis_perturbed_img.copy()
synthesis_perturbed_label_map = self.synthesis_perturbed_label.copy()
foreORbackground_label = np.full((mesh_shape), 1, dtype=np.int16)
foreORbackground_label_map = np.full((self.new_shape), 0, dtype=np.int16)
foreORbackground_label_map[x_min:x_max, y_min:y_max] = foreORbackground_label
# synthesis_perturbed_img_map = self.pad(self.synthesis_perturbed_img.copy(), x_min, y_min, x_max, y_max)
# synthesis_perturbed_label_map = self.pad(synthesis_perturbed_label_map, x_min, y_min, x_max, y_max)
'''*****************************************************************'''
is_normalizationFun_mixture = self.is_perform(0.2, 0.8)
# if not is_normalizationFun_mixture:
normalizationFun_0_1 = False
# normalizationFun_0_1 = self.is_perform(0.5, 0.5)
if fold_curve == 'fold':
fold_curve_random = True
# is_normalizationFun_mixture = False
normalizationFun_0_1 = self.is_perform(0.2, 0.8)
if is_normalizationFun_mixture:
alpha_perturbed = random.randint(80, 120) / 100
else:
if normalizationFun_0_1 and repeat_time < 8:
alpha_perturbed = random.randint(50, 70) / 100
else:
alpha_perturbed = random.randint(70, 130) / 100
else:
fold_curve_random = self.is_perform(0.1, 0.9) # False # self.is_perform(0.01, 0.99)
alpha_perturbed = random.randint(80, 160) / 100
# is_normalizationFun_mixture = False # self.is_perform(0.01, 0.99)
synthesis_perturbed_img = np.full_like(self.synthesis_perturbed_img, 256)
# synthesis_perturbed_img = np.full_like(self.synthesis_perturbed_img, 0, dtype=np.int16)
synthesis_perturbed_label = np.zeros_like(self.synthesis_perturbed_label)
alpha_perturbed_change = self.is_perform(0.5, 0.5)
p_pp_choice = self.is_perform(0.8, 0.2) if fold_curve == 'fold' else self.is_perform(0.1, 0.9)
for repeat_i in range(repeat_time):
if alpha_perturbed_change:
if fold_curve == 'fold':
if is_normalizationFun_mixture:
alpha_perturbed = random.randint(80, 120) / 100
else:
if normalizationFun_0_1 and repeat_time < 8:
alpha_perturbed = random.randint(50, 70) / 100
else:
alpha_perturbed = random.randint(70, 130) / 100
else:
alpha_perturbed = random.randint(80, 160) / 100
''''''
linspace_x = [0, (self.new_shape[0] - im_lr) // 2 - 1,
self.new_shape[0] - (self.new_shape[0] - im_lr) // 2 - 1, self.new_shape[0] - 1]
linspace_y = [0, (self.new_shape[1] - im_ud) // 2 - 1,
self.new_shape[1] - (self.new_shape[1] - im_ud) // 2 - 1, self.new_shape[1] - 1]
linspace_x_seq = [1, 2, 3]
linspace_y_seq = [1, 2, 3]
r_x = random.choice(linspace_x_seq)
r_y = random.choice(linspace_y_seq)
perturbed_p = np.array(
[random.randint(linspace_x[r_x-1] * 10, linspace_x[r_x] * 10),
random.randint(linspace_y[r_y-1] * 10, linspace_y[r_y] * 10)])/10
if ((r_x == 1 or r_x == 3) and (r_y == 1 or r_y == 3)) and p_pp_choice:
linspace_x_seq.remove(r_x)
linspace_y_seq.remove(r_y)
r_x = random.choice(linspace_x_seq)
r_y = random.choice(linspace_y_seq)
perturbed_pp = np.array(
[random.randint(linspace_x[r_x-1] * 10, linspace_x[r_x] * 10),
random.randint(linspace_y[r_y-1] * 10, linspace_y[r_y] * 10)])/10
# perturbed_p, perturbed_pp = np.array(
# [random.randint(0, self.new_shape[0] * 10) / 10,
# random.randint(0, self.new_shape[1] * 10) / 10]) \
# , np.array([random.randint(0, self.new_shape[0] * 10) / 10,
# random.randint(0, self.new_shape[1] * 10) / 10])
# perturbed_p, perturbed_pp = np.array(
# [random.randint((self.new_shape[0]-im_lr)//2*10, (self.new_shape[0]-(self.new_shape[0]-im_lr)//2) * 10) / 10,
# random.randint((self.new_shape[1]-im_ud)//2*10, (self.new_shape[1]-(self.new_shape[1]-im_ud)//2) * 10) / 10]) \
# , np.array([random.randint((self.new_shape[0]-im_lr)//2*10, (self.new_shape[0]-(self.new_shape[0]-im_lr)//2) * 10) / 10,
# random.randint((self.new_shape[1]-im_ud)//2*10, (self.new_shape[1]-(self.new_shape[1]-im_ud)//2) * 10) / 10])
''''''
perturbed_vp = perturbed_pp - perturbed_p
perturbed_vp_norm = np.linalg.norm(perturbed_vp)
perturbed_distance_vertex_and_line = np.dot((perturbed_p - pixel_position), perturbed_vp) / perturbed_vp_norm
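			# Each perturbation picks two random points perturbed_p / perturbed_pp in the
			# canvas; perturbed_vp is their direction.  perturbed_distance_vertex_and_line
			# projects every pixel onto that direction, i.e. the signed distance of the
			# pixel from the line through perturbed_p perpendicular to perturbed_vp.  The
			# displacement perturbed_v applied below is scaled by a weight that decays
			# with this distance.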
''''''
# perturbed_v = np.array([random.randint(-3000, 3000) / 100, random.randint(-3000, 3000) / 100])
# perturbed_v = np.array([random.randint(-4000, 4000) / 100, random.randint(-4000, 4000) / 100])
if fold_curve == 'fold' and self.is_perform(0.6, 0.4): # self.is_perform(0.3, 0.7):
# perturbed_v = np.array([random.randint(-9000, 9000) / 100, random.randint(-9000, 9000) / 100])
perturbed_v = np.array([random.randint(-10000, 10000) / 100, random.randint(-10000, 10000) / 100])
# perturbed_v = np.array([random.randint(-11000, 11000) / 100, random.randint(-11000, 11000) / 100])
else:
# perturbed_v = np.array([random.randint(-9000, 9000) / 100, random.randint(-9000, 9000) / 100])
# perturbed_v = np.array([random.randint(-16000, 16000) / 100, random.randint(-16000, 16000) / 100])
perturbed_v = np.array([random.randint(-8000, 8000) / 100, random.randint(-8000, 8000) / 100])
# perturbed_v = np.array([random.randint(-3500, 3500) / 100, random.randint(-3500, 3500) / 100])
# perturbed_v = np.array([random.randint(-600, 600) / 10, random.randint(-600, 600) / 10])
''''''
if fold_curve == 'fold':
if is_normalizationFun_mixture:
if self.is_perform(0.5, 0.5):
perturbed_d = np.abs(self.get_normalize(perturbed_distance_vertex_and_line))
else:
perturbed_d = self.get_0_1_d(np.abs(perturbed_distance_vertex_and_line), random.randint(1, 2))
else:
if normalizationFun_0_1:
perturbed_d = self.get_0_1_d(np.abs(perturbed_distance_vertex_and_line), 2)
else:
perturbed_d = np.abs(self.get_normalize(perturbed_distance_vertex_and_line))
else:
if is_normalizationFun_mixture:
if self.is_perform(0.5, 0.5):
perturbed_d = np.abs(self.get_normalize(perturbed_distance_vertex_and_line))
else:
perturbed_d = self.get_0_1_d(np.abs(perturbed_distance_vertex_and_line), random.randint(1, 2))
else:
if normalizationFun_0_1:
perturbed_d = self.get_0_1_d(np.abs(perturbed_distance_vertex_and_line), 2)
else:
perturbed_d = np.abs(self.get_normalize(perturbed_distance_vertex_and_line))
''''''
if fold_curve_random:
# omega_perturbed = (alpha_perturbed+0.2) / (perturbed_d + alpha_perturbed)
# omega_perturbed = alpha_perturbed**perturbed_d
omega_perturbed = alpha_perturbed / (perturbed_d + alpha_perturbed)
else:
omega_perturbed = 1 - perturbed_d ** alpha_perturbed
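			# Displacement weight: alpha/(d + alpha) falls off sharply around the fold
			# line (a crease), while 1 - d**alpha decays smoothly with the normalized
			# distance d (a gentle page bend).  'fold' always uses the former; 'curve'
			# usually uses the latter.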
'''shadow'''
if self.is_perform(0.6, 0.4):
synthesis_perturbed_img_map[x_min:x_max, y_min:y_max] = np.minimum(np.maximum(synthesis_perturbed_img_map[x_min:x_max, y_min:y_max] - np.int16(np.round(omega_perturbed[x_min:x_max, y_min:y_max].repeat(3).reshape(x_max-x_min, y_max-y_min, 3) * abs(np.linalg.norm(perturbed_v//2))*np.array([0.4-random.random()*0.1, 0.4-random.random()*0.1, 0.4-random.random()*0.1]))), 0), 255)
''''''
if relativeShift_position in ['position', 'relativeShift_v2']:
self.perturbed_xy_ += np.array([omega_perturbed * perturbed_v[0], omega_perturbed * perturbed_v[1]]).transpose(1, 2, 0)
else:
print('relativeShift_position error')
exit()
'''
flat_position = np.argwhere(np.zeros(self.new_shape, dtype=np.uint32) == 0).reshape(
self.new_shape[0] * self.new_shape[1], 2)
vtx, wts = self.interp_weights(self.perturbed_xy_.reshape(self.new_shape[0] * self.new_shape[1], 2), flat_position)
wts_sum = np.abs(wts).sum(-1)
# flat_img.reshape(flat_shape[0] * flat_shape[1], 3)[:] = interpolate(pixel, vtx, wts)
wts = wts[wts_sum <= 1, :]
vtx = vtx[wts_sum <= 1, :]
synthesis_perturbed_img.reshape(self.new_shape[0] * self.new_shape[1], 3)[wts_sum <= 1,
:] = self.interpolate(synthesis_perturbed_img_map.reshape(self.new_shape[0] * self.new_shape[1], 3), vtx, wts)
synthesis_perturbed_label.reshape(self.new_shape[0] * self.new_shape[1], 2)[wts_sum <= 1,
:] = self.interpolate(synthesis_perturbed_label_map.reshape(self.new_shape[0] * self.new_shape[1], 2), vtx, wts)
foreORbackground_label = np.zeros(self.new_shape)
foreORbackground_label.reshape(self.new_shape[0] * self.new_shape[1], 1)[wts_sum <= 1, :] = self.interpolate(foreORbackground_label_map.reshape(self.new_shape[0] * self.new_shape[1], 1), vtx, wts)
foreORbackground_label[foreORbackground_label < 0.99] = 0
foreORbackground_label[foreORbackground_label >= 0.99] = 1
# synthesis_perturbed_img = np.around(synthesis_perturbed_img).astype(np.uint8)
synthesis_perturbed_label[:, :, 0] *= foreORbackground_label
synthesis_perturbed_label[:, :, 1] *= foreORbackground_label
synthesis_perturbed_img[:, :, 0] *= foreORbackground_label
synthesis_perturbed_img[:, :, 1] *= foreORbackground_label
synthesis_perturbed_img[:, :, 2] *= foreORbackground_label
self.synthesis_perturbed_img = synthesis_perturbed_img
self.synthesis_perturbed_label = synthesis_perturbed_label
'''
'''perspective'''
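		# Random perspective warp: the four corners of the (inset) quad pts1 are
		# jittered by up to perspective_shreshold pixels to form pts2.  The while
		# loops reject candidate quads whose edge lengths differ too much from the
		# original, or none of whose corner angles lies within (a_min_, a_max_), so
		# the resulting homography stays plausible.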
perspective_shreshold = random.randint(26, 36)*10 # 280
x_min_per, y_min_per, x_max_per, y_max_per = self.adjust_position(perspective_shreshold, perspective_shreshold, self.new_shape[0]-perspective_shreshold, self.new_shape[1]-perspective_shreshold)
pts1 = np.float32([[x_min_per, y_min_per], [x_max_per, y_min_per], [x_min_per, y_max_per], [x_max_per, y_max_per]])
e_1_ = x_max_per - x_min_per
e_2_ = y_max_per - y_min_per
e_3_ = e_2_
e_4_ = e_1_
perspective_shreshold_h = e_1_*0.02
perspective_shreshold_w = e_2_*0.02
a_min_, a_max_ = 70, 110
# if self.is_perform(1, 0):
if fold_curve == 'curve' and self.is_perform(0.5, 0.5):
if self.is_perform(0.5, 0.5):
while True:
pts2 = np.around(
np.float32([[x_min_per - (random.random()) * perspective_shreshold, y_min_per + (random.random()) * perspective_shreshold],
[x_max_per - (random.random()) * perspective_shreshold, y_min_per - (random.random()) * perspective_shreshold],
[x_min_per + (random.random()) * perspective_shreshold, y_max_per + (random.random()) * perspective_shreshold],
[x_max_per + (random.random()) * perspective_shreshold, y_max_per - (random.random()) * perspective_shreshold]])) # right
e_1 = np.linalg.norm(pts2[0]-pts2[1])
e_2 = np.linalg.norm(pts2[0]-pts2[2])
e_3 = np.linalg.norm(pts2[1]-pts2[3])
e_4 = np.linalg.norm(pts2[2]-pts2[3])
if e_1_+perspective_shreshold_h > e_1 and e_2_+perspective_shreshold_w > e_2 and e_3_+perspective_shreshold_w > e_3 and e_4_+perspective_shreshold_h > e_4 and \
e_1_ - perspective_shreshold_h < e_1 and e_2_ - perspective_shreshold_w < e_2 and e_3_ - perspective_shreshold_w < e_3 and e_4_ - perspective_shreshold_h < e_4 and \
abs(e_1-e_4) < perspective_shreshold_h and abs(e_2-e_3) < perspective_shreshold_w:
a0_, a1_, a2_, a3_ = self.get_angle_4(pts2)
if (a0_ > a_min_ and a0_ < a_max_) or (a1_ > a_min_ and a1_ < a_max_) or (a2_ > a_min_ and a2_ < a_max_) or (a3_ > a_min_ and a3_ < a_max_):
break
else:
while True:
pts2 = np.around(
np.float32([[x_min_per + (random.random()) * perspective_shreshold, y_min_per - (random.random()) * perspective_shreshold],
[x_max_per + (random.random()) * perspective_shreshold, y_min_per + (random.random()) * perspective_shreshold],
[x_min_per - (random.random()) * perspective_shreshold, y_max_per - (random.random()) * perspective_shreshold],
[x_max_per - (random.random()) * perspective_shreshold, y_max_per + (random.random()) * perspective_shreshold]]))
e_1 = np.linalg.norm(pts2[0]-pts2[1])
e_2 = np.linalg.norm(pts2[0]-pts2[2])
e_3 = np.linalg.norm(pts2[1]-pts2[3])
e_4 = np.linalg.norm(pts2[2]-pts2[3])
if e_1_+perspective_shreshold_h > e_1 and e_2_+perspective_shreshold_w > e_2 and e_3_+perspective_shreshold_w > e_3 and e_4_+perspective_shreshold_h > e_4 and \
e_1_ - perspective_shreshold_h < e_1 and e_2_ - perspective_shreshold_w < e_2 and e_3_ - perspective_shreshold_w < e_3 and e_4_ - perspective_shreshold_h < e_4 and \
abs(e_1-e_4) < perspective_shreshold_h and abs(e_2-e_3) < perspective_shreshold_w:
a0_, a1_, a2_, a3_ = self.get_angle_4(pts2)
if (a0_ > a_min_ and a0_ < a_max_) or (a1_ > a_min_ and a1_ < a_max_) or (a2_ > a_min_ and a2_ < a_max_) or (a3_ > a_min_ and a3_ < a_max_):
break
else:
while True:
pts2 = np.around(np.float32([[x_min_per+(random.random()-0.5)*perspective_shreshold, y_min_per+(random.random()-0.5)*perspective_shreshold],
[x_max_per+(random.random()-0.5)*perspective_shreshold, y_min_per+(random.random()-0.5)*perspective_shreshold],
[x_min_per+(random.random()-0.5)*perspective_shreshold, y_max_per+(random.random()-0.5)*perspective_shreshold],
[x_max_per+(random.random()-0.5)*perspective_shreshold, y_max_per+(random.random()-0.5)*perspective_shreshold]]))
e_1 = np.linalg.norm(pts2[0]-pts2[1])
e_2 = np.linalg.norm(pts2[0]-pts2[2])
e_3 = np.linalg.norm(pts2[1]-pts2[3])
e_4 = np.linalg.norm(pts2[2]-pts2[3])
if e_1_+perspective_shreshold_h > e_1 and e_2_+perspective_shreshold_w > e_2 and e_3_+perspective_shreshold_w > e_3 and e_4_+perspective_shreshold_h > e_4 and \
e_1_ - perspective_shreshold_h < e_1 and e_2_ - perspective_shreshold_w < e_2 and e_3_ - perspective_shreshold_w < e_3 and e_4_ - perspective_shreshold_h < e_4 and \
abs(e_1-e_4) < perspective_shreshold_h and abs(e_2-e_3) < perspective_shreshold_w:
a0_, a1_, a2_, a3_ = self.get_angle_4(pts2)
if (a0_ > a_min_ and a0_ < a_max_) or (a1_ > a_min_ and a1_ < a_max_) or (a2_ > a_min_ and a2_ < a_max_) or (a3_ > a_min_ and a3_ < a_max_):
break
M = cv2.getPerspectiveTransform(pts1, pts2)
one = np.ones((self.new_shape[0], self.new_shape[1], 1), dtype=np.int16)
matr = np.dstack((pixel_position, one))
new = np.dot(M, matr.reshape(-1, 3).T).T.reshape(self.new_shape[0], self.new_shape[1], 3)
x = new[:, :, 0]/new[:, :, 2]
y = new[:, :, 1]/new[:, :, 2]
perturbed_xy_ = np.dstack((x, y))
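		# Apply the homography in homogeneous coordinates: every pixel (x, y, 1) is
		# multiplied by M and divided by the third component; the resulting
		# displacement field (shifted so its minimum is 0) is added to perturbed_xy_.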
# perturbed_xy_round_int = np.around(cv2.bilateralFilter(perturbed_xy_round_int, 9, 75, 75))
# perturbed_xy_round_int = np.around(cv2.blur(perturbed_xy_, (17, 17)))
# perturbed_xy_round_int = cv2.blur(perturbed_xy_round_int, (17, 17))
# perturbed_xy_round_int = cv2.GaussianBlur(perturbed_xy_round_int, (7, 7), 0)
perturbed_xy_ = perturbed_xy_-np.min(perturbed_xy_.T.reshape(2, -1), 1)
# perturbed_xy_round_int = np.around(perturbed_xy_round_int-np.min(perturbed_xy_round_int.T.reshape(2, -1), 1)).astype(np.int16)
self.perturbed_xy_ += perturbed_xy_
'''perspective end'''
'''to img'''
flat_position = np.argwhere(np.zeros(self.new_shape, dtype=np.uint32) == 0).reshape(
self.new_shape[0] * self.new_shape[1], 2)
# self.perturbed_xy_ = cv2.blur(self.perturbed_xy_, (7, 7))
self.perturbed_xy_ = cv2.GaussianBlur(self.perturbed_xy_, (7, 7), 0)
'''get fiducial points'''
fiducial_points_coordinate = self.perturbed_xy_[im_x, im_y]
vtx, wts = self.interp_weights(self.perturbed_xy_.reshape(self.new_shape[0] * self.new_shape[1], 2), flat_position)
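		# Resample the warped image/label back onto the regular pixel grid.
		# interp_weights / interpolate (inherited from utils.BasePerturbed, not shown
		# here) implement scattered-data interpolation: vtx holds vertex indices and
		# wts the interpolation weights for each target pixel; rows whose absolute
		# weights sum to more than 1 fall outside the mesh and are left untouched.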
wts_sum = np.abs(wts).sum(-1)
# flat_img.reshape(flat_shape[0] * flat_shape[1], 3)[:] = interpolate(pixel, vtx, wts)
wts = wts[wts_sum <= 1, :]
vtx = vtx[wts_sum <= 1, :]
synthesis_perturbed_img.reshape(self.new_shape[0] * self.new_shape[1], 3)[wts_sum <= 1,
:] = self.interpolate(synthesis_perturbed_img_map.reshape(self.new_shape[0] * self.new_shape[1], 3), vtx, wts)
synthesis_perturbed_label.reshape(self.new_shape[0] * self.new_shape[1], 2)[wts_sum <= 1,
:] = self.interpolate(synthesis_perturbed_label_map.reshape(self.new_shape[0] * self.new_shape[1], 2), vtx, wts)
foreORbackground_label = np.zeros(self.new_shape)
foreORbackground_label.reshape(self.new_shape[0] * self.new_shape[1], 1)[wts_sum <= 1, :] = self.interpolate(foreORbackground_label_map.reshape(self.new_shape[0] * self.new_shape[1], 1), vtx, wts)
foreORbackground_label[foreORbackground_label < 0.99] = 0
foreORbackground_label[foreORbackground_label >= 0.99] = 1
self.synthesis_perturbed_img = synthesis_perturbed_img
self.synthesis_perturbed_label = synthesis_perturbed_label
self.foreORbackground_label = foreORbackground_label
'''draw fiducial points
stepSize = 0
fiducial_points_synthesis_perturbed_img = self.synthesis_perturbed_img.copy()
for l in fiducial_points_coordinate.astype(np.int64).reshape(-1,2):
cv2.circle(fiducial_points_synthesis_perturbed_img, (l[1] + math.ceil(stepSize / 2), l[0] + math.ceil(stepSize / 2)), 5, (0, 0, 255), -1)
cv2.imwrite('/lustre/home/gwxie/program/project/unwarp/unwarp_perturbed/TPS/img/cv_TPS_large.jpg', fiducial_points_synthesis_perturbed_img)
'''
'''clip'''
perturbed_x_min, perturbed_y_min, perturbed_x_max, perturbed_y_max = -1, -1, self.new_shape[0], self.new_shape[1]
for x in range(self.new_shape[0] // 2, perturbed_x_max):
if np.sum(self.synthesis_perturbed_img[x, :]) == 768 * self.new_shape[1] and perturbed_x_max - 1 > x:
perturbed_x_max = x
break
for x in range(self.new_shape[0] // 2, perturbed_x_min, -1):
if np.sum(self.synthesis_perturbed_img[x, :]) == 768 * self.new_shape[1] and x > 0:
perturbed_x_min = x
break
for y in range(self.new_shape[1] // 2, perturbed_y_max):
if np.sum(self.synthesis_perturbed_img[:, y]) == 768 * self.new_shape[0] and perturbed_y_max - 1 > y:
perturbed_y_max = y
break
for y in range(self.new_shape[1] // 2, perturbed_y_min, -1):
if np.sum(self.synthesis_perturbed_img[:, y]) == 768 * self.new_shape[0] and y > 0:
perturbed_y_min = y
break
if perturbed_x_min == 0 or perturbed_x_max == self.new_shape[0] or perturbed_y_min == self.new_shape[1] or perturbed_y_max == self.new_shape[1]:
raise Exception('clip error')
if perturbed_x_max - perturbed_x_min < im_lr//2 or perturbed_y_max - perturbed_y_min < im_ud//2:
raise Exception('clip error')
perfix_ = self.save_suffix+'_'+str(m)+'_'+str(n)
is_shrink = False
if perturbed_x_max - perturbed_x_min > save_img_shape[0] or perturbed_y_max - perturbed_y_min > save_img_shape[1]:
is_shrink = True
synthesis_perturbed_img = cv2.resize(self.synthesis_perturbed_img[perturbed_x_min:perturbed_x_max, perturbed_y_min:perturbed_y_max, :].copy(), (im_ud, im_lr), interpolation=cv2.INTER_LINEAR)
synthesis_perturbed_label = cv2.resize(self.synthesis_perturbed_label[perturbed_x_min:perturbed_x_max, perturbed_y_min:perturbed_y_max, :].copy(), (im_ud, im_lr), interpolation=cv2.INTER_LINEAR)
foreORbackground_label = cv2.resize(self.foreORbackground_label[perturbed_x_min:perturbed_x_max, perturbed_y_min:perturbed_y_max].copy(), (im_ud, im_lr), interpolation=cv2.INTER_LINEAR)
foreORbackground_label[foreORbackground_label < 0.99] = 0
foreORbackground_label[foreORbackground_label >= 0.99] = 1
'''shrink fiducial points'''
center_x_l, center_y_l = perturbed_x_min + (perturbed_x_max - perturbed_x_min) // 2, perturbed_y_min + (perturbed_y_max - perturbed_y_min) // 2
fiducial_points_coordinate_copy = fiducial_points_coordinate.copy()
shrink_x = im_lr/(perturbed_x_max - perturbed_x_min)
shrink_y = im_ud/(perturbed_y_max - perturbed_y_min)
fiducial_points_coordinate *= [shrink_x, shrink_y]
center_x_l *= shrink_x
center_y_l *= shrink_y
# fiducial_points_coordinate[1:, 1:] *= [shrink_x, shrink_y]
# fiducial_points_coordinate[1:, :1, 0] *= shrink_x
# fiducial_points_coordinate[:1, 1:, 1] *= shrink_y
# perturbed_x_min_copy, perturbed_y_min_copy, perturbed_x_max_copy, perturbed_y_max_copy = perturbed_x_min, perturbed_y_min, perturbed_x_max, perturbed_y_max
perturbed_x_min, perturbed_y_min, perturbed_x_max, perturbed_y_max = self.adjust_position_v2(0, 0, im_lr, im_ud, self.new_shape)
self.synthesis_perturbed_img = np.full_like(self.synthesis_perturbed_img, 256)
self.synthesis_perturbed_label = np.zeros_like(self.synthesis_perturbed_label)
self.foreORbackground_label = np.zeros_like(self.foreORbackground_label)
self.synthesis_perturbed_img[perturbed_x_min:perturbed_x_max, perturbed_y_min:perturbed_y_max, :] = synthesis_perturbed_img
self.synthesis_perturbed_label[perturbed_x_min:perturbed_x_max, perturbed_y_min:perturbed_y_max, :] = synthesis_perturbed_label
self.foreORbackground_label[perturbed_x_min:perturbed_x_max, perturbed_y_min:perturbed_y_max] = foreORbackground_label
center_x, center_y = perturbed_x_min + (perturbed_x_max - perturbed_x_min) // 2, perturbed_y_min + (perturbed_y_max - perturbed_y_min) // 2
if is_shrink:
fiducial_points_coordinate += [center_x-center_x_l, center_y-center_y_l]
'''draw fiducial points
stepSize = 0
fiducial_points_synthesis_perturbed_img = self.synthesis_perturbed_img.copy()
for l in fiducial_points_coordinate.astype(np.int64).reshape(-1, 2):
cv2.circle(fiducial_points_synthesis_perturbed_img,
(l[1] + math.ceil(stepSize / 2), l[0] + math.ceil(stepSize / 2)), 5, (0, 0, 255), -1)
cv2.imwrite('/lustre/home/gwxie/program/project/unwarp/unwarp_perturbed/TPS/img/cv_TPS_small.jpg',fiducial_points_synthesis_perturbed_img)
'''
self.new_shape = save_img_shape
self.synthesis_perturbed_img = self.synthesis_perturbed_img[
center_x - self.new_shape[0] // 2:center_x + self.new_shape[0] // 2,
center_y - self.new_shape[1] // 2:center_y + self.new_shape[1] // 2,
:].copy()
self.synthesis_perturbed_label = self.synthesis_perturbed_label[
center_x - self.new_shape[0] // 2:center_x + self.new_shape[0] // 2,
center_y - self.new_shape[1] // 2:center_y + self.new_shape[1] // 2,
:].copy()
self.foreORbackground_label = self.foreORbackground_label[
center_x - self.new_shape[0] // 2:center_x + self.new_shape[0] // 2,
center_y - self.new_shape[1] // 2:center_y + self.new_shape[1] // 2].copy()
perturbed_x_ = max(self.new_shape[0] - (perturbed_x_max - perturbed_x_min), 0)
perturbed_x_min = perturbed_x_ // 2
perturbed_x_max = self.new_shape[0] - perturbed_x_ // 2 if perturbed_x_%2 == 0 else self.new_shape[0] - (perturbed_x_ // 2 + 1)
perturbed_y_ = max(self.new_shape[1] - (perturbed_y_max - perturbed_y_min), 0)
perturbed_y_min = perturbed_y_ // 2
perturbed_y_max = self.new_shape[1] - perturbed_y_ // 2 if perturbed_y_%2 == 0 else self.new_shape[1] - (perturbed_y_ // 2 + 1)
'''clip
perturbed_x_min, perturbed_y_min, perturbed_x_max, perturbed_y_max = -1, -1, self.new_shape[0], self.new_shape[1]
for x in range(self.new_shape[0] // 2, perturbed_x_max):
if np.sum(self.synthesis_perturbed_img[x, :]) == 768 * self.new_shape[1] and perturbed_x_max - 1 > x:
perturbed_x_max = x
break
for x in range(self.new_shape[0] // 2, perturbed_x_min, -1):
if np.sum(self.synthesis_perturbed_img[x, :]) == 768 * self.new_shape[1] and x > 0:
perturbed_x_min = x
break
for y in range(self.new_shape[1] // 2, perturbed_y_max):
if np.sum(self.synthesis_perturbed_img[:, y]) == 768 * self.new_shape[0] and perturbed_y_max - 1 > y:
perturbed_y_max = y
break
for y in range(self.new_shape[1] // 2, perturbed_y_min, -1):
if np.sum(self.synthesis_perturbed_img[:, y]) == 768 * self.new_shape[0] and y > 0:
perturbed_y_min = y
break
center_x, center_y = perturbed_x_min+(perturbed_x_max - perturbed_x_min)//2, perturbed_y_min+(perturbed_y_max - perturbed_y_min)//2
perfix_ = self.save_suffix+'_'+str(m)+'_'+str(n)
self.new_shape = save_img_shape
perturbed_x_ = max(self.new_shape[0] - (perturbed_x_max - perturbed_x_min), 0)
perturbed_x_min = perturbed_x_ // 2
perturbed_x_max = self.new_shape[0] - perturbed_x_ // 2 if perturbed_x_%2 == 0 else self.new_shape[0] - (perturbed_x_ // 2 + 1)
perturbed_y_ = max(self.new_shape[1] - (perturbed_y_max - perturbed_y_min), 0)
perturbed_y_min = perturbed_y_ // 2
perturbed_y_max = self.new_shape[1] - perturbed_y_ // 2 if perturbed_y_%2 == 0 else self.new_shape[1] - (perturbed_y_ // 2 + 1)
self.synthesis_perturbed_img = self.synthesis_perturbed_img[center_x-self.new_shape[0]//2:center_x+self.new_shape[0]//2, center_y-self.new_shape[1]//2:center_y+self.new_shape[1]//2, :].copy()
self.synthesis_perturbed_label = self.synthesis_perturbed_label[center_x-self.new_shape[0]//2:center_x+self.new_shape[0]//2, center_y-self.new_shape[1]//2:center_y+self.new_shape[1]//2, :].copy()
self.foreORbackground_label = self.foreORbackground_label[center_x-self.new_shape[0]//2:center_x+self.new_shape[0]//2, center_y-self.new_shape[1]//2:center_y+self.new_shape[1]//2].copy()
'''
'''save'''
pixel_position = np.argwhere(np.zeros(self.new_shape, dtype=np.uint32) == 0).reshape(self.new_shape[0], self.new_shape[1], 2)
if relativeShift_position == 'relativeShift_v2':
self.synthesis_perturbed_label -= pixel_position
fiducial_points_coordinate -= [center_x - self.new_shape[0] // 2, center_y - self.new_shape[1] // 2]
self.synthesis_perturbed_label[:, :, 0] *= self.foreORbackground_label
self.synthesis_perturbed_label[:, :, 1] *= self.foreORbackground_label
self.synthesis_perturbed_img[:, :, 0] *= self.foreORbackground_label
self.synthesis_perturbed_img[:, :, 1] *= self.foreORbackground_label
self.synthesis_perturbed_img[:, :, 2] *= self.foreORbackground_label
'''
synthesis_perturbed_img_filter = self.synthesis_perturbed_img.copy()
synthesis_perturbed_img_filter = cv2.GaussianBlur(synthesis_perturbed_img_filter, (3, 3), 0)
# if self.is_perform(0.9, 0.1) or repeat_time > 5:
# # if self.is_perform(0.1, 0.9) and repeat_time > 9:
# # synthesis_perturbed_img_filter = cv2.GaussianBlur(synthesis_perturbed_img_filter, (7, 7), 0)
# # else:
# synthesis_perturbed_img_filter = cv2.GaussianBlur(synthesis_perturbed_img_filter, (5, 5), 0)
# else:
# synthesis_perturbed_img_filter = cv2.GaussianBlur(synthesis_perturbed_img_filter, (3, 3), 0)
self.synthesis_perturbed_img[self.foreORbackground_label == 1] = synthesis_perturbed_img_filter[self.foreORbackground_label == 1]
'''
'''
perturbed_bg_img = perturbed_bg_img.astype(np.float32)
perturbed_bg_img[:, :, 0] *= 1 - self.foreORbackground_label
perturbed_bg_img[:, :, 1] *= 1 - self.foreORbackground_label
perturbed_bg_img[:, :, 2] *= 1 - self.foreORbackground_label
self.synthesis_perturbed_img += perturbed_bg_img
HSV
perturbed_bg_img = perturbed_bg_img.astype(np.float32)
if self.is_perform(0.1, 0.9):
if self.is_perform(0.2, 0.8):
synthesis_perturbed_img_clip_HSV = self.synthesis_perturbed_img.copy()
synthesis_perturbed_img_clip_HSV = cv2.cvtColor(synthesis_perturbed_img_clip_HSV, cv2.COLOR_RGB2HSV)
H_, S_, V_ = (random.random()-0.2)*20, (random.random()-0.2)/8, (random.random()-0.2)*20
synthesis_perturbed_img_clip_HSV[:, :, 0], synthesis_perturbed_img_clip_HSV[:, :, 1], synthesis_perturbed_img_clip_HSV[:, :, 2] = synthesis_perturbed_img_clip_HSV[:, :, 0]-H_, synthesis_perturbed_img_clip_HSV[:, :, 1]-S_, synthesis_perturbed_img_clip_HSV[:, :, 2]-V_
synthesis_perturbed_img_clip_HSV = cv2.cvtColor(synthesis_perturbed_img_clip_HSV, cv2.COLOR_HSV2RGB)
perturbed_bg_img[:, :, 0] *= 1-self.foreORbackground_label
perturbed_bg_img[:, :, 1] *= 1-self.foreORbackground_label
perturbed_bg_img[:, :, 2] *= 1-self.foreORbackground_label
synthesis_perturbed_img_clip_HSV += perturbed_bg_img
self.synthesis_perturbed_img = synthesis_perturbed_img_clip_HSV
else:
perturbed_bg_img_HSV = perturbed_bg_img
perturbed_bg_img_HSV = cv2.cvtColor(perturbed_bg_img_HSV, cv2.COLOR_RGB2HSV)
H_, S_, V_ = (random.random()-0.5)*20, (random.random()-0.5)/8, (random.random()-0.2)*20
perturbed_bg_img_HSV[:, :, 0], perturbed_bg_img_HSV[:, :, 1], perturbed_bg_img_HSV[:, :, 2] = perturbed_bg_img_HSV[:, :, 0]-H_, perturbed_bg_img_HSV[:, :, 1]-S_, perturbed_bg_img_HSV[:, :, 2]-V_
perturbed_bg_img_HSV = cv2.cvtColor(perturbed_bg_img_HSV, cv2.COLOR_HSV2RGB)
perturbed_bg_img_HSV[:, :, 0] *= 1-self.foreORbackground_label
perturbed_bg_img_HSV[:, :, 1] *= 1-self.foreORbackground_label
perturbed_bg_img_HSV[:, :, 2] *= 1-self.foreORbackground_label
self.synthesis_perturbed_img += perturbed_bg_img_HSV
# self.synthesis_perturbed_img[np.sum(self.synthesis_perturbed_img, 2) == 771] = perturbed_bg_img_HSV[np.sum(self.synthesis_perturbed_img, 2) == 771]
else:
synthesis_perturbed_img_clip_HSV = self.synthesis_perturbed_img.copy()
perturbed_bg_img[:, :, 0] *= 1 - self.foreORbackground_label
perturbed_bg_img[:, :, 1] *= 1 - self.foreORbackground_label
perturbed_bg_img[:, :, 2] *= 1 - self.foreORbackground_label
synthesis_perturbed_img_clip_HSV += perturbed_bg_img
# synthesis_perturbed_img_clip_HSV[np.sum(self.synthesis_perturbed_img, 2) == 771] = perturbed_bg_img[np.sum(self.synthesis_perturbed_img, 2) == 771]
synthesis_perturbed_img_clip_HSV = cv2.cvtColor(synthesis_perturbed_img_clip_HSV, cv2.COLOR_RGB2HSV)
H_, S_, V_ = (random.random()-0.5)*20, (random.random()-0.5)/10, (random.random()-0.4)*20
synthesis_perturbed_img_clip_HSV[:, :, 0], synthesis_perturbed_img_clip_HSV[:, :, 1], synthesis_perturbed_img_clip_HSV[:, :, 2] = synthesis_perturbed_img_clip_HSV[:, :, 0]-H_, synthesis_perturbed_img_clip_HSV[:, :, 1]-S_, synthesis_perturbed_img_clip_HSV[:, :, 2]-V_
synthesis_perturbed_img_clip_HSV = cv2.cvtColor(synthesis_perturbed_img_clip_HSV, cv2.COLOR_HSV2RGB)
self.synthesis_perturbed_img = synthesis_perturbed_img_clip_HSV
'''
'''HSV_v2'''
perturbed_bg_img = perturbed_bg_img.astype(np.float32)
# if self.is_perform(1, 0):
# if self.is_perform(1, 0):
if self.is_perform(0.1, 0.9):
if self.is_perform(0.2, 0.8):
synthesis_perturbed_img_clip_HSV = self.synthesis_perturbed_img.copy()
synthesis_perturbed_img_clip_HSV = self.HSV_v1(synthesis_perturbed_img_clip_HSV)
perturbed_bg_img[:, :, 0] *= 1-self.foreORbackground_label
perturbed_bg_img[:, :, 1] *= 1-self.foreORbackground_label
perturbed_bg_img[:, :, 2] *= 1-self.foreORbackground_label
synthesis_perturbed_img_clip_HSV += perturbed_bg_img
self.synthesis_perturbed_img = synthesis_perturbed_img_clip_HSV
else:
perturbed_bg_img_HSV = perturbed_bg_img
perturbed_bg_img_HSV = self.HSV_v1(perturbed_bg_img_HSV)
perturbed_bg_img_HSV[:, :, 0] *= 1-self.foreORbackground_label
perturbed_bg_img_HSV[:, :, 1] *= 1-self.foreORbackground_label
perturbed_bg_img_HSV[:, :, 2] *= 1-self.foreORbackground_label
self.synthesis_perturbed_img += perturbed_bg_img_HSV
# self.synthesis_perturbed_img[np.sum(self.synthesis_perturbed_img, 2) == 771] = perturbed_bg_img_HSV[np.sum(self.synthesis_perturbed_img, 2) == 771]
else:
synthesis_perturbed_img_clip_HSV = self.synthesis_perturbed_img.copy()
perturbed_bg_img[:, :, 0] *= 1 - self.foreORbackground_label
perturbed_bg_img[:, :, 1] *= 1 - self.foreORbackground_label
perturbed_bg_img[:, :, 2] *= 1 - self.foreORbackground_label
synthesis_perturbed_img_clip_HSV += perturbed_bg_img
synthesis_perturbed_img_clip_HSV = self.HSV_v1(synthesis_perturbed_img_clip_HSV)
self.synthesis_perturbed_img = synthesis_perturbed_img_clip_HSV
''''''
# cv2.imwrite(self.save_path+'clip/'+perfix_+'_'+fold_curve+str(perturbed_time)+'-'+str(repeat_time)+'.png', synthesis_perturbed_img_clip)
self.synthesis_perturbed_img[self.synthesis_perturbed_img < 0] = 0
self.synthesis_perturbed_img[self.synthesis_perturbed_img > 255] = 255
self.synthesis_perturbed_img = np.around(self.synthesis_perturbed_img).astype(np.uint8)
label = np.zeros_like(self.synthesis_perturbed_img, dtype=np.float32)
label[:, :, :2] = self.synthesis_perturbed_label
label[:, :, 2] = self.foreORbackground_label
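		# Label layout: channels 0-1 hold the per-pixel backward mapping (the shift
		# from each distorted output pixel back to its flat source position, see
		# relativeShift_v2 above); channel 2 is the foreground/background mask.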
# grey = np.around(self.synthesis_perturbed_img[:, :, 0] * 0.2989 + self.synthesis_perturbed_img[:, :, 1] * 0.5870 + self.synthesis_perturbed_img[:, :, 0] * 0.1140).astype(np.int16)
# synthesis_perturbed_grey = np.concatenate((grey.reshape(self.new_shape[0], self.new_shape[1], 1), label), axis=2)
synthesis_perturbed_color = np.concatenate((self.synthesis_perturbed_img, label), axis=2)
self.synthesis_perturbed_color = np.zeros_like(synthesis_perturbed_color, dtype=np.float32)
# self.synthesis_perturbed_grey = np.zeros_like(synthesis_perturbed_grey, dtype=np.float32)
reduce_value_x = int(round(min((random.random() / 2) * (self.new_shape[0] - (perturbed_x_max - perturbed_x_min)), min(reduce_value, reduce_value_v2))))
reduce_value_y = int(round(min((random.random() / 2) * (self.new_shape[1] - (perturbed_y_max - perturbed_y_min)), min(reduce_value, reduce_value_v2))))
perturbed_x_min = max(perturbed_x_min - reduce_value_x, 0)
perturbed_x_max = min(perturbed_x_max + reduce_value_x, self.new_shape[0])
perturbed_y_min = max(perturbed_y_min - reduce_value_y, 0)
perturbed_y_max = min(perturbed_y_max + reduce_value_y, self.new_shape[1])
if im_lr >= im_ud:
self.synthesis_perturbed_color[:, perturbed_y_min:perturbed_y_max, :] = synthesis_perturbed_color[:, perturbed_y_min:perturbed_y_max, :]
# self.synthesis_perturbed_grey[:, perturbed_y_min:perturbed_y_max, :] = synthesis_perturbed_grey[:, perturbed_y_min:perturbed_y_max, :]
else:
self.synthesis_perturbed_color[perturbed_x_min:perturbed_x_max, :, :] = synthesis_perturbed_color[perturbed_x_min:perturbed_x_max, :, :]
# self.synthesis_perturbed_grey[perturbed_x_min:perturbed_x_max, :, :] = synthesis_perturbed_grey[perturbed_x_min:perturbed_x_max, :, :]
'''blur'''
if self.is_perform(0.1, 0.9):
synthesis_perturbed_img_filter = self.synthesis_perturbed_color[:, :, :3].copy()
if self.is_perform(0.1, 0.9):
synthesis_perturbed_img_filter = cv2.GaussianBlur(synthesis_perturbed_img_filter, (5, 5), 0)
else:
synthesis_perturbed_img_filter = cv2.GaussianBlur(synthesis_perturbed_img_filter, (3, 3), 0)
if self.is_perform(0.5, 0.5):
self.synthesis_perturbed_color[:, :, :3][self.synthesis_perturbed_color[:, :, 5] == 1] = synthesis_perturbed_img_filter[self.synthesis_perturbed_color[:, :, 5] == 1]
else:
self.synthesis_perturbed_color[:, :, :3] = synthesis_perturbed_img_filter
fiducial_points_coordinate = fiducial_points_coordinate[:, :, ::-1]
'''draw fiducial points'''
stepSize = 0
fiducial_points_synthesis_perturbed_img = self.synthesis_perturbed_color[:, :, :3].copy()
for l in fiducial_points_coordinate.astype(np.int64).reshape(-1, 2):
cv2.circle(fiducial_points_synthesis_perturbed_img, (l[0] + math.ceil(stepSize / 2), l[1] + math.ceil(stepSize / 2)), 2, (0, 0, 255), -1)
cv2.imwrite(self.save_path + 'fiducial_points/' + perfix_ + '_' + fold_curve + '.png', fiducial_points_synthesis_perturbed_img)
cv2.imwrite(self.save_path + 'png/' + perfix_ + '_' + fold_curve + '.png', self.synthesis_perturbed_color[:, :, :3])
'''forward-begin'''
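		# Forward mapping: the label above maps distorted pixels back to flat
		# coordinates; here the inverse is built by scattering the flat coordinates
		# of the foreground pixels and interpolating, so forward_mapping gives, for
		# each flat-image position, where that point lands in the distorted image.
		# The commented-out cv2.remap call below uses it to re-flatten the image as
		# a sanity check.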
self.forward_mapping = np.full((save_img_shape[0], save_img_shape[1], 2), 0, dtype=np.float32)
forward_mapping = np.full((save_img_shape[0], save_img_shape[1], 2), 0, dtype=np.float32)
forward_position = (self.synthesis_perturbed_color[:, :, 3:5] + pixel_position)[self.synthesis_perturbed_color[:, :, 5] != 0, :]
flat_position = np.argwhere(np.zeros(save_img_shape, dtype=np.uint32) == 0)
vtx, wts = self.interp_weights(forward_position, flat_position)
wts_sum = np.abs(wts).sum(-1)
wts = wts[wts_sum <= 1, :]
vtx = vtx[wts_sum <= 1, :]
flat_position_forward = flat_position.reshape(save_img_shape[0], save_img_shape[1], 2)[self.synthesis_perturbed_color[:, :, 5] != 0, :]
forward_mapping.reshape(save_img_shape[0] * save_img_shape[1], 2)[wts_sum <= 1, :] = self.interpolate(flat_position_forward, vtx, wts)
forward_mapping = forward_mapping.reshape(save_img_shape[0], save_img_shape[1], 2)
mapping_x_min_, mapping_y_min_, mapping_x_max_, mapping_y_max_ = self.adjust_position_v2(0, 0, im_lr, im_ud, self.new_shape)
threshold_zoom_out = 2
mapping_x_min = mapping_x_min_ + threshold_zoom_out
mapping_y_min = mapping_y_min_ + threshold_zoom_out
mapping_x_max = mapping_x_max_ - threshold_zoom_out
mapping_y_max = mapping_y_max_ - threshold_zoom_out
self.forward_mapping[mapping_x_min:mapping_x_max, mapping_y_min:mapping_y_max] = forward_mapping[mapping_x_min:mapping_x_max, mapping_y_min:mapping_y_max]
self.scan_img = np.full((save_img_shape[0], save_img_shape[1], 3), 0, dtype=np.float32)
self.scan_img[mapping_x_min_:mapping_x_max_, mapping_y_min_:mapping_y_max_] = self.origin_img
self.origin_img = self.scan_img
# flat_img = np.full((save_img_shape[0], save_img_shape[1], 3), 0, dtype=np.float32)
# cv2.remap(self.synthesis_perturbed_color[:, :, :3], self.forward_mapping[:, :, 1], self.forward_mapping[:, :, 0], cv2.INTER_LINEAR, flat_img)
# cv2.imwrite(self.save_path + 'outputs/1.jpg', flat_img)
'''forward-end'''
synthesis_perturbed_data = {
'fiducial_points': fiducial_points_coordinate,
'segment': np.array((segment_x, segment_y))
}
cv2.imwrite(self.save_path + 'png/' + perfix_ + '_' + fold_curve + '.png', self.synthesis_perturbed_color[:, :, :3])
with open(self.save_path+'color/'+perfix_+'_'+fold_curve+'.gw', 'wb') as f:
pickle_perturbed_data = pickle.dumps(synthesis_perturbed_data)
f.write(pickle_perturbed_data)
# with open(self.save_path+'grey/'+perfix_+'_'+fold_curve+'.gw', 'wb') as f:
# pickle_perturbed_data = pickle.dumps(self.synthesis_perturbed_grey)
# f.write(pickle_perturbed_data)
# cv2.imwrite(self.save_path+'grey_im/'+perfix_+'_'+fold_curve+'.png', self.synthesis_perturbed_color[:, :, :1])
# cv2.imwrite(self.save_path + 'scan/' + self.save_suffix + '_' + str(m) + '.png', self.origin_img)
train_t = time.time() - begin_train
mm, ss = divmod(train_t, 60)
hh, mm = divmod(mm, 60)
print(str(m)+'_'+str(n)+'_'+fold_curve+' '+str(repeat_time)+" Time : %02d:%02d:%02d\n" % (hh, mm, ss))
def multiThread(m, n, img_path_, bg_path_, save_path, save_suffix):
saveFold = perturbed(img_path_, bg_path_, save_path, save_suffix)
saveCurve = perturbed(img_path_, bg_path_, save_path, save_suffix)
repeat_time = min(max(round(np.random.normal(10, 3)), 5), 16)
fold = threading.Thread(target=saveFold.save_img, args=(m, n, 'fold', repeat_time, 'relativeShift_v2'), name='fold')
curve = threading.Thread(target=saveCurve.save_img, args=(m, n, 'curve', repeat_time, 'relativeShift_v2'), name='curve')
fold.start()
curve.start()
curve.join()
fold.join()
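# Illustrative call (all paths hypothetical):
# multiThread(0, 0, './scan/new/page.png', './background/bg_1/', './output/', 'new')
# This threading-based driver parallels the multiprocessing pool used in xgw() below,
# but note that it does not pass the fiducial_points argument to save_img.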
def xgw(args):
path = args.path
bg_path = args.bg_path
if args.output_path is None:
save_path = '/lustre/home/gwxie/data/unwarp_new/train/general1024/general1024_v1/'
else:
save_path = args.output_path
# if not os.path.exists(save_path + 'grey/'):
# os.makedirs(save_path + 'grey/')
if not os.path.exists(save_path + 'color/'):
os.makedirs(save_path + 'color/')
if not os.path.exists(save_path + 'fiducial_points/'):
os.makedirs(save_path + 'fiducial_points/')
if not os.path.exists(save_path + 'png/'):
os.makedirs(save_path + 'png/')
if not os.path.exists(save_path + 'scan/'):
os.makedirs(save_path + 'scan/')
if not os.path.exists(save_path + 'outputs/'):
os.makedirs(save_path + 'outputs/')
save_suffix = str.split(args.path, '/')[-2]
all_img_path = getDatasets(path)
all_bgImg_path = getDatasets(bg_path)
global begin_train
begin_train = time.time()
fiducial_points = 61 # 31
process_pool = Pool(2)
for m, img_path in enumerate(all_img_path):
for n in range(args.sys_num):
img_path_ = path+img_path
bg_path_ = bg_path+random.choice(all_bgImg_path)+'/'
for m_n in range(10):
try:
saveFold = perturbed(img_path_, bg_path_, save_path, save_suffix)
saveCurve = perturbed(img_path_, bg_path_, save_path, save_suffix)
repeat_time = min(max(round(np.random.normal(12, 4)), 1), 18)
# repeat_time = min(max(round(np.random.normal(8, 4)), 1), 12) # random.randint(1, 2) # min(max(round(np.random.normal(8, 4)), 1), 12)
process_pool.apply_async(func=saveFold.save_img, args=(m, n, 'fold', repeat_time, fiducial_points, 'relativeShift_v2'))
repeat_time = min(max(round(np.random.normal(8, 4)), 1), 13)
# repeat_time = min(max(round(np.random.normal(6, 4)), 1), 10)
process_pool.apply_async(func=saveCurve.save_img, args=(m, n, 'curve', repeat_time, fiducial_points, 'relativeShift_v2'))
except BaseException as err:
print(err)
continue
break
# print('end')
process_pool.close()
process_pool.join()
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Hyperparams')
parser.add_argument('--path',
default='./scan/new/', type=str,
help='the path of origin img.')
parser.add_argument('--bg_path',
default='./background/', type=str,
help='the path of bg img.')
parser.add_argument('--output_path',
default='./output/', type=str,
help='the path of origin img.')
# parser.set_defaults(output_path='test')
parser.add_argument('--count_from', '-p', default=0, type=int,
metavar='N', help='print frequency (default: 10)') # print frequency
parser.add_argument('--repeat_T', default=0, type=int)
parser.add_argument('--sys_num', default=6, type=int)
args = parser.parse_args()
xgw(args)
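# Example invocation (the script file name is assumed here):
# python synthesize_perturbed_images.py --path ./scan/new/ --bg_path ./background/ --output_path ./output/ --sys_num 6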
| [
"pickle.dumps",
"numpy.array",
"numpy.linalg.norm",
"os.path.exists",
"os.listdir",
"numpy.full_like",
"argparse.ArgumentParser",
"numpy.linspace",
"numpy.dot",
"numpy.concatenate",
"numpy.meshgrid",
"random.randint",
"numpy.random.normal",
"numpy.abs",
"random.choice",
"numpy.ones",
"cv2.getPerspectiveTransform",
"numpy.random.choice",
"numpy.around",
"threading.Thread",
"cv2.resize",
"cv2.GaussianBlur",
"time.time",
"cv2.imread",
"numpy.dstack",
"cv2.imwrite",
"math.ceil",
"os.makedirs",
"numpy.sum",
"numpy.zeros",
"multiprocessing.Pool",
"numpy.full",
"random.random",
"numpy.zeros_like",
"numpy.float32"
] | [((595, 610), 'os.listdir', 'os.listdir', (['dir'], {}), '(dir)\n', (605, 610), False, 'import sys, os\n'), ((44019, 44132), 'threading.Thread', 'threading.Thread', ([], {'target': 'saveFold.save_img', 'args': "(m, n, 'fold', repeat_time, 'relativeShift_v2')", 'name': '"""fold"""'}), "(target=saveFold.save_img, args=(m, n, 'fold', repeat_time,\n 'relativeShift_v2'), name='fold')\n", (44035, 44132), False, 'import threading\n'), ((44138, 44254), 'threading.Thread', 'threading.Thread', ([], {'target': 'saveCurve.save_img', 'args': "(m, n, 'curve', repeat_time, 'relativeShift_v2')", 'name': '"""curve"""'}), "(target=saveCurve.save_img, args=(m, n, 'curve',\n repeat_time, 'relativeShift_v2'), name='curve')\n", (44154, 44254), False, 'import threading\n'), ((45191, 45202), 'time.time', 'time.time', ([], {}), '()\n', (45200, 45202), False, 'import time\n'), ((45246, 45253), 'multiprocessing.Pool', 'Pool', (['(2)'], {}), '(2)\n', (45250, 45253), False, 'from multiprocessing import Pool\n'), ((46365, 46415), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Hyperparams"""'}), "(description='Hyperparams')\n", (46388, 46415), False, 'import argparse\n'), ((960, 1005), 'cv2.imread', 'cv2.imread', (['self.path'], {'flags': 'cv2.IMREAD_COLOR'}), '(self.path, flags=cv2.IMREAD_COLOR)\n', (970, 1005), False, 'import cv2\n'), ((1164, 1295), 'numpy.random.choice', 'np.random.choice', (['[2 * 2, 4 * 2, 8 * 2, 16 * 2, 24 * 2, 32 * 2, 40 * 2, 48 * 2]'], {'p': '[0.02, 0.18, 0.2, 0.3, 0.1, 0.1, 0.08, 0.02]'}), '([2 * 2, 4 * 2, 8 * 2, 16 * 2, 24 * 2, 32 * 2, 40 * 2, 48 *\n 2], p=[0.02, 0.18, 0.2, 0.3, 0.1, 0.1, 0.08, 0.02])\n', (1180, 1295), True, 'import numpy as np\n'), ((1930, 2059), 'numpy.random.choice', 'np.random.choice', (['[2 * 2, 4 * 2, 8 * 2, 16 * 2, 24 * 2, 28 * 2, 32 * 2, 48 * 2]'], {'p': '[0.02, 0.18, 0.2, 0.2, 0.1, 0.1, 0.1, 0.1]'}), '([2 * 2, 4 * 2, 8 * 2, 16 * 2, 24 * 2, 28 * 2, 32 * 2, 48 *\n 2], p=[0.02, 0.18, 0.2, 0.2, 0.1, 0.1, 0.1, 0.1])\n', (1946, 2059), True, 'import numpy as np\n'), ((2802, 2887), 'numpy.linspace', 'np.linspace', (['edge_padding', '(im_lr - edge_padding)', 'fiducial_points'], {'dtype': 'np.int64'}), '(edge_padding, im_lr - edge_padding, fiducial_points, dtype=np.int64\n )\n', (2813, 2887), True, 'import numpy as np\n'), ((2895, 2980), 'numpy.linspace', 'np.linspace', (['edge_padding', '(im_ud - edge_padding)', 'fiducial_points'], {'dtype': 'np.int64'}), '(edge_padding, im_ud - edge_padding, fiducial_points, dtype=np.int64\n )\n', (2906, 2980), True, 'import numpy as np\n'), ((3384, 3414), 'numpy.meshgrid', 'np.meshgrid', (['im_hight', 'im_wide'], {}), '(im_hight, im_wide)\n', (3395, 3414), True, 'import numpy as np\n'), ((3651, 3720), 'cv2.resize', 'cv2.resize', (['origin_img', '(im_ud, im_lr)'], {'interpolation': 'cv2.INTER_CUBIC'}), '(origin_img, (im_ud, im_lr), interpolation=cv2.INTER_CUBIC)\n', (3661, 3720), False, 'import cv2\n'), ((3851, 3904), 'cv2.imread', 'cv2.imread', (['perturbed_bg_img_'], {'flags': 'cv2.IMREAD_COLOR'}), '(perturbed_bg_img_, flags=cv2.IMREAD_COLOR)\n', (3861, 3904), False, 'import cv2\n'), ((3981, 4067), 'numpy.full', 'np.full', (['(enlarge_img_shrink[0], enlarge_img_shrink[1], 3)', '(256)'], {'dtype': 'np.float32'}), '((enlarge_img_shrink[0], enlarge_img_shrink[1], 3), 256, dtype=np.\n float32)\n', (3988, 4067), True, 'import numpy as np\n'), ((4319, 4411), 'cv2.resize', 'cv2.resize', (['perturbed_bg_img', '(save_img_shape[1], save_img_shape[0])', 'cv2.INPAINT_TELEA'], {}), '(perturbed_bg_img, 
(save_img_shape[1], save_img_shape[0]), cv2.\n INPAINT_TELEA)\n', (4329, 4411), False, 'import cv2\n'), ((4682, 4733), 'numpy.zeros', 'np.zeros', (['(self.new_shape[0], self.new_shape[1], 2)'], {}), '((self.new_shape[0], self.new_shape[1], 2))\n', (4690, 4733), True, 'import numpy as np\n'), ((4897, 4948), 'numpy.zeros', 'np.zeros', (['(self.new_shape[0], self.new_shape[1], 2)'], {}), '((self.new_shape[0], self.new_shape[1], 2))\n', (4905, 4948), True, 'import numpy as np\n'), ((5199, 5272), 'random.randint', 'random.randint', (['(-enlarge_img_shrink[0] // 16)', '(enlarge_img_shrink[0] // 16)'], {}), '(-enlarge_img_shrink[0] // 16, enlarge_img_shrink[0] // 16)\n', (5213, 5272), False, 'import random\n'), ((5281, 5354), 'random.randint', 'random.randint', (['(-enlarge_img_shrink[1] // 16)', '(enlarge_img_shrink[1] // 16)'], {}), '(-enlarge_img_shrink[1] // 16, enlarge_img_shrink[1] // 16)\n', (5295, 5354), False, 'import random\n'), ((5803, 5841), 'numpy.full', 'np.full', (['mesh_shape', '(1)'], {'dtype': 'np.int16'}), '(mesh_shape, 1, dtype=np.int16)\n', (5810, 5841), True, 'import numpy as np\n'), ((5875, 5917), 'numpy.full', 'np.full', (['self.new_shape', '(0)'], {'dtype': 'np.int16'}), '(self.new_shape, 0, dtype=np.int16)\n', (5882, 5917), True, 'import numpy as np\n'), ((7126, 7173), 'numpy.full_like', 'np.full_like', (['self.synthesis_perturbed_img', '(256)'], {}), '(self.synthesis_perturbed_img, 256)\n', (7138, 7173), True, 'import numpy as np\n'), ((7296, 7341), 'numpy.zeros_like', 'np.zeros_like', (['self.synthesis_perturbed_label'], {}), '(self.synthesis_perturbed_label)\n', (7309, 7341), True, 'import numpy as np\n'), ((15095, 15207), 'numpy.float32', 'np.float32', (['[[x_min_per, y_min_per], [x_max_per, y_min_per], [x_min_per, y_max_per], [\n x_max_per, y_max_per]]'], {}), '([[x_min_per, y_min_per], [x_max_per, y_min_per], [x_min_per,\n y_max_per], [x_max_per, y_max_per]])\n', (15105, 15207), True, 'import numpy as np\n'), ((19561, 19600), 'cv2.getPerspectiveTransform', 'cv2.getPerspectiveTransform', (['pts1', 'pts2'], {}), '(pts1, pts2)\n', (19588, 19600), False, 'import cv2\n'), ((19609, 19675), 'numpy.ones', 'np.ones', (['(self.new_shape[0], self.new_shape[1], 1)'], {'dtype': 'np.int16'}), '((self.new_shape[0], self.new_shape[1], 1), dtype=np.int16)\n', (19616, 19675), True, 'import numpy as np\n'), ((19685, 19717), 'numpy.dstack', 'np.dstack', (['(pixel_position, one)'], {}), '((pixel_position, one))\n', (19694, 19717), True, 'import numpy as np\n'), ((19892, 19909), 'numpy.dstack', 'np.dstack', (['(x, y)'], {}), '((x, y))\n', (19901, 19909), True, 'import numpy as np\n'), ((20734, 20781), 'cv2.GaussianBlur', 'cv2.GaussianBlur', (['self.perturbed_xy_', '(7, 7)', '(0)'], {}), '(self.perturbed_xy_, (7, 7), 0)\n', (20750, 20781), False, 'import cv2\n'), ((21612, 21636), 'numpy.zeros', 'np.zeros', (['self.new_shape'], {}), '(self.new_shape)\n', (21620, 21636), True, 'import numpy as np\n'), ((37466, 37527), 'numpy.zeros_like', 'np.zeros_like', (['self.synthesis_perturbed_img'], {'dtype': 'np.float32'}), '(self.synthesis_perturbed_img, dtype=np.float32)\n', (37479, 37527), True, 'import numpy as np\n'), ((37959, 38020), 'numpy.concatenate', 'np.concatenate', (['(self.synthesis_perturbed_img, label)'], {'axis': '(2)'}), '((self.synthesis_perturbed_img, label), axis=2)\n', (37973, 38020), True, 'import numpy as np\n'), ((38057, 38115), 'numpy.zeros_like', 'np.zeros_like', (['synthesis_perturbed_color'], {'dtype': 'np.float32'}), '(synthesis_perturbed_color, dtype=np.float32)\n', 
(38070, 38115), True, 'import numpy as np\n'), ((40461, 40592), 'cv2.imwrite', 'cv2.imwrite', (["(self.save_path + 'fiducial_points/' + perfix_ + '_' + fold_curve + '.png')", 'fiducial_points_synthesis_perturbed_img'], {}), "(self.save_path + 'fiducial_points/' + perfix_ + '_' +\n fold_curve + '.png', fiducial_points_synthesis_perturbed_img)\n", (40472, 40592), False, 'import cv2\n'), ((40592, 40712), 'cv2.imwrite', 'cv2.imwrite', (["(self.save_path + 'png/' + perfix_ + '_' + fold_curve + '.png')", 'self.synthesis_perturbed_color[:, :, :3]'], {}), "(self.save_path + 'png/' + perfix_ + '_' + fold_curve + '.png',\n self.synthesis_perturbed_color[:, :, :3])\n", (40603, 40712), False, 'import cv2\n'), ((40757, 40828), 'numpy.full', 'np.full', (['(save_img_shape[0], save_img_shape[1], 2)', '(0)'], {'dtype': 'np.float32'}), '((save_img_shape[0], save_img_shape[1], 2), 0, dtype=np.float32)\n', (40764, 40828), True, 'import numpy as np\n'), ((40849, 40920), 'numpy.full', 'np.full', (['(save_img_shape[0], save_img_shape[1], 2)', '(0)'], {'dtype': 'np.float32'}), '((save_img_shape[0], save_img_shape[1], 2), 0, dtype=np.float32)\n', (40856, 40920), True, 'import numpy as np\n'), ((42190, 42261), 'numpy.full', 'np.full', (['(save_img_shape[0], save_img_shape[1], 3)', '(0)'], {'dtype': 'np.float32'}), '((save_img_shape[0], save_img_shape[1], 3), 0, dtype=np.float32)\n', (42197, 42261), True, 'import numpy as np\n'), ((42840, 42960), 'cv2.imwrite', 'cv2.imwrite', (["(self.save_path + 'png/' + perfix_ + '_' + fold_curve + '.png')", 'self.synthesis_perturbed_color[:, :, :3]'], {}), "(self.save_path + 'png/' + perfix_ + '_' + fold_curve + '.png',\n self.synthesis_perturbed_color[:, :, :3])\n", (42851, 42960), False, 'import cv2\n'), ((44612, 44648), 'os.path.exists', 'os.path.exists', (["(save_path + 'color/')"], {}), "(save_path + 'color/')\n", (44626, 44648), False, 'import sys, os\n'), ((44652, 44685), 'os.makedirs', 'os.makedirs', (["(save_path + 'color/')"], {}), "(save_path + 'color/')\n", (44663, 44685), False, 'import sys, os\n'), ((44695, 44741), 'os.path.exists', 'os.path.exists', (["(save_path + 'fiducial_points/')"], {}), "(save_path + 'fiducial_points/')\n", (44709, 44741), False, 'import sys, os\n'), ((44745, 44788), 'os.makedirs', 'os.makedirs', (["(save_path + 'fiducial_points/')"], {}), "(save_path + 'fiducial_points/')\n", (44756, 44788), False, 'import sys, os\n'), ((44798, 44832), 'os.path.exists', 'os.path.exists', (["(save_path + 'png/')"], {}), "(save_path + 'png/')\n", (44812, 44832), False, 'import sys, os\n'), ((44836, 44867), 'os.makedirs', 'os.makedirs', (["(save_path + 'png/')"], {}), "(save_path + 'png/')\n", (44847, 44867), False, 'import sys, os\n'), ((44877, 44912), 'os.path.exists', 'os.path.exists', (["(save_path + 'scan/')"], {}), "(save_path + 'scan/')\n", (44891, 44912), False, 'import sys, os\n'), ((44916, 44948), 'os.makedirs', 'os.makedirs', (["(save_path + 'scan/')"], {}), "(save_path + 'scan/')\n", (44927, 44948), False, 'import sys, os\n'), ((44958, 44996), 'os.path.exists', 'os.path.exists', (["(save_path + 'outputs/')"], {}), "(save_path + 'outputs/')\n", (44972, 44996), False, 'import sys, os\n'), ((45000, 45035), 'os.makedirs', 'os.makedirs', (["(save_path + 'outputs/')"], {}), "(save_path + 'outputs/')\n", (45011, 45035), False, 'import sys, os\n'), ((3801, 3829), 'random.choice', 'random.choice', (['perturbed_bg_'], {}), '(perturbed_bg_)\n', (3814, 3829), False, 'import random\n'), ((8301, 8330), 'random.choice', 'random.choice', (['linspace_x_seq'], {}), 
'(linspace_x_seq)\n', (8314, 8330), False, 'import random\n'), ((8340, 8369), 'random.choice', 'random.choice', (['linspace_y_seq'], {}), '(linspace_y_seq)\n', (8353, 8369), False, 'import random\n'), ((8681, 8710), 'random.choice', 'random.choice', (['linspace_x_seq'], {}), '(linspace_x_seq)\n', (8694, 8710), False, 'import random\n'), ((8720, 8749), 'random.choice', 'random.choice', (['linspace_y_seq'], {}), '(linspace_y_seq)\n', (8733, 8749), False, 'import random\n'), ((9801, 9829), 'numpy.linalg.norm', 'np.linalg.norm', (['perturbed_vp'], {}), '(perturbed_vp)\n', (9815, 9829), True, 'import numpy as np\n'), ((14858, 14880), 'random.randint', 'random.randint', (['(26)', '(36)'], {}), '(26, 36)\n', (14872, 14880), False, 'import random\n'), ((25705, 25752), 'numpy.full_like', 'np.full_like', (['self.synthesis_perturbed_img', '(256)'], {}), '(self.synthesis_perturbed_img, 256)\n', (25717, 25752), True, 'import numpy as np\n'), ((25789, 25834), 'numpy.zeros_like', 'np.zeros_like', (['self.synthesis_perturbed_label'], {}), '(self.synthesis_perturbed_label)\n', (25802, 25834), True, 'import numpy as np\n'), ((25868, 25910), 'numpy.zeros_like', 'np.zeros_like', (['self.foreORbackground_label'], {}), '(self.foreORbackground_label)\n', (25881, 25910), True, 'import numpy as np\n'), ((42800, 42832), 'numpy.array', 'np.array', (['(segment_x, segment_y)'], {}), '((segment_x, segment_y))\n', (42808, 42832), True, 'import numpy as np\n'), ((43062, 43100), 'pickle.dumps', 'pickle.dumps', (['synthesis_perturbed_data'], {}), '(synthesis_perturbed_data)\n', (43074, 43100), False, 'import pickle\n'), ((43554, 43565), 'time.time', 'time.time', ([], {}), '()\n', (43563, 43565), False, 'import time\n'), ((6996, 7019), 'random.randint', 'random.randint', (['(80)', '(160)'], {}), '(80, 160)\n', (7010, 7019), False, 'import random\n'), ((9871, 9921), 'numpy.dot', 'np.dot', (['(perturbed_p - pixel_position)', 'perturbed_vp'], {}), '(perturbed_p - pixel_position, perturbed_vp)\n', (9877, 9921), True, 'import numpy as np\n'), ((18765, 18798), 'numpy.linalg.norm', 'np.linalg.norm', (['(pts2[0] - pts2[1])'], {}), '(pts2[0] - pts2[1])\n', (18779, 18798), True, 'import numpy as np\n'), ((18807, 18840), 'numpy.linalg.norm', 'np.linalg.norm', (['(pts2[0] - pts2[2])'], {}), '(pts2[0] - pts2[2])\n', (18821, 18840), True, 'import numpy as np\n'), ((18849, 18882), 'numpy.linalg.norm', 'np.linalg.norm', (['(pts2[1] - pts2[3])'], {}), '(pts2[1] - pts2[3])\n', (18863, 18882), True, 'import numpy as np\n'), ((18891, 18924), 'numpy.linalg.norm', 'np.linalg.norm', (['(pts2[2] - pts2[3])'], {}), '(pts2[2] - pts2[3])\n', (18905, 18924), True, 'import numpy as np\n'), ((21005, 21016), 'numpy.abs', 'np.abs', (['wts'], {}), '(wts)\n', (21011, 21016), True, 'import numpy as np\n'), ((37399, 37438), 'numpy.around', 'np.around', (['self.synthesis_perturbed_img'], {}), '(self.synthesis_perturbed_img)\n', (37408, 37438), True, 'import numpy as np\n'), ((39584, 39643), 'cv2.GaussianBlur', 'cv2.GaussianBlur', (['synthesis_perturbed_img_filter', '(5, 5)', '(0)'], {}), '(synthesis_perturbed_img_filter, (5, 5), 0)\n', (39600, 39643), False, 'import cv2\n'), ((39690, 39749), 'cv2.GaussianBlur', 'cv2.GaussianBlur', (['synthesis_perturbed_img_filter', '(3, 3)', '(0)'], {}), '(synthesis_perturbed_img_filter, (3, 3), 0)\n', (39706, 39749), False, 'import cv2\n'), ((41082, 41123), 'numpy.zeros', 'np.zeros', (['save_img_shape'], {'dtype': 'np.uint32'}), '(save_img_shape, dtype=np.uint32)\n', (41090, 41123), True, 'import numpy as np\n'), ((41208, 
41219), 'numpy.abs', 'np.abs', (['wts'], {}), '(wts)\n', (41214, 41219), True, 'import numpy as np\n'), ((43977, 44000), 'numpy.random.normal', 'np.random.normal', (['(10)', '(3)'], {}), '(10, 3)\n', (43993, 44000), True, 'import numpy as np\n'), ((6675, 6698), 'random.randint', 'random.randint', (['(80)', '(120)'], {}), '(80, 120)\n', (6689, 6698), False, 'import random\n'), ((16071, 16104), 'numpy.linalg.norm', 'np.linalg.norm', (['(pts2[0] - pts2[1])'], {}), '(pts2[0] - pts2[1])\n', (16085, 16104), True, 'import numpy as np\n'), ((16114, 16147), 'numpy.linalg.norm', 'np.linalg.norm', (['(pts2[0] - pts2[2])'], {}), '(pts2[0] - pts2[2])\n', (16128, 16147), True, 'import numpy as np\n'), ((16157, 16190), 'numpy.linalg.norm', 'np.linalg.norm', (['(pts2[1] - pts2[3])'], {}), '(pts2[1] - pts2[3])\n', (16171, 16190), True, 'import numpy as np\n'), ((16200, 16233), 'numpy.linalg.norm', 'np.linalg.norm', (['(pts2[2] - pts2[3])'], {}), '(pts2[2] - pts2[3])\n', (16214, 16233), True, 'import numpy as np\n'), ((17423, 17456), 'numpy.linalg.norm', 'np.linalg.norm', (['(pts2[0] - pts2[1])'], {}), '(pts2[0] - pts2[1])\n', (17437, 17456), True, 'import numpy as np\n'), ((17466, 17499), 'numpy.linalg.norm', 'np.linalg.norm', (['(pts2[0] - pts2[2])'], {}), '(pts2[0] - pts2[2])\n', (17480, 17499), True, 'import numpy as np\n'), ((17509, 17542), 'numpy.linalg.norm', 'np.linalg.norm', (['(pts2[1] - pts2[3])'], {}), '(pts2[1] - pts2[3])\n', (17523, 17542), True, 'import numpy as np\n'), ((17552, 17585), 'numpy.linalg.norm', 'np.linalg.norm', (['(pts2[2] - pts2[3])'], {}), '(pts2[2] - pts2[3])\n', (17566, 17585), True, 'import numpy as np\n'), ((22807, 22849), 'numpy.sum', 'np.sum', (['self.synthesis_perturbed_img[x, :]'], {}), '(self.synthesis_perturbed_img[x, :])\n', (22813, 22849), True, 'import numpy as np\n'), ((23009, 23051), 'numpy.sum', 'np.sum', (['self.synthesis_perturbed_img[x, :]'], {}), '(self.synthesis_perturbed_img[x, :])\n', (23015, 23051), True, 'import numpy as np\n'), ((23189, 23231), 'numpy.sum', 'np.sum', (['self.synthesis_perturbed_img[:, y]'], {}), '(self.synthesis_perturbed_img[:, y])\n', (23195, 23231), True, 'import numpy as np\n'), ((23391, 23433), 'numpy.sum', 'np.sum', (['self.synthesis_perturbed_img[:, y]'], {}), '(self.synthesis_perturbed_img[:, y])\n', (23397, 23433), True, 'import numpy as np\n'), ((45382, 45411), 'random.choice', 'random.choice', (['all_bgImg_path'], {}), '(all_bgImg_path)\n', (45395, 45411), False, 'import random\n'), ((4446, 4483), 'numpy.zeros', 'np.zeros', (['mesh_shape'], {'dtype': 'np.uint32'}), '(mesh_shape, dtype=np.uint32)\n', (4454, 4483), True, 'import numpy as np\n'), ((4562, 4603), 'numpy.zeros', 'np.zeros', (['self.new_shape'], {'dtype': 'np.uint32'}), '(self.new_shape, dtype=np.uint32)\n', (4570, 4603), True, 'import numpy as np\n'), ((6786, 6808), 'random.randint', 'random.randint', (['(50)', '(70)'], {}), '(50, 70)\n', (6800, 6808), False, 'import random\n'), ((6848, 6871), 'random.randint', 'random.randint', (['(70)', '(130)'], {}), '(70, 130)\n', (6862, 6871), False, 'import random\n'), ((7898, 7921), 'random.randint', 'random.randint', (['(80)', '(160)'], {}), '(80, 160)\n', (7912, 7921), False, 'import random\n'), ((8402, 8464), 'random.randint', 'random.randint', (['(linspace_x[r_x - 1] * 10)', '(linspace_x[r_x] * 10)'], {}), '(linspace_x[r_x - 1] * 10, linspace_x[r_x] * 10)\n', (8416, 8464), False, 'import random\n'), ((8469, 8531), 'random.randint', 'random.randint', (['(linspace_y[r_y - 1] * 10)', '(linspace_y[r_y] * 10)'], {}), 
'(linspace_y[r_y - 1] * 10, linspace_y[r_y] * 10)\n', (8483, 8531), False, 'import random\n'), ((8783, 8845), 'random.randint', 'random.randint', (['(linspace_x[r_x - 1] * 10)', '(linspace_x[r_x] * 10)'], {}), '(linspace_x[r_x - 1] * 10, linspace_x[r_x] * 10)\n', (8797, 8845), False, 'import random\n'), ((8850, 8912), 'random.randint', 'random.randint', (['(linspace_y[r_y - 1] * 10)', '(linspace_y[r_y] * 10)'], {}), '(linspace_y[r_y - 1] * 10, linspace_y[r_y] * 10)\n', (8864, 8912), False, 'import random\n'), ((12912, 12990), 'numpy.array', 'np.array', (['[omega_perturbed * perturbed_v[0], omega_perturbed * perturbed_v[1]]'], {}), '([omega_perturbed * perturbed_v[0], omega_perturbed * perturbed_v[1]])\n', (12920, 12990), True, 'import numpy as np\n'), ((20547, 20588), 'numpy.zeros', 'np.zeros', (['self.new_shape'], {'dtype': 'np.uint32'}), '(self.new_shape, dtype=np.uint32)\n', (20555, 20588), True, 'import numpy as np\n'), ((30532, 30573), 'numpy.zeros', 'np.zeros', (['self.new_shape'], {'dtype': 'np.uint32'}), '(self.new_shape, dtype=np.uint32)\n', (30540, 30573), True, 'import numpy as np\n'), ((40381, 40404), 'math.ceil', 'math.ceil', (['(stepSize / 2)'], {}), '(stepSize / 2)\n', (40390, 40404), False, 'import math\n'), ((40413, 40436), 'math.ceil', 'math.ceil', (['(stepSize / 2)'], {}), '(stepSize / 2)\n', (40422, 40436), False, 'import math\n'), ((7652, 7675), 'random.randint', 'random.randint', (['(80)', '(120)'], {}), '(80, 120)\n', (7666, 7675), False, 'import random\n'), ((10370, 10399), 'random.randint', 'random.randint', (['(-10000)', '(10000)'], {}), '(-10000, 10000)\n', (10384, 10399), False, 'import random\n'), ((10407, 10436), 'random.randint', 'random.randint', (['(-10000)', '(10000)'], {}), '(-10000, 10000)\n', (10421, 10436), False, 'import random\n'), ((10793, 10820), 'random.randint', 'random.randint', (['(-8000)', '(8000)'], {}), '(-8000, 8000)\n', (10807, 10820), False, 'import random\n'), ((10828, 10855), 'random.randint', 'random.randint', (['(-8000)', '(8000)'], {}), '(-8000, 8000)\n', (10842, 10855), False, 'import random\n'), ((11298, 11340), 'numpy.abs', 'np.abs', (['perturbed_distance_vertex_and_line'], {}), '(perturbed_distance_vertex_and_line)\n', (11304, 11340), True, 'import numpy as np\n'), ((11342, 11362), 'random.randint', 'random.randint', (['(1)', '(2)'], {}), '(1, 2)\n', (11356, 11362), False, 'import random\n'), ((11439, 11481), 'numpy.abs', 'np.abs', (['perturbed_distance_vertex_and_line'], {}), '(perturbed_distance_vertex_and_line)\n', (11445, 11481), True, 'import numpy as np\n'), ((11790, 11832), 'numpy.abs', 'np.abs', (['perturbed_distance_vertex_and_line'], {}), '(perturbed_distance_vertex_and_line)\n', (11796, 11832), True, 'import numpy as np\n'), ((11834, 11854), 'random.randint', 'random.randint', (['(1)', '(2)'], {}), '(1, 2)\n', (11848, 11854), False, 'import random\n'), ((11931, 11973), 'numpy.abs', 'np.abs', (['perturbed_distance_vertex_and_line'], {}), '(perturbed_distance_vertex_and_line)\n', (11937, 11973), True, 'import numpy as np\n'), ((38244, 38259), 'random.random', 'random.random', ([], {}), '()\n', (38257, 38259), False, 'import random\n'), ((38398, 38413), 'random.random', 'random.random', ([], {}), '()\n', (38411, 38413), False, 'import random\n'), ((7769, 7791), 'random.randint', 'random.randint', (['(50)', '(70)'], {}), '(50, 70)\n', (7783, 7791), False, 'import random\n'), ((7835, 7858), 'random.randint', 'random.randint', (['(70)', '(130)'], {}), '(70, 130)\n', (7849, 7858), False, 'import random\n'), ((45628, 45651), 
'numpy.random.normal', 'np.random.normal', (['(12)', '(4)'], {}), '(12, 4)\n', (45644, 45651), True, 'import numpy as np\n'), ((45962, 45984), 'numpy.random.normal', 'np.random.normal', (['(8)', '(4)'], {}), '(8, 4)\n', (45978, 45984), True, 'import numpy as np\n'), ((15589, 15604), 'random.random', 'random.random', ([], {}), '()\n', (15602, 15604), False, 'import random\n'), ((15644, 15659), 'random.random', 'random.random', ([], {}), '()\n', (15657, 15659), False, 'import random\n'), ((15710, 15725), 'random.random', 'random.random', ([], {}), '()\n', (15723, 15725), False, 'import random\n'), ((15765, 15780), 'random.random', 'random.random', ([], {}), '()\n', (15778, 15780), False, 'import random\n'), ((15831, 15846), 'random.random', 'random.random', ([], {}), '()\n', (15844, 15846), False, 'import random\n'), ((15886, 15901), 'random.random', 'random.random', ([], {}), '()\n', (15899, 15901), False, 'import random\n'), ((15952, 15967), 'random.random', 'random.random', ([], {}), '()\n', (15965, 15967), False, 'import random\n'), ((16007, 16022), 'random.random', 'random.random', ([], {}), '()\n', (16020, 16022), False, 'import random\n'), ((16949, 16964), 'random.random', 'random.random', ([], {}), '()\n', (16962, 16964), False, 'import random\n'), ((17004, 17019), 'random.random', 'random.random', ([], {}), '()\n', (17017, 17019), False, 'import random\n'), ((17070, 17085), 'random.random', 'random.random', ([], {}), '()\n', (17083, 17085), False, 'import random\n'), ((17125, 17140), 'random.random', 'random.random', ([], {}), '()\n', (17138, 17140), False, 'import random\n'), ((17191, 17206), 'random.random', 'random.random', ([], {}), '()\n', (17204, 17206), False, 'import random\n'), ((17246, 17261), 'random.random', 'random.random', ([], {}), '()\n', (17259, 17261), False, 'import random\n'), ((17312, 17327), 'random.random', 'random.random', ([], {}), '()\n', (17325, 17327), False, 'import random\n'), ((17367, 17382), 'random.random', 'random.random', ([], {}), '()\n', (17380, 17382), False, 'import random\n'), ((18290, 18305), 'random.random', 'random.random', ([], {}), '()\n', (18303, 18305), False, 'import random\n'), ((18345, 18360), 'random.random', 'random.random', ([], {}), '()\n', (18358, 18360), False, 'import random\n'), ((18411, 18426), 'random.random', 'random.random', ([], {}), '()\n', (18424, 18426), False, 'import random\n'), ((18466, 18481), 'random.random', 'random.random', ([], {}), '()\n', (18479, 18481), False, 'import random\n'), ((18532, 18547), 'random.random', 'random.random', ([], {}), '()\n', (18545, 18547), False, 'import random\n'), ((18587, 18602), 'random.random', 'random.random', ([], {}), '()\n', (18600, 18602), False, 'import random\n'), ((18653, 18668), 'random.random', 'random.random', ([], {}), '()\n', (18666, 18668), False, 'import random\n'), ((18708, 18723), 'random.random', 'random.random', ([], {}), '()\n', (18721, 18723), False, 'import random\n'), ((12679, 12711), 'numpy.linalg.norm', 'np.linalg.norm', (['(perturbed_v // 2)'], {}), '(perturbed_v // 2)\n', (12693, 12711), True, 'import numpy as np\n'), ((12725, 12740), 'random.random', 'random.random', ([], {}), '()\n', (12738, 12740), False, 'import random\n'), ((12750, 12765), 'random.random', 'random.random', ([], {}), '()\n', (12763, 12765), False, 'import random\n'), ((12775, 12790), 'random.random', 'random.random', ([], {}), '()\n', (12788, 12790), False, 'import random\n')] |
#!/usr/bin/env python
import argparse
import logging
try:
import ujson as json
except ImportError:
import json
import sys
import datetime
import os
import importlib
from gnip_tweet_evaluation import analysis,output
"""
Perform audience and/or conversation analysis on a set of Tweets.
"""
logger = logging.getLogger('analysis')
logger.setLevel(logging.INFO)
logger.addHandler(logging.StreamHandler())
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("-n","--identifier",dest="unique_identifier", default='0',type=str,
help="a unique name to identify the conversation/audience; default is '%(default)s'")
parser.add_argument("-c","--do-conversation-analysis",dest="do_conversation_analysis",action="store_true",default=False,
help="do conversation analysis on Tweets")
parser.add_argument("-a","--do-audience-analysis",dest="do_audience_analysis",action="store_true",default=False,
help="do audience analysis on users")
parser.add_argument("-i","--input-file-name",dest="input_file_name",default=None,
help="file containing Tweet data; take input from stdin if not present")
parser.add_argument('-o','--output-dir',dest='output_directory',default=os.environ['HOME'] + '/tweet_evaluation/',
help='directory for output files; default is %(default)s')
parser.add_argument('-b','--baseline-input-file',dest='baseline_input_name',default=None,
help='Tweets against which to run a relative analysis')
args = parser.parse_args()
# get the time right now, to use in output naming
time_now = datetime.datetime.now()
output_directory = '{0}/{1:04d}/{2:02d}/{3:02d}/'.format(args.output_directory.rstrip('/')
,time_now.year
,time_now.month
,time_now.day
)
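# Results are written under <output-dir>/YYYY/MM/DD/ for the current date.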
# get the empty results object, which defines the measurements to be run
results = analysis.setup_analysis(do_conversation = args.do_conversation_analysis, do_audience = args.do_audience_analysis)
baseline_results = None
if args.baseline_input_name is not None:
baseline_results = analysis.setup_analysis(do_conversation = args.do_conversation_analysis, do_audience = args.do_audience_analysis)
# manage input sources, file opening, and deserialization
if args.input_file_name is not None:
tweet_generator = analysis.deserialize_tweets(open(args.input_file_name))
else:
tweet_generator = analysis.deserialize_tweets(sys.stdin)
# run analysis
analysis.analyze_tweets(tweet_generator, results)
# run baseline analysis, if requested
if baseline_results is not None:
baseline_tweet_generator = analysis.deserialize_tweets(open(args.baseline_input_name))
analysis.analyze_tweets(baseline_tweet_generator, baseline_results)
results = analysis.compare_results(results,baseline_results)
# dump the output
output.dump_results(results, output_directory, args.unique_identifier)
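# Example invocations (script and file names are illustrative):
# cat tweets.json | python tweet_evaluator.py -c -a -n my_topic
# python tweet_evaluator.py -i tweets.json -b baseline_tweets.json -c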
| [
"logging.getLogger",
"logging.StreamHandler",
"argparse.ArgumentParser",
"gnip_tweet_evaluation.output.dump_results",
"gnip_tweet_evaluation.analysis.setup_analysis",
"gnip_tweet_evaluation.analysis.deserialize_tweets",
"datetime.datetime.now",
"gnip_tweet_evaluation.analysis.compare_results",
"gnip_tweet_evaluation.analysis.analyze_tweets"
] | [((311, 340), 'logging.getLogger', 'logging.getLogger', (['"""analysis"""'], {}), "('analysis')\n", (328, 340), False, 'import logging\n'), ((390, 413), 'logging.StreamHandler', 'logging.StreamHandler', ([], {}), '()\n', (411, 413), False, 'import logging\n'), ((461, 486), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (484, 486), False, 'import argparse\n'), ((1650, 1673), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (1671, 1673), False, 'import datetime\n'), ((1955, 2068), 'gnip_tweet_evaluation.analysis.setup_analysis', 'analysis.setup_analysis', ([], {'do_conversation': 'args.do_conversation_analysis', 'do_audience': 'args.do_audience_analysis'}), '(do_conversation=args.do_conversation_analysis,\n do_audience=args.do_audience_analysis)\n', (1978, 2068), False, 'from gnip_tweet_evaluation import analysis, output\n'), ((2570, 2619), 'gnip_tweet_evaluation.analysis.analyze_tweets', 'analysis.analyze_tweets', (['tweet_generator', 'results'], {}), '(tweet_generator, results)\n', (2593, 2619), False, 'from gnip_tweet_evaluation import analysis, output\n'), ((2971, 3041), 'gnip_tweet_evaluation.output.dump_results', 'output.dump_results', (['results', 'output_directory', 'args.unique_identifier'], {}), '(results, output_directory, args.unique_identifier)\n', (2990, 3041), False, 'from gnip_tweet_evaluation import analysis, output\n'), ((2171, 2284), 'gnip_tweet_evaluation.analysis.setup_analysis', 'analysis.setup_analysis', ([], {'do_conversation': 'args.do_conversation_analysis', 'do_audience': 'args.do_audience_analysis'}), '(do_conversation=args.do_conversation_analysis,\n do_audience=args.do_audience_analysis)\n', (2194, 2284), False, 'from gnip_tweet_evaluation import analysis, output\n'), ((2507, 2545), 'gnip_tweet_evaluation.analysis.deserialize_tweets', 'analysis.deserialize_tweets', (['sys.stdin'], {}), '(sys.stdin)\n', (2534, 2545), False, 'from gnip_tweet_evaluation import analysis, output\n'), ((2803, 2870), 'gnip_tweet_evaluation.analysis.analyze_tweets', 'analysis.analyze_tweets', (['baseline_tweet_generator', 'baseline_results'], {}), '(baseline_tweet_generator, baseline_results)\n', (2826, 2870), False, 'from gnip_tweet_evaluation import analysis, output\n'), ((2889, 2940), 'gnip_tweet_evaluation.analysis.compare_results', 'analysis.compare_results', (['results', 'baseline_results'], {}), '(results, baseline_results)\n', (2913, 2940), False, 'from gnip_tweet_evaluation import analysis, output\n')] |
from __future__ import print_function
import numpy as np
import matplotlib.pyplot as plt
class TwoLayerNet(object):
"""
A two-layer fully-connected neural network. The net has an input dimension
of D, a hidden layer dimension of H, and performs classification over C
classes.
We train the network with a softmax loss function and L2 regularization on
the weight matrices. The network uses a ReLU nonlinearity after the first
fully connected layer.
In other words, the network has the following architecture:
input - fully connected layer - ReLU - fully connected layer - softmax
The outputs of the second fully-connected layer are the scores for each
class.
"""
def __init__(self, input_size, hidden_size, output_size, std=1e-4):
"""
Initialize the model. Weights are initialized to small random values
and biases are initialized to zero. Weights and biases are stored in
the variable self.params, which is a dictionary with the following keys
W1: First layer weights; has shape (D, H)
b1: First layer biases; has shape (H,)
W2: Second layer weights; has shape (H, C)
b2: Second layer biases; has shape (C,)
Inputs:
- input_size: The dimension D of the input data.
- hidden_size: The number of neurons H in the hidden layer.
- output_size: The number of classes C.
"""
self.params = {}
self.params['W1'] = std * np.random.randn(input_size, hidden_size)
self.params['b1'] = np.zeros(hidden_size)
self.params['W2'] = std * np.random.randn(hidden_size, output_size)
self.params['b2'] = np.zeros(output_size)
def loss(self, X, y=None, reg=0.0):
"""
Compute the loss and gradients for a two layer fully connected neural
network.
Inputs:
- X: Input data of shape (N, D). Each X[i] is a training sample.
- y: Vector of training labels. y[i] is the label for X[i], and each
y[i] is an integer in the range 0 <= y[i] < C. This parameter is
optional; if it is not passed then we only return scores, and if it
is passed then we instead return the loss and gradients.
- reg: Regularization strength.
Returns:
If y is None, return a matrix scores of shape (N, C) where scores[i, c]
is the score for class c on input X[i].
If y is not None, instead return a tuple of:
- loss: Loss (data loss and regularization loss) for this batch of
training samples.
- grads: Dictionary mapping parameter names to gradients of those
parameters with respect to the loss function; has the same keys as
self.params.
"""
# Unpack variables from the params dictionary
W1, b1 = self.params['W1'], self.params['b1']
W2, b2 = self.params['W2'], self.params['b2']
N, D = X.shape
# Compute the forward pass
scores = None
#######################################################################
# TODO: Perform the forward pass, computing the class scores for the #
# input. Store the result in the scores variable, which should be an #
# array of shape (N, C). #
#######################################################################
scores1 = X.dot(W1) + b1 # FC1
X2 = np.maximum(0, scores1) # ReLU FC1
scores = X2.dot(W2) + b2 # FC2
#######################################################################
# END OF YOUR CODE #
#######################################################################
# If the targets are not given then jump out, we're done
if y is None:
return scores
scores -= np.max(scores, axis=1, keepdims=True) # subtract the per-row max for numerical stability
scores_exp = np.exp(scores)
probs = scores_exp / np.sum(scores_exp, axis=1, keepdims=True)
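# probs[i, c] = exp(scores[i, c]) / sum_j exp(scores[i, j]) -- row-wise softmax
# (unchanged by the max shift above)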
# Compute the loss
loss = None
#######################################################################
# TODO: Finish the forward pass, and compute the loss. This should #
# include both the data loss and L2 regularization for W1 and W2. #
# Store the result in the variable loss, which should be a scalar. Use#
# the Softmax classifier loss. #
#######################################################################
correct_probs = -np.log(probs[np.arange(N), y])
# L_i = -log(e^correct_score/sum(e^scores))) = -log(correct_probs)
loss = np.sum(correct_probs)
loss /= N
# L2 regularization WRT W1 and W2
loss += reg * (np.sum(W1 * W1) + np.sum(W2 * W2))
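# Full objective: L = (1/N) * sum_i -log(softmax(scores_i)[y_i]) + reg * (||W1||^2 + ||W2||^2)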
#######################################################################
# END OF YOUR CODE #
#######################################################################
# Backward pass: compute gradients
grads = {}
#############################################################################
# TODO: Compute the backward pass, computing the derivatives of the weights #
# and biases. Store the results in the grads dictionary. For example, #
# grads['W1'] should store the gradient on W1, and be a matrix of same size #
#############################################################################
# gradient of loss_i WRT scores_k
# dL_i/ds_k = probs_k - 1(y_i == k)
# i.e. the gradient is the softmax probability for the "other" classes
# and probability - 1 for the target class
d_scores = probs.copy()
d_scores[np.arange(N), y] -= 1
d_scores /= N
# W2 were multiplied with X2, by chain rule and multiplication
# derivative, WRT W2 we need to multiply downstream derivative by X2
d_W2 = X2.T.dot(d_scores)
# b2 was added, so its local derivative is 1; by the chain rule we just
# sum the downstream gradient d_scores over the batch
d_b2 = np.sum(d_scores, axis=0)
# W1 is upstream of X2, so we continue this way
d_X2 = d_scores.dot(W2.T)
# ReLU derivative is 1 for > 0, else 0
d_scores1 = d_X2 * (scores1 > 0)
d_W1 = X.T.dot(d_scores1)
# b1 gradient
d_b1 = d_scores1.sum(axis=0)
# gradient of the L2 regularization term reg*||W||^2 is 2*reg*W (applied to both W1 and W2)
d_W2 += reg * 2 * W2
d_W1 += reg * 2 * W1
grads['W1'] = d_W1
grads['b1'] = d_b1
grads['W2'] = d_W2
grads['b2'] = d_b2
#######################################################################
# END OF YOUR CODE #
#######################################################################
return loss, grads
def train(self, X, y, X_val, y_val,
learning_rate=1e-3, learning_rate_decay=0.95,
reg=5e-6, num_iters=100,
batch_size=200, verbose=False):
"""
Train this neural network using stochastic gradient descent.
Inputs:
- X: A numpy array of shape (N, D) giving training data.
- y: A numpy array of shape (N,) giving training labels; y[i] = c means
that X[i] has label c, where 0 <= c < C.
- X_val: A numpy array of shape (N_val, D) giving validation data.
- y_val: A numpy array of shape (N_val,) giving validation labels.
- learning_rate: Scalar giving learning rate for optimization.
- learning_rate_decay: Scalar giving factor used to decay the learning
rate after each epoch.
- reg: Scalar giving regularization strength.
- num_iters: Number of steps to take when optimizing.
- batch_size: Number of training examples to use per step.
- verbose: boolean; if true print progress during optimization.
"""
num_train = X.shape[0]
iterations_per_epoch = max(num_train / batch_size, 1)
# Use SGD to optimize the parameters in self.model
loss_history = []
train_acc_history = []
val_acc_history = []
for it in range(num_iters):
X_batch = None
y_batch = None
###################################################################
# TODO: Create a random minibatch of training data and labels, #
# storing them in X_batch and y_batch respectively. #
###################################################################
# random indexes to sample training data/labels
sample_idx = np.random.choice(num_train, batch_size, replace=True)
X_batch = X[sample_idx]
y_batch = y[sample_idx]
###################################################################
# END OF YOUR CODE #
###################################################################
# Compute loss and gradients using the current minibatch
loss, grads = self.loss(X_batch, y=y_batch, reg=reg)
loss_history.append(loss)
###################################################################
# TODO: Use the gradients in the grads dictionary to update the #
# parameters of the network (stored in the dictionary self.params)#
# using stochastic gradient descent. You'll need to use the #
# gradients stored in the grads dictionary defined above. #
###################################################################
# For each weight in network parameters, update it with the
# corresponding calculated gradient
for key in self.params:
self.params[key] -= learning_rate * grads[key]
###################################################################
# END OF YOUR CODE #
###################################################################
if verbose and it % 100 == 0:
print('iteration %d / %d: loss %f' % (it, num_iters, loss))
# Every epoch, check train and val accuracy and decay learning rate
if it % iterations_per_epoch == 0:
# Check accuracy
train_acc = (self.predict(X_batch) == y_batch).mean()
val_acc = (self.predict(X_val) == y_val).mean()
train_acc_history.append(train_acc)
val_acc_history.append(val_acc)
# Decay learning rate
learning_rate *= learning_rate_decay
return {
'loss_history': loss_history,
'train_acc_history': train_acc_history,
'val_acc_history': val_acc_history,
}
def predict(self, X):
"""
Use the trained weights of this two-layer network to predict labels for
data points. For each data point we predict scores for each of the C
classes, and assign each data point to the class with the highest score
Inputs:
- X: A numpy array of shape (N, D) giving N D-dimensional data points
to classify.
Returns:
- y_pred: A numpy array of shape (N,) giving predicted labels for each
of the elements of X. For all i, y_pred[i] = c means that X[i] is
predicted to have class c, where 0 <= c < C.
"""
y_pred = None
#######################################################################
# TODO: Implement this function; it should be VERY simple! #
#######################################################################
y_pred = np.argmax(self.loss(X), axis=1)
#######################################################################
# END OF YOUR CODE #
#######################################################################
return y_pred
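# Minimal smoke test (illustrative addition, not part of the original file):
# train on random data and run predict(); all sizes below are arbitrary.
if __name__ == "__main__":
    np.random.seed(0)
    N, D, H, C = 100, 10, 20, 3
    X = np.random.randn(N, D)
    y = np.random.randint(C, size=N)
    net = TwoLayerNet(D, H, C)
    stats = net.train(X, y, X, y, num_iters=200, batch_size=50,
                      learning_rate=1e-1, reg=1e-4, verbose=False)
    print('final loss: %f' % stats['loss_history'][-1])
    print('train accuracy: %f' % (net.predict(X) == y).mean())
| [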
"numpy.random.choice",
"numpy.max",
"numpy.exp",
"numpy.sum",
"numpy.zeros",
"numpy.maximum",
"numpy.random.randn",
"numpy.arange"
] | [((1558, 1579), 'numpy.zeros', 'np.zeros', (['hidden_size'], {}), '(hidden_size)\n', (1566, 1579), True, 'import numpy as np\n'), ((1684, 1705), 'numpy.zeros', 'np.zeros', (['output_size'], {}), '(output_size)\n', (1692, 1705), True, 'import numpy as np\n'), ((3464, 3486), 'numpy.maximum', 'np.maximum', (['(0)', 'scores1'], {}), '(0, scores1)\n', (3474, 3486), True, 'import numpy as np\n'), ((3914, 3928), 'numpy.max', 'np.max', (['scores'], {}), '(scores)\n', (3920, 3928), True, 'import numpy as np\n'), ((3976, 3990), 'numpy.exp', 'np.exp', (['scores'], {}), '(scores)\n', (3982, 3990), True, 'import numpy as np\n'), ((4746, 4767), 'numpy.sum', 'np.sum', (['correct_probs'], {}), '(correct_probs)\n', (4752, 4767), True, 'import numpy as np\n'), ((6231, 6255), 'numpy.sum', 'np.sum', (['d_scores'], {'axis': '(0)'}), '(d_scores, axis=0)\n', (6237, 6255), True, 'import numpy as np\n'), ((1489, 1529), 'numpy.random.randn', 'np.random.randn', (['input_size', 'hidden_size'], {}), '(input_size, hidden_size)\n', (1504, 1529), True, 'import numpy as np\n'), ((1614, 1655), 'numpy.random.randn', 'np.random.randn', (['hidden_size', 'output_size'], {}), '(hidden_size, output_size)\n', (1629, 1655), True, 'import numpy as np\n'), ((4020, 4061), 'numpy.sum', 'np.sum', (['scores_exp'], {'axis': '(1)', 'keepdims': '(True)'}), '(scores_exp, axis=1, keepdims=True)\n', (4026, 4061), True, 'import numpy as np\n'), ((8825, 8878), 'numpy.random.choice', 'np.random.choice', (['num_train', 'batch_size'], {'replace': '(True)'}), '(num_train, batch_size, replace=True)\n', (8841, 8878), True, 'import numpy as np\n'), ((4851, 4866), 'numpy.sum', 'np.sum', (['(W1 * W1)'], {}), '(W1 * W1)\n', (4857, 4866), True, 'import numpy as np\n'), ((4869, 4884), 'numpy.sum', 'np.sum', (['(W2 * W2)'], {}), '(W2 * W2)\n', (4875, 4884), True, 'import numpy as np\n'), ((5864, 5876), 'numpy.arange', 'np.arange', (['N'], {}), '(N)\n', (5873, 5876), True, 'import numpy as np\n'), ((4637, 4649), 'numpy.arange', 'np.arange', (['N'], {}), '(N)\n', (4646, 4649), True, 'import numpy as np\n')] |
from django.test import TestCase
from dynamic_setting.models import Setting
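# These tests exercise the Setting model and can be run with Django's test runner,
# e.g. `python manage.py test dynamic_setting` (app label assumed from the import path).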
class SettingTestCase(TestCase):
def _create_setting(self, name, **kwargs):
return Setting.objects.create(name=name, **kwargs)
def test_create_setting(self):
""" Test Creating a new Setting. """
name = 'TEST_SETTING'
data = 'Setting Data'
setting = self._create_setting(name, data=data)
self.assertEqual(setting.name, name)
self.assertEqual(setting.__str__(), name)
self.assertEqual(setting.data, data)
def test_create_setting_no_data(self):
""" Test Creating a new setting without Data. """
name = 'TEST_SETTING'
data = '-'
setting = self._create_setting(name)
self.assertEqual(setting.name, name)
self.assertEqual(setting.__str__(), name)
self.assertEqual(setting.data, data)
def test_delete_setting(self):
""" Test Deleting a setting object. """
name = 'TEST_SETTING'
setting = self._create_setting(name)
setting_pk = setting.pk
setting.delete()
try:
Setting.objects.get(pk=setting_pk)
except Setting.DoesNotExist:
pass
else:
self.fail('Setting with ID {} should not exist.'.format(setting_pk))
def test_get_setting(self):
""" Test Getting a setting object. """
name = 'TEST_SETTING'
data = 'Setting data'
setting = self._create_setting(name, data=data)
try:
setting2 = Setting.objects.get(pk=setting.pk)
except Setting.DoesNotExist:
self.fail('Setting with ID {} should exist'.format(setting.pk))
self.assertEqual(setting.name, setting2.name)
self.assertEqual(setting.__str__(), setting2.__str__())
self.assertEqual(setting.data, setting2.data)
self.assertEqual(setting.pk, setting2.pk)
def test_update_setting(self):
""" Test Updating a setting object. """
name = 'TEST_SETTING'
data = 'Setting data'
data2 = 'New Setting Data'
setting = self._create_setting(name, data=data)
setting.data = data2
setting.save()
setting2 = Setting.objects.get(pk=setting.pk)
self.assertEqual(setting2.data, data2)
| [
"dynamic_setting.models.Setting.objects.create",
"dynamic_setting.models.Setting.objects.get"
] | [((178, 221), 'dynamic_setting.models.Setting.objects.create', 'Setting.objects.create', ([], {'name': 'name'}), '(name=name, **kwargs)\n', (200, 221), False, 'from dynamic_setting.models import Setting\n'), ((2228, 2262), 'dynamic_setting.models.Setting.objects.get', 'Setting.objects.get', ([], {'pk': 'setting.pk'}), '(pk=setting.pk)\n', (2247, 2262), False, 'from dynamic_setting.models import Setting\n'), ((1136, 1170), 'dynamic_setting.models.Setting.objects.get', 'Setting.objects.get', ([], {'pk': 'setting_pk'}), '(pk=setting_pk)\n', (1155, 1170), False, 'from dynamic_setting.models import Setting\n'), ((1552, 1586), 'dynamic_setting.models.Setting.objects.get', 'Setting.objects.get', ([], {'pk': 'setting.pk'}), '(pk=setting.pk)\n', (1571, 1586), False, 'from dynamic_setting.models import Setting\n')] |
"""Sensor for data from Austrian Zentralanstalt für Meteorologie."""
from __future__ import annotations
import logging
import voluptuous as vol
from homeassistant.components.weather import (
ATTR_WEATHER_HUMIDITY,
ATTR_WEATHER_PRESSURE,
ATTR_WEATHER_TEMPERATURE,
ATTR_WEATHER_WIND_BEARING,
ATTR_WEATHER_WIND_SPEED,
PLATFORM_SCHEMA,
WeatherEntity,
)
from homeassistant.const import CONF_LATITUDE, CONF_LONGITUDE, CONF_NAME, TEMP_CELSIUS
from homeassistant.core import HomeAssistant
from homeassistant.helpers import config_validation as cv
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers.typing import ConfigType, DiscoveryInfoType
# Reuse data and API logic from the sensor implementation
from .sensor import (
ATTRIBUTION,
CONF_STATION_ID,
ZamgData,
closest_station,
zamg_stations,
)
_LOGGER = logging.getLogger(__name__)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Optional(CONF_NAME): cv.string,
vol.Optional(CONF_STATION_ID): cv.string,
vol.Inclusive(
CONF_LATITUDE, "coordinates", "Latitude and longitude must exist together"
): cv.latitude,
vol.Inclusive(
CONF_LONGITUDE, "coordinates", "Latitude and longitude must exist together"
): cv.longitude,
}
)
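# Example configuration.yaml entry (illustrative; station_id is optional and,
# if omitted, the station closest to the configured coordinates is used):
#
# weather:
#   - platform: zamg
#     station_id: "11035"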
def setup_platform(
hass: HomeAssistant,
config: ConfigType,
add_entities: AddEntitiesCallback,
discovery_info: DiscoveryInfoType | None = None,
) -> None:
"""Set up the ZAMG weather platform."""
name = config.get(CONF_NAME)
latitude = config.get(CONF_LATITUDE, hass.config.latitude)
longitude = config.get(CONF_LONGITUDE, hass.config.longitude)
station_id = config.get(CONF_STATION_ID) or closest_station(
latitude, longitude, hass.config.config_dir
)
if station_id not in zamg_stations(hass.config.config_dir):
_LOGGER.error(
"Configured ZAMG %s (%s) is not a known station",
CONF_STATION_ID,
station_id,
)
return
probe = ZamgData(station_id=station_id)
try:
probe.update()
except (ValueError, TypeError) as err:
_LOGGER.error("Received error from ZAMG: %s", err)
return
add_entities([ZamgWeather(probe, name)], True)
class ZamgWeather(WeatherEntity):
"""Representation of a weather condition."""
def __init__(self, zamg_data, stationname=None):
"""Initialise the platform with a data instance and station name."""
self.zamg_data = zamg_data
self.stationname = stationname
@property
def name(self):
"""Return the name of the sensor."""
return (
self.stationname
or f"ZAMG {self.zamg_data.data.get('Name') or '(unknown station)'}"
)
@property
def condition(self):
"""Return the current condition."""
return None
@property
def attribution(self):
"""Return the attribution."""
return ATTRIBUTION
@property
def temperature(self):
"""Return the platform temperature."""
return self.zamg_data.get_data(ATTR_WEATHER_TEMPERATURE)
@property
def temperature_unit(self):
"""Return the unit of measurement."""
return TEMP_CELSIUS
@property
def pressure(self):
"""Return the pressure."""
return self.zamg_data.get_data(ATTR_WEATHER_PRESSURE)
@property
def humidity(self):
"""Return the humidity."""
return self.zamg_data.get_data(ATTR_WEATHER_HUMIDITY)
@property
def wind_speed(self):
"""Return the wind speed."""
return self.zamg_data.get_data(ATTR_WEATHER_WIND_SPEED)
@property
def wind_bearing(self):
"""Return the wind bearing."""
return self.zamg_data.get_data(ATTR_WEATHER_WIND_BEARING)
def update(self):
"""Update current conditions."""
self.zamg_data.update()
| [
"logging.getLogger",
"voluptuous.Optional",
"voluptuous.Inclusive"
] | [((897, 924), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (914, 924), False, 'import logging\n'), ((982, 1005), 'voluptuous.Optional', 'vol.Optional', (['CONF_NAME'], {}), '(CONF_NAME)\n', (994, 1005), True, 'import voluptuous as vol\n'), ((1026, 1055), 'voluptuous.Optional', 'vol.Optional', (['CONF_STATION_ID'], {}), '(CONF_STATION_ID)\n', (1038, 1055), True, 'import voluptuous as vol\n'), ((1076, 1169), 'voluptuous.Inclusive', 'vol.Inclusive', (['CONF_LATITUDE', '"""coordinates"""', '"""Latitude and longitude must exist together"""'], {}), "(CONF_LATITUDE, 'coordinates',\n 'Latitude and longitude must exist together')\n", (1089, 1169), True, 'import voluptuous as vol\n'), ((1210, 1304), 'voluptuous.Inclusive', 'vol.Inclusive', (['CONF_LONGITUDE', '"""coordinates"""', '"""Latitude and longitude must exist together"""'], {}), "(CONF_LONGITUDE, 'coordinates',\n 'Latitude and longitude must exist together')\n", (1223, 1304), True, 'import voluptuous as vol\n')] |
import copy
import glob
import hashlib
import logging
import os
import shutil
from subprocess import CalledProcessError, DEVNULL, check_output # skipcq:BAN-B404
import tempfile
import typing
from pathlib import Path
from typing import Any, Text, Tuple, Union, Optional, List, Dict, NamedTuple
from packaging import version
from rasa.constants import MINIMUM_COMPATIBLE_VERSION
import rasa.shared.utils.io
import rasa.utils.io
from rasa.cli.utils import create_output_path
from rasa.shared.utils.cli import print_success
from rasa.shared.constants import (
CONFIG_KEYS_CORE,
CONFIG_KEYS_NLU,
CONFIG_KEYS,
DEFAULT_DOMAIN_PATH,
DEFAULT_MODELS_PATH,
DEFAULT_CORE_SUBDIRECTORY_NAME,
DEFAULT_NLU_SUBDIRECTORY_NAME,
)
from rasa.exceptions import ModelNotFound
from rasa.utils.common import TempDirectoryPath
if typing.TYPE_CHECKING:
from rasa.shared.importers.importer import TrainingDataImporter
logger = logging.getLogger(__name__)
# Type alias for the fingerprint
Fingerprint = Dict[Text, Union[Text, List[Text], int, float]]
FINGERPRINT_FILE_PATH = "fingerprint.json"
FINGERPRINT_CONFIG_KEY = "config"
FINGERPRINT_CONFIG_CORE_KEY = "core-config"
FINGERPRINT_CONFIG_NLU_KEY = "nlu-config"
FINGERPRINT_CONFIG_WITHOUT_EPOCHS_KEY = "config-without-epochs"
FINGERPRINT_DOMAIN_WITHOUT_NLG_KEY = "domain"
FINGERPRINT_NLG_KEY = "nlg"
FINGERPRINT_RASA_VERSION_KEY = "version"
FINGERPRINT_STORIES_KEY = "stories"
FINGERPRINT_NLU_DATA_KEY = "messages"
FINGERPRINT_NLU_LABELS_KEY = "nlu_labels"
FINGERPRINT_PROJECT = "project"
FINGERPRINT_TRAINED_AT_KEY = "trained_at"
class Section(NamedTuple):
"""Specifies which fingerprint keys decide whether this sub-model is retrained."""
name: Text
relevant_keys: List[Text]
SECTION_CORE = Section(
name="Core model",
relevant_keys=[
FINGERPRINT_CONFIG_KEY,
FINGERPRINT_CONFIG_CORE_KEY,
FINGERPRINT_DOMAIN_WITHOUT_NLG_KEY,
FINGERPRINT_STORIES_KEY,
FINGERPRINT_RASA_VERSION_KEY,
],
)
SECTION_NLU = Section(
name="NLU model",
relevant_keys=[
FINGERPRINT_CONFIG_KEY,
FINGERPRINT_CONFIG_NLU_KEY,
FINGERPRINT_NLU_DATA_KEY,
FINGERPRINT_RASA_VERSION_KEY,
],
)
SECTION_NLG = Section(name="NLG responses", relevant_keys=[FINGERPRINT_NLG_KEY])
class FingerprintComparisonResult:
"""Container for the results of a fingerprint comparison."""
def __init__(
self,
nlu: bool = True,
core: bool = True,
nlg: bool = True,
force_training: bool = False,
):
"""Creates a `FingerprintComparisonResult` instance.
Args:
nlu: `True` if the NLU model should be retrained.
core: `True` if the Core model should be retrained.
nlg: `True` if the responses in the domain should be updated.
force_training: `True` if a training of all parts is forced.
"""
self.nlu = nlu
self.core = core
self.nlg = nlg
self.force_training = force_training
def is_training_required(self) -> bool:
"""Check if anything has to be retrained."""
return any([self.nlg, self.nlu, self.core, self.force_training])
def should_retrain_core(self) -> bool:
"""Check if the Core model has to be updated."""
return self.force_training or self.core
def should_retrain_nlg(self) -> bool:
"""Check if the responses have to be updated."""
return self.should_retrain_core() or self.nlg
def should_retrain_nlu(self) -> bool:
"""Check if the NLU model has to be updated."""
return self.force_training or self.nlu
def get_model(model_path: Text = DEFAULT_MODELS_PATH) -> TempDirectoryPath:
"""Get a model and unpack it. Raises a `ModelNotFound` exception if
no model could be found at the provided path.
Args:
model_path: Path to the zipped model. If it's a directory, the latest
trained model is returned.
Returns:
Path to the unpacked model.
"""
if not model_path:
raise ModelNotFound("No path specified.")
elif not os.path.exists(model_path):
raise ModelNotFound(f"No file or directory at '{model_path}'.")
if os.path.isdir(model_path):
model_path = get_latest_model(model_path)
if not model_path:
raise ModelNotFound(
f"Could not find any Rasa model files in '{model_path}'."
)
elif not model_path.endswith(".tar.gz"):
raise ModelNotFound(f"Path '{model_path}' does not point to a Rasa model file.")
try:
model_relative_path = os.path.relpath(model_path)
except ValueError:
model_relative_path = model_path
logger.info(f"Loading model {model_relative_path}...")
return unpack_model(model_path)
def get_latest_model(model_path: Text = DEFAULT_MODELS_PATH) -> Optional[Text]:
"""Get the latest model from a path.
Args:
model_path: Path to a directory containing zipped models.
Returns:
Path to latest model in the given directory.
"""
if not os.path.exists(model_path) or os.path.isfile(model_path):
model_path = os.path.dirname(model_path)
list_of_files = glob.glob(os.path.join(model_path, "*.tar.gz"))
if len(list_of_files) == 0:
return None
return max(list_of_files, key=os.path.getctime)
def unpack_model(
model_file: Text, working_directory: Optional[Union[Path, Text]] = None
) -> TempDirectoryPath:
"""Unpack a zipped Rasa model.
Args:
model_file: Path to zipped model.
working_directory: Location where the model should be unpacked to.
If `None` a temporary directory will be created.
Returns:
Path to unpacked Rasa model.
"""
import tarfile
if working_directory is None:
working_directory = tempfile.mkdtemp()
# All files are in a subdirectory.
try:
with tarfile.open(model_file, mode="r:gz") as tar:
tar.extractall(working_directory)
logger.debug(f"Extracted model to '{working_directory}'.")
except Exception as e:
logger.error(f"Failed to extract model at {model_file}. Error: {e}")
raise
return TempDirectoryPath(working_directory)
def get_model_subdirectories(
unpacked_model_path: Text,
) -> Tuple[Optional[Text], Optional[Text]]:
"""Return paths for Core and NLU model directories, if they exist.
If neither directories exist, a `ModelNotFound` exception is raised.
Args:
unpacked_model_path: Path to unpacked Rasa model.
Returns:
Tuple (path to Core subdirectory if it exists or `None` otherwise,
path to NLU subdirectory if it exists or `None` otherwise).
"""
core_path = os.path.join(unpacked_model_path, DEFAULT_CORE_SUBDIRECTORY_NAME)
nlu_path = os.path.join(unpacked_model_path, DEFAULT_NLU_SUBDIRECTORY_NAME)
if not os.path.isdir(core_path):
core_path = None
if not os.path.isdir(nlu_path):
nlu_path = None
if not core_path and not nlu_path:
raise ModelNotFound(
"No NLU or Core data for unpacked model at: '{}'.".format(
unpacked_model_path
)
)
return core_path, nlu_path
def create_package_rasa(
training_directory: Text,
output_filename: Text,
fingerprint: Optional[Fingerprint] = None,
) -> Text:
"""Create a zipped Rasa model from trained model files.
Args:
training_directory: Path to the directory which contains the trained
model files.
output_filename: Name of the zipped model file to be created.
fingerprint: A unique fingerprint to identify the model version.
Returns:
Path to zipped model.
"""
import tarfile
if fingerprint:
persist_fingerprint(training_directory, fingerprint)
output_directory = os.path.dirname(output_filename)
if not os.path.exists(output_directory):
os.makedirs(output_directory)
with tarfile.open(output_filename, "w:gz") as tar:
for elem in os.scandir(training_directory):
tar.add(elem.path, arcname=elem.name)
shutil.rmtree(training_directory)
return output_filename
def project_fingerprint() -> Optional[Text]:
"""Create a hash for the project in the current working directory.
Returns:
project hash
"""
try:
remote = check_output( # skipcq:BAN-B607,BAN-B603
["git", "remote", "get-url", "origin"], stderr=DEVNULL
)
return hashlib.sha256(remote).hexdigest()
except (CalledProcessError, OSError):
return None
async def model_fingerprint(file_importer: "TrainingDataImporter") -> Fingerprint:
"""Create a model fingerprint from its used configuration and training data.
Args:
file_importer: File importer which provides the training data and model config.
Returns:
The fingerprint.
"""
import time
config = await file_importer.get_config()
domain = await file_importer.get_domain()
stories = await file_importer.get_stories()
nlu_data = await file_importer.get_nlu_data()
responses = domain.responses
# Do a copy of the domain to not change the actual domain (shallow is enough)
domain = copy.copy(domain)
# don't include the response texts in the fingerprint.
# Their fingerprint is separate.
domain.responses = {}
return {
FINGERPRINT_CONFIG_KEY: _get_fingerprint_of_config(
config, exclude_keys=CONFIG_KEYS
),
FINGERPRINT_CONFIG_CORE_KEY: _get_fingerprint_of_config(
config, include_keys=CONFIG_KEYS_CORE
),
FINGERPRINT_CONFIG_NLU_KEY: _get_fingerprint_of_config(
config, include_keys=CONFIG_KEYS_NLU
),
FINGERPRINT_CONFIG_WITHOUT_EPOCHS_KEY: _get_fingerprint_of_config_without_epochs(
config
),
FINGERPRINT_DOMAIN_WITHOUT_NLG_KEY: domain.fingerprint(),
FINGERPRINT_NLG_KEY: rasa.shared.utils.io.deep_container_fingerprint(responses),
FINGERPRINT_PROJECT: project_fingerprint(),
FINGERPRINT_NLU_DATA_KEY: nlu_data.fingerprint(),
FINGERPRINT_NLU_LABELS_KEY: nlu_data.label_fingerprint(),
FINGERPRINT_STORIES_KEY: stories.fingerprint(),
FINGERPRINT_TRAINED_AT_KEY: time.time(),
FINGERPRINT_RASA_VERSION_KEY: rasa.__version__,
}
def _get_fingerprint_of_config(
config: Optional[Dict[Text, Any]],
include_keys: Optional[List[Text]] = None,
exclude_keys: Optional[List[Text]] = None,
) -> Text:
if not config:
return ""
keys = include_keys or list(filter(lambda k: k not in exclude_keys, config.keys()))
sub_config = {k: config[k] for k in keys if k in config}
return rasa.shared.utils.io.deep_container_fingerprint(sub_config)
def _get_fingerprint_of_config_without_epochs(
config: Optional[Dict[Text, Any]],
) -> Text:
if not config:
return ""
copied_config = copy.deepcopy(config)
for key in ["pipeline", "policies"]:
if copied_config.get(key):
for p in copied_config[key]:
if "epochs" in p:
del p["epochs"]
return rasa.shared.utils.io.deep_container_fingerprint(copied_config)
def fingerprint_from_path(model_path: Text) -> Fingerprint:
"""Load a persisted fingerprint.
Args:
model_path: Path to directory containing the fingerprint.
Returns:
The fingerprint or an empty dict if no fingerprint was found.
"""
if not model_path or not os.path.exists(model_path):
return {}
fingerprint_path = os.path.join(model_path, FINGERPRINT_FILE_PATH)
if os.path.isfile(fingerprint_path):
return rasa.shared.utils.io.read_json_file(fingerprint_path)
else:
return {}
def persist_fingerprint(output_path: Text, fingerprint: Fingerprint):
"""Persist a model fingerprint.
Args:
output_path: Directory in which the fingerprint should be saved.
fingerprint: The fingerprint to be persisted.
"""
path = os.path.join(output_path, FINGERPRINT_FILE_PATH)
rasa.shared.utils.io.dump_obj_as_json_to_file(path, fingerprint)
def did_section_fingerprint_change(
fingerprint1: Fingerprint, fingerprint2: Fingerprint, section: Section
) -> bool:
"""Check whether the fingerprint of a section has changed."""
for k in section.relevant_keys:
if fingerprint1.get(k) != fingerprint2.get(k):
logger.info(f"Data ({k}) for {section.name} section changed.")
return True
return False
def move_model(source: Text, target: Text) -> bool:
"""Move two model directories.
Args:
source: The original folder which should be merged in another.
target: The destination folder where it should be moved to.
Returns:
`True` if the merge was successful, else `False`.
"""
try:
shutil.move(source, target)
return True
except Exception as e:
logging.debug(f"Could not merge model: {e}")
return False
def should_retrain(
new_fingerprint: Fingerprint,
old_model: Text,
train_path: Text,
has_e2e_examples: bool = False,
force_training: bool = False,
) -> FingerprintComparisonResult:
"""Check which components of a model should be retrained.
Args:
new_fingerprint: The fingerprint of the new model to be trained.
old_model: Path to the old zipped model file.
train_path: Path to the directory in which the new model will be trained.
has_e2e_examples: Whether the new training data contains e2e examples.
force_training: Indicates if the model needs to be retrained even if the data
has not changed.
Returns:
A FingerprintComparisonResult object indicating whether Rasa Core and/or Rasa
NLU needs to be retrained or not.
"""
fingerprint_comparison = FingerprintComparisonResult()
if old_model is None or not os.path.exists(old_model):
return fingerprint_comparison
with unpack_model(old_model) as unpacked:
last_fingerprint = fingerprint_from_path(unpacked)
old_core, old_nlu = get_model_subdirectories(unpacked)
fingerprint_comparison = FingerprintComparisonResult(
core=did_section_fingerprint_change(
last_fingerprint, new_fingerprint, SECTION_CORE
),
nlu=did_section_fingerprint_change(
last_fingerprint, new_fingerprint, SECTION_NLU
),
nlg=did_section_fingerprint_change(
last_fingerprint, new_fingerprint, SECTION_NLG
),
force_training=force_training,
)
# We should retrain core if nlu data changes and there are e2e stories.
if has_e2e_examples and fingerprint_comparison.should_retrain_nlu():
fingerprint_comparison.core = True
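    # When a sub-model does not need retraining, reuse it by moving the old
    # sub-model into the new train path instead of training it from scratch.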
core_merge_failed = False
if not fingerprint_comparison.should_retrain_core():
target_path = os.path.join(train_path, DEFAULT_CORE_SUBDIRECTORY_NAME)
core_merge_failed = not move_model(old_core, target_path)
fingerprint_comparison.core = core_merge_failed
if not fingerprint_comparison.should_retrain_nlg() and core_merge_failed:
# If moving the Core model failed, we should also retrain NLG
fingerprint_comparison.nlg = True
if not fingerprint_comparison.should_retrain_nlu():
target_path = os.path.join(train_path, "nlu")
fingerprint_comparison.nlu = not move_model(old_nlu, target_path)
return fingerprint_comparison
def can_finetune(
last_fingerprint: Fingerprint,
new_fingerprint: Fingerprint,
core: bool = False,
nlu: bool = False,
) -> bool:
"""Checks if components of a model can be finetuned with incremental training.
Args:
last_fingerprint: The fingerprint of the old model to potentially be fine-tuned.
new_fingerprint: The fingerprint of the new model.
core: Check sections for finetuning a core model.
nlu: Check sections for finetuning an nlu model.
Returns:
`True` if the old model can be finetuned, `False` otherwise.
"""
section_keys = [
FINGERPRINT_CONFIG_WITHOUT_EPOCHS_KEY,
]
if core:
section_keys.append(FINGERPRINT_DOMAIN_WITHOUT_NLG_KEY)
if nlu:
section_keys.append(FINGERPRINT_NLU_LABELS_KEY)
fingerprint_changed = did_section_fingerprint_change(
last_fingerprint,
new_fingerprint,
Section(name="finetune", relevant_keys=section_keys),
)
old_model_above_min_version = version.parse(
last_fingerprint.get(FINGERPRINT_RASA_VERSION_KEY)
) >= version.parse(MINIMUM_COMPATIBLE_VERSION)
return old_model_above_min_version and not fingerprint_changed
def package_model(
fingerprint: Fingerprint,
output_directory: Text,
train_path: Text,
fixed_model_name: Optional[Text] = None,
model_prefix: Text = "",
) -> Text:
"""
Compress a trained model.
Args:
fingerprint: fingerprint of the model
output_directory: path to the directory in which the model should be stored
train_path: path to uncompressed model
fixed_model_name: name of the compressed model file
model_prefix: prefix of the compressed model file
Returns: path to 'tar.gz' model file
"""
output_directory = create_output_path(
output_directory, prefix=model_prefix, fixed_name=fixed_model_name
)
create_package_rasa(train_path, output_directory, fingerprint)
print_success(
"Your Rasa model is trained and saved at '{}'.".format(
os.path.abspath(output_directory)
)
)
return output_directory
async def update_model_with_new_domain(
importer: "TrainingDataImporter", unpacked_model_path: Union[Path, Text]
) -> None:
"""Overwrites the domain of an unpacked model with a new domain.
Args:
importer: Importer which provides the new domain.
unpacked_model_path: Path to the unpacked model.
"""
model_path = Path(unpacked_model_path) / DEFAULT_CORE_SUBDIRECTORY_NAME
domain = await importer.get_domain()
domain.persist(model_path / DEFAULT_DOMAIN_PATH)
def get_model_for_finetuning(
previous_model_file: Optional[Union[Path, Text]]
) -> Optional[Text]:
"""Gets validated path for model to finetune.
Args:
previous_model_file: Path to model file which should be used for finetuning or
a directory in case the latest trained model should be used.
Returns:
Path to model archive. `None` if there is no model.
"""
if Path(previous_model_file).is_dir():
logger.debug(
f"Trying to load latest model from '{previous_model_file}' for "
f"finetuning."
)
return get_latest_model(previous_model_file)
if Path(previous_model_file).is_file():
return previous_model_file
logger.debug(
"No valid model for finetuning found as directory either "
"contains no model or model file cannot be found."
)
return None
| [
"logging.getLogger",
"tarfile.open",
"logging.debug",
"rasa.utils.common.TempDirectoryPath",
"copy.deepcopy",
"copy.copy",
"rasa.exceptions.ModelNotFound",
"os.path.exists",
"shutil.move",
"pathlib.Path",
"rasa.cli.utils.create_output_path",
"os.path.isdir",
"packaging.version.parse",
"os.path.relpath",
"subprocess.check_output",
"hashlib.sha256",
"os.scandir",
"os.path.isfile",
"os.path.dirname",
"tempfile.mkdtemp",
"time.time",
"os.makedirs",
"os.path.join",
"shutil.rmtree",
"os.path.abspath"
] | [((937, 964), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (954, 964), False, 'import logging\n'), ((4264, 4289), 'os.path.isdir', 'os.path.isdir', (['model_path'], {}), '(model_path)\n', (4277, 4289), False, 'import os\n'), ((6295, 6331), 'rasa.utils.common.TempDirectoryPath', 'TempDirectoryPath', (['working_directory'], {}), '(working_directory)\n', (6312, 6331), False, 'from rasa.utils.common import TempDirectoryPath\n'), ((6841, 6906), 'os.path.join', 'os.path.join', (['unpacked_model_path', 'DEFAULT_CORE_SUBDIRECTORY_NAME'], {}), '(unpacked_model_path, DEFAULT_CORE_SUBDIRECTORY_NAME)\n', (6853, 6906), False, 'import os\n'), ((6922, 6986), 'os.path.join', 'os.path.join', (['unpacked_model_path', 'DEFAULT_NLU_SUBDIRECTORY_NAME'], {}), '(unpacked_model_path, DEFAULT_NLU_SUBDIRECTORY_NAME)\n', (6934, 6986), False, 'import os\n'), ((7995, 8027), 'os.path.dirname', 'os.path.dirname', (['output_filename'], {}), '(output_filename)\n', (8010, 8027), False, 'import os\n'), ((8274, 8307), 'shutil.rmtree', 'shutil.rmtree', (['training_directory'], {}), '(training_directory)\n', (8287, 8307), False, 'import shutil\n'), ((9403, 9420), 'copy.copy', 'copy.copy', (['domain'], {}), '(domain)\n', (9412, 9420), False, 'import copy\n'), ((11136, 11157), 'copy.deepcopy', 'copy.deepcopy', (['config'], {}), '(config)\n', (11149, 11157), False, 'import copy\n'), ((11788, 11835), 'os.path.join', 'os.path.join', (['model_path', 'FINGERPRINT_FILE_PATH'], {}), '(model_path, FINGERPRINT_FILE_PATH)\n', (11800, 11835), False, 'import os\n'), ((11844, 11876), 'os.path.isfile', 'os.path.isfile', (['fingerprint_path'], {}), '(fingerprint_path)\n', (11858, 11876), False, 'import os\n'), ((12242, 12290), 'os.path.join', 'os.path.join', (['output_path', 'FINGERPRINT_FILE_PATH'], {}), '(output_path, FINGERPRINT_FILE_PATH)\n', (12254, 12290), False, 'import os\n'), ((17669, 17760), 'rasa.cli.utils.create_output_path', 'create_output_path', (['output_directory'], {'prefix': 'model_prefix', 'fixed_name': 'fixed_model_name'}), '(output_directory, prefix=model_prefix, fixed_name=\n fixed_model_name)\n', (17687, 17760), False, 'from rasa.cli.utils import create_output_path\n'), ((4107, 4142), 'rasa.exceptions.ModelNotFound', 'ModelNotFound', (['"""No path specified."""'], {}), "('No path specified.')\n", (4120, 4142), False, 'from rasa.exceptions import ModelNotFound\n'), ((4663, 4690), 'os.path.relpath', 'os.path.relpath', (['model_path'], {}), '(model_path)\n', (4678, 4690), False, 'import os\n'), ((5168, 5194), 'os.path.isfile', 'os.path.isfile', (['model_path'], {}), '(model_path)\n', (5182, 5194), False, 'import os\n'), ((5217, 5244), 'os.path.dirname', 'os.path.dirname', (['model_path'], {}), '(model_path)\n', (5232, 5244), False, 'import os\n'), ((5276, 5312), 'os.path.join', 'os.path.join', (['model_path', '"""*.tar.gz"""'], {}), "(model_path, '*.tar.gz')\n", (5288, 5312), False, 'import os\n'), ((5921, 5939), 'tempfile.mkdtemp', 'tempfile.mkdtemp', ([], {}), '()\n', (5937, 5939), False, 'import tempfile\n'), ((6999, 7023), 'os.path.isdir', 'os.path.isdir', (['core_path'], {}), '(core_path)\n', (7012, 7023), False, 'import os\n'), ((7062, 7085), 'os.path.isdir', 'os.path.isdir', (['nlu_path'], {}), '(nlu_path)\n', (7075, 7085), False, 'import os\n'), ((8039, 8071), 'os.path.exists', 'os.path.exists', (['output_directory'], {}), '(output_directory)\n', (8053, 8071), False, 'import os\n'), ((8081, 8110), 'os.makedirs', 'os.makedirs', (['output_directory'], {}), '(output_directory)\n', 
(8092, 8110), False, 'import os\n'), ((8121, 8158), 'tarfile.open', 'tarfile.open', (['output_filename', '"""w:gz"""'], {}), "(output_filename, 'w:gz')\n", (8133, 8158), False, 'import tarfile\n'), ((8187, 8217), 'os.scandir', 'os.scandir', (['training_directory'], {}), '(training_directory)\n', (8197, 8217), False, 'import os\n'), ((8522, 8590), 'subprocess.check_output', 'check_output', (["['git', 'remote', 'get-url', 'origin']"], {'stderr': 'DEVNULL'}), "(['git', 'remote', 'get-url', 'origin'], stderr=DEVNULL)\n", (8534, 8590), False, 'from subprocess import CalledProcessError, DEVNULL, check_output\n'), ((10466, 10477), 'time.time', 'time.time', ([], {}), '()\n', (10475, 10477), False, 'import time\n'), ((13094, 13121), 'shutil.move', 'shutil.move', (['source', 'target'], {}), '(source, target)\n', (13105, 13121), False, 'import shutil\n'), ((16957, 16998), 'packaging.version.parse', 'version.parse', (['MINIMUM_COMPATIBLE_VERSION'], {}), '(MINIMUM_COMPATIBLE_VERSION)\n', (16970, 16998), False, 'from packaging import version\n'), ((18362, 18387), 'pathlib.Path', 'Path', (['unpacked_model_path'], {}), '(unpacked_model_path)\n', (18366, 18387), False, 'from pathlib import Path\n'), ((4156, 4182), 'os.path.exists', 'os.path.exists', (['model_path'], {}), '(model_path)\n', (4170, 4182), False, 'import os\n'), ((4198, 4255), 'rasa.exceptions.ModelNotFound', 'ModelNotFound', (['f"""No file or directory at \'{model_path}\'."""'], {}), '(f"No file or directory at \'{model_path}\'.")\n', (4211, 4255), False, 'from rasa.exceptions import ModelNotFound\n'), ((4386, 4458), 'rasa.exceptions.ModelNotFound', 'ModelNotFound', (['f"""Could not find any Rasa model files in \'{model_path}\'."""'], {}), '(f"Could not find any Rasa model files in \'{model_path}\'.")\n', (4399, 4458), False, 'from rasa.exceptions import ModelNotFound\n'), ((4548, 4622), 'rasa.exceptions.ModelNotFound', 'ModelNotFound', (['f"""Path \'{model_path}\' does not point to a Rasa model file."""'], {}), '(f"Path \'{model_path}\' does not point to a Rasa model file.")\n', (4561, 4622), False, 'from rasa.exceptions import ModelNotFound\n'), ((5138, 5164), 'os.path.exists', 'os.path.exists', (['model_path'], {}), '(model_path)\n', (5152, 5164), False, 'import os\n'), ((6002, 6039), 'tarfile.open', 'tarfile.open', (['model_file'], {'mode': '"""r:gz"""'}), "(model_file, mode='r:gz')\n", (6014, 6039), False, 'import tarfile\n'), ((11718, 11744), 'os.path.exists', 'os.path.exists', (['model_path'], {}), '(model_path)\n', (11732, 11744), False, 'import os\n'), ((13177, 13221), 'logging.debug', 'logging.debug', (['f"""Could not merge model: {e}"""'], {}), "(f'Could not merge model: {e}')\n", (13190, 13221), False, 'import logging\n'), ((14164, 14189), 'os.path.exists', 'os.path.exists', (['old_model'], {}), '(old_model)\n', (14178, 14189), False, 'import os\n'), ((15221, 15277), 'os.path.join', 'os.path.join', (['train_path', 'DEFAULT_CORE_SUBDIRECTORY_NAME'], {}), '(train_path, DEFAULT_CORE_SUBDIRECTORY_NAME)\n', (15233, 15277), False, 'import os\n'), ((15698, 15729), 'os.path.join', 'os.path.join', (['train_path', '"""nlu"""'], {}), "(train_path, 'nlu')\n", (15710, 15729), False, 'import os\n'), ((17933, 17966), 'os.path.abspath', 'os.path.abspath', (['output_directory'], {}), '(output_directory)\n', (17948, 17966), False, 'import os\n'), ((18931, 18956), 'pathlib.Path', 'Path', (['previous_model_file'], {}), '(previous_model_file)\n', (18935, 18956), False, 'from pathlib import Path\n'), ((19164, 19189), 'pathlib.Path', 'Path', 
(['previous_model_file'], {}), '(previous_model_file)\n', (19168, 19189), False, 'from pathlib import Path\n'), ((8656, 8678), 'hashlib.sha256', 'hashlib.sha256', (['remote'], {}), '(remote)\n', (8670, 8678), False, 'import hashlib\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Jul 16 11:20:01 2021
@author: q
GOAL : develop a backtester using the backtesting.py framework / library
# Installation :
pip install backtesting
# Documentation
Index :
- Manuals
- Tutorials
- Example Strategies
- FAQ
- License
- API Reference Documentation
source : https://kernc.github.io/backtesting.py/doc/backtesting/
# Features
* Simple, well-documented API
* Blazing fast execution
* Built-in optimizer
* Library of composable base strategies and utilities
* Indicator-library-agnostic
* Supports any financial instrument with candlestick data
* Detailed results
* Interactive visualizations
"""
# =============================================================================
# imports and settings
# =============================================================================
# data handling
import pandas as pd
import numpy as np
# import backtesting and set options
import backtesting
# Set notebook False
backtesting.set_bokeh_output(notebook=False)
from backtesting import Backtest, Strategy
from backtesting.lib import crossover, cross
from backtesting.test import SMA, GOOG
# =============================================================================
# strategy definition
# =============================================================================
class PriceAboveSMA(Strategy):
    _ma_period = 21  # simple moving average look-back period (in bars)
def init(self):
        """Precompute the simple moving average indicator."""
self.sma = self.I(SMA, self.data.Close, self._ma_period)
def next(self):
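        # Called on every new bar: go long when the close rises above the SMA,
        # and close the position when it falls back below it.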
price = self.data.Close[-1]
if not self.position and price > self.sma[-1]:
# market entry
self.buy()
elif self.position and price < self.sma[-1]:
# market exit
self.position.close()
# =============================================================================
# Program Execution
# =============================================================================
if __name__ == '__main__':
""" Instantiate the Backtester """
backtester = Backtest(GOOG, PriceAboveSMA, commission=.002,
exclusive_orders=True, cash = 10000)
PLOT = True
""" Run a Single Backtest """
stats = backtester.run()
print(stats)
if PLOT: backtester.plot()
| [
"backtesting.set_bokeh_output",
"backtesting.Backtest"
] | [((1079, 1123), 'backtesting.set_bokeh_output', 'backtesting.set_bokeh_output', ([], {'notebook': '(False)'}), '(notebook=False)\n', (1107, 1123), False, 'import backtesting\n'), ((2231, 2318), 'backtesting.Backtest', 'Backtest', (['GOOG', 'PriceAboveSMA'], {'commission': '(0.002)', 'exclusive_orders': '(True)', 'cash': '(10000)'}), '(GOOG, PriceAboveSMA, commission=0.002, exclusive_orders=True, cash\n =10000)\n', (2239, 2318), False, 'from backtesting import Backtest, Strategy\n')] |
# Copyright (c) 2012 <NAME> and others.
# Permission is hereby granted, free of charge, to any
# person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the
# Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the
# Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice
# shall be included in all copies or substantial portions of
# the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
# PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS
# OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from __future__ import unicode_literals, print_function
import os
from pygments import highlight
from pygments.lexers import get_lexer_for_filename, TextLexer
from pygments.formatters import HtmlFormatter
from nikola.plugin_categories import Task
from nikola import utils
class Listings(Task):
"""Render pretty listings."""
name = "render_listings"
def gen_tasks(self):
"""Render pretty code listings."""
kw = {
"default_lang": self.site.config["DEFAULT_LANG"],
"listings_folder": self.site.config["LISTINGS_FOLDER"],
"output_folder": self.site.config["OUTPUT_FOLDER"],
"index_file": self.site.config["INDEX_FILE"],
}
# Things to ignore in listings
ignored_extensions = (".pyc",)
def render_listing(in_name, out_name, folders=[], files=[]):
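            # Render a single listing: syntax-highlight the input file (when
            # given) and write the page through the 'listing.tmpl' template.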
if in_name:
with open(in_name, 'r') as fd:
try:
lexer = get_lexer_for_filename(in_name)
                    except Exception:
lexer = TextLexer()
code = highlight(fd.read(), lexer,
HtmlFormatter(cssclass='code',
linenos="table", nowrap=False,
lineanchors=utils.slugify(f),
anchorlinenos=True))
title = os.path.basename(in_name)
else:
code = ''
title = ''
crumbs = utils.get_crumbs(os.path.relpath(out_name,
kw['output_folder']),
is_file=True)
context = {
'code': code,
'title': title,
'crumbs': crumbs,
'lang': kw['default_lang'],
'folders': folders,
'files': files,
'description': title,
}
self.site.render_template('listing.tmpl', out_name,
context)
flag = True
template_deps = self.site.template_system.template_deps('listing.tmpl')
for root, dirs, files in os.walk(kw['listings_folder']):
flag = False
# Render all files
out_name = os.path.join(
kw['output_folder'],
root, kw['index_file']
)
yield {
'basename': self.name,
'name': out_name,
'file_dep': template_deps,
'targets': [out_name],
'actions': [(render_listing, [None, out_name, dirs, files])],
# This is necessary to reflect changes in blog title,
# sidebar links, etc.
'uptodate': [utils.config_changed(
self.site.config['GLOBAL_CONTEXT'])],
'clean': True,
}
for f in files:
ext = os.path.splitext(f)[-1]
if ext in ignored_extensions:
continue
in_name = os.path.join(root, f)
out_name = os.path.join(
kw['output_folder'],
root,
f) + '.html'
yield {
'basename': self.name,
'name': out_name,
'file_dep': template_deps + [in_name],
'targets': [out_name],
'actions': [(render_listing, [in_name, out_name])],
# This is necessary to reflect changes in blog title,
# sidebar links, etc.
'uptodate': [utils.config_changed(
self.site.config['GLOBAL_CONTEXT'])],
'clean': True,
}
if flag:
yield {
'basename': self.name,
'actions': [],
}
| [
"pygments.lexers.get_lexer_for_filename",
"os.path.splitext",
"os.path.join",
"nikola.utils.slugify",
"nikola.utils.config_changed",
"os.path.basename",
"pygments.lexers.TextLexer",
"os.walk",
"os.path.relpath"
] | [((3391, 3421), 'os.walk', 'os.walk', (["kw['listings_folder']"], {}), "(kw['listings_folder'])\n", (3398, 3421), False, 'import os\n'), ((3502, 3559), 'os.path.join', 'os.path.join', (["kw['output_folder']", 'root', "kw['index_file']"], {}), "(kw['output_folder'], root, kw['index_file'])\n", (3514, 3559), False, 'import os\n'), ((2574, 2599), 'os.path.basename', 'os.path.basename', (['in_name'], {}), '(in_name)\n', (2590, 2599), False, 'import os\n'), ((2709, 2755), 'os.path.relpath', 'os.path.relpath', (['out_name', "kw['output_folder']"], {}), "(out_name, kw['output_folder'])\n", (2724, 2755), False, 'import os\n'), ((4296, 4317), 'os.path.join', 'os.path.join', (['root', 'f'], {}), '(root, f)\n', (4308, 4317), False, 'import os\n'), ((4171, 4190), 'os.path.splitext', 'os.path.splitext', (['f'], {}), '(f)\n', (4187, 4190), False, 'import os\n'), ((4345, 4387), 'os.path.join', 'os.path.join', (["kw['output_folder']", 'root', 'f'], {}), "(kw['output_folder'], root, f)\n", (4357, 4387), False, 'import os\n'), ((2088, 2119), 'pygments.lexers.get_lexer_for_filename', 'get_lexer_for_filename', (['in_name'], {}), '(in_name)\n', (2110, 2119), False, 'from pygments.lexers import get_lexer_for_filename, TextLexer\n'), ((3996, 4052), 'nikola.utils.config_changed', 'utils.config_changed', (["self.site.config['GLOBAL_CONTEXT']"], {}), "(self.site.config['GLOBAL_CONTEXT'])\n", (4016, 4052), False, 'from nikola import utils\n'), ((2180, 2191), 'pygments.lexers.TextLexer', 'TextLexer', ([], {}), '()\n', (2189, 2191), False, 'from pygments.lexers import get_lexer_for_filename, TextLexer\n'), ((4887, 4943), 'nikola.utils.config_changed', 'utils.config_changed', (["self.site.config['GLOBAL_CONTEXT']"], {}), "(self.site.config['GLOBAL_CONTEXT'])\n", (4907, 4943), False, 'from nikola import utils\n'), ((2460, 2476), 'nikola.utils.slugify', 'utils.slugify', (['f'], {}), '(f)\n', (2473, 2476), False, 'from nikola import utils\n')] |
import numpy as np
from scipy import ndimage
def erode_value_blobs(array, steps=1, values_to_ignore=tuple(), new_value=0):
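    # Shrink every distinct value "blob" in `array` by `steps` binary-erosion
    # iterations; values listed in `values_to_ignore` are kept untouched and
    # eroded-away entries are filled with `new_value`.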
unique_values = list(np.unique(array))
all_entries_to_keep = np.zeros(shape=array.shape, dtype=np.bool)
for unique_value in unique_values:
entries_of_this_value = array == unique_value
if unique_value in values_to_ignore:
all_entries_to_keep = np.logical_or(entries_of_this_value, all_entries_to_keep)
else:
eroded_unique_indicator = ndimage.binary_erosion(entries_of_this_value, iterations=steps)
all_entries_to_keep = np.logical_or(eroded_unique_indicator, all_entries_to_keep)
result = array * all_entries_to_keep
if new_value != 0:
eroded_entries = np.logical_not(all_entries_to_keep)
new_values = new_value * eroded_entries
result += new_values
return result
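# Illustrative usage (assumes a hypothetical integer label array `labels`):
#   eroded = erode_value_blobs(labels, steps=1, values_to_ignore=(0,), new_value=-1)
# keeps background 0 untouched and marks eroded voxels of other blobs with -1.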
| [
"numpy.unique",
"scipy.ndimage.binary_erosion",
"numpy.logical_not",
"numpy.logical_or",
"numpy.zeros"
] | [((194, 236), 'numpy.zeros', 'np.zeros', ([], {'shape': 'array.shape', 'dtype': 'np.bool'}), '(shape=array.shape, dtype=np.bool)\n', (202, 236), True, 'import numpy as np\n'), ((150, 166), 'numpy.unique', 'np.unique', (['array'], {}), '(array)\n', (159, 166), True, 'import numpy as np\n'), ((766, 801), 'numpy.logical_not', 'np.logical_not', (['all_entries_to_keep'], {}), '(all_entries_to_keep)\n', (780, 801), True, 'import numpy as np\n'), ((409, 466), 'numpy.logical_or', 'np.logical_or', (['entries_of_this_value', 'all_entries_to_keep'], {}), '(entries_of_this_value, all_entries_to_keep)\n', (422, 466), True, 'import numpy as np\n'), ((519, 582), 'scipy.ndimage.binary_erosion', 'ndimage.binary_erosion', (['entries_of_this_value'], {'iterations': 'steps'}), '(entries_of_this_value, iterations=steps)\n', (541, 582), False, 'from scipy import ndimage\n'), ((617, 676), 'numpy.logical_or', 'np.logical_or', (['eroded_unique_indicator', 'all_entries_to_keep'], {}), '(eroded_unique_indicator, all_entries_to_keep)\n', (630, 676), True, 'import numpy as np\n')] |
# coding: utf-8
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Test the Logarithmic Units and Quantities
"""
from __future__ import (absolute_import, unicode_literals, division,
print_function)
from ...extern import six
from ...extern.six.moves import zip
import pickle
import itertools
import pytest
import numpy as np
from numpy.testing.utils import assert_allclose
from ...tests.helper import assert_quantity_allclose
from ... import units as u, constants as c
lu_units = [u.dex, u.mag, u.decibel]
lu_subclasses = [u.DexUnit, u.MagUnit, u.DecibelUnit]
lq_subclasses = [u.Dex, u.Magnitude, u.Decibel]
pu_sample = (u.dimensionless_unscaled, u.m, u.g/u.s**2, u.Jy)
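# The units, unit classes, quantity classes and sample physical units above
# are used to parametrize the tests that follow.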
class TestLogUnitCreation(object):
def test_logarithmic_units(self):
"""Check logarithmic units are set up correctly."""
assert u.dB.to(u.dex) == 0.1
assert u.dex.to(u.mag) == -2.5
assert u.mag.to(u.dB) == -4
@pytest.mark.parametrize('lu_unit, lu_cls', zip(lu_units, lu_subclasses))
def test_callable_units(self, lu_unit, lu_cls):
assert isinstance(lu_unit, u.UnitBase)
assert callable(lu_unit)
assert lu_unit._function_unit_class is lu_cls
@pytest.mark.parametrize('lu_unit', lu_units)
def test_equality_to_normal_unit_for_dimensionless(self, lu_unit):
lu = lu_unit()
assert lu == lu._default_function_unit # eg, MagUnit() == u.mag
assert lu._default_function_unit == lu # and u.mag == MagUnit()
@pytest.mark.parametrize('lu_unit, physical_unit',
itertools.product(lu_units, pu_sample))
def test_call_units(self, lu_unit, physical_unit):
"""Create a LogUnit subclass using the callable unit and physical unit,
and do basic check that output is right."""
lu1 = lu_unit(physical_unit)
assert lu1.physical_unit == physical_unit
assert lu1.function_unit == lu1._default_function_unit
def test_call_invalid_unit(self):
with pytest.raises(TypeError):
u.mag([])
with pytest.raises(ValueError):
u.mag(u.mag())
@pytest.mark.parametrize('lu_cls, physical_unit', itertools.product(
lu_subclasses + [u.LogUnit], pu_sample))
def test_subclass_creation(self, lu_cls, physical_unit):
"""Create a LogUnit subclass object for given physical unit,
and do basic check that output is right."""
lu1 = lu_cls(physical_unit)
assert lu1.physical_unit == physical_unit
assert lu1.function_unit == lu1._default_function_unit
lu2 = lu_cls(physical_unit,
function_unit=2*lu1._default_function_unit)
assert lu2.physical_unit == physical_unit
assert lu2.function_unit == u.Unit(2*lu2._default_function_unit)
with pytest.raises(ValueError):
lu_cls(physical_unit, u.m)
def test_predefined_magnitudes():
assert_quantity_allclose((-21.1*u.STmag).physical,
1.*u.erg/u.cm**2/u.s/u.AA)
assert_quantity_allclose((-48.6*u.ABmag).physical,
1.*u.erg/u.cm**2/u.s/u.Hz)
assert_quantity_allclose((0*u.M_bol).physical, c.L_bol0)
assert_quantity_allclose((0*u.m_bol).physical,
c.L_bol0/(4.*np.pi*(10.*c.pc)**2))
def test_predefined_reinitialisation():
assert u.mag('ST') == u.STmag
assert u.mag('AB') == u.ABmag
assert u.mag('Bol') == u.M_bol
assert u.mag('bol') == u.m_bol
def test_predefined_string_roundtrip():
"""Ensure roundtripping; see #5015"""
with u.magnitude_zero_points.enable():
assert u.Unit(u.STmag.to_string()) == u.STmag
assert u.Unit(u.ABmag.to_string()) == u.ABmag
assert u.Unit(u.M_bol.to_string()) == u.M_bol
assert u.Unit(u.m_bol.to_string()) == u.m_bol
def test_inequality():
"""Check __ne__ works (regresssion for #5342)."""
lu1 = u.mag(u.Jy)
lu2 = u.dex(u.Jy)
lu3 = u.mag(u.Jy**2)
lu4 = lu3 - lu1
assert lu1 != lu2
assert lu1 != lu3
assert lu1 == lu4
class TestLogUnitStrings(object):
def test_str(self):
"""Do some spot checks that str, repr, etc. work as expected."""
lu1 = u.mag(u.Jy)
assert str(lu1) == 'mag(Jy)'
assert repr(lu1) == 'Unit("mag(Jy)")'
assert lu1.to_string('generic') == 'mag(Jy)'
with pytest.raises(ValueError):
lu1.to_string('fits')
lu2 = u.dex()
assert str(lu2) == 'dex'
assert repr(lu2) == 'Unit("dex(1)")'
assert lu2.to_string() == 'dex(1)'
lu3 = u.MagUnit(u.Jy, function_unit=2*u.mag)
assert str(lu3) == '2 mag(Jy)'
assert repr(lu3) == 'MagUnit("Jy", unit="2 mag")'
assert lu3.to_string() == '2 mag(Jy)'
lu4 = u.mag(u.ct)
assert lu4.to_string('generic') == 'mag(ct)'
assert lu4.to_string('latex') == ('$\\mathrm{mag}$$\\mathrm{\\left( '
'\\mathrm{ct} \\right)}$')
assert lu4._repr_latex_() == lu4.to_string('latex')
class TestLogUnitConversion(object):
@pytest.mark.parametrize('lu_unit, physical_unit',
itertools.product(lu_units, pu_sample))
def test_physical_unit_conversion(self, lu_unit, physical_unit):
"""Check various LogUnit subclasses are equivalent and convertible
to their non-log counterparts."""
lu1 = lu_unit(physical_unit)
assert lu1.is_equivalent(physical_unit)
assert lu1.to(physical_unit, 0.) == 1.
assert physical_unit.is_equivalent(lu1)
assert physical_unit.to(lu1, 1.) == 0.
pu = u.Unit(8.*physical_unit)
assert lu1.is_equivalent(physical_unit)
assert lu1.to(pu, 0.) == 0.125
assert pu.is_equivalent(lu1)
assert_allclose(pu.to(lu1, 0.125), 0., atol=1.e-15)
# Check we round-trip.
value = np.linspace(0., 10., 6)
assert_allclose(pu.to(lu1, lu1.to(pu, value)), value, atol=1.e-15)
# And that we're not just returning True all the time.
pu2 = u.g
assert not lu1.is_equivalent(pu2)
with pytest.raises(u.UnitsError):
lu1.to(pu2)
assert not pu2.is_equivalent(lu1)
with pytest.raises(u.UnitsError):
pu2.to(lu1)
@pytest.mark.parametrize('lu_unit', lu_units)
def test_container_unit_conversion(self, lu_unit):
"""Check that conversion to logarithmic units (u.mag, u.dB, u.dex)
is only possible when the physical unit is dimensionless."""
values = np.linspace(0., 10., 6)
lu1 = lu_unit(u.dimensionless_unscaled)
assert lu1.is_equivalent(lu1.function_unit)
assert_allclose(lu1.to(lu1.function_unit, values), values)
lu2 = lu_unit(u.Jy)
assert not lu2.is_equivalent(lu2.function_unit)
with pytest.raises(u.UnitsError):
lu2.to(lu2.function_unit, values)
@pytest.mark.parametrize(
'flu_unit, tlu_unit, physical_unit',
itertools.product(lu_units, lu_units, pu_sample))
def test_subclass_conversion(self, flu_unit, tlu_unit, physical_unit):
"""Check various LogUnit subclasses are equivalent and convertible
to each other if they correspond to equivalent physical units."""
values = np.linspace(0., 10., 6)
flu = flu_unit(physical_unit)
tlu = tlu_unit(physical_unit)
assert flu.is_equivalent(tlu)
assert_allclose(flu.to(tlu), flu.function_unit.to(tlu.function_unit))
assert_allclose(flu.to(tlu, values),
values * flu.function_unit.to(tlu.function_unit))
tlu2 = tlu_unit(u.Unit(100.*physical_unit))
assert flu.is_equivalent(tlu2)
# Check that we round-trip.
assert_allclose(flu.to(tlu2, tlu2.to(flu, values)), values, atol=1.e-15)
tlu3 = tlu_unit(physical_unit.to_system(u.si)[0])
assert flu.is_equivalent(tlu3)
assert_allclose(flu.to(tlu3, tlu3.to(flu, values)), values, atol=1.e-15)
tlu4 = tlu_unit(u.g)
assert not flu.is_equivalent(tlu4)
with pytest.raises(u.UnitsError):
flu.to(tlu4, values)
def test_unit_decomposition(self):
lu = u.mag(u.Jy)
assert lu.decompose() == u.mag(u.Jy.decompose())
assert lu.decompose().physical_unit.bases == [u.kg, u.s]
assert lu.si == u.mag(u.Jy.si)
assert lu.si.physical_unit.bases == [u.kg, u.s]
assert lu.cgs == u.mag(u.Jy.cgs)
assert lu.cgs.physical_unit.bases == [u.g, u.s]
def test_unit_multiple_possible_equivalencies(self):
lu = u.mag(u.Jy)
assert lu.is_equivalent(pu_sample)
class TestLogUnitArithmetic(object):
def test_multiplication_division(self):
"""Check that multiplication/division with other units is only
possible when the physical unit is dimensionless, and that this
turns the unit into a normal one."""
lu1 = u.mag(u.Jy)
with pytest.raises(u.UnitsError):
lu1 * u.m
with pytest.raises(u.UnitsError):
u.m * lu1
with pytest.raises(u.UnitsError):
lu1 / lu1
for unit in (u.dimensionless_unscaled, u.m, u.mag, u.dex):
with pytest.raises(u.UnitsError):
lu1 / unit
lu2 = u.mag(u.dimensionless_unscaled)
with pytest.raises(u.UnitsError):
lu2 * lu1
with pytest.raises(u.UnitsError):
lu2 / lu1
# But dimensionless_unscaled can be cancelled.
assert lu2 / lu2 == u.dimensionless_unscaled
# With dimensionless, normal units are OK, but we return a plain unit.
tf = lu2 * u.m
tr = u.m * lu2
for t in (tf, tr):
assert not isinstance(t, type(lu2))
assert t == lu2.function_unit * u.m
with u.set_enabled_equivalencies(u.logarithmic()):
with pytest.raises(u.UnitsError):
t.to(lu2.physical_unit)
# Now we essentially have a LogUnit with a prefactor of 100,
# so should be equivalent again.
t = tf / u.cm
with u.set_enabled_equivalencies(u.logarithmic()):
assert t.is_equivalent(lu2.function_unit)
assert_allclose(t.to(u.dimensionless_unscaled, np.arange(3.)/100.),
lu2.to(lu2.physical_unit, np.arange(3.)))
# If we effectively remove lu1, a normal unit should be returned.
t2 = tf / lu2
assert not isinstance(t2, type(lu2))
assert t2 == u.m
t3 = tf / lu2.function_unit
assert not isinstance(t3, type(lu2))
assert t3 == u.m
# For completeness, also ensure non-sensical operations fail
with pytest.raises(TypeError):
lu1 * object()
with pytest.raises(TypeError):
slice(None) * lu1
with pytest.raises(TypeError):
lu1 / []
with pytest.raises(TypeError):
1 / lu1
@pytest.mark.parametrize('power', (2, 0.5, 1, 0))
def test_raise_to_power(self, power):
"""Check that raising LogUnits to some power is only possible when the
physical unit is dimensionless, and that conversion is turned off when
the resulting logarithmic unit (such as mag**2) is incompatible."""
lu1 = u.mag(u.Jy)
if power == 0:
assert lu1 ** power == u.dimensionless_unscaled
elif power == 1:
assert lu1 ** power == lu1
else:
with pytest.raises(u.UnitsError):
lu1 ** power
# With dimensionless, though, it works, but returns a normal unit.
lu2 = u.mag(u.dimensionless_unscaled)
t = lu2**power
if power == 0:
assert t == u.dimensionless_unscaled
elif power == 1:
assert t == lu2
else:
assert not isinstance(t, type(lu2))
assert t == lu2.function_unit**power
# also check we roundtrip
t2 = t**(1./power)
assert t2 == lu2.function_unit
with u.set_enabled_equivalencies(u.logarithmic()):
assert_allclose(t2.to(u.dimensionless_unscaled, np.arange(3.)),
lu2.to(lu2.physical_unit, np.arange(3.)))
@pytest.mark.parametrize('other', pu_sample)
def test_addition_subtraction_to_normal_units_fails(self, other):
lu1 = u.mag(u.Jy)
with pytest.raises(u.UnitsError):
lu1 + other
with pytest.raises(u.UnitsError):
lu1 - other
with pytest.raises(u.UnitsError):
other - lu1
def test_addition_subtraction_to_non_units_fails(self):
lu1 = u.mag(u.Jy)
with pytest.raises(TypeError):
lu1 + 1.
with pytest.raises(TypeError):
lu1 - [1., 2., 3.]
@pytest.mark.parametrize(
'other', (u.mag, u.mag(), u.mag(u.Jy), u.mag(u.m),
u.Unit(2*u.mag), u.MagUnit('', 2.*u.mag)))
def test_addition_subtraction(self, other):
"""Check physical units are changed appropriately"""
lu1 = u.mag(u.Jy)
other_pu = getattr(other, 'physical_unit', u.dimensionless_unscaled)
lu_sf = lu1 + other
assert lu_sf.is_equivalent(lu1.physical_unit * other_pu)
lu_sr = other + lu1
assert lu_sr.is_equivalent(lu1.physical_unit * other_pu)
lu_df = lu1 - other
assert lu_df.is_equivalent(lu1.physical_unit / other_pu)
lu_dr = other - lu1
assert lu_dr.is_equivalent(other_pu / lu1.physical_unit)
def test_complicated_addition_subtraction(self):
"""for fun, a more complicated example of addition and subtraction"""
dm0 = u.Unit('DM', 1./(4.*np.pi*(10.*u.pc)**2))
lu_dm = u.mag(dm0)
lu_absST = u.STmag - lu_dm
assert lu_absST.is_equivalent(u.erg/u.s/u.AA)
def test_neg_pos(self):
lu1 = u.mag(u.Jy)
neg_lu = -lu1
assert neg_lu != lu1
assert neg_lu.physical_unit == u.Jy**-1
assert -neg_lu == lu1
pos_lu = +lu1
assert pos_lu is not lu1
assert pos_lu == lu1
def test_pickle():
lu1 = u.dex(u.cm/u.s**2)
s = pickle.dumps(lu1)
lu2 = pickle.loads(s)
assert lu1 == lu2
def test_hashable():
lu1 = u.dB(u.mW)
lu2 = u.dB(u.m)
lu3 = u.dB(u.mW)
assert hash(lu1) != hash(lu2)
assert hash(lu1) == hash(lu3)
luset = {lu1, lu2, lu3}
assert len(luset) == 2
class TestLogQuantityCreation(object):
@pytest.mark.parametrize('lq, lu', zip(lq_subclasses + [u.LogQuantity],
lu_subclasses + [u.LogUnit]))
def test_logarithmic_quantities(self, lq, lu):
"""Check logarithmic quantities are all set up correctly"""
assert lq._unit_class == lu
assert type(lu()._quantity_class(1.)) is lq
@pytest.mark.parametrize('lq_cls, physical_unit',
itertools.product(lq_subclasses, pu_sample))
def test_subclass_creation(self, lq_cls, physical_unit):
"""Create LogQuantity subclass objects for some physical units,
and basic check on transformations"""
value = np.arange(1., 10.)
log_q = lq_cls(value * physical_unit)
assert log_q.unit.physical_unit == physical_unit
assert log_q.unit.function_unit == log_q.unit._default_function_unit
assert_allclose(log_q.physical.value, value)
with pytest.raises(ValueError):
lq_cls(value, physical_unit)
@pytest.mark.parametrize(
'unit', (u.mag, u.mag(), u.mag(u.Jy), u.mag(u.m),
u.Unit(2*u.mag), u.MagUnit('', 2.*u.mag),
u.MagUnit(u.Jy, -1*u.mag), u.MagUnit(u.m, -2.*u.mag)))
def test_different_units(self, unit):
q = u.Magnitude(1.23, unit)
assert q.unit.function_unit == getattr(unit, 'function_unit', unit)
assert q.unit.physical_unit is getattr(unit, 'physical_unit',
u.dimensionless_unscaled)
@pytest.mark.parametrize('value, unit', (
(1.*u.mag(u.Jy), None),
(1.*u.dex(u.Jy), None),
(1.*u.mag(u.W/u.m**2/u.Hz), u.mag(u.Jy)),
(1.*u.dex(u.W/u.m**2/u.Hz), u.mag(u.Jy))))
def test_function_values(self, value, unit):
lq = u.Magnitude(value, unit)
assert lq == value
assert lq.unit.function_unit == u.mag
assert lq.unit.physical_unit == getattr(unit, 'physical_unit',
value.unit.physical_unit)
@pytest.mark.parametrize(
'unit', (u.mag(), u.mag(u.Jy), u.mag(u.m), u.MagUnit('', 2.*u.mag),
u.MagUnit(u.Jy, -1*u.mag), u.MagUnit(u.m, -2.*u.mag)))
def test_indirect_creation(self, unit):
q1 = 2.5 * unit
assert isinstance(q1, u.Magnitude)
assert q1.value == 2.5
assert q1.unit == unit
pv = 100. * unit.physical_unit
q2 = unit * pv
assert q2.unit == unit
assert q2.unit.physical_unit == pv.unit
assert q2.to_value(unit.physical_unit) == 100.
assert (q2._function_view / u.mag).to_value(1) == -5.
q3 = unit / 0.4
assert q3 == q1
def test_from_view(self):
# Cannot view a physical quantity as a function quantity, since the
# values would change.
q = [100., 1000.] * u.cm/u.s**2
with pytest.raises(TypeError):
q.view(u.Dex)
# But fine if we have the right magnitude.
q = [2., 3.] * u.dex
lq = q.view(u.Dex)
assert isinstance(lq, u.Dex)
assert lq.unit.physical_unit == u.dimensionless_unscaled
assert np.all(q == lq)
def test_using_quantity_class(self):
"""Check that we can use Quantity if we have subok=True"""
# following issue #5851
lu = u.dex(u.AA)
with pytest.raises(u.UnitTypeError):
u.Quantity(1., lu)
q = u.Quantity(1., lu, subok=True)
assert type(q) is lu._quantity_class
def test_conversion_to_and_from_physical_quantities():
"""Ensures we can convert from regular quantities."""
mst = [10., 12., 14.] * u.STmag
flux_lambda = mst.physical
mst_roundtrip = flux_lambda.to(u.STmag)
# check we return a logquantity; see #5178.
assert isinstance(mst_roundtrip, u.Magnitude)
assert mst_roundtrip.unit == mst.unit
assert_allclose(mst_roundtrip.value, mst.value)
wave = [4956.8, 4959.55, 4962.3] * u.AA
flux_nu = mst.to(u.Jy, equivalencies=u.spectral_density(wave))
mst_roundtrip2 = flux_nu.to(u.STmag, u.spectral_density(wave))
assert isinstance(mst_roundtrip2, u.Magnitude)
assert mst_roundtrip2.unit == mst.unit
assert_allclose(mst_roundtrip2.value, mst.value)
def test_quantity_decomposition():
lq = 10.*u.mag(u.Jy)
assert lq.decompose() == lq
assert lq.decompose().unit.physical_unit.bases == [u.kg, u.s]
assert lq.si == lq
assert lq.si.unit.physical_unit.bases == [u.kg, u.s]
assert lq.cgs == lq
assert lq.cgs.unit.physical_unit.bases == [u.g, u.s]
class TestLogQuantityViews(object):
def setup(self):
self.lq = u.Magnitude(np.arange(10.) * u.Jy)
self.lq2 = u.Magnitude(np.arange(5.))
def test_value_view(self):
lq_value = self.lq.value
assert type(lq_value) is np.ndarray
lq_value[2] = -1.
assert np.all(self.lq.value == lq_value)
def test_function_view(self):
lq_fv = self.lq._function_view
assert type(lq_fv) is u.Quantity
assert lq_fv.unit is self.lq.unit.function_unit
lq_fv[3] = -2. * lq_fv.unit
assert np.all(self.lq.value == lq_fv.value)
def test_quantity_view(self):
# Cannot view as Quantity, since the unit cannot be represented.
with pytest.raises(TypeError):
self.lq.view(u.Quantity)
# But a dimensionless one is fine.
q2 = self.lq2.view(u.Quantity)
assert q2.unit is u.mag
assert np.all(q2.value == self.lq2.value)
lq3 = q2.view(u.Magnitude)
assert type(lq3.unit) is u.MagUnit
assert lq3.unit.physical_unit == u.dimensionless_unscaled
assert np.all(lq3 == self.lq2)
class TestLogQuantitySlicing(object):
def test_item_get_and_set(self):
lq1 = u.Magnitude(np.arange(1., 11.)*u.Jy)
assert lq1[9] == u.Magnitude(10.*u.Jy)
lq1[2] = 100.*u.Jy
assert lq1[2] == u.Magnitude(100.*u.Jy)
with pytest.raises(u.UnitsError):
lq1[2] = 100.*u.m
with pytest.raises(u.UnitsError):
lq1[2] = 100.*u.mag
with pytest.raises(u.UnitsError):
lq1[2] = u.Magnitude(100.*u.m)
assert lq1[2] == u.Magnitude(100.*u.Jy)
def test_slice_get_and_set(self):
lq1 = u.Magnitude(np.arange(1., 10.)*u.Jy)
lq1[2:4] = 100.*u.Jy
assert np.all(lq1[2:4] == u.Magnitude(100.*u.Jy))
with pytest.raises(u.UnitsError):
lq1[2:4] = 100.*u.m
with pytest.raises(u.UnitsError):
lq1[2:4] = 100.*u.mag
with pytest.raises(u.UnitsError):
lq1[2:4] = u.Magnitude(100.*u.m)
assert np.all(lq1[2] == u.Magnitude(100.*u.Jy))
class TestLogQuantityArithmetic(object):
def test_multiplication_division(self):
"""Check that multiplication/division with other quantities is only
possible when the physical unit is dimensionless, and that this turns
the result into a normal quantity."""
lq = u.Magnitude(np.arange(1., 11.)*u.Jy)
with pytest.raises(u.UnitsError):
lq * (1.*u.m)
with pytest.raises(u.UnitsError):
(1.*u.m) * lq
with pytest.raises(u.UnitsError):
lq / lq
for unit in (u.m, u.mag, u.dex):
with pytest.raises(u.UnitsError):
lq / unit
lq2 = u.Magnitude(np.arange(1, 11.))
with pytest.raises(u.UnitsError):
lq2 * lq
with pytest.raises(u.UnitsError):
lq2 / lq
with pytest.raises(u.UnitsError):
lq / lq2
# but dimensionless_unscaled can be cancelled
r = lq2 / u.Magnitude(2.)
assert r.unit == u.dimensionless_unscaled
assert np.all(r.value == lq2.value/2.)
# with dimensionless, normal units OK, but return normal quantities
tf = lq2 * u.m
tr = u.m * lq2
for t in (tf, tr):
assert not isinstance(t, type(lq2))
assert t.unit == lq2.unit.function_unit * u.m
with u.set_enabled_equivalencies(u.logarithmic()):
with pytest.raises(u.UnitsError):
t.to(lq2.unit.physical_unit)
t = tf / (50.*u.cm)
# now we essentially have the same quantity but with a prefactor of 2
assert t.unit.is_equivalent(lq2.unit.function_unit)
assert_allclose(t.to(lq2.unit.function_unit), lq2._function_view*2)
@pytest.mark.parametrize('power', (2, 0.5, 1, 0))
def test_raise_to_power(self, power):
"""Check that raising LogQuantities to some power is only possible when
the physical unit is dimensionless, and that conversion is turned off
when the resulting logarithmic unit (say, mag**2) is incompatible."""
lq = u.Magnitude(np.arange(1., 4.)*u.Jy)
if power == 0:
assert np.all(lq ** power == 1.)
elif power == 1:
assert np.all(lq ** power == lq)
else:
with pytest.raises(u.UnitsError):
lq ** power
# with dimensionless, it works, but falls back to normal quantity
# (except for power=1)
lq2 = u.Magnitude(np.arange(10.))
t = lq2**power
if power == 0:
assert t.unit is u.dimensionless_unscaled
assert np.all(t.value == 1.)
elif power == 1:
assert np.all(t == lq2)
else:
assert not isinstance(t, type(lq2))
assert t.unit == lq2.unit.function_unit ** power
with u.set_enabled_equivalencies(u.logarithmic()):
with pytest.raises(u.UnitsError):
t.to(u.dimensionless_unscaled)
def test_error_on_lq_as_power(self):
lq = u.Magnitude(np.arange(1., 4.)*u.Jy)
with pytest.raises(TypeError):
lq ** lq
@pytest.mark.parametrize('other', pu_sample)
def test_addition_subtraction_to_normal_units_fails(self, other):
lq = u.Magnitude(np.arange(1., 10.)*u.Jy)
q = 1.23 * other
with pytest.raises(u.UnitsError):
lq + q
with pytest.raises(u.UnitsError):
lq - q
with pytest.raises(u.UnitsError):
q - lq
@pytest.mark.parametrize(
'other', (1.23 * u.mag, 2.34 * u.mag(),
u.Magnitude(3.45 * u.Jy), u.Magnitude(4.56 * u.m),
5.67 * u.Unit(2*u.mag), u.Magnitude(6.78, 2.*u.mag)))
def test_addition_subtraction(self, other):
"""Check that addition/subtraction with quantities with magnitude or
MagUnit units works, and that it changes the physical units
appropriately."""
lq = u.Magnitude(np.arange(1., 10.)*u.Jy)
other_physical = other.to(getattr(other.unit, 'physical_unit',
u.dimensionless_unscaled),
equivalencies=u.logarithmic())
lq_sf = lq + other
assert_allclose(lq_sf.physical, lq.physical * other_physical)
lq_sr = other + lq
assert_allclose(lq_sr.physical, lq.physical * other_physical)
lq_df = lq - other
assert_allclose(lq_df.physical, lq.physical / other_physical)
lq_dr = other - lq
assert_allclose(lq_dr.physical, other_physical / lq.physical)
@pytest.mark.parametrize('other', pu_sample)
def test_inplace_addition_subtraction_unit_checks(self, other):
lu1 = u.mag(u.Jy)
lq1 = u.Magnitude(np.arange(1., 10.), lu1)
with pytest.raises(u.UnitsError):
lq1 += other
assert np.all(lq1.value == np.arange(1., 10.))
assert lq1.unit == lu1
with pytest.raises(u.UnitsError):
lq1 -= other
assert np.all(lq1.value == np.arange(1., 10.))
assert lq1.unit == lu1
@pytest.mark.parametrize(
'other', (1.23 * u.mag, 2.34 * u.mag(),
u.Magnitude(3.45 * u.Jy), u.Magnitude(4.56 * u.m),
5.67 * u.Unit(2*u.mag), u.Magnitude(6.78, 2.*u.mag)))
def test_inplace_addition_subtraction(self, other):
"""Check that inplace addition/subtraction with quantities with
magnitude or MagUnit units works, and that it changes the physical
units appropriately."""
lq = u.Magnitude(np.arange(1., 10.)*u.Jy)
other_physical = other.to(getattr(other.unit, 'physical_unit',
u.dimensionless_unscaled),
equivalencies=u.logarithmic())
lq_sf = lq.copy()
lq_sf += other
assert_allclose(lq_sf.physical, lq.physical * other_physical)
lq_df = lq.copy()
lq_df -= other
assert_allclose(lq_df.physical, lq.physical / other_physical)
def test_complicated_addition_subtraction(self):
"""For fun, a more complicated example of addition and subtraction."""
dm0 = u.Unit('DM', 1./(4.*np.pi*(10.*u.pc)**2))
DMmag = u.mag(dm0)
m_st = 10. * u.STmag
dm = 5. * DMmag
M_st = m_st - dm
assert M_st.unit.is_equivalent(u.erg/u.s/u.AA)
assert np.abs(M_st.physical /
(m_st.physical*4.*np.pi*(100.*u.pc)**2) - 1.) < 1.e-15
class TestLogQuantityComparisons(object):
def test_comparison_to_non_quantities_fails(self):
lq = u.Magnitude(np.arange(1., 10.)*u.Jy)
# On python2, ordering operations always succeed, given essentially
# meaningless results.
if not six.PY2:
with pytest.raises(TypeError):
lq > 'a'
assert not (lq == 'a')
assert lq != 'a'
def test_comparison(self):
lq1 = u.Magnitude(np.arange(1., 4.)*u.Jy)
lq2 = u.Magnitude(2.*u.Jy)
assert np.all((lq1 > lq2) == np.array([True, False, False]))
assert np.all((lq1 == lq2) == np.array([False, True, False]))
lq3 = u.Dex(2.*u.Jy)
assert np.all((lq1 > lq3) == np.array([True, False, False]))
assert np.all((lq1 == lq3) == np.array([False, True, False]))
lq4 = u.Magnitude(2.*u.m)
assert not (lq1 == lq4)
assert lq1 != lq4
with pytest.raises(u.UnitsError):
lq1 < lq4
q5 = 1.5 * u.Jy
assert np.all((lq1 > q5) == np.array([True, False, False]))
assert np.all((q5 < lq1) == np.array([True, False, False]))
with pytest.raises(u.UnitsError):
lq1 >= 2.*u.m
with pytest.raises(u.UnitsError):
lq1 <= lq1.value * u.mag
# For physically dimensionless, we can compare with the function unit.
lq6 = u.Magnitude(np.arange(1., 4.))
fv6 = lq6.value * u.mag
assert np.all(lq6 == fv6)
# but not some arbitrary unit, of course.
with pytest.raises(u.UnitsError):
lq6 < 2.*u.m
class TestLogQuantityMethods(object):
def setup(self):
self.mJy = np.arange(1., 5.).reshape(2, 2) * u.mag(u.Jy)
self.m1 = np.arange(1., 5.5, 0.5).reshape(3, 3) * u.mag()
self.mags = (self.mJy, self.m1)
@pytest.mark.parametrize('method', ('mean', 'min', 'max', 'round', 'trace',
'std', 'var', 'ptp', 'diff', 'ediff1d'))
def test_always_ok(self, method):
for mag in self.mags:
res = getattr(mag, method)()
assert np.all(res.value ==
getattr(mag._function_view, method)().value)
if method in ('std', 'ptp', 'diff', 'ediff1d'):
assert res.unit == u.mag()
elif method == 'var':
assert res.unit == u.mag**2
else:
assert res.unit == mag.unit
def test_clip(self):
for mag in self.mags:
assert np.all(mag.clip(2. * mag.unit, 4. * mag.unit).value ==
mag.value.clip(2., 4.))
@pytest.mark.parametrize('method', ('sum', 'cumsum', 'nansum'))
def test_only_ok_if_dimensionless(self, method):
res = getattr(self.m1, method)()
assert np.all(res.value ==
getattr(self.m1._function_view, method)().value)
assert res.unit == self.m1.unit
with pytest.raises(TypeError):
getattr(self.mJy, method)()
def test_dot(self):
assert np.all(self.m1.dot(self.m1).value ==
self.m1.value.dot(self.m1.value))
@pytest.mark.parametrize('method', ('prod', 'cumprod'))
def test_never_ok(self, method):
with pytest.raises(ValueError):
getattr(self.mJy, method)()
with pytest.raises(ValueError):
getattr(self.m1, method)()
class TestLogQuantityUfuncs(object):
"""Spot checks on ufuncs."""
def setup(self):
self.mJy = np.arange(1., 5.).reshape(2, 2) * u.mag(u.Jy)
self.m1 = np.arange(1., 5.5, 0.5).reshape(3, 3) * u.mag()
self.mags = (self.mJy, self.m1)
def test_power(self):
assert np.all(np.power(self.mJy, 0.) == 1.)
assert np.all(np.power(self.m1, 1.) == self.m1)
assert np.all(np.power(self.mJy, 1.) == self.mJy)
assert np.all(np.power(self.m1, 2.) == self.m1 ** 2)
with pytest.raises(u.UnitsError):
np.power(self.mJy, 2.)
def test_not_implemented_with_physical_unit(self):
with pytest.raises(u.UnitsError):
np.square(self.mJy)
assert np.all(np.square(self.m1) == self.m1 ** 2)
| [
"numpy.abs",
"numpy.power",
"pickle.dumps",
"itertools.product",
"numpy.square",
"pytest.mark.parametrize",
"numpy.linspace",
"numpy.array",
"pytest.raises",
"numpy.testing.utils.assert_allclose",
"pickle.loads",
"numpy.all",
"numpy.arange"
] | [((1241, 1285), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""lu_unit"""', 'lu_units'], {}), "('lu_unit', lu_units)\n", (1264, 1285), False, 'import pytest\n'), ((6355, 6399), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""lu_unit"""', 'lu_units'], {}), "('lu_unit', lu_units)\n", (6378, 6399), False, 'import pytest\n'), ((11055, 11103), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""power"""', '(2, 0.5, 1, 0)'], {}), "('power', (2, 0.5, 1, 0))\n", (11078, 11103), False, 'import pytest\n'), ((12360, 12403), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""other"""', 'pu_sample'], {}), "('other', pu_sample)\n", (12383, 12403), False, 'import pytest\n'), ((14287, 14304), 'pickle.dumps', 'pickle.dumps', (['lu1'], {}), '(lu1)\n', (14299, 14304), False, 'import pickle\n'), ((14315, 14330), 'pickle.loads', 'pickle.loads', (['s'], {}), '(s)\n', (14327, 14330), False, 'import pickle\n'), ((18491, 18538), 'numpy.testing.utils.assert_allclose', 'assert_allclose', (['mst_roundtrip.value', 'mst.value'], {}), '(mst_roundtrip.value, mst.value)\n', (18506, 18538), False, 'from numpy.testing.utils import assert_allclose\n'), ((18815, 18863), 'numpy.testing.utils.assert_allclose', 'assert_allclose', (['mst_roundtrip2.value', 'mst.value'], {}), '(mst_roundtrip2.value, mst.value)\n', (18830, 18863), False, 'from numpy.testing.utils import assert_allclose\n'), ((23059, 23107), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""power"""', '(2, 0.5, 1, 0)'], {}), "('power', (2, 0.5, 1, 0))\n", (23082, 23107), False, 'import pytest\n'), ((24457, 24500), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""other"""', 'pu_sample'], {}), "('other', pu_sample)\n", (24480, 24500), False, 'import pytest\n'), ((25923, 25966), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""other"""', 'pu_sample'], {}), "('other', pu_sample)\n", (25946, 25966), False, 'import pytest\n'), ((29671, 29790), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""method"""', "('mean', 'min', 'max', 'round', 'trace', 'std', 'var', 'ptp', 'diff', 'ediff1d'\n )"], {}), "('method', ('mean', 'min', 'max', 'round', 'trace',\n 'std', 'var', 'ptp', 'diff', 'ediff1d'))\n", (29694, 29790), False, 'import pytest\n'), ((30475, 30537), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""method"""', "('sum', 'cumsum', 'nansum')"], {}), "('method', ('sum', 'cumsum', 'nansum'))\n", (30498, 30537), False, 'import pytest\n'), ((30996, 31050), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""method"""', "('prod', 'cumprod')"], {}), "('method', ('prod', 'cumprod'))\n", (31019, 31050), False, 'import pytest\n'), ((1611, 1649), 'itertools.product', 'itertools.product', (['lu_units', 'pu_sample'], {}), '(lu_units, pu_sample)\n', (1628, 1649), False, 'import itertools\n'), ((2210, 2267), 'itertools.product', 'itertools.product', (['(lu_subclasses + [u.LogUnit])', 'pu_sample'], {}), '(lu_subclasses + [u.LogUnit], pu_sample)\n', (2227, 2267), False, 'import itertools\n'), ((5952, 5977), 'numpy.linspace', 'np.linspace', (['(0.0)', '(10.0)', '(6)'], {}), '(0.0, 10.0, 6)\n', (5963, 5977), True, 'import numpy as np\n'), ((5226, 5264), 'itertools.product', 'itertools.product', (['lu_units', 'pu_sample'], {}), '(lu_units, pu_sample)\n', (5243, 5264), False, 'import itertools\n'), ((6616, 6641), 'numpy.linspace', 'np.linspace', (['(0.0)', '(10.0)', '(6)'], {}), '(0.0, 10.0, 6)\n', (6627, 6641), True, 'import numpy as np\n'), ((7355, 7380), 'numpy.linspace', 
'np.linspace', (['(0.0)', '(10.0)', '(6)'], {}), '(0.0, 10.0, 6)\n', (7366, 7380), True, 'import numpy as np\n'), ((7064, 7112), 'itertools.product', 'itertools.product', (['lu_units', 'lu_units', 'pu_sample'], {}), '(lu_units, lu_units, pu_sample)\n', (7081, 7112), False, 'import itertools\n'), ((15283, 15303), 'numpy.arange', 'np.arange', (['(1.0)', '(10.0)'], {}), '(1.0, 10.0)\n', (15292, 15303), True, 'import numpy as np\n'), ((15490, 15534), 'numpy.testing.utils.assert_allclose', 'assert_allclose', (['log_q.physical.value', 'value'], {}), '(log_q.physical.value, value)\n', (15505, 15534), False, 'from numpy.testing.utils import assert_allclose\n'), ((15043, 15086), 'itertools.product', 'itertools.product', (['lq_subclasses', 'pu_sample'], {}), '(lq_subclasses, pu_sample)\n', (15060, 15086), False, 'import itertools\n'), ((17775, 17790), 'numpy.all', 'np.all', (['(q == lq)'], {}), '(q == lq)\n', (17781, 17790), True, 'import numpy as np\n'), ((19493, 19526), 'numpy.all', 'np.all', (['(self.lq.value == lq_value)'], {}), '(self.lq.value == lq_value)\n', (19499, 19526), True, 'import numpy as np\n'), ((19749, 19785), 'numpy.all', 'np.all', (['(self.lq.value == lq_fv.value)'], {}), '(self.lq.value == lq_fv.value)\n', (19755, 19785), True, 'import numpy as np\n'), ((20099, 20133), 'numpy.all', 'np.all', (['(q2.value == self.lq2.value)'], {}), '(q2.value == self.lq2.value)\n', (20105, 20133), True, 'import numpy as np\n'), ((20293, 20316), 'numpy.all', 'np.all', (['(lq3 == self.lq2)'], {}), '(lq3 == self.lq2)\n', (20299, 20316), True, 'import numpy as np\n'), ((22360, 22394), 'numpy.all', 'np.all', (['(r.value == lq2.value / 2.0)'], {}), '(r.value == lq2.value / 2.0)\n', (22366, 22394), True, 'import numpy as np\n'), ((25561, 25622), 'numpy.testing.utils.assert_allclose', 'assert_allclose', (['lq_sf.physical', '(lq.physical * other_physical)'], {}), '(lq_sf.physical, lq.physical * other_physical)\n', (25576, 25622), False, 'from numpy.testing.utils import assert_allclose\n'), ((25659, 25720), 'numpy.testing.utils.assert_allclose', 'assert_allclose', (['lq_sr.physical', '(lq.physical * other_physical)'], {}), '(lq_sr.physical, lq.physical * other_physical)\n', (25674, 25720), False, 'from numpy.testing.utils import assert_allclose\n'), ((25757, 25818), 'numpy.testing.utils.assert_allclose', 'assert_allclose', (['lq_df.physical', '(lq.physical / other_physical)'], {}), '(lq_df.physical, lq.physical / other_physical)\n', (25772, 25818), False, 'from numpy.testing.utils import assert_allclose\n'), ((25855, 25916), 'numpy.testing.utils.assert_allclose', 'assert_allclose', (['lq_dr.physical', '(other_physical / lq.physical)'], {}), '(lq_dr.physical, other_physical / lq.physical)\n', (25870, 25916), False, 'from numpy.testing.utils import assert_allclose\n'), ((27188, 27249), 'numpy.testing.utils.assert_allclose', 'assert_allclose', (['lq_sf.physical', '(lq.physical * other_physical)'], {}), '(lq_sf.physical, lq.physical * other_physical)\n', (27203, 27249), False, 'from numpy.testing.utils import assert_allclose\n'), ((27308, 27369), 'numpy.testing.utils.assert_allclose', 'assert_allclose', (['lq_df.physical', '(lq.physical / other_physical)'], {}), '(lq_df.physical, lq.physical / other_physical)\n', (27323, 27369), False, 'from numpy.testing.utils import assert_allclose\n'), ((29297, 29315), 'numpy.all', 'np.all', (['(lq6 == fv6)'], {}), '(lq6 == fv6)\n', (29303, 29315), True, 'import numpy as np\n'), ((2040, 2064), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (2053, 
2064), False, 'import pytest\n'), ((2101, 2126), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (2114, 2126), False, 'import pytest\n'), ((2848, 2873), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (2861, 2873), False, 'import pytest\n'), ((4414, 4439), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (4427, 4439), False, 'import pytest\n'), ((6187, 6214), 'pytest.raises', 'pytest.raises', (['u.UnitsError'], {}), '(u.UnitsError)\n', (6200, 6214), False, 'import pytest\n'), ((6296, 6323), 'pytest.raises', 'pytest.raises', (['u.UnitsError'], {}), '(u.UnitsError)\n', (6309, 6323), False, 'import pytest\n'), ((6905, 6932), 'pytest.raises', 'pytest.raises', (['u.UnitsError'], {}), '(u.UnitsError)\n', (6918, 6932), False, 'import pytest\n'), ((8165, 8192), 'pytest.raises', 'pytest.raises', (['u.UnitsError'], {}), '(u.UnitsError)\n', (8178, 8192), False, 'import pytest\n'), ((9043, 9070), 'pytest.raises', 'pytest.raises', (['u.UnitsError'], {}), '(u.UnitsError)\n', (9056, 9070), False, 'import pytest\n'), ((9108, 9135), 'pytest.raises', 'pytest.raises', (['u.UnitsError'], {}), '(u.UnitsError)\n', (9121, 9135), False, 'import pytest\n'), ((9173, 9200), 'pytest.raises', 'pytest.raises', (['u.UnitsError'], {}), '(u.UnitsError)\n', (9186, 9200), False, 'import pytest\n'), ((9426, 9453), 'pytest.raises', 'pytest.raises', (['u.UnitsError'], {}), '(u.UnitsError)\n', (9439, 9453), False, 'import pytest\n'), ((9491, 9518), 'pytest.raises', 'pytest.raises', (['u.UnitsError'], {}), '(u.UnitsError)\n', (9504, 9518), False, 'import pytest\n'), ((10808, 10832), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (10821, 10832), False, 'import pytest\n'), ((10874, 10898), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (10887, 10898), False, 'import pytest\n'), ((10943, 10967), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (10956, 10967), False, 'import pytest\n'), ((11003, 11027), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (11016, 11027), False, 'import pytest\n'), ((12513, 12540), 'pytest.raises', 'pytest.raises', (['u.UnitsError'], {}), '(u.UnitsError)\n', (12526, 12540), False, 'import pytest\n'), ((12580, 12607), 'pytest.raises', 'pytest.raises', (['u.UnitsError'], {}), '(u.UnitsError)\n', (12593, 12607), False, 'import pytest\n'), ((12647, 12674), 'pytest.raises', 'pytest.raises', (['u.UnitsError'], {}), '(u.UnitsError)\n', (12660, 12674), False, 'import pytest\n'), ((12800, 12824), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (12813, 12824), False, 'import pytest\n'), ((12861, 12885), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (12874, 12885), False, 'import pytest\n'), ((15548, 15573), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (15561, 15573), False, 'import pytest\n'), ((17499, 17523), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (17512, 17523), False, 'import pytest\n'), ((17970, 18000), 'pytest.raises', 'pytest.raises', (['u.UnitTypeError'], {}), '(u.UnitTypeError)\n', (17983, 18000), False, 'import pytest\n'), ((19328, 19342), 'numpy.arange', 'np.arange', (['(5.0)'], {}), '(5.0)\n', (19337, 19342), True, 'import numpy as np\n'), ((19907, 19931), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (19920, 19931), False, 'import pytest\n'), ((20580, 20607), 'pytest.raises', 'pytest.raises', 
(['u.UnitsError'], {}), '(u.UnitsError)\n', (20593, 20607), False, 'import pytest\n'), ((20652, 20679), 'pytest.raises', 'pytest.raises', (['u.UnitsError'], {}), '(u.UnitsError)\n', (20665, 20679), False, 'import pytest\n'), ((20726, 20753), 'pytest.raises', 'pytest.raises', (['u.UnitsError'], {}), '(u.UnitsError)\n', (20739, 20753), False, 'import pytest\n'), ((21036, 21063), 'pytest.raises', 'pytest.raises', (['u.UnitsError'], {}), '(u.UnitsError)\n', (21049, 21063), False, 'import pytest\n'), ((21110, 21137), 'pytest.raises', 'pytest.raises', (['u.UnitsError'], {}), '(u.UnitsError)\n', (21123, 21137), False, 'import pytest\n'), ((21186, 21213), 'pytest.raises', 'pytest.raises', (['u.UnitsError'], {}), '(u.UnitsError)\n', (21199, 21213), False, 'import pytest\n'), ((21667, 21694), 'pytest.raises', 'pytest.raises', (['u.UnitsError'], {}), '(u.UnitsError)\n', (21680, 21694), False, 'import pytest\n'), ((21736, 21763), 'pytest.raises', 'pytest.raises', (['u.UnitsError'], {}), '(u.UnitsError)\n', (21749, 21763), False, 'import pytest\n'), ((21805, 21832), 'pytest.raises', 'pytest.raises', (['u.UnitsError'], {}), '(u.UnitsError)\n', (21818, 21832), False, 'import pytest\n'), ((21995, 22013), 'numpy.arange', 'np.arange', (['(1)', '(11.0)'], {}), '(1, 11.0)\n', (22004, 22013), True, 'import numpy as np\n'), ((22028, 22055), 'pytest.raises', 'pytest.raises', (['u.UnitsError'], {}), '(u.UnitsError)\n', (22041, 22055), False, 'import pytest\n'), ((22092, 22119), 'pytest.raises', 'pytest.raises', (['u.UnitsError'], {}), '(u.UnitsError)\n', (22105, 22119), False, 'import pytest\n'), ((22156, 22183), 'pytest.raises', 'pytest.raises', (['u.UnitsError'], {}), '(u.UnitsError)\n', (22169, 22183), False, 'import pytest\n'), ((23478, 23504), 'numpy.all', 'np.all', (['(lq ** power == 1.0)'], {}), '(lq ** power == 1.0)\n', (23484, 23504), True, 'import numpy as np\n'), ((23794, 23809), 'numpy.arange', 'np.arange', (['(10.0)'], {}), '(10.0)\n', (23803, 23809), True, 'import numpy as np\n'), ((23930, 23952), 'numpy.all', 'np.all', (['(t.value == 1.0)'], {}), '(t.value == 1.0)\n', (23936, 23952), True, 'import numpy as np\n'), ((24404, 24428), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (24417, 24428), False, 'import pytest\n'), ((24659, 24686), 'pytest.raises', 'pytest.raises', (['u.UnitsError'], {}), '(u.UnitsError)\n', (24672, 24686), False, 'import pytest\n'), ((24721, 24748), 'pytest.raises', 'pytest.raises', (['u.UnitsError'], {}), '(u.UnitsError)\n', (24734, 24748), False, 'import pytest\n'), ((24783, 24810), 'pytest.raises', 'pytest.raises', (['u.UnitsError'], {}), '(u.UnitsError)\n', (24796, 24810), False, 'import pytest\n'), ((26087, 26107), 'numpy.arange', 'np.arange', (['(1.0)', '(10.0)'], {}), '(1.0, 10.0)\n', (26096, 26107), True, 'import numpy as np\n'), ((26125, 26152), 'pytest.raises', 'pytest.raises', (['u.UnitsError'], {}), '(u.UnitsError)\n', (26138, 26152), False, 'import pytest\n'), ((26280, 26307), 'pytest.raises', 'pytest.raises', (['u.UnitsError'], {}), '(u.UnitsError)\n', (26293, 26307), False, 'import pytest\n'), ((27734, 27819), 'numpy.abs', 'np.abs', (['(M_st.physical / (m_st.physical * 4.0 * np.pi * (100.0 * u.pc) ** 2) - 1.0)'], {}), '(M_st.physical / (m_st.physical * 4.0 * np.pi * (100.0 * u.pc) ** 2) -\n 1.0)\n', (27740, 27819), True, 'import numpy as np\n'), ((28768, 28795), 'pytest.raises', 'pytest.raises', (['u.UnitsError'], {}), '(u.UnitsError)\n', (28781, 28795), False, 'import pytest\n'), ((28992, 29019), 'pytest.raises', 'pytest.raises', 
(['u.UnitsError'], {}), '(u.UnitsError)\n', (29005, 29019), False, 'import pytest\n'), ((29060, 29087), 'pytest.raises', 'pytest.raises', (['u.UnitsError'], {}), '(u.UnitsError)\n', (29073, 29087), False, 'import pytest\n'), ((29231, 29250), 'numpy.arange', 'np.arange', (['(1.0)', '(4.0)'], {}), '(1.0, 4.0)\n', (29240, 29250), True, 'import numpy as np\n'), ((29379, 29406), 'pytest.raises', 'pytest.raises', (['u.UnitsError'], {}), '(u.UnitsError)\n', (29392, 29406), False, 'import pytest\n'), ((30791, 30815), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (30804, 30815), False, 'import pytest\n'), ((31101, 31126), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (31114, 31126), False, 'import pytest\n'), ((31182, 31207), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (31195, 31207), False, 'import pytest\n'), ((31780, 31807), 'pytest.raises', 'pytest.raises', (['u.UnitsError'], {}), '(u.UnitsError)\n', (31793, 31807), False, 'import pytest\n'), ((31821, 31844), 'numpy.power', 'np.power', (['self.mJy', '(2.0)'], {}), '(self.mJy, 2.0)\n', (31829, 31844), True, 'import numpy as np\n'), ((31913, 31940), 'pytest.raises', 'pytest.raises', (['u.UnitsError'], {}), '(u.UnitsError)\n', (31926, 31940), False, 'import pytest\n'), ((31954, 31973), 'numpy.square', 'np.square', (['self.mJy'], {}), '(self.mJy)\n', (31963, 31973), True, 'import numpy as np\n'), ((9309, 9336), 'pytest.raises', 'pytest.raises', (['u.UnitsError'], {}), '(u.UnitsError)\n', (9322, 9336), False, 'import pytest\n'), ((19274, 19289), 'numpy.arange', 'np.arange', (['(10.0)'], {}), '(10.0)\n', (19283, 19289), True, 'import numpy as np\n'), ((20420, 20440), 'numpy.arange', 'np.arange', (['(1.0)', '(11.0)'], {}), '(1.0, 11.0)\n', (20429, 20440), True, 'import numpy as np\n'), ((20911, 20931), 'numpy.arange', 'np.arange', (['(1.0)', '(10.0)'], {}), '(1.0, 10.0)\n', (20920, 20931), True, 'import numpy as np\n'), ((21628, 21648), 'numpy.arange', 'np.arange', (['(1.0)', '(11.0)'], {}), '(1.0, 11.0)\n', (21637, 21648), True, 'import numpy as np\n'), ((21913, 21940), 'pytest.raises', 'pytest.raises', (['u.UnitsError'], {}), '(u.UnitsError)\n', (21926, 21940), False, 'import pytest\n'), ((23411, 23430), 'numpy.arange', 'np.arange', (['(1.0)', '(4.0)'], {}), '(1.0, 4.0)\n', (23420, 23430), True, 'import numpy as np\n'), ((23548, 23573), 'numpy.all', 'np.all', (['(lq ** power == lq)'], {}), '(lq ** power == lq)\n', (23554, 23573), True, 'import numpy as np\n'), ((23996, 24012), 'numpy.all', 'np.all', (['(t == lq2)'], {}), '(t == lq2)\n', (24002, 24012), True, 'import numpy as np\n'), ((24367, 24386), 'numpy.arange', 'np.arange', (['(1.0)', '(4.0)'], {}), '(1.0, 4.0)\n', (24376, 24386), True, 'import numpy as np\n'), ((24596, 24616), 'numpy.arange', 'np.arange', (['(1.0)', '(10.0)'], {}), '(1.0, 10.0)\n', (24605, 24616), True, 'import numpy as np\n'), ((25295, 25315), 'numpy.arange', 'np.arange', (['(1.0)', '(10.0)'], {}), '(1.0, 10.0)\n', (25304, 25315), True, 'import numpy as np\n'), ((26215, 26235), 'numpy.arange', 'np.arange', (['(1.0)', '(10.0)'], {}), '(1.0, 10.0)\n', (26224, 26235), True, 'import numpy as np\n'), ((26370, 26390), 'numpy.arange', 'np.arange', (['(1.0)', '(10.0)'], {}), '(1.0, 10.0)\n', (26379, 26390), True, 'import numpy as np\n'), ((26901, 26921), 'numpy.arange', 'np.arange', (['(1.0)', '(10.0)'], {}), '(1.0, 10.0)\n', (26910, 26921), True, 'import numpy as np\n'), ((27958, 27978), 'numpy.arange', 'np.arange', (['(1.0)', '(10.0)'], {}), '(1.0, 
10.0)\n', (27967, 27978), True, 'import numpy as np\n'), ((28131, 28155), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (28144, 28155), False, 'import pytest\n'), ((28297, 28316), 'numpy.arange', 'np.arange', (['(1.0)', '(4.0)'], {}), '(1.0, 4.0)\n', (28306, 28316), True, 'import numpy as np\n'), ((28393, 28423), 'numpy.array', 'np.array', (['[True, False, False]'], {}), '([True, False, False])\n', (28401, 28423), True, 'import numpy as np\n'), ((28463, 28493), 'numpy.array', 'np.array', (['[False, True, False]'], {}), '([False, True, False])\n', (28471, 28493), True, 'import numpy as np\n'), ((28561, 28591), 'numpy.array', 'np.array', (['[True, False, False]'], {}), '([True, False, False])\n', (28569, 28591), True, 'import numpy as np\n'), ((28631, 28661), 'numpy.array', 'np.array', (['[False, True, False]'], {}), '([False, True, False])\n', (28639, 28661), True, 'import numpy as np\n'), ((28879, 28909), 'numpy.array', 'np.array', (['[True, False, False]'], {}), '([True, False, False])\n', (28887, 28909), True, 'import numpy as np\n'), ((28947, 28977), 'numpy.array', 'np.array', (['[True, False, False]'], {}), '([True, False, False])\n', (28955, 28977), True, 'import numpy as np\n'), ((31562, 31585), 'numpy.power', 'np.power', (['self.mJy', '(0.0)'], {}), '(self.mJy, 0.0)\n', (31570, 31585), True, 'import numpy as np\n'), ((31614, 31636), 'numpy.power', 'np.power', (['self.m1', '(1.0)'], {}), '(self.m1, 1.0)\n', (31622, 31636), True, 'import numpy as np\n'), ((31670, 31693), 'numpy.power', 'np.power', (['self.mJy', '(1.0)'], {}), '(self.mJy, 1.0)\n', (31678, 31693), True, 'import numpy as np\n'), ((31728, 31750), 'numpy.power', 'np.power', (['self.m1', '(2.0)'], {}), '(self.m1, 2.0)\n', (31736, 31750), True, 'import numpy as np\n'), ((31996, 32014), 'numpy.square', 'np.square', (['self.m1'], {}), '(self.m1)\n', (32005, 32014), True, 'import numpy as np\n'), ((9984, 10011), 'pytest.raises', 'pytest.raises', (['u.UnitsError'], {}), '(u.UnitsError)\n', (9997, 10011), False, 'import pytest\n'), ((10436, 10450), 'numpy.arange', 'np.arange', (['(3.0)'], {}), '(3.0)\n', (10445, 10450), True, 'import numpy as np\n'), ((11585, 11612), 'pytest.raises', 'pytest.raises', (['u.UnitsError'], {}), '(u.UnitsError)\n', (11598, 11612), False, 'import pytest\n'), ((22732, 22759), 'pytest.raises', 'pytest.raises', (['u.UnitsError'], {}), '(u.UnitsError)\n', (22745, 22759), False, 'import pytest\n'), ((23605, 23632), 'pytest.raises', 'pytest.raises', (['u.UnitsError'], {}), '(u.UnitsError)\n', (23618, 23632), False, 'import pytest\n'), ((29513, 29532), 'numpy.arange', 'np.arange', (['(1.0)', '(5.0)'], {}), '(1.0, 5.0)\n', (29522, 29532), True, 'import numpy as np\n'), ((29577, 29601), 'numpy.arange', 'np.arange', (['(1.0)', '(5.5)', '(0.5)'], {}), '(1.0, 5.5, 0.5)\n', (29586, 29601), True, 'import numpy as np\n'), ((31361, 31380), 'numpy.arange', 'np.arange', (['(1.0)', '(5.0)'], {}), '(1.0, 5.0)\n', (31370, 31380), True, 'import numpy as np\n'), ((31425, 31449), 'numpy.arange', 'np.arange', (['(1.0)', '(5.5)', '(0.5)'], {}), '(1.0, 5.5, 0.5)\n', (31434, 31449), True, 'import numpy as np\n'), ((10361, 10375), 'numpy.arange', 'np.arange', (['(3.0)'], {}), '(3.0)\n', (10370, 10375), True, 'import numpy as np\n'), ((24220, 24247), 'pytest.raises', 'pytest.raises', (['u.UnitsError'], {}), '(u.UnitsError)\n', (24233, 24247), False, 'import pytest\n'), ((12264, 12278), 'numpy.arange', 'np.arange', (['(3.0)'], {}), '(3.0)\n', (12273, 12278), True, 'import numpy as np\n'), ((12338, 12352), 
'numpy.arange', 'np.arange', (['(3.0)'], {}), '(3.0)\n', (12347, 12352), True, 'import numpy as np\n')] |
import os
import numpy as np
import tensorflow as tf
def get_train_data(train_dir, batch_size):
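    """Load pre-saved numpy arrays from ``train_dir`` and return a repeating,
    shuffled, batched ``tf.data.Dataset`` for training."""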
train_images = np.load(os.path.join(train_dir, 'train_images.npy'))
train_labels = np.load(os.path.join(train_dir, 'train_labels.npy'))
print('train_images', train_images.shape, 'train_labels', train_labels.shape)
dataset_train = tf.data.Dataset.from_tensor_slices((train_images, train_labels))
dataset_train = dataset_train.repeat().shuffle(10000).batch(batch_size)
return dataset_train
def get_val_data(val_dir):
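    """Load pre-saved numpy arrays from ``val_dir`` and return them as an
    unbatched ``tf.data.Dataset`` for evaluation."""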
test_images = np.load(os.path.join(val_dir, 'validation_images.npy'))
test_labels = np.load(os.path.join(val_dir, 'validation_labels.npy'))
print('validation_images', test_images.shape, 'validation_labels', test_labels.shape)
dataset_test = tf.data.Dataset.from_tensor_slices((test_images, test_labels))
return dataset_test
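if __name__ == "__main__":
    # Hedged usage sketch, not part of the original module: the directory
    # names, batch size, epoch count and the tiny model below are illustrative
    # assumptions, not values taken from this project.
    train_ds = get_train_data("data/train", batch_size=32)
    val_ds = get_val_data("data/val").batch(32)
    model = tf.keras.Sequential([
        tf.keras.layers.Flatten(),
        tf.keras.layers.Dense(10, activation="softmax"),
    ])
    model.compile(optimizer="adam",
                  loss="sparse_categorical_crossentropy",
                  metrics=["accuracy"])
    # The training dataset repeats indefinitely, so steps_per_epoch is required.
    model.fit(train_ds, epochs=5, steps_per_epoch=100, validation_data=val_ds)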
| [
"os.path.join",
"tensorflow.data.Dataset.from_tensor_slices"
] | [((345, 409), 'tensorflow.data.Dataset.from_tensor_slices', 'tf.data.Dataset.from_tensor_slices', (['(train_images, train_labels)'], {}), '((train_images, train_labels))\n', (379, 409), True, 'import tensorflow as tf\n'), ((799, 861), 'tensorflow.data.Dataset.from_tensor_slices', 'tf.data.Dataset.from_tensor_slices', (['(test_images, test_labels)'], {}), '((test_images, test_labels))\n', (833, 861), True, 'import tensorflow as tf\n'), ((125, 168), 'os.path.join', 'os.path.join', (['train_dir', '"""train_images.npy"""'], {}), "(train_dir, 'train_images.npy')\n", (137, 168), False, 'import os\n'), ((197, 240), 'os.path.join', 'os.path.join', (['train_dir', '"""train_labels.npy"""'], {}), "(train_dir, 'train_labels.npy')\n", (209, 240), False, 'import os\n'), ((567, 613), 'os.path.join', 'os.path.join', (['val_dir', '"""validation_images.npy"""'], {}), "(val_dir, 'validation_images.npy')\n", (579, 613), False, 'import os\n'), ((641, 687), 'os.path.join', 'os.path.join', (['val_dir', '"""validation_labels.npy"""'], {}), "(val_dir, 'validation_labels.npy')\n", (653, 687), False, 'import os\n')] |
"""
Collection of tests asserting things that should be true for
any index subclass. Makes use of the `indices` fixture defined
in pandas/tests/indexes/conftest.py.
"""
import re
import numpy as np
import pytest
from pandas._libs.tslibs import iNaT
from pandas.core.dtypes.common import is_period_dtype, needs_i8_conversion
import pandas as pd
from pandas import (
CategoricalIndex,
DatetimeIndex,
MultiIndex,
PeriodIndex,
RangeIndex,
TimedeltaIndex,
)
import pandas._testing as tm
class TestCommon:
def test_droplevel(self, index):
# GH 21115
if isinstance(index, MultiIndex):
# Tested separately in test_multi.py
return
assert index.droplevel([]).equals(index)
for level in index.name, [index.name]:
if isinstance(index.name, tuple) and level is index.name:
# GH 21121 : droplevel with tuple name
continue
with pytest.raises(ValueError):
index.droplevel(level)
for level in "wrong", ["wrong"]:
with pytest.raises(
KeyError,
match=r"'Requested level \(wrong\) does not match index name \(None\)'",
):
index.droplevel(level)
def test_constructor_non_hashable_name(self, index):
# GH 20527
if isinstance(index, MultiIndex):
pytest.skip("multiindex handled in test_multi.py")
message = "Index.name must be a hashable type"
renamed = [["1"]]
# With .rename()
with pytest.raises(TypeError, match=message):
index.rename(name=renamed)
# With .set_names()
with pytest.raises(TypeError, match=message):
index.set_names(names=renamed)
def test_constructor_unwraps_index(self, index):
if isinstance(index, pd.MultiIndex):
raise pytest.skip("MultiIndex has no ._data")
a = index
b = type(a)(a)
tm.assert_equal(a._data, b._data)
@pytest.mark.parametrize("itm", [101, "no_int"])
# FutureWarning from non-tuple sequence of nd indexing
@pytest.mark.filterwarnings("ignore::FutureWarning")
def test_getitem_error(self, index, itm):
with pytest.raises(IndexError):
index[itm]
@pytest.mark.parametrize(
"fname, sname, expected_name",
[
("A", "A", "A"),
("A", "B", None),
("A", None, None),
(None, "B", None),
(None, None, None),
],
)
def test_corner_union(self, index, fname, sname, expected_name):
# GH 9943 9862
# Test unions with various name combinations
# Do not test MultiIndex or repeats
if isinstance(index, MultiIndex) or not index.is_unique:
pytest.skip("Not for MultiIndex or repeated indices")
# Test copy.union(copy)
first = index.copy().set_names(fname)
second = index.copy().set_names(sname)
union = first.union(second)
expected = index.copy().set_names(expected_name)
tm.assert_index_equal(union, expected)
# Test copy.union(empty)
first = index.copy().set_names(fname)
second = index.drop(index).set_names(sname)
union = first.union(second)
expected = index.copy().set_names(expected_name)
tm.assert_index_equal(union, expected)
# Test empty.union(copy)
first = index.drop(index).set_names(fname)
second = index.copy().set_names(sname)
union = first.union(second)
expected = index.copy().set_names(expected_name)
tm.assert_index_equal(union, expected)
# Test empty.union(empty)
first = index.drop(index).set_names(fname)
second = index.drop(index).set_names(sname)
union = first.union(second)
expected = index.drop(index).set_names(expected_name)
tm.assert_index_equal(union, expected)
@pytest.mark.parametrize(
"fname, sname, expected_name",
[
("A", "A", "A"),
("A", "B", None),
("A", None, None),
(None, "B", None),
(None, None, None),
],
)
def test_union_unequal(self, index, fname, sname, expected_name):
if isinstance(index, MultiIndex) or not index.is_unique:
pytest.skip("Not for MultiIndex or repeated indices")
# test copy.union(subset) - need sort for unicode and string
first = index.copy().set_names(fname)
second = index[1:].set_names(sname)
union = first.union(second).sort_values()
expected = index.set_names(expected_name).sort_values()
tm.assert_index_equal(union, expected)
@pytest.mark.parametrize(
"fname, sname, expected_name",
[
("A", "A", "A"),
("A", "B", None),
("A", None, None),
(None, "B", None),
(None, None, None),
],
)
def test_corner_intersect(self, index, fname, sname, expected_name):
# GH35847
# Test intersections with various name combinations
if isinstance(index, MultiIndex) or not index.is_unique:
pytest.skip("Not for MultiIndex or repeated indices")
# Test copy.intersection(copy)
first = index.copy().set_names(fname)
second = index.copy().set_names(sname)
intersect = first.intersection(second)
expected = index.copy().set_names(expected_name)
tm.assert_index_equal(intersect, expected)
# Test copy.intersection(empty)
first = index.copy().set_names(fname)
second = index.drop(index).set_names(sname)
intersect = first.intersection(second)
expected = index.drop(index).set_names(expected_name)
tm.assert_index_equal(intersect, expected)
# Test empty.intersection(copy)
first = index.drop(index).set_names(fname)
second = index.copy().set_names(sname)
intersect = first.intersection(second)
expected = index.drop(index).set_names(expected_name)
tm.assert_index_equal(intersect, expected)
# Test empty.intersection(empty)
first = index.drop(index).set_names(fname)
second = index.drop(index).set_names(sname)
intersect = first.intersection(second)
expected = index.drop(index).set_names(expected_name)
tm.assert_index_equal(intersect, expected)
@pytest.mark.parametrize(
"fname, sname, expected_name",
[
("A", "A", "A"),
("A", "B", None),
("A", None, None),
(None, "B", None),
(None, None, None),
],
)
def test_intersect_unequal(self, index, fname, sname, expected_name):
if isinstance(index, MultiIndex) or not index.is_unique:
pytest.skip("Not for MultiIndex or repeated indices")
# test copy.intersection(subset) - need sort for unicode and string
first = index.copy().set_names(fname)
second = index[1:].set_names(sname)
intersect = first.intersection(second).sort_values()
expected = index[1:].set_names(expected_name).sort_values()
tm.assert_index_equal(intersect, expected)
def test_to_flat_index(self, index):
# 22866
if isinstance(index, MultiIndex):
pytest.skip("Separate expectation for MultiIndex")
result = index.to_flat_index()
tm.assert_index_equal(result, index)
def test_set_name_methods(self, index):
new_name = "This is the new name for this index"
# don't tests a MultiIndex here (as its tested separated)
if isinstance(index, MultiIndex):
pytest.skip("Skip check for MultiIndex")
original_name = index.name
new_ind = index.set_names([new_name])
assert new_ind.name == new_name
assert index.name == original_name
res = index.rename(new_name, inplace=True)
# should return None
assert res is None
assert index.name == new_name
assert index.names == [new_name]
        # FIXME: don't leave commented-out code
# with pytest.raises(TypeError, match="list-like"):
# # should still fail even if it would be the right length
# ind.set_names("a")
with pytest.raises(ValueError, match="Level must be None"):
index.set_names("a", level=0)
# rename in place just leaves tuples and other containers alone
name = ("A", "B")
index.rename(name, inplace=True)
assert index.name == name
assert index.names == [name]
def test_copy_and_deepcopy(self, index):
from copy import copy, deepcopy
if isinstance(index, MultiIndex):
pytest.skip("Skip check for MultiIndex")
for func in (copy, deepcopy):
idx_copy = func(index)
assert idx_copy is not index
assert idx_copy.equals(index)
new_copy = index.copy(deep=True, name="banana")
assert new_copy.name == "banana"
def test_unique(self, index):
# don't test a MultiIndex here (as its tested separated)
# don't test a CategoricalIndex because categories change (GH 18291)
if isinstance(index, (MultiIndex, CategoricalIndex)):
pytest.skip("Skip check for MultiIndex/CategoricalIndex")
# GH 17896
expected = index.drop_duplicates()
for level in 0, index.name, None:
result = index.unique(level=level)
tm.assert_index_equal(result, expected)
msg = "Too many levels: Index has only 1 level, not 4"
with pytest.raises(IndexError, match=msg):
index.unique(level=3)
msg = (
fr"Requested level \(wrong\) does not match index name "
fr"\({re.escape(index.name.__repr__())}\)"
)
with pytest.raises(KeyError, match=msg):
index.unique(level="wrong")
def test_get_unique_index(self, index):
# MultiIndex tested separately
if not len(index) or isinstance(index, MultiIndex):
pytest.skip("Skip check for empty Index and MultiIndex")
idx = index[[0] * 5]
idx_unique = index[[0]]
# We test against `idx_unique`, so first we make sure it's unique
# and doesn't contain nans.
assert idx_unique.is_unique is True
try:
assert idx_unique.hasnans is False
except NotImplementedError:
pass
for dropna in [False, True]:
result = idx._get_unique_index(dropna=dropna)
tm.assert_index_equal(result, idx_unique)
# nans:
if not index._can_hold_na:
pytest.skip("Skip na-check if index cannot hold na")
if is_period_dtype(index.dtype):
vals = index[[0] * 5]._data
vals[0] = pd.NaT
elif needs_i8_conversion(index.dtype):
vals = index.asi8[[0] * 5]
vals[0] = iNaT
else:
vals = index.values[[0] * 5]
vals[0] = np.nan
vals_unique = vals[:2]
if index.dtype.kind in ["m", "M"]:
# i.e. needs_i8_conversion but not period_dtype, as above
vals = type(index._data)._simple_new(vals, dtype=index.dtype)
vals_unique = type(index._data)._simple_new(vals_unique, dtype=index.dtype)
idx_nan = index._shallow_copy(vals)
idx_unique_nan = index._shallow_copy(vals_unique)
assert idx_unique_nan.is_unique is True
assert idx_nan.dtype == index.dtype
assert idx_unique_nan.dtype == index.dtype
for dropna, expected in zip([False, True], [idx_unique_nan, idx_unique]):
for i in [idx_nan, idx_unique_nan]:
result = i._get_unique_index(dropna=dropna)
tm.assert_index_equal(result, expected)
def test_mutability(self, index):
if not len(index):
pytest.skip("Skip check for empty Index")
msg = "Index does not support mutable operations"
with pytest.raises(TypeError, match=msg):
index[0] = index[0]
def test_view(self, index):
assert index.view().name == index.name
def test_searchsorted_monotonic(self, index):
# GH17271
# not implemented for tuple searches in MultiIndex
# or Intervals searches in IntervalIndex
if isinstance(index, (MultiIndex, pd.IntervalIndex)):
pytest.skip("Skip check for MultiIndex/IntervalIndex")
# nothing to test if the index is empty
if index.empty:
pytest.skip("Skip check for empty Index")
value = index[0]
# determine the expected results (handle dupes for 'right')
expected_left, expected_right = 0, (index == value).argmin()
if expected_right == 0:
# all values are the same, expected_right should be length
expected_right = len(index)
# test _searchsorted_monotonic in all cases
# test searchsorted only for increasing
if index.is_monotonic_increasing:
ssm_left = index._searchsorted_monotonic(value, side="left")
assert expected_left == ssm_left
ssm_right = index._searchsorted_monotonic(value, side="right")
assert expected_right == ssm_right
ss_left = index.searchsorted(value, side="left")
assert expected_left == ss_left
ss_right = index.searchsorted(value, side="right")
assert expected_right == ss_right
elif index.is_monotonic_decreasing:
ssm_left = index._searchsorted_monotonic(value, side="left")
assert expected_left == ssm_left
ssm_right = index._searchsorted_monotonic(value, side="right")
assert expected_right == ssm_right
else:
# non-monotonic should raise.
with pytest.raises(ValueError):
index._searchsorted_monotonic(value, side="left")
def test_pickle(self, index):
original_name, index.name = index.name, "foo"
unpickled = tm.round_trip_pickle(index)
assert index.equals(unpickled)
index.name = original_name
def test_drop_duplicates(self, index, keep):
if isinstance(index, MultiIndex):
pytest.skip("MultiIndex is tested separately")
if isinstance(index, RangeIndex):
pytest.skip(
"RangeIndex is tested in test_drop_duplicates_no_duplicates "
"as it cannot hold duplicates"
)
if len(index) == 0:
pytest.skip(
"empty index is tested in test_drop_duplicates_no_duplicates "
"as it cannot hold duplicates"
)
# make unique index
holder = type(index)
unique_values = list(set(index))
unique_idx = holder(unique_values)
# make duplicated index
n = len(unique_idx)
duplicated_selection = np.random.choice(n, int(n * 1.5))
idx = holder(unique_idx.values[duplicated_selection])
# Series.duplicated is tested separately
expected_duplicated = (
pd.Series(duplicated_selection).duplicated(keep=keep).values
)
tm.assert_numpy_array_equal(idx.duplicated(keep=keep), expected_duplicated)
# Series.drop_duplicates is tested separately
expected_dropped = holder(pd.Series(idx).drop_duplicates(keep=keep))
tm.assert_index_equal(idx.drop_duplicates(keep=keep), expected_dropped)
def test_drop_duplicates_no_duplicates(self, index):
if isinstance(index, MultiIndex):
pytest.skip("MultiIndex is tested separately")
# make unique index
if isinstance(index, RangeIndex):
# RangeIndex cannot have duplicates
unique_idx = index
else:
holder = type(index)
unique_values = list(set(index))
unique_idx = holder(unique_values)
# check on unique index
expected_duplicated = np.array([False] * len(unique_idx), dtype="bool")
tm.assert_numpy_array_equal(unique_idx.duplicated(), expected_duplicated)
result_dropped = unique_idx.drop_duplicates()
tm.assert_index_equal(result_dropped, unique_idx)
# validate shallow copy
assert result_dropped is not unique_idx
def test_drop_duplicates_inplace(self, index):
msg = r"drop_duplicates\(\) got an unexpected keyword argument"
with pytest.raises(TypeError, match=msg):
index.drop_duplicates(inplace=True)
def test_has_duplicates(self, index):
holder = type(index)
if not len(index) or isinstance(index, (MultiIndex, RangeIndex)):
# MultiIndex tested separately in:
# tests/indexes/multi/test_unique_and_duplicates.
# RangeIndex is unique by definition.
pytest.skip("Skip check for empty Index, MultiIndex, and RangeIndex")
idx = holder([index[0]] * 5)
assert idx.is_unique is False
assert idx.has_duplicates is True
@pytest.mark.parametrize(
"dtype",
["int64", "uint64", "float64", "category", "datetime64[ns]", "timedelta64[ns]"],
)
def test_astype_preserves_name(self, index, dtype):
# https://github.com/pandas-dev/pandas/issues/32013
if isinstance(index, MultiIndex):
index.names = ["idx" + str(i) for i in range(index.nlevels)]
else:
index.name = "idx"
try:
# Some of these conversions cannot succeed so we use a try / except
result = index.astype(dtype)
except (ValueError, TypeError, NotImplementedError, SystemError):
return
if isinstance(index, MultiIndex):
assert result.names == index.names
else:
assert result.name == index.name
def test_ravel_deprecation(self, index):
# GH#19956 ravel returning ndarray is deprecated
with tm.assert_produces_warning(FutureWarning):
index.ravel()
@pytest.mark.parametrize("na_position", [None, "middle"])
def test_sort_values_invalid_na_position(index_with_missing, na_position):
if isinstance(index_with_missing, (DatetimeIndex, PeriodIndex, TimedeltaIndex)):
# datetime-like indices will get na_position kwarg as part of
# synchronizing duplicate-sorting behavior, because we currently expect
# them, other indices, and Series to sort differently (xref 35922)
pytest.xfail("sort_values does not support na_position kwarg")
elif isinstance(index_with_missing, (CategoricalIndex, MultiIndex)):
pytest.xfail("missing value sorting order not defined for index type")
if na_position not in ["first", "last"]:
with pytest.raises(ValueError, match=f"invalid na_position: {na_position}"):
index_with_missing.sort_values(na_position=na_position)
@pytest.mark.parametrize("na_position", ["first", "last"])
def test_sort_values_with_missing(index_with_missing, na_position):
# GH 35584. Test that sort_values works with missing values,
# sort non-missing and place missing according to na_position
if isinstance(index_with_missing, (DatetimeIndex, PeriodIndex, TimedeltaIndex)):
# datetime-like indices will get na_position kwarg as part of
# synchronizing duplicate-sorting behavior, because we currently expect
# them, other indices, and Series to sort differently (xref 35922)
pytest.xfail("sort_values does not support na_position kwarg")
elif isinstance(index_with_missing, (CategoricalIndex, MultiIndex)):
pytest.xfail("missing value sorting order not defined for index type")
missing_count = np.sum(index_with_missing.isna())
not_na_vals = index_with_missing[index_with_missing.notna()].values
sorted_values = np.sort(not_na_vals)
if na_position == "first":
sorted_values = np.concatenate([[None] * missing_count, sorted_values])
else:
sorted_values = np.concatenate([sorted_values, [None] * missing_count])
expected = type(index_with_missing)(sorted_values)
result = index_with_missing.sort_values(na_position=na_position)
tm.assert_index_equal(result, expected)
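# Behaviour exercised above, illustrated (values are hypothetical):
#   pd.Index([3.0, None, 1.0]).sort_values(na_position="first")
#       -> missing values first, then 1.0, 3.0
#   pd.Index([3.0, None, 1.0]).sort_values(na_position="last")
#       -> 1.0, 3.0, then missing values
#   any other na_position raises ValueError("invalid na_position: ...")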
| [
"pandas.Series",
"pytest.mark.filterwarnings",
"numpy.sort",
"pandas._testing.round_trip_pickle",
"pandas._testing.assert_index_equal",
"pytest.mark.parametrize",
"pandas._testing.assert_equal",
"pandas.core.dtypes.common.needs_i8_conversion",
"pytest.raises",
"numpy.concatenate",
"pandas._testing.assert_produces_warning",
"pytest.skip",
"pandas.core.dtypes.common.is_period_dtype",
"pytest.xfail"
] | [((18137, 18193), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""na_position"""', "[None, 'middle']"], {}), "('na_position', [None, 'middle'])\n", (18160, 18193), False, 'import pytest\n'), ((19004, 19061), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""na_position"""', "['first', 'last']"], {}), "('na_position', ['first', 'last'])\n", (19027, 19061), False, 'import pytest\n'), ((2026, 2073), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""itm"""', "[101, 'no_int']"], {}), "('itm', [101, 'no_int'])\n", (2049, 2073), False, 'import pytest\n'), ((2138, 2189), 'pytest.mark.filterwarnings', 'pytest.mark.filterwarnings', (['"""ignore::FutureWarning"""'], {}), "('ignore::FutureWarning')\n", (2164, 2189), False, 'import pytest\n'), ((2305, 2459), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""fname, sname, expected_name"""', "[('A', 'A', 'A'), ('A', 'B', None), ('A', None, None), (None, 'B', None), (\n None, None, None)]"], {}), "('fname, sname, expected_name', [('A', 'A', 'A'), (\n 'A', 'B', None), ('A', None, None), (None, 'B', None), (None, None, None)])\n", (2328, 2459), False, 'import pytest\n'), ((3969, 4123), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""fname, sname, expected_name"""', "[('A', 'A', 'A'), ('A', 'B', None), ('A', None, None), (None, 'B', None), (\n None, None, None)]"], {}), "('fname, sname, expected_name', [('A', 'A', 'A'), (\n 'A', 'B', None), ('A', None, None), (None, 'B', None), (None, None, None)])\n", (3992, 4123), False, 'import pytest\n'), ((4741, 4895), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""fname, sname, expected_name"""', "[('A', 'A', 'A'), ('A', 'B', None), ('A', None, None), (None, 'B', None), (\n None, None, None)]"], {}), "('fname, sname, expected_name', [('A', 'A', 'A'), (\n 'A', 'B', None), ('A', None, None), (None, 'B', None), (None, None, None)])\n", (4764, 4895), False, 'import pytest\n'), ((6465, 6619), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""fname, sname, expected_name"""', "[('A', 'A', 'A'), ('A', 'B', None), ('A', None, None), (None, 'B', None), (\n None, None, None)]"], {}), "('fname, sname, expected_name', [('A', 'A', 'A'), (\n 'A', 'B', None), ('A', None, None), (None, 'B', None), (None, None, None)])\n", (6488, 6619), False, 'import pytest\n'), ((17159, 17276), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""dtype"""', "['int64', 'uint64', 'float64', 'category', 'datetime64[ns]', 'timedelta64[ns]']"], {}), "('dtype', ['int64', 'uint64', 'float64', 'category',\n 'datetime64[ns]', 'timedelta64[ns]'])\n", (17182, 17276), False, 'import pytest\n'), ((19942, 19962), 'numpy.sort', 'np.sort', (['not_na_vals'], {}), '(not_na_vals)\n', (19949, 19962), True, 'import numpy as np\n'), ((20293, 20332), 'pandas._testing.assert_index_equal', 'tm.assert_index_equal', (['result', 'expected'], {}), '(result, expected)\n', (20314, 20332), True, 'import pandas._testing as tm\n'), ((1986, 2019), 'pandas._testing.assert_equal', 'tm.assert_equal', (['a._data', 'b._data'], {}), '(a._data, b._data)\n', (2001, 2019), True, 'import pandas._testing as tm\n'), ((3097, 3135), 'pandas._testing.assert_index_equal', 'tm.assert_index_equal', (['union', 'expected'], {}), '(union, expected)\n', (3118, 3135), True, 'import pandas._testing as tm\n'), ((3369, 3407), 'pandas._testing.assert_index_equal', 'tm.assert_index_equal', (['union', 'expected'], {}), '(union, expected)\n', (3390, 3407), True, 'import pandas._testing as tm\n'), ((3641, 3679), 
'pandas._testing.assert_index_equal', 'tm.assert_index_equal', (['union', 'expected'], {}), '(union, expected)\n', (3662, 3679), True, 'import pandas._testing as tm\n'), ((3924, 3962), 'pandas._testing.assert_index_equal', 'tm.assert_index_equal', (['union', 'expected'], {}), '(union, expected)\n', (3945, 3962), True, 'import pandas._testing as tm\n'), ((4696, 4734), 'pandas._testing.assert_index_equal', 'tm.assert_index_equal', (['union', 'expected'], {}), '(union, expected)\n', (4717, 4734), True, 'import pandas._testing as tm\n'), ((5513, 5555), 'pandas._testing.assert_index_equal', 'tm.assert_index_equal', (['intersect', 'expected'], {}), '(intersect, expected)\n', (5534, 5555), True, 'import pandas._testing as tm\n'), ((5812, 5854), 'pandas._testing.assert_index_equal', 'tm.assert_index_equal', (['intersect', 'expected'], {}), '(intersect, expected)\n', (5833, 5854), True, 'import pandas._testing as tm\n'), ((6111, 6153), 'pandas._testing.assert_index_equal', 'tm.assert_index_equal', (['intersect', 'expected'], {}), '(intersect, expected)\n', (6132, 6153), True, 'import pandas._testing as tm\n'), ((6416, 6458), 'pandas._testing.assert_index_equal', 'tm.assert_index_equal', (['intersect', 'expected'], {}), '(intersect, expected)\n', (6437, 6458), True, 'import pandas._testing as tm\n'), ((7218, 7260), 'pandas._testing.assert_index_equal', 'tm.assert_index_equal', (['intersect', 'expected'], {}), '(intersect, expected)\n', (7239, 7260), True, 'import pandas._testing as tm\n'), ((7472, 7508), 'pandas._testing.assert_index_equal', 'tm.assert_index_equal', (['result', 'index'], {}), '(result, index)\n', (7493, 7508), True, 'import pandas._testing as tm\n'), ((10810, 10838), 'pandas.core.dtypes.common.is_period_dtype', 'is_period_dtype', (['index.dtype'], {}), '(index.dtype)\n', (10825, 10838), False, 'from pandas.core.dtypes.common import is_period_dtype, needs_i8_conversion\n'), ((14146, 14173), 'pandas._testing.round_trip_pickle', 'tm.round_trip_pickle', (['index'], {}), '(index)\n', (14166, 14173), True, 'import pandas._testing as tm\n'), ((16294, 16343), 'pandas._testing.assert_index_equal', 'tm.assert_index_equal', (['result_dropped', 'unique_idx'], {}), '(result_dropped, unique_idx)\n', (16315, 16343), True, 'import pandas._testing as tm\n'), ((18587, 18649), 'pytest.xfail', 'pytest.xfail', (['"""sort_values does not support na_position kwarg"""'], {}), "('sort_values does not support na_position kwarg')\n", (18599, 18649), False, 'import pytest\n'), ((19580, 19642), 'pytest.xfail', 'pytest.xfail', (['"""sort_values does not support na_position kwarg"""'], {}), "('sort_values does not support na_position kwarg')\n", (19592, 19642), False, 'import pytest\n'), ((20018, 20073), 'numpy.concatenate', 'np.concatenate', (['[[None] * missing_count, sorted_values]'], {}), '([[None] * missing_count, sorted_values])\n', (20032, 20073), True, 'import numpy as np\n'), ((20108, 20163), 'numpy.concatenate', 'np.concatenate', (['[sorted_values, [None] * missing_count]'], {}), '([sorted_values, [None] * missing_count])\n', (20122, 20163), True, 'import numpy as np\n'), ((1402, 1452), 'pytest.skip', 'pytest.skip', (['"""multiindex handled in test_multi.py"""'], {}), "('multiindex handled in test_multi.py')\n", (1413, 1452), False, 'import pytest\n'), ((1574, 1613), 'pytest.raises', 'pytest.raises', (['TypeError'], {'match': 'message'}), '(TypeError, match=message)\n', (1587, 1613), False, 'import pytest\n'), ((1696, 1735), 'pytest.raises', 'pytest.raises', (['TypeError'], {'match': 'message'}), 
'(TypeError, match=message)\n', (1709, 1735), False, 'import pytest\n'), ((1897, 1936), 'pytest.skip', 'pytest.skip', (['"""MultiIndex has no ._data"""'], {}), "('MultiIndex has no ._data')\n", (1908, 1936), False, 'import pytest\n'), ((2249, 2274), 'pytest.raises', 'pytest.raises', (['IndexError'], {}), '(IndexError)\n', (2262, 2274), False, 'import pytest\n'), ((2816, 2869), 'pytest.skip', 'pytest.skip', (['"""Not for MultiIndex or repeated indices"""'], {}), "('Not for MultiIndex or repeated indices')\n", (2827, 2869), False, 'import pytest\n'), ((4360, 4413), 'pytest.skip', 'pytest.skip', (['"""Not for MultiIndex or repeated indices"""'], {}), "('Not for MultiIndex or repeated indices')\n", (4371, 4413), False, 'import pytest\n'), ((5214, 5267), 'pytest.skip', 'pytest.skip', (['"""Not for MultiIndex or repeated indices"""'], {}), "('Not for MultiIndex or repeated indices')\n", (5225, 5267), False, 'import pytest\n'), ((6860, 6913), 'pytest.skip', 'pytest.skip', (['"""Not for MultiIndex or repeated indices"""'], {}), "('Not for MultiIndex or repeated indices')\n", (6871, 6913), False, 'import pytest\n'), ((7373, 7423), 'pytest.skip', 'pytest.skip', (['"""Separate expectation for MultiIndex"""'], {}), "('Separate expectation for MultiIndex')\n", (7384, 7423), False, 'import pytest\n'), ((7732, 7772), 'pytest.skip', 'pytest.skip', (['"""Skip check for MultiIndex"""'], {}), "('Skip check for MultiIndex')\n", (7743, 7772), False, 'import pytest\n'), ((8341, 8394), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""Level must be None"""'}), "(ValueError, match='Level must be None')\n", (8354, 8394), False, 'import pytest\n'), ((8790, 8830), 'pytest.skip', 'pytest.skip', (['"""Skip check for MultiIndex"""'], {}), "('Skip check for MultiIndex')\n", (8801, 8830), False, 'import pytest\n'), ((9337, 9394), 'pytest.skip', 'pytest.skip', (['"""Skip check for MultiIndex/CategoricalIndex"""'], {}), "('Skip check for MultiIndex/CategoricalIndex')\n", (9348, 9394), False, 'import pytest\n'), ((9559, 9598), 'pandas._testing.assert_index_equal', 'tm.assert_index_equal', (['result', 'expected'], {}), '(result, expected)\n', (9580, 9598), True, 'import pandas._testing as tm\n'), ((9676, 9712), 'pytest.raises', 'pytest.raises', (['IndexError'], {'match': 'msg'}), '(IndexError, match=msg)\n', (9689, 9712), False, 'import pytest\n'), ((9912, 9946), 'pytest.raises', 'pytest.raises', (['KeyError'], {'match': 'msg'}), '(KeyError, match=msg)\n', (9925, 9946), False, 'import pytest\n'), ((10144, 10200), 'pytest.skip', 'pytest.skip', (['"""Skip check for empty Index and MultiIndex"""'], {}), "('Skip check for empty Index and MultiIndex')\n", (10155, 10200), False, 'import pytest\n'), ((10639, 10680), 'pandas._testing.assert_index_equal', 'tm.assert_index_equal', (['result', 'idx_unique'], {}), '(result, idx_unique)\n', (10660, 10680), True, 'import pandas._testing as tm\n'), ((10745, 10797), 'pytest.skip', 'pytest.skip', (['"""Skip na-check if index cannot hold na"""'], {}), "('Skip na-check if index cannot hold na')\n", (10756, 10797), False, 'import pytest\n'), ((10922, 10954), 'pandas.core.dtypes.common.needs_i8_conversion', 'needs_i8_conversion', (['index.dtype'], {}), '(index.dtype)\n', (10941, 10954), False, 'from pandas.core.dtypes.common import is_period_dtype, needs_i8_conversion\n'), ((11984, 12025), 'pytest.skip', 'pytest.skip', (['"""Skip check for empty Index"""'], {}), "('Skip check for empty Index')\n", (11995, 12025), False, 'import pytest\n'), ((12097, 12132), 'pytest.raises', 
'pytest.raises', (['TypeError'], {'match': 'msg'}), '(TypeError, match=msg)\n', (12110, 12132), False, 'import pytest\n'), ((12497, 12551), 'pytest.skip', 'pytest.skip', (['"""Skip check for MultiIndex/IntervalIndex"""'], {}), "('Skip check for MultiIndex/IntervalIndex')\n", (12508, 12551), False, 'import pytest\n'), ((12637, 12678), 'pytest.skip', 'pytest.skip', (['"""Skip check for empty Index"""'], {}), "('Skip check for empty Index')\n", (12648, 12678), False, 'import pytest\n'), ((14352, 14398), 'pytest.skip', 'pytest.skip', (['"""MultiIndex is tested separately"""'], {}), "('MultiIndex is tested separately')\n", (14363, 14398), False, 'import pytest\n'), ((14453, 14565), 'pytest.skip', 'pytest.skip', (['"""RangeIndex is tested in test_drop_duplicates_no_duplicates as it cannot hold duplicates"""'], {}), "(\n 'RangeIndex is tested in test_drop_duplicates_no_duplicates as it cannot hold duplicates'\n )\n", (14464, 14565), False, 'import pytest\n'), ((14645, 14758), 'pytest.skip', 'pytest.skip', (['"""empty index is tested in test_drop_duplicates_no_duplicates as it cannot hold duplicates"""'], {}), "(\n 'empty index is tested in test_drop_duplicates_no_duplicates as it cannot hold duplicates'\n )\n", (14656, 14758), False, 'import pytest\n'), ((15701, 15747), 'pytest.skip', 'pytest.skip', (['"""MultiIndex is tested separately"""'], {}), "('MultiIndex is tested separately')\n", (15712, 15747), False, 'import pytest\n'), ((16561, 16596), 'pytest.raises', 'pytest.raises', (['TypeError'], {'match': 'msg'}), '(TypeError, match=msg)\n', (16574, 16596), False, 'import pytest\n'), ((16965, 17034), 'pytest.skip', 'pytest.skip', (['"""Skip check for empty Index, MultiIndex, and RangeIndex"""'], {}), "('Skip check for empty Index, MultiIndex, and RangeIndex')\n", (16976, 17034), False, 'import pytest\n'), ((18065, 18106), 'pandas._testing.assert_produces_warning', 'tm.assert_produces_warning', (['FutureWarning'], {}), '(FutureWarning)\n', (18091, 18106), True, 'import pandas._testing as tm\n'), ((18731, 18801), 'pytest.xfail', 'pytest.xfail', (['"""missing value sorting order not defined for index type"""'], {}), "('missing value sorting order not defined for index type')\n", (18743, 18801), False, 'import pytest\n'), ((18861, 18931), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': 'f"""invalid na_position: {na_position}"""'}), "(ValueError, match=f'invalid na_position: {na_position}')\n", (18874, 18931), False, 'import pytest\n'), ((19724, 19794), 'pytest.xfail', 'pytest.xfail', (['"""missing value sorting order not defined for index type"""'], {}), "('missing value sorting order not defined for index type')\n", (19736, 19794), False, 'import pytest\n'), ((961, 986), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (974, 986), False, 'import pytest\n'), ((1086, 1190), 'pytest.raises', 'pytest.raises', (['KeyError'], {'match': '"""\'Requested level \\\\(wrong\\\\) does not match index name \\\\(None\\\\)\'"""'}), '(KeyError, match=\n "\'Requested level \\\\(wrong\\\\) does not match index name \\\\(None\\\\)\'")\n', (1099, 1190), False, 'import pytest\n'), ((11866, 11905), 'pandas._testing.assert_index_equal', 'tm.assert_index_equal', (['result', 'expected'], {}), '(result, expected)\n', (11887, 11905), True, 'import pandas._testing as tm\n'), ((13944, 13969), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (13957, 13969), False, 'import pytest\n'), ((15222, 15253), 'pandas.Series', 'pd.Series', (['duplicated_selection'], {}), 
'(duplicated_selection)\n', (15231, 15253), True, 'import pandas as pd\n'), ((15466, 15480), 'pandas.Series', 'pd.Series', (['idx'], {}), '(idx)\n', (15475, 15480), True, 'import pandas as pd\n')] |
# Copyright 2020, <NAME>, mailto:<EMAIL>
#
# Part of "Nuitka", an optimizing Python compiler that is compatible and
# integrates with CPython, but also works on its own.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" Module for constants in Nuitka.
This contains tools to compare, classify and test constants.
"""
import math
from types import BuiltinFunctionType
from nuitka.Builtins import builtin_type_names
from nuitka.PythonVersions import python_version
from .__past__ import ( # pylint: disable=I0021,redefined-builtin
iterItems,
long,
unicode,
xrange,
)
from .Builtins import (
builtin_anon_names,
builtin_anon_value_list,
builtin_exception_values_list,
builtin_named_values_list,
)
NoneType = type(None)
def compareConstants(a, b):
# Many many cases to deal with, pylint: disable=too-many-branches,too-many-return-statements
# Supposed fast path for comparison.
if type(a) is not type(b):
return False
    # At this point the values are either genuinely different, or they are
    # containers that may hold NaN, or complex/float values that may be NaN;
    # every other case can simply use == at the end.
if type(a) is complex:
return compareConstants(a.imag, b.imag) and compareConstants(a.real, b.real)
if type(a) is float:
        # Check the sign first: -0.0 is not 0.0, and -nan is not nan; they
        # already differ in sign.
if math.copysign(1.0, a) != math.copysign(1.0, b):
return False
if math.isnan(a) and math.isnan(b):
return True
return a == b
if type(a) in (tuple, list):
if len(a) != len(b):
return False
for ea, eb in zip(a, b):
if not compareConstants(ea, eb):
return False
return True
if type(a) is dict:
if len(a) != len(b):
return False
for ea1, ea2 in iterItems(a):
for eb1, eb2 in iterItems(b):
if compareConstants(ea1, eb1) and compareConstants(ea2, eb2):
break
else:
return False
return True
if type(a) in (frozenset, set):
if len(a) != len(b):
return False
for ea in a:
if ea not in b:
# Due to NaN values, we need to compare each set element with
# all the other set to be really sure.
for eb in b:
if compareConstants(ea, eb):
break
else:
return False
return True
if type(a) is xrange:
return str(a) == str(b)
# The NaN values of float and complex may let this fail, even if the
# constants are built in the same way, therefore above checks.
return a == b
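# Editor's note -- a small illustration (not part of the original module) of how
# compareConstants differs from a plain "==", based on the branches above:
#
#   compareConstants(float("nan"), float("nan"))             # True  ("==" gives False)
#   compareConstants(0.0, -0.0)                               # False ("==" gives True)
#   compareConstants((1, float("nan")), (1, float("nan")))   # True, element-wise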
# These built-in type references are kind of constant too. The list should be
# complete.
constant_builtin_types = (
int,
str,
float,
list,
tuple,
set,
dict,
slice,
complex,
xrange,
NoneType,
)
if python_version >= 300:
constant_builtin_types += (bytes,)
else:
constant_builtin_types += (
unicode,
long,
# This has no name in Python, but the natural one in C-API.
builtin_anon_names["instance"],
)
def isConstant(constant):
# Too many cases and all return, that is how we do it here,
# pylint: disable=too-many-branches,too-many-return-statements
constant_type = type(constant)
if constant_type is dict:
for key, value in iterItems(constant):
if not isConstant(key):
return False
if not isConstant(value):
return False
return True
elif constant_type in (tuple, list):
for element_value in constant:
if not isConstant(element_value):
return False
return True
elif constant_type is slice:
if (
not isConstant(constant.start)
or not isConstant(constant.stop)
or not isConstant(constant.step)
):
return False
return True
elif constant_type in (
str,
unicode,
complex,
int,
long,
bool,
float,
NoneType,
range,
bytes,
set,
frozenset,
xrange,
bytearray,
):
return True
elif constant in (Ellipsis, NoneType, NotImplemented):
return True
elif constant in builtin_anon_value_list:
return True
elif constant_type is type:
# Maybe pre-build this as a set for quicker testing.
return (
constant.__name__ in builtin_type_names
or constant in builtin_exception_values_list
)
elif constant_type is BuiltinFunctionType and constant in builtin_named_values_list:
# TODO: Some others could also be usable and even interesting, but
# then probably should go into other node types, e.g. str.join is
# a candidate.
return True
else:
return False
def isMutable(constant):
""" Is a constant mutable
    That means a user of a reference to it can modify it. Strings are
    a prime example of an immutable type; dictionaries are mutable.
"""
# Many cases and all return, that is how we do it here,
# pylint: disable=too-many-return-statements
constant_type = type(constant)
if constant_type in (
str,
unicode,
complex,
int,
long,
bool,
float,
NoneType,
range,
bytes,
slice,
xrange,
type,
BuiltinFunctionType,
):
return False
elif constant_type in (dict, list, set, bytearray):
return True
elif constant_type is tuple:
for value in constant:
if isMutable(value):
return True
return False
elif constant_type is frozenset:
for value in constant:
if isMutable(value):
return True
return False
elif constant is Ellipsis:
return False
elif constant is NotImplemented:
return False
else:
assert False, repr(constant)
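# Editor's note -- illustration only, not part of the original module: a tuple is
# reported as mutable as soon as it contains a mutable element, because the
# tuple branch above recurses into its values:
#
#   isMutable((1, "a"))     # False
#   isMutable((1, [2, 3]))  # True, the nested list can be modified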
def isHashable(constant):
""" Is a constant hashable
    That means a user of a reference to it can use it for dict and set
    keys. This is distinct from mutable; there is one type that is not
    mutable and yet not hashable: slices.
"""
# Many cases and all return, that is how we do it here,
# pylint: disable=too-many-return-statements
constant_type = type(constant)
if constant_type in (
str,
unicode,
complex,
int,
long,
bool,
float,
NoneType,
xrange,
bytes,
type,
BuiltinFunctionType,
):
return True
elif constant_type in (dict, list, set, slice, bytearray):
return False
elif constant_type is tuple:
for value in constant:
if not isHashable(value):
return False
return True
elif constant_type is frozenset:
for value in constant:
if not isHashable(value):
return False
return True
elif constant is Ellipsis:
return True
else:
assert False, constant_type
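# Editor's note -- illustration only, not part of the original module: slices are
# the one case that is immutable here yet still unhashable, and tuples are only
# hashable when all of their elements are:
#
#   isHashable(slice(1, 10))   # False
#   isHashable((1, (2, 3)))    # True
#   isHashable((1, [2, 3]))    # False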
def getUnhashableConstant(constant):
# Too many cases and all return, that is how we do it here,
# pylint: disable=too-many-return-statements
constant_type = type(constant)
if constant_type in (
str,
unicode,
complex,
int,
long,
bool,
float,
NoneType,
xrange,
bytes,
type,
BuiltinFunctionType,
):
return None
elif constant_type in (dict, list, set):
return constant
elif constant_type is tuple:
for value in constant:
res = getUnhashableConstant(value)
if res is not None:
return res
return None
elif constant is Ellipsis:
return None
elif constant in constant_builtin_types:
return None
elif constant_type is slice:
return None
else:
assert False, constant_type
def isIterableConstant(constant):
return type(constant) in (
str,
unicode,
list,
tuple,
set,
frozenset,
dict,
xrange,
bytes,
bytearray,
)
def getConstantIterationLength(constant):
assert isIterableConstant(constant)
return len(constant)
def isNumberConstant(constant):
return type(constant) in (int, long, float, bool)
def isIndexConstant(constant):
return type(constant) in (int, long, bool)
def createConstantDict(keys, values):
# Create it proper size immediately.
constant_value = dict.fromkeys(keys, None)
for key, value in zip(keys, values):
constant_value[key] = value
return constant_value
def getConstantWeight(constant):
constant_type = type(constant)
if constant_type is dict:
result = 0
for key, value in iterItems(constant):
result += getConstantWeight(key)
result += getConstantWeight(value)
return result
elif constant_type in (tuple, list, set, frozenset):
result = 0
for element_value in constant:
result += getConstantWeight(element_value)
return result
else:
return 1
def isCompileTimeConstantValue(value):
""" Determine if a value will be usable at compile time.
"""
# This needs to match code in makeCompileTimeConstantReplacementNode
if isConstant(value):
return True
elif type(value) is type:
return True
else:
return False
| [
"math.copysign",
"math.isnan"
] | [((1947, 1968), 'math.copysign', 'math.copysign', (['(1.0)', 'a'], {}), '(1.0, a)\n', (1960, 1968), False, 'import math\n'), ((1972, 1993), 'math.copysign', 'math.copysign', (['(1.0)', 'b'], {}), '(1.0, b)\n', (1985, 1993), False, 'import math\n'), ((2032, 2045), 'math.isnan', 'math.isnan', (['a'], {}), '(a)\n', (2042, 2045), False, 'import math\n'), ((2050, 2063), 'math.isnan', 'math.isnan', (['b'], {}), '(b)\n', (2060, 2063), False, 'import math\n')] |
# Copyright (c) 2012-2013 <NAME> http://garnaat.org/
# Copyright 2012-2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import os
import logging
import time
import threading
from botocore.vendored import six
from botocore.awsrequest import create_request_object
from botocore.exceptions import HTTPClientError
from botocore.httpsession import URLLib3Session
from botocore.utils import is_valid_endpoint_url, get_environ_proxies
from botocore.hooks import first_non_none_response
from botocore.history import get_global_history_recorder
from botocore.response import StreamingBody
from botocore import parsers
logger = logging.getLogger(__name__)
history_recorder = get_global_history_recorder()
DEFAULT_TIMEOUT = 60
MAX_POOL_CONNECTIONS = 10
def convert_to_response_dict(http_response, operation_model):
"""Convert an HTTP response object to a request dict.
This converts the requests library's HTTP response object to
a dictionary.
:type http_response: botocore.vendored.requests.model.Response
:param http_response: The HTTP response from an AWS service request.
:rtype: dict
:return: A response dictionary which will contain the following keys:
* headers (dict)
* status_code (int)
* body (string or file-like object)
"""
response_dict = {
'headers': http_response.headers,
'status_code': http_response.status_code,
'context': {
'operation_name': operation_model.name,
}
}
if response_dict['status_code'] >= 300:
response_dict['body'] = http_response.content
elif operation_model.has_event_stream_output:
response_dict['body'] = http_response.raw
elif operation_model.has_streaming_output:
length = response_dict['headers'].get('content-length')
response_dict['body'] = StreamingBody(http_response.raw, length)
else:
response_dict['body'] = http_response.content
return response_dict
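# Editor's illustration (not part of botocore) of the structure returned above,
# with placeholder values:
#
#   {'headers': {...}, 'status_code': 200,
#    'context': {'operation_name': 'GetObject'},
#    'body': b'...'}   # or a StreamingBody / raw stream for streaming outputs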
class Endpoint(object):
"""
Represents an endpoint for a particular service in a specific
region. Only an endpoint can make requests.
:ivar service: The Service object that describes this endpoints
service.
:ivar host: The fully qualified endpoint hostname.
:ivar session: The session object.
"""
def __init__(self, host, endpoint_prefix, event_emitter,
response_parser_factory=None, http_session=None):
self._endpoint_prefix = endpoint_prefix
self._event_emitter = event_emitter
self.host = host
self._lock = threading.Lock()
if response_parser_factory is None:
response_parser_factory = parsers.ResponseParserFactory()
self._response_parser_factory = response_parser_factory
self.http_session = http_session
if self.http_session is None:
self.http_session = URLLib3Session()
def __repr__(self):
return '%s(%s)' % (self._endpoint_prefix, self.host)
def make_request(self, operation_model, request_dict):
logger.debug("Making request for %s with params: %s",
operation_model, request_dict)
return self._send_request(request_dict, operation_model)
def create_request(self, params, operation_model=None):
request = create_request_object(params)
if operation_model:
request.stream_output = any([
operation_model.has_streaming_output,
operation_model.has_event_stream_output
])
service_id = operation_model.service_model.service_id.hyphenize()
event_name = 'request-created.{service_id}.{op_name}'.format(
service_id=service_id,
op_name=operation_model.name)
self._event_emitter.emit(event_name, request=request,
operation_name=operation_model.name)
prepared_request = self.prepare_request(request)
return prepared_request
def _encode_headers(self, headers):
# In place encoding of headers to utf-8 if they are unicode.
for key, value in headers.items():
if isinstance(value, six.text_type):
headers[key] = value.encode('utf-8')
def prepare_request(self, request):
self._encode_headers(request.headers)
return request.prepare()
def _send_request(self, request_dict, operation_model):
attempts = 1
request = self.create_request(request_dict, operation_model)
context = request_dict['context']
success_response, exception = self._get_response(
request, operation_model, context)
while self._needs_retry(attempts, operation_model, request_dict,
success_response, exception):
attempts += 1
# If there is a stream associated with the request, we need
# to reset it before attempting to send the request again.
# This will ensure that we resend the entire contents of the
# body.
request.reset_stream()
# Create a new request when retried (including a new signature).
request = self.create_request(
request_dict, operation_model)
success_response, exception = self._get_response(
request, operation_model, context)
if success_response is not None and \
'ResponseMetadata' in success_response[1]:
# We want to share num retries, not num attempts.
total_retries = attempts - 1
success_response[1]['ResponseMetadata']['RetryAttempts'] = \
total_retries
if exception is not None:
raise exception
else:
return success_response
def _get_response(self, request, operation_model, context):
# This will return a tuple of (success_response, exception)
# and success_response is itself a tuple of
# (http_response, parsed_dict).
# If an exception occurs then the success_response is None.
# If no exception occurs then exception is None.
success_response, exception = self._do_get_response(
request, operation_model)
kwargs_to_emit = {
'response_dict': None,
'parsed_response': None,
'context': context,
'exception': exception,
}
if success_response is not None:
http_response, parsed_response = success_response
kwargs_to_emit['parsed_response'] = parsed_response
kwargs_to_emit['response_dict'] = convert_to_response_dict(
http_response, operation_model)
service_id = operation_model.service_model.service_id.hyphenize()
self._event_emitter.emit(
'response-received.%s.%s' % (
service_id, operation_model.name), **kwargs_to_emit)
return success_response, exception
def _do_get_response(self, request, operation_model):
try:
logger.debug("Sending http request: %s", request)
history_recorder.record('HTTP_REQUEST', {
'method': request.method,
'headers': request.headers,
'streaming': operation_model.has_streaming_input,
'url': request.url,
'body': request.body
})
service_id = operation_model.service_model.service_id.hyphenize()
event_name = 'before-send.%s.%s' % (service_id, operation_model.name)
responses = self._event_emitter.emit(event_name, request=request)
http_response = first_non_none_response(responses)
if http_response is None:
http_response = self._send(request)
except HTTPClientError as e:
return (None, e)
except Exception as e:
logger.debug("Exception received when sending HTTP request.",
exc_info=True)
return (None, e)
# This returns the http_response and the parsed_data.
response_dict = convert_to_response_dict(http_response, operation_model)
http_response_record_dict = response_dict.copy()
http_response_record_dict['streaming'] = \
operation_model.has_streaming_output
history_recorder.record('HTTP_RESPONSE', http_response_record_dict)
protocol = operation_model.metadata['protocol']
parser = self._response_parser_factory.create_parser(protocol)
parsed_response = parser.parse(
response_dict, operation_model.output_shape)
# Do a second parsing pass to pick up on any modeled error fields
# NOTE: Ideally, we would push this down into the parser classes but
# they currently have no reference to the operation or service model
# The parsers should probably take the operation model instead of
# output shape but we can't change that now
if http_response.status_code >= 300:
self._add_modeled_error_fields(
response_dict, parsed_response,
operation_model, parser,
)
history_recorder.record('PARSED_RESPONSE', parsed_response)
return (http_response, parsed_response), None
def _add_modeled_error_fields(
self, response_dict, parsed_response,
operation_model, parser,
):
error_code = parsed_response.get("Error", {}).get("Code")
if error_code is None:
return
service_model = operation_model.service_model
error_shape = service_model.shape_for_error_code(error_code)
if error_shape is None:
return
modeled_parse = parser.parse(response_dict, error_shape)
# TODO: avoid naming conflicts with ResponseMetadata and Error
parsed_response.update(modeled_parse)
def _needs_retry(self, attempts, operation_model, request_dict,
response=None, caught_exception=None):
service_id = operation_model.service_model.service_id.hyphenize()
event_name = 'needs-retry.%s.%s' % (
service_id,
operation_model.name)
responses = self._event_emitter.emit(
event_name, response=response, endpoint=self,
operation=operation_model, attempts=attempts,
caught_exception=caught_exception, request_dict=request_dict)
handler_response = first_non_none_response(responses)
if handler_response is None:
return False
else:
# Request needs to be retried, and we need to sleep
# for the specified number of times.
logger.debug("Response received to retry, sleeping for "
"%s seconds", handler_response)
time.sleep(handler_response)
return True
def _send(self, request):
return self.http_session.send(request)
class EndpointCreator(object):
def __init__(self, event_emitter):
self._event_emitter = event_emitter
def create_endpoint(self, service_model, region_name, endpoint_url,
verify=None, response_parser_factory=None,
timeout=DEFAULT_TIMEOUT,
max_pool_connections=MAX_POOL_CONNECTIONS,
http_session_cls=URLLib3Session,
proxies=None,
socket_options=None,
client_cert=None):
if not is_valid_endpoint_url(endpoint_url):
raise ValueError("Invalid endpoint: %s" % endpoint_url)
if proxies is None:
proxies = self._get_proxies(endpoint_url)
endpoint_prefix = service_model.endpoint_prefix
logger.debug('Setting %s timeout as %s', endpoint_prefix, timeout)
http_session = http_session_cls(
timeout=timeout,
proxies=proxies,
verify=self._get_verify_value(verify),
max_pool_connections=max_pool_connections,
socket_options=socket_options,
client_cert=client_cert,
)
return Endpoint(
endpoint_url,
endpoint_prefix=endpoint_prefix,
event_emitter=self._event_emitter,
response_parser_factory=response_parser_factory,
http_session=http_session
)
def _get_proxies(self, url):
# We could also support getting proxies from a config file,
# but for now proxy support is taken from the environment.
return get_environ_proxies(url)
def _get_verify_value(self, verify):
# This is to account for:
# https://github.com/kennethreitz/requests/issues/1436
# where we need to honor REQUESTS_CA_BUNDLE because we're creating our
# own request objects.
# First, if verify is not None, then the user explicitly specified
# a value so this automatically wins.
if verify is not None:
return verify
# Otherwise use the value from REQUESTS_CA_BUNDLE, or default to
# True if the env var does not exist.
return os.environ.get('REQUESTS_CA_BUNDLE', True)
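# Editor's sketch (not part of botocore) of how these classes fit together.
# Clients normally do this wiring internally; the emitter, the model objects and
# the URL below are illustrative assumptions, shown here only as comments:
#
#   from botocore.hooks import HierarchicalEmitter
#   creator = EndpointCreator(HierarchicalEmitter())
#   endpoint = creator.create_endpoint(
#       service_model, region_name="us-east-1",
#       endpoint_url="https://s3.us-east-1.amazonaws.com")
#   http_response, parsed = endpoint.make_request(operation_model, request_dict)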
| [
"logging.getLogger",
"botocore.utils.get_environ_proxies",
"botocore.utils.is_valid_endpoint_url",
"botocore.awsrequest.create_request_object",
"threading.Lock",
"botocore.history.get_global_history_recorder",
"os.environ.get",
"time.sleep",
"botocore.hooks.first_non_none_response",
"botocore.response.StreamingBody",
"botocore.parsers.ResponseParserFactory",
"botocore.httpsession.URLLib3Session"
] | [((1122, 1149), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1139, 1149), False, 'import logging\n'), ((1169, 1198), 'botocore.history.get_global_history_recorder', 'get_global_history_recorder', ([], {}), '()\n', (1196, 1198), False, 'from botocore.history import get_global_history_recorder\n'), ((3068, 3084), 'threading.Lock', 'threading.Lock', ([], {}), '()\n', (3082, 3084), False, 'import threading\n'), ((3795, 3824), 'botocore.awsrequest.create_request_object', 'create_request_object', (['params'], {}), '(params)\n', (3816, 3824), False, 'from botocore.awsrequest import create_request_object\n'), ((10973, 11007), 'botocore.hooks.first_non_none_response', 'first_non_none_response', (['responses'], {}), '(responses)\n', (10996, 11007), False, 'from botocore.hooks import first_non_none_response\n'), ((13088, 13112), 'botocore.utils.get_environ_proxies', 'get_environ_proxies', (['url'], {}), '(url)\n', (13107, 13112), False, 'from botocore.utils import is_valid_endpoint_url, get_environ_proxies\n'), ((13674, 13716), 'os.environ.get', 'os.environ.get', (['"""REQUESTS_CA_BUNDLE"""', '(True)'], {}), "('REQUESTS_CA_BUNDLE', True)\n", (13688, 13716), False, 'import os\n'), ((3167, 3198), 'botocore.parsers.ResponseParserFactory', 'parsers.ResponseParserFactory', ([], {}), '()\n', (3196, 3198), False, 'from botocore import parsers\n'), ((3374, 3390), 'botocore.httpsession.URLLib3Session', 'URLLib3Session', ([], {}), '()\n', (3388, 3390), False, 'from botocore.httpsession import URLLib3Session\n'), ((8167, 8201), 'botocore.hooks.first_non_none_response', 'first_non_none_response', (['responses'], {}), '(responses)\n', (8190, 8201), False, 'from botocore.hooks import first_non_none_response\n'), ((11335, 11363), 'time.sleep', 'time.sleep', (['handler_response'], {}), '(handler_response)\n', (11345, 11363), False, 'import time\n'), ((12036, 12071), 'botocore.utils.is_valid_endpoint_url', 'is_valid_endpoint_url', (['endpoint_url'], {}), '(endpoint_url)\n', (12057, 12071), False, 'from botocore.utils import is_valid_endpoint_url, get_environ_proxies\n'), ((2335, 2375), 'botocore.response.StreamingBody', 'StreamingBody', (['http_response.raw', 'length'], {}), '(http_response.raw, length)\n', (2348, 2375), False, 'from botocore.response import StreamingBody\n')] |
from numpy import genfromtxt
import matplotlib.pyplot as plt
import mpl_finance
import numpy as np
import uuid
import matplotlib
# Input your csv file here with historical data
ad = genfromtxt(f"../financial_data/SM.csv", delimiter=",", dtype=str)
def convolve_sma(array, period):
return np.convolve(array, np.ones((period,)) / period, mode="valid")
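# Editor's note (not in the original script): convolve_sma is a plain
# period-length simple moving average; mode="valid" drops the partial windows,
# so the result has len(array) - period + 1 points, e.g.
#
#   convolve_sma([1, 2, 3, 4, 5, 6], 3)   # -> array([2., 3., 4., 5.])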
def graphwerk(start, finish):
open = []
high = []
low = []
close = []
volume = []
# decision = []
date = []
c_open = []
c_high = []
c_low = []
c_close = []
c_volume = []
c_date = []
c_start = start + 12
for x in range(finish - start):
c_open.append(float(pd[c_start][1]))
c_high.append(float(pd[c_start][2]))
c_low.append(float(pd[c_start][3]))
c_close.append(float(pd[c_start][4]))
c_volume.append(float(pd[c_start][5]))
c_date.append(pd[c_start][0])
c_start = c_start + 1
for x in range(finish - start):
        # The column order below matches the eurusd.csv file. Other financial data
        # files use different column orders, so check which columns hold the open,
        # high, low and close values before reusing this.
open.append(float(pd[start][1]))
high.append(float(pd[start][2]))
low.append(float(pd[start][3]))
close.append(float(pd[start][4]))
volume.append(float(pd[start][5]))
# decision.append(str(pd[start][6]))
date.append(pd[start][0])
start = start + 1
decision = "sell"
min_forecast = min(c_low)
max_forecast = max(c_high)
if close[-1] * 1.03 < max_forecast:
decision = "buy"
# for z in all_prices:
# if close[-1] * 1.03 < z:
# decision = "buy"
sma = convolve_sma(close, 5)
smb = list(sma)
diff = sma[-1] - sma[-2]
for x in range(len(close) - len(smb)):
smb.append(smb[-1] + diff)
fig = plt.figure(num=1, figsize=(3, 3), dpi=50, facecolor="w", edgecolor="k")
dx = fig.add_subplot(111)
# mpl_finance.volume_overlay(ax, open, close, volume, width=0.4, colorup='b', colordown='b', alpha=1)
mpl_finance.candlestick2_ochl(
dx, open, close, high, low, width=1.5, colorup="g", colordown="r", alpha=0.5
)
plt.autoscale()
# plt.plot(smb, color="blue", linewidth=10, alpha=0.5)
plt.axis("off")
if decision == "sell":
print("last value: " + str(close[-1]))
print(
"range of values in next 13 bars: "
+ str(min_forecast)
+ "-"
+ str(max_forecast)
)
print("sell")
plt.savefig(sell_dir + str(uuid.uuid4()) + ".jpg", bbox_inches="tight")
else:
print("last value: " + str(close[-1]))
print(
"range of values in next 13 bars: "
+ str(min_forecast)
+ "-"
+ str(max_forecast)
)
print("buy")
plt.savefig(buy_dir + str(uuid.uuid4()) + ".jpg", bbox_inches="tight")
# if close[-1] >= close_next:
# print('previous value is bigger')
# print('last value: ' + str(close[-1]))
# print('next value: ' + str(close_next))
# print('sell')
# plt.savefig(sell_dir + str(uuid.uuid4()) +'.jpg', bbox_inches='tight')
# else:
# print('previous value is smaller')
# print('last value: '+ str(close[-1]))
# print('next value: ' + str(close_next))
# print('buy')
# plt.savefig(buy_dir + str(uuid.uuid4())+'.jpg', bbox_inches='tight')
# plt.show()
open.clear()
close.clear()
volume.clear()
high.clear()
low.clear()
plt.cla()
plt.clf()
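# Editor's note (added comment, not in the original script): graphwerk(start,
# finish) charts the 12 bars in [start, finish) and peeks at the following 12
# bars; if the last close * 1.03 is still below the highest high of that
# look-ahead window, the chart image is saved under buy/, otherwise under sell/.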
# output = []
# with open("STOCKbluechip.csv") as f:
# output = [str(s) for line in f.readlines() for s in line[:-1].split(",")]
# for stock in output:
pd = ad  # note: despite the name, 'pd' is the raw price array here, not pandas
buy_dir = "../data/train/buy/"
sell_dir = "../data/train/sell/"
iter = 0
# Each call needs 12 chart bars plus 12 look-ahead bars, so stop once fewer
# than 24 rows remain instead of running off the end of the data.
while iter + 24 <= len(pd):
    graphwerk(iter, iter + 12)
    iter = iter + 2
| [
"mpl_finance.candlestick2_ochl",
"numpy.ones",
"matplotlib.pyplot.clf",
"uuid.uuid4",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.autoscale",
"matplotlib.pyplot.axis",
"matplotlib.pyplot.cla",
"numpy.genfromtxt"
] | [((184, 249), 'numpy.genfromtxt', 'genfromtxt', (['f"""../financial_data/SM.csv"""'], {'delimiter': '""","""', 'dtype': 'str'}), "(f'../financial_data/SM.csv', delimiter=',', dtype=str)\n", (194, 249), False, 'from numpy import genfromtxt\n'), ((1909, 1980), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'num': '(1)', 'figsize': '(3, 3)', 'dpi': '(50)', 'facecolor': '"""w"""', 'edgecolor': '"""k"""'}), "(num=1, figsize=(3, 3), dpi=50, facecolor='w', edgecolor='k')\n", (1919, 1980), True, 'import matplotlib.pyplot as plt\n'), ((2121, 2232), 'mpl_finance.candlestick2_ochl', 'mpl_finance.candlestick2_ochl', (['dx', 'open', 'close', 'high', 'low'], {'width': '(1.5)', 'colorup': '"""g"""', 'colordown': '"""r"""', 'alpha': '(0.5)'}), "(dx, open, close, high, low, width=1.5,\n colorup='g', colordown='r', alpha=0.5)\n", (2150, 2232), False, 'import mpl_finance\n'), ((2247, 2262), 'matplotlib.pyplot.autoscale', 'plt.autoscale', ([], {}), '()\n', (2260, 2262), True, 'import matplotlib.pyplot as plt\n'), ((2326, 2341), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (2334, 2341), True, 'import matplotlib.pyplot as plt\n'), ((3674, 3683), 'matplotlib.pyplot.cla', 'plt.cla', ([], {}), '()\n', (3681, 3683), True, 'import matplotlib.pyplot as plt\n'), ((3688, 3697), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (3695, 3697), True, 'import matplotlib.pyplot as plt\n'), ((315, 333), 'numpy.ones', 'np.ones', (['(period,)'], {}), '((period,))\n', (322, 333), True, 'import numpy as np\n'), ((2629, 2641), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (2639, 2641), False, 'import uuid\n'), ((2941, 2953), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (2951, 2953), False, 'import uuid\n')] |
"""
The Tornado Framework
By <NAME>
University of Ottawa, Ontario, Canada
E-mail: apesaran -at- uottawa -dot- ca / alipsgh -at- gmail -dot- com
"""
import re
from data_structures.attribute import Attribute
from dictionary.tornado_dictionary import TornadoDic
class ARFFReader:
"""This class is used to read a .arff file."""
@staticmethod
def read(file_path):
labels = []
attributes = []
attributes_min_max = []
records = []
data_flag = False
reader = open(file_path, "r")
for line in reader:
if line.strip() == '':
continue
if line.startswith("@attribute") or line.startswith("@ATTRIBUTE"):
line = line.strip('\n\r\t')
line = line.split(' ')
attribute_name = line[1]
attribute_value_range = line[2]
attribute = Attribute()
attribute.set_name(attribute_name)
if attribute_value_range.lower() in ['numeric', 'real', 'integer']:
attribute_type = TornadoDic.NUMERIC_ATTRIBUTE
attribute_value_range = []
attributes_min_max.append([0, 0])
else:
attribute_type = TornadoDic.NOMINAL_ATTRIBUTE
attribute_value_range = attribute_value_range.strip('{}').replace("'", "")
attribute_value_range = attribute_value_range.split(',')
attributes_min_max.append([None, None])
attribute.set_type(attribute_type)
attribute.set_possible_values(attribute_value_range)
attributes.append(attribute)
elif line.startswith("@data") or line.startswith("@DATA"):
data_flag = True
labels = attributes[len(attributes) - 1].POSSIBLE_VALUES
attributes.pop(len(attributes) - 1)
continue
elif data_flag is True:
                line = re.sub(r'\s+', '', line)
elements = line.split(',')
for i in range(0, len(elements) - 1):
if attributes[i].TYPE == TornadoDic.NUMERIC_ATTRIBUTE:
elements[i] = float(elements[i])
min_value = attributes_min_max[i][0]
max_value = attributes_min_max[i][1]
if elements[i] < min_value:
min_value = elements[i]
elif elements[i] > max_value:
max_value = elements[i]
attributes_min_max[i] = [min_value, max_value]
records.append(elements)
for i in range(0, len(attributes)):
if attributes[i].TYPE == TornadoDic.NUMERIC_ATTRIBUTE:
attributes[i].set_bounds_values(attributes_min_max[i][0], attributes_min_max[i][1])
return labels, attributes, records
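# Editor's sketch (not part of the original class; the file name is illustrative):
#
#   labels, attributes, records = ARFFReader.read("datasets/sea.arff")
#   # labels     -> the class values, taken from the last @attribute declaration
#   # attributes -> Attribute objects for the remaining attributes
#   # records    -> one list per @data row, numeric columns already cast to float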
| [
"re.sub",
"data_structures.attribute.Attribute"
] | [((970, 981), 'data_structures.attribute.Attribute', 'Attribute', ([], {}), '()\n', (979, 981), False, 'from data_structures.attribute import Attribute\n'), ((2108, 2132), 're.sub', 're.sub', (['"""\\\\s+"""', '""""""', 'line'], {}), "('\\\\s+', '', line)\n", (2114, 2132), False, 'import re\n')] |