Dataset schema (column: type, observed range):

- repo_name: string, length 6–100
- path: string, length 4–294
- copies: string, length 1–5
- size: string, length 4–6
- content: string, length 606–896k
- license: string, 15 classes
- var_hash: int64, -9,223,186,179,200,150,000 to 9,223,291,175B
- doc_hash: int64, -9,223,304,365,658,930,000 to 9,223,309,051B
- line_mean: float64, 3.5–99.8
- line_max: int64, 13–999
- alpha_frac: float64, 0.25–0.97
- autogenerated: bool, 1 class

repo_name | path | copies | size | content | license | var_hash | doc_hash | line_mean | line_max | alpha_frac | autogenerated
---|---|---|---|---|---|---|---|---|---|---|---
akhilari7/pa-dude | lib/python2.7/site-packages/django/db/migrations/state.py | 31 | 25662 | from __future__ import unicode_literals
import copy
from collections import OrderedDict
from contextlib import contextmanager
from django.apps import AppConfig
from django.apps.registry import Apps, apps as global_apps
from django.conf import settings
from django.db import models
from django.db.models.fields.proxy import OrderWrt
from django.db.models.fields.related import RECURSIVE_RELATIONSHIP_CONSTANT
from django.db.models.options import DEFAULT_NAMES, normalize_together
from django.db.models.utils import make_model_tuple
from django.utils import six
from django.utils.encoding import force_text, smart_text
from django.utils.functional import cached_property
from django.utils.module_loading import import_string
from django.utils.version import get_docs_version
from .exceptions import InvalidBasesError
def _get_app_label_and_model_name(model, app_label=''):
if isinstance(model, six.string_types):
split = model.split('.', 1)
return (tuple(split) if len(split) == 2 else (app_label, split[0]))
else:
return model._meta.app_label, model._meta.model_name
def get_related_models_recursive(model):
"""
Returns all models that have a direct or indirect relationship
to the given model.
Relationships are either defined by explicit relational fields, like
ForeignKey, ManyToManyField or OneToOneField, or by inheriting from another
model (a superclass is related to its subclasses, but not vice versa). Note,
however, that a model inheriting from a concrete model is also related to
its superclass through the implicit *_ptr OneToOneField on the subclass.
"""
def _related_models(m):
return [
f.related_model for f in m._meta.get_fields(include_parents=True, include_hidden=True)
if f.is_relation and f.related_model is not None and not isinstance(f.related_model, six.string_types)
] + [
subclass for subclass in m.__subclasses__()
if issubclass(subclass, models.Model)
]
seen = set()
queue = _related_models(model)
for rel_mod in queue:
rel_app_label, rel_model_name = rel_mod._meta.app_label, rel_mod._meta.model_name
if (rel_app_label, rel_model_name) in seen:
continue
seen.add((rel_app_label, rel_model_name))
queue.extend(_related_models(rel_mod))
return seen - {(model._meta.app_label, model._meta.model_name)}
class ProjectState(object):
"""
Represents the entire project's overall state.
This is the item that is passed around - we do it here rather than at the
app level so that cross-app FKs/etc. resolve properly.
"""
def __init__(self, models=None, real_apps=None):
self.models = models or {}
# Apps to include from main registry, usually unmigrated ones
self.real_apps = real_apps or []
def add_model(self, model_state):
app_label, model_name = model_state.app_label, model_state.name_lower
self.models[(app_label, model_name)] = model_state
if 'apps' in self.__dict__: # hasattr would cache the property
self.reload_model(app_label, model_name)
def remove_model(self, app_label, model_name):
del self.models[app_label, model_name]
if 'apps' in self.__dict__: # hasattr would cache the property
self.apps.unregister_model(app_label, model_name)
# Need to do this explicitly since unregister_model() doesn't clear
# the cache automatically (#24513)
self.apps.clear_cache()
def reload_model(self, app_label, model_name):
if 'apps' in self.__dict__: # hasattr would cache the property
try:
old_model = self.apps.get_model(app_label, model_name)
except LookupError:
related_models = set()
else:
# Get all relations to and from the old model before reloading,
# as _meta.apps may change
related_models = get_related_models_recursive(old_model)
# Get all outgoing references from the model to be rendered
model_state = self.models[(app_label, model_name)]
# Directly related models are the models pointed to by ForeignKeys,
# OneToOneFields, and ManyToManyFields.
direct_related_models = set()
for name, field in model_state.fields:
if field.is_relation:
if field.remote_field.model == RECURSIVE_RELATIONSHIP_CONSTANT:
continue
rel_app_label, rel_model_name = _get_app_label_and_model_name(field.related_model, app_label)
direct_related_models.add((rel_app_label, rel_model_name.lower()))
# For all direct related models recursively get all related models.
related_models.update(direct_related_models)
for rel_app_label, rel_model_name in direct_related_models:
try:
rel_model = self.apps.get_model(rel_app_label, rel_model_name)
except LookupError:
pass
else:
related_models.update(get_related_models_recursive(rel_model))
# Include the model itself
related_models.add((app_label, model_name))
# Unregister all related models
with self.apps.bulk_update():
for rel_app_label, rel_model_name in related_models:
self.apps.unregister_model(rel_app_label, rel_model_name)
states_to_be_rendered = []
        # Gather all model states of those models that will be rerendered.
# This includes:
# 1. All related models of unmigrated apps
for model_state in self.apps.real_models:
if (model_state.app_label, model_state.name_lower) in related_models:
states_to_be_rendered.append(model_state)
# 2. All related models of migrated apps
for rel_app_label, rel_model_name in related_models:
try:
model_state = self.models[rel_app_label, rel_model_name]
except KeyError:
pass
else:
states_to_be_rendered.append(model_state)
# Render all models
self.apps.render_multiple(states_to_be_rendered)
def clone(self):
"Returns an exact copy of this ProjectState"
new_state = ProjectState(
models={k: v.clone() for k, v in self.models.items()},
real_apps=self.real_apps,
)
if 'apps' in self.__dict__:
new_state.apps = self.apps.clone()
return new_state
@cached_property
def apps(self):
return StateApps(self.real_apps, self.models)
@property
def concrete_apps(self):
self.apps = StateApps(self.real_apps, self.models, ignore_swappable=True)
return self.apps
@classmethod
def from_apps(cls, apps):
"Takes in an Apps and returns a ProjectState matching it"
app_models = {}
for model in apps.get_models(include_swapped=True):
model_state = ModelState.from_model(model)
app_models[(model_state.app_label, model_state.name_lower)] = model_state
return cls(app_models)
def __eq__(self, other):
if set(self.models.keys()) != set(other.models.keys()):
return False
if set(self.real_apps) != set(other.real_apps):
return False
return all(model == other.models[key] for key, model in self.models.items())
def __ne__(self, other):
return not (self == other)
class AppConfigStub(AppConfig):
"""
Stubs a Django AppConfig. Only provides a label, and a dict of models.
"""
# Not used, but required by AppConfig.__init__
path = ''
def __init__(self, label):
self.label = label
# App-label and app-name are not the same thing, so technically passing
# in the label here is wrong. In practice, migrations don't care about
# the app name, but we need something unique, and the label works fine.
super(AppConfigStub, self).__init__(label, None)
def import_models(self, all_models):
self.models = all_models
class StateApps(Apps):
"""
Subclass of the global Apps registry class to better handle dynamic model
additions and removals.
"""
def __init__(self, real_apps, models, ignore_swappable=False):
# Any apps in self.real_apps should have all their models included
# in the render. We don't use the original model instances as there
# are some variables that refer to the Apps object.
# FKs/M2Ms from real apps are also not included as they just
# mess things up with partial states (due to lack of dependencies)
self.real_models = []
for app_label in real_apps:
app = global_apps.get_app_config(app_label)
for model in app.get_models():
self.real_models.append(ModelState.from_model(model, exclude_rels=True))
# Populate the app registry with a stub for each application.
app_labels = {model_state.app_label for model_state in models.values()}
app_configs = [AppConfigStub(label) for label in sorted(real_apps + list(app_labels))]
super(StateApps, self).__init__(app_configs)
self.render_multiple(list(models.values()) + self.real_models)
# There shouldn't be any operations pending at this point.
pending_models = set(self._pending_operations)
if ignore_swappable:
pending_models -= {make_model_tuple(settings.AUTH_USER_MODEL)}
if pending_models:
raise ValueError(self._pending_models_error(pending_models))
def _pending_models_error(self, pending_models):
"""
Almost all internal uses of lazy operations are to resolve string model
references in related fields. We can extract the fields from those
operations and use them to provide a nicer error message.
This will work for any function passed to lazy_related_operation() that
has a keyword argument called 'field'.
"""
def extract_field(operation):
# operation is annotated with the field in
# apps.registry.Apps.lazy_model_operation().
return getattr(operation, 'field', None)
def extract_field_names(operations):
return (str(field) for field in map(extract_field, operations) if field)
get_ops = self._pending_operations.__getitem__
# Ordered list of pairs of the form
# ((app_label, model_name), [field_name_1, field_name_2, ...])
models_fields = sorted(
(model_key, sorted(extract_field_names(get_ops(model_key))))
for model_key in pending_models
)
def model_text(model_key, fields):
field_list = ", ".join(fields)
field_text = " (referred to by fields: %s)" % field_list if fields else ""
return ("%s.%s" % model_key) + field_text
msg = "Unhandled pending operations for models:"
return "\n ".join([msg] + [model_text(*i) for i in models_fields])
@contextmanager
def bulk_update(self):
# Avoid clearing each model's cache for each change. Instead, clear
# all caches when we're finished updating the model instances.
ready = self.ready
self.ready = False
try:
yield
finally:
self.ready = ready
self.clear_cache()
def render_multiple(self, model_states):
# We keep trying to render the models in a loop, ignoring invalid
# base errors, until the size of the unrendered models doesn't
# decrease by at least one, meaning there's a base dependency loop/
# missing base.
if not model_states:
return
        # Avoid expiring every model cache on each render.
with self.bulk_update():
unrendered_models = model_states
while unrendered_models:
new_unrendered_models = []
for model in unrendered_models:
try:
model.render(self)
except InvalidBasesError:
new_unrendered_models.append(model)
if len(new_unrendered_models) == len(unrendered_models):
raise InvalidBasesError(
"Cannot resolve bases for %r\nThis can happen if you are inheriting models from an "
"app with migrations (e.g. contrib.auth)\n in an app with no migrations; see "
"https://docs.djangoproject.com/en/%s/topics/migrations/#dependencies "
"for more" % (new_unrendered_models, get_docs_version())
)
unrendered_models = new_unrendered_models
def clone(self):
"""
Return a clone of this registry, mainly used by the migration framework.
"""
clone = StateApps([], {})
clone.all_models = copy.deepcopy(self.all_models)
clone.app_configs = copy.deepcopy(self.app_configs)
# No need to actually clone them, they'll never change
clone.real_models = self.real_models
return clone
def register_model(self, app_label, model):
self.all_models[app_label][model._meta.model_name] = model
if app_label not in self.app_configs:
self.app_configs[app_label] = AppConfigStub(app_label)
self.app_configs[app_label].models = OrderedDict()
self.app_configs[app_label].models[model._meta.model_name] = model
self.do_pending_operations(model)
self.clear_cache()
def unregister_model(self, app_label, model_name):
try:
del self.all_models[app_label][model_name]
del self.app_configs[app_label].models[model_name]
except KeyError:
pass
class ModelState(object):
"""
Represents a Django Model. We don't use the actual Model class
as it's not designed to have its options changed - instead, we
mutate this one and then render it into a Model as required.
Note that while you are allowed to mutate .fields, you are not allowed
to mutate the Field instances inside there themselves - you must instead
assign new ones, as these are not detached during a clone.
"""
def __init__(self, app_label, name, fields, options=None, bases=None, managers=None):
self.app_label = app_label
self.name = force_text(name)
self.fields = fields
self.options = options or {}
self.bases = bases or (models.Model, )
self.managers = managers or []
# Sanity-check that fields is NOT a dict. It must be ordered.
if isinstance(self.fields, dict):
raise ValueError("ModelState.fields cannot be a dict - it must be a list of 2-tuples.")
for name, field in fields:
# Sanity-check that fields are NOT already bound to a model.
if hasattr(field, 'model'):
raise ValueError(
'ModelState.fields cannot be bound to a model - "%s" is.' % name
)
# Sanity-check that relation fields are NOT referring to a model class.
if field.is_relation and hasattr(field.related_model, '_meta'):
raise ValueError(
'ModelState.fields cannot refer to a model class - "%s.to" does. '
'Use a string reference instead.' % name
)
if field.many_to_many and hasattr(field.remote_field.through, '_meta'):
raise ValueError(
'ModelState.fields cannot refer to a model class - "%s.through" does. '
'Use a string reference instead.' % name
)
@cached_property
def name_lower(self):
return self.name.lower()
@classmethod
def from_model(cls, model, exclude_rels=False):
"""
Feed me a model, get a ModelState representing it out.
"""
# Deconstruct the fields
fields = []
for field in model._meta.local_fields:
if getattr(field, "remote_field", None) and exclude_rels:
continue
if isinstance(field, OrderWrt):
continue
name = force_text(field.name, strings_only=True)
try:
fields.append((name, field.clone()))
except TypeError as e:
raise TypeError("Couldn't reconstruct field %s on %s: %s" % (
name,
model._meta.label,
e,
))
if not exclude_rels:
for field in model._meta.local_many_to_many:
name = force_text(field.name, strings_only=True)
try:
fields.append((name, field.clone()))
except TypeError as e:
raise TypeError("Couldn't reconstruct m2m field %s on %s: %s" % (
name,
model._meta.object_name,
e,
))
# Extract the options
options = {}
for name in DEFAULT_NAMES:
# Ignore some special options
if name in ["apps", "app_label"]:
continue
elif name in model._meta.original_attrs:
if name == "unique_together":
ut = model._meta.original_attrs["unique_together"]
options[name] = set(normalize_together(ut))
elif name == "index_together":
it = model._meta.original_attrs["index_together"]
options[name] = set(normalize_together(it))
else:
options[name] = model._meta.original_attrs[name]
# Force-convert all options to text_type (#23226)
options = cls.force_text_recursive(options)
# If we're ignoring relationships, remove all field-listing model
# options (that option basically just means "make a stub model")
if exclude_rels:
for key in ["unique_together", "index_together", "order_with_respect_to"]:
if key in options:
del options[key]
def flatten_bases(model):
bases = []
for base in model.__bases__:
if hasattr(base, "_meta") and base._meta.abstract:
bases.extend(flatten_bases(base))
else:
bases.append(base)
return bases
# We can't rely on __mro__ directly because we only want to flatten
# abstract models and not the whole tree. However by recursing on
# __bases__ we may end up with duplicates and ordering issues, we
# therefore discard any duplicates and reorder the bases according
# to their index in the MRO.
flattened_bases = sorted(set(flatten_bases(model)), key=lambda x: model.__mro__.index(x))
# Make our record
bases = tuple(
(
base._meta.label_lower
if hasattr(base, "_meta") else
base
)
for base in flattened_bases
)
# Ensure at least one base inherits from models.Model
if not any((isinstance(base, six.string_types) or issubclass(base, models.Model)) for base in bases):
bases = (models.Model,)
# Constructs all managers on the model
managers_mapping = {}
def reconstruct_manager(mgr):
as_manager, manager_path, qs_path, args, kwargs = mgr.deconstruct()
if as_manager:
qs_class = import_string(qs_path)
instance = qs_class.as_manager()
else:
manager_class = import_string(manager_path)
instance = manager_class(*args, **kwargs)
# We rely on the ordering of the creation_counter of the original
# instance
name = force_text(mgr.name)
managers_mapping[name] = (mgr.creation_counter, instance)
if hasattr(model, "_default_manager"):
default_manager_name = force_text(model._default_manager.name)
# Make sure the default manager is always the first
if model._default_manager.use_in_migrations:
reconstruct_manager(model._default_manager)
else:
# Force this manager to be the first and thus default
managers_mapping[default_manager_name] = (0, models.Manager())
# Sort all managers by their creation counter
for _, manager, _ in sorted(model._meta.managers):
if manager.name == "_base_manager" or not manager.use_in_migrations:
continue
reconstruct_manager(manager)
# Sort all managers by their creation counter but take only name and
# instance for further processing
managers = [
(name, instance) for name, (cc, instance) in
sorted(managers_mapping.items(), key=lambda v: v[1])
]
# If the only manager on the model is the default manager defined
# by Django (`objects = models.Manager()`), this manager will not
# be added to the model state.
if managers == [('objects', models.Manager())]:
managers = []
else:
managers = []
# Construct the new ModelState
return cls(
model._meta.app_label,
model._meta.object_name,
fields,
options,
bases,
managers,
)
@classmethod
def force_text_recursive(cls, value):
if isinstance(value, six.string_types):
return smart_text(value)
elif isinstance(value, list):
return [cls.force_text_recursive(x) for x in value]
elif isinstance(value, tuple):
return tuple(cls.force_text_recursive(x) for x in value)
elif isinstance(value, set):
return set(cls.force_text_recursive(x) for x in value)
elif isinstance(value, dict):
return {
cls.force_text_recursive(k): cls.force_text_recursive(v)
for k, v in value.items()
}
return value
def construct_managers(self):
"Deep-clone the managers using deconstruction"
# Sort all managers by their creation counter
sorted_managers = sorted(self.managers, key=lambda v: v[1].creation_counter)
for mgr_name, manager in sorted_managers:
mgr_name = force_text(mgr_name)
as_manager, manager_path, qs_path, args, kwargs = manager.deconstruct()
if as_manager:
qs_class = import_string(qs_path)
yield mgr_name, qs_class.as_manager()
else:
manager_class = import_string(manager_path)
yield mgr_name, manager_class(*args, **kwargs)
def clone(self):
"Returns an exact copy of this ModelState"
return self.__class__(
app_label=self.app_label,
name=self.name,
fields=list(self.fields),
options=dict(self.options),
bases=self.bases,
managers=list(self.managers),
)
def render(self, apps):
"Creates a Model object from our current state into the given apps"
# First, make a Meta object
meta_contents = {'app_label': self.app_label, "apps": apps}
meta_contents.update(self.options)
meta = type(str("Meta"), tuple(), meta_contents)
# Then, work out our bases
try:
bases = tuple(
(apps.get_model(base) if isinstance(base, six.string_types) else base)
for base in self.bases
)
except LookupError:
raise InvalidBasesError("Cannot resolve one or more bases from %r" % (self.bases,))
# Turn fields into a dict for the body, add other bits
body = {name: field.clone() for name, field in self.fields}
body['Meta'] = meta
body['__module__'] = "__fake__"
# Restore managers
body.update(self.construct_managers())
# Then, make a Model object (apps.register_model is called in __new__)
return type(
str(self.name),
bases,
body,
)
def get_field_by_name(self, name):
for fname, field in self.fields:
if fname == name:
return field
raise ValueError("No field called %s on model %s" % (name, self.name))
def __repr__(self):
return "<ModelState: '%s.%s'>" % (self.app_label, self.name)
def __eq__(self, other):
return (
(self.app_label == other.app_label) and
(self.name == other.name) and
(len(self.fields) == len(other.fields)) and
all((k1 == k2 and (f1.deconstruct()[1:] == f2.deconstruct()[1:]))
for (k1, f1), (k2, f2) in zip(self.fields, other.fields)) and
(self.options == other.options) and
(self.bases == other.bases) and
(self.managers == other.managers)
)
def __ne__(self, other):
return not (self == other)
| mit | -9,058,994,997,849,197,000 | 1,533,910,396,096,925,700 | 41.068852 | 114 | 0.587795 | false |
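The row above vendors Django's migration-state machinery (ProjectState, StateApps, ModelState). As an illustration added here (not part of the dataset row), a minimal usage sketch, assuming a configured Django project of the same 1.8/1.9 era with django.contrib.auth installed:

```python
# Minimal sketch; requires DJANGO_SETTINGS_MODULE to point at a configured project.
import django
django.setup()

from django.apps import apps as global_apps
from django.db.migrations.state import ProjectState

state = ProjectState.from_apps(global_apps)   # snapshot the live model definitions
clone = state.clone()                         # can be mutated independently of reality

# Rendering yields "historical" model classes bound to a StateApps registry,
# which is what migration operations actually run against.
HistoricalUser = clone.apps.get_model('auth', 'user')
print(HistoricalUser._meta.app_label, HistoricalUser._meta.object_name)
```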
dumbbell/virt-manager | src/virtManager/remote.py | 3 | 2157 | #
# Copyright (C) 2006 Red Hat, Inc.
# Copyright (C) 2006 Daniel P. Berrange <berrange@redhat.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301 USA.
#
import dbus.service
class vmmRemote(dbus.service.Object):
def __init__(self, engine, bus_name, object_path="/com/redhat/virt/manager"):
dbus.service.Object.__init__(self, bus_name, object_path)
self.engine = engine
@dbus.service.method("com.redhat.virt.manager", in_signature="s")
def show_domain_creator(self, uri):
self.engine.show_domain_creator(str(uri))
@dbus.service.method("com.redhat.virt.manager", in_signature="ss")
def show_domain_editor(self, uri, uuid):
self.engine.show_domain_editor(str(uri), str(uuid))
@dbus.service.method("com.redhat.virt.manager", in_signature="ss")
def show_domain_performance(self, uri, uuid):
self.engine.show_domain_performance(str(uri), str(uuid))
@dbus.service.method("com.redhat.virt.manager", in_signature="ss")
def show_domain_console(self, uri, uuid):
self.engine.show_domain_console(str(uri), str(uuid))
@dbus.service.method("com.redhat.virt.manager", in_signature="s")
def show_host_summary(self, uri):
self.engine.show_host_summary(str(uri))
@dbus.service.method("com.redhat.virt.manager", in_signature="")
def show_manager(self):
self.engine.show_manager()
@dbus.service.method("com.redhat.virt.manager")
def show_connect(self):
self.engine.show_connect()
| gpl-2.0 | 466,106,908,186,048,800 | -4,467,280,257,693,870,000 | 38.218182 | 81 | 0.703755 | false |
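remote.py above only exports methods on the bus; the calling side lives elsewhere in virt-manager. A hypothetical client-side sketch (the well-known bus name is an assumption inferred from the interface string and object path in the file, not confirmed by it):

```python
import dbus

bus = dbus.SessionBus()
proxy = bus.get_object("com.redhat.virt.manager",       # assumed well-known bus name
                       "/com/redhat/virt/manager")      # object path from remote.py
manager = dbus.Interface(proxy, "com.redhat.virt.manager")

manager.show_manager()                       # raise the main manager window
manager.show_host_summary("qemu:///system")  # open a host summary for this URI
```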
emilopez/pydem | pydem/examples/cross-tile_process_manager_test.py | 3 | 8813 | # -*- coding: utf-8 -*-
"""
Copyright 2015 Creare
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
if __name__ == "__main__":
import numpy as np
import os
from pydem.processing_manager import ProcessManager
from pydem.test_pydem import make_test_files, mk_test_multifile
#%% Make the test case files
NN = [300, 400, 660, 740]
test_num = 32
testdir = 'testtiff'
make_test_files(NN, testnum=test_num, testdir=testdir, plotflag=False)
mk_test_multifile(test_num, NN, testdir, nx_grid=3, ny_grid=4,
nx_overlap=16, ny_overlap=32)
path = r'testtiff\chunks'
# Remove a couple of these files so that we only have 4 tiles, and we
# know where they should drain to
files = os.listdir(path)
files.sort()
for i, fil in enumerate(files):
print i, fil
delete_ids = [0, 1, 2, 3, 4, 5, 6, 9]
for d_id in delete_ids:
os.remove(os.path.join(path, files[d_id]))
# Create the ProcessManager object
savepath = r'testtiff\processed_data'
pm = ProcessManager(path, savepath)
pm._DEBUG = True # Save out the magnitude and slope
pm.elev_source_files.sort()
esfile = pm.elev_source_files[1] # Start with lower-left tile and go CCW
# Start twi calculation for first tile
fn, status = pm.calculate_twi(esfile,
save_path=pm.save_path, do_edges=False)
edge_init_data, edge_init_done, edge_init_todo = \
pm.tile_edge.get_edge_init_data(esfile)
# THe only valuable information here is the edge_init_todo, which is self-set
# In this case the right edge of the tile is the edge that needs,
# information, so the right todo should be True
np.testing.assert_(np.all(edge_init_todo['right'][1:-1])) #don't look at corners
np.testing.assert_(np.all(~edge_init_todo['left'][1:-1])) #don't look at corners
# Next we check that the right and top neighbors are correctly set also
top = pm.tile_edge.neighbors[esfile]['top']
edge_init_data, edge_init_done, edge_init_todo = pm.tile_edge.get_edge_init_data(top)
np.testing.assert_(np.all(~edge_init_done['bottom'][1:-1])) #don't look at corners
# stop
right = pm.tile_edge.neighbors[esfile]['right']
edge_init_data, edge_init_done, edge_init_todo = pm.tile_edge.get_edge_init_data(right)
np.testing.assert_(np.all(~edge_init_done['left'][1:-1])) #don't look at corners
topright = pm.tile_edge.neighbors[esfile]['top-right']
edge_init_data, edge_init_done, edge_init_todo = pm.tile_edge.get_edge_init_data(topright)
np.testing.assert_(np.all(~edge_init_done['left'][1:-1])) #don't look at corners
np.testing.assert_(np.all(~edge_init_done['bottom'][1:-1])) #don't look at corners
# pm.tile_edge.visualize_neighbors()
# do the next tile
esfile = pm.elev_source_files[0]
fn, status = pm.calculate_twi(esfile,
save_path=pm.save_path, do_edges=False)
edge_init_data, edge_init_done, edge_init_todo = pm.tile_edge.get_edge_init_data(esfile)
np.testing.assert_(np.all(~edge_init_todo['right'][1:-1]))
np.testing.assert_(np.all(~edge_init_todo['left'][1:-1]))
np.testing.assert_(np.all(~edge_init_todo['top'][1:-1]))
np.testing.assert_(np.all(~edge_init_todo['bottom'][1:-1]))
# Next we check that the left and top neighbors are correctly set also
top = pm.tile_edge.neighbors[esfile]['top']
edge_init_data, edge_init_done, edge_init_todo = pm.tile_edge.get_edge_init_data(top)
np.testing.assert_(np.all(edge_init_done['bottom']))
left = pm.tile_edge.neighbors[esfile]['left']
edge_init_data, edge_init_done, edge_init_todo = pm.tile_edge.get_edge_init_data(left)
np.testing.assert_(np.all(edge_init_done['right']))
topleft = pm.tile_edge.neighbors[esfile]['top-left']
edge_init_data, edge_init_done, edge_init_todo = pm.tile_edge.get_edge_init_data(topleft)
np.testing.assert_(np.any(edge_init_done['right']))
np.testing.assert_(np.any(edge_init_done['bottom']))
# pm.tile_edge.visualize_neighbors()
# Do the third tile
esfile = pm.elev_source_files[2]
fn, status = pm.calculate_twi(esfile,
save_path=pm.save_path, do_edges=False)
edge_init_data, edge_init_done, edge_init_todo = pm.tile_edge.get_edge_init_data(esfile)
np.testing.assert_(np.all(~edge_init_todo['right'][1:-1]))
np.testing.assert_(np.all(~edge_init_todo['left'][1:-1]))
np.testing.assert_(np.all(~edge_init_todo['top'][1:-1]))
np.testing.assert_(np.all(~edge_init_todo['bottom'][1:-1]))
# Next we check that the left and top neighbors are correctly set also
left = pm.tile_edge.neighbors[esfile]['left']
edge_init_data, edge_init_done, edge_init_todo = pm.tile_edge.get_edge_init_data(left)
np.testing.assert_(np.all(edge_init_done['right']))
bottomleft = pm.tile_edge.neighbors[esfile]['bottom-left']
edge_init_data, edge_init_done, edge_init_todo = pm.tile_edge.get_edge_init_data(bottomleft)
np.testing.assert_(np.any(edge_init_done['right']))
np.testing.assert_(np.any(edge_init_done['top']))
# pm.tile_edge.visualize_neighbors()
# a1 = pm.dem_proc.uca.copy()
# esfile = pm.elev_source_files[2]
# coords1 = parse_fn(esfile)
# imshow(a1, interpolation='none',
# extent=[coords1[1], coords1[3], coords1[0], coords1[2]]);clim(0, a1.max())
# crds = pm.tile_edge.edges[left]['right'].get_coordinates()
# edge_init_data, edge_init_done, edge_init_todo = pm.tile_edge.get_edge_init_data(left)
# imshow(edge_init_data['right'][:, None], interpolation='none',
# extent=[crds[:, 1].min(), crds[:, 1].max()+0.3 / a1.shape[0],
# crds[:, 0].min(), crds[:, 0].max()]);clim(0, a1.max())
# xlim(coords1[1], coords1[3])
# ylim(coords1[0], coords1[2])
#%%Do the final tile to complete the first round (non-edge resolving)
esfile = pm.elev_source_files[3]
fn, status = pm.calculate_twi(esfile,
save_path=pm.save_path, do_edges=False)
edge_init_data, edge_init_done, edge_init_todo = pm.tile_edge.get_edge_init_data(esfile)
np.testing.assert_(np.all(~edge_init_todo['right'][1:-1]))
np.testing.assert_(np.all(~edge_init_todo['left'][1:-1]))
np.testing.assert_(np.all(~edge_init_todo['top'][1:-1]))
np.testing.assert_(np.any(~edge_init_todo['bottom'][1:-1])) # mixed on bottom
np.testing.assert_(np.any(edge_init_todo['bottom'][1:-1])) # mixed on bottom
# This one has no neighbors to check (no downstream dependencies)
# a2 = pm.dem_proc.uca.copy()
# esfile = pm.elev_source_files[3]
# coords = parse_fn(esfile)
# imshow(a2, extent=[coords[1], coords[3], coords[0], coords[2]],
# interpolation='none');clim(0, a1.max())
# xlim(coords[1], coords1[3])
# Now let us start the edge resolution round. There are only 2 tiles that
# require edge resolution
# %%
i = pm.tile_edge.find_best_candidate(pm.elev_source_files)
np.testing.assert_(i==1) # should be the first tile
esfile = pm.elev_source_files[i]
fn, status = pm.calculate_twi(esfile,
save_path=pm.save_path, do_edges=True)
edge_init_data, edge_init_done, edge_init_todo = pm.tile_edge.get_edge_init_data(esfile)
np.testing.assert_(np.all(~edge_init_todo['right'][1:-1]))
np.testing.assert_(np.all(~edge_init_todo['left'][1:-1]))
np.testing.assert_(np.all(~edge_init_todo['top'][1:-1]))
np.testing.assert_(np.all(~edge_init_todo['bottom'][1:-1]))
# check neihbors
top = pm.tile_edge.neighbors[esfile]['top']
edge_init_data, edge_init_done, edge_init_todo = pm.tile_edge.get_edge_init_data(top)
np.testing.assert_(np.all(edge_init_done['bottom'][1:-1])) #don't look at corners
right = pm.tile_edge.neighbors[esfile]['right']
edge_init_data, edge_init_done, edge_init_todo = pm.tile_edge.get_edge_init_data(right)
np.testing.assert_(np.all(edge_init_done['left'][1:-1])) #don't look at corners
i = pm.tile_edge.find_best_candidate(pm.elev_source_files)
np.testing.assert_(i==3) # should be the last tile
esfile = pm.elev_source_files[i]
fn, status = pm.calculate_twi(esfile,
save_path=pm.save_path, do_edges=True) | apache-2.0 | 1,398,575,900,090,359,300 | 2,439,557,651,420,869,600 | 48.516854 | 96 | 0.650857 | false |
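The test above drives ProcessManager tile by tile to verify the edge bookkeeping. Condensed to its essential two-pass flow, as a sketch that uses only the calls appearing in the test (paths are placeholders, and it assumes find_best_candidate signals completion by returning None):

```python
from pydem.processing_manager import ProcessManager

pm = ProcessManager('testtiff/chunks', 'testtiff/processed_data')

# Pass 1: per-tile TWI, without resolving cross-tile edges.
for esfile in sorted(pm.elev_source_files):
    pm.calculate_twi(esfile, save_path=pm.save_path, do_edges=False)

# Pass 2: repeatedly pick the tile whose edge data is most complete and
# recompute it with edge resolution until no candidate remains.
i = pm.tile_edge.find_best_candidate(pm.elev_source_files)
while i is not None:
    pm.calculate_twi(pm.elev_source_files[i], save_path=pm.save_path, do_edges=True)
    i = pm.tile_edge.find_best_candidate(pm.elev_source_files)
```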
CLVsol/oehealth | oehealth_dispensation/oehealth_dispensation.py | 1 | 9325 | # -*- encoding: utf-8 -*-
################################################################################
# #
# Copyright (C) 2013-Today Carlos Eduardo Vercelino - CLVsol #
# #
# This program is free software: you can redistribute it and/or modify #
# it under the terms of the GNU Affero General Public License as published by #
# the Free Software Foundation, either version 3 of the License, or #
# (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU Affero General Public License for more details. #
# #
# You should have received a copy of the GNU Affero General Public License #
# along with this program. If not, see <http://www.gnu.org/licenses/>. #
################################################################################
from osv import osv
from osv import fields
import time
class oehealth_dispensation(osv.Model):
_name='oehealth.dispensation'
def _compute_create_uid(self, cr, uid, ids, field_name, arg, context={}):
result = {}
for r in self.browse(cr, uid, ids, context=context):
perms = self.perm_read(cr, uid, ids)
create_uid = perms[0].get('create_uid', 'n/a')
result[r.id] = create_uid
return result
def _compute_create_date(self, cr, uid, ids, field_name, arg, context={}):
result = {}
for r in self.browse(cr, uid, ids, context=context):
perms = self.perm_read(cr, uid, ids)
create_date = perms[0].get('create_date', 'n/a')
result[r.id] = create_date
return result
def _compute_write_uid(self, cr, uid, ids, field_name, arg, context={}):
result = {}
for r in self.browse(cr, uid, ids, context=context):
perms = self.perm_read(cr, uid, ids)
write_uid = perms[0].get('write_uid', 'n/a')
result[r.id] = write_uid
return result
def _compute_write_date(self, cr, uid, ids, field_name, arg, context={}):
result = {}
for r in self.browse(cr, uid, ids, context=context):
perms = self.perm_read(cr, uid, ids)
write_date = perms[0].get('write_date', 'n/a')
result[r.id] = write_date
return result
_columns={
'name': fields.char(size=256, string='Dispensation ID', required=True,
help='Type in the ID of this dispensation'),
'dispensation_date': fields.date(string='Dispensation Date', required=True),
'prescription_date': fields.date(string='Prescription Date', required=True),
'prescriber_id': fields.many2one('oehealth.prescriber', string='Prescriber', required=True),
#'patient_id': fields.many2one('oehealth.patient', string='Patient', required=True),
#'pregnancy_warning': fields.boolean(string='Pregancy Warning', readonly=True),
'notes': fields.text(string='Prescription Notes'),
#'prescription_line': fields.one2many('oehealth.dispensation.line',
# 'pbm_prescription_order_id',
# string='Dispensation line',),
'prescription_line': fields.one2many('oehealth.medicament.template',
'dispensation_id',
string='Prescription lines',),
#'pbm_prescription_warning_ack': fields.boolean(string='Dispensation verified'),
#'user_id': fields.many2one('res.users', string='Prescribing Doctor', required=True),
'active': fields.boolean('Active', help="The active field allows you to hide the dispensation without removing it."),
'state': fields.selection([('new','New'),
('revised','Revised'),
('waiting','Waiting'),
('okay','Okay')], 'Stage', readonly=True),
'create_uid': fields.function(_compute_create_uid, method=True, type='char', string='Create User',),
'create_date': fields.function(_compute_create_date, method=True, type='datetime', string='Create Date',),
'write_uid': fields.function(_compute_write_uid, method=True, type='char', string='Write User',),
'write_date': fields.function(_compute_write_date, method=True, type='datetime', string='Write Date',),
}
_sql_constraints = [
('uniq_name', 'unique(name)', "The Dispensation ID must be unique!"),
]
_defaults={
'name': '/',
'dispensation_date': lambda *a: time.strftime('%Y-%m-%d %H:%M:%S'),
'active': 1,
'state': 'new',
}
def create(self, cr, uid, vals, context=None):
if context is None:
context = {}
if not 'name' in vals or vals['name'] == '/':
val = self.pool.get('ir.sequence').get(cr, uid, 'oehealth.dispensation.code')
code = map(int, str(val))
code_len = len(code)
while len(code) < 14:
code.insert(0, 0)
while len(code) < 16:
n = sum([(len(code) + 1 - i) * v for i, v in enumerate(code)]) % 11
if n > 1:
f = 11 - n
else:
f = 0
code.append(f)
code_str = "%s.%s.%s.%s.%s-%s" % (str(code[0]) + str(code[1]),
str(code[2]) + str(code[3]) + str(code[4]),
str(code[5]) + str(code[6]) + str(code[7]),
str(code[8]) + str(code[9]) + str(code[10]),
str(code[11]) + str(code[12]) + str(code[13]),
str(code[14]) + str(code[15]))
if code_len <= 3:
vals['name'] = code_str[18 - code_len:21]
elif code_len > 3 and code_len <= 6:
vals['name'] = code_str[17 - code_len:21]
elif code_len > 6 and code_len <= 9:
vals['name'] = code_str[16 - code_len:21]
elif code_len > 9 and code_len <= 12:
vals['name'] = code_str[15 - code_len:21]
elif code_len > 12 and code_len <= 14:
vals['name'] = code_str[14 - code_len:21]
return super(oehealth_dispensation, self).create(cr, uid, vals, context)
def oehealth_dispensation_new(self, cr, uid, ids):
self.write(cr, uid, ids, {'state': 'new'})
return True
def oehealth_dispensation_revised(self, cr, uid, ids):
self.write(cr, uid, ids, {'state': 'revised'})
return True
def oehealth_dispensation_waiting(self, cr, uid, ids):
self.write(cr, uid, ids, {'state': 'waiting'})
return True
def oehealth_dispensation_okay(self, cr, uid, ids):
self.write(cr, uid, ids, {'state': 'okay'})
return True
def get_authorization(self, cr, uid, ids, context={}):
data=ids
test_request_obj = self.pool.get('oehealth.dispensation')
lab_obj = self.pool.get('oehealth.dispensation')
test_report_data={}
test_cases = []
test_obj = test_request_obj.browse(cr, uid, context.get('active_id'), context=context)
#if test_obj.state == 'tested':
if test_obj.state != 'tested':
#raise osv.except_osv(_('UserError'),_('Test Report already created.'))
raise osv.except_osv(('UserError'),('Test Report already created.'))
test_report_data['test'] = test_obj.name.id
test_report_data['patient'] = test_obj.patient_id.id
#test_report_data['requestor'] = test_obj.doctor_id.id
test_report_data['date_requested'] = test_obj.date
for criterion in test_obj.name.criteria:
test_cases.append((0,0,{'name':criterion.name,
'sequence':criterion.sequence,
'normal_range':criterion.normal_range,
'unit':criterion.unit.id,
}))
test_report_data['criteria'] = test_cases
lab_id = lab_obj.create(cr,uid,test_report_data,context=context)
test_request_obj.write(cr, uid, context.get('active_id'), {'state':'tested'})
return {
'domain': "[('id','=', "+str(lab_id)+")]",
'name': 'Lab Test Report',
'view_type': 'form',
'view_mode': 'tree,form',
'res_model': 'oehealth.lab_test',
'type': 'ir.actions.act_window'
}
oehealth_dispensation()
| agpl-3.0 | -6,928,920,909,897,924,000 | 8,707,209,570,899,852,000 | 49.405405 | 125 | 0.500268 | false |
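The least obvious part of the module above is the check-digit code assembled in create(). The same arithmetic restated as a standalone function (pure Python, no OpenERP dependencies; val stands in for the number drawn from the ir.sequence):

```python
def format_dispensation_code(val):
    code = [int(c) for c in str(val)]
    while len(code) < 14:                     # left-pad to 14 digits
        code.insert(0, 0)
    while len(code) < 16:                     # append two mod-11 check digits
        n = sum((len(code) + 1 - i) * v for i, v in enumerate(code)) % 11
        code.append(11 - n if n > 1 else 0)
    groups = (code[0:2], code[2:5], code[5:8], code[8:11], code[11:14], code[14:16])
    return "%s.%s.%s.%s.%s-%s" % tuple("".join(str(d) for d in g) for g in groups)

print(format_dispensation_code(42))   # 00.000.000.000.042-60
```

create() then keeps only the tail of this formatted string, dropping the all-zero leading groups, based on how many digits the sequence value has.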
shadowk29/cusumtools | legacy/minimal_psd.py | 1 | 12009 | ## COPYRIGHT
## Copyright (C) 2015 Kyle Briggs (kbrig035<at>uottawa.ca)
##
## This file is part of cusumtools.
##
## This program is free software: you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation, either version 3 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program. If not, see <http://www.gnu.org/licenses/>.
import matplotlib
matplotlib.use('TkAgg')
import numpy as np
import tkinter.filedialog
import tkinter as tk
from matplotlib.figure import Figure
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2TkAgg
import scipy.io as sio
from scipy.signal import bessel, filtfilt, welch
from scikits.samplerate import resample
import pylab as pl
import glob
import os
import time
import pandas as pd
from pandasql import sqldf
import re
def make_format(current, other):
# current and other are axes
def format_coord(x, y):
# x, y are data coordinates
# convert to display coords
display_coord = current.transData.transform((x,y))
inv = other.transData.inverted()
# convert back to data coords with respect to ax
ax_coord = inv.transform(display_coord)
coords = [ax_coord, (x, y)]
return ('Left: {:<40} Right: {:<}'
.format(*['({:.3f}, {:.3f})'.format(x, y) for x,y in coords]))
return format_coord
class App(tk.Frame):
def __init__(self, parent,file_path):
tk.Frame.__init__(self, parent)
parent.deiconify()
self.events_flag = False
self.baseline_flag = False
self.file_path = file_path
##### Trace plotting widgets #####
self.trace_frame = tk.LabelFrame(parent,text='Current Trace')
self.trace_fig = Figure(figsize=(7,5), dpi=100)
self.trace_canvas = FigureCanvasTkAgg(self.trace_fig, master=self.trace_frame)
self.trace_toolbar_frame = tk.Frame(self.trace_frame)
self.trace_toolbar = NavigationToolbar2TkAgg(self.trace_canvas, self.trace_toolbar_frame)
self.trace_toolbar.update()
self.trace_frame.grid(row=0,column=0,columnspan=6,sticky=tk.N+tk.S)
self.trace_toolbar_frame.grid(row=1,column=0,columnspan=6)
self.trace_canvas.get_tk_widget().grid(row=0,column=0,columnspan=6)
##### PSD plotting widgets #####
self.psd_frame = tk.LabelFrame(parent,text='Power Spectrum')
self.psd_fig = Figure(figsize=(7,5), dpi=100)
self.psd_canvas = FigureCanvasTkAgg(self.psd_fig, master=self.psd_frame)
self.psd_toolbar_frame = tk.Frame(self.psd_frame)
self.psd_toolbar = NavigationToolbar2TkAgg(self.psd_canvas, self.psd_toolbar_frame)
self.psd_toolbar.update()
self.psd_frame.grid(row=0,column=6,columnspan=6,sticky=tk.N+tk.S)
self.psd_toolbar_frame.grid(row=1,column=6,columnspan=6)
self.psd_canvas.get_tk_widget().grid(row=0,column=6,columnspan=6)
##### Control widgets #####
self.control_frame = tk.LabelFrame(parent, text='Controls')
self.control_frame.grid(row=2,column=0,columnspan=6,sticky=tk.N+tk.S+tk.E+tk.W)
self.start_entry = tk.Entry(self.control_frame)
self.start_entry.insert(0,'0')
self.start_label = tk.Label(self.control_frame, text='Start Time (s)')
self.start_label.grid(row=0,column=0,sticky=tk.E+tk.W)
self.start_entry.grid(row=0,column=1,sticky=tk.E+tk.W)
self.end_entry = tk.Entry(self.control_frame)
self.end_entry.insert(0,'10')
self.end_label = tk.Label(self.control_frame, text='End Time (s)')
self.end_label.grid(row=0,column=2,sticky=tk.E+tk.W)
self.end_entry.grid(row=0,column=3,sticky=tk.E+tk.W)
self.cutoff_entry = tk.Entry(self.control_frame)
self.cutoff_entry.insert(0,'')
self.cutoff_label = tk.Label(self.control_frame, text='Cutoff (Hz)')
self.cutoff_label.grid(row=1,column=0,sticky=tk.E+tk.W)
self.cutoff_entry.grid(row=1,column=1,sticky=tk.E+tk.W)
self.order_entry = tk.Entry(self.control_frame)
self.order_entry.insert(0,'')
self.order_label = tk.Label(self.control_frame, text='Filter Order')
self.order_label.grid(row=1,column=2,sticky=tk.E+tk.W)
self.order_entry.grid(row=1,column=3,sticky=tk.E+tk.W)
self.samplerate_entry = tk.Entry(self.control_frame)
self.samplerate_entry.insert(0,'250000')
self.samplerate_label = tk.Label(self.control_frame, text='Sampling Frequency (Hz)')
self.samplerate_label.grid(row=1,column=4,sticky=tk.E+tk.W)
self.samplerate_entry.grid(row=1,column=5,sticky=tk.E+tk.W)
self.savegain_entry = tk.Entry(self.control_frame)
self.savegain_entry.insert(0,'1')
        self.savegain_label = tk.Label(self.control_frame, text='Gain')
self.savegain_label.grid(row=0,column=4,sticky=tk.E+tk.W)
self.savegain_entry.grid(row=0,column=5,sticky=tk.E+tk.W)
self.plot_trace = tk.Button(self.control_frame, text='Update Trace', command=self.update_trace)
self.plot_trace.grid(row=2,column=0,columnspan=2,sticky=tk.E+tk.W)
self.normalize = tk.IntVar()
self.normalize.set(0)
self.normalize_check = tk.Checkbutton(self.control_frame, text='Normalize', variable = self.normalize)
self.normalize_check.grid(row=2,column=2,sticky=tk.E+tk.W)
self.plot_psd = tk.Button(self.control_frame, text='Update PSD', command=self.update_psd)
self.plot_psd.grid(row=2,column=3,sticky=tk.E+tk.W)
##### Feedback Widgets #####
self.feedback_frame = tk.LabelFrame(parent, text='Status')
self.feedback_frame.grid(row=2,column=6,columnspan=6,sticky=tk.N+tk.S+tk.E+tk.W)
self.export_psd = tk.Button(self.feedback_frame, text='Export PSD',command=self.export_psd)
self.export_psd.grid(row=1,column=0,columnspan=6,sticky=tk.E+tk.W)
self.export_trace = tk.Button(self.feedback_frame, text='Export Trace',command=self.export_trace)
self.export_trace.grid(row=2,column=0,columnspan=6,sticky=tk.E+tk.W)
self.load_memmap()
self.initialize_samplerate()
def export_psd(self):
try:
data_path = tkinter.filedialog.asksaveasfilename(defaultextension='.csv',initialdir='G:\PSDs for Sam')
np.savetxt(data_path,np.c_[self.f, self.Pxx, self.rms],delimiter=',')
except AttributeError:
self.wildcard.set('Plot the PSD first')
def export_trace(self):
try:
            data_path = tkinter.filedialog.asksaveasfilename(defaultextension='.csv',initialdir=r'G:\Analysis\Pores\NPN\PSDs')
np.savetxt(data_path,self.plot_data,delimiter=',')
except AttributeError:
self.wildcard.set('Plot the trace first')
def load_mapped_data(self):
self.total_samples = len(self.map)
self.samplerate = int(self.samplerate_entry.get())
if self.start_entry.get()!='':
self.start_time = float(self.start_entry.get())
start_index = int((float(self.start_entry.get())*self.samplerate))
else:
self.start_time = 0
start_index = 0
if self.end_entry.get()!='':
self.end_time = float(self.end_entry.get())
end_index = int((float(self.end_entry.get())*self.samplerate))
if end_index > self.total_samples:
end_index = self.total_samples
self.data = self.map[start_index:end_index]
self.data = float(self.savegain_entry.get()) * self.data
def load_memmap(self):
columntypes = np.dtype([('current', '>i2'), ('voltage', '>i2')])
self.map = np.memmap(self.file_path, dtype=columntypes, mode='r')['current']
def integrate_noise(self, f, Pxx):
df = f[1]-f[0]
return np.sqrt(np.cumsum(Pxx * df))
def filter_data(self):
cutoff = float(self.cutoff_entry.get())
order = int(self.order_entry.get())
Wn = 2.0 * cutoff/float(self.samplerate)
b, a = bessel(order,Wn,'low')
padding = 1000
padded = np.pad(self.data, pad_width=padding, mode='median')
self.filtered_data = filtfilt(b, a, padded, padtype=None)[padding:-padding]
def initialize_samplerate(self):
self.samplerate = float(self.samplerate_entry.get())
##### Plot Updating functions #####
def update_trace(self):
self.initialize_samplerate()
self.load_mapped_data()
self.filtered_data = self.data
self.plot_data = self.filtered_data
plot_samplerate = self.samplerate
if self.cutoff_entry.get()!='' and self.order_entry!='':
self.filter_data()
self.plot_data = self.filtered_data
self.trace_fig.clf()
a = self.trace_fig.add_subplot(111)
time = np.linspace(1.0/self.samplerate,len(self.plot_data)/float(self.samplerate),len(self.plot_data))+self.start_time
a.set_xlabel(r'Time ($\mu s$)')
a.set_ylabel('Current (pA)')
self.trace_fig.subplots_adjust(bottom=0.14,left=0.21)
a.plot(time*1e6,self.plot_data,'.',markersize=1)
self.trace_canvas.show()
def update_psd(self):
self.initialize_samplerate()
self.load_mapped_data()
self.filtered_data = self.data
self.plot_data = self.filtered_data
plot_samplerate = self.samplerate
if self.cutoff_entry.get()!='' and self.order_entry!='':
self.filter_data()
self.plot_data = self.filtered_data
maxf = 2*float(self.cutoff_entry.get())
else:
maxf = 2*float(self.samplerate_entry.get())
length = np.minimum(2**18,len(self.filtered_data))
end_index = int(np.floor(len(self.filtered_data)/length)*length)
current = np.average(self.filtered_data[:end_index])
f, Pxx = welch(self.filtered_data, plot_samplerate,nperseg=length)
self.rms = self.integrate_noise(f, Pxx)
if self.normalize.get():
Pxx /= current**2
Pxx *= maxf/2.0
self.rms /= np.absolute(current)
self.f = f
self.Pxx = Pxx
minf = 1
BW_index = np.searchsorted(f, maxf/2)
logPxx = np.log10(Pxx[1:BW_index])
minP = 10**np.floor(np.amin(logPxx))
maxP = 10**np.ceil(np.amax(logPxx))
self.psd_fig.clf()
a = self.psd_fig.add_subplot(111)
a.set_xlabel('Frequency (Hz)')
a.set_ylabel(r'Spectral Power ($\mathrm{pA}^2/\mathrm{Hz}$)')
a.set_xlim(minf, maxf)
a.set_ylim(minP, maxP)
self.psd_fig.subplots_adjust(bottom=0.14,left=0.21)
a.loglog(f[1:],Pxx[1:],'b-')
for tick in a.get_yticklabels():
tick.set_color('b')
a2 = a.twinx()
a2.semilogx(f, self.rms, 'r-')
a2.set_ylabel('RMS Noise (pA)')
a2.set_xlim(minf, maxf)
for tick in a2.get_yticklabels():
tick.set_color('r')
a2.format_coord = make_format(a2, a)
self.psd_canvas.show()
def main():
root=tk.Tk()
root.withdraw()
file_path = tkinter.filedialog.askopenfilename(initialdir='C:/Data/')
App(root,file_path).grid(row=0,column=0)
root.mainloop()
if __name__=="__main__":
main()
| gpl-3.0 | -1,996,733,471,393,553,700 | -213,264,079,753,783,900 | 38.503289 | 126 | 0.615955 | false |
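update_psd() above pairs a Welch power spectrum with a cumulative integral to report RMS noise as a function of bandwidth. The same computation on synthetic data, as a small self-contained illustration:

```python
import numpy as np
from scipy.signal import welch

fs = 250000.0                                       # sampling rate, Hz
rng = np.random.default_rng(0)
trace = 100.0 + 5.0 * rng.standard_normal(int(fs))  # fake 1 s current trace, pA

f, Pxx = welch(trace, fs, nperseg=2**14)            # PSD, pA^2/Hz
df = f[1] - f[0]
rms = np.sqrt(np.cumsum(Pxx * df))                  # RMS noise integrated up to f

print("RMS noise over the full bandwidth: %.2f pA" % rms[-1])
```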
syhpoon/xyzcmd | libxyz/vfs/vfsobj.py | 1 | 8497 | #-*- coding: utf8 -*
#
# Max E. Kuznecov ~syhpoon <syhpoon@syhpoon.name> 2008
#
# This file is part of XYZCommander.
# XYZCommander is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# XYZCommander is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser Public License for more details.
# You should have received a copy of the GNU Lesser Public License
# along with XYZCommander. If not, see <http://www.gnu.org/licenses/>.
import os
from libxyz.core.utils import bstring, ustring
from libxyz.vfs import types, util
class VFSObject(object):
"""
Abstract interface for VFS objects
"""
def __init__(self, xyz, path, full_path, ext_path, driver, parent,
enc=None, **kwargs):
self.xyz = xyz
self.enc = enc or xyzenc
# Internal VFS path
self.path = bstring(path, self.enc)
# Full VFS path
self.full_path = bstring(full_path, self.enc)
# External VFS path
self.ext_path = bstring(ext_path, self.enc)
self.parent = parent
self.driver = driver
self.kwargs = kwargs
self.fileobj = None
# File name
self.name = os.path.basename(self.path)
# File type
self.ftype = None
# Access time
self.atime = None
# Modified time
self.mtime = None
# Changed time
self.ctime = None
# Size in bytes
self.size = None
# Owner UID
self.uid = None
# Group
self.gid = None
# Mode
self.mode = None
# Inode
self.inode = None
# Visual file type
self.vtype = None
# Visual file representation
self.visual = None
# File info
self.info = None
# Any type-specific data
self.data = None
# List of significant attributes
self.attributes = ()
self.__ni_msg = _(u"Feature not implemented")
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def is_file(self):
"""
Return True if instance is representing regular file
"""
return isinstance(self.ftype, types.VFSTypeFile)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def is_dir(self):
"""
Return True if instance is representing directory
"""
return isinstance(self.ftype, types.VFSTypeDir)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def is_dir_empty(self):
"""
Return True if instance is representing directory and it is empty
"""
if not self.is_dir():
return False
_, _, objs = self.walk()
return len(objs) == 0
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def is_link(self):
"""
Return True if instance is representing soft link
"""
return isinstance(self.ftype, types.VFSTypeLink)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def is_char(self):
"""
Return True if instance is representing soft char device
"""
return isinstance(self.ftype, types.VFSTypeChar)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def is_block(self):
"""
Return True if instance is representing block device
"""
return isinstance(self.ftype, types.VFSTypeBlock)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def is_fifo(self):
"""
Return True if instance is representing FIFO
"""
return isinstance(self.ftype, types.VFSTypeFifo)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def is_socket(self):
"""
Return True if instance is representing socket
"""
return isinstance(self.ftype, types.VFSTypeSocket)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def copy(self, path, existcb=None, errorcb=None,
save_attrs=True, follow_links=False, cancel=None):
"""
Copy file to specified location
@param path: Local path to copy file to
@param existcb: Callback function to be called if there exists
an object in target directory with the same name.
Callback function receives VFSObject instance as an
argument and must return one of:
'override' - to override this very object
'override all' - to override any future collisions
'skip' - to skip the object
'skip all' - to skip all future collisions
'abort' - to abort the process.
If no existscb provided 'abort' is used as default
@param errorcb: Callback function to be called in case an error occured
during copying. Function receives VFSObject instance
and error string as arguments and must return one of:
'skip' - to continue the process
'skip all' - to skip all future errors
'abort' - to abort the process.
If no errorcb provided 'abort' is used as default
@param save_attrs: Whether to save object attributes
@param follow_links: Whether to follow symlinks
@param cancel: a threading.Event instance, if it is found set - abort
"""
raise NotImplementedError(self.__ni_msg)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def move(self, path, existcb=None, errorcb=None, save_attrs=True,
follow_links=False, cancel=None):
"""
Move object
Arguments are the same as for copy()
"""
raise NotImplementedError(self.__ni_msg)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def mkdir(self, newdir):
"""
Create new dir inside object (only valid for directory object types)
"""
raise NotImplementedError(self.__ni_msg)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def remove(self, recursive=True):
"""
[Recursively] remove object
"""
raise NotImplementedError(self.__ni_msg)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def walk(self):
"""
Directory tree walker
"""
raise NotImplementedError(self.__ni_msg)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def open(self, mode='r'):
"""
Open self object in provided mode
"""
raise NotImplementedError(self.__ni_msg)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def close(self):
"""
Close self object
"""
raise NotImplementedError(self.__ni_msg)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def read(self, bytes=None):
"""
Read bytes from self object
"""
raise NotImplementedError(self.__ni_msg)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def tell(self):
"""
Tell file position
"""
raise NotImplementedError(self.__ni_msg)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def seek(self, offset, whence=None):
"""
Perform seek() on object
"""
raise NotImplementedError(self.__ni_msg)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def in_dir(self, d, e):
"""
Filter only those archive entries which exist in the same
directory level
"""
if e.startswith(d.lstrip(os.sep)) and \
len(util.split_path(e)) == (len(util.split_path(d)) + 1):
return True
else:
return False
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def __repr__(self):
return self.__str__()
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def __unicode__(self):
return ustring(self.__str__())
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def __del__(self):
if self.fileobj:
try:
self.close()
except Exception:
pass
| gpl-3.0 | -5,332,250,530,637,271,000 | -5,128,007,415,771,210,000 | 26.146965 | 79 | 0.485348 | false |
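VFSObject above is an abstract base: nearly every operation raises NotImplementedError and is meant to be overridden by a concrete driver. The one piece of real logic is in_dir(), which keeps only entries exactly one level below a directory; restated standalone (split_path here is a simplified stand-in for libxyz.vfs.util.split_path):

```python
import os

def split_path(path):
    # simplified stand-in for libxyz.vfs.util.split_path
    return [part for part in path.split(os.sep) if part]

def in_dir(d, e):
    return e.startswith(d.lstrip(os.sep)) and \
           len(split_path(e)) == len(split_path(d)) + 1

print(in_dir('/docs', 'docs/readme.txt'))    # True: directly inside /docs
print(in_dir('/docs', 'docs/img/logo.png'))  # False: one level deeper
```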
simobasso/ansible | test/units/parsing/vault/test_vault.py | 60 | 5974 | # (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import getpass
import os
import shutil
import time
import tempfile
import six
from binascii import unhexlify
from binascii import hexlify
from nose.plugins.skip import SkipTest
from ansible.compat.tests import unittest
from ansible.utils.unicode import to_bytes, to_unicode
from ansible import errors
from ansible.parsing.vault import VaultLib
# Counter import fails for 2.0.1, requires >= 2.6.1 from pip
try:
from Crypto.Util import Counter
HAS_COUNTER = True
except ImportError:
HAS_COUNTER = False
# KDF import fails for 2.0.1, requires >= 2.6.1 from pip
try:
from Crypto.Protocol.KDF import PBKDF2
HAS_PBKDF2 = True
except ImportError:
HAS_PBKDF2 = False
# AES IMPORTS
try:
from Crypto.Cipher import AES as AES
HAS_AES = True
except ImportError:
HAS_AES = False
class TestVaultLib(unittest.TestCase):
def test_methods_exist(self):
v = VaultLib('ansible')
slots = ['is_encrypted',
'encrypt',
'decrypt',
'_format_output',
'_split_header',]
for slot in slots:
assert hasattr(v, slot), "VaultLib is missing the %s method" % slot
def test_is_encrypted(self):
v = VaultLib(None)
assert not v.is_encrypted(u"foobar"), "encryption check on plaintext failed"
data = u"$ANSIBLE_VAULT;9.9;TEST\n%s" % hexlify(b"ansible")
assert v.is_encrypted(data), "encryption check on headered text failed"
def test_format_output(self):
v = VaultLib('ansible')
v.cipher_name = "TEST"
sensitive_data = b"ansible"
data = v._format_output(sensitive_data)
lines = data.split(b'\n')
assert len(lines) > 1, "failed to properly add header"
header = to_bytes(lines[0])
        assert header.endswith(b';TEST'), "header does not end with cipher name"
header_parts = header.split(b';')
assert len(header_parts) == 3, "header has the wrong number of parts"
assert header_parts[0] == b'$ANSIBLE_VAULT', "header does not start with $ANSIBLE_VAULT"
assert header_parts[1] == v.b_version, "header version is incorrect"
        assert header_parts[2] == b'TEST', "header does not end with cipher name"
def test_split_header(self):
v = VaultLib('ansible')
data = b"$ANSIBLE_VAULT;9.9;TEST\nansible"
rdata = v._split_header(data)
lines = rdata.split(b'\n')
assert lines[0] == b"ansible"
assert v.cipher_name == 'TEST', "cipher name was not set"
assert v.b_version == b"9.9"
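        # The layout exercised above is "$ANSIBLE_VAULT;<version>;<cipher>\n<body>":
        # _split_header() consumes the first line, records b_version and
        # cipher_name, and returns only the body ("ansible" here).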
def test_encrypt_decrypt_aes(self):
if not HAS_AES or not HAS_COUNTER or not HAS_PBKDF2:
raise SkipTest
v = VaultLib('ansible')
v.cipher_name = u'AES'
# AES encryption code has been removed, so this is old output for
# AES-encrypted 'foobar' with password 'ansible'.
enc_data = b'$ANSIBLE_VAULT;1.1;AES\n53616c7465645f5fc107ce1ef4d7b455e038a13b053225776458052f8f8f332d554809d3f150bfa3\nfe3db930508b65e0ff5947e4386b79af8ab094017629590ef6ba486814cf70f8e4ab0ed0c7d2587e\n786a5a15efeb787e1958cbdd480d076c\n'
dec_data = v.decrypt(enc_data)
assert dec_data == b"foobar", "decryption failed"
def test_encrypt_decrypt_aes256(self):
if not HAS_AES or not HAS_COUNTER or not HAS_PBKDF2:
raise SkipTest
v = VaultLib('ansible')
v.cipher_name = 'AES256'
enc_data = v.encrypt(b"foobar")
dec_data = v.decrypt(enc_data)
assert enc_data != b"foobar", "encryption failed"
assert dec_data == b"foobar", "decryption failed"
def test_encrypt_encrypted(self):
if not HAS_AES or not HAS_COUNTER or not HAS_PBKDF2:
raise SkipTest
v = VaultLib('ansible')
v.cipher_name = 'AES'
data = "$ANSIBLE_VAULT;9.9;TEST\n%s" % hexlify(six.b("ansible"))
error_hit = False
try:
enc_data = v.encrypt(data)
except errors.AnsibleError as e:
error_hit = True
assert error_hit, "No error was thrown when trying to encrypt data with a header"
def test_decrypt_decrypted(self):
if not HAS_AES or not HAS_COUNTER or not HAS_PBKDF2:
raise SkipTest
v = VaultLib('ansible')
data = "ansible"
error_hit = False
try:
dec_data = v.decrypt(data)
except errors.AnsibleError as e:
error_hit = True
assert error_hit, "No error was thrown when trying to decrypt data without a header"
def test_cipher_not_set(self):
# not setting the cipher should default to AES256
if not HAS_AES or not HAS_COUNTER or not HAS_PBKDF2:
raise SkipTest
v = VaultLib('ansible')
data = "ansible"
error_hit = False
try:
enc_data = v.encrypt(data)
except errors.AnsibleError as e:
error_hit = True
assert not error_hit, "An error was thrown when trying to encrypt data without the cipher set"
assert v.cipher_name == "AES256", "cipher name is not set to AES256: %s" % v.cipher_name
| gpl-3.0 | -4,561,396,154,499,939,000 | 968,236,543,064,954,200 | 36.3375 | 244 | 0.648979 | false |
ClaudioNahmad/Servicio-Social | Parametros/CosmoMC/prerrequisitos/plc-2.0/build/pyfits-3.2.2/lib/pyfits/core.py | 3 | 5534 | #!/usr/bin/env python
# $Id$
"""
A module for reading and writing FITS files and manipulating their
contents.
A module for reading and writing Flexible Image Transport System
(FITS) files. This file format was endorsed by the International
Astronomical Union in 1999 and mandated by NASA as the standard format
for storing high energy astrophysics data. For details of the FITS
standard, see the NASA/Science Office of Standards and Technology
publication, NOST 100-2.0.
For detailed examples of usage, see the `PyFITS User's Manual
<http://stsdas.stsci.edu/download/wikidocs/The_PyFITS_Handbook.pdf>`_.
"""
# The existing unit tests, anyways, only require this in pyfits.hdu.table,
# but we should still leave new division here too in order to avoid any nasty
# surprises
from __future__ import division # confidence high
"""
Do you mean: "Profits"?
- Google Search, when asked for "PyFITS"
"""
import os
import sys
import warnings
import pyfits.py3compat
# Public API compatibility imports
import pyfits.card
import pyfits.column
import pyfits.convenience
import pyfits.diff
import pyfits.hdu
from pyfits.card import *
from pyfits.column import *
from pyfits.convenience import *
from pyfits.diff import *
from pyfits.fitsrec import FITS_record, FITS_rec
from pyfits.hdu import *
from pyfits.hdu.hdulist import fitsopen as open
from pyfits.hdu.image import Section
from pyfits.hdu.table import new_table
from pyfits.header import Header
# Additional imports used by the documentation (some of which should be
# restructured at some point)
from pyfits.verify import VerifyError
# Set module-global boolean variables--these variables can also get their
# values from environment variables
GLOBALS = [
# Variable name # Default
('ENABLE_RECORD_VALUED_KEYWORD_CARDS', True),
('EXTENSION_NAME_CASE_SENSITIVE', False),
('STRIP_HEADER_WHITESPACE', True),
('USE_MEMMAP', True)
]
for varname, default in GLOBALS:
try:
locals()[varname] = bool(int(os.environ.get('PYFITS_' + varname,
default)))
except ValueError:
locals()[varname] = default
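# For instance, setting PYFITS_USE_MEMMAP=0 in the environment before import
# turns memory mapping off; each variable falls back to its default above when
# its value cannot be parsed as an integer.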
__all__ = (pyfits.card.__all__ + pyfits.column.__all__ +
pyfits.convenience.__all__ + pyfits.diff.__all__ +
pyfits.hdu.__all__ +
['FITS_record', 'FITS_rec', 'open', 'Section', 'new_table',
'Header', 'VerifyError', 'TRUE', 'FALSE'] +
[g[0] for g in GLOBALS])
# These are of course deprecated, but a handful of external code still uses
# them
TRUE = True
FALSE = False
# Warnings routines
_formatwarning = warnings.formatwarning
def formatwarning(message, category, filename, lineno, line=None):
if issubclass(category, UserWarning):
return unicode(message) + '\n'
else:
if sys.version_info[:2] < (2, 6):
# Python versions prior to 2.6 don't support the line argument
return _formatwarning(message, category, filename, lineno)
else:
return _formatwarning(message, category, filename, lineno, line)
warnings.formatwarning = formatwarning
warnings.filterwarnings('always', category=UserWarning, append=True)
# This is a workaround for a bug that appears in some versions of Python 2.5
if sys.version_info[:2] < (2, 6):
import urllib
class ErrorURLopener(urllib.FancyURLopener):
"""A class to use with `urlretrieve` to allow `IOError` exceptions to be
raised when a file specified by a URL cannot be accessed.
"""
def http_error_default(self, url, fp, errcode, errmsg, headers):
raise IOError((errcode, errmsg, url))
    urllib._urlopener = ErrorURLopener() # Assign the locally subclassed opener
                                         # class to the urllib library
    urllib._urlopener.tempcache = {} # Initialize tempcache with an empty
                                     # dictionary to enable file caching
__credits__ = """
Copyright (C) 2004 Association of Universities for Research in Astronomy (AURA)
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following
disclaimer in the documentation and/or other materials provided
with the distribution.
3. The name of AURA and its representatives may not be used to
endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY AURA ``AS IS'' AND ANY EXPRESS OR IMPLIED
WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL AURA BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
DAMAGE.
"""
| gpl-3.0 | 2,083,739,005,804,689,400 | 2,288,885,300,740,557,600 | 32.95092 | 80 | 0.700398 | false |
317070/kaggle-heart | ira/configurations/gauss_roi10_maxout.py | 1 | 9185 | from collections import namedtuple
import lasagne as nn
from lasagne.layers.dnn import Conv2DDNNLayer, MaxPool2DDNNLayer
import data_iterators
import numpy as np
import theano.tensor as T
from functools import partial
import utils_heart
import nn_heart
from pathfinder import PKL_TRAIN_DATA_PATH, TRAIN_LABELS_PATH, PKL_VALIDATE_DATA_PATH
import utils
import data
caching = None
restart_from_save = None
rng = np.random.RandomState(42)
patch_size = (64, 64)
train_transformation_params = {
'patch_size': patch_size,
'mm_patch_size': (128, 128),
'mask_roi': True,
'rotation_range': (-180, 180),
'translation_range_x': (-5, 10),
'translation_range_y': (-10, 10),
'shear_range': (0, 0),
'roi_scale_range': (0.95, 1.3),
'zoom_range': (1 / 1.5, 1.5),
'do_flip': True,
'sequence_shift': False
}
valid_transformation_params = {
'patch_size': patch_size,
'mm_patch_size': (128, 128),
'mask_roi': True,
}
test_transformation_params = {
'patch_size': patch_size,
'mm_patch_size': (128, 128),
'mask_roi': True,
'rotation_range': (-180, 180),
'translation_range_x': (-5, 10),
'translation_range_y': (-10, 10),
'shear_range': (0, 0),
'roi_scale_range': (0.95, 1.3),
'zoom_range': (1., 1.),
'do_flip': True,
'sequence_shift': False
}
data_prep_fun = data.transform_norm_rescale_after
batch_size = 32
nbatches_chunk = 16
chunk_size = batch_size * nbatches_chunk
train_valid_ids = utils.get_train_valid_split(PKL_TRAIN_DATA_PATH)
train_data_iterator = data_iterators.SliceNormRescaleDataGenerator(data_path=PKL_TRAIN_DATA_PATH,
batch_size=chunk_size,
transform_params=train_transformation_params,
patient_ids=train_valid_ids['train'],
labels_path=TRAIN_LABELS_PATH,
slice2roi_path='pkl_train_slice2roi_10.pkl',
full_batch=True, random=True, infinite=True,
data_prep_fun=data_prep_fun)
valid_data_iterator = data_iterators.SliceNormRescaleDataGenerator(data_path=PKL_TRAIN_DATA_PATH,
batch_size=chunk_size,
transform_params=valid_transformation_params,
patient_ids=train_valid_ids['valid'],
labels_path=TRAIN_LABELS_PATH,
slice2roi_path='pkl_train_slice2roi_10.pkl',
full_batch=False, random=False, infinite=False,
data_prep_fun=data_prep_fun)
test_data_iterator = data_iterators.SliceNormRescaleDataGenerator(data_path=PKL_VALIDATE_DATA_PATH,
batch_size=chunk_size,
transform_params=test_transformation_params,
slice2roi_path='pkl_validate_slice2roi_10.pkl',
full_batch=False, random=False, infinite=False,
data_prep_fun=data_prep_fun)
nchunks_per_epoch = train_data_iterator.nsamples / chunk_size
max_nchunks = nchunks_per_epoch * 150
learning_rate_schedule = {
0: 0.0002,
int(max_nchunks * 0.1): 0.0001,
int(max_nchunks * 0.3): 0.000075,
int(max_nchunks * 0.6): 0.00005,
int(max_nchunks * 0.9): 0.00001
}
validate_every = 2 * nchunks_per_epoch
save_every = 2 * nchunks_per_epoch
conv3 = partial(Conv2DDNNLayer,
stride=(1, 1),
pad="same",
filter_size=(3, 3),
nonlinearity=nn.nonlinearities.very_leaky_rectify,
b=nn.init.Constant(0.1),
W=nn.init.Orthogonal("relu"))
max_pool = partial(MaxPool2DDNNLayer,
pool_size=(2, 2),
stride=(2, 2))
def build_model(l_in=None):
l_in = nn.layers.InputLayer((None, 30) + patch_size) if not l_in else l_in
l = conv3(l_in, num_filters=128)
l = conv3(l, num_filters=128)
l = max_pool(l)
l = conv3(l, num_filters=128)
l = conv3(l, num_filters=128)
l = max_pool(l)
l = conv3(l, num_filters=256)
l = conv3(l, num_filters=256)
l = conv3(l, num_filters=256)
l = max_pool(l)
l = conv3(l, num_filters=512)
l = conv3(l, num_filters=512)
l = conv3(l, num_filters=512)
l = max_pool(l)
l = conv3(l, num_filters=512)
l = conv3(l, num_filters=512)
l = conv3(l, num_filters=512)
l = max_pool(l)
l_d01 = nn.layers.DenseLayer(l, num_units=1024, W=nn.init.Orthogonal("relu"),
b=nn.init.Constant(0.1), nonlinearity=None)
l_d01 = nn.layers.FeaturePoolLayer(l_d01, pool_size=2)
l_d02 = nn.layers.DenseLayer(nn.layers.dropout(l_d01), num_units=1024, W=nn.init.Orthogonal("relu"),
b=nn.init.Constant(0.1), nonlinearity=None)
l_d02 = nn.layers.FeaturePoolLayer(l_d02, pool_size=2)
mu0 = nn.layers.DenseLayer(nn.layers.dropout(l_d02), num_units=1, W=nn.init.Orthogonal(),
b=nn.init.Constant(50), nonlinearity=nn_heart.lb_softplus())
sigma0 = nn.layers.DenseLayer(nn.layers.dropout(l_d02), num_units=1, W=nn.init.Orthogonal(),
b=nn.init.Constant(10), nonlinearity=nn_heart.lb_softplus())
l_cdf0 = nn_heart.NormalCDFLayer(mu0, sigma0, sigma_logscale=False, mu_logscale=False)
# ---------------------------------------------------------------
l_d11 = nn.layers.DenseLayer(l, num_units=1024, W=nn.init.Orthogonal("relu"),
b=nn.init.Constant(0.1), nonlinearity=None)
l_d11 = nn.layers.FeaturePoolLayer(l_d11, pool_size=2)
l_d12 = nn.layers.DenseLayer(nn.layers.dropout(l_d11), num_units=1024, W=nn.init.Orthogonal("relu"),
b=nn.init.Constant(0.1), nonlinearity=None)
l_d12 = nn.layers.FeaturePoolLayer(l_d12, pool_size=2)
mu1 = nn.layers.DenseLayer(nn.layers.dropout(l_d12), num_units=1, W=nn.init.Orthogonal(),
b=nn.init.Constant(100), nonlinearity=nn_heart.lb_softplus())
sigma1 = nn.layers.DenseLayer(nn.layers.dropout(l_d12), num_units=1, W=nn.init.Orthogonal(),
b=nn.init.Constant(10), nonlinearity=nn_heart.lb_softplus())
l_cdf1 = nn_heart.NormalCDFLayer(mu1, sigma1, sigma_logscale=False, mu_logscale=False)
l_outs = [l_cdf0, l_cdf1]
l_top = nn.layers.MergeLayer(l_outs)
l_target_mu0 = nn.layers.InputLayer((None, 1))
l_target_mu1 = nn.layers.InputLayer((None, 1))
l_targets = [l_target_mu0, l_target_mu1]
dense_layers = [l_d01, l_d02, l_d11, l_d12, mu0, sigma0, mu0, mu1]
mu_layers = [mu0, mu1]
sigma_layers = [sigma0, sigma1]
return namedtuple('Model', ['l_ins', 'l_outs', 'l_targets', 'l_top', 'dense_layers', 'mu_layers', 'sigma_layers'])(
[l_in], l_outs, l_targets,
l_top, dense_layers, mu_layers, sigma_layers)
def build_objective(model, deterministic=False):
p0 = nn.layers.get_output(model.l_outs[0], deterministic=deterministic)
t0 = nn.layers.get_output(model.l_targets[0])
t0_heaviside = nn_heart.heaviside(t0)
crps0 = T.mean((p0 - t0_heaviside) ** 2)
p1 = nn.layers.get_output(model.l_outs[1], deterministic=deterministic)
t1 = nn.layers.get_output(model.l_targets[1])
t1_heaviside = nn_heart.heaviside(t1)
crps1 = T.mean((p1 - t1_heaviside) ** 2)
return 0.5 * (crps0 + crps1)
def build_updates(train_loss, model, learning_rate):
updates = nn.updates.adam(train_loss, nn.layers.get_all_params(model.l_top), learning_rate)
return updates
def get_mean_validation_loss(batch_predictions, batch_targets):
return [0, 0]
def get_mean_crps_loss(batch_predictions, batch_targets, batch_ids):
nbatches = len(batch_predictions)
npredictions = len(batch_predictions[0])
crpss = []
for i in xrange(npredictions):
p, t = [], []
for j in xrange(nbatches):
p.append(batch_predictions[j][i])
t.append(batch_targets[j][i])
p, t = np.vstack(p), np.vstack(t)
target_cdf = utils_heart.heaviside_function(t)
crpss.append(np.mean((p - target_cdf) ** 2))
return crpss
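# A minimal numeric sketch of the CRPS-style score computed above (purely
# illustrative): the prediction is a CDF and the target is a heaviside step,
# so the score is their mean squared difference, e.g.
#   p = np.array([0.1, 0.6, 0.9, 1.0])
#   t = np.array([0.0, 1.0, 1.0, 1.0])
#   np.mean((p - t) ** 2)  # -> 0.045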
def get_avg_patient_predictions(batch_predictions, batch_patient_ids, mean):
return utils_heart.get_patient_average_cdf_predictions(batch_predictions, batch_patient_ids, mean)
| mit | 651,194,890,640,461,200 | -2,377,358,736,067,673,600 | 39.10917 | 119 | 0.552314 | false |
mingwpy/numpy | numpy/lib/__init__.py | 114 | 1146 | from __future__ import division, absolute_import, print_function
import math
from .info import __doc__
from numpy.version import version as __version__
from .type_check import *
from .index_tricks import *
from .function_base import *
from .nanfunctions import *
from .shape_base import *
from .stride_tricks import *
from .twodim_base import *
from .ufunclike import *
from . import scimath as emath
from .polynomial import *
#import convertcode
from .utils import *
from .arraysetops import *
from .npyio import *
from .financial import *
from .arrayterator import *
from .arraypad import *
from ._version import *
__all__ = ['emath', 'math']
__all__ += type_check.__all__
__all__ += index_tricks.__all__
__all__ += function_base.__all__
__all__ += shape_base.__all__
__all__ += stride_tricks.__all__
__all__ += twodim_base.__all__
__all__ += ufunclike.__all__
__all__ += arraypad.__all__
__all__ += polynomial.__all__
__all__ += utils.__all__
__all__ += arraysetops.__all__
__all__ += npyio.__all__
__all__ += financial.__all__
__all__ += nanfunctions.__all__
from numpy.testing import Tester
test = Tester().test
bench = Tester().bench
| bsd-3-clause | 2,936,232,700,022,499,000 | 5,387,430,456,441,401,000 | 23.913043 | 64 | 0.657941 | false |
chouseknecht/ansible | test/units/modules/network/nxos/test_nxos_nxapi.py | 68 | 3057 | # (c) 2016 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from units.compat.mock import patch
from ansible.modules.network.nxos import nxos_nxapi
from .nxos_module import TestNxosModule, load_fixture, set_module_args
class TestNxosNxapiModule(TestNxosModule):
module = nxos_nxapi
def setUp(self):
super(TestNxosNxapiModule, self).setUp()
self.mock_run_commands = patch('ansible.modules.network.nxos.nxos_nxapi.run_commands')
self.run_commands = self.mock_run_commands.start()
self.mock_load_config = patch('ansible.modules.network.nxos.nxos_nxapi.load_config')
self.load_config = self.mock_load_config.start()
self.mock_get_capabilities = patch('ansible.modules.network.nxos.nxos_nxapi.get_capabilities')
self.get_capabilities = self.mock_get_capabilities.start()
self.get_capabilities.return_value = {'device_info': {'network_os_platform': 'N7K-C7018', 'network_os_version': '8.3(1)'}, 'network_api': 'cliconf'}
def tearDown(self):
super(TestNxosNxapiModule, self).tearDown()
self.mock_run_commands.stop()
self.mock_load_config.stop()
self.mock_get_capabilities.stop()
def load_fixtures(self, commands=None, device=''):
def load_from_file(*args, **kwargs):
module, commands = args
module_name = self.module.__name__.rsplit('.', 1)[1]
output = list()
for command in commands:
filename = str(command).split(' | ')[0].replace(' ', '_')
output.append(load_fixture(module_name, filename, device))
return output
self.run_commands.side_effect = load_from_file
self.load_config.return_value = None
def test_nxos_nxapi_no_change(self):
set_module_args(dict(http=True, https=False, http_port=80, https_port=443, sandbox=False))
self.execute_module_devices(changed=False, commands=[])
def test_nxos_nxapi_disable(self):
set_module_args(dict(state='absent'))
self.execute_module_devices(changed=True, commands=['no feature nxapi'])
def test_nxos_nxapi_no_http(self):
set_module_args(dict(https=True, http=False, https_port=8443))
self.execute_module_devices(changed=True, commands=['no nxapi http', 'nxapi https port 8443'])
| gpl-3.0 | 2,316,368,884,903,001,600 | 962,516,277,986,804,500 | 40.310811 | 156 | 0.682368 | false |
SafeW3rd/Ciphers | primeSieve.py | 1 | 1139 | # Prime Number Sieve
# http://inventwithpython.com/hacking (BSD Licensed)
import math
def isPrime(num):
# Returns True if num is a prime number, otherwise False.
# Note: Generally, isPrime() is slower than primeSieve().
# all numbers less than 2 are not prime
if num < 2:
return False
# see if num is divisible by any number up to the square root of num
for i in range(2, int(math.sqrt(num)) + 1):
if num % i == 0:
return False
return True
def primeSieve(sieveSize):
# Returns a list of prime numbers calculated using
# the Sieve of Eratosthenes algorithm.
sieve = [True] * sieveSize
sieve[0] = False # zero and one are not prime numbers
sieve[1] = False
# create the sieve
for i in range(2, int(math.sqrt(sieveSize)) + 1):
pointer = i * 2
while pointer < sieveSize:
sieve[pointer] = False
pointer += i
# compile the list of primes
primes = []
for i in range(sieveSize):
if sieve[i] == True:
primes.append(i)
return primes
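# A minimal usage sketch:
if __name__ == '__main__':
    print(primeSieve(20))  # [2, 3, 5, 7, 11, 13, 17, 19]
    print(isPrime(21))     # False, since 21 = 3 * 7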
| mit | -1,386,377,077,298,507,300 | 1,534,785,037,177,919,000 | 23.886364 | 72 | 0.587357 | false |
pku9104038/edx-platform | common/lib/xmodule/xmodule/open_ended_grading_classes/grading_service_module.py | 17 | 5131 | # This class gives a common interface for logging into the grading controller
import json
import logging
import requests
from requests.exceptions import RequestException, ConnectionError, HTTPError
from .combined_open_ended_rubric import CombinedOpenEndedRubric, RubricParsingError
from lxml import etree
log = logging.getLogger(__name__)
class GradingServiceError(Exception):
"""
Exception for grading service. Shown when Open Response Assessment servers cannot be reached.
"""
pass
class GradingService(object):
"""
Interface to staff grading backend.
"""
def __init__(self, config):
self.username = config['username']
self.password = config['password']
self.session = requests.Session()
self.system = config['system']
def _login(self):
"""
Log into the staff grading service.
Raises requests.exceptions.HTTPError if something goes wrong.
Returns the decoded json dict of the response.
"""
response = self.session.post(self.login_url,
{'username': self.username,
'password': self.password, })
response.raise_for_status()
return response.json()
def post(self, url, data, allow_redirects=False):
"""
Make a post request to the grading controller
"""
try:
op = lambda: self.session.post(url, data=data,
allow_redirects=allow_redirects)
r = self._try_with_login(op)
except (RequestException, ConnectionError, HTTPError) as err:
# reraise as promised GradingServiceError, but preserve stacktrace.
#This is a dev_facing_error
error_string = "Problem posting data to the grading controller. URL: {0}, data: {1}".format(url, data)
log.error(error_string)
raise GradingServiceError(error_string)
return r.text
def get(self, url, params, allow_redirects=False):
"""
Make a get request to the grading controller
"""
op = lambda: self.session.get(url,
allow_redirects=allow_redirects,
params=params)
try:
r = self._try_with_login(op)
except (RequestException, ConnectionError, HTTPError) as err:
# reraise as promised GradingServiceError, but preserve stacktrace.
#This is a dev_facing_error
error_string = "Problem getting data from the grading controller. URL: {0}, params: {1}".format(url, params)
log.error(error_string)
raise GradingServiceError(error_string)
return r.text
def _try_with_login(self, operation):
"""
Call operation(), which should return a requests response object. If
the request fails with a 'login_required' error, call _login() and try
the operation again.
Returns the result of operation(). Does not catch exceptions.
"""
response = operation()
resp_json = response.json()
if (resp_json
and resp_json.get('success') is False
and resp_json.get('error') == 'login_required'):
            # apparently we aren't logged in. Try to fix that.
r = self._login()
if r and not r.get('success'):
log.warning("Couldn't log into staff_grading backend. Response: %s",
r)
# try again
response = operation()
response.raise_for_status()
return response
def _render_rubric(self, response, view_only=False):
"""
Given an HTTP Response with the key 'rubric', render out the html
required to display the rubric and put it back into the response
returns the updated response as a dictionary that can be serialized later
"""
try:
response_json = json.loads(response)
except:
response_json = response
try:
if 'rubric' in response_json:
rubric = response_json['rubric']
rubric_renderer = CombinedOpenEndedRubric(self.system, view_only)
rubric_dict = rubric_renderer.render_rubric(rubric)
success = rubric_dict['success']
rubric_html = rubric_dict['html']
response_json['rubric'] = rubric_html
return response_json
# if we can't parse the rubric into HTML,
        except (etree.XMLSyntaxError, RubricParsingError):
#This is a dev_facing_error
log.exception("Cannot parse rubric string. Raw string: {0}"
.format(rubric))
return {'success': False,
'error': 'Error displaying submission'}
except ValueError:
#This is a dev_facing_error
log.exception("Error parsing response: {0}".format(response))
return {'success': False,
'error': "Error displaying submission"}
| agpl-3.0 | 1,244,898,171,310,214,100 | -4,112,495,742,972,476,400 | 35.913669 | 121 | 0.582538 | false |
bufferapp/buffer-django-nonrel | django/contrib/sitemaps/tests/basic.py | 155 | 7620 | import os
from datetime import date
from django.conf import settings
from django.contrib.auth.models import User
from django.contrib.sitemaps import Sitemap
from django.contrib.sites.models import Site
from django.core.exceptions import ImproperlyConfigured
from django.test import TestCase
from django.utils.unittest import skipUnless
from django.utils.formats import localize
from django.utils.translation import activate, deactivate
class SitemapTests(TestCase):
urls = 'django.contrib.sitemaps.tests.urls'
def setUp(self):
if Site._meta.installed:
self.base_url = 'http://example.com'
else:
self.base_url = 'http://testserver'
self.old_USE_L10N = settings.USE_L10N
self.old_Site_meta_installed = Site._meta.installed
self.old_TEMPLATE_DIRS = settings.TEMPLATE_DIRS
self.old_Site_meta_installed = Site._meta.installed
settings.TEMPLATE_DIRS = (
os.path.join(os.path.dirname(__file__), 'templates'),
)
# Create a user that will double as sitemap content
User.objects.create_user('testuser', 'test@example.com', 's3krit')
def tearDown(self):
settings.USE_L10N = self.old_USE_L10N
Site._meta.installed = self.old_Site_meta_installed
settings.TEMPLATE_DIRS = self.old_TEMPLATE_DIRS
Site._meta.installed = self.old_Site_meta_installed
def test_simple_sitemap_index(self):
"A simple sitemap index can be rendered"
# Retrieve the sitemap.
response = self.client.get('/simple/index.xml')
# Check for all the important bits:
self.assertEqual(response.content, """<?xml version="1.0" encoding="UTF-8"?>
<sitemapindex xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">
<sitemap><loc>%s/simple/sitemap-simple.xml</loc></sitemap>
</sitemapindex>
""" % self.base_url)
def test_simple_sitemap_custom_index(self):
"A simple sitemap index can be rendered with a custom template"
# Retrieve the sitemap.
response = self.client.get('/simple/custom-index.xml')
# Check for all the important bits:
self.assertEqual(response.content, """<?xml version="1.0" encoding="UTF-8"?>
<!-- This is a customised template -->
<sitemapindex xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">
<sitemap><loc>%s/simple/sitemap-simple.xml</loc></sitemap>
</sitemapindex>
""" % self.base_url)
def test_simple_sitemap(self):
"A simple sitemap can be rendered"
# Retrieve the sitemap.
response = self.client.get('/simple/sitemap.xml')
# Check for all the important bits:
self.assertEqual(response.content, """<?xml version="1.0" encoding="UTF-8"?>
<urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">
<url><loc>%s/location/</loc><lastmod>%s</lastmod><changefreq>never</changefreq><priority>0.5</priority></url>
</urlset>
""" % (self.base_url, date.today().strftime('%Y-%m-%d')))
def test_simple_custom_sitemap(self):
"A simple sitemap can be rendered with a custom template"
# Retrieve the sitemap.
response = self.client.get('/simple/custom-sitemap.xml')
# Check for all the important bits:
self.assertEqual(response.content, """<?xml version="1.0" encoding="UTF-8"?>
<!-- This is a customised template -->
<urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">
<url><loc>%s/location/</loc><lastmod>%s</lastmod><changefreq>never</changefreq><priority>0.5</priority></url>
</urlset>
""" % (self.base_url, date.today().strftime('%Y-%m-%d')))
@skipUnless(settings.USE_I18N, "Internationalization is not enabled")
def test_localized_priority(self):
"The priority value should not be localized (Refs #14164)"
# Localization should be active
settings.USE_L10N = True
activate('fr')
self.assertEqual(u'0,3', localize(0.3))
# Retrieve the sitemap. Check that priorities
# haven't been rendered in localized format
response = self.client.get('/simple/sitemap.xml')
self.assertContains(response, '<priority>0.5</priority>')
self.assertContains(response, '<lastmod>%s</lastmod>' % date.today().strftime('%Y-%m-%d'))
deactivate()
def test_generic_sitemap(self):
"A minimal generic sitemap can be rendered"
# Retrieve the sitemap.
response = self.client.get('/generic/sitemap.xml')
expected = ''
for username in User.objects.values_list("username", flat=True):
expected += "<url><loc>%s/users/%s/</loc></url>" % (self.base_url, username)
# Check for all the important bits:
self.assertEqual(response.content, """<?xml version="1.0" encoding="UTF-8"?>
<urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">
%s
</urlset>
""" % expected)
@skipUnless("django.contrib.flatpages" in settings.INSTALLED_APPS, "django.contrib.flatpages app not installed.")
def test_flatpage_sitemap(self):
"Basic FlatPage sitemap test"
# Import FlatPage inside the test so that when django.contrib.flatpages
# is not installed we don't get problems trying to delete Site
# objects (FlatPage has an M2M to Site, Site.delete() tries to
# delete related objects, but the M2M table doesn't exist.
from django.contrib.flatpages.models import FlatPage
public = FlatPage.objects.create(
url=u'/public/',
title=u'Public Page',
enable_comments=True,
registration_required=False,
)
public.sites.add(settings.SITE_ID)
private = FlatPage.objects.create(
url=u'/private/',
title=u'Private Page',
enable_comments=True,
registration_required=True
)
private.sites.add(settings.SITE_ID)
response = self.client.get('/flatpages/sitemap.xml')
# Public flatpage should be in the sitemap
self.assertContains(response, '<loc>%s%s</loc>' % (self.base_url, public.url))
# Private flatpage should not be in the sitemap
self.assertNotContains(response, '<loc>%s%s</loc>' % (self.base_url, private.url))
def test_requestsite_sitemap(self):
# Make sure hitting the flatpages sitemap without the sites framework
# installed doesn't raise an exception
Site._meta.installed = False
# Retrieve the sitemap.
response = self.client.get('/simple/sitemap.xml')
# Check for all the important bits:
self.assertEqual(response.content, """<?xml version="1.0" encoding="UTF-8"?>
<urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">
<url><loc>http://testserver/location/</loc><lastmod>%s</lastmod><changefreq>never</changefreq><priority>0.5</priority></url>
</urlset>
""" % date.today().strftime('%Y-%m-%d'))
@skipUnless("django.contrib.sites" in settings.INSTALLED_APPS, "django.contrib.sites app not installed.")
def test_sitemap_get_urls_no_site_1(self):
"""
Check we get ImproperlyConfigured if we don't pass a site object to
Sitemap.get_urls and no Site objects exist
"""
Site.objects.all().delete()
self.assertRaises(ImproperlyConfigured, Sitemap().get_urls)
def test_sitemap_get_urls_no_site_2(self):
"""
Check we get ImproperlyConfigured when we don't pass a site object to
Sitemap.get_urls if Site objects exists, but the sites framework is not
actually installed.
"""
Site._meta.installed = False
self.assertRaises(ImproperlyConfigured, Sitemap().get_urls)
| bsd-3-clause | 5,953,282,001,767,166,000 | -2,524,149,230,830,999,000 | 43.046243 | 124 | 0.659055 | false |
bslatkin/8-bits | appengine-mapreduce/python/test/testlib/testutil.py | 2 | 4505 | #!/usr/bin/env python
#
# Copyright 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test utilities for mapreduce framework.
"""
# Disable "Invalid method name"
# pylint: disable-msg=C6409
# os_compat must be first to ensure timezones are UTC.
# Disable "unused import" and "invalid import order"
# pylint: disable-msg=W0611
from google.appengine.tools import os_compat
# pylint: enable-msg=W0611
from testlib import mox
import os
import shutil
import sys
import tempfile
import unittest
import urllib
from google.appengine.api import apiproxy_stub_map
from google.appengine.api.files import file_service_stub
from google.appengine.api.blobstore import blobstore_stub
from google.appengine.api import datastore_file_stub
from google.appengine.api import queueinfo
from google.appengine.api.blobstore import file_blob_storage
from google.appengine.api.memcache import memcache_stub
from google.appengine.api.taskqueue import taskqueue_stub
class MatchesDatastoreConfig(mox.Comparator):
"""Mox comparator for MatchesDatastoreConfig objects."""
def __init__(self, **kwargs):
self.kwargs = kwargs
def equals(self, config):
"""Check to see if config matches arguments."""
if self.kwargs.get("deadline", None) != config.deadline:
return False
if self.kwargs.get("force_writes", None) != config.force_writes:
return False
return True
def __repr__(self):
return "MatchesDatastoreConfig(%s)" % self.kwargs
class MatchesUserRPC(mox.Comparator):
"""Mox comparator for UserRPC objects."""
def __init__(self, **kwargs):
self.kwargs = kwargs
def equals(self, rpc):
"""Check to see if rpc matches arguments."""
if self.kwargs.get("deadline", None) != rpc.deadline:
return False
return True
def __repr__(self):
return "MatchesUserRPC(%s)" % self.kwargs
class HandlerTestBase(unittest.TestCase):
"""Base class for all webapp.RequestHandler tests."""
MAPREDUCE_URL = "/_ah/mapreduce/kickoffjob_callback"
def setUp(self):
unittest.TestCase.setUp(self)
self.mox = mox.Mox()
self.appid = "testapp"
self.version_id = "1.23456789"
os.environ["APPLICATION_ID"] = self.appid
os.environ["CURRENT_VERSION_ID"] = self.version_id
os.environ["HTTP_HOST"] = "localhost"
self.memcache = memcache_stub.MemcacheServiceStub()
self.taskqueue = taskqueue_stub.TaskQueueServiceStub()
self.taskqueue.queue_yaml_parser = (
lambda x: queueinfo.LoadSingleQueue(
"queue:\n"
"- name: default\n"
" rate: 10/s\n"
"- name: crazy-queue\n"
" rate: 2000/d\n"
" bucket_size: 10\n"))
self.datastore = datastore_file_stub.DatastoreFileStub(
self.appid, "/dev/null", "/dev/null")
self.blob_storage_directory = tempfile.mkdtemp()
blob_storage = file_blob_storage.FileBlobStorage(
self.blob_storage_directory, self.appid)
self.blobstore_stub = blobstore_stub.BlobstoreServiceStub(blob_storage)
self.file_service = self.createFileServiceStub(blob_storage)
apiproxy_stub_map.apiproxy = apiproxy_stub_map.APIProxyStubMap()
apiproxy_stub_map.apiproxy.RegisterStub("taskqueue", self.taskqueue)
apiproxy_stub_map.apiproxy.RegisterStub("memcache", self.memcache)
apiproxy_stub_map.apiproxy.RegisterStub("datastore_v3", self.datastore)
apiproxy_stub_map.apiproxy.RegisterStub("blobstore", self.blobstore_stub)
apiproxy_stub_map.apiproxy.RegisterStub("file", self.file_service)
def createFileServiceStub(self, blob_storage):
return file_service_stub.FileServiceStub(blob_storage)
def tearDown(self):
try:
self.mox.VerifyAll()
finally:
self.mox.UnsetStubs()
shutil.rmtree(self.blob_storage_directory)
unittest.TestCase.tearDown(self)
def assertTaskStarted(self, queue="default"):
tasks = self.taskqueue.GetTasks(queue)
self.assertEquals(1, len(tasks))
self.assertEquals(tasks[0]["url"], self.MAPREDUCE_URL)
| apache-2.0 | -2,532,318,332,842,190,300 | 8,211,313,992,373,491,000 | 31.644928 | 77 | 0.716093 | false |
sursum/buckanjaren | buckanjaren/lib/python3.5/site-packages/psycopg2/tests/test_bug_gc.py | 16 | 1723 | #!/usr/bin/env python
# bug_gc.py - test for refcounting/GC bug
#
# Copyright (C) 2010-2011 Federico Di Gregorio <fog@debian.org>
#
# psycopg2 is free software: you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# In addition, as a special exception, the copyright holders give
# permission to link this program with the OpenSSL library (or with
# modified versions of OpenSSL that use the same license as OpenSSL),
# and distribute linked combinations including the two.
#
# You must obey the GNU Lesser General Public License in all respects for
# all of the code used other than OpenSSL.
#
# psycopg2 is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
# License for more details.
import psycopg2
import psycopg2.extensions
import unittest
import gc
from .testutils import ConnectingTestCase, skip_if_no_uuid
class StolenReferenceTestCase(ConnectingTestCase):
@skip_if_no_uuid
def test_stolen_reference_bug(self):
def fish(val, cur):
gc.collect()
return 42
UUID = psycopg2.extensions.new_type((2950,), "UUID", fish)
psycopg2.extensions.register_type(UUID, self.conn)
curs = self.conn.cursor()
curs.execute("select 'b5219e01-19ab-4994-b71e-149225dc51e4'::uuid")
curs.fetchone()
def test_suite():
return unittest.TestLoader().loadTestsFromName(__name__)
if __name__ == "__main__":
unittest.main()
| mit | -7,841,025,691,556,243,000 | 2,435,332,462,837,745,000 | 32.784314 | 75 | 0.726059 | false |
ual/urbansim | urbansim/utils/tests/test_misc.py | 5 | 3159 | import os
import shutil
import numpy as np
import pandas as pd
import pytest
from .. import misc
class _FakeTable(object):
def __init__(self, name, columns):
self.name = name
self.columns = columns
@pytest.fixture
def fta():
return _FakeTable('a', ['aa', 'ab', 'ac'])
@pytest.fixture
def ftb():
return _FakeTable('b', ['bx', 'by', 'bz'])
@pytest.fixture
def clean_fake_data_home(request):
def fin():
if os.path.isdir('fake_data_home'):
shutil.rmtree('fake_data_home')
request.addfinalizer(fin)
def test_column_map_raises(fta, ftb):
with pytest.raises(RuntimeError):
misc.column_map([fta, ftb], ['aa', 'by', 'bz', 'cw'])
def test_column_map_none(fta, ftb):
assert misc.column_map([fta, ftb], None) == {'a': None, 'b': None}
def test_column_map(fta, ftb):
assert misc.column_map([fta, ftb], ['aa', 'by', 'bz']) == \
{'a': ['aa'], 'b': ['by', 'bz']}
assert misc.column_map([fta, ftb], ['by', 'bz']) == \
{'a': [], 'b': ['by', 'bz']}
def test_dirs(clean_fake_data_home):
misc._mkifnotexists("fake_data_home")
os.environ["DATA_HOME"] = "fake_data_home"
misc.get_run_number()
misc.get_run_number()
misc.data_dir()
misc.configs_dir()
misc.models_dir()
misc.charts_dir()
misc.maps_dir()
misc.simulations_dir()
misc.reports_dir()
misc.runs_dir()
misc.config("test")
@pytest.fixture
def range_df():
df = pd.DataFrame({'to_zone_id': [2, 3, 4],
'from_zone_id': [1, 1, 1],
'distance': [.1, .2, .9]})
df = df.set_index(['from_zone_id', 'to_zone_id'])
return df
@pytest.fixture
def range_series():
return pd.Series([10, 150, 75, 275], index=[1, 2, 3, 4])
def test_compute_range(range_df, range_series):
assert misc.compute_range(range_df, range_series, "distance", .5).loc[1] == 225
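    # 225 = 150 + 75: zones 2 and 3 lie within the 0.5 radius of zone 1
    # (distances 0.1 and 0.2), while zone 4 (0.9) falls outside it.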
def test_reindex():
s = pd.Series([.5, 1.0, 1.5], index=[2, 1, 3])
s2 = pd.Series([1, 2, 3], index=['a', 'b', 'c'])
assert list(misc.reindex(s, s2).values) == [1.0, .5, 1.5]
def test_naics():
assert misc.naicsname(54) == "Professional"
def test_signif():
assert misc.signif(4.0) == '***'
assert misc.signif(3.0) == '**'
assert misc.signif(2.0) == '*'
assert misc.signif(1.5) == '.'
assert misc.signif(1.0) == ''
@pytest.fixture
def simple_dev_inputs():
return pd.DataFrame(
{'residential': [40, 40, 40],
'office': [15, 18, 15],
'retail': [12, 10, 10],
'industrial': [12, 12, 12],
'land_cost': [1000000, 2000000, 3000000],
'parcel_size': [10000, 20000, 30000],
'max_far': [2.0, 3.0, 4.0],
'names': ['a', 'b', 'c'],
'max_height': [40, 60, 80]},
index=['a', 'b', 'c'])
def test_misc_dffunctions(simple_dev_inputs):
misc.df64bitto32bit(simple_dev_inputs)
misc.pandasdfsummarytojson(simple_dev_inputs[['land_cost', 'parcel_size']])
misc.numpymat2df(np.array([[1, 2], [3, 4]]))
def test_column_list(fta, ftb):
assert misc.column_list([fta, ftb], ['aa', 'by', 'bz', 'c']) == \
['aa', 'by', 'bz']
| bsd-3-clause | -4,928,665,186,652,112,000 | -367,967,868,633,933,400 | 24.071429 | 83 | 0.553656 | false |
tienjunhsu/trading-with-python | lib/widgets.py | 78 | 3012 | # -*- coding: utf-8 -*-
"""
A collection of widgets for gui building
Copyright: Jev Kuznetsov
License: BSD
"""
from __future__ import division
import sys
from PyQt4.QtCore import *
from PyQt4.QtGui import *
import numpy as np
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.backends.backend_qt4agg import NavigationToolbar2QTAgg as NavigationToolbar
from matplotlib.figure import Figure
import matplotlib.pyplot as plt
class MatplotlibWidget(QWidget):
def __init__(self,parent=None,grid=True):
QWidget.__init__(self,parent)
self.grid = grid
self.fig = Figure()
self.canvas =FigureCanvas(self.fig)
self.canvas.setParent(self)
self.canvas.mpl_connect('button_press_event', self.onPick) # bind pick event
#self.axes = self.fig.add_subplot(111)
margins = [0.05,0.1,0.9,0.8]
self.axes = self.fig.add_axes(margins)
self.toolbar = NavigationToolbar(self.canvas,self)
#self.initFigure()
layout = QVBoxLayout()
layout.addWidget(self.toolbar)
layout.addWidget(self.canvas)
self.setLayout(layout)
def onPick(self,event):
print 'Pick event'
print 'you pressed', event.button, event.xdata, event.ydata
def update(self):
self.canvas.draw()
def plot(self,*args,**kwargs):
self.axes.plot(*args,**kwargs)
self.axes.grid(self.grid)
self.update()
def clear(self):
self.axes.clear()
def initFigure(self):
self.axes.grid(True)
x = np.linspace(-1,1)
y = x**2
self.axes.plot(x,y,'o-')
class PlotWindow(QMainWindow):
''' a stand-alone window with embedded matplotlib widget '''
def __init__(self,parent=None):
super(PlotWindow,self).__init__(parent)
self.setAttribute(Qt.WA_DeleteOnClose)
self.mplWidget = MatplotlibWidget()
self.setCentralWidget(self.mplWidget)
def plot(self,dataFrame):
''' plot dataframe '''
dataFrame.plot(ax=self.mplWidget.axes)
def getAxes(self):
return self.mplWidget.axes
def getFigure(self):
return self.mplWidget.fig
def update(self):
self.mplWidget.update()
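# A minimal usage sketch (assumes a pandas DataFrame `df` and a running
# QApplication):
#   win = PlotWindow()
#   win.plot(df)   # draws the frame onto the embedded axes
#   win.show()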
class MainForm(QMainWindow):
def __init__(self, parent=None):
QMainWindow.__init__(self, parent)
self.setWindowTitle('Demo: PyQt with matplotlib')
self.plot = MatplotlibWidget()
self.setCentralWidget(self.plot)
self.plot.clear()
self.plot.plot(np.random.rand(10),'x-')
#---------------------
if __name__=='__main__':
app = QApplication(sys.argv)
form = MainForm()
form.show()
    app.exec_()
 | bsd-3-clause | 8,002,041,672,184,118,000 | 8,816,667,026,962,562,000 | 25.4 | 91 | 0.572377 | false |
home-assistant/home-assistant | homeassistant/components/sighthound/image_processing.py | 4 | 5695 | """Person detection using Sighthound cloud service."""
import io
import logging
from pathlib import Path
from PIL import Image, ImageDraw, UnidentifiedImageError
import simplehound.core as hound
import voluptuous as vol
from homeassistant.components.image_processing import (
CONF_ENTITY_ID,
CONF_NAME,
CONF_SOURCE,
PLATFORM_SCHEMA,
ImageProcessingEntity,
)
from homeassistant.const import ATTR_ENTITY_ID, CONF_API_KEY
from homeassistant.core import split_entity_id
import homeassistant.helpers.config_validation as cv
import homeassistant.util.dt as dt_util
from homeassistant.util.pil import draw_box
_LOGGER = logging.getLogger(__name__)
EVENT_PERSON_DETECTED = "sighthound.person_detected"
ATTR_BOUNDING_BOX = "bounding_box"
ATTR_PEOPLE = "people"
CONF_ACCOUNT_TYPE = "account_type"
CONF_SAVE_FILE_FOLDER = "save_file_folder"
CONF_SAVE_TIMESTAMPTED_FILE = "save_timestamped_file"
DATETIME_FORMAT = "%Y-%m-%d_%H:%M:%S"
DEV = "dev"
PROD = "prod"
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_API_KEY): cv.string,
vol.Optional(CONF_ACCOUNT_TYPE, default=DEV): vol.In([DEV, PROD]),
vol.Optional(CONF_SAVE_FILE_FOLDER): cv.isdir,
vol.Optional(CONF_SAVE_TIMESTAMPTED_FILE, default=False): cv.boolean,
}
)
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the platform."""
# Validate credentials by processing image.
api_key = config[CONF_API_KEY]
account_type = config[CONF_ACCOUNT_TYPE]
api = hound.cloud(api_key, account_type)
try:
api.detect(b"Test")
except hound.SimplehoundException as exc:
_LOGGER.error("Sighthound error %s setup aborted", exc)
return
save_file_folder = config.get(CONF_SAVE_FILE_FOLDER)
if save_file_folder:
save_file_folder = Path(save_file_folder)
entities = []
for camera in config[CONF_SOURCE]:
sighthound = SighthoundEntity(
api,
camera[CONF_ENTITY_ID],
camera.get(CONF_NAME),
save_file_folder,
config[CONF_SAVE_TIMESTAMPTED_FILE],
)
entities.append(sighthound)
add_entities(entities)
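# A hypothetical configuration.yaml entry matching the schema above (entity id
# and secret name are placeholders):
#
#   image_processing:
#     - platform: sighthound
#       api_key: !secret sighthound_api_key
#       save_file_folder: /config/www/sighthound/
#       source:
#         - entity_id: camera.front_door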
class SighthoundEntity(ImageProcessingEntity):
"""Create a sighthound entity."""
def __init__(
self, api, camera_entity, name, save_file_folder, save_timestamped_file
):
"""Init."""
self._api = api
self._camera = camera_entity
if name:
self._name = name
else:
camera_name = split_entity_id(camera_entity)[1]
self._name = f"sighthound_{camera_name}"
self._state = None
self._last_detection = None
self._image_width = None
self._image_height = None
self._save_file_folder = save_file_folder
self._save_timestamped_file = save_timestamped_file
def process_image(self, image):
"""Process an image."""
detections = self._api.detect(image)
people = hound.get_people(detections)
self._state = len(people)
if self._state > 0:
self._last_detection = dt_util.now().strftime(DATETIME_FORMAT)
metadata = hound.get_metadata(detections)
self._image_width = metadata["image_width"]
self._image_height = metadata["image_height"]
for person in people:
self.fire_person_detected_event(person)
if self._save_file_folder and self._state > 0:
self.save_image(image, people, self._save_file_folder)
def fire_person_detected_event(self, person):
"""Send event with detected total_persons."""
self.hass.bus.fire(
EVENT_PERSON_DETECTED,
{
ATTR_ENTITY_ID: self.entity_id,
ATTR_BOUNDING_BOX: hound.bbox_to_tf_style(
person["boundingBox"], self._image_width, self._image_height
),
},
)
def save_image(self, image, people, directory):
"""Save a timestamped image with bounding boxes around targets."""
try:
img = Image.open(io.BytesIO(bytearray(image))).convert("RGB")
except UnidentifiedImageError:
_LOGGER.warning("Sighthound unable to process image, bad data")
return
draw = ImageDraw.Draw(img)
for person in people:
box = hound.bbox_to_tf_style(
person["boundingBox"], self._image_width, self._image_height
)
draw_box(draw, box, self._image_width, self._image_height)
latest_save_path = directory / f"{self._name}_latest.jpg"
img.save(latest_save_path)
if self._save_timestamped_file:
timestamp_save_path = directory / f"{self._name}_{self._last_detection}.jpg"
img.save(timestamp_save_path)
_LOGGER.info("Sighthound saved file %s", timestamp_save_path)
@property
def camera_entity(self):
"""Return camera entity id from process pictures."""
return self._camera
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def should_poll(self):
"""Return the polling state."""
return False
@property
def state(self):
"""Return the state of the entity."""
return self._state
@property
def unit_of_measurement(self):
"""Return the unit of measurement."""
return ATTR_PEOPLE
@property
def extra_state_attributes(self):
"""Return the attributes."""
if not self._last_detection:
return {}
return {"last_person": self._last_detection}
| apache-2.0 | -2,250,752,954,000,885,500 | 5,157,163,443,921,081,000 | 31.175141 | 88 | 0.621773 | false |
BeegorMif/HTPC-Manager | lib/guessit/transfo/guess_release_group.py | 21 | 3682 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
#
# GuessIt - A library for guessing information from filenames
# Copyright (c) 2012 Nicolas Wack <wackou@gmail.com>
#
# GuessIt is free software; you can redistribute it and/or modify it under
# the terms of the Lesser GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# GuessIt is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# Lesser GNU General Public License for more details.
#
# You should have received a copy of the Lesser GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import unicode_literals
from guessit.transfo import SingleNodeGuesser
from guessit.patterns import prop_multi, compute_canonical_form, _dash, _psep
import re
import logging
log = logging.getLogger(__name__)
def get_patterns(property_name):
return [ p.replace(_dash, _psep) for patterns in prop_multi[property_name].values() for p in patterns ]
CODECS = get_patterns('videoCodec')
FORMATS = get_patterns('format')
VAPIS = get_patterns('videoApi')
# RG names following a codec or format, with a potential space or dash inside the name
GROUP_NAMES = [ r'(?P<videoCodec>' + codec + r')[ \.-](?P<releaseGroup>.+?([- \.].*?)??)[ \.]'
for codec in CODECS ]
GROUP_NAMES += [ r'(?P<format>' + fmt + r')[ \.-](?P<releaseGroup>.+?([- \.].*?)??)[ \.]'
for fmt in FORMATS ]
GROUP_NAMES += [ r'(?P<videoApi>' + api + r')[ \.-](?P<releaseGroup>.+?([- \.].*?)??)[ \.]'
for api in VAPIS ]
GROUP_NAMES2 = [ r'\.(?P<videoCodec>' + codec + r')-(?P<releaseGroup>.*?)(-(.*?))?[ \.]'
for codec in CODECS ]
GROUP_NAMES2 += [ r'\.(?P<format>' + fmt + r')-(?P<releaseGroup>.*?)(-(.*?))?[ \.]'
for fmt in FORMATS ]
GROUP_NAMES2 += [ r'\.(?P<videoApi>' + vapi + r')-(?P<releaseGroup>.*?)(-(.*?))?[ \.]'
for vapi in VAPIS ]
GROUP_NAMES = [ re.compile(r, re.IGNORECASE) for r in GROUP_NAMES ]
GROUP_NAMES2 = [ re.compile(r, re.IGNORECASE) for r in GROUP_NAMES2 ]
def adjust_metadata(md):
return dict((property_name, compute_canonical_form(property_name, value) or value)
for property_name, value in md.items())
def guess_release_group(string):
# first try to see whether we have both a known codec and a known release group
for rexp in GROUP_NAMES:
match = rexp.search(string)
while match:
metadata = match.groupdict()
# make sure this is an actual release group we caught
release_group = (compute_canonical_form('releaseGroup', metadata['releaseGroup']) or
compute_canonical_form('weakReleaseGroup', metadata['releaseGroup']))
if release_group:
return adjust_metadata(metadata), (match.start(1), match.end(2))
# we didn't find anything conclusive, keep searching
match = rexp.search(string, match.span()[0]+1)
# pick anything as releaseGroup as long as we have a codec in front
# this doesn't include a potential dash ('-') ending the release group
# eg: [...].X264-HiS@SiLUHD-English.[...]
for rexp in GROUP_NAMES2:
match = rexp.search(string)
if match:
return adjust_metadata(match.groupdict()), (match.start(1), match.end(2))
return None, None
def process(mtree):
SingleNodeGuesser(guess_release_group, 0.8, log).process(mtree)
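# Illustrative behaviour (file name purely hypothetical): for a chunk such as
# "Movie.2010.720p.BluRay.x264-GRP.mkv", the first loop only accepts a known
# release-group name right after a known codec/format ("x264"); failing that,
# the second loop takes whatever token follows the codec ("GRP") as the
# releaseGroup, returning the guessed metadata together with the matched span.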
| gpl-3.0 | 5,170,178,496,483,380,000 | 5,853,011,102,201,666,000 | 41.813953 | 108 | 0.633895 | false |
iamaris/CMUAnalysis | Common/generateObjectTree.py | 1 | 11728 | import re
import os
objects = ['Photon', 'Electron', 'Muon', 'Jet', 'Vertex']
susyObjects = {'Photon': 'Photon', 'Electron': 'Electron', 'Muon': 'Muon', 'Jet': 'PFJet', 'Vertex': 'Vertex'}
objectVars = file('ObjectVars.h')
classPat = re.compile('^[ ]*class[ ]+([a-zA-Z0-9]+)Vars[ ]*{')
cTorPat = re.compile('^[ ]*[a-zA-Z0-9]+Vars\([^,]+(,[ ]+Event.*|)\);')
varPat = re.compile('^[ ]*((?:unsigned[ ]|)(?:bool|char|short|int|unsigned|long|float|double))[ ]+([a-zA-Z_][a-zA-Z0-9_]*);')
useEvent = dict()
varList = dict()
obj = ''
for line in objectVars:
if '};' in line:
obj = ''
if obj:
cTorMatch = cTorPat.match(line)
if cTorMatch:
useEvent[obj] = len(cTorMatch.group(1)) != 0
varMatch = varPat.match(line)
if varMatch:
varList[obj].append((varMatch.group(1), varMatch.group(2)))
lineMatch = classPat.match(line)
if lineMatch and lineMatch.group(1) in objects:
obj = lineMatch.group(1)
varList[obj] = []
objectVars.close()
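# As an illustration, a hypothetical ObjectVars.h declaration such as
#   class PhotonVars {
#     PhotonVars(Photon const&, Event const&);
#     float pt;
#     bool isEB;
#   };
# would leave useEvent['Photon'] == True and
# varList['Photon'] == [('float', 'pt'), ('bool', 'isEB')].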
# GENERATE HEADER
headerContent = '''/* Auto-generated header file */
#ifndef ObjectTree_h
#define ObjectTree_h
#include "ObjectVars.h"
#include "TTree.h"
#include "TString.h"
namespace susy {
unsigned const NMAX(512);
'''
for obj in objects:
headerContent += '''
class ''' + obj + '''VarsArray {
public:
''' + obj + '''VarsArray() {}
~''' + obj + '''VarsArray() {}
void setBranches(TTree&);
void setAddress(TTree&);
void push_back(''' + obj + '''Vars const&);
void clear() { size = 0; }
''' + obj + '''Vars at(unsigned) const;
unsigned size;
'''
for (type, name) in varList[obj]:
headerContent += '''
''' + type + ' ' + name + '[NMAX];'
headerContent += '''
};
'''
headerContent += '''
class ObjectTree {
public:
ObjectTree();
~ObjectTree();
void setOutput(TString const&,'''
for i in range(len(objects)):
headerContent += ' bool = true'
if i != len(objects) - 1:
headerContent += ','
else:
headerContent += ');'
headerContent += '''
void setOutput(TTree&,'''
for i in range(len(objects)):
headerContent += ' bool = true'
if i != len(objects) - 1:
headerContent += ','
else:
headerContent += ');'
headerContent += '''
static void setBranchStatus(TTree&,'''
for i in range(len(objects)):
headerContent += ' bool = true'
if i != len(objects) - 1:
headerContent += ','
else:
headerContent += ');'
headerContent += '''
void initEvent(Event const&);
void fill() { output_->Fill(); }'''
for obj in objects:
lowerName = obj.lower()
headerContent += '''
void save(''' + obj + 'Vars const& _vars) { ' + lowerName + 'Array_.push_back(_vars); }'
for obj in objects:
lowerName = obj.lower()
headerContent += '''
unsigned get''' + obj + 'Size() const { return ' + lowerName + 'Array_.size; }'
for obj in objects:
lowerName = obj.lower()
headerContent += '''
''' + obj + 'VarsArray const& get' + obj + 'Array() const { return ' + lowerName + 'Array_; }'
headerContent += '''
private:
void setBranches_('''
for i in range(len(objects)):
headerContent += 'bool'
if i != len(objects) - 1:
headerContent += ', '
else:
headerContent += ');'
for obj in objects:
lowerName = obj.lower()
headerContent += '''
''' + obj + '''VarsArray ''' + lowerName + '''Array_;'''
headerContent += '''
unsigned runNumber_;
unsigned lumiNumber_;
unsigned eventNumber_;
TTree* output_;
bool ownOutput_;
};
}
#endif
'''
headerFile = file('ObjectTree.h', 'w')
headerFile.write(headerContent)
headerFile.close()
# GENERATE SRC
cTors = dict()
setBranches = dict()
setAddress = dict()
pushBack = dict()
at = dict()
for obj in objects:
lowerName = obj.lower()
cTorText = '''
''' + obj + 'Vars::' + obj + '''Vars() :'''
initList = ''
for (type, name) in varList[obj]:
initList += '''
''' + name + '('
if type == 'float' or type == 'double':
initList += '0.'
elif type == 'bool':
initList += 'false'
else:
initList += '0'
initList += '),'
initList = initList.rstrip(',')
cTorText += initList
cTorText += '''
{
}
'''
cTors[obj] = cTorText
setBranchText = '''
void
''' + obj + '''VarsArray::setBranches(TTree& _tree)
{
_tree.Branch("''' + lowerName + '.size", &size, "' + lowerName + '.size/i");'
for (type, name) in varList[obj]:
branch = '''
_tree.Branch("''' + lowerName + '.' + name + '", ' + name + ', "' + name + '[' + lowerName + '.size]/'
if type == 'char':
branch += 'B'
elif type == 'unsigned char':
branch += 'b'
elif type == 'short':
branch += 'S'
elif type == 'unsigned short':
branch += 's'
elif type == 'int':
branch += 'I'
elif type == 'unsigned' or type == 'unsigned int':
branch += 'i'
elif type == 'long':
branch += 'L'
elif type == 'unsigned long':
branch += 'l'
elif type == 'float':
branch += 'F'
elif type == 'double':
branch += 'D'
elif type == 'bool':
branch += 'O'
branch += '");'
setBranchText += branch
setBranchText += '''
}
'''
setBranches[obj] = setBranchText
setAddressText = '''
void
''' + obj + '''VarsArray::setAddress(TTree& _tree)
{
std::vector<TString> notFound;
_tree.SetBranchAddress("''' + lowerName + '.size", &size);'
for (type, name) in varList[obj]:
bName = lowerName + '.' + name
setAddressText += '''
if(_tree.GetBranch("''' + bName + '")) _tree.SetBranchAddress("' + bName + '", ' + name + ''');
else notFound.push_back("''' + bName + '");'
setAddressText += '''
for(unsigned iN(0); iN != notFound.size(); ++iN)
std::cerr << "Branch " << notFound[iN] << " not found in input" << std::endl;
}
'''
setAddress[obj] = setAddressText
pushBackText = '''
void
''' + obj + 'VarsArray::push_back(' + obj + '''Vars const& _vars)
{
if(size == NMAX - 1)
throw std::runtime_error("Too many ''' + obj + '''s");
'''
for (type, name) in varList[obj]:
pushBackText += '''
''' + name + '[size] = _vars.' + name + ';'
pushBackText += '''
++size;
}
'''
pushBack[obj] = pushBackText
atText = '''
''' + obj + '''Vars
''' + obj + '''VarsArray::at(unsigned _pos) const
{
if(_pos >= size)
throw std::runtime_error("''' + obj + '''Vars out-of-bounds");
''' + obj + '''Vars vars;
'''
for (type, name) in varList[obj]:
atText += '''
vars.''' + name + ' = ' + name + '[_pos];'
atText += '''
return vars;
}
'''
at[obj] = atText
preamble = '#include "ObjectVars.h"\n'
try:
originalSrc = file('ObjectVars.cc', 'r')
userDef = ''
copy = False
namespace = False
for line in originalSrc:
if 'namespace susy' in line:
namespace = True
if not namespace and 'ObjectVars.h' not in line and not re.match('^[ ]*/\*.*\*/[ ]*$', line):
preamble += line
if '/* START USER-DEFINED IMPLEMENTATION (DO NOT MODIFY THIS LINE) */' in line:
copy = True
if copy:
userDef += line
if '/* END USER-DEFINED IMPLEMENTATION (DO NOT MODIFY THIS LINE) */' in line:
copy = False
originalSrc.close()
except:
userDef = '\n/* START USER-DEFINED IMPLEMENTATION (DO NOT MODIFY THIS LINE) */\n'
for obj in objects:
userDef += '''
void
''' + obj + '''Vars::set(''' + susyObjects[obj] + ' const&'
if useEvent[obj]:
userDef += ', Event const&'
userDef += ''')
{
}
/*static*/
''' + obj + '''Vars::setBranchStatus(TTree&)
{
}
'''
userDef += '/* END USER-DEFINED IMPLEMENTATION (DO NOT MODIFY THIS LINE) */\n'
# ObjectTree.cc
objTreeContent = '''/* Auto-generated source file */
#include "ObjectTree.h"
#include "TFile.h"
#include <stdexcept>
#include <iostream>
namespace susy {
'''
for obj in objects:
objTreeContent += setBranches[obj]
objTreeContent += setAddress[obj]
objTreeContent += pushBack[obj]
objTreeContent += at[obj]
objTreeContent += '''
ObjectTree::ObjectTree() :'''
for obj in objects:
lowerName = obj.lower()
objTreeContent += '''
''' + lowerName + '''Array_(),'''
objTreeContent += '''
runNumber_(0),
lumiNumber_(0),
eventNumber_(0),
output_(0),
ownOutput_(false)
{
}
ObjectTree::~ObjectTree()
{
if(ownOutput_ && output_){
TFile* outFile(output_->GetCurrentFile());
outFile->cd();
output_->Write();
delete outFile;
}
}
void
ObjectTree::setOutput(TString const& _fileName'''
for obj in objects:
objTreeContent += ', bool _set' + obj + '/* = true*/'
objTreeContent += ''')
{
ownOutput_ = true;
TFile::Open(_fileName, "recreate");
output_ = new TTree("objectVars", "Object ID variables");
setBranches_('''
for obj in objects:
objTreeContent += '_set' + obj + ', '
objTreeContent = objTreeContent.rstrip(', ')
objTreeContent += ''');
}
void
ObjectTree::setOutput(TTree& _tree'''
for obj in objects:
objTreeContent += ', bool _set' + obj + '/* = true*/'
objTreeContent += ''')
{
output_ = &_tree;
setBranches_('''
for obj in objects:
objTreeContent += '_set' + obj + ', '
objTreeContent = objTreeContent.rstrip(', ')
objTreeContent += ''');
}
/*static*/
void
ObjectTree::setBranchStatus(TTree& _input'''
for obj in objects:
objTreeContent += ', bool _set' + obj + '/* = true*/'
objTreeContent += ''')
{
_input.SetBranchStatus("runNumber", 1);
_input.SetBranchStatus("luminosityBlockNumber", 1);
_input.SetBranchStatus("eventNumber", 1);
'''
for obj in objects:
objTreeContent += '''
if(_set''' + obj + ') ' + obj + 'Vars::setBranchStatus(_input);'
objTreeContent += '''
}
#ifdef STANDALONE
void
ObjectTree::initEvent(Event const&)
{
runNumber_ = 0;
lumiNumber_ = 0;
eventNumber_ = 0;
#else
void
ObjectTree::initEvent(Event const& _event)
{
runNumber_ = _event.runNumber;
lumiNumber_ = _event.luminosityBlockNumber;
eventNumber_ = _event.eventNumber;
#endif'''
for obj in objects:
objTreeContent += '''
''' + obj.lower() + 'Array_.clear();'
objTreeContent += '''
}
void
ObjectTree::setBranches_('''
for obj in objects:
objTreeContent += 'bool _set' + obj + ', '
objTreeContent = objTreeContent.rstrip(', ') + ')'
objTreeContent += '''
{
output_->Branch("runNumber", &runNumber_, "runNumber/i");
output_->Branch("lumiNumber", &lumiNumber_, "lumiNumber/i");
output_->Branch("eventNumber", &eventNumber_, "eventNumber/i");
'''
for obj in objects:
objTreeContent += '''
if(_set''' + obj + ') ' + obj.lower() + 'Array_.setBranches(*output_);'
objTreeContent += '''
}
'''
objTreeContent += '}\n'
objTreeFile = file('ObjectTree.cc', 'w')
objTreeFile.write(objTreeContent)
objTreeFile.close()
# ObjectVars.cc
objVarsContent = '''/* Partially auto-generated source file - edit where indicated */
/* Add necessary inclusions below */
''' + preamble + '''
namespace susy {
'''
for obj in objects:
objVarsContent += cTors[obj]
objVarsContent += '\n'
objVarsContent += userDef
objVarsContent += '''
}
'''
objVarsFile = file('ObjectVars.cc', 'w')
objVarsFile.write(objVarsContent)
objVarsFile.close()
| apache-2.0 | -249,681,040,860,180,700 | -4,133,015,835,176,165,000 | 21.339048 | 125 | 0.548857 | false |
riyer15/python_koans | python3/koans/about_scoring_project.py | 107 | 2207 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from runner.koan import *
# Greed is a dice game where you roll up to five dice to accumulate
# points. The following "score" function will be used to calculate the
# score of a single roll of the dice.
#
# A greed roll is scored as follows:
#
# * A set of three ones is 1000 points
#
# * A set of three numbers (other than ones) is worth 100 times the
# number. (e.g. three fives is 500 points).
#
# * A one (that is not part of a set of three) is worth 100 points.
#
# * A five (that is not part of a set of three) is worth 50 points.
#
# * Everything else is worth 0 points.
#
#
# Examples:
#
# score([1,1,1,5,1]) => 1150 points
# score([2,3,4,6,2]) => 0 points
# score([3,4,5,3,3]) => 350 points
# score([1,5,1,2,4]) => 250 points
#
# More scoring examples are given in the tests below:
#
# Your goal is to write the score method.
def score(dice):
# You need to write this method
pass
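# Illustrative sketch only (an addition, not part of the original koan): one
# possible implementation of the scoring rules described above, kept under a
# separate name so the score() exercise itself stays unsolved.
def _example_score(dice):
    from collections import Counter
    total = 0
    for value, count in Counter(dice).items():
        if count >= 3:
            # a set of three ones is 1000; any other triple is 100 * value
            total += 1000 if value == 1 else value * 100
            count -= 3
        # leftover single ones and fives are worth 100 and 50 points each
        if value == 1:
            total += count * 100
        elif value == 5:
            total += count * 50
    return total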
class AboutScoringProject(Koan):
def test_score_of_an_empty_list_is_zero(self):
self.assertEqual(0, score([]))
def test_score_of_a_single_roll_of_5_is_50(self):
self.assertEqual(50, score([5]))
def test_score_of_a_single_roll_of_1_is_100(self):
self.assertEqual(100, score([1]))
def test_score_of_multiple_1s_and_5s_is_the_sum_of_individual_scores(self):
self.assertEqual(300, score([1,5,5,1]))
def test_score_of_single_2s_3s_4s_and_6s_are_zero(self):
self.assertEqual(0, score([2,3,4,6]))
def test_score_of_a_triple_1_is_1000(self):
self.assertEqual(1000, score([1,1,1]))
def test_score_of_other_triples_is_100x(self):
self.assertEqual(200, score([2,2,2]))
self.assertEqual(300, score([3,3,3]))
self.assertEqual(400, score([4,4,4]))
self.assertEqual(500, score([5,5,5]))
self.assertEqual(600, score([6,6,6]))
def test_score_of_mixed_is_sum(self):
self.assertEqual(250, score([2,5,2,2,3]))
self.assertEqual(550, score([5,5,5,5]))
self.assertEqual(1150, score([1,1,1,5,1]))
def test_ones_not_left_out(self):
self.assertEqual(300, score([1,2,2,2]))
self.assertEqual(350, score([1,5,2,2,2])) | mit | -4,133,845,819,325,437,400 | -5,559,631,275,444,434,000 | 29.666667 | 79 | 0.633892 | false |
victorbriz/rethinkdb | scripts/ui-tests.py | 50 | 3779 | #!/usr/bin/env python
# Copyright 2010-2012 RethinkDB, all rights reserved.
import os, sys, subprocess, argparse
from termcolor import colored, cprint
import time
tests = [
'add-a-namespace',
'add-a-datacenter',
'view-dashboard',
]
git_root = subprocess.Popen(['git', 'rev-parse', '--show-toplevel'], stdout=subprocess.PIPE).communicate()[0].rstrip('\r\n')
test_file_dir = os.path.join(git_root, 'test/ui_test/')
cwd = os.getcwd()
# Define and parse command-line arguments
parser = argparse.ArgumentParser(description='Run a set of UI tests using CasperJS / PhantomJS.')
parser.add_argument('tests', nargs='*', help='List of tests to run. Specify \'all\' to run all tests.')
parser.add_argument('-p','--rdb-port', nargs='?', dest='rdb_port', default='6001', help='Port of the RethinkDB server to connect to (default is 6001).')
parser.add_argument('-i','--output-images', nargs='?', dest='image_output_directory', const='./casper-results', help='Include if images should be scraped and saved. Optionally specify the output directory (default is ./casper-results/).')
parser.add_argument('-l','--list-tests', action='store_true', help='List available tests to run.')
parser.add_argument('-r','--output-results', nargs='?', dest='result_output_directory', const='./casper-results', help='Include if test results should be saved. Optionally specify the output directory (default is ./casper-results/).')
args = parser.parse_args()
def print_available_tests():
print 'Available tests:'
print '\t- all: run all of the following tests'
for test in tests:
print '\t- ' + test
if args.list_tests:
print_available_tests()
exit(0)
if len(args.tests) < 1:
parser.print_usage()
print '\nNo test specified.',
print_available_tests()
exit(1)
# Prepare the list of tests to process; if 'all' was one of the specified tests then process all tests
if 'all' in args.tests:
test_list = tests
else:
test_list = args.tests
# Process each test name specified on the command line
successful_tests = 0
os.chdir(test_file_dir)
for test_name in test_list:
# Look for a matching test among known tests
casper_script = os.path.join(test_file_dir, test_name + '.coffee')
try:
with open(casper_script) as f: pass
except IOError as e:
print "No test script found for CasperJS test '%s'." % test_name
continue
# Build command with arguments for casperjs test
cl = ['casperjs', '--rdb-server=http://localhost:' + args.rdb_port + '/', casper_script]
# If the option to scrape images was specified, add it to the casperjs argument list
if args.image_output_directory:
image_dir = os.path.abspath(args.image_output_directory)
cl.extend(['--images=' + image_dir])
# Execute casperjs and pretty-print its output
process = subprocess.Popen(cl, stdout=subprocess.PIPE)
stdout = process.stdout.readlines()
for i, line in enumerate(stdout):
cprint('[%s]' % test_name, attrs=['bold'], end=' ')
print line.rstrip('\n')
# If the option to save results was specified, save stdout to a file
if args.result_output_directory:
result_dir = os.path.abspath(args.result_output_directory)
result_filename = "casper-result_%s" % test_name
result_file = open(os.path.join(result_dir, result_filename), 'w')
for line in stdout:
result_file.write(line)
result_file.close()
# Check the exit code of the process
# 0: casper test passed
# 1: casper test failed
process.poll()
if process.returncode == 0:
successful_tests += 1
print
# Print test suite summary
cprint(" %d of %d tests ran successfully. " % (successful_tests, len(test_list)), attrs=['reverse'])
| agpl-3.0 | 1,982,702,376,969,224,400 | 4,196,503,770,904,625,000 | 39.202128 | 238 | 0.674517 | false |
jakesyl/androguard | androguard/core/analysis/sign.py | 38 | 13670 | # This file is part of Androguard.
#
# Copyright (C) 2012, Anthony Desnos <desnos at t0t0.fr>
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from androguard.core.analysis.analysis import TAINTED_PACKAGE_CREATE, TAINTED_PACKAGE_CALL
from androguard.core.bytecodes import dvm
TAINTED_PACKAGE_INTERNAL_CALL = 2
FIELD_ACCESS = { "R" : 0, "W" : 1 }
PACKAGE_ACCESS = { TAINTED_PACKAGE_CREATE : 0, TAINTED_PACKAGE_CALL : 1, TAINTED_PACKAGE_INTERNAL_CALL : 2 }
class Sign :
def __init__(self) :
self.levels = {}
self.hlevels = []
def add(self, level, value) :
self.levels[ level ] = value
self.hlevels.append( level )
def get_level(self, l) :
return self.levels[ "L%d" % l ]
def get_string(self) :
buff = ""
for i in self.hlevels :
buff += self.levels[ i ]
return buff
def get_list(self) :
return self.levels[ "sequencebb" ]
class Signature :
def __init__(self, vmx) :
self.vmx = vmx
self.tainted_packages = self.vmx.get_tainted_packages()
self.tainted_variables = self.vmx.get_tainted_variables()
self._cached_signatures = {}
self._cached_fields = {}
self._cached_packages = {}
self._global_cached = {}
self.levels = {
# Classical method signature with basic blocks, strings, fields, packages
"L0" : {
0 : ( "_get_strings_a", "_get_fields_a", "_get_packages_a" ),
1 : ( "_get_strings_pa", "_get_fields_a", "_get_packages_a" ),
2 : ( "_get_strings_a", "_get_fields_a", "_get_packages_pa_1" ),
3 : ( "_get_strings_a", "_get_fields_a", "_get_packages_pa_2" ),
},
# strings
"L1" : [ "_get_strings_a1" ],
# exceptions
"L2" : [ "_get_exceptions" ],
# fill array data
"L3" : [ "_get_fill_array_data" ],
}
self.classes_names = None
self._init_caches()
def _get_method_info(self, m) :
m1 = m.get_method()
return "%s-%s-%s" % (m1.get_class_name(), m1.get_name(), m1.get_descriptor())
def _get_sequence_bb(self, analysis_method) :
l = []
for i in analysis_method.basic_blocks.get() :
buff = ""
instructions = [j for j in i.get_instructions()]
if len(instructions) > 5 :
for ins in instructions :
buff += ins.get_name()
if buff != "" :
l.append( buff )
return l
def _get_hex(self, analysis_method) :
code = analysis_method.get_method().get_code()
if code == None :
return ""
buff = ""
for i in code.get_bc().get_instructions() :
buff += dvm.clean_name_instruction( i )
buff += dvm.static_operand_instruction( i )
return buff
def _get_bb(self, analysis_method, functions, options) :
bbs = []
for b in analysis_method.basic_blocks.get() :
l = []
l.append( (b.start, "B") )
l.append( (b.start, "[") )
internal = []
op_value = b.get_last().get_op_value()
# return
if op_value >= 0x0e and op_value <= 0x11 :
internal.append( (b.end-1, "R") )
# if
elif op_value >= 0x32 and op_value <= 0x3d :
internal.append( (b.end-1, "I") )
# goto
elif op_value >= 0x28 and op_value <= 0x2a :
internal.append( (b.end-1, "G") )
# sparse or packed switch
elif op_value >= 0x2b and op_value <= 0x2c :
internal.append( (b.end-1, "G") )
for f in functions :
try :
internal.extend( getattr( self, f )( analysis_method, options ) )
except TypeError :
internal.extend( getattr( self, f )( analysis_method ) )
internal.sort()
for i in internal :
if i[0] >= b.start and i[0] < b.end :
l.append( i )
del internal
l.append( (b.end, "]") )
bbs.append( ''.join(i[1] for i in l) )
return bbs
def _init_caches(self) :
if self._cached_fields == {} :
for f_t, f in self.tainted_variables.get_fields() :
self._cached_fields[ f ] = f_t.get_paths_length()
n = 0
for f in sorted( self._cached_fields ) :
self._cached_fields[ f ] = n
n += 1
if self._cached_packages == {} :
for m_t, m in self.tainted_packages.get_packages() :
self._cached_packages[ m ] = m_t.get_paths_length()
n = 0
for m in sorted( self._cached_packages ) :
self._cached_packages[ m ] = n
n += 1
def _get_fill_array_data(self, analysis_method) :
buff = ""
for b in analysis_method.basic_blocks.get() :
for i in b.get_instructions() :
if i.get_name() == "FILL-ARRAY-DATA" :
buff_tmp = i.get_operands()
for j in range(0, len(buff_tmp)) :
buff += "\\x%02x" % ord( buff_tmp[j] )
return buff
def _get_exceptions(self, analysis_method) :
buff = ""
method = analysis_method.get_method()
code = method.get_code()
if code == None or code.get_tries_size() <= 0 :
return buff
handler_catch_list = code.get_handlers()
for handler_catch in handler_catch_list.get_list() :
for handler in handler_catch.get_handlers() :
buff += analysis_method.get_vm().get_cm_type( handler.get_type_idx() )
return buff
def _get_strings_a1(self, analysis_method) :
buff = ""
strings_method = self.tainted_variables.get_strings_by_method( analysis_method.get_method() )
for s in strings_method :
for path in strings_method[s] :
buff += s.replace('\n', ' ')
return buff
def _get_strings_pa(self, analysis_method) :
l = []
strings_method = self.tainted_variables.get_strings_by_method( analysis_method.get_method() )
for s in strings_method :
for path in strings_method[s] :
l.append( ( path[1], "S%d" % len(s) ) )
return l
def _get_strings_a(self, analysis_method) :
key = "SA-%s" % self._get_method_info(analysis_method)
if key in self._global_cached :
return self._global_cached[ key ]
l = []
strings_method = self.tainted_variables.get_strings_by_method( analysis_method.get_method() )
for s in strings_method :
for path in strings_method[s] :
l.append( ( path[1], "S") )
self._global_cached[ key ] = l
return l
def _get_fields_a(self, analysis_method) :
key = "FA-%s" % self._get_method_info(analysis_method)
if key in self._global_cached :
return self._global_cached[ key ]
fields_method = self.tainted_variables.get_fields_by_method( analysis_method.get_method() )
l = []
for f in fields_method :
for path in fields_method[ f ] :
l.append( (path[1], "F%d" % FIELD_ACCESS[ path[0] ]) )
self._global_cached[ key ] = l
return l
def _get_packages_a(self, analysis_method) :
packages_method = self.tainted_packages.get_packages_by_method( analysis_method.get_method() )
l = []
for m in packages_method :
for path in packages_method[ m ] :
l.append( (path.get_idx(), "P%s" % (PACKAGE_ACCESS[ path.get_access_flag() ]) ) )
return l
def _get_packages(self, analysis_method, include_packages) :
l = self._get_packages_pa_1( analysis_method, include_packages )
return "".join([ i[1] for i in l ])
def _get_packages_pa_1(self, analysis_method, include_packages) :
key = "PA1-%s-%s" % (self._get_method_info(analysis_method), include_packages)
if key in self._global_cached :
return self._global_cached[ key ]
packages_method = self.tainted_packages.get_packages_by_method( analysis_method.get_method() )
if self.classes_names == None :
self.classes_names = analysis_method.get_vm().get_classes_names()
l = []
for m in packages_method :
for path in packages_method[ m ] :
present = False
for i in include_packages :
if m.find(i) == 0 :
present = True
break
if path.get_access_flag() == 1 :
dst_class_name, dst_method_name, dst_descriptor = path.get_dst( analysis_method.get_vm().get_class_manager() )
if dst_class_name in self.classes_names :
l.append( (path.get_idx(), "P%s" % (PACKAGE_ACCESS[ 2 ]) ) )
else :
if present == True :
l.append( (path.get_idx(), "P%s{%s%s%s}" % (PACKAGE_ACCESS[ path.get_access_flag() ], dst_class_name, dst_method_name, dst_descriptor ) ) )
else :
l.append( (path.get_idx(), "P%s" % (PACKAGE_ACCESS[ path.get_access_flag() ]) ) )
else :
if present == True :
l.append( (path.get_idx(), "P%s{%s}" % (PACKAGE_ACCESS[ path.get_access_flag() ], m) ) )
else :
l.append( (path.get_idx(), "P%s" % (PACKAGE_ACCESS[ path.get_access_flag() ]) ) )
self._global_cached[ key ] = l
return l
def _get_packages_pa_2(self, analysis_method, include_packages) :
packages_method = self.tainted_packages.get_packages_by_method( analysis_method.get_method() )
l = []
for m in packages_method :
for path in packages_method[ m ] :
present = False
for i in include_packages :
if m.find(i) == 0 :
present = True
break
if present == True :
l.append( (path.get_idx(), "P%s" % (PACKAGE_ACCESS[ path.get_access_flag() ]) ) )
continue
if path.get_access_flag() == 1 :
dst_class_name, dst_method_name, dst_descriptor = path.get_dst( analysis_method.get_vm().get_class_manager() )
l.append( (path.get_idx(), "P%s{%s%s%s}" % (PACKAGE_ACCESS[ path.get_access_flag() ], dst_class_name, dst_method_name, dst_descriptor ) ) )
else :
l.append( (path.get_idx(), "P%s{%s}" % (PACKAGE_ACCESS[ path.get_access_flag() ], m) ) )
return l
def get_method(self, analysis_method, signature_type, signature_arguments={}) :
key = "%s-%s-%s" % (self._get_method_info(analysis_method), signature_type, signature_arguments)
if key in self._cached_signatures :
return self._cached_signatures[ key ]
s = Sign()
#print signature_type, signature_arguments
for i in signature_type.split(":") :
# print i, signature_arguments[ i ]
if i == "L0" :
_type = self.levels[ i ][ signature_arguments[ i ][ "type" ] ]
try :
_arguments = signature_arguments[ i ][ "arguments" ]
except KeyError :
_arguments = []
value = self._get_bb( analysis_method, _type, _arguments )
s.add( i, ''.join(z for z in value) )
elif i == "L4" :
try :
_arguments = signature_arguments[ i ][ "arguments" ]
except KeyError :
_arguments = []
value = self._get_packages( analysis_method, _arguments )
s.add( i , value )
elif i == "hex" :
value = self._get_hex( analysis_method )
s.add( i, value )
elif i == "sequencebb" :
_type = ('_get_strings_a', '_get_fields_a', '_get_packages_pa_1')
_arguments = ['Landroid', 'Ljava']
#value = self._get_bb( analysis_method, _type, _arguments )
#s.add( i, value )
value = self._get_sequence_bb( analysis_method )
s.add( i, value )
else :
for f in self.levels[ i ] :
value = getattr( self, f )( analysis_method )
s.add( i, value )
self._cached_signatures[ key ] = s
return s
| apache-2.0 | 4,979,698,250,782,234,000 | 4,044,486,679,567,651,000 | 35.356383 | 167 | 0.501609 | false |
kambysese/mne-python | mne/connectivity/utils.py | 15 | 2957 | # Authors: Martin Luessi <mluessi@nmr.mgh.harvard.edu>
#
# License: BSD (3-clause)
import numpy as np
def check_indices(indices):
"""Check indices parameter."""
if not isinstance(indices, tuple) or len(indices) != 2:
raise ValueError('indices must be a tuple of length 2')
if len(indices[0]) != len(indices[1]):
raise ValueError('Index arrays indices[0] and indices[1] must '
'have the same length')
return indices
def seed_target_indices(seeds, targets):
"""Generate indices parameter for seed based connectivity analysis.
Parameters
----------
seeds : array of int | int
Seed indices.
targets : array of int | int
Indices of signals for which to compute connectivity.
Returns
-------
indices : tuple of array
The indices parameter used for connectivity computation.
"""
# make them arrays
seeds = np.asarray((seeds,)).ravel()
targets = np.asarray((targets,)).ravel()
n_seeds = len(seeds)
n_targets = len(targets)
indices = (np.concatenate([np.tile(i, n_targets) for i in seeds]),
np.tile(targets, n_seeds))
return indices
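# Illustrative usage sketch (an addition, not part of the original module):
# indices for one seed signal against three target signals.
def _example_seed_target_indices():
    # indices[0] is [0, 0, 0] and indices[1] is [2, 3, 4], i.e. connectivity
    # is computed from signal 0 to each of signals 2, 3 and 4
    return seed_target_indices([0], [2, 3, 4])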
def degree(connectivity, threshold_prop=0.2):
"""Compute the undirected degree of a connectivity matrix.
Parameters
----------
connectivity : ndarray, shape (n_nodes, n_nodes)
The connectivity matrix.
threshold_prop : float
The proportion of edges to keep in the graph before
computing the degree. The value should be between 0
and 1.
Returns
-------
degree : ndarray, shape (n_nodes,)
The computed degree.
Notes
-----
During thresholding, the symmetry of the connectivity matrix is
auto-detected based on :func:`numpy.allclose` of it with its transpose.
"""
connectivity = np.array(connectivity)
if connectivity.ndim != 2 or \
connectivity.shape[0] != connectivity.shape[1]:
        raise ValueError('connectivity must have shape (n_nodes, n_nodes), '
                         'got %s' % (connectivity.shape,))
n_nodes = len(connectivity)
if np.allclose(connectivity, connectivity.T):
split = 2.
connectivity[np.tril_indices(n_nodes)] = 0
else:
split = 1.
threshold_prop = float(threshold_prop)
if not 0 < threshold_prop <= 1:
raise ValueError('threshold must be 0 <= threshold < 1, got %s'
% (threshold_prop,))
degree = connectivity.ravel() # no need to copy because np.array does
degree[::n_nodes + 1] = 0.
n_keep = int(round((degree.size - len(connectivity)) *
threshold_prop / split))
degree[np.argsort(degree)[:-n_keep]] = 0
degree.shape = connectivity.shape
if split == 2:
degree += degree.T # normally unsafe, but we know where our zeros are
degree = np.sum(degree > 0, axis=0)
return degree
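# Illustrative usage sketch (an addition, not part of the original module):
# node-wise degree of a small symmetric connectivity matrix, keeping only the
# strongest half of the edges.
def _example_degree():
    connectivity = np.array([[0., 1., 2.],
                             [1., 0., 3.],
                             [2., 3., 0.]])
    # the two strongest edges (0-2 and 1-2) survive thresholding, so nodes 0
    # and 1 end up with degree 1 and node 2 with degree 2
    return degree(connectivity, threshold_prop=0.5)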
| bsd-3-clause | 5,810,698,865,437,777,000 | 6,271,232,984,522,663,000 | 30.795699 | 79 | 0.614136 | false |
dsimic/taxsims | ss.py | 1 | 1112 | import pandas as pd
import numpy as np
def ss_calc(
contrib_yearly, inv_gwth_rt, num_years, safe_withdrw_rate, start_age=28
):
"""
    inv_gwth_rt is inflation adjusted.
    contrib_yearly is in first year's dollars.
"""
tot_years = max(0, 62 - start_age - num_years) + num_years
df = pd.DataFrame({
'contrib_yearly': [contrib_yearly] * num_years + [0.] *
max(0, (62 - num_years - start_age)),
'inv_value': [0] * tot_years,
}, index=range(tot_years))
for year in range(0, tot_years):
print year
multiplier = np.array([
(1. + inv_gwth_rt) ** (year - y_) for y_ in range(year + 1)])
print multiplier
df['inv_value'][year] = np.sum(
np.array(df['contrib_yearly'][0: year + 1]) * multiplier)
df['monthly_inv_income'] = safe_withdrw_rate * df['inv_value'] / 12.
df['monthly_inv_income_w_spouse'] = df['monthly_inv_income'] * 1.5
return df
if __name__ == "__main__":
df = ss_calc(15e3, .03, 10, .03)
ss_benefit_monthly = 939.00
ss_benefit_w_spouse_monthly = 1.5 * ss_benefit_monthly
| gpl-2.0 | -695,503,681,865,803,800 | -1,160,542,201,607,618,300 | 31.705882 | 75 | 0.57554 | false |
pacoqueen/bbinn | PyChart-1.39/demos/linestyles.py | 1 | 1258 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
sys.path.append("..")
#
# Copyright (C) 2000-2005 by Yasushi Saito (yasushi.saito@gmail.com)
#
# Pychart is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 2, or (at your option) any
# later version.
#
# Pychart is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# for more details.
#
from pychart import *
import pychart.doc_support
import chartdemo
import re
can = canvas.default_canvas()
x = 100
y = 500
def drawLine(style):
global x, y
name = pychart.doc_support.stringify_value(style)
name = re.sub("line_style\\.", "", name)
name = pychart.doc_support.break_string(name)
can.line(style, x, y, x+40, y)
#print "name=", name
height = font.text_height(name)[0] + 5
tb = text_box.T(text=name, loc=(x, y-height), line_style=None)
x = x + 60
tb.draw()
for style in line_style.standards.list():
drawLine(style)
if x >= chartdemo.MaxWidth:
x=100
y=y-40
| gpl-2.0 | -6,577,625,518,565,421,000 | -4,343,147,122,167,521,300 | 26.347826 | 72 | 0.68283 | false |
leiferikb/bitpop | src/third_party/WebKit/Tools/Scripts/webkitpy/common/checkout/scm/scm_mock.py | 2 | 4048 | # Copyright (C) 2011 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from webkitpy.common.system.filesystem_mock import MockFileSystem
from webkitpy.common.system.executive_mock import MockExecutive
class MockSCM(object):
executable_name = "MockSCM"
def __init__(self, filesystem=None, executive=None):
self.checkout_root = "/mock-checkout/third_party/WebKit"
self.added_paths = set()
self._filesystem = filesystem or MockFileSystem()
self._executive = executive or MockExecutive()
def add(self, destination_path, return_exit_code=False):
self.add_list([destination_path], return_exit_code)
def add_list(self, destination_paths, return_exit_code=False):
self.added_paths.update(set(destination_paths))
if return_exit_code:
return 0
def has_working_directory_changes(self):
return False
def ensure_cleanly_tracking_remote_master(self):
pass
def current_branch(self):
return "mock-branch-name"
def checkout_branch(self, name):
pass
def create_clean_branch(self, name):
pass
def delete_branch(self, name):
pass
def supports_local_commits(self):
return True
def exists(self, path):
# TestRealMain.test_real_main (and several other rebaseline tests) are sensitive to this return value.
# We should make those tests more robust, but for now we just return True always (since no test needs otherwise).
return True
def absolute_path(self, *comps):
return self._filesystem.join(self.checkout_root, *comps)
def svn_revision(self, path):
return '5678'
def svn_revision_from_git_commit(self, git_commit):
if git_commit == '6469e754a1':
return 1234
if git_commit == '624c3081c0':
return 5678
if git_commit == '624caaaaaa':
return 10000
return None
def timestamp_of_revision(self, path, revision):
return '2013-02-01 08:48:05 +0000'
def commit_locally_with_message(self, message, commit_all_working_directory_changes=True):
pass
def delete(self, path):
return self.delete_list([path])
def delete_list(self, paths):
if not self._filesystem:
return
for path in paths:
if self._filesystem.exists(path):
self._filesystem.remove(path)
def move(self, origin, destination):
if self._filesystem:
self._filesystem.move(self.absolute_path(origin), self.absolute_path(destination))
| gpl-3.0 | 664,064,416,628,093,000 | 5,091,303,709,594,888,000 | 36.137615 | 121 | 0.69664 | false |
Psycojoker/wanawana | wanawana/settings.py | 1 | 2687 | """
Django settings for wanawana project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'w2=4yi@cyc@vsio@$tvz$%&_po6si@533=cwh5kr2dk#pd69)v'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'bootstrap3',
'django_extensions',
'debug_toolbar',
'django_pdb',
'wanawana',
'users',
'events',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django_pdb.middleware.PdbMiddleware',
)
ROOT_URLCONF = 'wanawana.urls'
TEMPLATE_LOADERS = (
'hamlpy.template.loaders.HamlPyFilesystemLoader',
'hamlpy.template.loaders.HamlPyAppDirectoriesLoader',
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
)
WSGI_APPLICATION = 'wanawana.wsgi.application'
# Email configuration
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
if DEBUG:
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATIC_URL = '/static/'
try:
from settings_local import *
except ImportError:
pass
| gpl-3.0 | 6,058,254,241,284,379,000 | -2,870,537,519,099,660,000 | 23.87963 | 71 | 0.723483 | false |
bertucho/epic-movie-quotes-quiz | dialogos/build/Twisted/twisted/protocols/sip.py | 8 | 42262 | # -*- test-case-name: twisted.test.test_sip -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""Session Initialization Protocol.
Documented in RFC 2543.
[Superseded by 3261]
This module contains a deprecated implementation of HTTP Digest authentication.
See L{twisted.cred.credentials} and L{twisted.cred._digest} for its new home.
"""
# system imports
import socket, time, sys, random, warnings
from hashlib import md5
from zope.interface import implements, Interface
# twisted imports
from twisted.python import log, util
from twisted.python.deprecate import deprecated
from twisted.python.versions import Version
from twisted.internet import protocol, defer, reactor
from twisted import cred
from twisted.cred.credentials import UsernameHashedPassword, UsernamePassword
# sibling imports
from twisted.protocols import basic
PORT = 5060
# SIP headers have short forms
shortHeaders = {"call-id": "i",
"contact": "m",
"content-encoding": "e",
"content-length": "l",
"content-type": "c",
"from": "f",
"subject": "s",
"to": "t",
"via": "v",
}
longHeaders = {}
for k, v in shortHeaders.items():
longHeaders[v] = k
del k, v
statusCodes = {
100: "Trying",
180: "Ringing",
181: "Call Is Being Forwarded",
182: "Queued",
183: "Session Progress",
200: "OK",
300: "Multiple Choices",
301: "Moved Permanently",
302: "Moved Temporarily",
303: "See Other",
305: "Use Proxy",
380: "Alternative Service",
400: "Bad Request",
401: "Unauthorized",
402: "Payment Required",
403: "Forbidden",
404: "Not Found",
405: "Method Not Allowed",
406: "Not Acceptable",
407: "Proxy Authentication Required",
408: "Request Timeout",
409: "Conflict", # Not in RFC3261
410: "Gone",
411: "Length Required", # Not in RFC3261
413: "Request Entity Too Large",
414: "Request-URI Too Large",
415: "Unsupported Media Type",
416: "Unsupported URI Scheme",
420: "Bad Extension",
421: "Extension Required",
423: "Interval Too Brief",
480: "Temporarily Unavailable",
481: "Call/Transaction Does Not Exist",
482: "Loop Detected",
483: "Too Many Hops",
484: "Address Incomplete",
485: "Ambiguous",
486: "Busy Here",
487: "Request Terminated",
488: "Not Acceptable Here",
491: "Request Pending",
493: "Undecipherable",
500: "Internal Server Error",
501: "Not Implemented",
502: "Bad Gateway", # no donut
503: "Service Unavailable",
504: "Server Time-out",
505: "SIP Version not supported",
513: "Message Too Large",
600: "Busy Everywhere",
603: "Decline",
604: "Does not exist anywhere",
606: "Not Acceptable",
}
specialCases = {
'cseq': 'CSeq',
'call-id': 'Call-ID',
'www-authenticate': 'WWW-Authenticate',
}
def dashCapitalize(s):
''' Capitalize a string, making sure to treat - as a word separator '''
return '-'.join([ x.capitalize() for x in s.split('-')])
def unq(s):
if s[0] == s[-1] == '"':
return s[1:-1]
return s
def DigestCalcHA1(
pszAlg,
pszUserName,
pszRealm,
pszPassword,
pszNonce,
pszCNonce,
):
m = md5()
m.update(pszUserName)
m.update(":")
m.update(pszRealm)
m.update(":")
m.update(pszPassword)
HA1 = m.digest()
if pszAlg == "md5-sess":
m = md5()
m.update(HA1)
m.update(":")
m.update(pszNonce)
m.update(":")
m.update(pszCNonce)
HA1 = m.digest()
return HA1.encode('hex')
DigestCalcHA1 = deprecated(Version("Twisted", 9, 0, 0))(DigestCalcHA1)
def DigestCalcResponse(
HA1,
pszNonce,
pszNonceCount,
pszCNonce,
pszQop,
pszMethod,
pszDigestUri,
pszHEntity,
):
m = md5()
m.update(pszMethod)
m.update(":")
m.update(pszDigestUri)
if pszQop == "auth-int":
m.update(":")
m.update(pszHEntity)
HA2 = m.digest().encode('hex')
m = md5()
m.update(HA1)
m.update(":")
m.update(pszNonce)
m.update(":")
if pszNonceCount and pszCNonce: # pszQop:
m.update(pszNonceCount)
m.update(":")
m.update(pszCNonce)
m.update(":")
m.update(pszQop)
m.update(":")
m.update(HA2)
hash = m.digest().encode('hex')
return hash
DigestCalcResponse = deprecated(Version("Twisted", 9, 0, 0))(DigestCalcResponse)
_absent = object()
class Via(object):
"""
A L{Via} is a SIP Via header, representing a segment of the path taken by
the request.
See RFC 3261, sections 8.1.1.7, 18.2.2, and 20.42.
@ivar transport: Network protocol used for this leg. (Probably either "TCP"
or "UDP".)
@type transport: C{str}
@ivar branch: Unique identifier for this request.
@type branch: C{str}
@ivar host: Hostname or IP for this leg.
@type host: C{str}
@ivar port: Port used for this leg.
    @type port: C{int}, or None.
@ivar rportRequested: Whether to request RFC 3581 client processing or not.
@type rportRequested: C{bool}
@ivar rportValue: Servers wishing to honor requests for RFC 3581 processing
should set this parameter to the source port the request was received
from.
@type rportValue: C{int}, or None.
@ivar ttl: Time-to-live for requests on multicast paths.
@type ttl: C{int}, or None.
@ivar maddr: The destination multicast address, if any.
@type maddr: C{str}, or None.
@ivar hidden: Obsolete in SIP 2.0.
@type hidden: C{bool}
@ivar otherParams: Any other parameters in the header.
@type otherParams: C{dict}
"""
def __init__(self, host, port=PORT, transport="UDP", ttl=None,
hidden=False, received=None, rport=_absent, branch=None,
maddr=None, **kw):
"""
Set parameters of this Via header. All arguments correspond to
attributes of the same name.
To maintain compatibility with old SIP
code, the 'rport' argument is used to determine the values of
C{rportRequested} and C{rportValue}. If None, C{rportRequested} is set
to True. (The deprecated method for doing this is to pass True.) If an
integer, C{rportValue} is set to the given value.
Any arguments not explicitly named here are collected into the
C{otherParams} dict.
"""
self.transport = transport
self.host = host
self.port = port
self.ttl = ttl
self.hidden = hidden
self.received = received
if rport is True:
warnings.warn(
"rport=True is deprecated since Twisted 9.0.",
DeprecationWarning,
stacklevel=2)
self.rportValue = None
self.rportRequested = True
elif rport is None:
self.rportValue = None
self.rportRequested = True
elif rport is _absent:
self.rportValue = None
self.rportRequested = False
else:
self.rportValue = rport
self.rportRequested = False
self.branch = branch
self.maddr = maddr
self.otherParams = kw
def _getrport(self):
"""
Returns the rport value expected by the old SIP code.
"""
if self.rportRequested == True:
return True
elif self.rportValue is not None:
return self.rportValue
else:
return None
def _setrport(self, newRPort):
"""
L{Base._fixupNAT} sets C{rport} directly, so this method sets
C{rportValue} based on that.
@param newRPort: The new rport value.
@type newRPort: C{int}
"""
self.rportValue = newRPort
self.rportRequested = False
rport = property(_getrport, _setrport)
def toString(self):
"""
Serialize this header for use in a request or response.
"""
s = "SIP/2.0/%s %s:%s" % (self.transport, self.host, self.port)
if self.hidden:
s += ";hidden"
for n in "ttl", "branch", "maddr", "received":
value = getattr(self, n)
if value is not None:
s += ";%s=%s" % (n, value)
if self.rportRequested:
s += ";rport"
elif self.rportValue is not None:
s += ";rport=%s" % (self.rport,)
etc = self.otherParams.items()
etc.sort()
for k, v in etc:
if v is None:
s += ";" + k
else:
s += ";%s=%s" % (k, v)
return s
def parseViaHeader(value):
"""
Parse a Via header.
@return: The parsed version of this header.
@rtype: L{Via}
"""
parts = value.split(";")
sent, params = parts[0], parts[1:]
protocolinfo, by = sent.split(" ", 1)
by = by.strip()
result = {}
pname, pversion, transport = protocolinfo.split("/")
if pname != "SIP" or pversion != "2.0":
raise ValueError, "wrong protocol or version: %r" % value
result["transport"] = transport
if ":" in by:
host, port = by.split(":")
result["port"] = int(port)
result["host"] = host
else:
result["host"] = by
for p in params:
        # it's the comment-stripping dance!
p = p.strip().split(" ", 1)
if len(p) == 1:
p, comment = p[0], ""
else:
p, comment = p
if p == "hidden":
result["hidden"] = True
continue
parts = p.split("=", 1)
if len(parts) == 1:
name, value = parts[0], None
else:
name, value = parts
if name in ("rport", "ttl"):
value = int(value)
result[name] = value
return Via(**result)
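# Illustrative round-trip sketch (an addition, not part of the original
# module; the header value is a made-up example): parse a Via header value
# and serialize it back with Via.toString.
def _exampleViaRoundTrip():
    via = parseViaHeader("SIP/2.0/UDP example.com:5060;branch=z9hG4bK74bf9")
    assert via.transport == "UDP"
    assert via.host == "example.com" and via.port == 5060
    assert via.branch == "z9hG4bK74bf9"
    # serializing the parsed header reproduces the original value
    assert via.toString() == "SIP/2.0/UDP example.com:5060;branch=z9hG4bK74bf9"
    return via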
class URL:
"""A SIP URL."""
def __init__(self, host, username=None, password=None, port=None,
transport=None, usertype=None, method=None,
ttl=None, maddr=None, tag=None, other=None, headers=None):
self.username = username
self.host = host
self.password = password
self.port = port
self.transport = transport
self.usertype = usertype
self.method = method
self.tag = tag
self.ttl = ttl
self.maddr = maddr
if other == None:
self.other = []
else:
self.other = other
if headers == None:
self.headers = {}
else:
self.headers = headers
def toString(self):
l = []; w = l.append
w("sip:")
if self.username != None:
w(self.username)
if self.password != None:
w(":%s" % self.password)
w("@")
w(self.host)
if self.port != None:
w(":%d" % self.port)
if self.usertype != None:
w(";user=%s" % self.usertype)
for n in ("transport", "ttl", "maddr", "method", "tag"):
v = getattr(self, n)
if v != None:
w(";%s=%s" % (n, v))
for v in self.other:
w(";%s" % v)
if self.headers:
w("?")
w("&".join([("%s=%s" % (specialCases.get(h) or dashCapitalize(h), v)) for (h, v) in self.headers.items()]))
return "".join(l)
def __str__(self):
return self.toString()
def __repr__(self):
return '<URL %s:%s@%s:%r/%s>' % (self.username, self.password, self.host, self.port, self.transport)
def parseURL(url, host=None, port=None):
"""Return string into URL object.
URIs are of form 'sip:user@example.com'.
"""
d = {}
if not url.startswith("sip:"):
raise ValueError("unsupported scheme: " + url[:4])
parts = url[4:].split(";")
userdomain, params = parts[0], parts[1:]
udparts = userdomain.split("@", 1)
if len(udparts) == 2:
userpass, hostport = udparts
upparts = userpass.split(":", 1)
if len(upparts) == 1:
d["username"] = upparts[0]
else:
d["username"] = upparts[0]
d["password"] = upparts[1]
else:
hostport = udparts[0]
hpparts = hostport.split(":", 1)
if len(hpparts) == 1:
d["host"] = hpparts[0]
else:
d["host"] = hpparts[0]
d["port"] = int(hpparts[1])
if host != None:
d["host"] = host
if port != None:
d["port"] = port
for p in params:
if p == params[-1] and "?" in p:
d["headers"] = h = {}
p, headers = p.split("?", 1)
for header in headers.split("&"):
k, v = header.split("=")
h[k] = v
nv = p.split("=", 1)
if len(nv) == 1:
d.setdefault("other", []).append(p)
continue
name, value = nv
if name == "user":
d["usertype"] = value
elif name in ("transport", "ttl", "maddr", "method", "tag"):
if name == "ttl":
value = int(value)
d[name] = value
else:
d.setdefault("other", []).append(p)
return URL(**d)
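# Illustrative sketch (an addition, not part of the original module): parsing
# a SIP URL of the documented 'sip:user@host' form with a port and parameter.
def _exampleParseURL():
    url = parseURL("sip:bob:secret@example.com:5070;transport=udp")
    assert (url.username, url.password) == ("bob", "secret")
    assert (url.host, url.port, url.transport) == ("example.com", 5070, "udp")
    # toString() reassembles the same URL string
    return url.toString()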
def cleanRequestURL(url):
"""Clean a URL from a Request line."""
url.transport = None
url.maddr = None
url.ttl = None
url.headers = {}
def parseAddress(address, host=None, port=None, clean=0):
"""Return (name, uri, params) for From/To/Contact header.
@param clean: remove unnecessary info, usually for From and To headers.
"""
address = address.strip()
# simple 'sip:foo' case
if address.startswith("sip:"):
return "", parseURL(address, host=host, port=port), {}
params = {}
name, url = address.split("<", 1)
name = name.strip()
if name.startswith('"'):
name = name[1:]
if name.endswith('"'):
name = name[:-1]
url, paramstring = url.split(">", 1)
url = parseURL(url, host=host, port=port)
paramstring = paramstring.strip()
if paramstring:
for l in paramstring.split(";"):
if not l:
continue
k, v = l.split("=")
params[k] = v
if clean:
# rfc 2543 6.21
url.ttl = None
url.headers = {}
url.transport = None
url.maddr = None
return name, url, params
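# Illustrative sketch (an addition, not part of the original module): parsing
# a To/From style address with a display name and a tag parameter.
def _exampleParseAddress():
    name, url, params = parseAddress('"Bob" <sip:bob@example.com>;tag=a48s')
    assert name == "Bob"
    assert url.username == "bob" and url.host == "example.com"
    assert params == {"tag": "a48s"}
    return name, url, params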
class SIPError(Exception):
def __init__(self, code, phrase=None):
if phrase is None:
phrase = statusCodes[code]
Exception.__init__(self, "SIP error (%d): %s" % (code, phrase))
self.code = code
self.phrase = phrase
class RegistrationError(SIPError):
"""Registration was not possible."""
class Message:
"""A SIP message."""
length = None
def __init__(self):
self.headers = util.OrderedDict() # map name to list of values
self.body = ""
self.finished = 0
def addHeader(self, name, value):
name = name.lower()
name = longHeaders.get(name, name)
if name == "content-length":
self.length = int(value)
self.headers.setdefault(name,[]).append(value)
def bodyDataReceived(self, data):
self.body += data
def creationFinished(self):
if (self.length != None) and (self.length != len(self.body)):
raise ValueError, "wrong body length"
self.finished = 1
def toString(self):
s = "%s\r\n" % self._getHeaderLine()
for n, vs in self.headers.items():
for v in vs:
s += "%s: %s\r\n" % (specialCases.get(n) or dashCapitalize(n), v)
s += "\r\n"
s += self.body
return s
def _getHeaderLine(self):
raise NotImplementedError
class Request(Message):
"""A Request for a URI"""
def __init__(self, method, uri, version="SIP/2.0"):
Message.__init__(self)
self.method = method
if isinstance(uri, URL):
self.uri = uri
else:
self.uri = parseURL(uri)
cleanRequestURL(self.uri)
def __repr__(self):
return "<SIP Request %d:%s %s>" % (id(self), self.method, self.uri.toString())
def _getHeaderLine(self):
return "%s %s SIP/2.0" % (self.method, self.uri.toString())
class Response(Message):
"""A Response to a URI Request"""
def __init__(self, code, phrase=None, version="SIP/2.0"):
Message.__init__(self)
self.code = code
if phrase == None:
phrase = statusCodes[code]
self.phrase = phrase
def __repr__(self):
return "<SIP Response %d:%s>" % (id(self), self.code)
def _getHeaderLine(self):
return "SIP/2.0 %s %s" % (self.code, self.phrase)
class MessagesParser(basic.LineReceiver):
"""A SIP messages parser.
Expects dataReceived, dataDone repeatedly,
in that order. Shouldn't be connected to actual transport.
"""
version = "SIP/2.0"
acceptResponses = 1
acceptRequests = 1
state = "firstline" # or "headers", "body" or "invalid"
debug = 0
def __init__(self, messageReceivedCallback):
self.messageReceived = messageReceivedCallback
self.reset()
def reset(self, remainingData=""):
self.state = "firstline"
self.length = None # body length
self.bodyReceived = 0 # how much of the body we received
self.message = None
self.header = None
self.setLineMode(remainingData)
def invalidMessage(self):
self.state = "invalid"
self.setRawMode()
def dataDone(self):
# clear out any buffered data that may be hanging around
self.clearLineBuffer()
if self.state == "firstline":
return
if self.state != "body":
self.reset()
return
if self.length == None:
# no content-length header, so end of data signals message done
self.messageDone()
elif self.length < self.bodyReceived:
# aborted in the middle
self.reset()
else:
# we have enough data and message wasn't finished? something is wrong
raise RuntimeError, "this should never happen"
def dataReceived(self, data):
try:
basic.LineReceiver.dataReceived(self, data)
except:
log.err()
self.invalidMessage()
def handleFirstLine(self, line):
"""Expected to create self.message."""
raise NotImplementedError
def lineLengthExceeded(self, line):
self.invalidMessage()
def lineReceived(self, line):
if self.state == "firstline":
while line.startswith("\n") or line.startswith("\r"):
line = line[1:]
if not line:
return
try:
a, b, c = line.split(" ", 2)
except ValueError:
self.invalidMessage()
return
if a == "SIP/2.0" and self.acceptResponses:
# response
try:
code = int(b)
except ValueError:
self.invalidMessage()
return
self.message = Response(code, c)
elif c == "SIP/2.0" and self.acceptRequests:
self.message = Request(a, b)
else:
self.invalidMessage()
return
self.state = "headers"
return
else:
assert self.state == "headers"
if line:
# multiline header
if line.startswith(" ") or line.startswith("\t"):
name, value = self.header
self.header = name, (value + line.lstrip())
else:
# new header
if self.header:
self.message.addHeader(*self.header)
self.header = None
try:
name, value = line.split(":", 1)
except ValueError:
self.invalidMessage()
return
self.header = name, value.lstrip()
# XXX we assume content-length won't be multiline
if name.lower() == "content-length":
try:
self.length = int(value.lstrip())
except ValueError:
self.invalidMessage()
return
else:
# CRLF, we now have message body until self.length bytes,
# or if no length was given, until there is no more data
# from the connection sending us data.
self.state = "body"
if self.header:
self.message.addHeader(*self.header)
self.header = None
if self.length == 0:
self.messageDone()
return
self.setRawMode()
def messageDone(self, remainingData=""):
assert self.state == "body"
self.message.creationFinished()
self.messageReceived(self.message)
self.reset(remainingData)
def rawDataReceived(self, data):
assert self.state in ("body", "invalid")
if self.state == "invalid":
return
if self.length == None:
self.message.bodyDataReceived(data)
else:
dataLen = len(data)
expectedLen = self.length - self.bodyReceived
if dataLen > expectedLen:
self.message.bodyDataReceived(data[:expectedLen])
self.messageDone(data[expectedLen:])
return
else:
self.bodyReceived += dataLen
self.message.bodyDataReceived(data)
if self.bodyReceived == self.length:
self.messageDone()
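# Illustrative usage sketch (an addition, not part of the original module):
# feeding one complete request through a standalone MessagesParser, as its
# docstring describes. The message below is a made-up example.
def _exampleParseRequest():
    received = []
    parser = MessagesParser(received.append)
    parser.dataReceived(
        "OPTIONS sip:bob@example.com SIP/2.0\r\n"
        "Via: SIP/2.0/UDP pc33.example.com;branch=z9hG4bK776asdhds\r\n"
        "To: <sip:bob@example.com>\r\n"
        "From: <sip:alice@example.com>;tag=1928301774\r\n"
        "Call-ID: a84b4c76e66710@pc33.example.com\r\n"
        "CSeq: 63104 OPTIONS\r\n"
        "Content-Length: 0\r\n"
        "\r\n")
    parser.dataDone()
    # received now holds a single Request instance for the OPTIONS message
    return received[0]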
class Base(protocol.DatagramProtocol):
"""Base class for SIP clients and servers."""
PORT = PORT
debug = False
def __init__(self):
self.messages = []
self.parser = MessagesParser(self.addMessage)
def addMessage(self, msg):
self.messages.append(msg)
def datagramReceived(self, data, addr):
self.parser.dataReceived(data)
self.parser.dataDone()
for m in self.messages:
self._fixupNAT(m, addr)
if self.debug:
log.msg("Received %r from %r" % (m.toString(), addr))
if isinstance(m, Request):
self.handle_request(m, addr)
else:
self.handle_response(m, addr)
self.messages[:] = []
def _fixupNAT(self, message, (srcHost, srcPort)):
# RFC 2543 6.40.2,
senderVia = parseViaHeader(message.headers["via"][0])
if senderVia.host != srcHost:
senderVia.received = srcHost
if senderVia.port != srcPort:
senderVia.rport = srcPort
message.headers["via"][0] = senderVia.toString()
elif senderVia.rport == True:
senderVia.received = srcHost
senderVia.rport = srcPort
message.headers["via"][0] = senderVia.toString()
def deliverResponse(self, responseMessage):
"""Deliver response.
Destination is based on topmost Via header."""
destVia = parseViaHeader(responseMessage.headers["via"][0])
# XXX we don't do multicast yet
host = destVia.received or destVia.host
port = destVia.rport or destVia.port or self.PORT
destAddr = URL(host=host, port=port)
self.sendMessage(destAddr, responseMessage)
def responseFromRequest(self, code, request):
"""Create a response to a request message."""
response = Response(code)
for name in ("via", "to", "from", "call-id", "cseq"):
response.headers[name] = request.headers.get(name, [])[:]
return response
def sendMessage(self, destURL, message):
"""Send a message.
@param destURL: C{URL}. This should be a *physical* URL, not a logical one.
@param message: The message to send.
"""
if destURL.transport not in ("udp", None):
raise RuntimeError, "only UDP currently supported"
if self.debug:
log.msg("Sending %r to %r" % (message.toString(), destURL))
self.transport.write(message.toString(), (destURL.host, destURL.port or self.PORT))
def handle_request(self, message, addr):
"""Override to define behavior for requests received
@type message: C{Message}
@type addr: C{tuple}
"""
raise NotImplementedError
def handle_response(self, message, addr):
"""Override to define behavior for responses received.
@type message: C{Message}
@type addr: C{tuple}
"""
raise NotImplementedError
class IContact(Interface):
"""A user of a registrar or proxy"""
class Registration:
def __init__(self, secondsToExpiry, contactURL):
self.secondsToExpiry = secondsToExpiry
self.contactURL = contactURL
class IRegistry(Interface):
"""Allows registration of logical->physical URL mapping."""
def registerAddress(domainURL, logicalURL, physicalURL):
"""Register the physical address of a logical URL.
@return: Deferred of C{Registration} or failure with RegistrationError.
"""
def unregisterAddress(domainURL, logicalURL, physicalURL):
"""Unregister the physical address of a logical URL.
@return: Deferred of C{Registration} or failure with RegistrationError.
"""
def getRegistrationInfo(logicalURL):
"""Get registration info for logical URL.
@return: Deferred of C{Registration} object or failure of LookupError.
"""
class ILocator(Interface):
"""Allow looking up physical address for logical URL."""
def getAddress(logicalURL):
"""Return physical URL of server for logical URL of user.
@param logicalURL: a logical C{URL}.
@return: Deferred which becomes URL or fails with LookupError.
"""
class Proxy(Base):
"""SIP proxy."""
PORT = PORT
locator = None # object implementing ILocator
def __init__(self, host=None, port=PORT):
"""Create new instance.
@param host: our hostname/IP as set in Via headers.
@param port: our port as set in Via headers.
"""
self.host = host or socket.getfqdn()
self.port = port
Base.__init__(self)
def getVia(self):
"""Return value of Via header for this proxy."""
return Via(host=self.host, port=self.port)
def handle_request(self, message, addr):
# send immediate 100/trying message before processing
#self.deliverResponse(self.responseFromRequest(100, message))
f = getattr(self, "handle_%s_request" % message.method, None)
if f is None:
f = self.handle_request_default
try:
d = f(message, addr)
except SIPError, e:
self.deliverResponse(self.responseFromRequest(e.code, message))
except:
log.err()
self.deliverResponse(self.responseFromRequest(500, message))
else:
if d is not None:
d.addErrback(lambda e:
self.deliverResponse(self.responseFromRequest(e.code, message))
)
def handle_request_default(self, message, (srcHost, srcPort)):
"""Default request handler.
        Default behaviour for OPTIONS and unknown methods for proxies
        is to forward the message on to the client.
        Since at the moment we are a stateless proxy, that's basically
        everything.
"""
def _mungContactHeader(uri, message):
message.headers['contact'][0] = uri.toString()
return self.sendMessage(uri, message)
viaHeader = self.getVia()
if viaHeader.toString() in message.headers["via"]:
# must be a loop, so drop message
log.msg("Dropping looped message.")
return
message.headers["via"].insert(0, viaHeader.toString())
name, uri, tags = parseAddress(message.headers["to"][0], clean=1)
# this is broken and needs refactoring to use cred
d = self.locator.getAddress(uri)
d.addCallback(self.sendMessage, message)
d.addErrback(self._cantForwardRequest, message)
def _cantForwardRequest(self, error, message):
error.trap(LookupError)
del message.headers["via"][0] # this'll be us
self.deliverResponse(self.responseFromRequest(404, message))
def deliverResponse(self, responseMessage):
"""Deliver response.
Destination is based on topmost Via header."""
destVia = parseViaHeader(responseMessage.headers["via"][0])
# XXX we don't do multicast yet
host = destVia.received or destVia.host
port = destVia.rport or destVia.port or self.PORT
destAddr = URL(host=host, port=port)
self.sendMessage(destAddr, responseMessage)
def responseFromRequest(self, code, request):
"""Create a response to a request message."""
response = Response(code)
for name in ("via", "to", "from", "call-id", "cseq"):
response.headers[name] = request.headers.get(name, [])[:]
return response
def handle_response(self, message, addr):
"""Default response handler."""
v = parseViaHeader(message.headers["via"][0])
if (v.host, v.port) != (self.host, self.port):
# we got a message not intended for us?
# XXX note this check breaks if we have multiple external IPs
# yay for suck protocols
log.msg("Dropping incorrectly addressed message")
return
del message.headers["via"][0]
if not message.headers["via"]:
# this message is addressed to us
self.gotResponse(message, addr)
return
self.deliverResponse(message)
def gotResponse(self, message, addr):
"""Called with responses that are addressed at this server."""
pass
class IAuthorizer(Interface):
def getChallenge(peer):
"""Generate a challenge the client may respond to.
@type peer: C{tuple}
@param peer: The client's address
@rtype: C{str}
@return: The challenge string
"""
def decode(response):
"""Create a credentials object from the given response.
@type response: C{str}
"""
class BasicAuthorizer:
"""Authorizer for insecure Basic (base64-encoded plaintext) authentication.
This form of authentication is broken and insecure. Do not use it.
"""
implements(IAuthorizer)
def __init__(self):
"""
This method exists solely to issue a deprecation warning.
"""
warnings.warn(
"twisted.protocols.sip.BasicAuthorizer was deprecated "
"in Twisted 9.0.0",
category=DeprecationWarning,
stacklevel=2)
def getChallenge(self, peer):
return None
def decode(self, response):
# At least one SIP client improperly pads its Base64 encoded messages
for i in range(3):
try:
creds = (response + ('=' * i)).decode('base64')
except:
pass
else:
break
else:
# Totally bogus
raise SIPError(400)
p = creds.split(':', 1)
if len(p) == 2:
return UsernamePassword(*p)
raise SIPError(400)
class DigestedCredentials(UsernameHashedPassword):
"""Yet Another Simple Digest-MD5 authentication scheme"""
def __init__(self, username, fields, challenges):
warnings.warn(
"twisted.protocols.sip.DigestedCredentials was deprecated "
"in Twisted 9.0.0",
category=DeprecationWarning,
stacklevel=2)
self.username = username
self.fields = fields
self.challenges = challenges
def checkPassword(self, password):
method = 'REGISTER'
response = self.fields.get('response')
uri = self.fields.get('uri')
nonce = self.fields.get('nonce')
cnonce = self.fields.get('cnonce')
nc = self.fields.get('nc')
algo = self.fields.get('algorithm', 'MD5')
qop = self.fields.get('qop-options', 'auth')
opaque = self.fields.get('opaque')
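# The fields used above come from the parsed Digest Authorization header.
# As an illustration only (hypothetical values, not from a real exchange),
# self.fields might look like:
#   {'username': 'alice', 'nonce': '1234', 'cnonce': 'abcd', 'nc': '00000001',
#    'uri': 'sip:example.org', 'response': '<32 hex digits>', 'opaque': '5678'}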
if opaque not in self.challenges:
return False
del self.challenges[opaque]
user, domain = self.username.split('@', 1)
if uri is None:
uri = 'sip:' + domain
expected = DigestCalcResponse(
DigestCalcHA1(algo, user, domain, password, nonce, cnonce),
nonce, nc, cnonce, qop, method, uri, None,
)
return expected == response
class DigestAuthorizer:
CHALLENGE_LIFETIME = 15
implements(IAuthorizer)
def __init__(self):
warnings.warn(
"twisted.protocols.sip.DigestAuthorizer was deprecated "
"in Twisted 9.0.0",
category=DeprecationWarning,
stacklevel=2)
self.outstanding = {}
def generateNonce(self):
c = tuple([random.randrange(sys.maxint) for _ in range(3)])
c = '%d%d%d' % c
return c
def generateOpaque(self):
return str(random.randrange(sys.maxint))
def getChallenge(self, peer):
c = self.generateNonce()
o = self.generateOpaque()
self.outstanding[o] = c
return ','.join((
'nonce="%s"' % c,
'opaque="%s"' % o,
'qop-options="auth"',
'algorithm="MD5"',
))
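# When RegisterProxy.unauthorized() (below) relays this challenge, the
# resulting WWW-Authenticate header value looks roughly like the following
# (values illustrative only):
#   Digest nonce="...",opaque="...",qop-options="auth",algorithm="MD5",realm="proxy.example.org"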
def decode(self, response):
response = ' '.join(response.splitlines())
parts = response.split(',')
auth = dict([(k.strip(), unq(v.strip())) for (k, v) in [p.split('=', 1) for p in parts]])
try:
username = auth['username']
except KeyError:
raise SIPError(401)
try:
return DigestedCredentials(username, auth, self.outstanding)
except:
raise SIPError(400)
class RegisterProxy(Proxy):
"""A proxy that allows registration for a specific domain.
Unregistered users won't be handled.
"""
portal = None
registry = None # should implement IRegistry
authorizers = {}
def __init__(self, *args, **kw):
Proxy.__init__(self, *args, **kw)
self.liveChallenges = {}
if "digest" not in self.authorizers:
self.authorizers["digest"] = DigestAuthorizer()
def handle_ACK_request(self, message, (host, port)):
# XXX
# ACKs are a client's way of indicating they got the last message
# Responding to them is not a good idea.
# However, we should keep track of terminal messages and re-transmit
# if no ACK is received.
pass
def handle_REGISTER_request(self, message, (host, port)):
"""Handle a registration request.
Currently registration is not proxied.
"""
if self.portal is None:
# There is no portal. Let anyone in.
self.register(message, host, port)
else:
# There is a portal. Check for credentials.
if not message.headers.has_key("authorization"):
return self.unauthorized(message, host, port)
else:
return self.login(message, host, port)
def unauthorized(self, message, host, port):
m = self.responseFromRequest(401, message)
for (scheme, auth) in self.authorizers.iteritems():
chal = auth.getChallenge((host, port))
if chal is None:
value = '%s realm="%s"' % (scheme.title(), self.host)
else:
value = '%s %s,realm="%s"' % (scheme.title(), chal, self.host)
m.headers.setdefault('www-authenticate', []).append(value)
self.deliverResponse(m)
def login(self, message, host, port):
parts = message.headers['authorization'][0].split(None, 1)
a = self.authorizers.get(parts[0].lower())
if a:
try:
c = a.decode(parts[1])
except SIPError:
raise
except:
log.err()
self.deliverResponse(self.responseFromRequest(500, message))
else:
c.username += '@' + self.host
self.portal.login(c, None, IContact
).addCallback(self._cbLogin, message, host, port
).addErrback(self._ebLogin, message, host, port
).addErrback(log.err
)
else:
self.deliverResponse(self.responseFromRequest(501, message))
def _cbLogin(self, (i, a, l), message, host, port):
# It's stateless, matey. What a joke.
self.register(message, host, port)
def _ebLogin(self, failure, message, host, port):
failure.trap(cred.error.UnauthorizedLogin)
self.unauthorized(message, host, port)
def register(self, message, host, port):
"""Allow all users to register"""
name, toURL, params = parseAddress(message.headers["to"][0], clean=1)
contact = None
if message.headers.has_key("contact"):
contact = message.headers["contact"][0]
if message.headers.get("expires", [None])[0] == "0":
self.unregister(message, toURL, contact)
else:
# XXX Check expires on appropriate URL, and pass it to registry
# instead of having registry hardcode it.
if contact is not None:
name, contactURL, params = parseAddress(contact, host=host, port=port)
d = self.registry.registerAddress(message.uri, toURL, contactURL)
else:
d = self.registry.getRegistrationInfo(toURL)
d.addCallbacks(self._cbRegister, self._ebRegister,
callbackArgs=(message,),
errbackArgs=(message,)
)
def _cbRegister(self, registration, message):
response = self.responseFromRequest(200, message)
if registration.contactURL != None:
response.addHeader("contact", registration.contactURL.toString())
response.addHeader("expires", "%d" % registration.secondsToExpiry)
response.addHeader("content-length", "0")
self.deliverResponse(response)
def _ebRegister(self, error, message):
error.trap(RegistrationError, LookupError)
# XXX return error message, and alter tests to deal with
# this, currently tests assume no message sent on failure
def unregister(self, message, toURL, contact):
try:
expires = int(message.headers["expires"][0])
except ValueError:
self.deliverResponse(self.responseFromRequest(400, message))
else:
if expires == 0:
if contact == "*":
contactURL = "*"
else:
name, contactURL, params = parseAddress(contact)
d = self.registry.unregisterAddress(message.uri, toURL, contactURL)
d.addCallback(self._cbUnregister, message
).addErrback(self._ebUnregister, message
)
def _cbUnregister(self, registration, message):
msg = self.responseFromRequest(200, message)
msg.headers.setdefault('contact', []).append(registration.contactURL.toString())
msg.addHeader("expires", "0")
self.deliverResponse(msg)
def _ebUnregister(self, registration, message):
pass
class InMemoryRegistry:
"""A simplistic registry for a specific domain."""
implements(IRegistry, ILocator)
def __init__(self, domain):
self.domain = domain # the domain we handle registration for
self.users = {} # map username to (IDelayedCall for expiry, address URI)
def getAddress(self, userURI):
if userURI.host != self.domain:
return defer.fail(LookupError("unknown domain"))
if userURI.username in self.users:
dc, url = self.users[userURI.username]
return defer.succeed(url)
else:
return defer.fail(LookupError("no such user"))
def getRegistrationInfo(self, userURI):
if userURI.host != self.domain:
return defer.fail(LookupError("unknown domain"))
if self.users.has_key(userURI.username):
dc, url = self.users[userURI.username]
return defer.succeed(Registration(int(dc.getTime() - time.time()), url))
else:
return defer.fail(LookupError("no such user"))
def _expireRegistration(self, username):
try:
dc, url = self.users[username]
except KeyError:
return defer.fail(LookupError("no such user"))
else:
dc.cancel()
del self.users[username]
return defer.succeed(Registration(0, url))
def registerAddress(self, domainURL, logicalURL, physicalURL):
if domainURL.host != self.domain:
log.msg("Registration for domain we don't handle.")
return defer.fail(RegistrationError(404))
if logicalURL.host != self.domain:
log.msg("Registration for domain we don't handle.")
return defer.fail(RegistrationError(404))
if logicalURL.username in self.users:
dc, old = self.users[logicalURL.username]
dc.reset(3600)
else:
dc = reactor.callLater(3600, self._expireRegistration, logicalURL.username)
log.msg("Registered %s at %s" % (logicalURL.toString(), physicalURL.toString()))
self.users[logicalURL.username] = (dc, physicalURL)
return defer.succeed(Registration(int(dc.getTime() - time.time()), physicalURL))
def unregisterAddress(self, domainURL, logicalURL, physicalURL):
return self._expireRegistration(logicalURL.username)
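# Illustrative wiring of the pieces above (a sketch, not part of the original
# module; assumes the standard reactor and the hypothetical domain
# "example.org"):
#
#   registry = InMemoryRegistry("example.org")
#   proxy = RegisterProxy(host="proxy.example.org")
#   proxy.registry = proxy.locator = registry
#   reactor.listenUDP(PORT, proxy)  # Base is a DatagramProtocol subclass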
| mit | 7,306,836,371,678,490,000 | -3,827,158,971,092,067,300 | 30.398217 | 119 | 0.566963 | false |
pombredanne/tahoe-lafs | src/allmydata/mutable/filenode.py | 2 | 46275 |
import random
from zope.interface import implements
from twisted.internet import defer, reactor
from foolscap.api import eventually
from allmydata.interfaces import IMutableFileNode, ICheckable, ICheckResults, \
NotEnoughSharesError, MDMF_VERSION, SDMF_VERSION, IMutableUploadable, \
IMutableFileVersion, IWriteable
from allmydata.util import hashutil, log, consumer, deferredutil, mathutil
from allmydata.util.assertutil import precondition
from allmydata.uri import WriteableSSKFileURI, ReadonlySSKFileURI, \
WriteableMDMFFileURI, ReadonlyMDMFFileURI
from allmydata.monitor import Monitor
from pycryptopp.cipher.aes import AES
from allmydata.mutable.publish import Publish, MutableData,\
TransformingUploadable
from allmydata.mutable.common import MODE_READ, MODE_WRITE, MODE_CHECK, UnrecoverableFileError, \
UncoordinatedWriteError
from allmydata.mutable.servermap import ServerMap, ServermapUpdater
from allmydata.mutable.retrieve import Retrieve
from allmydata.mutable.checker import MutableChecker, MutableCheckAndRepairer
from allmydata.mutable.repairer import Repairer
class BackoffAgent:
# these parameters are copied from foolscap.reconnector, which gets them
# from twisted.internet.protocol.ReconnectingClientFactory
initialDelay = 1.0
factor = 2.7182818284590451 # (math.e)
jitter = 0.11962656492 # molar Planck constant times c, Joule meter/mole
maxRetries = 4
def __init__(self):
self._delay = self.initialDelay
self._count = 0
def delay(self, node, f):
self._count += 1
if self._count == 4:
return f
self._delay = self._delay * self.factor
self._delay = random.normalvariate(self._delay,
self._delay * self.jitter)
d = defer.Deferred()
reactor.callLater(self._delay, d.callback, None)
return d
# use nodemaker.create_mutable_file() to make one of these
class MutableFileNode:
implements(IMutableFileNode, ICheckable)
def __init__(self, storage_broker, secret_holder,
default_encoding_parameters, history):
self._storage_broker = storage_broker
self._secret_holder = secret_holder
self._default_encoding_parameters = default_encoding_parameters
self._history = history
self._pubkey = None # filled in upon first read
self._privkey = None # filled in if we're mutable
# we keep track of the last encoding parameters that we use. These
# are updated upon retrieve, and used by publish. If we publish
# without ever reading (i.e. overwrite()), then we use these values.
self._required_shares = default_encoding_parameters["k"]
self._total_shares = default_encoding_parameters["n"]
self._sharemap = {} # known shares, shnum-to-[nodeids]
self._most_recent_size = None
# filled in after __init__ if we're being created for the first time;
# filled in by the servermap updater before publishing, otherwise.
# set to this default value in case neither of those things happen,
# or in case the servermap can't find any shares to tell us what
# to publish as.
self._protocol_version = None
# all users of this MutableFileNode go through the serializer. This
# takes advantage of the fact that Deferreds discard the callbacks
# that they're done with, so we can keep using the same Deferred
# forever without consuming more and more memory.
self._serializer = defer.succeed(None)
# Starting with MDMF, we can get these from caps if they're
# there. Leave them alone for now; they'll be filled in by my
# init_from_cap method if necessary.
self._downloader_hints = {}
def __repr__(self):
if hasattr(self, '_uri'):
return "<%s %x %s %s>" % (self.__class__.__name__, id(self), self.is_readonly() and 'RO' or 'RW', self._uri.abbrev())
else:
return "<%s %x %s %s>" % (self.__class__.__name__, id(self), None, None)
def init_from_cap(self, filecap):
# we have the URI, but we have not yet retrieved the public
# verification key, nor things like 'k' or 'N'. If and when someone
# wants to get our contents, we'll pull from shares and fill those
# in.
if isinstance(filecap, (WriteableMDMFFileURI, ReadonlyMDMFFileURI)):
self._protocol_version = MDMF_VERSION
elif isinstance(filecap, (ReadonlySSKFileURI, WriteableSSKFileURI)):
self._protocol_version = SDMF_VERSION
self._uri = filecap
self._writekey = None
if not filecap.is_readonly() and filecap.is_mutable():
self._writekey = self._uri.writekey
self._readkey = self._uri.readkey
self._storage_index = self._uri.storage_index
self._fingerprint = self._uri.fingerprint
# the following values are learned during Retrieval
# self._pubkey
# self._required_shares
# self._total_shares
# and these are needed for Publish. They are filled in by Retrieval
# if possible, otherwise by the first peer that Publish talks to.
self._privkey = None
self._encprivkey = None
return self
def create_with_keys(self, (pubkey, privkey), contents,
version=SDMF_VERSION):
"""Call this to create a brand-new mutable file. It will create the
shares, find homes for them, and upload the initial contents (created
with the same rules as IClient.create_mutable_file() ). Returns a
Deferred that fires (with the MutableFileNode instance you should
use) when it completes.
"""
self._pubkey, self._privkey = pubkey, privkey
pubkey_s = self._pubkey.serialize()
privkey_s = self._privkey.serialize()
self._writekey = hashutil.ssk_writekey_hash(privkey_s)
self._encprivkey = self._encrypt_privkey(self._writekey, privkey_s)
self._fingerprint = hashutil.ssk_pubkey_fingerprint_hash(pubkey_s)
if version == MDMF_VERSION:
self._uri = WriteableMDMFFileURI(self._writekey, self._fingerprint)
self._protocol_version = version
elif version == SDMF_VERSION:
self._uri = WriteableSSKFileURI(self._writekey, self._fingerprint)
self._protocol_version = version
self._readkey = self._uri.readkey
self._storage_index = self._uri.storage_index
initial_contents = self._get_initial_contents(contents)
return self._upload(initial_contents, None)
def _get_initial_contents(self, contents):
if contents is None:
return MutableData("")
if isinstance(contents, str):
return MutableData(contents)
if IMutableUploadable.providedBy(contents):
return contents
assert callable(contents), "%s should be callable, not %s" % \
(contents, type(contents))
return contents(self)
def _encrypt_privkey(self, writekey, privkey):
enc = AES(writekey)
crypttext = enc.process(privkey)
return crypttext
def _decrypt_privkey(self, enc_privkey):
enc = AES(self._writekey)
privkey = enc.process(enc_privkey)
return privkey
def _populate_pubkey(self, pubkey):
self._pubkey = pubkey
def _populate_required_shares(self, required_shares):
self._required_shares = required_shares
def _populate_total_shares(self, total_shares):
self._total_shares = total_shares
def _populate_privkey(self, privkey):
self._privkey = privkey
def _populate_encprivkey(self, encprivkey):
self._encprivkey = encprivkey
def get_write_enabler(self, server):
seed = server.get_foolscap_write_enabler_seed()
assert len(seed) == 20
return hashutil.ssk_write_enabler_hash(self._writekey, seed)
def get_renewal_secret(self, server):
crs = self._secret_holder.get_renewal_secret()
frs = hashutil.file_renewal_secret_hash(crs, self._storage_index)
lease_seed = server.get_lease_seed()
assert len(lease_seed) == 20
return hashutil.bucket_renewal_secret_hash(frs, lease_seed)
def get_cancel_secret(self, server):
ccs = self._secret_holder.get_cancel_secret()
fcs = hashutil.file_cancel_secret_hash(ccs, self._storage_index)
lease_seed = server.get_lease_seed()
assert len(lease_seed) == 20
return hashutil.bucket_cancel_secret_hash(fcs, lease_seed)
def get_writekey(self):
return self._writekey
def get_readkey(self):
return self._readkey
def get_storage_index(self):
return self._storage_index
def get_fingerprint(self):
return self._fingerprint
def get_privkey(self):
return self._privkey
def get_encprivkey(self):
return self._encprivkey
def get_pubkey(self):
return self._pubkey
def get_required_shares(self):
return self._required_shares
def get_total_shares(self):
return self._total_shares
####################################
# IFilesystemNode
def get_size(self):
return self._most_recent_size
def get_current_size(self):
d = self.get_size_of_best_version()
d.addCallback(self._stash_size)
return d
def _stash_size(self, size):
self._most_recent_size = size
return size
def get_cap(self):
return self._uri
def get_readcap(self):
return self._uri.get_readonly()
def get_verify_cap(self):
return self._uri.get_verify_cap()
def get_repair_cap(self):
if self._uri.is_readonly():
return None
return self._uri
def get_uri(self):
return self._uri.to_string()
def get_write_uri(self):
if self.is_readonly():
return None
return self._uri.to_string()
def get_readonly_uri(self):
return self._uri.get_readonly().to_string()
def get_readonly(self):
if self.is_readonly():
return self
ro = MutableFileNode(self._storage_broker, self._secret_holder,
self._default_encoding_parameters, self._history)
ro.init_from_cap(self._uri.get_readonly())
return ro
def is_mutable(self):
return self._uri.is_mutable()
def is_readonly(self):
return self._uri.is_readonly()
def is_unknown(self):
return False
def is_allowed_in_immutable_directory(self):
return not self._uri.is_mutable()
def raise_error(self):
pass
def __hash__(self):
return hash((self.__class__, self._uri))
def __cmp__(self, them):
if cmp(type(self), type(them)):
return cmp(type(self), type(them))
if cmp(self.__class__, them.__class__):
return cmp(self.__class__, them.__class__)
return cmp(self._uri, them._uri)
#################################
# ICheckable
def check(self, monitor, verify=False, add_lease=False):
checker = MutableChecker(self, self._storage_broker,
self._history, monitor)
return checker.check(verify, add_lease)
def check_and_repair(self, monitor, verify=False, add_lease=False):
checker = MutableCheckAndRepairer(self, self._storage_broker,
self._history, monitor)
return checker.check(verify, add_lease)
#################################
# IRepairable
def repair(self, check_results, force=False, monitor=None):
assert ICheckResults(check_results)
r = Repairer(self, check_results, self._storage_broker,
self._history, monitor)
d = r.start(force)
return d
#################################
# IFileNode
def get_best_readable_version(self):
"""
I return a Deferred that fires with a MutableFileVersion
representing the best readable version of the file that I
represent
"""
return self.get_readable_version()
def get_readable_version(self, servermap=None, version=None):
"""
I return a Deferred that fires with a MutableFileVersion for my
version argument, if there is a recoverable file of that version
on the grid. If there is no recoverable version, I fire with an
UnrecoverableFileError.
If a servermap is provided, I look in there for the requested
version. If no servermap is provided, I create and update a new
one.
If no version is provided, then I return a MutableFileVersion
representing the best recoverable version of the file.
"""
d = self._get_version_from_servermap(MODE_READ, servermap, version)
def _build_version((servermap, their_version)):
assert their_version in servermap.recoverable_versions()
assert their_version in servermap.make_versionmap()
mfv = MutableFileVersion(self,
servermap,
their_version,
self._storage_index,
self._storage_broker,
self._readkey,
history=self._history)
assert mfv.is_readonly()
mfv.set_downloader_hints(self._downloader_hints)
# our caller can use this to download the contents of the
# mutable file.
return mfv
return d.addCallback(_build_version)
def _get_version_from_servermap(self,
mode,
servermap=None,
version=None):
"""
I return a Deferred that fires with (servermap, version).
This function performs validation and a servermap update. If it
returns (servermap, version), the caller can assume that:
- servermap was last updated in mode.
- version is recoverable, and corresponds to the servermap.
If version and servermap are provided to me, I will validate
that version exists in the servermap, and that the servermap was
updated correctly.
If version is not provided, but servermap is, I will validate
the servermap and return the best recoverable version that I can
find in the servermap.
If the version is provided but the servermap isn't, I will
obtain a servermap that has been updated in the correct mode and
validate that version is found and recoverable.
If neither servermap nor version are provided, I will obtain a
servermap updated in the correct mode, and return the best
recoverable version that I can find in there.
"""
# XXX: wording ^^^^
if servermap and servermap.get_last_update()[0] == mode:
d = defer.succeed(servermap)
else:
d = self._get_servermap(mode)
def _get_version(servermap, v):
if v and v not in servermap.recoverable_versions():
v = None
elif not v:
v = servermap.best_recoverable_version()
if not v:
raise UnrecoverableFileError("no recoverable versions")
return (servermap, v)
return d.addCallback(_get_version, version)
def download_best_version(self, progress=None):
"""
I return a Deferred that fires with the contents of the best
version of this mutable file.
"""
return self._do_serialized(self._download_best_version, progress=progress)
def _download_best_version(self, progress=None):
"""
I am the serialized sibling of download_best_version.
"""
d = self.get_best_readable_version()
d.addCallback(self._record_size)
d.addCallback(lambda version: version.download_to_data(progress=progress))
# It is possible that the download will fail because there
# aren't enough shares to be had. If so, we will try again after
# updating the servermap in MODE_WRITE, which may find more
# shares than updating in MODE_READ, as we just did. We can do
# this by getting the best mutable version and downloading from
# that -- the best mutable version will be a MutableFileVersion
# with a servermap that was last updated in MODE_WRITE, as we
# want. If this fails, then we give up.
def _maybe_retry(failure):
failure.trap(NotEnoughSharesError)
d = self.get_best_mutable_version()
d.addCallback(self._record_size)
d.addCallback(lambda version: version.download_to_data(progress=progress))
return d
d.addErrback(_maybe_retry)
return d
def _record_size(self, mfv):
"""
I record the size of a mutable file version.
"""
self._most_recent_size = mfv.get_size()
return mfv
def get_size_of_best_version(self):
"""
I return the size of the best version of this mutable file.
This is equivalent to calling get_size() on the result of
get_best_readable_version().
"""
d = self.get_best_readable_version()
return d.addCallback(lambda mfv: mfv.get_size())
#################################
# IMutableFileNode
def get_best_mutable_version(self, servermap=None):
"""
I return a Deferred that fires with a MutableFileVersion
representing the best readable version of the file that I
represent. I am like get_best_readable_version, except that I
will try to make a writeable version if I can.
"""
return self.get_mutable_version(servermap=servermap)
def get_mutable_version(self, servermap=None, version=None):
"""
I return a version of this mutable file. I return a Deferred
that fires with a MutableFileVersion
If version is provided, the Deferred will fire with a
MutableFileVersion initialized with that version. Otherwise, it
will fire with the best version that I can recover.
If servermap is provided, I will use that to find versions
instead of performing my own servermap update.
"""
if self.is_readonly():
return self.get_readable_version(servermap=servermap,
version=version)
# get_mutable_version => write intent, so we require that the
# servermap is updated in MODE_WRITE
d = self._get_version_from_servermap(MODE_WRITE, servermap, version)
def _build_version((servermap, smap_version)):
# these should have been set by the servermap update.
assert self._secret_holder
assert self._writekey
mfv = MutableFileVersion(self,
servermap,
smap_version,
self._storage_index,
self._storage_broker,
self._readkey,
self._writekey,
self._secret_holder,
history=self._history)
assert not mfv.is_readonly()
mfv.set_downloader_hints(self._downloader_hints)
return mfv
return d.addCallback(_build_version)
# XXX: I'm uncomfortable with the difference between upload and
# overwrite, which, FWICT, is basically that you don't have to
# do a servermap update before you overwrite. We split them up
# that way anyway, so I guess there's no real difficulty in
# offering both ways to callers, but it also makes the
# public-facing API cluttery, and makes it hard to discern the
# right way of doing things.
# In general, we leave it to callers to ensure that they aren't
# going to cause UncoordinatedWriteErrors when working with
# MutableFileVersions. We know that the next three operations
# (upload, overwrite, and modify) will all operate on the same
# version, so we say that only one of them can be going on at once,
# and serialize them to ensure that that actually happens, since as
# the caller in this situation it is our job to do that.
def overwrite(self, new_contents):
"""
I overwrite the contents of the best recoverable version of this
mutable file with new_contents. This is equivalent to calling
overwrite on the result of get_best_mutable_version with
new_contents as an argument. I return a Deferred that eventually
fires with the results of my replacement process.
"""
# TODO: Update downloader hints.
return self._do_serialized(self._overwrite, new_contents)
def _overwrite(self, new_contents):
"""
I am the serialized sibling of overwrite.
"""
d = self.get_best_mutable_version()
d.addCallback(lambda mfv: mfv.overwrite(new_contents))
d.addCallback(self._did_upload, new_contents.get_size())
return d
def upload(self, new_contents, servermap):
"""
I overwrite the contents of the best recoverable version of this
mutable file with new_contents, using servermap instead of
creating/updating our own servermap. I return a Deferred that
fires with the results of my upload.
"""
# TODO: Update downloader hints
return self._do_serialized(self._upload, new_contents, servermap)
def modify(self, modifier, backoffer=None):
"""
I modify the contents of the best recoverable version of this
mutable file with the modifier. This is equivalent to calling
modify on the result of get_best_mutable_version. I return a
Deferred that eventually fires with an UploadResults instance
describing this process.
"""
# TODO: Update downloader hints.
return self._do_serialized(self._modify, modifier, backoffer)
def _modify(self, modifier, backoffer):
"""
I am the serialized sibling of modify.
"""
d = self.get_best_mutable_version()
d.addCallback(lambda mfv: mfv.modify(modifier, backoffer))
return d
def download_version(self, servermap, version, fetch_privkey=False):
"""
Download the specified version of this mutable file. I return a
Deferred that fires with the contents of the specified version
as a bytestring, or errbacks if the file is not recoverable.
"""
d = self.get_readable_version(servermap, version)
return d.addCallback(lambda mfv: mfv.download_to_data(fetch_privkey))
def get_servermap(self, mode):
"""
I return a servermap that has been updated in mode.
mode should be one of MODE_READ, MODE_WRITE, MODE_CHECK or
MODE_ANYTHING. See servermap.py for more on what these mean.
"""
return self._do_serialized(self._get_servermap, mode)
def _get_servermap(self, mode):
"""
I am a serialized twin to get_servermap.
"""
servermap = ServerMap()
d = self._update_servermap(servermap, mode)
# The servermap will tell us about the most recent size of the
# file, so we may as well set that so that callers might get
# more data about us.
if not self._most_recent_size:
d.addCallback(self._get_size_from_servermap)
return d
def _get_size_from_servermap(self, servermap):
"""
I extract the size of the best version of this file and record
it in self._most_recent_size. I return the servermap that I was
given.
"""
if servermap.recoverable_versions():
v = servermap.best_recoverable_version()
size = v[4] # verinfo[4] == size
self._most_recent_size = size
return servermap
def _update_servermap(self, servermap, mode):
u = ServermapUpdater(self, self._storage_broker, Monitor(), servermap,
mode)
if self._history:
self._history.notify_mapupdate(u.get_status())
return u.update()
#def set_version(self, version):
# I can be set in two ways:
# 1. When the node is created.
# 2. (for an existing share) when the Servermap is updated
# before I am read.
# assert version in (MDMF_VERSION, SDMF_VERSION)
# self._protocol_version = version
def get_version(self):
return self._protocol_version
def _do_serialized(self, cb, *args, **kwargs):
# note: to avoid deadlock, this callable is *not* allowed to invoke
# other serialized methods within this (or any other)
# MutableFileNode. The callable should be a bound method of this same
# MFN instance.
d = defer.Deferred()
self._serializer.addCallback(lambda ignore: cb(*args, **kwargs))
# we need to put off d.callback until this Deferred is finished being
# processed. Otherwise the caller's subsequent activities (like,
# doing other things with this node) can cause reentrancy problems in
# the Deferred code itself
self._serializer.addBoth(lambda res: eventually(d.callback, res))
# add a log.err just in case something really weird happens, because
# self._serializer stays around forever, therefore we won't see the
# usual Unhandled Error in Deferred that would give us a hint.
self._serializer.addErrback(log.err)
return d
def _upload(self, new_contents, servermap):
"""
A MutableFileNode still has to have some way of getting
published initially, which is what I am here for. After that,
all publishing, updating, modifying and so on happens through
MutableFileVersions.
"""
assert self._pubkey, "update_servermap must be called before publish"
# Define IPublishInvoker with a set_downloader_hints method?
# Then have the publisher call that method when it's done publishing?
p = Publish(self, self._storage_broker, servermap)
if self._history:
self._history.notify_publish(p.get_status(),
new_contents.get_size())
d = p.publish(new_contents)
d.addCallback(self._did_upload, new_contents.get_size())
return d
def set_downloader_hints(self, hints):
self._downloader_hints = hints
def _did_upload(self, res, size):
self._most_recent_size = size
return res
class MutableFileVersion:
"""
I represent a specific version (most likely the best version) of a
mutable file.
Since I implement IReadable, instances which hold a
reference to an instance of me are guaranteed the ability (absent
connection difficulties or unrecoverable versions) to read the file
that I represent. Depending on whether I was initialized with a
write capability or not, I may also provide callers the ability to
overwrite or modify the contents of the mutable file that I
reference.
"""
implements(IMutableFileVersion, IWriteable)
def __init__(self,
node,
servermap,
version,
storage_index,
storage_broker,
readcap,
writekey=None,
write_secrets=None,
history=None):
self._node = node
self._servermap = servermap
self._version = version
self._storage_index = storage_index
self._write_secrets = write_secrets
self._history = history
self._storage_broker = storage_broker
#assert isinstance(readcap, IURI)
self._readcap = readcap
self._writekey = writekey
self._serializer = defer.succeed(None)
def get_sequence_number(self):
"""
Get the sequence number of the mutable version that I represent.
"""
return self._version[0] # verinfo[0] == the sequence number
# TODO: Terminology?
def get_writekey(self):
"""
I return a writekey or None if I don't have a writekey.
"""
return self._writekey
def set_downloader_hints(self, hints):
"""
I set the downloader hints.
"""
assert isinstance(hints, dict)
self._downloader_hints = hints
def get_downloader_hints(self):
"""
I return the downloader hints.
"""
return self._downloader_hints
def overwrite(self, new_contents):
"""
I overwrite the contents of this mutable file version with the
data in new_contents.
"""
assert not self.is_readonly()
return self._do_serialized(self._overwrite, new_contents)
def _overwrite(self, new_contents):
assert IMutableUploadable.providedBy(new_contents)
assert self._servermap.get_last_update()[0] == MODE_WRITE
return self._upload(new_contents)
def modify(self, modifier, backoffer=None):
"""I use a modifier callback to apply a change to the mutable file.
I implement the following pseudocode::
obtain_mutable_filenode_lock()
first_time = True
while True:
update_servermap(MODE_WRITE)
old = retrieve_best_version()
new = modifier(old, servermap, first_time)
first_time = False
if new == old: break
try:
publish(new)
except UncoordinatedWriteError, e:
backoffer(e)
continue
break
release_mutable_filenode_lock()
The idea is that your modifier function can apply a delta of some
sort, and it will be re-run as necessary until it succeeds. The
modifier must inspect the old version to see whether its delta has
already been applied: if so it should return the contents unmodified.
Note that the modifier is required to run synchronously, and must not
invoke any methods on this MutableFileNode instance.
The backoff-er is a callable that is responsible for inserting a
random delay between subsequent attempts, to keep competing updates
from colliding forever. It is also allowed to give up after a while.
The backoffer is given two arguments: this MutableFileNode, and the
Failure object that contains the UncoordinatedWriteError. It should
return a Deferred that will fire when the next attempt should be
made, or return the Failure if the loop should give up. If
backoffer=None, a default one is provided which will perform
exponential backoff, and give up after 4 tries. Note that the
backoffer should not invoke any methods on this MutableFileNode
instance, and it needs to be highly conscious of deadlock issues.
"""
assert not self.is_readonly()
return self._do_serialized(self._modify, modifier, backoffer)
def _modify(self, modifier, backoffer):
if backoffer is None:
backoffer = BackoffAgent().delay
return self._modify_and_retry(modifier, backoffer, True)
def _modify_and_retry(self, modifier, backoffer, first_time):
"""
I try to apply modifier to the contents of this version of the
mutable file. If I succeed, I return an UploadResults instance
describing my success. If I fail, I try again after waiting for
a little bit.
"""
log.msg("doing modify")
if first_time:
d = self._update_servermap()
else:
# We ran into trouble; do MODE_CHECK so we're a little more
# careful on subsequent tries.
d = self._update_servermap(mode=MODE_CHECK)
d.addCallback(lambda ignored:
self._modify_once(modifier, first_time))
def _retry(f):
f.trap(UncoordinatedWriteError)
# Uh oh, it broke. We're allowed to trust the servermap for our
# first try, but after that we need to update it. It's
# possible that we've failed due to a race with another
# uploader, and if the race is to converge correctly, we
# need to know about that upload.
d2 = defer.maybeDeferred(backoffer, self, f)
d2.addCallback(lambda ignored:
self._modify_and_retry(modifier,
backoffer, False))
return d2
d.addErrback(_retry)
return d
def _modify_once(self, modifier, first_time):
"""
I attempt to apply a modifier to the contents of the mutable
file.
"""
assert self._servermap.get_last_update()[0] != MODE_READ
# download_to_data is serialized, so we have to call this to
# avoid deadlock.
d = self._try_to_download_data()
def _apply(old_contents):
new_contents = modifier(old_contents, self._servermap, first_time)
precondition((isinstance(new_contents, str) or
new_contents is None),
"Modifier function must return a string "
"or None")
if new_contents is None or new_contents == old_contents:
log.msg("no changes")
# no changes need to be made
if first_time:
return
# However, since Publish is not automatically doing a
# recovery when it observes UCWE, we need to do a second
# publish. See #551 for details. We'll basically loop until
# we manage an uncontested publish.
old_uploadable = MutableData(old_contents)
new_contents = old_uploadable
else:
new_contents = MutableData(new_contents)
return self._upload(new_contents)
d.addCallback(_apply)
return d
def is_readonly(self):
"""
I return True if this MutableFileVersion provides no write
access to the file that it encapsulates, and False if it
provides the ability to modify the file.
"""
return self._writekey is None
def is_mutable(self):
"""
I return True, since mutable files are always mutable by
somebody.
"""
return True
def get_storage_index(self):
"""
I return the storage index of the reference that I encapsulate.
"""
return self._storage_index
def get_size(self):
"""
I return the length, in bytes, of this readable object.
"""
return self._servermap.size_of_version(self._version)
def download_to_data(self, fetch_privkey=False, progress=None):
"""
I return a Deferred that fires with the contents of this
readable object as a byte string.
"""
c = consumer.MemoryConsumer(progress=progress)
d = self.read(c, fetch_privkey=fetch_privkey)
d.addCallback(lambda mc: "".join(mc.chunks))
return d
def _try_to_download_data(self):
"""
I am an unserialized cousin of download_to_data; I am called
from the children of modify() to download the data associated
with this mutable version.
"""
c = consumer.MemoryConsumer()
# modify will almost certainly write, so we need the privkey.
d = self._read(c, fetch_privkey=True)
d.addCallback(lambda mc: "".join(mc.chunks))
return d
def read(self, consumer, offset=0, size=None, fetch_privkey=False):
"""
I read a portion (possibly all) of the mutable file that I
reference into consumer.
"""
return self._do_serialized(self._read, consumer, offset, size,
fetch_privkey)
def _read(self, consumer, offset=0, size=None, fetch_privkey=False):
"""
I am the serialized companion of read.
"""
r = Retrieve(self._node, self._storage_broker, self._servermap,
self._version, fetch_privkey)
if self._history:
self._history.notify_retrieve(r.get_status())
d = r.download(consumer, offset, size)
return d
def _do_serialized(self, cb, *args, **kwargs):
# note: to avoid deadlock, this callable is *not* allowed to invoke
# other serialized methods within this (or any other)
# MutableFileNode. The callable should be a bound method of this same
# MFN instance.
d = defer.Deferred()
self._serializer.addCallback(lambda ignore: cb(*args, **kwargs))
# we need to put off d.callback until this Deferred is finished being
# processed. Otherwise the caller's subsequent activities (like,
# doing other things with this node) can cause reentrancy problems in
# the Deferred code itself
self._serializer.addBoth(lambda res: eventually(d.callback, res))
# add a log.err just in case something really weird happens, because
# self._serializer stays around forever, therefore we won't see the
# usual Unhandled Error in Deferred that would give us a hint.
self._serializer.addErrback(log.err)
return d
def _upload(self, new_contents):
#assert self._pubkey, "update_servermap must be called before publish"
p = Publish(self._node, self._storage_broker, self._servermap)
if self._history:
self._history.notify_publish(p.get_status(),
new_contents.get_size())
d = p.publish(new_contents)
d.addCallback(self._did_upload, new_contents.get_size())
return d
def _did_upload(self, res, size):
self._most_recent_size = size
return res
def update(self, data, offset):
"""
Do an update of this mutable file version by inserting data at
offset within the file. If offset is the EOF, this is an append
operation. I return a Deferred that fires with the results of
the update operation when it has completed.
In cases where update does not append any data, or where it does
not append so many blocks that the block count crosses a
power-of-two boundary, this operation will use roughly
O(data.get_size()) memory/bandwidth/CPU to perform the update.
Otherwise, it must download, re-encode, and upload the entire
file again, which will use O(filesize) resources.
"""
return self._do_serialized(self._update, data, offset)
def _update(self, data, offset):
"""
I update the mutable file version represented by this particular
IMutableVersion by inserting the data in data at the offset
offset. I return a Deferred that fires when this has been
completed.
"""
new_size = data.get_size() + offset
old_size = self.get_size()
segment_size = self._version[3]
num_old_segments = mathutil.div_ceil(old_size,
segment_size)
num_new_segments = mathutil.div_ceil(new_size,
segment_size)
log.msg("got %d old segments, %d new segments" % \
(num_old_segments, num_new_segments))
# We do a whole file re-encode if the file is an SDMF file.
if self._version[2]: # version[2] == SDMF salt, which MDMF lacks
log.msg("doing re-encode instead of in-place update")
return self._do_modify_update(data, offset)
# Otherwise, we can replace just the parts that are changing.
log.msg("updating in place")
d = self._do_update_update(data, offset)
d.addCallback(self._decode_and_decrypt_segments, data, offset)
d.addCallback(self._build_uploadable_and_finish, data, offset)
return d
def _do_modify_update(self, data, offset):
"""
I perform a file update by modifying the contents of the file
after downloading it, then reuploading it. I am less efficient
than _do_update_update, but am necessary for certain updates.
"""
def m(old, servermap, first_time):
start = offset
rest = offset + data.get_size()
new = old[:start]
new += "".join(data.read(data.get_size()))
new += old[rest:]
return new
return self._modify(m, None)
def _do_update_update(self, data, offset):
"""
I start the Servermap update that gets us the data we need to
continue the update process. I return a Deferred that fires when
the servermap update is done.
"""
assert IMutableUploadable.providedBy(data)
assert self.is_mutable()
# offset == self.get_size() is valid and means that we are
# appending data to the file.
assert offset <= self.get_size()
segsize = self._version[3]
# We'll need the segment that the data starts in, regardless of
# what we'll do later.
start_segment = offset // segsize
# We only need the end segment if the data we append does not go
# beyond the current end-of-file.
end_segment = start_segment
if offset + data.get_size() < self.get_size():
end_data = offset + data.get_size()
# The last byte we touch is the end_data'th byte, which is actually
# byte end_data - 1 because bytes are zero-indexed.
end_data -= 1
end_segment = end_data // segsize
self._start_segment = start_segment
self._end_segment = end_segment
# Now ask for the servermap to be updated in MODE_WRITE with
# this update range.
return self._update_servermap(update_range=(start_segment,
end_segment))
def _decode_and_decrypt_segments(self, ignored, data, offset):
"""
After the servermap update, I take the encrypted and encoded
data that the servermap fetched while doing its update and
transform it into decoded-and-decrypted plaintext that can be
used by the new uploadable. I return a Deferred that fires with
the segments.
"""
r = Retrieve(self._node, self._storage_broker, self._servermap,
self._version)
# decode: takes in our blocks and salts from the servermap,
# returns a Deferred that fires with the corresponding plaintext
# segments. Does not download -- simply takes advantage of
# existing infrastructure within the Retrieve class to avoid
# duplicating code.
sm = self._servermap
# XXX: If the methods in the servermap don't work as
# abstractions, you should rewrite them instead of going around
# them.
update_data = sm.update_data
start_segments = {} # shnum -> start segment
end_segments = {} # shnum -> end segment
blockhashes = {} # shnum -> blockhash tree
for (shnum, original_data) in update_data.iteritems():
data = [d[1] for d in original_data if d[0] == self._version]
# data is [(blockhashes,start,end)..]
# Every data entry in our list should now be for share shnum for
# a particular version of the mutable file, so all of the
# entries should be identical.
datum = data[0]
assert [x for x in data if x != datum] == []
# datum is (blockhashes,start,end)
blockhashes[shnum] = datum[0]
start_segments[shnum] = datum[1] # (block,salt) bytestrings
end_segments[shnum] = datum[2]
d1 = r.decode(start_segments, self._start_segment)
d2 = r.decode(end_segments, self._end_segment)
d3 = defer.succeed(blockhashes)
return deferredutil.gatherResults([d1, d2, d3])
def _build_uploadable_and_finish(self, segments_and_bht, data, offset):
"""
After the process has the plaintext segments, I build the
TransformingUploadable that the publisher will eventually
re-upload to the grid. I then invoke the publisher with that
uploadable, and return a Deferred that fires when the publish operation has
completed without issue.
"""
u = TransformingUploadable(data, offset,
self._version[3],
segments_and_bht[0],
segments_and_bht[1])
p = Publish(self._node, self._storage_broker, self._servermap)
return p.update(u, offset, segments_and_bht[2], self._version)
def _update_servermap(self, mode=MODE_WRITE, update_range=None):
"""
I update the servermap. I return a Deferred that fires when the
servermap update is done.
"""
if update_range:
u = ServermapUpdater(self._node, self._storage_broker, Monitor(),
self._servermap,
mode=mode,
update_range=update_range)
else:
u = ServermapUpdater(self._node, self._storage_broker, Monitor(),
self._servermap,
mode=mode)
return u.update()
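# Illustrative use of the classes above (a sketch only; a MutableFileNode is
# normally obtained from the client's nodemaker rather than built directly):
#
#   d = node.download_best_version()  # Deferred firing with the contents
#   d.addCallback(lambda contents: node.overwrite(MutableData(contents + "!")))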
| gpl-2.0 | -7,937,661,244,697,442,000 | 3,475,592,748,840,157,700 | 37.853904 | 129 | 0.608363 | false |
shinyChen/browserscope | test/test_util.py | 9 | 6660 | #!/usr/bin/python2.5
#
# Copyright 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the 'License')
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Shared Models Unit Tests."""
__author__ = 'elsigh@google.com (Lindsey Simon)'
import unittest
import random
import logging
from google.appengine.ext import db
from google.appengine.api import memcache
from django.test.client import Client
from base import util
from categories import all_test_sets
from categories import test_set_params
from models import result
from models.user_agent import UserAgent
import mock_data
import settings
from categories import richtext
class TestHome(unittest.TestCase):
def setUp(self):
self.client = Client()
def testHome(self):
response = self.client.get('/', {}, **mock_data.UNIT_TEST_UA)
self.assertEqual(200, response.status_code)
#def testHomeWithResults(self):
#test_set = mock_data.MockTestSet('cat_home')
#params = {'cat_home_results': 'apple=0,banana=97,coconut=677'}
#response = self.client.get('/', params, **mock_data.UNIT_TEST_UA)
#self.assertEqual(200, response.status_code)
class TestBeacon(unittest.TestCase):
def setUp(self):
self.test_set = mock_data.MockTestSet()
all_test_sets.AddTestSet(self.test_set)
self.client = Client()
def tearDown(self):
all_test_sets.RemoveTestSet(self.test_set)
def testBeaconWithoutCsrfToken(self):
params = {}
response = self.client.get('/beacon', params, **mock_data.UNIT_TEST_UA)
self.assertEqual(403, response.status_code)
def testBeaconWithoutCategory(self):
csrf_token = self.client.get('/get_csrf').content
params = {'results': 'testDisply:200', 'csrf_token': csrf_token}
response = self.client.get('/beacon', params, **mock_data.UNIT_TEST_UA)
self.assertEqual(util.BAD_BEACON_MSG + 'Category/Results', response.content)
def testBeacon(self):
csrf_token = self.client.get('/get_csrf').content
params = {
'category': self.test_set.category,
'results': 'apple=1,banana=2,coconut=4',
'csrf_token': csrf_token
}
response = self.client.get('/beacon', params, **mock_data.UNIT_TEST_UA)
self.assertEqual(204, response.status_code)
# Did a ResultParent get created?
query = db.Query(result.ResultParent)
query.filter('category =', self.test_set.category)
result_parent = query.get()
self.assertNotEqual(result_parent, None)
result_times = result_parent.GetResultTimes()
self.assertEqual(
[('apple', 1, False), ('banana', 2, False), ('coconut', 4, False)],
sorted((x.test, x.score, x.dirty) for x in result_times))
def testBeaconWithChromeFrame(self):
csrf_token = self.client.get('/get_csrf').content
chrome_ua_string = ('Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) '
'AppleWebKit/530.1 (KHTML, like Gecko) Chrome/4.0.169.1 Safari/530.1')
chrome_frame_ua_string = ('Mozilla/4.0 '
'(compatible; MSIE 7.0; Windows NT 5.1; Trident/4.0; '
'chromeframe; '
'.NET CLR 2.0.50727; .NET CLR 1.1.4322; '
'.NET CLR 3.0.04506.648; .NET CLR 3.5.21022)')
unit_test_ua = mock_data.UNIT_TEST_UA
unit_test_ua['HTTP_USER_AGENT'] = chrome_frame_ua_string
params = {
'category': self.test_set.category,
'results': 'apple=0,banana=0,coconut=1000',
'csrf_token': csrf_token,
'js_ua': chrome_ua_string
}
response = self.client.get('/beacon', params, **unit_test_ua)
self.assertEqual(204, response.status_code)
# Did a ResultParent get created?
query = db.Query(result.ResultParent)
query.filter('category =', self.test_set.category)
result_parent = query.get()
self.assertNotEqual(result_parent, None)
# What UA did the ResultParent get tied to? Chrome Frame (IE 7) I hope.
user_agent = result_parent.user_agent
self.assertEqual('Chrome Frame (IE 7) 4.0.169', user_agent.pretty())
# Were ResultTimes created?
result_times = result_parent.GetResultTimes()
self.assertEqual(
[('apple', 0, False), ('banana', 0, False), ('coconut', 1000, False)],
sorted((x.test, x.score, x.dirty) for x in result_times))
def testBeaconWithBogusTests(self):
csrf_token = self.client.get('/get_csrf').content
params = {
'category': self.test_set.category,
'results': 'testBogus=1,testVisibility=2',
'csrf_token': csrf_token
}
response = self.client.get('/beacon', params, **mock_data.UNIT_TEST_UA)
self.assertEqual(util.BAD_BEACON_MSG + 'ResultParent', response.content)
# Did a ResultParent get created? Shouldn't have.
query = db.Query(result.ResultParent)
query.filter('category =', self.test_set.category)
result_parent = query.get()
self.assertEqual(None, result_parent)
def testBeaconWithoutTestSet(self):
category = 'test_beacon_wo_test_set'
csrf_token = self.client.get('/get_csrf').content
params = {
'category': category,
'results': 'testDisplay=1,testVisibility=2',
'csrf_token': csrf_token
}
response = self.client.get('/beacon', params, **mock_data.UNIT_TEST_UA)
self.assertEqual(util.BAD_BEACON_MSG + 'TestSet', response.content)
class TestUtilFunctions(unittest.TestCase):
def testCheckThrottleIpAddress(self):
ip = mock_data.UNIT_TEST_UA['REMOTE_ADDR']
ua_string = mock_data.UNIT_TEST_UA['HTTP_USER_AGENT']
category = 'foo'
for i in range(11):
self.assertTrue(util.CheckThrottleIpAddress(ip, ua_string, category))
# The next one should bomb.
self.assertFalse(util.CheckThrottleIpAddress(ip, ua_string, category))
# But a new category should work fine.
self.assertTrue(util.CheckThrottleIpAddress(ip, ua_string, 'bar'))
class TestClearMemcache(unittest.TestCase):
def setUp(self):
self.client = Client()
def testClearMemcacheRecentTests(self):
memcache.set(util.RECENT_TESTS_MEMCACHE_KEY, 'foo')
params = {'recent': 1}
response = self.client.get('/clear_memcache', params)
recent_tests = memcache.get(util.RECENT_TESTS_MEMCACHE_KEY)
self.assertEqual(None, recent_tests)
self.assertEqual(200, response.status_code)
if __name__ == '__main__':
unittest.main()
| apache-2.0 | -683,832,852,822,910,700 | 2,869,262,754,986,060,000 | 33.507772 | 80 | 0.684685 | false |
wolfram74/numerical_methods_iserles_notes | venv/lib/python2.7/site-packages/sympy/calculus/tests/test_finite_diff.py | 18 | 7438 | from sympy import S, symbols, Function
from sympy.calculus.finite_diff import (
apply_finite_diff, finite_diff_weights, as_finite_diff
)
def test_apply_finite_diff():
x, h = symbols('x h')
f = Function('f')
assert (apply_finite_diff(1, [x-h, x+h], [f(x-h), f(x+h)], x) -
(f(x+h)-f(x-h))/(2*h)).simplify() == 0
assert (apply_finite_diff(1, [5, 6, 7], [f(5), f(6), f(7)], 5) -
(-S(3)/2*f(5) + 2*f(6) - S(1)/2*f(7))).simplify() == 0
def test_finite_diff_weights():
d = finite_diff_weights(1, [5, 6, 7], 5)
assert d[1][2] == [-S(3)/2, 2, -S(1)/2]
# Table 1, p. 702 in doi:10.1090/S0025-5718-1988-0935077-0
# --------------------------------------------------------
# x = [[0], [-1, 0, 1], ...]
xl = [[j for j in range(-i, i+1)] for i in range(0, 5)]
# d holds all coefficients
d = [finite_diff_weights({0: 0, 1: 2, 2: 4, 3: 4, 4: 4}[i],
xl[i], 0) for i in range(5)]
# Zeroth derivative
assert d[0][0][0] == [S(1)]
# First derivative
assert d[1][1][2] == [-S(1)/2, S(0), S(1)/2]
assert d[2][1][4] == [S(1)/12, -S(2)/3, S(0), S(2)/3, -S(1)/12]
assert d[3][1][6] == [-S(1)/60, S(3)/20, -S(3)/4, S(0), S(3)/4, -S(3)/20,
S(1)/60]
assert d[4][1][8] == [S(1)/280, -S(4)/105, S(1)/5, -S(4)/5, S(0), S(4)/5,
-S(1)/5, S(4)/105, -S(1)/280]
# Second derivative
assert d[1][2][2] == [S(1), -S(2), S(1)]
assert d[2][2][4] == [-S(1)/12, S(4)/3, -S(5)/2, S(4)/3, -S(1)/12]
assert d[3][2][6] == [S(1)/90, -S(3)/20, S(3)/2, -S(49)/18, S(3)/2,
-S(3)/20, S(1)/90]
assert d[4][2][8] == [-S(1)/560, S(8)/315, -S(1)/5, S(8)/5, -S(205)/72,
S(8)/5, -S(1)/5, S(8)/315, -S(1)/560]
# Third derivative
assert d[2][3][4] == [-S(1)/2, S(1), S(0), -S(1), S(1)/2]
assert d[3][3][6] == [S(1)/8, -S(1), S(13)/8, S(0), -S(13)/8, S(1),
-S(1)/8]
assert d[4][3][8] == [-S(7)/240, S(3)/10, -S(169)/120, S(61)/30, S(0),
-S(61)/30, S(169)/120, -S(3)/10, S(7)/240]
# Fourth derivative
assert d[2][4][4] == [S(1), -S(4), S(6), -S(4), S(1)]
assert d[3][4][6] == [-S(1)/6, S(2), -S(13)/2, S(28)/3, -S(13)/2, S(2),
-S(1)/6]
assert d[4][4][8] == [S(7)/240, -S(2)/5, S(169)/60, -S(122)/15, S(91)/8,
-S(122)/15, S(169)/60, -S(2)/5, S(7)/240]
# Table 2, p. 703 in doi:10.1090/S0025-5718-1988-0935077-0
# --------------------------------------------------------
xl = [[j/S(2) for j in list(range(-i*2+1, 0, 2))+list(range(1, i*2+1, 2))]
for i in range(1, 5)]
# d holds all coefficients
d = [finite_diff_weights({0: 1, 1: 2, 2: 4, 3: 4}[i], xl[i], 0) for
i in range(4)]
# Zeroth derivative
assert d[0][0][1] == [S(1)/2, S(1)/2]
assert d[1][0][3] == [-S(1)/16, S(9)/16, S(9)/16, -S(1)/16]
assert d[2][0][5] == [S(3)/256, -S(25)/256, S(75)/128, S(75)/128,
-S(25)/256, S(3)/256]
assert d[3][0][7] == [-S(5)/2048, S(49)/2048, -S(245)/2048, S(1225)/2048,
S(1225)/2048, -S(245)/2048, S(49)/2048, -S(5)/2048]
# First derivative
assert d[0][1][1] == [-S(1), S(1)]
assert d[1][1][3] == [S(1)/24, -S(9)/8, S(9)/8, -S(1)/24]
assert d[2][1][5] == [-S(3)/640, S(25)/384, -S(75)/64, S(75)/64,
-S(25)/384, S(3)/640]
assert d[3][1][7] == [S(5)/7168, -S(49)/5120, S(245)/3072, S(-1225)/1024,
S(1225)/1024, -S(245)/3072, S(49)/5120, -S(5)/7168]
# Reasonably the rest of the table is also correct... (testing of that
# deemed excessive at the moment)
def test_as_finite_diff():
x, h = symbols('x h')
f = Function('f')
# Central 1st derivative at gridpoint
assert (as_finite_diff(f(x).diff(x), [x-2, x-1, x, x+1, x+2]) -
(S(1)/12*(f(x-2)-f(x+2)) + S(2)/3*(f(x+1)-f(x-1)))).simplify() == 0
# Central 1st derivative "half-way"
assert (as_finite_diff(f(x).diff(x)) -
(f(x + S(1)/2)-f(x - S(1)/2))).simplify() == 0
assert (as_finite_diff(f(x).diff(x), h) -
(f(x + h/S(2))-f(x - h/S(2)))/h).simplify() == 0
assert (as_finite_diff(f(x).diff(x), [x - 3*h, x-h, x+h, x + 3*h]) -
(S(9)/(8*2*h)*(f(x+h) - f(x-h)) +
S(1)/(24*2*h)*(f(x - 3*h) - f(x + 3*h)))).simplify() == 0
# One sided 1st derivative at gridpoint
assert (as_finite_diff(f(x).diff(x), [0, 1, 2], 0) -
(-S(3)/2*f(0) + 2*f(1) - f(2)/2)).simplify() == 0
assert (as_finite_diff(f(x).diff(x), [x, x+h], x) -
(f(x+h) - f(x))/h).simplify() == 0
assert (as_finite_diff(f(x).diff(x), [x-h, x, x+h], x-h) -
(-S(3)/(2*h)*f(x-h) + 2/h*f(x) -
S(1)/(2*h)*f(x+h))).simplify() == 0
# One sided 1st derivative "half-way"
assert (as_finite_diff(f(x).diff(x), [x-h, x+h, x + 3*h, x + 5*h, x + 7*h])
- 1/(2*h)*(-S(11)/(12)*f(x-h) + S(17)/(24)*f(x+h)
+ S(3)/8*f(x + 3*h) - S(5)/24*f(x + 5*h)
+ S(1)/24*f(x + 7*h))).simplify() == 0
# Central 2nd derivative at gridpoint
assert (as_finite_diff(f(x).diff(x, 2), [x-h, x, x+h]) -
h**-2 * (f(x-h) + f(x+h) - 2*f(x))).simplify() == 0
assert (as_finite_diff(f(x).diff(x, 2), [x - 2*h, x-h, x, x+h, x + 2*h]) -
h**-2 * (-S(1)/12*(f(x - 2*h) + f(x + 2*h)) +
S(4)/3*(f(x+h) + f(x-h)) - S(5)/2*f(x))).simplify() == 0
# Central 2nd derivative "half-way"
assert (as_finite_diff(f(x).diff(x, 2), [x - 3*h, x-h, x+h, x + 3*h]) -
(2*h)**-2 * (S(1)/2*(f(x - 3*h) + f(x + 3*h)) -
S(1)/2*(f(x+h) + f(x-h)))).simplify() == 0
# One sided 2nd derivative at gridpoint
assert (as_finite_diff(f(x).diff(x, 2), [x, x+h, x + 2*h, x + 3*h]) -
h**-2 * (2*f(x) - 5*f(x+h) +
4*f(x+2*h) - f(x+3*h))).simplify() == 0
# One sided 2nd derivative at "half-way"
assert (as_finite_diff(f(x).diff(x, 2), [x-h, x+h, x + 3*h, x + 5*h]) -
(2*h)**-2 * (S(3)/2*f(x-h) - S(7)/2*f(x+h) + S(5)/2*f(x + 3*h) -
S(1)/2*f(x + 5*h))).simplify() == 0
# Central 3rd derivative at gridpoint
assert (as_finite_diff(f(x).diff(x, 3)) -
(-f(x - 3/S(2)) + 3*f(x - 1/S(2)) -
3*f(x + 1/S(2)) + f(x + 3/S(2)))).simplify() == 0
assert (as_finite_diff(
f(x).diff(x, 3), [x - 3*h, x - 2*h, x-h, x, x+h, x + 2*h, x + 3*h]) -
h**-3 * (S(1)/8*(f(x - 3*h) - f(x + 3*h)) - f(x - 2*h) +
f(x + 2*h) + S(13)/8*(f(x-h) - f(x+h)))).simplify() == 0
# Central 3rd derivative at "half-way"
assert (as_finite_diff(f(x).diff(x, 3), [x - 3*h, x-h, x+h, x + 3*h]) -
(2*h)**-3 * (f(x + 3*h)-f(x - 3*h) +
3*(f(x-h)-f(x+h)))).simplify() == 0
# One sided 3rd derivative at gridpoint
assert (as_finite_diff(f(x).diff(x, 3), [x, x+h, x + 2*h, x + 3*h]) -
h**-3 * (f(x + 3*h)-f(x) + 3*(f(x+h)-f(x + 2*h)))).simplify() == 0
# One sided 3rd derivative at "half-way"
assert (as_finite_diff(f(x).diff(x, 3), [x-h, x+h, x + 3*h, x + 5*h]) -
(2*h)**-3 * (f(x + 5*h)-f(x-h) +
3*(f(x+h)-f(x + 3*h)))).simplify() == 0
| mit | 2,416,990,553,576,661,000 | 5,706,420,352,104,313,000 | 42.752941 | 79 | 0.417451 | false |
TamiaLab/carnetdumaker | apps/bugtracker/tests/test_context_processors.py | 1 | 2920 | """
Test suite for the context processors of the bug tracker app.
"""
from django.test import SimpleTestCase
from django.http import HttpRequest
from ..context_processors import bugtracker
from ..constants import (STATUS_OPEN,
STATUS_NEED_DETAILS,
STATUS_CONFIRMED,
STATUS_WORKING_ON,
STATUS_DEFERRED,
STATUS_DUPLICATE,
STATUS_WONT_FIX,
STATUS_CLOSED,
STATUS_FIXED)
from ..constants import (PRIORITY_GODZILLA,
PRIORITY_CRITICAL,
PRIORITY_MAJOR,
PRIORITY_MINOR,
PRIORITY_TRIVIAL,
PRIORITY_NEED_REVIEW,
PRIORITY_FEATURE,
PRIORITY_WISHLIST,
PRIORITY_INVALID,
PRIORITY_NOT_MY_FAULT)
from ..constants import (DIFFICULTY_DESIGN_ERRORS,
DIFFICULTY_IMPORTANT,
DIFFICULTY_NORMAL,
DIFFICULTY_LOW_IMPACT,
DIFFICULTY_OPTIONAL)
class BugTrackerContextProcessorTestCase(SimpleTestCase):
"""
    Test case for the context processor.
"""
def test_bugtracker_context_update(self):
"""
        Test if the ``bugtracker`` context processor adds the constants to the context.
"""
request = HttpRequest()
result = bugtracker(request)
self.assertEqual(result, {
'BUGTRACKER_STATUS': {
'OPEN': STATUS_OPEN,
'NEED_DETAILS': STATUS_NEED_DETAILS,
'CONFIRMED': STATUS_CONFIRMED,
'WORKING_ON': STATUS_WORKING_ON,
'DEFERRED': STATUS_DEFERRED,
'DUPLICATE': STATUS_DUPLICATE,
'WONT_FIX': STATUS_WONT_FIX,
'CLOSED': STATUS_CLOSED,
'FIXED': STATUS_FIXED,
},
'BUGTRACKER_PRIORITY': {
'GODZILLA': PRIORITY_GODZILLA,
'CRITICAL': PRIORITY_CRITICAL,
'MAJOR': PRIORITY_MAJOR,
'MINOR': PRIORITY_MINOR,
'TRIVIAL': PRIORITY_TRIVIAL,
'NEED_REVIEW': PRIORITY_NEED_REVIEW,
'FEATURE': PRIORITY_FEATURE,
'WISHLIST': PRIORITY_WISHLIST,
'INVALID': PRIORITY_INVALID,
'NOT_MY_FAULT': PRIORITY_NOT_MY_FAULT,
},
'BUGTRACKER_DIFFICULTY': {
'DESIGN_ERRORS': DIFFICULTY_DESIGN_ERRORS,
'IMPORTANT': DIFFICULTY_IMPORTANT,
'NORMAL': DIFFICULTY_NORMAL,
'LOW_IMPACT': DIFFICULTY_LOW_IMPACT,
'OPTIONAL': DIFFICULTY_OPTIONAL,
},
})
| agpl-3.0 | -5,262,955,387,087,841,000 | -9,060,627,764,804,474,000 | 35.962025 | 88 | 0.492123 | false |
Ensembles/ert | python/python/ert/enkf/plot_data/ensemble_plot_gen_kw_vector.py | 2 | 1627 | # Copyright (C) 2014 Statoil ASA, Norway.
#
# The file 'ensemble_plot_gen_kw_vector.py' is part of ERT - Ensemble based Reservoir Tool.
#
# ERT is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ERT is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE.
#
# See the GNU General Public License at <http://www.gnu.org/licenses/gpl.html>
# for more details.
from cwrap import BaseCClass
from ert.enkf import EnkfPrototype
class EnsemblePlotGenKWVector(BaseCClass):
TYPE_NAME = "ensemble_plot_gen_kw_vector"
_size = EnkfPrototype("int enkf_plot_gen_kw_vector_get_size(ensemble_plot_gen_kw_vector)")
_get_value = EnkfPrototype("double enkf_plot_gen_kw_vector_iget(ensemble_plot_gen_kw_vector, int)")
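    # cwrap prototypes binding these helpers to the corresponding C functions
    # in the underlying ERT library.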
def __init__(self):
raise NotImplementedError("Class can not be instantiated directly!")
def __len__(self):
""" @rtype: int """
return self._size()
def getValue(self, index):
""" @rtype: float """
return self[index]
def __iter__(self):
cur = 0
while cur < len(self):
yield self[cur]
cur += 1
def __getitem__(self, index):
""" @rtype: float """
return self._get_value(index)
def __repr__(self):
return 'EnsemblePlotGenKWVector(size = %d) %s' % (len(self), self._ad_str())
| gpl-3.0 | -1,488,551,277,825,351,700 | -5,943,563,387,763,520,000 | 32.204082 | 103 | 0.657652 | false |
nrwahl2/ansible | lib/ansible/modules/network/cloudengine/ce_vrf_interface.py | 27 | 15550 | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ce_vrf_interface
version_added: "2.4"
short_description: Manages interface specific VPN configuration on HUAWEI CloudEngine switches.
description:
- Manages interface specific VPN configuration of HUAWEI CloudEngine switches.
author: Zhijin Zhou (@CloudEngine-Ansible)
notes:
- Ensure that a VPN instance has been created and the IPv4 address family has been enabled for the VPN instance.
options:
vrf:
description:
- VPN instance, the length of vrf name is 1 ~ 31, i.e. "test", but can not be C(_public_).
required: true
vpn_interface:
description:
        - An interface that a VPN instance can be bound to, e.g. 40GE1/0/22, Vlanif10.
Must be fully qualified interface name.
Interface types, such as 10GE, 40GE, 100GE, LoopBack, MEth, Tunnel, Vlanif....
required: true
state:
description:
- Manage the state of the resource.
required: false
choices: ['present','absent']
default: present
'''
EXAMPLES = '''
- name: VRF interface test
hosts: cloudengine
connection: local
gather_facts: no
vars:
cli:
host: "{{ inventory_hostname }}"
port: "{{ ansible_ssh_port }}"
username: "{{ username }}"
password: "{{ password }}"
transport: cli
tasks:
- name: "Configure a VPN instance for the interface"
ce_vrf_interface:
vpn_interface: 40GE1/0/2
vrf: test
state: present
provider: "{{ cli }}"
- name: "Disable the association between a VPN instance and an interface"
ce_vrf_interface:
vpn_interface: 40GE1/0/2
vrf: test
state: absent
provider: "{{ cli }}"
'''
RETURN = '''
proposed:
description: k/v pairs of parameters passed into module
returned: verbose mode
type: dict
sample: {
"state": "present",
"vpn_interface": "40GE2/0/17",
"vrf": "jss"
}
existing:
description: k/v pairs of existing attributes on the interface
returned: verbose mode
type: dict
sample: {
"vpn_interface": "40GE2/0/17",
"vrf": null
}
end_state:
description: k/v pairs of end attributes on the interface
returned: verbose mode
type: dict
sample: {
"vpn_interface": "40GE2/0/17",
"vrf": "jss"
}
updates:
description: command list sent to the device
returned: always
type: list
sample: [
"ip binding vpn-instance jss",
]
changed:
description: check to see if a change was made on the device
returned: always
type: boolean
sample: true
'''
from xml.etree import ElementTree
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ce import ce_argument_spec, get_nc_config, set_nc_config
CE_NC_GET_VRF = """
<filter type="subtree">
<l3vpn xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<l3vpncomm>
<l3vpnInstances>
<l3vpnInstance>
<vrfName>%s</vrfName>
</l3vpnInstance>
</l3vpnInstances>
</l3vpncomm>
</l3vpn>
</filter>
"""
CE_NC_GET_VRF_INTERFACE = """
<filter type="subtree">
<l3vpn xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<l3vpncomm>
<l3vpnInstances>
<l3vpnInstance>
<vrfName></vrfName>
<l3vpnIfs>
<l3vpnIf>
<ifName></ifName>
</l3vpnIf>
</l3vpnIfs>
</l3vpnInstance>
</l3vpnInstances>
</l3vpncomm>
</l3vpn>
</filter>
"""
CE_NC_MERGE_VRF_INTERFACE = """
<config>
<l3vpn xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<l3vpncomm>
<l3vpnInstances>
<l3vpnInstance>
<vrfName>%s</vrfName>
<l3vpnIfs>
<l3vpnIf operation="merge">
<ifName>%s</ifName>
</l3vpnIf>
</l3vpnIfs>
</l3vpnInstance>
</l3vpnInstances>
</l3vpncomm>
</l3vpn>
</config>
"""
CE_NC_GET_INTF = """
<filter type="subtree">
<ifm xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<interfaces>
<interface>
<ifName>%s</ifName>
<isL2SwitchPort></isL2SwitchPort>
</interface>
</interfaces>
</ifm>
</filter>
"""
CE_NC_DEL_INTF_VPN = """
<config>
<l3vpn xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<l3vpncomm>
<l3vpnInstances>
<l3vpnInstance>
<vrfName>%s</vrfName>
<l3vpnIfs>
<l3vpnIf operation="delete">
<ifName>%s</ifName>
</l3vpnIf>
</l3vpnIfs>
</l3vpnInstance>
</l3vpnInstances>
</l3vpncomm>
</l3vpn>
</config>
"""
def get_interface_type(interface):
"""Gets the type of interface, such as 10GE, ETH-TRUNK, VLANIF..."""
if interface is None:
return None
iftype = None
if interface.upper().startswith('GE'):
iftype = 'ge'
elif interface.upper().startswith('10GE'):
iftype = '10ge'
elif interface.upper().startswith('25GE'):
iftype = '25ge'
elif interface.upper().startswith('4X10GE'):
iftype = '4x10ge'
elif interface.upper().startswith('40GE'):
iftype = '40ge'
elif interface.upper().startswith('100GE'):
iftype = '100ge'
elif interface.upper().startswith('VLANIF'):
iftype = 'vlanif'
elif interface.upper().startswith('LOOPBACK'):
iftype = 'loopback'
elif interface.upper().startswith('METH'):
iftype = 'meth'
elif interface.upper().startswith('ETH-TRUNK'):
iftype = 'eth-trunk'
elif interface.upper().startswith('VBDIF'):
iftype = 'vbdif'
elif interface.upper().startswith('NVE'):
iftype = 'nve'
elif interface.upper().startswith('TUNNEL'):
iftype = 'tunnel'
elif interface.upper().startswith('ETHERNET'):
iftype = 'ethernet'
elif interface.upper().startswith('FCOE-PORT'):
iftype = 'fcoe-port'
elif interface.upper().startswith('FABRIC-PORT'):
iftype = 'fabric-port'
elif interface.upper().startswith('STACK-PORT'):
iftype = 'stack-Port'
elif interface.upper().startswith('NULL'):
iftype = 'null'
else:
return None
return iftype.lower()
class VrfInterface(object):
"""Manange vpn instance"""
def __init__(self, argument_spec):
self.spec = argument_spec
self.module = None
self.init_module()
# vpn instance info
self.vrf = self.module.params['vrf']
self.vpn_interface = self.module.params['vpn_interface']
self.vpn_interface = self.vpn_interface.upper().replace(' ', '')
self.state = self.module.params['state']
self.intf_info = dict()
self.intf_info['isL2SwitchPort'] = None
self.intf_info['vrfName'] = None
self.conf_exist = False
# state
self.changed = False
self.updates_cmd = list()
self.results = dict()
self.proposed = dict()
self.existing = dict()
self.end_state = dict()
def init_module(self):
"""init_module"""
required_one_of = [("vrf", "vpn_interface")]
self.module = AnsibleModule(
argument_spec=self.spec, required_one_of=required_one_of, supports_check_mode=True)
def check_response(self, xml_str, xml_name):
"""Check if response message is already succeed."""
if "<ok/>" not in xml_str:
self.module.fail_json(msg='Error: %s failed.' % xml_name)
def get_update_cmd(self):
""" get updated command"""
if self.conf_exist:
return
if self.state == 'absent':
self.updates_cmd.append(
"undo ip binding vpn-instance %s" % self.vrf)
return
if self.vrf != self.intf_info['vrfName']:
self.updates_cmd.append("ip binding vpn-instance %s" % self.vrf)
return
def check_params(self):
"""Check all input params"""
if not self.is_vrf_exist():
self.module.fail_json(
                msg='Error: The VPN instance does not exist.')
if self.state == 'absent':
if self.vrf != self.intf_info['vrfName']:
self.module.fail_json(
msg='Error: The VPN instance is not bound to the interface.')
if self.intf_info['isL2SwitchPort'] == 'true':
self.module.fail_json(
                msg='Error: An L2 switch port cannot be bound to a VPN instance.')
# interface type check
if self.vpn_interface:
intf_type = get_interface_type(self.vpn_interface)
if not intf_type:
self.module.fail_json(
                    msg='Error: interface name %s'
                        ' is invalid.' % self.vpn_interface)
# vrf check
if self.vrf == '_public_':
self.module.fail_json(
msg='Error: The vrf name _public_ is reserved.')
if len(self.vrf) < 1 or len(self.vrf) > 31:
self.module.fail_json(
msg='Error: The vrf name length must be between 1 and 31.')
def get_interface_vpn_name(self, vpninfo, vpn_name):
""" get vpn instance name"""
l3vpn_if = vpninfo.findall("l3vpnIf")
for l3vpn_ifinfo in l3vpn_if:
for ele in l3vpn_ifinfo:
if ele.tag in ['ifName']:
if ele.text == self.vpn_interface:
self.intf_info['vrfName'] = vpn_name
def get_interface_vpn(self):
""" get the VPN instance associated with the interface"""
xml_str = CE_NC_GET_VRF_INTERFACE
con_obj = get_nc_config(self.module, xml_str)
if "<data/>" in con_obj:
return
xml_str = con_obj.replace('\r', '').replace('\n', '').\
replace('xmlns="urn:ietf:params:xml:ns:netconf:base:1.0"', "").\
replace('xmlns="http://www.huawei.com/netconf/vrp"', "")
# get global vrf interface info
root = ElementTree.fromstring(xml_str)
vpns = root.findall(
"data/l3vpn/l3vpncomm/l3vpnInstances/l3vpnInstance")
if vpns:
for vpnele in vpns:
vpn_name = None
for vpninfo in vpnele:
if vpninfo.tag == 'vrfName':
vpn_name = vpninfo.text
if vpninfo.tag == 'l3vpnIfs':
self.get_interface_vpn_name(vpninfo, vpn_name)
return
def is_vrf_exist(self):
""" judge whether the VPN instance is existed"""
conf_str = CE_NC_GET_VRF % self.vrf
con_obj = get_nc_config(self.module, conf_str)
if "<data/>" in con_obj:
return False
return True
def get_intf_conf_info(self):
""" get related configuration of the interface"""
conf_str = CE_NC_GET_INTF % self.vpn_interface
con_obj = get_nc_config(self.module, conf_str)
if "<data/>" in con_obj:
return
# get interface base info
xml_str = con_obj.replace('\r', '').replace('\n', '').\
replace('xmlns="urn:ietf:params:xml:ns:netconf:base:1.0"', "").\
replace('xmlns="http://www.huawei.com/netconf/vrp"', "")
root = ElementTree.fromstring(xml_str)
interface = root.find("data/ifm/interfaces/interface")
if interface:
for eles in interface:
if eles.tag in ["isL2SwitchPort"]:
self.intf_info[eles.tag] = eles.text
self.get_interface_vpn()
return
def get_existing(self):
"""get existing config"""
self.existing = dict(vrf=self.intf_info['vrfName'],
vpn_interface=self.vpn_interface)
def get_proposed(self):
"""get_proposed"""
self.proposed = dict(vrf=self.vrf,
vpn_interface=self.vpn_interface,
state=self.state)
def get_end_state(self):
"""get_end_state"""
self.intf_info['vrfName'] = None
self.get_intf_conf_info()
self.end_state = dict(vrf=self.intf_info['vrfName'],
vpn_interface=self.vpn_interface)
def show_result(self):
""" show result"""
self.results['changed'] = self.changed
self.results['proposed'] = self.proposed
self.results['existing'] = self.existing
self.results['end_state'] = self.end_state
if self.changed:
self.results['updates'] = self.updates_cmd
else:
self.results['updates'] = list()
self.module.exit_json(**self.results)
def judge_if_config_exist(self):
""" judge whether configuration has existed"""
if self.state == 'absent':
return False
delta = set(self.proposed.items()).difference(
self.existing.items())
delta = dict(delta)
if len(delta) == 1 and delta['state']:
return True
return False
def config_interface_vrf(self):
""" configure VPN instance of the interface"""
if not self.conf_exist and self.state == 'present':
xml_str = CE_NC_MERGE_VRF_INTERFACE % (
self.vrf, self.vpn_interface)
ret_xml = set_nc_config(self.module, xml_str)
self.check_response(ret_xml, "VRF_INTERFACE_CONFIG")
self.changed = True
elif self.state == 'absent':
xml_str = CE_NC_DEL_INTF_VPN % (self.vrf, self.vpn_interface)
ret_xml = set_nc_config(self.module, xml_str)
self.check_response(ret_xml, "DEL_VRF_INTERFACE_CONFIG")
self.changed = True
def work(self):
"""excute task"""
self.get_intf_conf_info()
self.check_params()
self.get_existing()
self.get_proposed()
self.conf_exist = self.judge_if_config_exist()
self.config_interface_vrf()
self.get_update_cmd()
self.get_end_state()
self.show_result()
def main():
"""main"""
argument_spec = dict(
vrf=dict(required=True, type='str'),
vpn_interface=dict(required=True, type='str'),
state=dict(choices=['absent', 'present'],
default='present', required=False),
)
argument_spec.update(ce_argument_spec)
vrf_intf = VrfInterface(argument_spec)
vrf_intf.work()
if __name__ == '__main__':
main()
| gpl-3.0 | 3,597,435,742,196,605,400 | 663,666,637,927,489,900 | 29.135659 | 116 | 0.572219 | false |
codedsk/hubcheck-hubzero-tests | hchztests/tests/test_website_support_need_help.py | 1 | 7124 | import pytest
import sys
import os
import re
import hubcheck
pytestmark = [ pytest.mark.website,
pytest.mark.tickets,
pytest.mark.need_help,
pytest.mark.reboot,
pytest.mark.upgrade,
pytest.mark.prod_safe_upgrade
]
class TestNeedHelp(hubcheck.testcase.TestCase2):
def setup_method(self,method):
# setup a web browser
self.browser.get(self.https_authority)
# get user account info
self.username,self.password = \
self.testdata.find_account_for('ticketsubmitter')
self.adminuser,self.adminpass = \
self.testdata.find_account_for('ticketmanager')
self.ticket_number = None
def teardown_method(self,method):
# if we created a ticket, delete the ticket
if self.ticket_number is not None \
and (self.adminuser != "") \
and (self.adminpass != ""):
try:
self.utils.account.logout()
except:
pass
self.utils.account.login_as(self.adminuser,self.adminpass)
self.utils.support.close_support_ticket_invalid(self.ticket_number)
def test_link_exists(self):
"""
        click the need help link to see if the widget exists
"""
po = self.catalog.load_pageobject('SupportNeedHelpPage')
po.open()
po.close()
@pytest.mark.nt
def test_link_changes_webpage(self):
"""
        click the need help link and check if the url changes
"""
po = self.catalog.load_pageobject('GenericPage')
start_url = po.current_url()
po.toggle_needhelp()
end_url = po.current_url()
assert start_url == end_url, "clicking the 'Need Help?' link" \
+ " changed the web page from %s to %s" % (start_url,end_url)
def test_if_link_leads_to_support_url(self):
"""
open the "Need Help?" dialogue to ensure it does not lead to
/support
Sometime found when javascript is turned off, but if javascript
is on, clicking this link should not send the user to the
/support webpage.
"""
# store the start and end page url's for comparison
# click the needhelp link and see if it takes us to /support
po = self.catalog.load_pageobject('SupportNeedHelpPage')
startpageurl = po.current_url()
po.open()
endpageurl = po.current_url()
assert startpageurl == endpageurl, \
"User was redirected to %s\n" % endpageurl
# FIXME: use urlparse here
# create a pattern for a url regular expression
p = re.compile('(([^:]+)://)?([^:/]+)(:([0-9]+))?(/.*)?')
(junk, junk, junk, junk, junk, path) = p.search(endpageurl).groups()
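        # regex groups: (scheme://, scheme, host, :port, port, path); only the
        # path component is used below.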
# check that the page we were taken to is not /support
s = "pageurl = %s\npath = %s\n" % (endpageurl,path)
assert path != '/support', s
def test_submit_ticket_logged_in_using_need_help_link(self):
"""
login to the website as the "ticket submitter" and submit a
ticket using the need help link.
"""
problem = 'hubcheck test ticket\n%s' % (self.fnbase)
# login to the website and click the need help link
self.utils.account.login_as(self.username,self.password)
po = self.catalog.load_pageobject('SupportNeedHelpPage')
po.open()
# fill in the trouble report
# username, name, and email fields are
# not accessible while logged in
self.ticket_number = po.submit_ticket({'problem':problem})
# check if the ticket number is a valid number
assert self.ticket_number is not None, "no ticket number returned"
assert re.match('\d+',self.ticket_number) is not None, \
"cound not find a matching ticket number in '%s'" \
% (self.ticket_number)
# convert to a number and ensure it is not ticket #0
assert int(self.ticket_number) > 0, \
"invalid ticket number returned: %s" % (self.ticket_number)
@pytest.mark.captcha
def test_submit_ticket_logged_out_using_need_help_link(self):
"""
submit a support ticket using the need help link while not
logged into the website.
"""
# data for trouble report
data = {
'name' : 'hubcheck testuser',
'email' : 'hubchecktest@hubzero.org',
'problem' : 'hubcheck test ticket\n%s' % (self.fnbase),
'captcha' : True,
}
# navigate to the SupportNeedHelp Page:
po = self.catalog.load_pageobject('SupportNeedHelpPage')
po.open()
# fill in the trouble report
# username is optional
self.ticket_number = po.submit_ticket(data)
# check if the ticket number is a valid number
assert self.ticket_number is not None, \
"no ticket number returned"
assert re.match('\d+',self.ticket_number) is not None, \
"cound not find a matching ticket number in '%s'" \
% (self.ticket_number)
# convert to a number and ensure it is not ticket #0
assert int(self.ticket_number) > 0, \
"invalid ticket number returned: %s" % (self.ticket_number)
@pytest.mark.tickets_attach_jpg
def test_attaching_jpg_image_to_ticket_submitted_through_need_help(self):
"""
Login to the website and submit a ticket, using the need help
link, with an attached jpeg image.
"""
problem = 'hubcheck test ticket\nattaching jpg image\n%s' \
% (self.fnbase)
uploadfilename = 'app2.jpg'
uploadfilepath = os.path.join(self.datadir,'images',uploadfilename)
data = {
'problem' : problem,
'upload' : uploadfilepath,
}
# login to the website and navigate to the need help form
self.utils.account.login_as(self.username,self.password)
po = self.catalog.load_pageobject('SupportNeedHelpPage')
# po.open()
po.needhelplink.click()
# submit a trouble report
# username, name, and email fields are not accessible
self.ticket_number = po.submit_ticket(data)
assert self.ticket_number is not None, "no ticket number returned"
assert int(self.ticket_number) > 0, \
"invalid ticket number returned: %s" % (self.ticket_number)
po.goto_ticket()
po = self.catalog.load_pageobject('SupportTicketViewPage')
content = po.get_ticket_content()
imgsrc = content.download_image(uploadfilename)
# not sure how to really download image files yet.
# so we assume that as long as opening the image didn't
# cause an error, the test passed.
assert re.search(uploadfilename,imgsrc) is not None, \
"After uploading an image to support ticket" \
+ " #%s, could not download image %s" \
% (self.ticket_number,uploadfilename)
| mit | 5,957,578,336,372,185,000 | -8,585,877,790,807,667,000 | 32.28972 | 79 | 0.592504 | false |
YongseopKim/crosswalk-test-suite | tools/apkanalyser/comm.py | 3 | 2140 | # -*- coding: utf-8 -*-
#!/usr/bin/env python
# Copyright (C) 2015 Intel Corporation. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Intel Corporation nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Author: belem.zhang@intel.com
import os, sys
import re, codecs
import shutil, glob
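# search sys.path for the first entry containing pathname; matchFunc selects
# whether a file or a directory is expected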
def _find(pathname, matchFunc=os.path.isfile):
for dirname in sys.path:
candidate = os.path.join(dirname, pathname)
if matchFunc(candidate):
return candidate
def mk_dir(path):
if not find_dir(path):
os.mkdir(path)
def find_file(pathname):
return _find(pathname)
def find_dir(path):
return _find(path, matchFunc=os.path.isdir)
def find_glob_path(filepath):
return glob.glob(filepath)
| bsd-3-clause | 3,328,539,060,826,182,000 | -2,226,679,050,300,698,400 | 37.214286 | 71 | 0.74486 | false |
jasper-meyer/Platformer | platformer.py | 1 | 3751 | """
platformer.py
Author: Jasper Meyer
Credit: You, the internet, Brendan
Assignment:
Write and submit a program that implements the sandbox platformer game:
https://github.com/HHS-IntroProgramming/Platformer
"""
from ggame import App, RectangleAsset, ImageAsset, Sprite, LineStyle, Color, Frame
SCREEN_WIDTH = 1080
SCREEN_HEIGHT = 720
myapp = App(SCREEN_WIDTH, SCREEN_HEIGHT)
black = Color(0, 1)
backcol = Color(0xd9ffcc, 1.0)
purp = Color(0x9900cc, 1.0)
blue = Color(0x3399ff,1.0)
noline = LineStyle(0, black)
bg_asset = RectangleAsset(SCREEN_WIDTH, SCREEN_HEIGHT, noline, backcol)
bg = Sprite(bg_asset, (0,0))
thinline = LineStyle(1, black)
sq = RectangleAsset (75,75, noline, black)
wub=0
pup=0
mousex=0
mousey=0
mousexround=0
mouseyround=0
play = RectangleAsset (25,50, noline, purp)
spr = RectangleAsset (20,10, noline, blue)
vy=0
player=0
acc = 0
ti = 0
rupx=0
lupx=0
vx=0
up=0
upup=0
stop = 0
shutup=0
spring = 0
sub = 0
springlist = []
def wup(event):
global wub
global mousexround
global mouseyround
wub = 1
if wub == 1:
mousexround=mousex-((mousex)%75)
mouseyround=mousey-((mousey)%75)
block = Sprite (sq, (mousexround, mouseyround))
def mousemo(event):
global mousex
global mousey
mousex=event.x
mousey=event.y
def spri(event):
global spring
global mousex
global mousey
global mouseyround
global sub
global springlist
sub =1
if sub == 1:
mouseyround=mousey-((mousey)%75)+65
springlist.append (Sprite (spr, (mousex, mouseyround)))
def pup(event):
global pub
global mousex
global mouseyround
global player
pub = 1
if pub == 1:
mouseyround=mousey-((mousey)%75)+25
if player == 0:
player = Sprite (play, (mousex, mouseyround))
def rup(event):
global rupx
rupx=1
def lup(event):
global lupx
lupx=1
def uup(event):
global up
up=1
def step():
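    # per-frame update: apply gravity, handle spring bounces, move the player
    # horizontally and resolve collisions with placed blocks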
if player != 0:
global vy
global acc
global ti
global rupx
global vx
global lupx
global up
global upup
global stop
global shutup
global springlist
global player
acc = 0.02
for s in springlist:
if player.collidingWith(s):
vy=-50+vy
vx=-vx
if stop == 0:
ti=ti+.5
if upup==4.5:
vy = (0.2*ti)-upup
else:
vy = (0.2*ti)
player.y=player.y+vy
player.x=player.x+vx
if rupx == 1:
vx=vx+1.5
lupx=0
rupx=0
if lupx == 1:
vx=vx-1.5
rupx=0
lupx=0
if vx > 3:
vx = 3
if vx < -3:
vx =-3
if up == 1:
upup = 4.5
up=0
if up == 0:
upup =4.5
col = player.collidingWithSprites(Sprite)
if len(col) > 1 and col[1].y<player.y+500:
stop=1
player.y=player.y-0.2
else:
stop=0
if stop == 1:
vy=0
ti=0
if len(col) > 1:
if col[1].y<player.y+50:
vx=-0.5*vx
if player.y > 2000:
player = 0
ti=0
myapp.listenKeyEvent('keyup', 's', spri)
myapp.listenKeyEvent('keydown', 'up arrow', uup)
myapp.listenKeyEvent('keydown', 'left arrow', lup)
myapp.listenKeyEvent('keydown', 'right arrow', rup)
myapp.listenKeyEvent('keyup', 'p', pup)
myapp.listenKeyEvent('keyup', 'w', wup)
myapp.listenMouseEvent('mousemove', mousemo)
myapp.run(step) | mit | -2,587,384,198,693,263,000 | 3,086,779,119,420,748,300 | 17.76 | 82 | 0.546254 | false |
xpansa/server-tools | fetchmail_attach_from_folder/match_algorithm/__init__.py | 54 | 1115 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# This module copyright (C) 2013 Therp BV (<http://therp.nl>)
# All Rights Reserved
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
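# Each submodule implements one strategy for matching a fetched mail to an
# existing record (exact address, address domain, or the standard matching).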
from . import base
from . import email_exact
from . import email_domain
from . import openerp_standard
| agpl-3.0 | 5,096,427,212,387,660,000 | 8,699,950,399,947,223,000 | 41.884615 | 78 | 0.618834 | false |
simplegeo/eventlet | eventlet/hubs/pyevent.py | 13 | 5455 | import sys
import traceback
import event
import types
from eventlet.support import greenlets as greenlet
from eventlet.hubs.hub import BaseHub, FdListener, READ, WRITE
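# Thin wrapper around a pyevent event/timer; the hub queues these in
# events_to_add and only arms them once the dispatch loop is running.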
class event_wrapper(object):
def __init__(self, impl=None, seconds=None):
self.impl = impl
self.seconds = seconds
def __repr__(self):
if self.impl is not None:
return repr(self.impl)
else:
return object.__repr__(self)
def __str__(self):
if self.impl is not None:
return str(self.impl)
else:
return object.__str__(self)
def cancel(self):
if self.impl is not None:
self.impl.delete()
self.impl = None
@property
def pending(self):
return bool(self.impl and self.impl.pending())
class Hub(BaseHub):
SYSTEM_EXCEPTIONS = (KeyboardInterrupt, SystemExit)
def __init__(self):
super(Hub,self).__init__()
event.init()
self.signal_exc_info = None
self.signal(
2,
lambda signalnum, frame: self.greenlet.parent.throw(KeyboardInterrupt))
self.events_to_add = []
def dispatch(self):
loop = event.loop
while True:
for e in self.events_to_add:
if e is not None and e.impl is not None and e.seconds is not None:
e.impl.add(e.seconds)
e.seconds = None
self.events_to_add = []
result = loop()
if getattr(event, '__event_exc', None) is not None:
# only have to do this because of bug in event.loop
t = getattr(event, '__event_exc')
setattr(event, '__event_exc', None)
assert getattr(event, '__event_exc') is None
raise t[0], t[1], t[2]
if result != 0:
return result
def run(self):
while True:
try:
self.dispatch()
except greenlet.GreenletExit:
break
except self.SYSTEM_EXCEPTIONS:
raise
except:
if self.signal_exc_info is not None:
self.schedule_call_global(
0, greenlet.getcurrent().parent.throw, *self.signal_exc_info)
self.signal_exc_info = None
else:
self.squelch_timer_exception(None, sys.exc_info())
def abort(self, wait=True):
self.schedule_call_global(0, self.greenlet.throw, greenlet.GreenletExit)
if wait:
assert self.greenlet is not greenlet.getcurrent(), "Can't abort with wait from inside the hub's greenlet."
self.switch()
def _getrunning(self):
return bool(self.greenlet)
def _setrunning(self, value):
pass # exists for compatibility with BaseHub
running = property(_getrunning, _setrunning)
def add(self, evtype, fileno, real_cb):
# this is stupid: pyevent won't call a callback unless it's a function,
# so we have to force it to be one here
if isinstance(real_cb, types.BuiltinMethodType):
def cb(_d):
real_cb(_d)
else:
cb = real_cb
if evtype is READ:
evt = event.read(fileno, cb, fileno)
elif evtype is WRITE:
evt = event.write(fileno, cb, fileno)
return super(Hub,self).add(evtype, fileno, evt)
def signal(self, signalnum, handler):
def wrapper():
try:
handler(signalnum, None)
except:
self.signal_exc_info = sys.exc_info()
event.abort()
return event_wrapper(event.signal(signalnum, wrapper))
def remove(self, listener):
super(Hub, self).remove(listener)
listener.cb.delete()
def remove_descriptor(self, fileno):
for lcontainer in self.listeners.itervalues():
listener = lcontainer.pop(fileno, None)
if listener:
try:
listener.cb.delete()
except self.SYSTEM_EXCEPTIONS:
raise
except:
traceback.print_exc()
def schedule_call_local(self, seconds, cb, *args, **kwargs):
current = greenlet.getcurrent()
if current is self.greenlet:
return self.schedule_call_global(seconds, cb, *args, **kwargs)
event_impl = event.event(_scheduled_call_local, (cb, args, kwargs, current))
wrapper = event_wrapper(event_impl, seconds=seconds)
self.events_to_add.append(wrapper)
return wrapper
schedule_call = schedule_call_local
def schedule_call_global(self, seconds, cb, *args, **kwargs):
event_impl = event.event(_scheduled_call, (cb, args, kwargs))
wrapper = event_wrapper(event_impl, seconds=seconds)
self.events_to_add.append(wrapper)
return wrapper
def _version_info(self):
baseversion = event.__version__
return baseversion
def _scheduled_call(event_impl, handle, evtype, arg):
cb, args, kwargs = arg
try:
cb(*args, **kwargs)
finally:
event_impl.delete()
def _scheduled_call_local(event_impl, handle, evtype, arg):
cb, args, kwargs, caller_greenlet = arg
try:
if not caller_greenlet.dead:
cb(*args, **kwargs)
finally:
event_impl.delete()
| mit | 8,641,797,476,861,537,000 | -7,043,344,501,646,803,000 | 30.171429 | 118 | 0.55912 | false |
sourcelair/ceryx | ceryx/tests/client/connection.py | 2 | 1957 | from urllib3.connection import HTTPConnection, HTTPSConnection
import os
import socket
DEFAULT_CERYX_HOST = "ceryx" # Set by Docker Compose in tests
CERYX_HOST = os.getenv("CERYX_HOST", DEFAULT_CERYX_HOST)
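# Hostnames ending in .ceryx.test are rewritten to this address so that test
# requests reach the Ceryx proxy (see the connection classes below).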
class CeryxTestsHTTPConnection(HTTPConnection):
"""
Custom-built HTTPConnection for Ceryx tests. Force sets the request's
host to the configured Ceryx host, if the request's original host
ends with `.ceryx.test`.
"""
@property
def host(self):
"""
Do what the original property did. We just want to touch the setter.
"""
return self._dns_host.rstrip('.')
@host.setter
def host(self, value):
"""
If the request header ends with `.ceryx.test` then force set the actual
host to the configured Ceryx host, so as to send corresponding
requests to Ceryx.
"""
self._dns_host = CERYX_HOST if value.endswith(".ceryx.test") else value
class CeryxTestsHTTPSConnection(CeryxTestsHTTPConnection, HTTPSConnection):
def __init__(
self, host, port=None, key_file=None, cert_file=None,
key_password=None, strict=None,
timeout=socket._GLOBAL_DEFAULT_TIMEOUT, ssl_context=None,
server_hostname=None, **kw,
):
# Initialise the HTTPConnection subclass created above.
CeryxTestsHTTPConnection.__init__(
self, host, port, strict=strict, timeout=timeout, **kw,
)
self.key_file = key_file
self.cert_file = cert_file
self.key_password = key_password
self.ssl_context = ssl_context
self.server_hostname = server_hostname
# ------------------------------
# Original comment from upstream
# ------------------------------
#
# Required property for Google AppEngine 1.9.0 which otherwise causes
# HTTPS requests to go out as HTTP. (See Issue #356)
self._protocol = 'https'
| mit | -2,059,056,748,722,613,200 | 784,563,916,303,075,600 | 31.633333 | 79 | 0.619315 | false |
Ophiuchus1312/enigma2-master | lib/python/Screens/TimerEdit.py | 1 | 20176 | from Components.ActionMap import ActionMap
from Components.Button import Button
from Components.Label import Label
from Components.config import config
from Components.MenuList import MenuList
from Components.TimerList import TimerList
from Components.TimerSanityCheck import TimerSanityCheck
from Components.UsageConfig import preferredTimerPath
from Components.Sources.StaticText import StaticText
from RecordTimer import RecordTimerEntry, parseEvent, AFTEREVENT
from Screens.Screen import Screen
from Screens.ChoiceBox import ChoiceBox
from Screens.MessageBox import MessageBox
from ServiceReference import ServiceReference
from Screens.TimerEntry import TimerEntry, TimerLog
from Tools.BoundFunction import boundFunction
from Tools.FuzzyDate import FuzzyTime
from Tools.Directories import resolveFilename, SCOPE_HDD
from time import time, localtime
from timer import TimerEntry as RealTimerEntry
from enigma import eServiceCenter
import Tools.CopyFiles
import os
class TimerEditList(Screen):
EMPTY = 0
ENABLE = 1
DISABLE = 2
CLEANUP = 3
DELETE = 4
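    # current meaning of each colour button; updateState() switches the mapped
    # actions and button labels between these states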
def __init__(self, session):
Screen.__init__(self, session)
Screen.setTitle(self, _("Timer List"))
self.onChangedEntry = [ ]
list = [ ]
self.list = list
self.fillTimerList()
self["timerlist"] = TimerList(list)
self.key_red_choice = self.EMPTY
self.key_yellow_choice = self.EMPTY
self.key_blue_choice = self.EMPTY
self["key_red"] = Button(" ")
self["key_green"] = Button(_("Add"))
self["key_yellow"] = Button(" ")
self["key_blue"] = Button(" ")
self["description"] = Label()
self["actions"] = ActionMap(["OkCancelActions", "DirectionActions", "ShortcutActions", "TimerEditActions"],
{
"ok": self.openEdit,
"cancel": self.leave,
"green": self.addCurrentTimer,
"log": self.showLog,
"left": self.left,
"right": self.right,
"up": self.up,
"down": self.down
}, -1)
self.setTitle(_("Timer overview"))
self.session.nav.RecordTimer.on_state_change.append(self.onStateChange)
self.onShown.append(self.updateState)
def createSummary(self):
return TimerEditListSummary
def up(self):
self["timerlist"].instance.moveSelection(self["timerlist"].instance.moveUp)
self.updateState()
def down(self):
self["timerlist"].instance.moveSelection(self["timerlist"].instance.moveDown)
self.updateState()
def left(self):
self["timerlist"].instance.moveSelection(self["timerlist"].instance.pageUp)
self.updateState()
def right(self):
self["timerlist"].instance.moveSelection(self["timerlist"].instance.pageDown)
self.updateState()
def toggleDisabledState(self):
cur=self["timerlist"].getCurrent()
if cur:
t = cur
if t.disabled:
# print "try to ENABLE timer"
t.enable()
timersanitycheck = TimerSanityCheck(self.session.nav.RecordTimer.timer_list, cur)
if not timersanitycheck.check():
t.disable()
print "Sanity check failed"
simulTimerList = timersanitycheck.getSimulTimerList()
if simulTimerList is not None:
self.session.openWithCallback(self.finishedEdit, TimerSanityConflict, simulTimerList)
else:
print "Sanity check passed"
if timersanitycheck.doubleCheck():
t.disable()
else:
if t.isRunning():
if t.repeated:
list = (
(_("Stop current event but not coming events"), "stoponlycurrent"),
(_("Stop current event and disable coming events"), "stopall"),
(_("Don't stop current event but disable coming events"), "stoponlycoming")
)
self.session.openWithCallback(boundFunction(self.runningEventCallback, t), ChoiceBox, title=_("Repeating event currently recording... What do you want to do?"), list = list)
else:
t.disable()
self.session.nav.RecordTimer.timeChanged(t)
self.refill()
self.updateState()
def runningEventCallback(self, t, result):
if result is not None:
if result[1] == "stoponlycurrent" or result[1] == "stopall":
t.enable()
t.processRepeated(findRunningEvent = False)
self.session.nav.RecordTimer.doActivate(t)
if result[1] == "stoponlycoming" or result[1] == "stopall":
t.disable()
self.session.nav.RecordTimer.timeChanged(t)
self.refill()
self.updateState()
def removeAction(self, descr):
actions = self["actions"].actions
if descr in actions:
del actions[descr]
def updateState(self):
cur = self["timerlist"].getCurrent()
if cur:
self["description"].setText(cur.description)
if self.key_red_choice != self.DELETE:
self["actions"].actions.update({"red":self.removeTimerQuestion})
self["key_red"].setText(_("Delete"))
self.key_red_choice = self.DELETE
if cur.disabled and (self.key_yellow_choice != self.ENABLE):
self["actions"].actions.update({"yellow":self.toggleDisabledState})
self["key_yellow"].setText(_("Enable"))
self.key_yellow_choice = self.ENABLE
elif cur.isRunning() and not cur.repeated and (self.key_yellow_choice != self.EMPTY):
self.removeAction("yellow")
self["key_yellow"].setText(" ")
self.key_yellow_choice = self.EMPTY
elif ((not cur.isRunning())or cur.repeated ) and (not cur.disabled) and (self.key_yellow_choice != self.DISABLE):
self["actions"].actions.update({"yellow":self.toggleDisabledState})
self["key_yellow"].setText(_("Disable"))
self.key_yellow_choice = self.DISABLE
else:
if self.key_red_choice != self.EMPTY:
self.removeAction("red")
self["key_red"].setText(" ")
self.key_red_choice = self.EMPTY
if self.key_yellow_choice != self.EMPTY:
self.removeAction("yellow")
self["key_yellow"].setText(" ")
self.key_yellow_choice = self.EMPTY
showCleanup = True
for x in self.list:
if (not x[0].disabled) and (x[1] == True):
break
else:
showCleanup = False
if showCleanup and (self.key_blue_choice != self.CLEANUP):
self["actions"].actions.update({"blue":self.cleanupQuestion})
self["key_blue"].setText(_("Cleanup"))
self.key_blue_choice = self.CLEANUP
elif (not showCleanup) and (self.key_blue_choice != self.EMPTY):
self.removeAction("blue")
self["key_blue"].setText(" ")
self.key_blue_choice = self.EMPTY
if len(self.list) == 0:
return
timer = self['timerlist'].getCurrent()
if timer:
try:
name = str(timer.name)
time = ("%s %s ... %s") % (FuzzyTime(timer.begin)[0], FuzzyTime(timer.begin)[1], FuzzyTime(timer.end)[1])
duration = ("(%d " + _("mins") + ")") % ((timer.end - timer.begin) / 60)
service = str(timer.service_ref.getServiceName())
if timer.state == RealTimerEntry.StateWaiting:
state = _("waiting")
elif timer.state == RealTimerEntry.StatePrepared:
state = _("about to start")
elif timer.state == RealTimerEntry.StateRunning:
if timer.justplay:
state = _("zapped")
else:
state = _("recording...")
elif timer.state == RealTimerEntry.StateEnded:
state = _("done!")
else:
state = _("<unknown>")
except:
name = ""
time = ""
duration = ""
service = ""
else:
name = ""
time = ""
duration = ""
service = ""
for cb in self.onChangedEntry:
cb(name, time, duration, service, state)
def fillTimerList(self):
#helper function to move finished timers to end of list
def eol_compare(x, y):
if x[0].state != y[0].state and x[0].state == RealTimerEntry.StateEnded or y[0].state == RealTimerEntry.StateEnded:
return cmp(x[0].state, y[0].state)
return cmp(x[0].begin, y[0].begin)
list = self.list
print list
del list[:]
list.extend([(timer, False) for timer in self.session.nav.RecordTimer.timer_list])
list.extend([(timer, True) for timer in self.session.nav.RecordTimer.processed_timers])
if config.usage.timerlist_finished_timer_position.index: #end of list
list.sort(cmp = eol_compare)
else:
list.sort(key = lambda x: x[0].begin)
def showLog(self):
cur=self["timerlist"].getCurrent()
if cur:
self.session.openWithCallback(self.finishedEdit, TimerLog, cur)
def openEdit(self):
cur=self["timerlist"].getCurrent()
if cur:
self.session.openWithCallback(self.finishedEdit, TimerEntry, cur)
def cleanupQuestion(self):
self.session.openWithCallback(self.cleanupTimer, MessageBox, _("Really delete done timers?"))
def cleanupTimer(self, delete):
if delete:
self.session.nav.RecordTimer.cleanup()
self.refill()
self.updateState()
def removeTimerQuestion(self):
cur = self["timerlist"].getCurrent()
service = str(cur.service_ref.getServiceName())
t = localtime(cur.begin)
f = str(t.tm_year) + str(t.tm_mon).zfill(2) + str(t.tm_mday).zfill(2) + " " + str(t.tm_hour).zfill(2) + str(t.tm_min).zfill(2) + " - " + service + " - " + cur.name
f = f.replace(':','_')
f = f.replace(',','_')
f = f.replace('/','_')
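        # f mirrors the "YYYYMMDD HHMM - service - name" prefix used for the
        # recording files, so existing recordings can be found on disk below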
if not cur:
return
onhdd = False
self.moviename = f
path = resolveFilename(SCOPE_HDD)
files = os.listdir(path)
for file in files:
if file.startswith(f):
onhdd = True
break
if onhdd:
message = (_("Do you really want to delete %s?") % (cur.name))
choices = [(_("No"), "no"),
(_("Yes, delete from Timerlist"), "yes"),
(_("Yes, delete from Timerlist and delete recording"), "yesremove")]
self.session.openWithCallback(self.startDelete, ChoiceBox, title=message, list=choices)
else:
self.session.openWithCallback(self.removeTimer, MessageBox, _("Do you really want to delete %s?") % (cur.name), default = False)
def startDelete(self, answer):
if not answer or not answer[1]:
self.close()
return
if answer[1] == 'no':
return
elif answer[1] == 'yes':
self.removeTimer(True)
elif answer[1] == 'yesremove':
if config.EMC.movie_trashcan_enable.getValue():
trashpath = config.EMC.movie_trashcan_path.getValue()
self.MoveToTrash(trashpath)
elif config.usage.movielist_trashcan.getValue():
trashpath = resolveFilename(SCOPE_HDD) + '.Trash'
self.MoveToTrash(trashpath)
else:
self.session.openWithCallback(self.callbackRemoveRecording, MessageBox, _("Do you really want to delete the recording?"), default = False)
def callbackRemoveRecording(self, answer):
if not answer:
return
self.delete()
def removeTimer(self, result):
if not result:
return
list = self["timerlist"]
cur = list.getCurrent()
if cur:
timer = cur
timer.afterEvent = AFTEREVENT.NONE
self.session.nav.RecordTimer.removeEntry(timer)
self.refill()
self.updateState()
def MoveToTrash(self, trashpath):
self.removeTimer(True)
moviepath = os.path.normpath(resolveFilename(SCOPE_HDD))
movedList =[]
files = os.listdir(moviepath)
for file in files:
if file.startswith(self.moviename):
movedList.append((os.path.join(moviepath, file), os.path.join(trashpath, file)))
Tools.CopyFiles.moveFiles(movedList, None)
def delete(self):
item = self["timerlist"].getCurrent()
if item is None:
return # huh?
name = item.name
service = str(item.service_ref.getServiceName())
t = localtime(item.begin)
f = str(t.tm_year) + str(t.tm_mon).zfill(2) + str(t.tm_mday).zfill(2) + " " + str(t.tm_hour).zfill(2) + str(t.tm_min).zfill(2) + " - " + service + " - " + name
f = f.replace(':','_')
f = f.replace(',','_')
f = f.replace('/','_')
path = resolveFilename(SCOPE_HDD)
self.removeTimer(True)
from enigma import eBackgroundFileEraser
files = os.listdir(path)
for file in files:
if file.startswith(f):
eBackgroundFileEraser.getInstance().erase(os.path.realpath(path + file))
def refill(self):
oldsize = len(self.list)
self.fillTimerList()
lst = self["timerlist"]
newsize = len(self.list)
if oldsize and oldsize != newsize:
idx = lst.getCurrentIndex()
lst.entryRemoved(idx)
else:
lst.invalidate()
def addCurrentTimer(self):
event = None
service = self.session.nav.getCurrentService()
if service is not None:
info = service.info()
if info is not None:
event = info.getEvent(0)
# FIXME only works if already playing a service
serviceref = ServiceReference(self.session.nav.getCurrentlyPlayingServiceOrGroup())
if event is None:
data = (int(time()), int(time() + 60), "", "", None)
else:
data = parseEvent(event, description = False)
self.addTimer(RecordTimerEntry(serviceref, checkOldTimers = True, dirname = preferredTimerPath(), *data))
def addTimer(self, timer):
self.session.openWithCallback(self.finishedAdd, TimerEntry, timer)
def finishedEdit(self, answer):
# print "finished edit"
if answer[0]:
# print "Edited timer"
entry = answer[1]
timersanitycheck = TimerSanityCheck(self.session.nav.RecordTimer.timer_list, entry)
success = False
if not timersanitycheck.check():
simulTimerList = timersanitycheck.getSimulTimerList()
if simulTimerList is not None:
for x in simulTimerList:
if x.setAutoincreaseEnd(entry):
self.session.nav.RecordTimer.timeChanged(x)
if not timersanitycheck.check():
simulTimerList = timersanitycheck.getSimulTimerList()
if simulTimerList is not None:
self.session.openWithCallback(self.finishedEdit, TimerSanityConflict, timersanitycheck.getSimulTimerList())
else:
success = True
else:
success = True
if success:
print "Sanity check passed"
self.session.nav.RecordTimer.timeChanged(entry)
self.fillTimerList()
self.updateState()
# else:
# print "Timeredit aborted"
def finishedAdd(self, answer):
# print "finished add"
if answer[0]:
entry = answer[1]
simulTimerList = self.session.nav.RecordTimer.record(entry)
if simulTimerList is not None:
for x in simulTimerList:
if x.setAutoincreaseEnd(entry):
self.session.nav.RecordTimer.timeChanged(x)
simulTimerList = self.session.nav.RecordTimer.record(entry)
if simulTimerList is not None:
self.session.openWithCallback(self.finishSanityCorrection, TimerSanityConflict, simulTimerList)
self.fillTimerList()
self.updateState()
# else:
# print "Timeredit aborted"
def finishSanityCorrection(self, answer):
self.finishedAdd(answer)
def leave(self):
self.session.nav.RecordTimer.on_state_change.remove(self.onStateChange)
self.close()
def onStateChange(self, entry):
self.refill()
self.updateState()
class TimerSanityConflict(Screen):
EMPTY = 0
ENABLE = 1
DISABLE = 2
EDIT = 3
def __init__(self, session, timer):
Screen.__init__(self, session)
self.timer = timer
print "TimerSanityConflict"
self["timer1"] = TimerList(self.getTimerList(timer[0]))
self.list = []
self.list2 = []
count = 0
for x in timer:
if count != 0:
self.list.append((_("Conflicting timer") + " " + str(count), x))
self.list2.append((timer[count], False))
count += 1
if count == 1:
self.list.append((_("Channel not in services list")))
self["list"] = MenuList(self.list)
self["timer2"] = TimerList(self.list2)
self["key_red"] = Button("Edit")
self["key_green"] = Button(" ")
self["key_yellow"] = Button(" ")
self["key_blue"] = Button(" ")
self.key_green_choice = self.EMPTY
self.key_yellow_choice = self.EMPTY
self.key_blue_choice = self.EMPTY
self["actions"] = ActionMap(["OkCancelActions", "DirectionActions", "ShortcutActions", "TimerEditActions"],
{
"ok": self.leave_ok,
"cancel": self.leave_cancel,
"red": self.editTimer1,
"up": self.up,
"down": self.down
}, -1)
self.setTitle(_("Timer sanity error"))
self.onShown.append(self.updateState)
def getTimerList(self, timer):
return [(timer, False)]
def editTimer1(self):
self.session.openWithCallback(self.finishedEdit, TimerEntry, self["timer1"].getCurrent())
def editTimer2(self):
self.session.openWithCallback(self.finishedEdit, TimerEntry, self["timer2"].getCurrent())
def toggleNewTimer(self):
if self.timer[0].disabled:
self.timer[0].disabled = False
self.session.nav.RecordTimer.timeChanged(self.timer[0])
elif not self.timer[0].isRunning():
self.timer[0].disabled = True
self.session.nav.RecordTimer.timeChanged(self.timer[0])
self.finishedEdit((True, self.timer[0]))
def toggleTimer(self):
x = self["list"].getSelectedIndex() + 1 # the first is the new timer so we do +1 here
if self.timer[x].disabled:
self.timer[x].disabled = False
self.session.nav.RecordTimer.timeChanged(self.timer[x])
if not self.timer[0].isRunning():
self.timer[0].disabled = True
self.session.nav.RecordTimer.timeChanged(self.timer[0])
elif not self.timer[x].isRunning():
self.timer[x].disabled = True
self.session.nav.RecordTimer.timeChanged(self.timer[x])
if self.timer[x].disabled:
self.timer[0].disabled = False
self.session.nav.RecordTimer.timeChanged(self.timer[0])
self.finishedEdit((True, self.timer[0]))
def finishedEdit(self, answer):
self.leave_ok()
def leave_ok(self):
self.close((True, self.timer[0]))
def leave_cancel(self):
self.close((False, self.timer[0]))
def up(self):
self["list"].instance.moveSelection(self["list"].instance.moveUp)
self["timer2"].moveToIndex(self["list"].getSelectedIndex())
def down(self):
self["list"].instance.moveSelection(self["list"].instance.moveDown)
self["timer2"].moveToIndex(self["list"].getSelectedIndex())
def removeAction(self, descr):
actions = self["actions"].actions
if descr in actions:
del actions[descr]
def updateState(self):
if self.timer[0] is not None:
if self.timer[0].disabled and self.key_green_choice != self.ENABLE:
self["actions"].actions.update({"green":self.toggleTimer})
self["key_green"].setText(_("Enable"))
self.key_green_choice = self.ENABLE
elif self.timer[0].isRunning() and not self.timer[0].repeated and self.key_green_choice != self.EMPTY:
self.removeAction("green")
self["key_green"].setText(" ")
self.key_green_choice = self.EMPTY
elif (not self.timer[0].isRunning() or self.timer[0].repeated ) and self.key_green_choice != self.DISABLE:
self["actions"].actions.update({"green":self.toggleNewTimer})
self["key_green"].setText(_("Disable"))
self.key_green_choice = self.DISABLE
if len(self.timer) > 1:
x = self["list"].getSelectedIndex() + 1 # the first is the new timer so we do +1 here
if self.timer[x] is not None:
if self.key_yellow_choice == self.EMPTY:
self["actions"].actions.update({"yellow":self.editTimer2})
self["key_yellow"].setText(_("Edit"))
self.key_yellow_choice = self.EDIT
if self.timer[x].disabled and self.key_blue_choice != self.ENABLE:
self["actions"].actions.update({"blue":self.toggleTimer})
self["key_blue"].setText(_("Enable"))
self.key_blue_choice = self.ENABLE
elif self.timer[x].isRunning() and not self.timer[x].repeated and self.key_blue_choice != self.EMPTY:
self.removeAction("blue")
self["key_blue"].setText(" ")
self.key_blue_choice = self.EMPTY
elif (not self.timer[x].isRunning() or self.timer[x].repeated ) and self.key_blue_choice != self.DISABLE:
self["actions"].actions.update({"blue":self.toggleTimer})
self["key_blue"].setText(_("Disable"))
self.key_blue_choice = self.DISABLE
else:
                # FIXME: this doesn't hide the buttons themselves, just the text
if self.key_yellow_choice != self.EMPTY:
self.removeAction("yellow")
self["key_yellow"].setText(" ")
self.key_yellow_choice = self.EMPTY
if self.key_blue_choice != self.EMPTY:
self.removeAction("blue")
self["key_blue"].setText(" ")
self.key_blue_choice = self.EMPTY
class TimerEditListSummary(Screen):
def __init__(self, session, parent):
Screen.__init__(self, session, parent = parent)
self["name"] = StaticText("")
self["service"] = StaticText("")
self["time"] = StaticText("")
self["duration"] = StaticText("")
self["state"] = StaticText("")
self.onShow.append(self.addWatcher)
self.onHide.append(self.removeWatcher)
def addWatcher(self):
self.parent.onChangedEntry.append(self.selectionChanged)
self.parent.updateState()
def removeWatcher(self):
self.parent.onChangedEntry.remove(self.selectionChanged)
def selectionChanged(self, name, time, duration, service, state):
self["name"].text = name
self["service"].text = service
self["time"].text = time
self["duration"].text = duration
self["state"].text = state
| gpl-2.0 | -7,330,570,064,914,841,000 | 8,064,267,257,821,168,000 | 31.753247 | 179 | 0.6875 | false |
polaris-gslb/polaris-core | tests/test-polaris-pdns.py | 2 | 1937 | #!/usr/bin/env python3
import subprocess
import sys
import time
import json
POLARIS_PDNS_FILE = '/opt/polaris/bin/polaris-pdns'
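# polaris-pdns acts as a PowerDNS remote backend: each request is written as a
# single JSON line to stdin and each response read as a single JSON line from
# stdout.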
def pretty_json(s):
d = json.loads(s)
return json.dumps(d, indent=4, separators=(',', ': '))
class TestPolarisPDNS:
def __init__(self, polaris_pdns_file):
self.proc = subprocess.Popen([ polaris_pdns_file ],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE)
def execute_query(self, query):
query += '\n'
self.proc.stdin.write(query.encode())
self.proc.stdin.flush()
output = self.proc.stdout.readline().decode()
return pretty_json(output)
def prepare_query(self, method, params):
q = {
'method': method,
'parameters': {
'qtype': params['qtype'],
'qname': params['qname'],
'remote': params['remote'],
'local': params['local'],
'real-remote': params['real-remote'],
'zone-id': params['zone-id']
}
}
return json.dumps(q)
if __name__ == '__main__':
t = TestPolarisPDNS(POLARIS_PDNS_FILE)
method = 'lookup'
params = {
'qtype': 'A',
'qname': 'www.example.com',
'remote': '10.1.1.21',
'local': '0.0.0.0',
'real-remote': '10.1.1.21/32',
'zone-id': -1
}
q = t.prepare_query(method, params)
print("query: ", pretty_json(q), "\n")
print("response: ", t.execute_query(q))
method = 'lookup'
params = {
'qtype': 'SOA',
'qname': 'www.example.com',
'remote': '10.1.1.21',
'local': '0.0.0.0',
'real-remote': '10.1.1.21/32',
'zone-id': -1
}
q = t.prepare_query(method, params)
print("query: ", pretty_json(q), "\n")
print("response: ", t.execute_query(q))
| bsd-3-clause | -3,377,897,696,496,218,600 | -4,856,843,464,550,433,000 | 24.486842 | 62 | 0.497161 | false |
daniel20162016/my-first | read_xml_all/calcul_matrix_compare_je_good_192matrix.py | 1 | 6357 | # -*- coding: utf-8 -*-
"""
Created on Mon Oct 31 15:45:22 2016
@author: wang
"""
#from matplotlib import pylab as plt
#from numpy import fft, fromstring, int16, linspace
#import wave
from read_wav_xml_good_1 import*
from matrix_24_2 import*
from max_matrix_norm import*
import numpy as np
# open a wave file
filename = 'francois_filon_pure_3.wav'
filename_1 ='francois_filon_pure_3.xml'
word ='je'
wave_signal_float,framerate, word_start_point, word_length_point, word_end_point= read_wav_xml_good_1(filename,filename_1,word)
#print 'word_start_point=',word_start_point
#print 'word_length_point=',word_length_point
#print 'word_end_point=',word_end_point
XJ_1 =wave_signal_float
t_step = 1920
t_entre_step = 1440
t_du_1_1 = int(word_start_point[0])
t_du_1_2 = int(word_end_point[0])
t_du_2_1 = int(word_start_point[1])
t_du_2_2 = int(word_end_point[1])
t_du_3_1 = int(word_start_point[2])
t_du_3_2 = int(word_end_point[2])
t_du_4_1 = int(word_start_point[3])
t_du_4_2 = int(word_end_point[3])
t_du_5_1 = int(word_start_point[4])
t_du_5_2 = int(word_end_point[4])
fs = framerate
#XJ_du_1 = wave_signal_float[(t_du_1_1-1):t_du_1_2];
#length_XJ_du_1 = int(word_length_point[0]+1);
#x1,y1,z1=matrix_24_2(XJ_du_1,fs)
#x1=max_matrix_norm(x1)
#==============================================================================
# this part calculates the first matrix
#==============================================================================
XJ_du_1_2 = XJ_1[(t_du_1_1-1):(t_du_1_1+t_step)];
x1_1,y1_1,z1_1=matrix_24_2(XJ_du_1_2 ,fs)
x1_1=max_matrix_norm(x1_1)
matrix_all_step_new_1 = np.zeros([192])
for i in range(0,24):
matrix_all_step_new_1[i]=x1_1[i]
#==============================================================================
# the remaining columns are filled with the FFTs of the other windows
#==============================================================================
for i in range(1,8):
XJ_du_1_total = XJ_1[(t_du_1_1+t_entre_step*(i)-1):(t_du_1_1+t_step+t_entre_step*(i) )];
x1_all,y1_all,z1_all=matrix_24_2(XJ_du_1_total,fs)
x1_all=max_matrix_norm(x1_all)
for j in range(0,24):
matrix_all_step_new_1[24*i+j]=x1_all[j]
#==============================================================================
# this part calculates the second matrix
#==============================================================================
for k in range (1,2):
t_start=t_du_2_1
XJ_du_1_2 = XJ_1[(t_start-1):(t_start+t_step)];
x1_1,y1_1,z1_1=matrix_24_2(XJ_du_1_2 ,fs)
x1_1=max_matrix_norm(x1_1)
matrix_all_step_new_2 = np.zeros([192])
for i in range(0,24):
matrix_all_step_new_2[i]=x1_1[i]
#==============================================================================
# the remaining columns are filled with the FFTs of the other windows
#==============================================================================
for i in range(1,8):
XJ_du_1_total = XJ_1[(t_start+t_entre_step*(i)-1):(t_start+t_step+t_entre_step*(i) )];
x1_all,y1_all,z1_all=matrix_24_2(XJ_du_1_total,fs)
x1_all=max_matrix_norm(x1_all)
for j in range(0,24):
matrix_all_step_new_2[24*i+j]=x1_all[j]
#==============================================================================
# this part calculates the third matrix
#==============================================================================
for k in range (1,2):
t_start=t_du_3_1
XJ_du_1_2 = XJ_1[(t_start-1):(t_start+t_step)];
x1_1,y1_1,z1_1=matrix_24_2(XJ_du_1_2 ,fs)
x1_1=max_matrix_norm(x1_1)
matrix_all_step_new_3 = np.zeros([192])
for i in range(0,24):
matrix_all_step_new_3[i]=x1_1[i]
#==============================================================================
# the remaining columns are filled with the FFTs of the other windows
#==============================================================================
for i in range(1,8):
XJ_du_1_total = XJ_1[(t_start+t_entre_step*(i)-1):(t_start+t_step+t_entre_step*(i) )];
x1_all,y1_all,z1_all=matrix_24_2(XJ_du_1_total,fs)
x1_all=max_matrix_norm(x1_all)
for j in range(0,24):
matrix_all_step_new_3[24*i+j]=x1_all[j]
#==============================================================================
# this part calculates the fourth matrix
#==============================================================================
for k in range (1,2):
t_start=t_du_4_1
XJ_du_1_2 = XJ_1[(t_start-1):(t_start+t_step)];
x1_1,y1_1,z1_1=matrix_24_2(XJ_du_1_2 ,fs)
x1_1=max_matrix_norm(x1_1)
matrix_all_step_new_4 = np.zeros([192])
for i in range(0,24):
matrix_all_step_new_4[i]=x1_1[i]
#==============================================================================
# the remaining columns are filled with the FFTs of the other windows
#==============================================================================
for i in range(1,8):
# print i
XJ_du_1_total = XJ_1[(t_start+t_entre_step*(i)-1):(t_start+t_step+t_entre_step*(i) )];
x1_all,y1_all,z1_all=matrix_24_2(XJ_du_1_total,fs)
x1_all=max_matrix_norm(x1_all)
for j in range(0,24):
matrix_all_step_new_4[24*i+j]=x1_all[j]
#print 'matrix_all_step_4=',matrix_all_step_4
#==============================================================================
# this part calculates the fifth matrix
#==============================================================================
for k in range (1,2):
t_start=t_du_5_1
XJ_du_1_2 = XJ_1[(t_start-1):(t_start+t_step)];
x1_1,y1_1,z1_1=matrix_24_2(XJ_du_1_2 ,fs)
x1_1=max_matrix_norm(x1_1)
matrix_all_step_new_5 = np.zeros([192])
for i in range(0,24):
matrix_all_step_new_5[i]=x1_1[i]
#==============================================================================
# the remaining columns are filled with the FFTs of the other windows
#==============================================================================
for i in range(1,8):
# print i
XJ_du_1_total = XJ_1[(t_start+t_entre_step*(i)-1):(t_start+t_step+t_entre_step*(i) )];
x1_all,y1_all,z1_all=matrix_24_2(XJ_du_1_total,fs)
x1_all=max_matrix_norm(x1_all)
for j in range(0,24):
matrix_all_step_new_5[24*i+j]=x1_all[j]
#print 'matrix_all_step_5=',matrix_all_step_5
np.savez('je_compare_192_matrix.npz',matrix_all_step_new_1,matrix_all_step_new_2,matrix_all_step_new_3,matrix_all_step_new_4,matrix_all_step_new_5)
| mit | -8,377,679,391,775,332,000 | 3,473,323,830,944,191,000 | 38.484472 | 147 | 0.46028 | false |
wholland/env | env.py | 1 | 6119 | #!/usr/bin/python
import argparse
import json
import shutil
from shutil import rmtree  # used by copy_dir() below
import os
def copy_file(src, dest, backup):
success = True
if not backup is None:
(backup_folder, backup_file) = os.path.split(backup)
print("Creating backup file for " + dest + " at " + backup)
try:
if not os.path.exists(backup_folder):
os.makedirs(backup_folder)
shutil.copyfile(dest, backup)
except Exception as e:
print("Backup failed: " + str(e))
success = False
if success:
(dest_folder, dest_file) = os.path.split(dest)
print("Copy file " + src + " to " + dest)
try:
if not os.path.exists(dest_folder):
os.makedirs(dest_folder)
shutil.copyfile(src, dest)
except IOError as e:
print("Copy failed: " + str(e))
def copy_dir(src, dest, backup):
success = True
if not backup is None:
try:
print("Creating backup file for " + dest + " at " + backup)
rmtree(backup, ignore_errors=True)
shutil.copytree(dest, backup)
except IOError as e:
print("Backup failed: " + str(e))
success = False
if success:
try:
print("Copy directory " + src + " to " + dest)
shutil.copytree(src, dest)
except IOError as e:
print("Copy failed: " + str(e))
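# The definition file (see --file below, default ~/env/env.def) is expected to
# be a JSON list of entries with the keys read by push()/pull()/revert():
# "group", "name", "type" ("f" for file, "d" for directory), "source" and
# "target". A hypothetical entry:
#
#   [{"group": "shell", "name": "bashrc", "type": "f",
#     "source": "bash/bashrc", "target": ".bashrc"}]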
def push(args):
defs = json.load(open(os.path.expanduser(args.file)))
for definition in defs:
if definition["group"] in args.categories:
print("Pushing " + definition["name"]);
src = os.path.expanduser(os.path.join(args.source, definition["source"]))
dest = os.path.expanduser(os.path.join(args.target, definition["target"]))
backup = os.path.expanduser(os.path.join(args.backup, definition["target"]))
if definition["type"].lower() == "f":
# Copy a file
if args.unsafe:
if not args.wimp:
copy_file(src, dest, None)
else:
print("Would copy file. Src:" + src + " Dest:" + dest);
else:
if not args.wimp:
copy_file(src, dest, backup)
else:
print("Would copy file. Src:" + src + " Dest:" + dest + " Backup:" + backup);
elif definition["type"].lower() == "d":
# Copy a directory
if args.verbose:
print(definition["name"] + ": Pushing directory from " + src + " to " + dest)
if args.unsafe:
if not args.wimp:
copy_dir(src, dest, None)
else:
print("Would copy file. Src:" + src + " Dest:" + dest);
else:
if not args.wimp:
copy_dir(src, dest, backup)
else:
print("Would copy dir. Src:" + src + " Dest:" + dest + " Backup:" + backup);
else:
print(definition["name"] + ": Unknown type \""+definition["type"]+"\"")
def pull(args):
defs = json.load(open(os.path.expanduser(args.file)))
for definition in defs:
if definition["group"] in args.categories:
print("Pulling " + definition["name"]);
src = os.path.expanduser(os.path.join(args.target, definition["target"]))
dest = os.path.expanduser(os.path.join(args.source, definition["source"]))
if definition["type"].lower() == "f":
# Copy a file
if not args.wimp:
copy_file(src, dest, None)
else:
print("Would copy file. Src:" + src + " Dest:" + dest);
elif definition["type"].lower() == "d":
# Copy a directory
if not args.wimp:
copy_dir(src, dest, None)
else:
print("Would copy directory. Src:" + src + " Dest:" + dest);
else:
print(definition["name"] + ": Unknown type \""+definition["type"]+"\"")
def revert(args):
defs = json.load(open(os.path.expanduser(args.file)))
for definition in defs:
if definition["group"] in args.categories:
src = os.path.expanduser(os.path.join(args.backup, definition["target"]))
dest = os.path.expanduser(os.path.join(args.target, definition["target"]))
if definition["type"].lower() == "f":
# Copy a file
if not args.wimp:
copy_file(src, dest, None)
else:
print("Would copy file. Src:" + src + " Dest:" + dest);
elif definition["type"].lower() == "d":
# Copy a directory
if not args.wimp:
copy_dir(src, dest, None)
else:
print("Would copy directory. Src:" + src + " Dest:" + dest);
else:
print(definition["name"] + ": Unknown type \""+definition["type"]+"\"")
def main():
default_defs = "~/env/env.def"
default_source = "~/env/"
default_target = "~/"
default_backup = "~/.backup/env/"
parser = argparse.ArgumentParser()
parser.add_argument("-v", "--verbose", action="store_true", help="Increase Verbosity")
parser.add_argument("-f", "--file", default=default_defs, help="Definition File to use")
parser.add_argument("-s", "--source", default=default_source, help="Override source root")
parser.add_argument("-t", "--target", default=default_target, help="Override target root")
parser.add_argument("-w", "--wimp", action="store_true", help="Don't actually make any changes (implies -v)")
subparsers = parser.add_subparsers()
parser_push = subparsers.add_parser("push", help="Push configs into environment")
parser_push.add_argument("-u", "--unsafe", action="store_true", help="No backups Created")
parser_push.add_argument("-a", "--All", action="store_true", help="Cleanup Backups")
parser_push.add_argument("-b", "--backup", default=default_backup, help="Override backup root")
parser_push.add_argument("categories", nargs=argparse.REMAINDER)
parser_push.set_defaults(func=push)
parser_pull = subparsers.add_parser("pull", help="Pull configs from environment")
parser_pull.add_argument("-a", "--All", action="store_true", help="Cleanup Backups")
parser_pull.add_argument("categories", nargs=argparse.REMAINDER)
parser_pull.set_defaults(func=pull)
parser_revert = subparsers.add_parser("revert", help="Revert configs from backups")
parser_revert.add_argument("-c", "--cleanup", action="store_true", help="Cleanup Backups")
parser_revert.add_argument("-a", "--All", action="store_true", help="Cleanup Backups")
parser_revert.add_argument("-b", "--backup", default=default_backup, help="Override backup root")
parser_revert.add_argument("categories", nargs=argparse.REMAINDER)
parser_revert.set_defaults(func=revert)
args = parser.parse_args()
if args.wimp:
args.verbose = True
args.func(args)
if __name__ == "__main__":
main();
| mit | 8,504,266,744,384,629,000 | -5,111,651,464,616,246,000 | 34.575581 | 110 | 0.648962 | false |
jailuthra/misc | python/quicksort.py | 1 | 1066 | import sys
import random
comparisons = 0
def main():
global comparisons
with open(sys.argv[1], 'r') as f:
arr = [int(x) for x in f.read().split()]
quicksort(arr, 0, len(arr)-1)
# print(arr)
print(comparisons)
def getPivot(arr, l, r):
first = arr[l]
mid = arr[(l+r)//2]
last = arr[r]
if first <= mid <= last or last <= mid <= first:
return (l+r)//2
elif mid <= first <= last or last <= first <= mid:
return l
else:
return r
def partition(arr, l, r):
k = getPivot(arr, l, r)
k = random.randint(l, r)
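    # note: the random pivot chosen above overrides the median-of-three index
    # returned by getPivot()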
pivot = arr[k]
arr[k], arr[l] = arr[l], arr[k]
i = l+1
for j in range(l+1, r+1):
if arr[j] < pivot:
arr[j], arr[i] = arr[i], arr[j]
i += 1
arr[l], arr[i-1] = arr[i-1], arr[l]
return i-1
def quicksort(arr, l, r):
if r - l < 0:
return
global comparisons
comparisons += r - l
p = partition(arr, l, r)
quicksort(arr, l, p-1)
quicksort(arr, p+1, r)
if __name__ == '__main__':
main()
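# Illustrative usage: `python quicksort.py numbers.txt`, where numbers.txt
# holds whitespace-separated integers; the script prints the total number of
# element-to-pivot comparisons (printing the sorted array is commented out).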
| mit | -2,577,473,741,709,697,000 | 5,715,162,682,362,268,000 | 21.208333 | 54 | 0.5 | false |
kosz85/django | django/conf/locale/nn/formats.py | 65 | 1743 | # This file is distributed under the same license as the Django package.
#
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'j. F Y'
TIME_FORMAT = 'H:i'
DATETIME_FORMAT = 'j. F Y H:i'
YEAR_MONTH_FORMAT = 'F Y'
MONTH_DAY_FORMAT = 'j. F'
SHORT_DATE_FORMAT = 'd.m.Y'
SHORT_DATETIME_FORMAT = 'd.m.Y H:i'
FIRST_DAY_OF_WEEK = 1 # Monday
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
# Kept ISO formats as they are in first position
DATE_INPUT_FORMATS = [
'%Y-%m-%d', '%d.%m.%Y', '%d.%m.%y', # '2006-10-25', '25.10.2006', '25.10.06'
# '%d. %b %Y', '%d %b %Y', # '25. okt 2006', '25 okt 2006'
# '%d. %b. %Y', '%d %b. %Y', # '25. okt. 2006', '25 okt. 2006'
# '%d. %B %Y', '%d %B %Y', # '25. oktober 2006', '25 oktober 2006'
]
DATETIME_INPUT_FORMATS = [
'%Y-%m-%d %H:%M:%S', # '2006-10-25 14:30:59'
'%Y-%m-%d %H:%M:%S.%f', # '2006-10-25 14:30:59.000200'
'%Y-%m-%d %H:%M', # '2006-10-25 14:30'
'%Y-%m-%d', # '2006-10-25'
'%Y-%m-%d', # '2006-10-25'
'%d.%m.%Y %H:%M:%S', # '25.10.2006 14:30:59'
'%d.%m.%Y %H:%M:%S.%f', # '25.10.2006 14:30:59.000200'
'%d.%m.%Y %H:%M', # '25.10.2006 14:30'
'%d.%m.%Y', # '25.10.2006'
'%d.%m.%y %H:%M:%S', # '25.10.06 14:30:59'
'%d.%m.%y %H:%M:%S.%f', # '25.10.06 14:30:59.000200'
'%d.%m.%y %H:%M', # '25.10.06 14:30'
'%d.%m.%y', # '25.10.06'
]
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = '\xa0' # non-breaking space
NUMBER_GROUPING = 3
| bsd-3-clause | 1,218,534,502,296,249,600 | -1,542,469,435,405,297,400 | 42.575 | 81 | 0.513483 | false |
rubenvereecken/pokemongo-api | POGOProtos/Data/Battle/BattleParticipant_pb2.py | 16 | 4760 | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: POGOProtos/Data/Battle/BattleParticipant.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from POGOProtos.Data.Battle import BattlePokemonInfo_pb2 as POGOProtos_dot_Data_dot_Battle_dot_BattlePokemonInfo__pb2
from POGOProtos.Data.Player import PlayerPublicProfile_pb2 as POGOProtos_dot_Data_dot_Player_dot_PlayerPublicProfile__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='POGOProtos/Data/Battle/BattleParticipant.proto',
package='POGOProtos.Data.Battle',
syntax='proto3',
serialized_pb=_b('\n.POGOProtos/Data/Battle/BattleParticipant.proto\x12\x16POGOProtos.Data.Battle\x1a.POGOProtos/Data/Battle/BattlePokemonInfo.proto\x1a\x30POGOProtos/Data/Player/PlayerPublicProfile.proto\"\xac\x02\n\x11\x42\x61ttleParticipant\x12\x41\n\x0e\x61\x63tive_pokemon\x18\x01 \x01(\x0b\x32).POGOProtos.Data.Battle.BattlePokemonInfo\x12K\n\x16trainer_public_profile\x18\x02 \x01(\x0b\x32+.POGOProtos.Data.Player.PlayerPublicProfile\x12\x42\n\x0freverse_pokemon\x18\x03 \x03(\x0b\x32).POGOProtos.Data.Battle.BattlePokemonInfo\x12\x43\n\x10\x64\x65\x66\x65\x61ted_pokemon\x18\x04 \x03(\x0b\x32).POGOProtos.Data.Battle.BattlePokemonInfob\x06proto3')
,
dependencies=[POGOProtos_dot_Data_dot_Battle_dot_BattlePokemonInfo__pb2.DESCRIPTOR,POGOProtos_dot_Data_dot_Player_dot_PlayerPublicProfile__pb2.DESCRIPTOR,])
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_BATTLEPARTICIPANT = _descriptor.Descriptor(
name='BattleParticipant',
full_name='POGOProtos.Data.Battle.BattleParticipant',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='active_pokemon', full_name='POGOProtos.Data.Battle.BattleParticipant.active_pokemon', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='trainer_public_profile', full_name='POGOProtos.Data.Battle.BattleParticipant.trainer_public_profile', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='reverse_pokemon', full_name='POGOProtos.Data.Battle.BattleParticipant.reverse_pokemon', index=2,
number=3, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='defeated_pokemon', full_name='POGOProtos.Data.Battle.BattleParticipant.defeated_pokemon', index=3,
number=4, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=173,
serialized_end=473,
)
_BATTLEPARTICIPANT.fields_by_name['active_pokemon'].message_type = POGOProtos_dot_Data_dot_Battle_dot_BattlePokemonInfo__pb2._BATTLEPOKEMONINFO
_BATTLEPARTICIPANT.fields_by_name['trainer_public_profile'].message_type = POGOProtos_dot_Data_dot_Player_dot_PlayerPublicProfile__pb2._PLAYERPUBLICPROFILE
_BATTLEPARTICIPANT.fields_by_name['reverse_pokemon'].message_type = POGOProtos_dot_Data_dot_Battle_dot_BattlePokemonInfo__pb2._BATTLEPOKEMONINFO
_BATTLEPARTICIPANT.fields_by_name['defeated_pokemon'].message_type = POGOProtos_dot_Data_dot_Battle_dot_BattlePokemonInfo__pb2._BATTLEPOKEMONINFO
DESCRIPTOR.message_types_by_name['BattleParticipant'] = _BATTLEPARTICIPANT
BattleParticipant = _reflection.GeneratedProtocolMessageType('BattleParticipant', (_message.Message,), dict(
DESCRIPTOR = _BATTLEPARTICIPANT,
__module__ = 'POGOProtos.Data.Battle.BattleParticipant_pb2'
# @@protoc_insertion_point(class_scope:POGOProtos.Data.Battle.BattleParticipant)
))
_sym_db.RegisterMessage(BattleParticipant)
# @@protoc_insertion_point(module_scope)
| mit | -6,109,187,427,268,664,000 | -4,731,600,799,200,936,000 | 48.072165 | 657 | 0.765546 | false |
sarahfo/oppia | core/domain/dependency_registry_test.py | 29 | 4131 | # coding: utf-8
#
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for JavaScript library dependencies."""
__author__ = 'Sean Lip'
from core.domain import dependency_registry
from core.domain import exp_services
from core.domain import interaction_registry
from core.tests import test_utils
import feconf
class DependencyRegistryTests(test_utils.GenericTestBase):
"""Tests for the dependency registry."""
def test_get_dependency_html(self):
self.assertIn(
'jsrepl',
dependency_registry.Registry.get_dependency_html('jsrepl'))
with self.assertRaises(IOError):
dependency_registry.Registry.get_dependency_html('a')
class DependencyControllerTests(test_utils.GenericTestBase):
"""Tests for dependency loading on user-facing pages."""
def test_no_dependencies_in_non_exploration_pages(self):
response = self.testapp.get(feconf.GALLERY_URL)
self.assertEqual(response.status_int, 200)
response.mustcontain(no=['jsrepl'])
response = self.testapp.get('/about')
self.assertEqual(response.status_int, 200)
response.mustcontain(no=['jsrepl'])
def test_dependencies_loaded_in_exploration_editor(self):
exp_services.load_demo('0')
# Register and login as an editor.
self.signup(self.EDITOR_EMAIL, self.EDITOR_USERNAME)
self.login(self.EDITOR_EMAIL)
# Verify that the exploration does not have a jsrepl dependency.
exploration = exp_services.get_exploration_by_id('0')
interaction_ids = exploration.get_interaction_ids()
all_dependency_ids = (
interaction_registry.Registry.get_deduplicated_dependency_ids(
interaction_ids))
self.assertNotIn('jsrepl', all_dependency_ids)
# However, jsrepl is loaded in the exploration editor anyway, since
# all dependencies are loaded in the exploration editor.
response = self.testapp.get('/create/0')
self.assertEqual(response.status_int, 200)
response.mustcontain('jsrepl')
self.logout()
def test_dependency_does_not_load_in_exploration_not_containing_it(self):
EXP_ID = '0'
exp_services.load_demo(EXP_ID)
# Verify that exploration 0 does not have a jsrepl dependency.
exploration = exp_services.get_exploration_by_id(EXP_ID)
interaction_ids = exploration.get_interaction_ids()
all_dependency_ids = (
interaction_registry.Registry.get_deduplicated_dependency_ids(
interaction_ids))
self.assertNotIn('jsrepl', all_dependency_ids)
# Thus, jsrepl is not loaded in the exploration reader.
response = self.testapp.get('/explore/%s' % EXP_ID)
self.assertEqual(response.status_int, 200)
response.mustcontain(no=['jsrepl'])
def test_dependency_loads_in_exploration_containing_it(self):
EXP_ID = '1'
exp_services.load_demo(EXP_ID)
# Verify that exploration 1 has a jsrepl dependency.
exploration = exp_services.get_exploration_by_id(EXP_ID)
interaction_ids = exploration.get_interaction_ids()
all_dependency_ids = (
interaction_registry.Registry.get_deduplicated_dependency_ids(
interaction_ids))
self.assertIn('jsrepl', all_dependency_ids)
# Thus, jsrepl is loaded in the exploration reader.
response = self.testapp.get('/explore/%s' % EXP_ID)
self.assertEqual(response.status_int, 200)
response.mustcontain('jsrepl')
| apache-2.0 | 2,296,181,748,860,828,000 | 4,693,708,715,953,321,000 | 36.554545 | 77 | 0.681917 | false |
jobscore/sync-engine | migrations/env.py | 3 | 2894 | from __future__ import with_statement
from alembic import context
from logging.config import fileConfig
# Interpret the config file for Python logging.
# This line sets up loggers basically.
fileConfig(context.config.config_file_name)
# add your model's MetaData object here
# for 'autogenerate' support
# from myapp import mymodel
from inbox.models.base import MailSyncBase
target_metadata = MailSyncBase.metadata
from inbox.config import config
from inbox.ignition import EngineManager
# Alembic configuration is confusing. Here we look for a shard id both as a
# "main option" (where it's programmatically set by bin/create-db), and in the
# "x" argument, which is the primary facility for passing additional
# command-line args to alembic. So you would do e.g.
#
# alembic -x shard_id=1 upgrade +1
#
# to target shard 1 for the migration.
config_shard_id = context.config.get_main_option('shard_id')
x_shard_id = context.get_x_argument(as_dictionary=True).get(
'shard_id')
if config_shard_id is not None:
shard_id = int(config_shard_id)
elif x_shard_id is not None:
shard_id = int(x_shard_id)
else:
raise ValueError('No shard_id is configured for migration; '
'run `alembic -x shard_id=<target shard id> upgrade +1`')
def run_migrations_offline():
"""Run migrations in 'offline' mode.
This configures the context with just a URL
and not an Engine, though an Engine is acceptable
here as well. By skipping the Engine creation
we don't even need a DBAPI to be available.
Calls to context.execute() here emit the given string to the
script output.
"""
engine_manager = EngineManager(config.get_required('DATABASE_HOSTS'),
config.get_required('DATABASE_USERS'),
include_disabled=True)
engine = engine_manager.engines[shard_id]
context.configure(engine=engine, url=engine.url)
with context.begin_transaction():
context.run_migrations()
def run_migrations_online():
"""Run migrations in 'online' mode.
In this scenario we need to create an Engine
and associate a connection with the context.
"""
engine_manager = EngineManager(config.get_required('DATABASE_HOSTS'),
config.get_required('DATABASE_USERS'),
include_disabled=True)
engine = engine_manager.engines[shard_id]
connection = engine.connect()
# Set sane lock wait timeout value.
connection.execute('SET @@lock_wait_timeout=15')
context.configure(
connection=connection,
target_metadata=target_metadata
)
try:
with context.begin_transaction():
context.run_migrations()
finally:
connection.close()
if context.is_offline_mode():
run_migrations_offline()
else:
run_migrations_online()
| agpl-3.0 | -7,631,318,871,762,852,000 | -8,421,367,250,939,299,000 | 30.456522 | 78 | 0.678645 | false |
nens/threedi-qgis-plugin | tests/test_geo_utils.py | 1 | 1446 | """
Test geo utils.
"""
from qgis.core import QgsCoordinateTransform
from ThreeDiToolbox.tests.utilities import ensure_qgis_app_is_initialized
from ThreeDiToolbox.utils.geo_utils import get_coord_transformation_instance
import pytest
@pytest.fixture
def rdnew_to_wgs84():
ensure_qgis_app_is_initialized()
src_epsg, dest_epsg = 28992, 4326
transformer = get_coord_transformation_instance(src_epsg, dest_epsg)
return transformer
@pytest.fixture
def wgs84_to_rdnew():
ensure_qgis_app_is_initialized()
src_epsg, dest_epsg = 4326, 28992
transformer = get_coord_transformation_instance(src_epsg, dest_epsg)
return transformer
def test_get_coord_transformation_instance(rdnew_to_wgs84, wgs84_to_rdnew):
assert isinstance(rdnew_to_wgs84, QgsCoordinateTransform)
assert isinstance(wgs84_to_rdnew, QgsCoordinateTransform)
def test_get_coord_transformation_epsg(rdnew_to_wgs84):
assert rdnew_to_wgs84.sourceCrs().isValid()
assert rdnew_to_wgs84.sourceCrs().authid() == "EPSG:28992"
assert rdnew_to_wgs84.destinationCrs().isValid()
assert rdnew_to_wgs84.destinationCrs().authid() == "EPSG:4326"
def test_get_coord_transformation_epsg_reverse(wgs84_to_rdnew):
assert wgs84_to_rdnew.sourceCrs().isValid()
assert wgs84_to_rdnew.sourceCrs().authid() == "EPSG:4326"
assert wgs84_to_rdnew.destinationCrs().isValid()
assert wgs84_to_rdnew.destinationCrs().authid() == "EPSG:28992"
| gpl-3.0 | 6,500,953,183,080,428,000 | 274,233,383,203,015,330 | 31.863636 | 76 | 0.744813 | false |
xfournet/intellij-community | python/lib/Lib/unicodedata.py | 69 | 6437 | from bisect import bisect_left
import operator
import java.lang.Character
# XXX - this is intended as a stopgap measure until 2.5.1, which will have a Java implementation
# requires java 6 for `normalize` function
# only has one version of the database
# does not normalized ideographs
_codepoints = {}
_eaw = {}
_names = {}
_segments = []
_eaw_segments = []
Nonesuch = object()
def get_int(col):
try:
return int(col)
except ValueError:
return None
def get_yn(col):
if col == 'Y': return 1
else: return 0
def get_numeric(col):
try:
return float(col)
except ValueError:
try:
a, b = col.split('/')
return float(a)/float(b)
except:
return None
def init_unicodedata(data):
for row in data:
cols = row.split(';')
codepoint = int(cols[0], 16)
name = cols[1]
if name == '<CJK Ideograph, Last>':
lookup_name = 'CJK UNIFIED IDEOGRAPH'
else:
lookup_name = name
data = (
cols[2],
get_int(cols[3]),
cols[4],
cols[5],
get_int(cols[6]),
get_int(cols[7]),
get_numeric(cols[8]),
get_yn(cols[9]),
lookup_name,
)
if name.find('First') >= 0:
start = codepoint
elif name.find('Last') >= 0:
_segments.append((start, (start, codepoint), data))
else:
_names[name] = unichr(codepoint)
_codepoints[codepoint] = data
def init_east_asian_width(data):
for row in data:
if row.startswith('#'):
continue
row = row.partition('#')[0]
cols = row.split(';')
if len(cols) < 2:
continue
cr = cols[0].split('..')
width = cols[1].rstrip()
if len(cr) == 1:
codepoint = int(cr[0], 16)
_eaw[codepoint] = width
else:
start = int(cr[0], 16)
end = int(cr[1], 16)
_eaw_segments.append((start, (start, end), width))
# xxx - need to normalize the segments, so
# <CJK Ideograph, Last> ==> CJK UNIFIED IDEOGRAPH;
# may need to do some sort of analysis against CPython for the normalization!
def name(unichr, default=None):
codepoint = get_codepoint(unichr, "name")
v = _codepoints.get(codepoint, None)
if v is None:
v = check_segments(codepoint, _segments)
if v is not None:
return "%s-%X" % (v[8], codepoint)
if v is None:
if default is not Nonesuch:
return default
raise ValueError()
return v[8]
# xxx - also need to add logic here so that if it's CJK UNIFIED
# IDEOGRAPH-8000, we go against the segment to verify the prefix
def lookup(name):
return _names[name]
def check_segments(codepoint, segments):
i = bisect_left(segments, (codepoint,))
if i < len(segments):
segment = segments[i - 1]
if codepoint <= segment[1][1]:
return segment[2]
return None
def get_codepoint(unichr, fn=None):
if not(isinstance(unichr, unicode)):
        raise TypeError("%s() argument 1 must be unicode, not %s" % (fn, type(unichr).__name__))
if len(unichr) > 1 or len(unichr) == 0:
raise TypeError("need a single Unicode character as parameter")
return ord(unichr)
def get_eaw(unichr, default, fn):
codepoint = get_codepoint(unichr, fn)
v = _eaw.get(codepoint, None)
if v is None:
v = check_segments(codepoint, _eaw_segments)
if v is None:
if default is not Nonesuch:
return default
raise ValueError()
return v
def get(unichr, default, fn, getter):
codepoint = get_codepoint(unichr, fn)
data = _codepoints.get(codepoint, None)
if data is None:
data = check_segments(codepoint, _segments)
if data is None:
if default is not Nonesuch:
return default
raise ValueError()
v = getter(data)
if v is None:
if default is not Nonesuch:
return default
raise ValueError()
else:
return v
category_getter = operator.itemgetter(0)
combining_getter = operator.itemgetter(1)
bidirectional_getter = operator.itemgetter(2)
decomposition_getter = operator.itemgetter(3)
decimal_getter = operator.itemgetter(4)
digit_getter = operator.itemgetter(5)
numeric_getter = operator.itemgetter(6)
mirrored_getter = operator.itemgetter(7)
def decimal(unichr, default=Nonesuch):
return get(unichr, default, 'decimal', decimal_getter)
def decomposition(unichr, default=''):
return get(unichr, default, 'decomposition', decomposition_getter)
def digit(unichr, default=Nonesuch):
return get(unichr, default, 'digit', digit_getter)
def numeric(unichr, default=Nonesuch):
return get(unichr, default, 'numeric', numeric_getter)
def category(unichr):
return get(unichr, 'Cn', 'catgegory', category_getter)
def bidirectional(unichr):
return get(unichr, '', 'bidirectional', bidirectional_getter)
def combining(unichr):
return get(unichr, 0, 'combining', combining_getter)
def mirrored(unichr):
return get(unichr, 0, 'mirrored', mirrored_getter)
def east_asian_width(unichr):
return get_eaw(unichr, 'N', 'east_asian_width')
def jymirrored(unichr):
return java.lang.Character.isMirrored(get_codepoint(unichr, 'mirrored'))
try:
from java.text import Normalizer
_forms = {
'NFC': Normalizer.Form.NFC,
'NFKC': Normalizer.Form.NFKC,
'NFD': Normalizer.Form.NFD,
'NFKD': Normalizer.Form.NFKD
}
def normalize(form, unistr):
"""
Return the normal form 'form' for the Unicode string unistr. Valid
values for form are 'NFC', 'NFKC', 'NFD', and 'NFKD'.
"""
try:
normalizer_form = _forms[form]
except KeyError:
raise ValueError('invalid normalization form')
return Normalizer.normalize(unistr, normalizer_form)
except ImportError:
pass
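# Example usage (illustrative; the data files are loaded by init() at import
# time, and normalize() is only defined when java.text.Normalizer is present):
#
#   import unicodedata
#   unicodedata.name(u'A')                      # 'LATIN CAPITAL LETTER A'
#   unicodedata.lookup('LATIN SMALL LETTER A')  # u'a'
#   unicodedata.category(u'5')                  # 'Nd'
#   unicodedata.normalize('NFC', u'e\u0301')    # u'\xe9'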
def init():
import pkgutil
import os.path
import StringIO
import sys
my_path = os.path.dirname(__file__)
loader = pkgutil.get_loader('unicodedata')
init_unicodedata(StringIO.StringIO(loader.get_data(os.path.join(my_path, 'UnicodeData.txt'))))
init_east_asian_width(StringIO.StringIO(loader.get_data(os.path.join(my_path, 'EastAsianWidth.txt'))))
init()
| apache-2.0 | -8,670,031,607,505,170,000 | 8,767,752,780,109,545,000 | 27.10917 | 106 | 0.607426 | false |
keithroe/vtkoptix | ThirdParty/Twisted/twisted/test/test_ident.py | 41 | 6029 | # Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Test cases for twisted.protocols.ident module.
"""
import struct
from twisted.protocols import ident
from twisted.python import failure
from twisted.internet import error
from twisted.internet import defer
from twisted.trial import unittest
from twisted.test.proto_helpers import StringTransport
class ClassParserTestCase(unittest.TestCase):
"""
Test parsing of ident responses.
"""
def setUp(self):
"""
Create a ident client used in tests.
"""
self.client = ident.IdentClient()
def test_indentError(self):
"""
'UNKNOWN-ERROR' error should map to the L{ident.IdentError} exception.
"""
d = defer.Deferred()
self.client.queries.append((d, 123, 456))
self.client.lineReceived('123, 456 : ERROR : UNKNOWN-ERROR')
return self.assertFailure(d, ident.IdentError)
    def test_noUserError(self):
"""
'NO-USER' error should map to the L{ident.NoUser} exception.
"""
d = defer.Deferred()
self.client.queries.append((d, 234, 456))
self.client.lineReceived('234, 456 : ERROR : NO-USER')
return self.assertFailure(d, ident.NoUser)
def test_invalidPortError(self):
"""
'INVALID-PORT' error should map to the L{ident.InvalidPort} exception.
"""
d = defer.Deferred()
self.client.queries.append((d, 345, 567))
self.client.lineReceived('345, 567 : ERROR : INVALID-PORT')
return self.assertFailure(d, ident.InvalidPort)
def test_hiddenUserError(self):
"""
'HIDDEN-USER' error should map to the L{ident.HiddenUser} exception.
"""
d = defer.Deferred()
self.client.queries.append((d, 567, 789))
self.client.lineReceived('567, 789 : ERROR : HIDDEN-USER')
return self.assertFailure(d, ident.HiddenUser)
def test_lostConnection(self):
"""
A pending query which failed because of a ConnectionLost should
receive an L{ident.IdentError}.
"""
d = defer.Deferred()
self.client.queries.append((d, 765, 432))
self.client.connectionLost(failure.Failure(error.ConnectionLost()))
return self.assertFailure(d, ident.IdentError)
class TestIdentServer(ident.IdentServer):
def lookup(self, serverAddress, clientAddress):
return self.resultValue
class TestErrorIdentServer(ident.IdentServer):
def lookup(self, serverAddress, clientAddress):
raise self.exceptionType()
class NewException(RuntimeError):
pass
class ServerParserTestCase(unittest.TestCase):
def testErrors(self):
p = TestErrorIdentServer()
p.makeConnection(StringTransport())
L = []
p.sendLine = L.append
p.exceptionType = ident.IdentError
p.lineReceived('123, 345')
self.assertEqual(L[0], '123, 345 : ERROR : UNKNOWN-ERROR')
p.exceptionType = ident.NoUser
p.lineReceived('432, 210')
self.assertEqual(L[1], '432, 210 : ERROR : NO-USER')
p.exceptionType = ident.InvalidPort
p.lineReceived('987, 654')
self.assertEqual(L[2], '987, 654 : ERROR : INVALID-PORT')
p.exceptionType = ident.HiddenUser
p.lineReceived('756, 827')
self.assertEqual(L[3], '756, 827 : ERROR : HIDDEN-USER')
p.exceptionType = NewException
p.lineReceived('987, 789')
self.assertEqual(L[4], '987, 789 : ERROR : UNKNOWN-ERROR')
errs = self.flushLoggedErrors(NewException)
self.assertEqual(len(errs), 1)
for port in -1, 0, 65536, 65537:
del L[:]
p.lineReceived('%d, 5' % (port,))
p.lineReceived('5, %d' % (port,))
self.assertEqual(
L, ['%d, 5 : ERROR : INVALID-PORT' % (port,),
'5, %d : ERROR : INVALID-PORT' % (port,)])
def testSuccess(self):
p = TestIdentServer()
p.makeConnection(StringTransport())
L = []
p.sendLine = L.append
p.resultValue = ('SYS', 'USER')
p.lineReceived('123, 456')
self.assertEqual(L[0], '123, 456 : USERID : SYS : USER')
if struct.pack('=L', 1)[0] == '\x01':
_addr1 = '0100007F'
_addr2 = '04030201'
else:
_addr1 = '7F000001'
_addr2 = '01020304'
class ProcMixinTestCase(unittest.TestCase):
line = ('4: %s:0019 %s:02FA 0A 00000000:00000000 '
'00:00000000 00000000 0 0 10927 1 f72a5b80 '
'3000 0 0 2 -1') % (_addr1, _addr2)
def testDottedQuadFromHexString(self):
p = ident.ProcServerMixin()
self.assertEqual(p.dottedQuadFromHexString(_addr1), '127.0.0.1')
def testUnpackAddress(self):
p = ident.ProcServerMixin()
self.assertEqual(p.unpackAddress(_addr1 + ':0277'),
('127.0.0.1', 631))
def testLineParser(self):
p = ident.ProcServerMixin()
self.assertEqual(
p.parseLine(self.line),
(('127.0.0.1', 25), ('1.2.3.4', 762), 0))
def testExistingAddress(self):
username = []
p = ident.ProcServerMixin()
p.entries = lambda: iter([self.line])
p.getUsername = lambda uid: (username.append(uid), 'root')[1]
self.assertEqual(
p.lookup(('127.0.0.1', 25), ('1.2.3.4', 762)),
(p.SYSTEM_NAME, 'root'))
self.assertEqual(username, [0])
def testNonExistingAddress(self):
p = ident.ProcServerMixin()
p.entries = lambda: iter([self.line])
self.assertRaises(ident.NoUser, p.lookup, ('127.0.0.1', 26),
('1.2.3.4', 762))
self.assertRaises(ident.NoUser, p.lookup, ('127.0.0.1', 25),
('1.2.3.5', 762))
self.assertRaises(ident.NoUser, p.lookup, ('127.0.0.1', 25),
('1.2.3.4', 763))
| bsd-3-clause | -7,544,261,003,172,469,000 | -7,465,978,264,401,543,000 | 30.07732 | 78 | 0.583845 | false |
ar7z1/ansible | lib/ansible/modules/messaging/rabbitmq_policy.py | 16 | 4535 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2013, John Dewey <john@dewey.ws>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: rabbitmq_policy
short_description: Manage the state of policies in RabbitMQ.
description:
- Manage the state of a policy in RabbitMQ.
version_added: "1.5"
author: "John Dewey (@retr0h)"
options:
name:
description:
- The name of the policy to manage.
required: true
vhost:
description:
- The name of the vhost to apply to.
default: /
apply_to:
description:
- What the policy applies to. Requires RabbitMQ 3.2.0 or later.
default: all
choices: [all, exchanges, queues]
version_added: "2.1"
pattern:
description:
- A regex of queues to apply the policy to.
required: true
tags:
description:
- A dict or string describing the policy.
required: true
priority:
description:
- The priority of the policy.
default: 0
node:
description:
- Erlang node name of the rabbit we wish to configure.
default: rabbit
state:
description:
- The state of the policy.
default: present
choices: [present, absent]
'''
EXAMPLES = '''
- name: ensure the default vhost contains the HA policy via a dict
rabbitmq_policy:
name: HA
pattern: .*
args:
tags:
ha-mode: all
- name: ensure the default vhost contains the HA policy
rabbitmq_policy:
name: HA
pattern: .*
tags:
ha-mode: all
'''
import json
from ansible.module_utils.basic import AnsibleModule
class RabbitMqPolicy(object):
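    """Thin wrapper around the ``rabbitmqctl`` command line tool used to
    list, set and clear policies on the configured vhost."""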
def __init__(self, module, name):
self._module = module
self._name = name
self._vhost = module.params['vhost']
self._pattern = module.params['pattern']
self._apply_to = module.params['apply_to']
self._tags = module.params['tags']
self._priority = module.params['priority']
self._node = module.params['node']
self._rabbitmqctl = module.get_bin_path('rabbitmqctl', True)
def _exec(self, args, run_in_check_mode=False):
if not self._module.check_mode or (self._module.check_mode and run_in_check_mode):
cmd = [self._rabbitmqctl, '-q', '-n', self._node]
args.insert(1, '-p')
args.insert(2, self._vhost)
rc, out, err = self._module.run_command(cmd + args, check_rc=True)
return out.splitlines()
return list()
def list(self):
policies = self._exec(['list_policies'], True)
for policy in policies:
if not policy:
continue
policy_name = policy.split('\t')[1]
if policy_name == self._name:
return True
return False
def set(self):
args = ['set_policy']
args.append(self._name)
args.append(self._pattern)
args.append(json.dumps(self._tags))
args.append('--priority')
args.append(self._priority)
if self._apply_to != 'all':
args.append('--apply-to')
args.append(self._apply_to)
return self._exec(args)
def clear(self):
return self._exec(['clear_policy', self._name])
def main():
arg_spec = dict(
name=dict(required=True),
vhost=dict(default='/'),
pattern=dict(required=True),
apply_to=dict(default='all', choices=['all', 'exchanges', 'queues']),
tags=dict(type='dict', required=True),
priority=dict(default='0'),
node=dict(default='rabbit'),
state=dict(default='present', choices=['present', 'absent']),
)
module = AnsibleModule(
argument_spec=arg_spec,
supports_check_mode=True
)
name = module.params['name']
state = module.params['state']
rabbitmq_policy = RabbitMqPolicy(module, name)
result = dict(changed=False, name=name, state=state)
if rabbitmq_policy.list():
if state == 'absent':
rabbitmq_policy.clear()
result['changed'] = True
else:
result['changed'] = False
elif state == 'present':
rabbitmq_policy.set()
result['changed'] = True
module.exit_json(**result)
if __name__ == '__main__':
main()
| gpl-3.0 | -6,817,297,717,851,702,000 | 8,534,149,753,409,788,000 | 25.994048 | 92 | 0.590959 | false |
knoguchi/kenix-scm | server/lib/boto/swf/layer1_decisions.py | 18 | 11934 | """
Helper class for creating decision responses.
"""
class Layer1Decisions(object):
"""
Use this object to build a list of decisions for a decision response.
    Each method call will append a new decision. Retrieve the list
of decisions from the _data attribute.
"""
def __init__(self):
self._data = []
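    # Illustrative usage (sketch; ``layer1`` is assumed to be a
    # boto.swf.layer1.Layer1 instance and the ids/names below are made up):
    #
    #   d = Layer1Decisions()
    #   d.schedule_activity_task('my-activity-id', 'MyActivityType', '1.0',
    #                            task_list='my-task-list')
    #   layer1.respond_decision_task_completed(task_token, decisions=d._data)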
def schedule_activity_task(self,
activity_id,
activity_type_name,
activity_type_version,
task_list=None,
control=None,
heartbeat_timeout=None,
schedule_to_close_timeout=None,
schedule_to_start_timeout=None,
start_to_close_timeout=None,
input=None):
"""
Schedules an activity task.
:type activity_id: string
:param activity_id: The activityId of the type of the activity
being scheduled.
:type activity_type_name: string
:param activity_type_name: The name of the type of the activity
being scheduled.
:type activity_type_version: string
:param activity_type_version: The version of the type of the
activity being scheduled.
:type task_list: string
:param task_list: If set, specifies the name of the task list in
which to schedule the activity task. If not specified, the
defaultTaskList registered with the activity type will be used.
Note: a task list for this activity task must be specified either
as a default for the activity type or through this field. If
neither this field is set nor a default task list was specified
at registration time then a fault will be returned.
"""
o = {}
o['decisionType'] = 'ScheduleActivityTask'
attrs = o['scheduleActivityTaskDecisionAttributes'] = {}
attrs['activityId'] = activity_id
attrs['activityType'] = {
'name': activity_type_name,
'version': activity_type_version,
}
if task_list is not None:
attrs['taskList'] = {'name': task_list}
if control is not None:
attrs['control'] = control
if heartbeat_timeout is not None:
attrs['heartbeatTimeout'] = heartbeat_timeout
if schedule_to_close_timeout is not None:
attrs['scheduleToCloseTimeout'] = schedule_to_close_timeout
if schedule_to_start_timeout is not None:
attrs['scheduleToStartTimeout'] = schedule_to_start_timeout
if start_to_close_timeout is not None:
attrs['startToCloseTimeout'] = start_to_close_timeout
if input is not None:
attrs['input'] = input
self._data.append(o)
def request_cancel_activity_task(self, activity_id):
"""
Attempts to cancel a previously scheduled activity task. If
the activity task was scheduled but has not been assigned to a
worker, then it will be canceled. If the activity task was
already assigned to a worker, then the worker will be informed
that cancellation has been requested in the response to
RecordActivityTaskHeartbeat.
"""
o = {}
o['decisionType'] = 'RequestCancelActivityTask'
attrs = o['requestCancelActivityTaskDecisionAttributes'] = {}
attrs['activityId'] = activity_id
self._data.append(o)
def record_marker(self, marker_name, details=None):
"""
Records a MarkerRecorded event in the history. Markers can be
used for adding custom information in the history for instance
to let deciders know that they do not need to look at the
history beyond the marker event.
"""
o = {}
o['decisionType'] = 'RecordMarker'
attrs = o['recordMarkerDecisionAttributes'] = {}
attrs['markerName'] = marker_name
if details is not None:
attrs['details'] = details
self._data.append(o)
def complete_workflow_execution(self, result=None):
"""
Closes the workflow execution and records a WorkflowExecutionCompleted
event in the history
"""
o = {}
o['decisionType'] = 'CompleteWorkflowExecution'
attrs = o['completeWorkflowExecutionDecisionAttributes'] = {}
if result is not None:
attrs['result'] = result
self._data.append(o)
def fail_workflow_execution(self, reason=None, details=None):
"""
Closes the workflow execution and records a
WorkflowExecutionFailed event in the history.
"""
o = {}
o['decisionType'] = 'FailWorkflowExecution'
attrs = o['failWorkflowExecutionDecisionAttributes'] = {}
if reason is not None:
attrs['reason'] = reason
if details is not None:
attrs['details'] = details
self._data.append(o)
def cancel_workflow_executions(self, details=None):
"""
Closes the workflow execution and records a WorkflowExecutionCanceled
event in the history.
"""
o = {}
o['decisionType'] = 'CancelWorkflowExecution'
attrs = o['cancelWorkflowExecutionsDecisionAttributes'] = {}
if details is not None:
attrs['details'] = details
self._data.append(o)
def continue_as_new_workflow_execution(self,
child_policy=None,
execution_start_to_close_timeout=None,
input=None,
tag_list=None,
task_list=None,
start_to_close_timeout=None,
workflow_type_version=None):
"""
Closes the workflow execution and starts a new workflow execution of
the same type using the same workflow id and a unique run Id. A
WorkflowExecutionContinuedAsNew event is recorded in the history.
"""
o = {}
o['decisionType'] = 'ContinueAsNewWorkflowExecution'
attrs = o['continueAsNewWorkflowExecutionDecisionAttributes'] = {}
if child_policy is not None:
attrs['childPolicy'] = child_policy
if execution_start_to_close_timeout is not None:
attrs['executionStartToCloseTimeout'] = execution_start_to_close_timeout
if input is not None:
attrs['input'] = input
if tag_list is not None:
attrs['tagList'] = tag_list
if task_list is not None:
attrs['taskList'] = {'name': task_list}
if start_to_close_timeout is not None:
attrs['startToCloseTimeout'] = start_to_close_timeout
if workflow_type_version is not None:
attrs['workflowTypeVersion'] = workflow_type_version
self._data.append(o)
def start_timer(self,
start_to_fire_timeout,
timer_id,
control=None):
"""
Starts a timer for this workflow execution and records a TimerStarted
event in the history. This timer will fire after the specified delay
and record a TimerFired event.
"""
o = {}
o['decisionType'] = 'StartTimer'
attrs = o['startTimerDecisionAttributes'] = {}
attrs['startToFireTimeout'] = start_to_fire_timeout
attrs['timerId'] = timer_id
if control is not None:
attrs['control'] = control
self._data.append(o)
def cancel_timer(self, timer_id):
"""
Cancels a previously started timer and records a TimerCanceled
event in the history.
"""
o = {}
o['decisionType'] = 'CancelTimer'
attrs = o['cancelTimerDecisionAttributes'] = {}
attrs['timerId'] = timer_id
self._data.append(o)
def signal_external_workflow_execution(self,
workflow_id,
signal_name,
run_id=None,
control=None,
input=None):
"""
Requests a signal to be delivered to the specified external workflow
execution and records a SignalExternalWorkflowExecutionInitiated
event in the history.
"""
o = {}
o['decisionType'] = 'SignalExternalWorkflowExecution'
attrs = o['signalExternalWorkflowExecutionDecisionAttributes'] = {}
attrs['workflowId'] = workflow_id
attrs['signalName'] = signal_name
if run_id is not None:
attrs['runId'] = run_id
if control is not None:
attrs['control'] = control
if input is not None:
attrs['input'] = input
self._data.append(o)
def request_cancel_external_workflow_execution(self,
workflow_id,
control=None,
run_id=None):
"""
Requests that a request be made to cancel the specified
external workflow execution and records a
RequestCancelExternalWorkflowExecutionInitiated event in the
history.
"""
o = {}
o['decisionType'] = 'RequestCancelExternalWorkflowExecution'
attrs = o['requestCancelExternalWorkflowExecutionDecisionAttributes'] = {}
attrs['workflowId'] = workflow_id
if control is not None:
attrs['control'] = control
if run_id is not None:
attrs['runId'] = run_id
self._data.append(o)
def start_child_workflow_execution(self,
workflow_type_name,
workflow_type_version,
workflow_id,
child_policy=None,
control=None,
execution_start_to_close_timeout=None,
input=None,
tag_list=None,
task_list=None,
task_start_to_close_timeout=None):
"""
Requests that a child workflow execution be started and
records a StartChildWorkflowExecutionInitiated event in the
history. The child workflow execution is a separate workflow
execution with its own history.
"""
o = {}
o['decisionType'] = 'StartChildWorkflowExecution'
attrs = o['startChildWorkflowExecutionDecisionAttributes'] = {}
attrs['workflowType'] = {
'name': workflow_type_name,
'version': workflow_type_version,
}
attrs['workflowId'] = workflow_id
if child_policy is not None:
attrs['childPolicy'] = child_policy
if control is not None:
attrs['control'] = control
if execution_start_to_close_timeout is not None:
attrs['executionStartToCloseTimeout'] = execution_start_to_close_timeout
if input is not None:
attrs['input'] = input
if tag_list is not None:
attrs['tagList'] = tag_list
if task_list is not None:
attrs['taskList'] = {'name': task_list}
if task_start_to_close_timeout is not None:
attrs['taskStartToCloseTimeout'] = task_start_to_close_timeout
self._data.append(o)
| apache-2.0 | -5,994,713,252,639,349,000 | -5,276,776,305,425,389,000 | 40.581882 | 84 | 0.550025 | false |
JingJunYin/tensorflow | tensorflow/tools/api/generator/create_python_api_test.py | 32 | 2857 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Tests for create_python_api."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import imp
import sys
from tensorflow.python.platform import test
from tensorflow.python.util.tf_export import tf_export
from tensorflow.tools.api.generator import create_python_api
@tf_export('test_op', 'test_op1')
def test_op():
pass
@tf_export('TestClass', 'NewTestClass')
class TestClass(object):
pass
_TEST_CONSTANT = 5
_MODULE_NAME = 'test.tensorflow.test_module'
class CreatePythonApiTest(test.TestCase):
def setUp(self):
# Add fake op to a module that has 'tensorflow' in the name.
sys.modules[_MODULE_NAME] = imp.new_module(_MODULE_NAME)
setattr(sys.modules[_MODULE_NAME], 'test_op', test_op)
setattr(sys.modules[_MODULE_NAME], 'TestClass', TestClass)
test_op.__module__ = _MODULE_NAME
TestClass.__module__ = _MODULE_NAME
tf_export('consts._TEST_CONSTANT').export_constant(
_MODULE_NAME, '_TEST_CONSTANT')
def tearDown(self):
del sys.modules[_MODULE_NAME]
def testFunctionImportIsAdded(self):
imports = create_python_api.get_api_imports()
expected_import = (
'from test.tensorflow.test_module import test_op as test_op1')
self.assertTrue(
expected_import in str(imports),
msg='%s not in %s' % (expected_import, str(imports)))
expected_import = 'from test.tensorflow.test_module import test_op'
self.assertTrue(
expected_import in str(imports),
msg='%s not in %s' % (expected_import, str(imports)))
def testClassImportIsAdded(self):
imports = create_python_api.get_api_imports()
expected_import = 'from test.tensorflow.test_module import TestClass'
self.assertTrue(
'TestClass' in str(imports),
msg='%s not in %s' % (expected_import, str(imports)))
def testConstantIsAdded(self):
imports = create_python_api.get_api_imports()
expected = 'from test.tensorflow.test_module import _TEST_CONSTANT'
self.assertTrue(expected in str(imports),
msg='%s not in %s' % (expected, str(imports)))
if __name__ == '__main__':
test.main()
| apache-2.0 | 5,660,658,074,855,745,000 | -5,851,355,702,000,498,000 | 32.22093 | 79 | 0.679734 | false |
uhlik/blendmaxwell | mxs.py | 2 | 222633 | #!/Library/Frameworks/Python.framework/Versions/3.5/bin/python3
# -*- coding: utf-8 -*-
# The MIT License (MIT)
#
# Copyright (c) 2015 Jakub Uhlík
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is furnished
# to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
import os
import platform
import datetime
import struct
import math
import sys
import numpy
from .log import log, LogStyles
from . import utils
s = platform.system()
if(s == 'Darwin'):
pass
elif(s == 'Linux'):
try:
from pymaxwell import *
except ImportError:
mp = os.environ.get("MAXWELL3_ROOT")
if(not mp):
raise OSError("missing MAXWELL3_ROOT environment variable")
pp = os.path.abspath(os.path.join(mp, 'python', 'pymaxwell', 'python3.5'))
if(not os.path.exists(pp)):
raise OSError("pymaxwell for python 3.5 does not exist ({})".format(pp))
sys.path.insert(0, pp)
from pymaxwell import *
elif(s == 'Windows'):
try:
from pymaxwell import *
except ImportError:
mp = os.environ.get("MAXWELL3_ROOT")
if(not mp):
raise OSError("missing MAXWELL3_ROOT environment variable")
pp = os.path.abspath(os.path.join(mp, 'python', 'pymaxwell', 'python3.5'))
if(not os.path.exists(pp)):
raise OSError("pymaxwell for python 3.5 does not exist ({})".format(pp))
sys.path.insert(0, pp)
os.environ['PATH'] = ';'.join([mp, os.environ['PATH']])
from pymaxwell import *
def read_mxm_preview(path):
import numpy
s = Cmaxwell(mwcallback)
m = s.readMaterial(path)
a, _ = m.getPreview()
r = numpy.copy(a)
return r
def material_preview_scene(scene, tmp_dir, quality, ):
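    """Load the template preview MXS, swap the material slot named 'preview'
    for tmp_dir/material.mxm, set the engine quality and output paths, and
    write the result to tmp_dir/scene.mxs. Returns the written path, or None
    on failure."""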
s = Cmaxwell(mwcallback)
log('reading scene: {}'.format(scene), 2)
ok = s.readMXS(scene)
if(not ok):
log('error reading scene: {}'.format(scene), 2, LogStyles.ERROR, )
return None
def get_material_names(s):
it = CmaxwellMaterialIterator()
o = it.first(s)
l = []
while not o.isNull():
name = o.getName()
l.append(name)
o = it.next()
return l
names = get_material_names(s)
for n in names:
if(n.lower() == 'preview'):
break
log('swapping material: {}'.format(n), 2)
material = s.getMaterial(n)
p = os.path.join(tmp_dir, 'material.mxm')
material.read(p)
material.forceToWriteIntoScene()
log('setting parameters..', 2)
s.setRenderParameter('ENGINE', bytes(quality, encoding='UTF-8'))
exr = os.path.join(tmp_dir, "render.exr")
s.setPath('RENDER', exr, 32)
s.setRenderParameter('DO NOT SAVE MXI FILE', False)
s.setRenderParameter('DO NOT SAVE IMAGE FILE', False)
src_dir, _ = os.path.split(scene)
ok = s.addSearchingPath(src_dir)
sp = os.path.join(tmp_dir, "scene.mxs")
log('writing scene: {}'.format(sp), 2)
ok = s.writeMXS(sp)
if(not ok):
log('error writing scene: {}'.format(sp), 2, LogStyles.ERROR, )
return None
log('done.', 2)
return sp
def material_preview_mxi(tmp_dir):
mp = os.path.join(tmp_dir, 'render.mxi')
ep = os.path.join(tmp_dir, 'render.exr')
import numpy
a = numpy.zeros((1, 1, 3), dtype=numpy.float, )
if(os.path.exists(mp)):
log('reading mxi: {}'.format(mp), 2)
i = CmaxwellMxi()
i.read(mp)
a, _ = i.getRenderBuffer(32)
elif(os.path.exists(ep)):
log('reading exr: {}'.format(ep), 2)
i = CmaxwellMxi()
i.readImage(ep)
i.write(mp)
a, _ = i.getRenderBuffer(32)
else:
log('image not found..', 2)
return a
def viewport_render_scene(tmp_dir, quality, ):
s = Cmaxwell(mwcallback)
p = os.path.join(tmp_dir, "scene.mxs")
ok = s.readMXS(p)
if(not ok):
return False
s.setRenderParameter('ENGINE', bytes(quality, encoding='UTF-8'))
mxi = os.path.join(tmp_dir, "render.mxi")
s.setRenderParameter('MXI FULLNAME', bytes(mxi, encoding='UTF-8'))
exr = os.path.join(tmp_dir, "render.exr")
s.setPath('RENDER', exr, 32)
s.setRenderParameter('DO NOT SAVE MXI FILE', False)
s.setRenderParameter('DO NOT SAVE IMAGE FILE', False)
# turn off channels
s.setRenderParameter('EMBED CHANNELS', 1)
ls = ['DO ALPHA CHANNEL', 'DO IDOBJECT CHANNEL', 'DO IDMATERIAL CHANNEL', 'DO SHADOW PASS CHANNEL', 'DO MOTION CHANNEL',
'DO ROUGHNESS CHANNEL', 'DO FRESNEL CHANNEL', 'DO NORMALS CHANNEL', 'DO POSITION CHANNEL', 'DO ZBUFFER CHANNEL',
'DO DEEP CHANNEL', 'DO UV CHANNEL', 'DO ALPHA CUSTOM CHANNEL', 'DO REFLECTANCE CHANNEL', ]
for n in ls:
s.setRenderParameter(n, 0)
ok = s.writeMXS(p)
if(not ok):
return False
return True
def viewport_render_mxi(tmp_dir):
ep = os.path.join(tmp_dir, 'render2.exr')
a = numpy.zeros((1, 1, 3), dtype=numpy.float, )
if(os.path.exists(ep)):
log('reading exr: {}'.format(ep), 2)
i = CmaxwellMxi()
i.readImage(ep)
# i.write(mp)
a, _ = i.getRenderBuffer(32)
else:
log('image not found..', 2, LogStyles.ERROR)
return a
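# A minimal sketch of how the two viewport helpers above are meant to be used
# together (illustrative only; the directory and quality value are hypothetical):
#
#   tmp_dir = '/tmp/blendmaxwell_viewport'        # must already contain scene.mxs
#   ok = viewport_render_scene(tmp_dir, 'RS1')    # patch the scene for a draft render
#   pixels = viewport_render_mxi(tmp_dir)         # later, read back render2.exr as a float buffer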
class MXSWriter():
def __init__(self, path, append=False, ):
"""Create scene or load existing.
path string (path)
append bool
"""
if(__name__ != "__main__"):
if(platform.system() == 'Darwin'):
raise ImportError("No pymaxwell directly in Blender on Mac OS X..")
log(self.__class__.__name__, 1, LogStyles.MESSAGE, prefix="* ", )
self.path = path
self.mxs = Cmaxwell(mwcallback)
pid = utils.get_plugin_id()
if(pid != ""):
# write here directly, even though it is also part of scene data, but api change just for this is pointless..
self.mxs.setPluginID(pid)
if(append):
log("appending to existing scene..", 2, prefix="* ", )
self.mxs.readMXS(self.path)
else:
log("creating new scene..", 2, prefix="* ", )
self.mgr = CextensionManager.instance()
self.mgr.loadAllExtensions()
def write(self):
"""Write scene fo file.
(no parameters..)
"""
log("saving scene..", 2)
ok = self.mxs.writeMXS(self.path)
log("done.", 2)
return ok
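    # Minimal usage sketch (illustrative only; the path and object name are
    # hypothetical, everything else is the API defined below):
    #
    #   w = MXSWriter('/tmp/scene.mxs')
    #   w.empty('my_empty', matrix=None, motion=None)
    #   ok = w.write()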
def erase_unused_materials(self):
self.mxs.eraseUnusedMaterials()
def set_base_and_pivot(self, o, matrix=None, motion=None, ):
"""Convert float tuples to Cbases and set to object.
o CmaxwellObject
base ((3 float), (3 float), (3 float), (3 float)) or None
pivot ((3 float), (3 float), (3 float), (3 float)) or None
"""
if(matrix is None):
matrix = ([[0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]],
[[0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0])
base = matrix[0]
pivot = matrix[1]
l = matrix[2]
r = matrix[3]
s = matrix[4]
b = Cbase()
b.origin = Cvector(*base[0])
b.xAxis = Cvector(*base[1])
b.yAxis = Cvector(*base[2])
b.zAxis = Cvector(*base[3])
p = Cbase()
p.origin = Cvector(*pivot[0])
p.xAxis = Cvector(*pivot[1])
p.yAxis = Cvector(*pivot[2])
p.zAxis = Cvector(*pivot[3])
o.setBaseAndPivot(b, p)
o.setPivotPosition(Cvector(*l))
o.setPivotRotation(Cvector(*r))
o.setPosition(Cvector(*l))
o.setRotation(Cvector(*r))
o.setScale(Cvector(*s))
if(motion is not None):
for(t, _, b, p) in motion:
bb = Cbase()
bb.origin = Cvector(*b[0])
bb.xAxis = Cvector(*b[1])
bb.yAxis = Cvector(*b[2])
bb.zAxis = Cvector(*b[3])
pp = Cbase()
pp.origin = Cvector(*p[0])
pp.xAxis = Cvector(*p[1])
pp.yAxis = Cvector(*p[2])
pp.zAxis = Cvector(*p[3])
o.setBaseAndPivot(bb, pp, t, )
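    # Note on the 'matrix' argument used throughout this class: it is a 5-tuple
    # (base, pivot, location, rotation, scale), where base and pivot are
    # (origin, x_axis, y_axis, z_axis) tuples of 3-float vectors and the last
    # three items are plain 3-float vectors. 'motion' is an optional list of
    # (time, _, base, pivot) steps used for motion blur. Illustrative identity base:
    #
    #   base = ([0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0])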
def set_object_props(self, o, hide=False, opacity=100, cid=(1.0, 1.0, 1.0), hcam=False, hcamsc=False, hgi=False, hrr=False, hzcp=False, blocked_emitters=None, ):
"""Set common object properties.
o CmaxwellObject
hide bool
opacity float
cid (float, float, float) 0.0 - 1.0 rgb
hcam bool
hcamsc bool
hgi bool
hrr bool
hzcp bool
blocked_emitters list of blocked emitter object names
"""
if(hide):
o.setHide(hide)
if(opacity != 100.0):
o.setOpacity(opacity)
c = Crgb()
c.assign(*cid)
o.setColorID(c)
if(hcam):
o.setHideToCamera(True)
if(hcamsc):
o.setHideToCameraInShadowsPass(True)
if(hgi):
o.setHideToGI(True)
if(hrr):
o.setHideToReflectionsRefractions(True)
if(hzcp):
o.excludeOfCutPlanes(True)
if(blocked_emitters):
for n in blocked_emitters:
ok = o.addExcludedLight(n)
def texture_data_to_mxparams(self, name, data, mxparams, ):
"""Create CtextureMap, fill with parameters and put into mxparams.
name string
data dict {'type': string,
'path': string,
'channel': int,
'use_global_map': bool,
'tile_method_type': [bool, bool],
'tile_method_units': int,
'repeat': [float, float],
'mirror': [bool, bool],
'offset': [float, float],
'rotation': float,
'invert': bool,
'alpha_only': bool,
'interpolation': bool,
'brightness': float,
'contrast': float,
'saturation': float,
'hue': float,
'rgb_clamp': [float, float], }
mxparams mxparams
"""
d = data
if(d is None):
return
t = CtextureMap()
t.setPath(d['path'])
v = Cvector2D()
v.assign(*d['repeat'])
t.scale = v
v = Cvector2D()
v.assign(*d['offset'])
t.offset = v
t.rotation = d['rotation']
t.uvwChannelID = d['channel']
t.uIsTiled = d['tile_method_type'][0]
t.vIsTiled = d['tile_method_type'][1]
t.uIsMirrored = d['mirror'][0]
t.vIsMirrored = d['mirror'][1]
t.invert = d['invert']
# t.doGammaCorrection = 0
t.useAbsoluteUnits = d['tile_method_units']
t.normalMappingFlipRed = d['normal_mapping_flip_red']
t.normalMappingFlipGreen = d['normal_mapping_flip_green']
t.normalMappingFullRangeBlue = d['normal_mapping_full_range_blue']
t.useAlpha = d['alpha_only']
t.typeInterpolation = d['interpolation']
t.saturation = d['saturation'] / 100
t.contrast = d['contrast'] / 100
t.brightness = d['brightness'] / 100
t.hue = d['hue'] / 180
t.clampMin = d['rgb_clamp'][0] / 255
t.clampMax = d['rgb_clamp'][1] / 255
t.useGlobalMap = d['use_global_map']
# t.cosA = 1.000000
# t.sinA = 0.000000
ok = mxparams.setTextureMap(name, t)
return mxparams
def texture(self, d, ):
"""Create CtextureMap from parameters
d dict
"""
if(d is None):
return
s = self.mxs
t = CtextureMap()
t.setPath(d['path'])
t.uvwChannelID = d['channel']
t.brightness = d['brightness'] / 100
t.contrast = d['contrast'] / 100
t.saturation = d['saturation'] / 100
t.hue = d['hue'] / 180
t.useGlobalMap = d['use_global_map']
t.useAbsoluteUnits = d['tile_method_units']
t.uIsTiled = d['tile_method_type'][0]
t.vIsTiled = d['tile_method_type'][1]
t.uIsMirrored = d['mirror'][0]
t.vIsMirrored = d['mirror'][1]
vec = Cvector2D()
vec.assign(d['offset'][0], d['offset'][1])
t.offset = vec
t.rotation = d['rotation']
t.invert = d['invert']
t.useAlpha = d['alpha_only']
if(d['interpolation']):
t.typeInterpolation = 1
else:
t.typeInterpolation = 0
t.clampMin = d['rgb_clamp'][0] / 255
t.clampMax = d['rgb_clamp'][1] / 255
vec = Cvector2D()
vec.assign(d['repeat'][0], d['repeat'][1])
t.scale = vec
t.normalMappingFlipRed = d['normal_mapping_flip_red']
t.normalMappingFlipGreen = d['normal_mapping_flip_green']
t.normalMappingFullRangeBlue = d['normal_mapping_full_range_blue']
for i, pt in enumerate(d['procedural']):
if(pt['use'] == 'BRICK'):
e = self.mgr.createDefaultTextureExtension('Brick')
p = e.getExtensionData()
p.setFloat('Blend procedural', pt['blending_factor'])
p.setFloat('Brick width', pt['brick_brick_width'])
p.setFloat('Brick height', pt['brick_brick_height'])
p.setInt('Brick offset', pt['brick_brick_offset'])
p.setInt('Random offset', pt['brick_random_offset'])
p.setByte('Double brick', pt['brick_double_brick'])
p.setFloat('Small brick width', pt['brick_small_brick_width'])
p.setByte('Round corners', pt['brick_round_corners'])
p.setFloat('Boundary sharpness U', pt['brick_boundary_sharpness_u'])
p.setFloat('Boundary sharpness V', pt['brick_boundary_sharpness_v'])
p.setInt('Boundary noise detail', pt['brick_boundary_noise_detail'])
p.setFloat('Boundary noise region U', pt['brick_boundary_noise_region_u'])
p.setFloat('Boundary noise region V', pt['brick_boundary_noise_region_v'])
p.setUInt('Seed', pt['brick_seed'])
p.setByte('Random rotation', pt['brick_random_rotation'])
p.setInt('Color variation', pt['brick_color_variation'])
c = Crgb()
c.assign(*pt['brick_brick_color_0'])
p.setRgb('Brick color 0', c)
self.texture_data_to_mxparams('Brick texture 0', pt['brick_brick_texture_0'], p, )
p.setInt('Sampling factor 0', pt['brick_sampling_factor_0'])
p.setInt('Weight 0', pt['brick_weight_0'])
c = Crgb()
c.assign(*pt['brick_brick_color_1'])
p.setRgb('Brick color 1', c)
self.texture_data_to_mxparams('Brick texture 1', pt['brick_brick_texture_1'], p, )
p.setInt('Sampling factor 1', pt['brick_sampling_factor_1'])
p.setInt('Weight 1', pt['brick_weight_1'])
c = Crgb()
c.assign(*pt['brick_brick_color_2'])
p.setRgb('Brick color 2', c)
self.texture_data_to_mxparams('Brick texture 2', pt['brick_brick_texture_2'], p, )
p.setInt('Sampling factor 2', pt['brick_sampling_factor_2'])
p.setInt('Weight 2', pt['brick_weight_2'])
p.setFloat('Mortar thickness', pt['brick_mortar_thickness'])
c = Crgb()
c.assign(*pt['brick_mortar_color'])
p.setRgb('Mortar color', c)
self.texture_data_to_mxparams('Mortar texture', pt['brick_mortar_texture'], p, )
t.addProceduralTexture(p)
elif(pt['use'] == 'CHECKER'):
e = self.mgr.createDefaultTextureExtension('Checker')
p = e.getExtensionData()
p.setFloat('Blend procedural', pt['blending_factor'])
c = Crgb()
c.assign(*pt['checker_color_0'])
p.setRgb('Color0', c)
c = Crgb()
c.assign(*pt['checker_color_1'])
p.setRgb('Color1', c)
p.setUInt('Number of elements U', pt['checker_number_of_elements_u'])
p.setUInt('Number of elements V', pt['checker_number_of_elements_v'])
p.setFloat('Transition sharpness', pt['checker_transition_sharpness'])
p.setUInt('Fall-off', pt['checker_falloff'])
t.addProceduralTexture(p)
elif(pt['use'] == 'CIRCLE'):
e = self.mgr.createDefaultTextureExtension('Circle')
p = e.getExtensionData()
p.setFloat('Blend procedural', pt['blending_factor'])
c = Crgb()
c.assign(*pt['circle_background_color'])
p.setRgb('Background color', c)
c = Crgb()
c.assign(*pt['circle_circle_color'])
p.setRgb('Circle color', c)
p.setFloat('RadiusU', pt['circle_radius_u'])
p.setFloat('RadiusV', pt['circle_radius_v'])
p.setFloat('Transition factor', pt['circle_transition_factor'])
p.setUInt('Fall-off', pt['circle_falloff'])
t.addProceduralTexture(p)
elif(pt['use'] == 'GRADIENT3'):
e = self.mgr.createDefaultTextureExtension('Gradient3')
p = e.getExtensionData()
p.setFloat('Blend procedural', pt['blending_factor'])
p.setByte('Gradient U', pt['gradient3_gradient_u'])
c = Crgb()
c.assign(*pt['gradient3_color0_u'])
p.setRgb('Color0 U', c)
c = Crgb()
c.assign(*pt['gradient3_color1_u'])
p.setRgb('Color1 U', c)
c = Crgb()
c.assign(*pt['gradient3_color2_u'])
p.setRgb('Color2 U', c)
p.setUInt('Gradient type U', pt['gradient3_gradient_type_u'])
p.setFloat('Color1 U position', pt['gradient3_color1_u_position'])
p.setByte('Gradient V', pt['gradient3_gradient_v'])
c = Crgb()
c.assign(*pt['gradient3_color0_v'])
p.setRgb('Color0 V', c)
c = Crgb()
c.assign(*pt['gradient3_color1_v'])
p.setRgb('Color1 V', c)
c = Crgb()
c.assign(*pt['gradient3_color2_v'])
p.setRgb('Color2 V', c)
p.setUInt('Gradient type V', pt['gradient3_gradient_type_v'])
p.setFloat('Color1 V position', pt['gradient3_color1_v_position'])
t.addProceduralTexture(p)
elif(pt['use'] == 'GRADIENT'):
e = self.mgr.createDefaultTextureExtension('Gradient')
p = e.getExtensionData()
p.setFloat('Blend procedural', pt['blending_factor'])
p.setByte('Gradient U', pt['gradient_gradient_u'])
c = Crgb()
c.assign(*pt['gradient_color0_u'])
p.setRgb('Color0 U', c)
c = Crgb()
c.assign(*pt['gradient_color1_u'])
p.setRgb('Color1 U', c)
p.setUInt('Gradient type U', pt['gradient_gradient_type_u'])
p.setFloat('Transition factor U', pt['gradient_transition_factor_u'])
p.setByte('Gradient V', pt['gradient_gradient_v'])
c = Crgb()
c.assign(*pt['gradient_color0_v'])
p.setRgb('Color0 V', c)
c = Crgb()
c.assign(*pt['gradient_color1_v'])
p.setRgb('Color1 V', c)
p.setUInt('Gradient type V', pt['gradient_gradient_type_v'])
p.setFloat('Transition factor V', pt['gradient_transition_factor_v'])
t.addProceduralTexture(p)
elif(pt['use'] == 'GRID'):
e = self.mgr.createDefaultTextureExtension('Grid')
p = e.getExtensionData()
p.setFloat('Blend procedural', pt['blending_factor'])
c = Crgb()
c.assign(*pt['grid_boundary_color'])
p.setRgb('Boundary color', c)
c = Crgb()
c.assign(*pt['grid_cell_color'])
p.setRgb('Cell color', c)
p.setFloat('Cell width', pt['grid_cell_width'])
p.setFloat('Cell height', pt['grid_cell_height'])
if(pt['grid_horizontal_lines']):
p.setFloat('Boundary thickness U', pt['grid_boundary_thickness_u'])
else:
p.setFloat('Boundary thickness U', 0.0)
if(pt['grid_vertical_lines']):
p.setFloat('Boundary thickness V', pt['grid_boundary_thickness_v'])
else:
p.setFloat('Boundary thickness V', 0.0)
p.setFloat('Transition sharpness', pt['grid_transition_sharpness'])
p.setUInt('Fall-off', pt['grid_falloff'])
t.addProceduralTexture(p)
elif(pt['use'] == 'MARBLE'):
e = self.mgr.createDefaultTextureExtension('Marble')
p = e.getExtensionData()
p.setFloat('Blend procedural', pt['blending_factor'])
p.setUInt('Coordinates type', pt['marble_coordinates_type'])
c = Crgb()
c.assign(*pt['marble_color0'])
p.setRgb('Color0', c)
c = Crgb()
c.assign(*pt['marble_color1'])
p.setRgb('Color1', c)
c = Crgb()
c.assign(*pt['marble_color2'])
p.setRgb('Color2', c)
p.setFloat('Frequency', pt['marble_frequency'])
p.setFloat('Detail', pt['marble_detail'])
p.setInt('Octaves', pt['marble_octaves'])
p.setUInt('Seed', pt['marble_seed'])
t.addProceduralTexture(p)
elif(pt['use'] == 'NOISE'):
e = self.mgr.createDefaultTextureExtension('Noise')
p = e.getExtensionData()
p.setFloat('Blend procedural', pt['blending_factor'])
p.setUInt('Coordinates type', pt['noise_coordinates_type'])
c = Crgb()
c.assign(*pt['noise_noise_color'])
p.setRgb('Noise color', c)
c = Crgb()
c.assign(*pt['noise_background_color'])
p.setRgb('Background color', c)
p.setFloat('Detail', pt['noise_detail'])
p.setFloat('Persistance', pt['noise_persistance'])
p.setInt('Octaves', pt['noise_octaves'])
p.setFloat('Low value', pt['noise_low_value'])
p.setFloat('High value', pt['noise_high_value'])
p.setUInt('Seed', pt['noise_seed'])
t.addProceduralTexture(p)
elif(pt['use'] == 'VORONOI'):
e = self.mgr.createDefaultTextureExtension('Voronoi')
p = e.getExtensionData()
p.setFloat('Blend procedural', pt['blending_factor'])
p.setUInt('Coordinates type', pt['voronoi_coordinates_type'])
c = Crgb()
c.assign(*pt['voronoi_color0'])
p.setRgb('Color0', c)
c = Crgb()
c.assign(*pt['voronoi_color1'])
p.setRgb('Color1', c)
p.setInt('Detail', pt['voronoi_detail'])
p.setUInt('Distance', pt['voronoi_distance'])
p.setUInt('Combination', pt['voronoi_combination'])
p.setFloat('Low value', pt['voronoi_low_value'])
p.setFloat('High value', pt['voronoi_high_value'])
p.setUInt('Seed', pt['voronoi_seed'])
t.addProceduralTexture(p)
elif(pt['use'] == 'TILED'):
e = self.mgr.createDefaultTextureExtension('TiledTexture')
p = e.getExtensionData()
p.setFloat('Blend factor', pt['blending_factor'])
c = Crgb()
c.assign(*pt['tiled_base_color'])
p.setRgb('Base Color', c)
p.setByte('Use base color', pt['tiled_use_base_color'])
p.setString('Filename_mask', pt['tiled_token_mask'])
p.setString('Filename', pt['tiled_filename'])
# 'Map U tile range' UCHAR
# 'Map V tile range' UCHAR
t.addProceduralTexture(p)
elif(pt['use'] == 'WIREFRAME'):
e = self.mgr.createDefaultTextureExtension('WireframeTexture')
p = e.getExtensionData()
c = Crgb()
c.assign(*pt['wireframe_fill_color'])
p.setRgb('Fill Color', c)
c = Crgb()
c.assign(*pt['wireframe_edge_color'])
p.setRgb('Edge Color', c)
c = Crgb()
c.assign(*pt['wireframe_coplanar_edge_color'])
p.setRgb('Coplanar Edge Color', c)
p.setFloat('Edge Width', pt['wireframe_edge_width'])
p.setFloat('Coplanar Edge Width', pt['wireframe_coplanar_edge_width'])
p.setFloat('Coplanar Threshold', pt['wireframe_coplanar_threshold'])
t.addProceduralTexture(p)
else:
raise TypeError("{0} is unknown procedural texture type".format(pt['use']))
return t
def material_placeholder(self, n=None, ):
        if(n is None):
            n = 'MATERIAL_PLACEHOLDER'
s = self.mxs
m = s.createMaterial(n)
l = m.addLayer()
b = l.addBSDF()
r = b.getReflectance()
a = Cattribute()
a.activeType = MAP_TYPE_BITMAP
t = CtextureMap()
mgr = CextensionManager.instance()
mgr.loadAllExtensions()
e = mgr.createDefaultTextureExtension('Checker')
ch = e.getExtensionData()
ch.setUInt('Number of elements U', 32)
ch.setUInt('Number of elements V', 32)
t.addProceduralTexture(ch)
a.textureMap = t
r.setAttribute('color', a)
return m
def material_default(self, n, ):
s = self.mxs
m = s.createMaterial(n)
l = m.addLayer()
b = l.addBSDF()
return m
def material_external(self, d, ):
s = self.mxs
p = d['path']
t = s.readMaterial(p)
t.setName(d['name'])
m = s.addMaterial(t)
if(not d['embed']):
m.setReference(1, p)
return m
def material_custom(self, d, ):
s = self.mxs
m = s.createMaterial(d['name'])
d = d['data']
def global_props(d, m):
# global properties
if(d['override_map']):
t = self.texture(d['override_map'])
if(t is not None):
m.setGlobalMap(t)
if(d['bump_map_enabled']):
a = Cattribute()
a.activeType = MAP_TYPE_BITMAP
t = self.texture(d['bump_map'])
if(t is not None):
a.textureMap = t
if(d['bump_map_use_normal']):
a.value = d['bump_normal']
else:
a.value = d['bump']
m.setAttribute('bump', a)
m.setNormalMapState(d['bump_map_use_normal'])
m.setDispersion(d['dispersion'])
m.setMatteShadow(d['shadow'])
m.setMatte(d['matte'])
m.setNestedPriority(d['priority'])
c = Crgb()
c.assign(*d['id'])
m.setColorID(c)
if(d['active_display_map']):
t = self.texture(d['active_display_map'])
m.setActiveDisplayMap(t)
def displacement(d, m):
if(not d['enabled']):
return
m.enableDisplacement(True)
if(d['map'] is not None):
t = self.texture(d['map'])
m.setDisplacementMap(t)
m.setDisplacementCommonParameters(d['type'], d['subdivision'], int(d['smoothing']), d['offset'], d['subdivision_method'], d['uv_interpolation'], )
m.setHeightMapDisplacementParameters(d['height'], d['height_units'], d['adaptive'], )
v = Cvector(*d['v3d_scale'])
m.setVectorDisplacementParameters(v, d['v3d_transform'], d['v3d_rgb_mapping'], d['v3d_preset'], )
def add_bsdf(d, l):
b = l.addBSDF()
b.setName(d['name'])
bp = d['bsdf_props']
# weight
if(bp['weight_map_enabled']):
a = Cattribute()
a.activeType = MAP_TYPE_BITMAP
t = self.texture(bp['weight_map'])
if(t is not None):
a.textureMap = t
a.value = bp['weight']
else:
a = Cattribute()
a.activeType = MAP_TYPE_VALUE
a.value = bp['weight']
b.setWeight(a)
# enabled
if(not bp['visible']):
b.setState(False)
# ior
r = b.getReflectance()
if(bp['ior'] == 1):
# measured data
r.setActiveIorMode(1)
r.setComplexIor(bp['complex_ior'])
else:
if(bp['reflectance_0_map_enabled']):
a = Cattribute()
a.activeType = MAP_TYPE_BITMAP
t = self.texture(bp['reflectance_0_map'])
if(t is not None):
a.textureMap = t
a.rgb.assign(*bp['reflectance_0'])
else:
a = Cattribute()
a.activeType = MAP_TYPE_RGB
a.rgb.assign(*bp['reflectance_0'])
r.setAttribute('color', a)
if(bp['reflectance_90_map_enabled']):
a = Cattribute()
a.activeType = MAP_TYPE_BITMAP
t = self.texture(bp['reflectance_90_map'])
if(t is not None):
a.textureMap = t
a.rgb.assign(*bp['reflectance_90'])
else:
a = Cattribute()
a.activeType = MAP_TYPE_RGB
a.rgb.assign(*bp['reflectance_90'])
r.setAttribute('color.tangential', a)
if(bp['transmittance_map_enabled']):
a = Cattribute()
a.activeType = MAP_TYPE_BITMAP
t = self.texture(bp['transmittance_map'])
if(t is not None):
a.textureMap = t
a.rgb.assign(*bp['transmittance'])
else:
a = Cattribute()
a.activeType = MAP_TYPE_RGB
a.rgb.assign(*bp['transmittance'])
r.setAttribute('transmittance.color', a)
r.setAbsorptionDistance(bp['attenuation_units'], bp['attenuation'])
r.setIOR(bp['nd'], bp['abbe'])
if(bp['force_fresnel']):
r.enableForceFresnel(True)
r.setConductor(bp['k'])
if(bp['r2_enabled']):
r.setFresnelCustom(bp['r2_falloff_angle'], bp['r2_influence'], True, )
# surface
if(bp['roughness_map_enabled']):
a = Cattribute()
a.activeType = MAP_TYPE_BITMAP
t = self.texture(bp['roughness_map'])
if(t is not None):
a.textureMap = t
a.value = bp['roughness']
else:
a = Cattribute()
a.activeType = MAP_TYPE_VALUE
a.value = bp['roughness']
b.setAttribute('roughness', a)
if(bp['bump_map_enabled']):
a = Cattribute()
a.activeType = MAP_TYPE_BITMAP
t = self.texture(bp['bump_map'])
if(t is not None):
a.textureMap = t
if(bp['bump_map_use_normal']):
a.value = bp['bump_normal']
else:
a.value = bp['bump']
else:
a = Cattribute()
a.activeType = MAP_TYPE_VALUE
if(bp['bump_map_use_normal']):
a.value = bp['bump_normal']
else:
a.value = bp['bump']
b.setAttribute('bump', a)
b.setNormalMapState(bp['bump_map_use_normal'])
if(bp['anisotropy_map_enabled']):
a = Cattribute()
a.activeType = MAP_TYPE_BITMAP
t = self.texture(bp['anisotropy_map'])
if(t is not None):
a.textureMap = t
a.value = bp['anisotropy']
else:
a = Cattribute()
a.activeType = MAP_TYPE_VALUE
a.value = bp['anisotropy']
b.setAttribute('anisotropy', a)
if(bp['anisotropy_angle_map_enabled']):
a = Cattribute()
a.activeType = MAP_TYPE_BITMAP
t = self.texture(bp['anisotropy_angle_map'])
if(t is not None):
a.textureMap = t
a.value = bp['anisotropy_angle']
else:
a = Cattribute()
a.activeType = MAP_TYPE_VALUE
a.value = bp['anisotropy_angle']
b.setAttribute('angle', a)
# subsurface
a = Cattribute()
a.activeType = MAP_TYPE_RGB
a.rgb.assign(*bp['scattering'])
r.setAttribute('scattering', a)
r.setScatteringParameters(bp['coef'], bp['asymmetry'], bp['single_sided'])
if(bp['single_sided']):
if(bp['single_sided_map_enabled']):
a = Cattribute()
a.activeType = MAP_TYPE_BITMAP
t = self.texture(bp['single_sided_map'])
if(t is not None):
a.textureMap = t
a.value = bp['single_sided_value']
else:
a = Cattribute()
a.activeType = MAP_TYPE_VALUE
a.value = bp['single_sided_value']
r.setScatteringThickness(a)
r.setScatteringThicknessRange(bp['single_sided_min'], bp['single_sided_max'])
# coating
cp = d['coating']
if(cp['enabled']):
c = b.addCoating()
if(cp['thickness_map_enabled']):
a = Cattribute()
a.activeType = MAP_TYPE_BITMAP
t = self.texture(cp['thickness_map'])
if(t is not None):
a.textureMap = t
a.value = cp['thickness']
else:
a = Cattribute()
a.activeType = MAP_TYPE_VALUE
a.value = cp['thickness']
c.setThickness(a)
c.setThicknessRange(cp['thickness_map_min'], cp['thickness_map_max'])
r = c.getReflectance()
if(cp['ior'] == 1):
# measured data
r.setActiveIorMode(1)
r.setComplexIor(cp['complex_ior'])
else:
if(cp['reflectance_0_map_enabled']):
a = Cattribute()
a.activeType = MAP_TYPE_BITMAP
t = self.texture(cp['reflectance_0_map'])
if(t is not None):
a.textureMap = t
a.rgb.assign(*cp['reflectance_0'])
else:
a = Cattribute()
a.activeType = MAP_TYPE_RGB
a.rgb.assign(*cp['reflectance_0'])
r.setAttribute('color', a)
if(cp['reflectance_90_map_enabled']):
a = Cattribute()
a.activeType = MAP_TYPE_BITMAP
t = self.texture(cp['reflectance_90_map'])
if(t is not None):
a.textureMap = t
a.rgb.assign(*cp['reflectance_90'])
else:
a = Cattribute()
a.activeType = MAP_TYPE_RGB
a.rgb.assign(*cp['reflectance_90'])
r.setAttribute('color.tangential', a)
r.setIOR(cp['nd'], 1.0, )
if(cp['force_fresnel']):
r.enableForceFresnel(True)
r.setConductor(cp['k'])
if(cp['r2_enabled']):
r.setFresnelCustom(cp['r2_falloff_angle'], 0.0, True, )
def add_emitter(d, l):
e = l.createEmitter()
if(d['type'] == 0):
e.setLobeType(EMISSION_LOBE_DEFAULT)
elif(d['type'] == 1):
e.setLobeType(EMISSION_LOBE_IES)
e.setLobeIES(d['ies_data'])
e.setIESLobeIntensity(d['ies_intensity'])
elif(d['type'] == 2):
e.setLobeType(EMISSION_LOBE_SPOTLIGHT)
if(d['spot_map'] is not None):
t = self.texture(d['spot_map'])
if(t is not None):
e.setLobeImageProjectedMap(d['spot_map_enabled'], t)
e.setSpotConeAngle(d['spot_cone_angle'])
e.setSpotFallOffAngle(d['spot_falloff_angle'])
e.setSpotFallOffType(d['spot_falloff_type'])
e.setSpotBlur(d['spot_blur'])
if(d['emission'] == 0):
e.setActiveEmissionType(EMISSION_TYPE_PAIR)
ep = CemitterPair()
c = Crgb()
c.assign(*d['color'])
ep.rgb.assign(c)
ep.temperature = d['color_black_body']
ep.watts = d['luminance_power']
ep.luminousEfficacy = d['luminance_efficacy']
ep.luminousPower = d['luminance_output']
ep.illuminance = d['luminance_output']
ep.luminousIntensity = d['luminance_output']
ep.luminance = d['luminance_output']
e.setPair(ep)
if(d['luminance'] == 0):
u = EMISSION_UNITS_WATTS_AND_LUMINOUS_EFFICACY
elif(d['luminance'] == 1):
u = EMISSION_UNITS_LUMINOUS_POWER
elif(d['luminance'] == 2):
u = EMISSION_UNITS_ILLUMINANCE
elif(d['luminance'] == 3):
u = EMISSION_UNITS_LUMINOUS_INTENSITY
elif(d['luminance'] == 4):
u = EMISSION_UNITS_LUMINANCE
if(d['color_black_body_enabled']):
e.setActivePair(EMISSION_COLOR_TEMPERATURE, u)
else:
e.setActivePair(EMISSION_RGB, u)
elif(d['emission'] == 1):
e.setActiveEmissionType(EMISSION_TYPE_TEMPERATURE)
e.setTemperature(d['temperature_value'])
elif(d['emission'] == 2):
e.setActiveEmissionType(EMISSION_TYPE_MXI)
a = Cattribute()
a.activeType = MAP_TYPE_BITMAP
t = self.texture(d['hdr_map'])
if(t is not None):
a.textureMap = t
a.value = d['hdr_intensity']
e.setMXI(a)
e.setState(True)
def add_layer(d, m):
l = m.addLayer()
l.setName(d['name'])
lpd = d['layer_props']
if(not lpd['visible']):
l.setEnabled(False)
if(lpd['blending'] == 1):
l.setStackedBlendingMode(1)
if(lpd['opacity_map_enabled']):
a = Cattribute()
a.activeType = MAP_TYPE_BITMAP
t = self.texture(lpd['opacity_map'])
if(t is not None):
a.textureMap = t
a.value = lpd['opacity']
else:
a = Cattribute()
a.activeType = MAP_TYPE_VALUE
a.value = lpd['opacity']
l.setAttribute('weight', a)
epd = d['emitter']
if(epd['enabled']):
add_emitter(epd, l)
for b in d['bsdfs']:
add_bsdf(b, l)
global_props(d['global_props'], m)
displacement(d['displacement'], m)
for layer in d['layers']:
add_layer(layer, m)
return m
def material(self, d, ):
s = self.mxs
if(d['subtype'] == 'EXTERNAL'):
if(d['path'] == ''):
m = self.material_placeholder(d['name'])
else:
m = self.material_external(d)
if(d['override']):
# global properties
if(d['override_map']):
t = self.texture(d['override_map'])
if(t is not None):
m.setGlobalMap(t)
if(d['bump_map_enabled']):
a = Cattribute()
a.activeType = MAP_TYPE_BITMAP
t = self.texture(d['bump_map'])
if(t is not None):
a.textureMap = t
if(d['bump_map_use_normal']):
a.value = d['bump_normal']
else:
a.value = d['bump']
m.setAttribute('bump', a)
m.setNormalMapState(d['bump_map_use_normal'])
m.setDispersion(d['dispersion'])
m.setMatteShadow(d['shadow'])
m.setMatte(d['matte'])
m.setNestedPriority(d['priority'])
c = Crgb()
c.assign(*d['id'])
m.setColorID(c)
elif(d['subtype'] == 'EXTENSION'):
if(d['use'] == 'EMITTER'):
m = s.createMaterial(d['name'])
l = m.addLayer()
e = l.createEmitter()
if(d['emitter_type'] == 0):
e.setLobeType(EMISSION_LOBE_DEFAULT)
elif(d['emitter_type'] == 1):
e.setLobeType(EMISSION_LOBE_IES)
e.setLobeIES(d['emitter_ies_data'])
e.setIESLobeIntensity(d['emitter_ies_intensity'])
elif(d['emitter_type'] == 2):
e.setLobeType(EMISSION_LOBE_SPOTLIGHT)
if(d['emitter_spot_map'] is not None):
t = self.texture(d['emitter_spot_map'])
if(t is not None):
e.setLobeImageProjectedMap(d['emitter_spot_map_enabled'], t)
e.setSpotConeAngle(d['emitter_spot_cone_angle'])
e.setSpotFallOffAngle(d['emitter_spot_falloff_angle'])
e.setSpotFallOffType(d['emitter_spot_falloff_type'])
e.setSpotBlur(d['emitter_spot_blur'])
if(d['emitter_emission'] == 0):
e.setActiveEmissionType(EMISSION_TYPE_PAIR)
ep = CemitterPair()
c = Crgb()
c.assign(*d['emitter_color'])
ep.rgb.assign(c)
ep.temperature = d['emitter_color_black_body']
ep.watts = d['emitter_luminance_power']
ep.luminousEfficacy = d['emitter_luminance_efficacy']
ep.luminousPower = d['emitter_luminance_output']
ep.illuminance = d['emitter_luminance_output']
ep.luminousIntensity = d['emitter_luminance_output']
ep.luminance = d['emitter_luminance_output']
e.setPair(ep)
if(d['emitter_luminance'] == 0):
u = EMISSION_UNITS_WATTS_AND_LUMINOUS_EFFICACY
elif(d['emitter_luminance'] == 1):
u = EMISSION_UNITS_LUMINOUS_POWER
elif(d['emitter_luminance'] == 2):
u = EMISSION_UNITS_ILLUMINANCE
elif(d['emitter_luminance'] == 3):
u = EMISSION_UNITS_LUMINOUS_INTENSITY
elif(d['emitter_luminance'] == 4):
u = EMISSION_UNITS_LUMINANCE
if(d['emitter_color_black_body_enabled']):
e.setActivePair(EMISSION_COLOR_TEMPERATURE, u)
else:
e.setActivePair(EMISSION_RGB, u)
elif(d['emitter_emission'] == 1):
e.setActiveEmissionType(EMISSION_TYPE_TEMPERATURE)
e.setTemperature(d['emitter_temperature_value'])
elif(d['emitter_emission'] == 2):
e.setActiveEmissionType(EMISSION_TYPE_MXI)
a = Cattribute()
a.activeType = MAP_TYPE_BITMAP
t = self.texture(d['emitter_hdr_map'])
if(t is not None):
a.textureMap = t
a.value = d['emitter_hdr_intensity']
e.setMXI(a)
e.setState(True)
def global_props(d, m):
# global properties
if(d['override_map']):
                        t = self.texture(d['override_map'])
if(t is not None):
m.setGlobalMap(t)
if(d['bump_map_enabled']):
a = Cattribute()
a.activeType = MAP_TYPE_BITMAP
                        t = self.texture(d['bump_map'])
if(t is not None):
a.textureMap = t
if(d['bump_map_use_normal']):
a.value = d['bump_normal']
else:
a.value = d['bump']
m.setAttribute('bump', a)
m.setNormalMapState(d['bump_map_use_normal'])
m.setDispersion(d['dispersion'])
m.setMatteShadow(d['shadow'])
m.setMatte(d['matte'])
m.setNestedPriority(d['priority'])
c = Crgb()
c.assign(*d['id'])
m.setColorID(c)
if(d['active_display_map']):
                        t = self.texture(d['active_display_map'])
m.setActiveDisplayMap(t)
global_props(d, m)
else:
m = CextensionManager.instance()
m.loadAllExtensions()
if(d['use'] == 'AGS'):
e = m.createDefaultMaterialModifierExtension('AGS')
p = e.getExtensionData()
c = Crgb()
c.assign(*d['ags_color'])
p.setRgb('Color', c)
p.setFloat('Reflection', d['ags_reflection'])
p.setUInt('Type', d['ags_type'])
elif(d['use'] == 'OPAQUE'):
e = m.createDefaultMaterialModifierExtension('Opaque')
p = e.getExtensionData()
p.setByte('Color Type', d['opaque_color_type'])
c = Crgb()
c.assign(*d['opaque_color'])
p.setRgb('Color', c)
self.texture_data_to_mxparams('Color Map', d['opaque_color_map'], p, )
p.setByte('Shininess Type', d['opaque_shininess_type'])
p.setFloat('Shininess', d['opaque_shininess'])
self.texture_data_to_mxparams('Shininess Map', d['opaque_shininess_map'], p, )
p.setByte('Roughness Type', d['opaque_roughness_type'])
p.setFloat('Roughness', d['opaque_roughness'])
self.texture_data_to_mxparams('Roughness Map', d['opaque_roughness_map'], p, )
p.setByte('Clearcoat', d['opaque_clearcoat'])
elif(d['use'] == 'TRANSPARENT'):
e = m.createDefaultMaterialModifierExtension('Transparent')
p = e.getExtensionData()
p.setByte('Color Type', d['transparent_color_type'])
c = Crgb()
c.assign(*d['transparent_color'])
p.setRgb('Color', c)
self.texture_data_to_mxparams('Color Map', d['transparent_color_map'], p, )
p.setFloat('Ior', d['transparent_ior'])
p.setFloat('Transparency', d['transparent_transparency'])
p.setByte('Roughness Type', d['transparent_roughness_type'])
p.setFloat('Roughness', d['transparent_roughness'])
self.texture_data_to_mxparams('Roughness Map', d['transparent_roughness_map'], p, )
p.setFloat('Specular Tint', d['transparent_specular_tint'])
p.setFloat('Dispersion', d['transparent_dispersion'])
p.setByte('Clearcoat', d['transparent_clearcoat'])
elif(d['use'] == 'METAL'):
e = m.createDefaultMaterialModifierExtension('Metal')
p = e.getExtensionData()
p.setUInt('IOR', d['metal_ior'])
p.setFloat('Tint', d['metal_tint'])
p.setByte('Color Type', d['metal_color_type'])
c = Crgb()
c.assign(*d['metal_color'])
p.setRgb('Color', c)
self.texture_data_to_mxparams('Color Map', d['metal_color_map'], p, )
p.setByte('Roughness Type', d['metal_roughness_type'])
p.setFloat('Roughness', d['metal_roughness'])
self.texture_data_to_mxparams('Roughness Map', d['metal_roughness_map'], p, )
p.setByte('Anisotropy Type', d['metal_anisotropy_type'])
p.setFloat('Anisotropy', d['metal_anisotropy'])
self.texture_data_to_mxparams('Anisotropy Map', d['metal_anisotropy_map'], p, )
p.setByte('Angle Type', d['metal_angle_type'])
p.setFloat('Angle', d['metal_angle'])
self.texture_data_to_mxparams('Angle Map', d['metal_angle_map'], p, )
p.setByte('Dust Type', d['metal_dust_type'])
p.setFloat('Dust', d['metal_dust'])
self.texture_data_to_mxparams('Dust Map', d['metal_dust_map'], p, )
p.setByte('Perforation Enabled', d['metal_perforation_enabled'])
self.texture_data_to_mxparams('Perforation Map', d['metal_perforation_map'], p, )
elif(d['use'] == 'TRANSLUCENT'):
e = m.createDefaultMaterialModifierExtension('Translucent')
p = e.getExtensionData()
p.setFloat('Scale', d['translucent_scale'])
p.setFloat('Ior', d['translucent_ior'])
p.setByte('Color Type', d['translucent_color_type'])
c = Crgb()
c.assign(*d['translucent_color'])
p.setRgb('Color', c)
self.texture_data_to_mxparams('Color Map', d['translucent_color_map'], p, )
p.setFloat('Hue Shift', d['translucent_hue_shift'])
p.setByte('Invert Hue', d['translucent_invert_hue'])
p.setFloat('Vibrance', d['translucent_vibrance'])
p.setFloat('Density', d['translucent_density'])
p.setFloat('Opacity', d['translucent_opacity'])
p.setByte('Roughness Type', d['translucent_roughness_type'])
p.setFloat('Roughness', d['translucent_roughness'])
self.texture_data_to_mxparams('Roughness Map', d['translucent_roughness_map'], p, )
p.setFloat('Specular Tint', d['translucent_specular_tint'])
p.setByte('Clearcoat', d['translucent_clearcoat'])
p.setFloat('Clearcoat Ior', d['translucent_clearcoat_ior'])
elif(d['use'] == 'CARPAINT'):
e = m.createDefaultMaterialModifierExtension('Car Paint')
p = e.getExtensionData()
c = Crgb()
c.assign(*d['carpaint_color'])
p.setRgb('Color', c)
p.setFloat('Metallic', d['carpaint_metallic'])
p.setFloat('Topcoat', d['carpaint_topcoat'])
elif(d['use'] == 'HAIR'):
e = m.createDefaultMaterialModifierExtension('Hair')
p = e.getExtensionData()
p.setByte('Color Type', d['hair_color_type'])
c = Crgb()
c.assign(*d['hair_color'])
p.setRgb('Color', c)
self.texture_data_to_mxparams('Color Map', d['hair_color_map'], p, )
self.texture_data_to_mxparams('Root-Tip Map', d['hair_root_tip_map'], p, )
p.setByte('Root-Tip Weight Type', d['hair_root_tip_weight_type'])
p.setFloat('Root-Tip Weight', d['hair_root_tip_weight'])
self.texture_data_to_mxparams('Root-Tip Weight Map', d['hair_root_tip_weight_map'], p, )
p.setFloat('Primary Highlight Strength', d['hair_primary_highlight_strength'])
p.setFloat('Primary Highlight Spread', d['hair_primary_highlight_spread'])
c = Crgb()
c.assign(*d['hair_primary_highlight_tint'])
p.setRgb('Primary Highlight Tint', c)
p.setFloat('Secondary Highlight Strength', d['hair_secondary_highlight_strength'])
p.setFloat('Secondary Highlight Spread', d['hair_secondary_highlight_spread'])
c = Crgb()
c.assign(*d['hair_secondary_highlight_tint'])
p.setRgb('Secondary Highlight Tint', c)
m = s.createMaterial(d['name'])
m.applyMaterialModifierExtension(p)
# global properties
if(d['override_map']):
t = self.texture(d['override_map'])
if(t is not None):
m.setGlobalMap(t)
if(d['bump_map_enabled']):
a = Cattribute()
a.activeType = MAP_TYPE_BITMAP
t = self.texture(d['bump_map'])
if(t is not None):
a.textureMap = t
if(d['bump_map_use_normal']):
a.value = d['bump_normal']
else:
a.value = d['bump']
m.setAttribute('bump', a)
m.setNormalMapState(d['bump_map_use_normal'])
m.setDispersion(d['dispersion'])
m.setMatteShadow(d['shadow'])
m.setMatte(d['matte'])
m.setNestedPriority(d['priority'])
c = Crgb()
c.assign(*d['id'])
m.setColorID(c)
if(d['active_display_map']):
t = self.texture(d['active_display_map'])
m.setActiveDisplayMap(t)
def displacement(d, m):
if(not d['enabled']):
return
m.enableDisplacement(True)
if(d['map'] is not None):
t = self.texture(d['map'])
m.setDisplacementMap(t)
m.setDisplacementCommonParameters(d['type'], d['subdivision'], int(d['smoothing']), d['offset'], d['subdivision_method'], d['uv_interpolation'], )
m.setHeightMapDisplacementParameters(d['height'], d['height_units'], d['adaptive'], )
v = Cvector(*d['v3d_scale'])
m.setVectorDisplacementParameters(v, d['v3d_transform'], d['v3d_rgb_mapping'], d['v3d_preset'], )
try:
displacement(d['displacement'], m)
except KeyError:
pass
elif(d['subtype'] == 'CUSTOM'):
m = self.material_custom(d)
else:
raise TypeError("Material '{}' {} is unknown type".format(d['name'], d['subtype']))
def get_material(self, n, ):
"""get material by name from scene, if material is missing, create and return placeholder"""
def get_material_names(s):
it = CmaxwellMaterialIterator()
o = it.first(s)
l = []
while not o.isNull():
name = o.getName()
l.append(name)
o = it.next()
return l
s = self.mxs
names = get_material_names(s)
m = None
if(n in names):
m = s.getMaterial(n)
if(m is None):
# should not happen because i stopped changing material names.. but i leave it here
m = self.material_placeholder()
return m
def camera(self, props, steps, active=False, lens_extra=None, response=None, region=None, custom_bokeh=(1.0, 0.0, False), cut_planes=(0.0, 1e7, False), shift_lens=(0.0, 0.0), ):
"""Create camera.
props (string name, int nSteps, float shutter, float filmWidth, float filmHeight, float iso, int diaphragmType, float angle,
int nBlades, float fps, int xRes, int yRes, float pixelAspect, int lensType, int projectionType)
steps [(int iStep, [3 float] origin, [3 float] focalPoint, [3 float] up, float focalLength, float fStop, bool focalLengthNeedCorrection), ..., ]
active bool
lens_extra float or None
response string or None
region (float x1, float y1, float x2, float y2, string type) or None
custom_bokeh (float ratio, float angle, bool enabled) or None
cut_planes (float near, float far, bool enabled) or None
shift_lens (float x, float y) or None
"""
s = self.mxs
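        # props[13] is the lens type; values 6 and 7 select the Lat-Long Stereo
        # and Fish Stereo extension lenses, which are configured further below.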
if(props[13] in [6, 7]):
props2 = list(props[:])
props2[13] = TYPE_EXTENSION_LENS
c = s.addCamera(*props2)
else:
c = s.addCamera(*props)
for step in steps:
l = list(step[:])
l[1] = Cvector(*l[1])
l[2] = Cvector(*l[2])
l[3] = Cvector(*l[3])
c.setStep(*l)
# TYPE_THIN_LENS, TYPE_PINHOLE, TYPE_ORTHO
if(lens_extra is not None):
if(props[13] == TYPE_FISHEYE_LENS):
c.setFishLensProperties(lens_extra)
if(props[13] == TYPE_SPHERICAL_LENS):
c.setSphericalLensProperties(lens_extra)
if(props[13] == TYPE_CYLINDRICAL_LENS):
c.setCylindricalLensProperties(lens_extra)
if(props[13] == 6):
p = MXparamList()
p.createString('EXTENSION_NAME', 'Lat-Long Stereo')
p.createUInt('Type', lens_extra[0], 0, 2)
p.createFloat('FOV Vertical', lens_extra[1], 180.0, 0.0)
p.createFloat('FOV Horizontal', lens_extra[2], 360.0, 0.0)
p.createByte('Flip Ray X', lens_extra[3], 0, 1)
p.createByte('Flip Ray Y', lens_extra[4], 0, 1)
p.createFloat('Parallax Distance', lens_extra[5], 0.0, 360.0)
p.createByte('Zenith Mode', lens_extra[6], 0, 1)
p.createFloat('Separation', lens_extra[7], 0.0, 100000.0)
p.createTextureMap('Separation Map', CtextureMap())
self.texture_data_to_mxparams('Separation Map', lens_extra[8], p, )
c.applyCameraLensExtension(p)
if(props[13] == 7):
p = MXparamList()
p.createString('EXTENSION_NAME', 'Fish Stereo')
p.createUInt('Type', lens_extra[0], 0, 2)
p.createFloat('FOV', lens_extra[1], 0.0, 360.0)
p.createFloat('Separation', lens_extra[2], 0.0, 1000000.0)
p.createTextureMap('Separation Map', CtextureMap())
self.texture_data_to_mxparams('Separation Map', lens_extra[3], p, )
p.createByte('Vertical Mode', lens_extra[4], 0, 1)
p.createFloat('Dome Radius', lens_extra[5], 1.0, 1000000.0)
p.createTextureMap('Turn Map', CtextureMap())
self.texture_data_to_mxparams('Turn Map', lens_extra[6], p, )
p.createByte('Dome Tilt Compensation', lens_extra[7], 0, 1)
p.createFloat('Dome Tilt', lens_extra[8], 0.0, 90.0)
p.createTextureMap('Tilt Map', CtextureMap())
self.texture_data_to_mxparams('Tilt Map', lens_extra[9], p, )
c.applyCameraLensExtension(p)
if(response is not None):
c.setCameraResponsePreset(response)
if(custom_bokeh is not None):
c.setCustomBokeh(*custom_bokeh)
if(cut_planes is not None):
c.setCutPlanes(*cut_planes)
if(shift_lens is not None):
c.setShiftLens(*shift_lens)
if(region is not None):
if(region[2] == props[3]):
region[2] -= 1
if(region[3] == props[4]):
region[3] -= 1
c.setScreenRegion(*region)
if(active):
c.setActive()
return c
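    # Illustrative camera call (the values are made up and are not defaults of the
    # add-on; 'w' is a hypothetical MXSWriter instance):
    #
    #   props = ('Camera', 1, 1.0 / 250.0, 0.036, 0.024, 100.0, 0, 0.0, 6,
    #            25.0, 1920, 1080, 1.0, TYPE_THIN_LENS, 0)
    #   steps = [(0, [0.0, -5.0, 1.5], [0.0, 0.0, 1.0], [0.0, 0.0, 1.0], 0.035, 5.6, False)]
    #   w.camera(props, steps, active=True)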
def empty(self, name, matrix, motion, object_props=None, ):
"""Create empty object.
name string
matrix (((3 float), (3 float), (3 float), (3 float)), ((3 float), (3 float), (3 float), (3 float)), (3 float), (3 float), (3 float)) - base, pivot, location, rotation, scale
object_props (bool hide, float opacity, tuple cid=(int, int, int), bool hcam, bool hcamsc, bool hgi, bool hrr, bool hzcp, ) or None
"""
s = self.mxs
o = s.createMesh(name, 0, 0, 0, 0, )
self.set_base_and_pivot(o, matrix, motion, )
if(object_props is not None):
self.set_object_props(o, *object_props)
return o
def mesh(self, name, matrix, motion, num_positions, vertices, normals, triangles, triangle_normals, uv_channels, object_props=None, num_materials=0, materials=[], triangle_materials=None, backface_material=None, ):
"""Create mesh object.
name string
        matrix (((3 float), (3 float), (3 float), (3 float)), ((3 float), (3 float), (3 float), (3 float)), (3 float), (3 float), (3 float)) - base, pivot, location, rotation, scale
        motion [(float time, _, base, pivot), ..., ] or None
num_positions int
vertices [[(float x, float y, float z), ..., ], [...], ]
normals [[(float x, float y, float z), ..., ], [...], ]
triangles [(int iv0, int iv1, int iv2, int in0, int in1, int in2, ), ..., ], ] # (3x vertex index, 3x normal index)
triangle_normals [[(float x, float y, float z), ..., ], [...], ]
uv_channels [[(float u1, float v1, float w1, float u2, float v2, float w2, float u3, float v3, float w3, ), ..., ], ..., ] or None # ordered by uv index and ordered by triangle index
num_materials int
object_props (bool hide, float opacity, tuple cid=(int, int, int), bool hcam, bool hcamsc, bool hgi, bool hrr, bool hzcp, ) or None
materials [(string path, bool embed), ..., ] or None
triangle_materials [(int tri_id, int mat_id), ..., ] or None
backface_material (string path, bool embed) or None
"""
s = self.mxs
o = s.createMesh(name, len(vertices[0]), len(normals[0]) + len(triangle_normals[0]), len(triangles), num_positions)
if(uv_channels is not None):
for i in range(len(uv_channels)):
o.addChannelUVW(i)
# an = 0
for ip in range(num_positions):
an = 0
verts = vertices[ip]
norms = normals[ip]
for i, loc in enumerate(verts):
o.setVertex(i, ip, Cvector(*loc), )
o.setNormal(i, ip, Cvector(*norms[i]), )
an += 1
for ip in range(num_positions):
trinorms = triangle_normals[ip]
for i, nor in enumerate(trinorms):
o.setNormal(an + i, ip, Cvector(*nor), )
if(type(triangles) is not list):
# pymaxwell does not like numpy arrays.. Cvectors has no problems, but setTriangle does..
triangles = triangles.tolist()
for i, tri in enumerate(triangles):
o.setTriangle(i, *tri)
if(uv_channels is not None):
for iuv, uv in enumerate(uv_channels):
for it, t in enumerate(uv):
o.setTriangleUVW(it, iuv, *t)
self.set_base_and_pivot(o, matrix, motion, )
if(object_props is not None):
self.set_object_props(o, *object_props)
if(materials is not None):
if(num_materials > 1):
# multi material
mats = []
for i in range(num_materials):
try:
n = materials[i]
mat = self.get_material(n)
except:
mat = self.material_placeholder()
mats.append(mat)
# pymaxwell does not like numpy arrays..
if(type(triangle_materials) is not list):
triangle_materials = triangle_materials.tolist()
for tid, mid in triangle_materials:
o.setTriangleMaterial(tid, mats[mid])
else:
# single material
if(len(materials) == 1):
if(materials[0] != ''):
mat = self.get_material(materials[0])
o.setMaterial(mat)
else:
# no material
pass
if(backface_material is not None):
if(backface_material != ''):
# only single backface material
mat = self.get_material(backface_material)
o.setBackfaceMaterial(mat)
return o
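    # Illustrative single-triangle mesh (a sketch only; real calls are generated
    # from Blender data, and 'w' and the material name are hypothetical):
    #
    #   verts = [[(0.0, 0.0, 0.0), (1.0, 0.0, 0.0), (0.0, 1.0, 0.0)]]
    #   norms = [[(0.0, 0.0, 1.0), (0.0, 0.0, 1.0), (0.0, 0.0, 1.0)]]
    #   tris = [(0, 1, 2, 0, 1, 2)]
    #   tri_norms = [[(0.0, 0.0, 1.0)]]
    #   w.mesh('tri', None, None, 1, verts, norms, tris, tri_norms, None,
    #          num_materials=1, materials=['some_material'])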
def instance(self, name, instanced_name, matrix, motion=None, object_props=None, materials=None, backface_material=None, ):
"""Create instance of mesh object. Instanced object must exist in scene.
name string
instanced_name string
        matrix (((3 float), (3 float), (3 float), (3 float)), ((3 float), (3 float), (3 float), (3 float)), (3 float), (3 float), (3 float)) - base, pivot, location, rotation, scale
        motion [(float time, _, base, pivot), ..., ] or None
object_props (bool hide, float opacity, tuple cid=(int, int, int), bool hcam, bool hcamsc, bool hgi, bool hrr, bool hzcp, ) or None
material (string path, bool embed) or None
backface_material (string path, bool embed) or None
"""
s = self.mxs
bo = s.getObject(instanced_name)
o = s.createInstancement(name, bo)
self.set_base_and_pivot(o, matrix, motion, )
if(object_props is not None):
self.set_object_props(o, *object_props)
if(materials is not None):
if(len(materials) > 1):
# multi material instances inherits material from base object
pass
else:
# single material, and i think (not sure) i can't make instance with different material than base in blender..
if(len(materials) == 1):
if(materials[0] != ''):
mat = self.get_material(materials[0])
o.setMaterial(mat)
if(backface_material is not None):
if(backface_material != ''):
mat = self.get_material(backface_material)
o.setBackfaceMaterial(mat)
return o
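    # e.g. (sketch, assuming a writer 'w' and an existing scene object named 'tri'):
    #
    #   w.instance('tri_copy', 'tri', None)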
def reference(self, name, path, flags, matrix, motion=None, object_props=None, material=None, backface_material=None, ):
"""Create MXS reference object.
name string
path string (path)
flags [bool, bool, bool, bool]
        matrix (((3 float), (3 float), (3 float), (3 float)), ((3 float), (3 float), (3 float), (3 float)), (3 float), (3 float), (3 float)) - base, pivot, location, rotation, scale
        motion [(float time, _, base, pivot), ..., ] or None
object_props (bool hide, float opacity, tuple cid=(int, int, int), bool hcam, bool hcamsc, bool hgi, bool hrr, bool hzcp, ) or None
"""
s = self.mxs
o = s.createMesh(name, 0, 0, 0, 0, )
o.setReferencedScenePath(path)
if(flags[0]):
o.setReferencedOverrideFlags(FLAG_OVERRIDE_HIDE)
if(flags[1]):
o.setReferencedOverrideFlags(FLAG_OVERRIDE_HIDE_TO_CAMERA)
if(flags[2]):
o.setReferencedOverrideFlags(FLAG_OVERRIDE_HIDE_TO_REFL_REFR)
if(flags[3]):
o.setReferencedOverrideFlags(FLAG_OVERRIDE_HIDE_TO_GI)
self.set_base_and_pivot(o, matrix, motion, )
if(object_props is not None):
self.set_object_props(o, *object_props)
if(material is not None):
if(material != ''):
mat = self.get_material(material)
o.setMaterial(mat)
if(backface_material is not None):
if(backface_material != ''):
mat = self.get_material(backface_material)
o.setBackfaceMaterial(mat)
return o
def hierarchy(self, tree, ):
"""Set hierarchy of all objects at once.
        tree [(obj_name, parent_name or None, _), ..., ]    (the third tuple element is ignored here)
"""
s = self.mxs
for on, pn, _ in tree:
if(pn is not None):
o = s.getObject(on)
p = s.getObject(pn)
o.setParent(p)
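    # e.g. (sketch, assuming objects named 'tri', 'tri_copy' and 'my_empty' already
    # exist in the scene; the third tuple element is ignored by this method):
    #
    #   w.hierarchy([('tri', 'my_empty', None), ('tri_copy', 'my_empty', None)])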
def environment(self, env_type=None, sky_type=None, sky=None, dome=None, sun_type=None, sun=None, ibl=None, ):
"""Set Environment properties.
env_type string or None PHYSICAL_SKY, IMAGE_BASED, NONE
sky_type string or None PHYSICAL, CONSTANT
sky dict or None {sky_use_preset bool
sky_preset string (path)
sky_intensity float
sky_planet_refl float
sky_ozone float
sky_water float
sky_turbidity_coeff float
sky_wavelength_exp float
sky_reflectance float
sky_asymmetry float}
dome dict or None {dome_intensity float
dome_zenith [float, float, float]
dome_horizon [float, float, float]
dome_mid_point float}
sun_type string or None DISABLED, PHYSICAL, CUSTOM
sun dict or None {sun_power float
sun_radius_factor float
sun_temp float
sun_color [float, float, float]
sun_location_type string LATLONG, ANGLES, DIRECTION
sun_latlong_lat float
sun_latlong_lon float
sun_date string
sun_time string
sun_latlong_gmt int
sun_latlong_gmt_auto bool
sun_latlong_ground_rotation float
sun_angles_zenith float
sun_angles_azimuth float
sun_dir_x float
sun_dir_y float
sun_dir_z float}
ibl dict or None {ibl_intensity float
ibl_interpolation bool
ibl_screen_mapping bool
ibl_bg_type string HDR_IMAGE, ACTIVE_SKY, DISABLED
ibl_bg_map string (path)
ibl_bg_intensity float
ibl_bg_scale_x float
ibl_bg_scale_y float
ibl_bg_offset_x float
ibl_bg_offset_y float
ibl_refl_type string HDR_IMAGE, ACTIVE_SKY, DISABLED
ibl_refl_map string (path)
ibl_refl_intensity float
ibl_refl_scale_x float
ibl_refl_scale_y float
ibl_refl_offset_x float
ibl_refl_offset_y float
ibl_refr_type string HDR_IMAGE, ACTIVE_SKY, DISABLED
ibl_refr_map string (path)
ibl_refr_intensity float
ibl_refr_scale_x float
ibl_refr_scale_y float
ibl_refr_offset_x float
ibl_refr_offset_y float
ibl_illum_type string HDR_IMAGE, ACTIVE_SKY, DISABLED
ibl_illum_map string (path)
ibl_illum_intensity float
ibl_illum_scale_x float
ibl_illum_scale_y float
ibl_illum_offset_x float
ibl_illum_offset_y float}
"""
s = self.mxs
env = s.getEnvironment()
if(env_type == 'PHYSICAL_SKY' or env_type == 'IMAGE_BASED'):
if(sky_type is not None):
env.setActiveSky(sky_type)
if(sky_type == 'PHYSICAL'):
if(not sky["sky_use_preset"]):
env.setPhysicalSkyAtmosphere(sky["sky_intensity"],
sky["sky_ozone"],
sky["sky_water"],
sky["sky_turbidity_coeff"],
sky["sky_wavelength_exp"],
sky["sky_reflectance"],
sky["sky_asymmetry"],
sky["sky_planet_refl"], )
else:
env.loadSkyFromPreset(sky["sky_preset"])
elif(sky_type == 'CONSTANT'):
hc = Crgb()
hc.assign(*dome['dome_horizon'])
zc = Crgb()
zc.assign(*dome['dome_zenith'])
env.setSkyConstant(dome["dome_intensity"], hc, zc, dome['dome_mid_point'])
sc = Crgb()
sc.assign(*sun['sun_color'])
if(sun_type == 'PHYSICAL'):
env.setSunProperties(SUN_PHYSICAL, sun["sun_temp"], sun["sun_power"], sun["sun_radius_factor"], sc)
elif(sun_type == 'CUSTOM'):
env.setSunProperties(SUN_CONSTANT, sun["sun_temp"], sun["sun_power"], sun["sun_radius_factor"], sc)
else:
# sun_type == 'DISABLED' or sun_type == None
env.setSunProperties(SUN_DISABLED, sun["sun_temp"], sun["sun_power"], sun["sun_radius_factor"], sc)
if(sun['sun_location_type'] == 'LATLONG'):
env.setSunPositionType(0)
l = sun["sun_date"].split(".")
date = datetime.date(int(l[2]), int(l[1]), int(l[0]))
day = int(date.timetuple().tm_yday)
l = sun["sun_time"].split(":")
hour = int(l[0])
minute = int(l[1])
time = hour + (minute / 60)
env.setSunLongitudeAndLatitude(sun["sun_latlong_lon"], sun["sun_latlong_lat"], sun["sun_latlong_gmt"], day, time)
env.setSunRotation(sun["sun_latlong_ground_rotation"])
elif(sun['sun_location_type'] == 'ANGLES'):
env.setSunPositionType(1)
env.setSunAngles(sun["sun_angles_zenith"], sun["sun_angles_azimuth"])
elif(sun['sun_location_type'] == 'DIRECTION'):
env.setSunPositionType(2)
env.setSunDirection(Cvector(sun["sun_dir_x"], sun["sun_dir_y"], sun["sun_dir_z"]))
if(env_type == 'IMAGE_BASED'):
env.enableEnvironment(True)
def state(s):
# channel state: 0 = Disabled; 1 = Enabled; 2 = Use active sky instead.
if(s == 'HDR_IMAGE'):
return 1
if(s == 'ACTIVE_SKY'):
return 2
if(s == 'SAME_AS_BG'):
# same as bg, set the same values as in bg layer
return 3
return 0
if(ibl is not None):
env.setEnvironmentWeight(ibl["ibl_intensity"])
s = state(ibl["ibl_bg_type"])
env.setEnvironmentLayer(IBL_LAYER_BACKGROUND, ibl["ibl_bg_map"], s, not ibl["ibl_screen_mapping"], not ibl["ibl_interpolation"], ibl["ibl_bg_intensity"], ibl["ibl_bg_scale_x"], ibl["ibl_bg_scale_y"], ibl["ibl_bg_offset_x"], ibl["ibl_bg_offset_y"], )
s = state(ibl["ibl_refl_type"])
if(s == 3):
s = state(ibl["ibl_bg_type"])
env.setEnvironmentLayer(IBL_LAYER_REFLECTION, ibl["ibl_bg_map"], s, not ibl["ibl_screen_mapping"], not ibl["ibl_interpolation"], ibl["ibl_bg_intensity"], ibl["ibl_bg_scale_x"], ibl["ibl_bg_scale_y"], ibl["ibl_bg_offset_x"], ibl["ibl_bg_offset_y"], )
else:
env.setEnvironmentLayer(IBL_LAYER_REFLECTION, ibl["ibl_refl_map"], s, not ibl["ibl_screen_mapping"], not ibl["ibl_interpolation"], ibl["ibl_refl_intensity"], ibl["ibl_refl_scale_x"], ibl["ibl_refl_scale_y"], ibl["ibl_refl_offset_x"], ibl["ibl_refl_offset_y"], )
s = state(ibl["ibl_refr_type"])
if(s == 3):
s = state(ibl["ibl_bg_type"])
env.setEnvironmentLayer(IBL_LAYER_REFRACTION, ibl["ibl_bg_map"], s, not ibl["ibl_screen_mapping"], not ibl["ibl_interpolation"], ibl["ibl_bg_intensity"], ibl["ibl_bg_scale_x"], ibl["ibl_bg_scale_y"], ibl["ibl_bg_offset_x"], ibl["ibl_bg_offset_y"], )
else:
env.setEnvironmentLayer(IBL_LAYER_REFRACTION, ibl["ibl_refr_map"], s, not ibl["ibl_screen_mapping"], not ibl["ibl_interpolation"], ibl["ibl_refr_intensity"], ibl["ibl_refr_scale_x"], ibl["ibl_refr_scale_y"], ibl["ibl_refr_offset_x"], ibl["ibl_refr_offset_y"], )
s = state(ibl["ibl_illum_type"])
if(s == 3):
s = state(ibl["ibl_bg_type"])
env.setEnvironmentLayer(IBL_LAYER_ILLUMINATION, ibl["ibl_bg_map"], s, not ibl["ibl_screen_mapping"], not ibl["ibl_interpolation"], ibl["ibl_bg_intensity"], ibl["ibl_bg_scale_x"], ibl["ibl_bg_scale_y"], ibl["ibl_bg_offset_x"], ibl["ibl_bg_offset_y"], )
else:
env.setEnvironmentLayer(IBL_LAYER_ILLUMINATION, ibl["ibl_illum_map"], s, not ibl["ibl_screen_mapping"], not ibl["ibl_interpolation"], ibl["ibl_illum_intensity"], ibl["ibl_illum_scale_x"], ibl["ibl_illum_scale_y"], ibl["ibl_illum_offset_x"], ibl["ibl_illum_offset_y"], )
else:
# env_type == 'NONE' or env_type == None
env.setActiveSky('')
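    # Illustrative call for a plain physical sky and sun (a sketch only; each dict
    # must contain every key listed in the docstring above, the values shown are
    # arbitrary and omitted keys are indicated by '...'):
    #
    #   w.environment(env_type='PHYSICAL_SKY', sky_type='PHYSICAL',
    #                 sky={'sky_use_preset': False, 'sky_intensity': 1.0, ...},
    #                 sun_type='PHYSICAL',
    #                 sun={'sun_power': 1.0, 'sun_location_type': 'ANGLES', ...})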
def parameters(self, scene, materials=None, generals=None, tone=None, simulens=None, illum_caustics=None, other=None, text_overlay=None, ):
"""Set scene render parameters.
scene dict {cpu_threads int,
multilight int,
multilight_type int,
quality string RS1, RS0
sampling_level float,
time int, },
materials dict {override bool,
override_path string (path),
search_path string (path), } or None
generals dict {diplacement bool,
dispersion bool,
motion_blur bool, } or None
tone dict {burn float,
color_space int,
gamma float,
sharpness bool,
sharpness_value float,
tint float,
whitepoint float, } or None
simulens dict {aperture_map string (path),
devignetting bool,
devignetting_value float,
diffraction bool,
diffraction_value float,
frequency float,
obstacle_map string (path),
scattering bool,
scattering_value float, } or None
illum_caustics dict {illumination int,
refl_caustics int,
refr_caustics int, } or None
        other               dict {protect bool,
                                  extra_sampling_enabled bool,
                                  extra_sampling_sl float,
                                  extra_sampling_mask int,
                                  extra_sampling_custom_alpha string,
                                  extra_sampling_user_bitmap string (path),
                                  extra_sampling_invert bool, } or None
        text_overlay        dict or None {enabled, text, position, color, background, background_color, }
"""
s = self.mxs
# s.setRenderParameter('ENGINE', scene["quality"])
s.setRenderParameter('ENGINE', bytes(scene["quality"], encoding='UTF-8'))
s.setRenderParameter('NUM THREADS', scene["cpu_threads"])
s.setRenderParameter('STOP TIME', scene["time"] * 60)
s.setRenderParameter('SAMPLING LEVEL', scene["sampling_level"])
s.setRenderParameter('USE MULTILIGHT', scene["multilight"])
s.setRenderParameter('SAVE LIGHTS IN SEPARATE FILES', scene["multilight_type"])
if(generals is not None):
s.setRenderParameter('DO MOTION BLUR', generals["motion_blur"])
s.setRenderParameter('DO DISPLACEMENT', generals["diplacement"])
s.setRenderParameter('DO DISPERSION', generals["dispersion"])
if(illum_caustics is not None):
v = illum_caustics['illumination']
if(v == 3):
s.setRenderParameter('DO DIRECT LAYER', 0)
s.setRenderParameter('DO INDIRECT LAYER', 0)
elif(v == 2):
s.setRenderParameter('DO DIRECT LAYER', 0)
s.setRenderParameter('DO INDIRECT LAYER', 1)
elif(v == 1):
s.setRenderParameter('DO DIRECT LAYER', 1)
s.setRenderParameter('DO INDIRECT LAYER', 0)
else:
s.setRenderParameter('DO DIRECT LAYER', 1)
s.setRenderParameter('DO INDIRECT LAYER', 1)
v = illum_caustics['refl_caustics']
if(v == 3):
s.setRenderParameter('DO DIRECT REFLECTION CAUSTIC LAYER', 0)
s.setRenderParameter('DO INDIRECT REFLECTION CAUSTIC LAYER', 0)
elif(v == 2):
s.setRenderParameter('DO DIRECT REFLECTION CAUSTIC LAYER', 0)
s.setRenderParameter('DO INDIRECT REFLECTION CAUSTIC LAYER', 1)
elif(v == 1):
s.setRenderParameter('DO DIRECT REFLECTION CAUSTIC LAYER', 1)
s.setRenderParameter('DO INDIRECT REFLECTION CAUSTIC LAYER', 0)
else:
s.setRenderParameter('DO DIRECT REFLECTION CAUSTIC LAYER', 1)
s.setRenderParameter('DO INDIRECT REFLECTION CAUSTIC LAYER', 1)
v = illum_caustics['refr_caustics']
if(v == 3):
s.setRenderParameter('DO DIRECT REFRACTION CAUSTIC LAYER', 0)
s.setRenderParameter('DO INDIRECT REFRACTION CAUSTIC LAYER', 0)
elif(v == 2):
s.setRenderParameter('DO DIRECT REFRACTION CAUSTIC LAYER', 0)
s.setRenderParameter('DO INDIRECT REFRACTION CAUSTIC LAYER', 1)
elif(v == 1):
s.setRenderParameter('DO DIRECT REFRACTION CAUSTIC LAYER', 1)
s.setRenderParameter('DO INDIRECT REFRACTION CAUSTIC LAYER', 0)
else:
s.setRenderParameter('DO DIRECT REFRACTION CAUSTIC LAYER', 1)
s.setRenderParameter('DO INDIRECT REFRACTION CAUSTIC LAYER', 1)
if(simulens is not None):
s.setRenderParameter('DO DEVIGNETTING', simulens["devignetting"])
s.setRenderParameter('DEVIGNETTING', simulens["devignetting_value"])
s.setRenderParameter('DO SCATTERING_LENS', simulens["scattering"])
s.setRenderParameter('SCATTERING_LENS', simulens["scattering_value"])
if(simulens["diffraction"]):
s.enableDiffraction()
s.setDiffraction(simulens["diffraction_value"], simulens["frequency"], simulens["aperture_map"], simulens["obstacle_map"])
if(tone is not None):
s.setRenderParameter('DO SHARPNESS', tone["sharpness"])
s.setRenderParameter('SHARPNESS', tone["sharpness_value"])
s.setToneMapping(tone["gamma"], tone["burn"])
s.setColorSpace(tone["color_space"])
s.setWhitePoint(tone["whitepoint"], tone["tint"])
if(materials is not None):
if(materials["override"]):
s.setOverrideMaterial(True)
if(materials["override_path"] != ""):
s.setOverrideMaterial(materials["override_path"])
if(materials["search_path"] != ""):
s.addSearchingPath(materials["search_path"])
if(materials["default_material"] != ""):
s.setDefaultMaterial(True)
s.setDefaultMaterial(materials["default_material"])
else:
s.setDefaultMaterial(False)
if(other is not None):
if(other['protect']):
s.enableProtection(True)
else:
s.enableProtection(False)
if(other['extra_sampling_enabled']):
s.setRenderParameter('DO EXTRA SAMPLING', 1)
s.setRenderParameter('EXTRA SAMPLING SL', other['extra_sampling_sl'])
s.setRenderParameter('EXTRA SAMPLING MASK', other['extra_sampling_mask'])
if(platform.system() == 'Linux'):
# wtf?
s.setRenderParameter('EXTRA SAMPLING CUSTOM ALPHA', bytes(other['extra_sampling_custom_alpha'], encoding='UTF-8'))
s.setRenderParameter('EXTRA SAMPLING USER BITMAP', bytes(other['extra_sampling_user_bitmap'], encoding='UTF-8'))
else:
s.setRenderParameter('EXTRA SAMPLING CUSTOM ALPHA', other['extra_sampling_custom_alpha'])
s.setRenderParameter('EXTRA SAMPLING USER BITMAP', other['extra_sampling_user_bitmap'])
if(other['extra_sampling_invert']):
s.setRenderParameter('EXTRA SAMPLING INVERT', 1)
if(text_overlay is not None):
if(text_overlay['enabled']):
o = CoverlayTextOptions()
o.enabled_ = 1
o.text_ = Cstring(text_overlay['text'])
o.position_ = text_overlay['position']
c = Crgb()
c.assign(*text_overlay['color'])
o.color_ = c.toRGB8()
o.backgroundEnabled_ = text_overlay['background']
c = Crgb()
c.assign(*text_overlay['background_color'])
o.backgroundColor_ = c.toRGB8()
s.setOverlayTextOptions(o)
def channels(self, base_path, mxi, image, image_depth='RGB8', channels_output_mode=0, channels_render=True, channels_render_type=0, channels=None, ):
"""Set scene render channels.
base_path string (path)
mxi string (path)
image string (path)
image_depth string RGB8, RGB16, RGB32
channels_output_mode int
channels_render bool
channels_render_type int
channels dict {channels_alpha bool
channels_alpha_file string
channels_alpha_opaque bool
channels_custom_alpha bool
channels_custom_alpha_file string
channels_deep bool
channels_deep_file string
channels_deep_max_samples int
channels_deep_min_dist float
channels_deep_type int
channels_fresnel bool
channels_fresnel_file string
channels_material_id bool
channels_material_id_file string
channels_motion_vector bool
channels_motion_vector_file string
channels_normals bool
channels_normals_file string
channels_normals_space int
channels_object_id bool
channels_object_id_file string
channels_position bool
channels_position_file string
channels_position_space int
channels_reflectance bool
channels_reflectance_file string
channels_roughness bool
channels_roughness_file string
channels_shadow bool
channels_shadow_file string
channels_uv bool
channels_uv_file string
channels_z_buffer bool
channels_z_buffer_far float
channels_z_buffer_file string
channels_z_buffer_near float} or None
"""
def get_ext_depth(t, e=None):
if(e is not None):
t = "{}{}".format(e[1:].upper(), int(t[3:]))
if(t == 'RGB8'):
return ('.tif', 8)
elif(t == 'RGB16'):
return ('.tif', 16)
elif(t == 'RGB32'):
return ('.tif', 32)
elif(t == 'PNG8'):
return ('.png', 8)
elif(t == 'PNG16'):
return ('.png', 16)
elif(t == 'TGA'):
return ('.tga', 8)
elif(t == 'TIF8'):
return ('.tif', 8)
elif(t == 'TIF16'):
return ('.tif', 16)
elif(t == 'TIF32'):
return ('.tif', 32)
elif(t == 'EXR16'):
return ('.exr', 16)
elif(t == 'EXR32'):
return ('.exr', 32)
elif(t == 'EXR_DEEP'):
return ('.exr', 32)
elif(t == 'JPG'):
return ('.jpg', 8)
elif(t == 'JP2'):
return ('.jp2', 8)
elif(t == 'HDR'):
return ('.hdr', 32)
elif(t == 'DTEX'):
return ('.dtex', 32)
elif(t == 'PSD8'):
return ('.psd', 8)
elif(t == 'PSD16'):
return ('.psd', 16)
elif(t == 'PSD32'):
return ('.psd', 32)
else:
return ('.tif', 8)
s = self.mxs
s.setRenderParameter('DO NOT SAVE MXI FILE', (mxi is None))
s.setRenderParameter('DO NOT SAVE IMAGE FILE', (image is None))
if(mxi is not None):
# s.setRenderParameter('MXI FULLNAME', mxi)
# s.setRenderParameter('MXI FULLNAME', bytes(mxi, encoding='UTF-8'))
if(platform.system() == 'Linux'):
# wtf?
s.setRenderParameter('MXI FULLNAME', bytes(mxi, encoding='UTF-8'))
else:
# s.setRenderParameter('MXI FULLNAME', mxi)
s.setRenderParameter('MXI FULLNAME', bytes(mxi, encoding='UTF-8'))
if(image is not None):
if(image_depth is None):
image_depth = 'RGB8'
_, depth = get_ext_depth(image_depth, os.path.splitext(os.path.split(image)[1])[1])
s.setPath('RENDER', image, depth)
s.setRenderParameter('DO RENDER CHANNEL', int(channels_render))
s.setRenderParameter('EMBED CHANNELS', channels_output_mode)
s.setRenderParameter('RENDER LAYERS', channels_render_type)
if(channels is not None):
e, depth = get_ext_depth(channels["channels_alpha_file"])
s.setPath('ALPHA', "{}_alpha{}".format(base_path, e), depth)
e, depth = get_ext_depth(channels["channels_shadow_file"])
s.setPath('SHADOW', "{}_shadow{}".format(base_path, e), depth)
e, depth = get_ext_depth(channels["channels_object_id_file"])
s.setPath('OBJECT', "{}_object_id{}".format(base_path, e), depth)
e, depth = get_ext_depth(channels["channels_material_id_file"])
s.setPath('MATERIAL', "{}_material_id{}".format(base_path, e), depth)
e, depth = get_ext_depth(channels["channels_motion_vector_file"])
s.setPath('MOTION', "{}_motion_vector{}".format(base_path, e), depth)
e, depth = get_ext_depth(channels["channels_z_buffer_file"])
s.setPath('Z', "{}_z_buffer{}".format(base_path, e), depth)
e, depth = get_ext_depth(channels["channels_roughness_file"])
s.setPath('ROUGHNESS', "{}_roughness{}".format(base_path, e), depth)
e, depth = get_ext_depth(channels["channels_fresnel_file"])
s.setPath('FRESNEL', "{}_fresnel{}".format(base_path, e), depth)
e, depth = get_ext_depth(channels["channels_normals_file"])
s.setPath('NORMALS', "{}_normals{}".format(base_path, e), depth)
e, depth = get_ext_depth(channels["channels_position_file"])
s.setPath('POSITION', "{}_position{}".format(base_path, e), depth)
e, depth = get_ext_depth(channels["channels_deep_file"])
s.setPath('DEEP', "{}_deep{}".format(base_path, e), depth)
e, depth = get_ext_depth(channels["channels_uv_file"])
s.setPath('UV', "{}_uv{}".format(base_path, e), depth)
e, depth = get_ext_depth(channels["channels_custom_alpha_file"])
s.setPath('ALPHA_CUSTOM', "{}_custom_alpha{}".format(base_path, e), depth)
e, depth = get_ext_depth(channels["channels_reflectance_file"])
s.setPath('REFLECTANCE', "{}_reflectance{}".format(base_path, e), depth)
s.setRenderParameter('DO ALPHA CHANNEL', int(channels["channels_alpha"]))
s.setRenderParameter('OPAQUE ALPHA', int(channels["channels_alpha_opaque"]))
s.setRenderParameter('DO IDOBJECT CHANNEL', int(channels["channels_object_id"]))
s.setRenderParameter('DO IDMATERIAL CHANNEL', int(channels["channels_material_id"]))
s.setRenderParameter('DO SHADOW PASS CHANNEL', int(channels["channels_shadow"]))
s.setRenderParameter('DO MOTION CHANNEL', int(channels["channels_motion_vector"]))
s.setRenderParameter('DO ROUGHNESS CHANNEL', int(channels["channels_roughness"]))
s.setRenderParameter('DO FRESNEL CHANNEL', int(channels["channels_fresnel"]))
s.setRenderParameter('DO NORMALS CHANNEL', int(channels["channels_normals"]))
s.setRenderParameter('NORMALS CHANNEL SPACE', channels["channels_normals_space"])
s.setRenderParameter('POSITION CHANNEL SPACE', channels["channels_position_space"])
s.setRenderParameter('DO POSITION CHANNEL', int(channels["channels_position"]))
s.setRenderParameter('DO ZBUFFER CHANNEL', int(channels["channels_z_buffer"]))
s.setRenderParameter('ZBUFFER RANGE', (channels["channels_z_buffer_near"], channels["channels_z_buffer_far"]))
s.setRenderParameter('DO DEEP CHANNEL', int(channels["channels_deep"]))
s.setRenderParameter('DEEP CHANNEL TYPE', channels["channels_deep_type"])
s.setRenderParameter('DEEP MIN DISTANCE', channels["channels_deep_min_dist"])
s.setRenderParameter('DEEP MAX SAMPLES', channels["channels_deep_max_samples"])
s.setRenderParameter('DO UV CHANNEL', int(channels["channels_uv"]))
# s.setRenderParameter('MOTION CHANNEL TYPE', ?)
s.setRenderParameter('DO ALPHA CUSTOM CHANNEL', int(channels["channels_custom_alpha"]))
s.setRenderParameter('DO REFLECTANCE CHANNEL', int(channels["channels_reflectance"]))
def custom_alphas(self, groups, ):
"""Set custom alphas.
groups list of dicts: {'name': string, 'objects': list of strings, 'materials': list of strings, 'opaque': bool, }
"""
s = self.mxs
def get_material_names(s):
it = CmaxwellMaterialIterator()
o = it.first(s)
l = []
while not o.isNull():
name = o.getName()
l.append(name)
o = it.next()
return l
def get_object_names(s):
it = CmaxwellObjectIterator()
o = it.first(s)
l = []
while not o.isNull():
name, _ = o.getName()
l.append(name)
o = it.next()
return l
sobs = get_object_names(s)
smats = get_material_names(s)
for a in groups:
s.createCustomAlphaChannel(a['name'], a['opaque'])
for n in a['objects']:
if(n in sobs):
o = s.getObject(n)
o.addToCustomAlpha(a['name'])
for n in a['materials']:
if(n in smats):
m = s.getMaterial(n)
m.addToCustomAlpha(a['name'])
def ext_particles(self, name, properties, matrix, motion=None, object_props=None, material=None, backface_material=None, ):
"""Create particles object.
name string
properties dict
base ((3 float), (3 float), (3 float), (3 float))
pivot ((3 float), (3 float), (3 float), (3 float))
object_props (bool hide, float opacity, tuple cid=(int, int, int), bool hcam, bool hcamsc, bool hgi, bool hrr, bool hzcp, ) or None
material (string path, bool embed) or None
backface_material (string path, bool embed) or None
"""
s = self.mxs
e = self.mgr.createDefaultGeometryProceduralExtension('MaxwellParticles')
p = e.getExtensionData()
d = properties
if(d['embed'] is True):
c = Cbase()
c.origin = Cvector(0.0, 0.0, 0.0)
c.xAxis = Cvector(1.0, 0.0, 0.0)
c.yAxis = Cvector(0.0, 1.0, 0.0)
c.zAxis = Cvector(0.0, 0.0, 1.0)
p.setFloatArray('PARTICLE_POSITIONS', list(d['pdata']['PARTICLE_POSITIONS']), c)
p.setFloatArray('PARTICLE_SPEEDS', list(d['pdata']['PARTICLE_SPEEDS']), c)
p.setFloatArray('PARTICLE_RADII', list(d['pdata']['PARTICLE_RADII']), c)
p.setIntArray('PARTICLE_IDS', list(d['pdata']['PARTICLE_IDS']))
p.setFloatArray('PARTICLE_NORMALS', list(d['pdata']['PARTICLE_NORMALS']), c)
p.setFloatArray('PARTICLE_UVW', list(d['pdata']['PARTICLE_UVW']), c)
else:
p.setString('FileName', d['filename'])
p.setFloat('Radius Factor', d['radius_multiplier'])
p.setFloat('MB Factor', d['motion_blur_multiplier'])
p.setFloat('Shutter 1/', d['shutter_speed'])
p.setFloat('Load particles %', d['load_particles'])
p.setUInt('Axis', d['axis_system'])
p.setInt('Frame#', d['frame_number'])
p.setFloat('fps', d['fps'])
p.setInt('Create N particles per particle', d['extra_create_np_pp'])
p.setFloat('Extra particles dispersion', d['extra_dispersion'])
p.setFloat('Extra particles deformation', d['extra_deformation'])
p.setByte('Load particle Force', d['load_force'])
p.setByte('Load particle Vorticity', d['load_vorticity'])
p.setByte('Load particle Normal', d['load_normal'])
p.setByte('Load particle neighbors no.', d['load_neighbors_num'])
p.setByte('Load particle UV', d['load_uv'])
p.setByte('Load particle Age', d['load_age'])
p.setByte('Load particle Isolation Time', d['load_isolation_time'])
p.setByte('Load particle Viscosity', d['load_viscosity'])
p.setByte('Load particle Density', d['load_density'])
p.setByte('Load particle Pressure', d['load_pressure'])
p.setByte('Load particle Mass', d['load_mass'])
p.setByte('Load particle Temperature', d['load_temperature'])
p.setByte('Load particle ID', d['load_id'])
p.setFloat('Min Force', d['min_force'])
p.setFloat('Max Force', d['max_force'])
p.setFloat('Min Vorticity', d['min_vorticity'])
p.setFloat('Max Vorticity', d['max_vorticity'])
p.setInt('Min Nneighbors', d['min_nneighbors'])
p.setInt('Max Nneighbors', d['max_nneighbors'])
p.setFloat('Min Age', d['min_age'])
p.setFloat('Max Age', d['max_age'])
p.setFloat('Min Isolation Time', d['min_isolation_time'])
p.setFloat('Max Isolation Time', d['max_isolation_time'])
p.setFloat('Min Viscosity', d['min_viscosity'])
p.setFloat('Max Viscosity', d['max_viscosity'])
p.setFloat('Min Density', d['min_density'])
p.setFloat('Max Density', d['max_density'])
p.setFloat('Min Pressure', d['min_pressure'])
p.setFloat('Max Pressure', d['max_pressure'])
p.setFloat('Min Mass', d['min_mass'])
p.setFloat('Max Mass', d['max_mass'])
p.setFloat('Min Temperature', d['min_temperature'])
p.setFloat('Max Temperature', d['max_temperature'])
p.setFloat('Min Velocity', d['min_velocity'])
p.setFloat('Max Velocity', d['max_velocity'])
o = s.createGeometryProceduralObject(name, p)
a, _ = o.addChannelUVW()
o.generateCustomUVW(0, a)
self.set_base_and_pivot(o, matrix, motion, )
if(object_props is not None):
self.set_object_props(o, *object_props)
if(material is not None):
if(material != ''):
mat = self.get_material(material)
o.setMaterial(mat)
if(backface_material is not None):
if(backface_material != ''):
mat = self.get_material(backface_material)
o.setBackfaceMaterial(mat)
return o
def ext_hair(self, name, extension, matrix, motion, root_radius, tip_radius, data, object_props=None, display_percent=10, display_max=1000, material=None, backface_material=None, ):
"""Create hair/grass object.
name string
extension string ('MaxwellHair' ,'MGrassP')
base ((3 float), (3 float), (3 float), (3 float))
pivot ((3 float), (3 float), (3 float), (3 float))
root_radius float
tip_radius float
data dict of extension data
object_props (bool hide, float opacity, tuple cid=(int, int, int), bool hcam, bool hcamsc, bool hgi, bool hrr, bool hzcp, ) or None
display_percent int
display_max int
material (string path, bool embed) or None
backface_material (string path, bool embed) or None
"""
s = self.mxs
e = self.mgr.createDefaultGeometryProceduralExtension(extension)
p = e.getExtensionData()
p.setByteArray('HAIR_MAJOR_VER', data['HAIR_MAJOR_VER'])
p.setByteArray('HAIR_MINOR_VER', data['HAIR_MINOR_VER'])
p.setByteArray('HAIR_FLAG_ROOT_UVS', data['HAIR_FLAG_ROOT_UVS'])
m = memoryview(struct.pack("I", data['HAIR_GUIDES_COUNT'][0])).tolist()
p.setByteArray('HAIR_GUIDES_COUNT', m)
m = memoryview(struct.pack("I", data['HAIR_GUIDES_POINT_COUNT'][0])).tolist()
p.setByteArray('HAIR_GUIDES_POINT_COUNT', m)
c = Cbase()
c.origin = Cvector(0.0, 0.0, 0.0)
c.xAxis = Cvector(1.0, 0.0, 0.0)
c.yAxis = Cvector(0.0, 1.0, 0.0)
c.zAxis = Cvector(0.0, 0.0, 1.0)
p.setFloatArray('HAIR_POINTS', list(data['HAIR_POINTS']), c)
p.setFloatArray('HAIR_NORMALS', list(data['HAIR_NORMALS']), c)
if(data['HAIR_FLAG_ROOT_UVS'][0] == 1):
p.setFloatArray('HAIR_ROOT_UVS', list(data['HAIR_ROOT_UVS']), c)
p.setUInt('Display Percent', display_percent)
if(extension == 'MaxwellHair'):
p.setUInt('Display Max. Hairs', display_max)
p.setDouble('Root Radius', root_radius)
p.setDouble('Tip Radius', tip_radius)
if(extension == 'MGrassP'):
p.setUInt('Display Max. Hairs', display_max)
p.setDouble('Root Radius', root_radius)
p.setDouble('Tip Radius', tip_radius)
o = s.createGeometryProceduralObject(name, p)
if(extension == 'MaxwellHair'):
a, _ = o.addChannelUVW()
o.generateCustomUVW(0, a)
b, _ = o.addChannelUVW()
o.generateCustomUVW(1, b)
c, _ = o.addChannelUVW()
o.generateCustomUVW(2, c)
if(extension == 'MGrassP'):
a, _ = o.addChannelUVW()
o.generateCustomUVW(0, a)
b, _ = o.addChannelUVW()
o.generateCustomUVW(1, b)
self.set_base_and_pivot(o, matrix, motion, )
if(object_props is not None):
self.set_object_props(o, *object_props)
if(material is not None):
if(material != ''):
mat = self.get_material(material)
o.setMaterial(mat)
if(backface_material is not None):
if(backface_material != ''):
mat = self.get_material(backface_material)
o.setBackfaceMaterial(mat)
return o
def ext_sea(self, name, matrix, motion=None, object_props=None, geometry=None, wind=None, material=None, backface_material=None, ):
"""Create sea extension object.
name string
base ((3 float), (3 float), (3 float), (3 float))
pivot ((3 float), (3 float), (3 float), (3 float))
object_props (bool hide, float opacity, tuple cid=(int, int, int), bool hcam, bool hcamsc, bool hgi, bool hrr, bool hzcp, ) or None
geometry (float reference_time,
int resolution,
float ocean_depth,
float vertical_scale,
float ocean_dim,
int ocean_seed,
bool enable_choppyness,
float choppy_factor,
bool enable_white_caps, )
wind (float ocean_wind_mod,
float ocean_wind_dir,
float ocean_wind_alignment,
float ocean_min_wave_length,
float damp_factor_against_wind, )
material (string path, bool embed) or None
backface_material (string path, bool embed) or None
"""
s = self.mxs
e = self.mgr.createDefaultGeometryLoaderExtension('MaxwellSea')
p = e.getExtensionData()
p.setFloat('Reference Time', geometry[0])
p.setUInt('Resolution', geometry[1])
p.setFloat('Ocean Depth', geometry[2])
p.setFloat('Vertical Scale', geometry[3])
p.setFloat('Ocean Dim', geometry[4])
p.setUInt('Ocean Seed', geometry[5])
p.setByte('Enable Choppyness', geometry[6])
p.setFloat('Choppy factor', geometry[7])
p.setByte('Enable White Caps', geometry[8])
p.setFloat('Ocean Wind Mod.', wind[0])
p.setFloat('Ocean Wind Dir.', wind[1])
p.setFloat('Ocean Wind Alignment', wind[2])
p.setFloat('Ocean Min. Wave Length', wind[3])
p.setFloat('Damp Factor Against Wind', wind[4])
o = s.createGeometryLoaderObject(name, p)
self.set_base_and_pivot(o, matrix, motion, )
if(object_props is not None):
self.set_object_props(o, *object_props)
if(material is not None):
if(material != ''):
mat = self.get_material(material)
o.setMaterial(mat)
if(backface_material is not None):
if(backface_material != ''):
mat = self.get_material(backface_material)
o.setBackfaceMaterial(mat)
def ext_volumetrics(self, name, properties, matrix, motion=None, object_props=None, material=None, backface_material=None, ):
"""Create Volumetrics Extension Object.
name string
properties (int type 1, float density) or (int type 2, float density, int seed, float low, float high, float detail, int octaves, float persistence)
base ((3 float), (3 float), (3 float), (3 float))
pivot ((3 float), (3 float), (3 float), (3 float))
object_props (bool hide, float opacity, tuple cid=(int, int, int), bool hcam, bool hcamsc, bool hgi, bool hrr, bool hzcp, ) or None
material (string path, bool embed) or None
backface_material (string path, bool embed) or None
"""
s = self.mxs
e = self.mgr.createDefaultGeometryProceduralExtension('MaxwellVolumetric')
p = e.getExtensionData()
d = properties
p.setByte('Create Constant Density', d[0])
p.setFloat('ConstantDensity', d[1])
if(d[0] == 2):
p.setUInt('Seed', d[2])
p.setFloat('Low value', d[3])
p.setFloat('High value', d[4])
p.setFloat('Detail', d[5])
p.setInt('Octaves', d[6])
p.setFloat('Persistance', d[7])
o = s.createGeometryProceduralObject(name, p)
self.set_base_and_pivot(o, matrix, motion, )
if(object_props is not None):
self.set_object_props(o, *object_props)
if(material is not None):
if(material != ''):
mat = self.get_material(material)
o.setMaterial(mat)
if(backface_material is not None):
if(backface_material != ''):
mat = self.get_material(backface_material)
o.setBackfaceMaterial(mat)
return o
def mod_grass(self, object_name, properties, material=None, backface_material=None, ):
"""Create grass object modifier extension.
object_name string
properties dict of many, many properties, see code..
material (string path, bool embed) or None
backface_material (string path, bool embed) or None
"""
s = self.mxs
e = self.mgr.createDefaultGeometryModifierExtension('MaxwellGrass')
p = e.getExtensionData()
if(material is not None):
mat = self.get_material(material)
if(mat is not None):
p.setString('Material', mat.getName())
if(backface_material is not None):
mat = self.get_material(backface_material)
if(mat is not None):
p.setString('Double Sided Material', mat.getName())
p.setUInt('Density', properties['density'])
self.texture_data_to_mxparams('Density Map', properties['density_map'], p, )
p.setFloat('Length', properties['length'])
self.texture_data_to_mxparams('Length Map', properties['length_map'], p, )
p.setFloat('Length Variation', properties['length_variation'])
p.setFloat('Root Width', properties['root_width'])
p.setFloat('Tip Width', properties['tip_width'])
p.setFloat('Direction Type', properties['direction_type'])
p.setFloat('Initial Angle', properties['initial_angle'])
p.setFloat('Initial Angle Variation', properties['initial_angle_variation'])
self.texture_data_to_mxparams('Initial Angle Map', properties['initial_angle_map'], p, )
p.setFloat('Start Bend', properties['start_bend'])
p.setFloat('Start Bend Variation', properties['start_bend_variation'])
self.texture_data_to_mxparams('Start Bend Map', properties['start_bend_map'], p, )
p.setFloat('Bend Radius', properties['bend_radius'])
p.setFloat('Bend Radius Variation', properties['bend_radius_variation'])
self.texture_data_to_mxparams('Bend Radius Map', properties['bend_radius_map'], p, )
p.setFloat('Bend Angle', properties['bend_angle'])
p.setFloat('Bend Angle Variation', properties['bend_angle_variation'])
self.texture_data_to_mxparams('Bend Angle Map', properties['bend_angle_map'], p, )
p.setFloat('Cut Off', properties['cut_off'])
p.setFloat('Cut Off Variation', properties['cut_off_variation'])
self.texture_data_to_mxparams('Cut Off Map', properties['cut_off_map'], p, )
p.setUInt('Points per Blade', properties['points_per_blade'])
p.setUInt('Primitive Type', properties['primitive_type'])
p.setUInt('Seed', properties['seed'])
p.setByte('Enable LOD', properties['lod'])
p.setFloat('LOD Min Distance', properties['lod_min_distance'])
p.setFloat('LOD Max Distance', properties['lod_max_distance'])
p.setFloat('LOD Max Distance Density', properties['lod_max_distance_density'])
p.setUInt('Display Percent', properties['display_percent'])
p.setUInt('Display Max. Blades', properties['display_max_blades'])
o = s.getObject(object_name)
o.applyGeometryModifierExtension(p)
return o
def mod_subdivision(self, object_name, level=2, scheme=0, interpolation=2, crease=0.0, smooth_angle=90.0, quads=None, ):
"""Create subdivision object modifier extension.
object_name string
level int
scheme int (0, "Catmull-Clark"), (1, "Loop")
interpolation int (0, "None"), (1, "Edges"), (2, "Edges And Corners"), (3, "Sharp")
crease float
smooth_angle float
quads [[int, int], ...] or None
"""
s = self.mxs
e = self.mgr.createDefaultGeometryModifierExtension('SubdivisionModifier')
p = e.getExtensionData()
p.setUInt('Subdivision Level', level)
p.setUInt('Subdivision Scheme', scheme)
p.setUInt('Interpolation', interpolation)
p.setFloat('Crease', crease)
p.setFloat('Smooth Angle', smooth_angle)
o = s.getObject(object_name)
if(scheme == 0 and quads is not None):
for t, q in quads:
o.setTriangleQuadBuddy(t, q)
o.applyGeometryModifierExtension(p)
return o
def mod_scatter(self, object_name, scatter_object, inherit_objectid=False, remove_overlapped=False, density=None, seed=0, scale=None, rotation=None, lod=None, angle=None, display_percent=10, display_max=1000, ):
"""Create scatter object modifier extension.
object_name string
scatter_object string
inherit_objectid bool
remove_overlapped bool
density (float density, density_map or None) or None
seed int
scale (float x, float y, float z, scale_map or None, float x_variation, float y_variation, float z_variation, bool uniform_scale) or None
rotation (float x, float y, float z, rotation_map or None, float x_variation, float y_variation, float z_variation, int direction_type (0, "Polygon Normal"), (1, "World Z")) or None
lod (bool enabled, float lod_min_distance, float lod_max_distance, float lod_max_distance_density) or None
angle (float direction_type, float initial_angle, float initial_angle_variation, initial_angle_map or None) or None
display_percent int
display_max int
"""
s = self.mxs
e = self.mgr.createDefaultGeometryModifierExtension('MaxwellScatter')
p = e.getExtensionData()
p.setString('Object', scatter_object)
p.setByte('Inherit ObjectID', inherit_objectid)
if(density is not None):
p.setFloat('Density', density[0])
self.texture_data_to_mxparams('Density Map', density[1], p, )
p.setUInt('Seed', seed)
p.setByte('Remove Overlapped', remove_overlapped)
if(scale is not None):
p.setFloat('Scale X', scale[0])
p.setFloat('Scale Y', scale[1])
p.setFloat('Scale Z', scale[2])
self.texture_data_to_mxparams('Scale Map', scale[3], p, )
p.setFloat('Scale X Variation', scale[4])
p.setFloat('Scale Y Variation', scale[5])
p.setFloat('Scale Z Variation', scale[6])
p.setByte('Uniform Scale', scale[7])
if(rotation is not None):
p.setFloat('Rotation X', rotation[0])
p.setFloat('Rotation Y', rotation[1])
p.setFloat('Rotation Z', rotation[2])
self.texture_data_to_mxparams('Rotation Map', rotation[3], p, )
p.setFloat('Rotation X Variation', rotation[4])
p.setFloat('Rotation Y Variation', rotation[5])
p.setFloat('Rotation Z Variation', rotation[6])
p.setUInt('Direction Type', rotation[7])
if(lod is not None):
p.setByte('Enable LOD', lod[0])
p.setFloat('LOD Min Distance', lod[1])
p.setFloat('LOD Max Distance', lod[2])
p.setFloat('LOD Max Distance Density', lod[3])
if(angle is not None):
p.setFloat('Direction Type', angle[0])
p.setFloat('Initial Angle', angle[1])
p.setFloat('Initial Angle Variation', angle[2])
self.texture_data_to_mxparams('Initial Angle Map', angle[3], p, )
p.setUInt('Display Percent', display_percent)
p.setUInt('Display Max. Blades', display_max)
o = s.getObject(object_name)
o.applyGeometryModifierExtension(p)
return o
def mod_cloner(self, object_name, cloned_object, render_emitter, pdata, radius=1.0, mb_factor=1.0, load_percent=100.0, start_offset=0, ex_npp=0, ex_p_dispersion=0.0, ex_p_deformation=0.0, align_to_velocity=False, scale_with_radius=False, inherit_obj_id=False, frame=1, fps=24.0, display_percent=10, display_max=1000, ):
"""Create cloner object modifier extension.
object_name string
cloned_object string
render_emitter bool
pdata string or dict
radius float
mb_factor float
load_percent float
start_offset int
ex_npp int
ex_p_dispersion float
ex_p_deformation float
align_to_velocity bool
scale_with_radius bool
inherit_obj_id bool
frame int
fps float
display_percent int
display_max int
"""
s = self.mxs
e = self.mgr.createDefaultGeometryModifierExtension('MaxwellCloner')
p = e.getExtensionData()
if(type(pdata) is dict):
c = Cbase()
c.origin = Cvector(0.0, 0.0, 0.0)
c.xAxis = Cvector(1.0, 0.0, 0.0)
c.yAxis = Cvector(0.0, 1.0, 0.0)
c.zAxis = Cvector(0.0, 0.0, 1.0)
p.setFloatArray('PARTICLE_POSITIONS', list(pdata['PARTICLE_POSITIONS']), c)
p.setFloatArray('PARTICLE_SPEEDS', list(pdata['PARTICLE_SPEEDS']), c)
p.setFloatArray('PARTICLE_RADII', list(pdata['PARTICLE_RADII']), c)
p.setIntArray('PARTICLE_IDS', list(pdata['PARTICLE_IDS']))
else:
p.setString('FileName', pdata)
p.setFloat('Radius Factor', radius)
p.setFloat('MB Factor', mb_factor)
p.setFloat('Load particles %', load_percent)
p.setUInt('Start offset', start_offset)
p.setUInt('Create N particles per particle', ex_npp)
p.setFloat('Extra particles dispersion', ex_p_dispersion)
p.setFloat('Extra particles deformation', ex_p_deformation)
p.setByte('Use velocity', align_to_velocity)
p.setByte('Scale with particle radius', scale_with_radius)
p.setByte('Inherit ObjectID', inherit_obj_id)
p.setInt('Frame#', frame)
p.setFloat('fps', fps)
p.setUInt('Display Percent', display_percent)
p.setUInt('Display Max. Particles', display_max)
if(not render_emitter):
o = s.getObject(object_name)
o.setHide(True)
o = s.getObject(cloned_object)
o.applyGeometryModifierExtension(p)
return o
def wireframe_override_object_materials(self, clay_mat_name, wire_base_name, ):
s = self.mxs
it = CmaxwellObjectIterator()
o = it.first(s)
l = []
while not o.isNull():
name, _ = o.getName()
l.append(name)
o = it.next()
for n in l:
o = s.getObject(n)
# do not set material to wire base
if(o.getName()[0] != wire_base_name):
if(o.isInstance()[0] == 1):
instanced = o.getInstanced()
# do not set material to wire base instances
if(instanced.getName()[0] != wire_base_name):
o.setMaterial(clay_mat_name)
else:
o.setMaterial(clay_mat_name)
def wireframe_zero_scale_base(self, wire_base_name):
s = self.mxs
o = s.getObject(wire_base_name)
z = (0.0, 0.0, 0.0)
b = Cbase()
b.origin = Cvector(*z)
b.xAxis = Cvector(*z)
b.yAxis = Cvector(*z)
b.zAxis = Cvector(*z)
p = Cbase()
p.origin = Cvector(*z)
p.xAxis = Cvector(1.0, 0.0, 0.0)
p.yAxis = Cvector(0.0, 1.0, 0.0)
p.zAxis = Cvector(0.0, 0.0, 1.0)
o.setBaseAndPivot(b, p)
o.setScale(Cvector(0, 0, 0))
class MXMWriter():
def __init__(self, path, data, ):
"""Create Extension MXM.
path string (path)
data dict
"""
if(__name__ != "__main__"):
if(platform.system() == 'Darwin'):
raise ImportError("No pymaxwell for Mac OS X..")
log(self.__class__.__name__, 1, LogStyles.MESSAGE, prefix="* ", )
self.path = path
self.mxs = Cmaxwell(mwcallback)
self.mgr = CextensionManager.instance()
self.mgr.loadAllExtensions()
mat = self.material(data)
if(mat is not None):
log("writing to: {}".format(self.path), 2, prefix="* ", )
mat.write(path)
log("done.", 2, prefix="* ", )
else:
raise RuntimeError("Material could not be created from the provided data..")
def texture_data_to_mxparams(self, name, data, mxparams, ):
"""Create CtextureMap, fill with parameters and put into mxparams.
name string
data dict {'type': string,
'path': string,
'channel': int,
'use_global_map': bool,
'tile_method_type': [bool, bool],
'tile_method_units': int,
'repeat': [float, float],
'mirror': [bool, bool],
'offset': [float, float],
'rotation': float,
'invert': bool,
'alpha_only': bool,
'interpolation': bool,
'normal_mapping_flip_red': bool,
'normal_mapping_flip_green': bool,
'normal_mapping_full_range_blue': bool,
'brightness': float,
'contrast': float,
'saturation': float,
'hue': float,
'rgb_clamp': [float, float], }
mxparams mxparams
"""
d = data
if(d is None):
return
t = CtextureMap()
t.setPath(d['path'])
v = Cvector2D()
v.assign(*d['repeat'])
t.scale = v
v = Cvector2D()
v.assign(*d['offset'])
t.offset = v
t.rotation = d['rotation']
t.uvwChannelID = d['channel']
t.uIsTiled = d['tile_method_type'][0]
t.vIsTiled = d['tile_method_type'][1]
t.uIsMirrored = d['mirror'][0]
t.vIsMirrored = d['mirror'][1]
t.invert = d['invert']
# t.doGammaCorrection = 0
t.useAbsoluteUnits = d['tile_method_units']
t.normalMappingFlipRed = d['normal_mapping_flip_red']
t.normalMappingFlipGreen = d['normal_mapping_flip_green']
t.normalMappingFullRangeBlue = d['normal_mapping_full_range_blue']
t.useAlpha = d['alpha_only']
t.typeInterpolation = d['interpolation']
t.saturation = d['saturation'] / 100
t.contrast = d['contrast'] / 100
t.brightness = d['brightness'] / 100
t.hue = d['hue'] / 180
t.clampMin = d['rgb_clamp'][0] / 255
t.clampMax = d['rgb_clamp'][1] / 255
t.useGlobalMap = d['use_global_map']
# t.cosA = 1.000000
# t.sinA = 0.000000
ok = mxparams.setTextureMap(name, t)
return mxparams
def texture(self, d, ):
"""Create CtextureMap from parameters
d dict (same keys as texture_data_to_mxparams data, plus 'procedural' list of procedural texture dicts)
"""
s = self.mxs
t = CtextureMap()
t.setPath(d['path'])
t.uvwChannelID = d['channel']
t.brightness = d['brightness'] / 100
t.contrast = d['contrast'] / 100
t.saturation = d['saturation'] / 100
t.hue = d['hue'] / 180
t.useGlobalMap = d['use_global_map']
t.useAbsoluteUnits = d['tile_method_units']
t.uIsTiled = d['tile_method_type'][0]
t.vIsTiled = d['tile_method_type'][1]
t.uIsMirrored = d['mirror'][0]
t.vIsMirrored = d['mirror'][1]
vec = Cvector2D()
vec.assign(d['offset'][0], d['offset'][1])
t.offset = vec
t.rotation = d['rotation']
t.invert = d['invert']
t.useAlpha = d['alpha_only']
if(d['interpolation']):
t.typeInterpolation = 1
else:
t.typeInterpolation = 0
t.clampMin = d['rgb_clamp'][0] / 255
t.clampMax = d['rgb_clamp'][1] / 255
vec = Cvector2D()
vec.assign(d['repeat'][0], d['repeat'][1])
t.scale = vec
t.normalMappingFlipRed = d['normal_mapping_flip_red']
t.normalMappingFlipGreen = d['normal_mapping_flip_green']
t.normalMappingFullRangeBlue = d['normal_mapping_full_range_blue']
for i, pt in enumerate(d['procedural']):
if(pt['use'] == 'BRICK'):
e = self.mgr.createDefaultTextureExtension('Brick')
p = e.getExtensionData()
p.setFloat('Blend procedural', pt['blending_factor'])
p.setFloat('Brick width', pt['brick_brick_width'])
p.setFloat('Brick height', pt['brick_brick_height'])
p.setInt('Brick offset', pt['brick_brick_offset'])
p.setInt('Random offset', pt['brick_random_offset'])
p.setByte('Double brick', pt['brick_double_brick'])
p.setFloat('Small brick width', pt['brick_small_brick_width'])
p.setByte('Round corners', pt['brick_round_corners'])
p.setFloat('Boundary sharpness U', pt['brick_boundary_sharpness_u'])
p.setFloat('Boundary sharpness V', pt['brick_boundary_sharpness_v'])
p.setInt('Boundary noise detail', pt['brick_boundary_noise_detail'])
p.setFloat('Boundary noise region U', pt['brick_boundary_noise_region_u'])
p.setFloat('Boundary noise region V', pt['brick_boundary_noise_region_v'])
p.setUInt('Seed', pt['brick_seed'])
p.setByte('Random rotation', pt['brick_random_rotation'])
p.setInt('Color variation', pt['brick_color_variation'])
c = Crgb()
c.assign(*pt['brick_brick_color_0'])
p.setRgb('Brick color 0', c)
self.texture_data_to_mxparams('Brick texture 0', pt['brick_brick_texture_0'], p, )
p.setInt('Sampling factor 0', pt['brick_sampling_factor_0'])
p.setInt('Weight 0', pt['brick_weight_0'])
c = Crgb()
c.assign(*pt['brick_brick_color_1'])
p.setRgb('Brick color 1', c)
self.texture_data_to_mxparams('Brick texture 1', pt['brick_brick_texture_1'], p, )
p.setInt('Sampling factor 1', pt['brick_sampling_factor_1'])
p.setInt('Weight 1', pt['brick_weight_1'])
c = Crgb()
c.assign(*pt['brick_brick_color_2'])
p.setRgb('Brick color 2', c)
self.texture_data_to_mxparams('Brick texture 2', pt['brick_brick_texture_2'], p, )
p.setInt('Sampling factor 2', pt['brick_sampling_factor_2'])
p.setInt('Weight 2', pt['brick_weight_2'])
p.setFloat('Mortar thickness', pt['brick_mortar_thickness'])
c = Crgb()
c.assign(*pt['brick_mortar_color'])
p.setRgb('Mortar color', c)
self.texture_data_to_mxparams('Mortar texture', pt['brick_mortar_texture'], p, )
t.addProceduralTexture(p)
elif(pt['use'] == 'CHECKER'):
e = self.mgr.createDefaultTextureExtension('Checker')
p = e.getExtensionData()
p.setFloat('Blend procedural', pt['blending_factor'])
c = Crgb()
c.assign(*pt['checker_color_0'])
p.setRgb('Color0', c)
c = Crgb()
c.assign(*pt['checker_color_1'])
p.setRgb('Color1', c)
p.setUInt('Number of elements U', pt['checker_number_of_elements_u'])
p.setUInt('Number of elements V', pt['checker_number_of_elements_v'])
p.setFloat('Transition sharpness', pt['checker_transition_sharpness'])
p.setUInt('Fall-off', pt['checker_falloff'])
t.addProceduralTexture(p)
elif(pt['use'] == 'CIRCLE'):
e = self.mgr.createDefaultTextureExtension('Circle')
p = e.getExtensionData()
p.setFloat('Blend procedural', pt['blending_factor'])
c = Crgb()
c.assign(*pt['circle_background_color'])
p.setRgb('Background color', c)
c = Crgb()
c.assign(*pt['circle_circle_color'])
p.setRgb('Circle color', c)
p.setFloat('RadiusU', pt['circle_radius_u'])
p.setFloat('RadiusV', pt['circle_radius_v'])
p.setFloat('Transition factor', pt['circle_transition_factor'])
p.setUInt('Fall-off', pt['circle_falloff'])
t.addProceduralTexture(p)
elif(pt['use'] == 'GRADIENT3'):
e = self.mgr.createDefaultTextureExtension('Gradient3')
p = e.getExtensionData()
p.setFloat('Blend procedural', pt['blending_factor'])
p.setByte('Gradient U', pt['gradient3_gradient_u'])
c = Crgb()
c.assign(*pt['gradient3_color0_u'])
p.setRgb('Color0 U', c)
c = Crgb()
c.assign(*pt['gradient3_color1_u'])
p.setRgb('Color1 U', c)
c = Crgb()
c.assign(*pt['gradient3_color2_u'])
p.setRgb('Color2 U', c)
p.setUInt('Gradient type U', pt['gradient3_gradient_type_u'])
p.setFloat('Color1 U position', pt['gradient3_color1_u_position'])
p.setByte('Gradient V', pt['gradient3_gradient_v'])
c = Crgb()
c.assign(*pt['gradient3_color0_v'])
p.setRgb('Color0 V', c)
c = Crgb()
c.assign(*pt['gradient3_color1_v'])
p.setRgb('Color1 V', c)
c = Crgb()
c.assign(*pt['gradient3_color2_v'])
p.setRgb('Color2 V', c)
p.setUInt('Gradient type V', pt['gradient3_gradient_type_v'])
p.setFloat('Color1 V position', pt['gradient3_color1_v_position'])
t.addProceduralTexture(p)
elif(pt['use'] == 'GRADIENT'):
e = self.mgr.createDefaultTextureExtension('Gradient')
p = e.getExtensionData()
p.setFloat('Blend procedural', pt['blending_factor'])
p.setByte('Gradient U', pt['gradient_gradient_u'])
c = Crgb()
c.assign(*pt['gradient_color0_u'])
p.setRgb('Color0 U', c)
c = Crgb()
c.assign(*pt['gradient_color1_u'])
p.setRgb('Color1 U', c)
p.setUInt('Gradient type U', pt['gradient_gradient_type_u'])
p.setFloat('Transition factor U', pt['gradient_transition_factor_u'])
p.setByte('Gradient V', pt['gradient_gradient_v'])
c = Crgb()
c.assign(*pt['gradient_color0_v'])
p.setRgb('Color0 V', c)
c = Crgb()
c.assign(*pt['gradient_color1_v'])
p.setRgb('Color1 V', c)
p.setUInt('Gradient type V', pt['gradient_gradient_type_v'])
p.setFloat('Transition factor V', pt['gradient_transition_factor_v'])
t.addProceduralTexture(p)
elif(pt['use'] == 'GRID'):
e = self.mgr.createDefaultTextureExtension('Grid')
p = e.getExtensionData()
p.setFloat('Blend procedural', pt['blending_factor'])
c = Crgb()
c.assign(*pt['grid_boundary_color'])
p.setRgb('Boundary color', c)
c = Crgb()
c.assign(*pt['grid_cell_color'])
p.setRgb('Cell color', c)
p.setFloat('Cell width', pt['grid_cell_width'])
p.setFloat('Cell height', pt['grid_cell_height'])
if(pt['grid_horizontal_lines']):
p.setFloat('Boundary thickness U', pt['grid_boundary_thickness_u'])
else:
p.setFloat('Boundary thickness U', 0.0)
if(pt['grid_vertical_lines']):
p.setFloat('Boundary thickness V', pt['grid_boundary_thickness_v'])
else:
p.setFloat('Boundary thickness V', 0.0)
p.setFloat('Transition sharpness', pt['grid_transition_sharpness'])
p.setUInt('Fall-off', pt['grid_falloff'])
t.addProceduralTexture(p)
elif(pt['use'] == 'MARBLE'):
e = self.mgr.createDefaultTextureExtension('Marble')
p = e.getExtensionData()
p.setFloat('Blend procedural', pt['blending_factor'])
p.setUInt('Coordinates type', pt['marble_coordinates_type'])
c = Crgb()
c.assign(*pt['marble_color0'])
p.setRgb('Color0', c)
c = Crgb()
c.assign(*pt['marble_color1'])
p.setRgb('Color1', c)
c = Crgb()
c.assign(*pt['marble_color2'])
p.setRgb('Color2', c)
p.setFloat('Frequency', pt['marble_frequency'])
p.setFloat('Detail', pt['marble_detail'])
p.setInt('Octaves', pt['marble_octaves'])
p.setUInt('Seed', pt['marble_seed'])
t.addProceduralTexture(p)
elif(pt['use'] == 'NOISE'):
e = self.mgr.createDefaultTextureExtension('Noise')
p = e.getExtensionData()
p.setFloat('Blend procedural', pt['blending_factor'])
p.setUInt('Coordinates type', pt['noise_coordinates_type'])
c = Crgb()
c.assign(*pt['noise_noise_color'])
p.setRgb('Noise color', c)
c = Crgb()
c.assign(*pt['noise_background_color'])
p.setRgb('Background color', c)
p.setFloat('Detail', pt['noise_detail'])
p.setFloat('Persistance', pt['noise_persistance'])
p.setInt('Octaves', pt['noise_octaves'])
p.setFloat('Low value', pt['noise_low_value'])
p.setFloat('High value', pt['noise_high_value'])
p.setUInt('Seed', pt['noise_seed'])
t.addProceduralTexture(p)
elif(pt['use'] == 'VORONOI'):
e = self.mgr.createDefaultTextureExtension('Voronoi')
p = e.getExtensionData()
p.setFloat('Blend procedural', pt['blending_factor'])
p.setUInt('Coordinates type', pt['voronoi_coordinates_type'])
c = Crgb()
c.assign(*pt['voronoi_color0'])
p.setRgb('Color0', c)
c = Crgb()
c.assign(*pt['voronoi_color1'])
p.setRgb('Color1', c)
p.setInt('Detail', pt['voronoi_detail'])
p.setUInt('Distance', pt['voronoi_distance'])
p.setUInt('Combination', pt['voronoi_combination'])
p.setFloat('Low value', pt['voronoi_low_value'])
p.setFloat('High value', pt['voronoi_high_value'])
p.setUInt('Seed', pt['voronoi_seed'])
t.addProceduralTexture(p)
elif(pt['use'] == 'TILED'):
e = self.mgr.createDefaultTextureExtension('TiledTexture')
p = e.getExtensionData()
p.setFloat('Blend factor', pt['blending_factor'])
c = Crgb()
c.assign(*pt['tiled_base_color'])
p.setRgb('Base Color', c)
p.setByte('Use base color', pt['tiled_use_base_color'])
p.setString('Filename_mask', pt['tiled_token_mask'])
p.setString('Filename', pt['tiled_filename'])
# 'Map U tile range' UCHAR
# 'Map V tile range' UCHAR
t.addProceduralTexture(p)
elif(pt['use'] == 'WIREFRAME'):
e = self.mgr.createDefaultTextureExtension('WireframeTexture')
p = e.getExtensionData()
c = Crgb()
c.assign(*pt['wireframe_fill_color'])
p.setRgb('Fill Color', c)
c = Crgb()
c.assign(*pt['wireframe_edge_color'])
p.setRgb('Edge Color', c)
c = Crgb()
c.assign(*pt['wireframe_coplanar_edge_color'])
p.setRgb('Coplanar Edge Color', c)
p.setFloat('Edge Width', pt['wireframe_edge_width'])
p.setFloat('Coplanar Edge Width', pt['wireframe_coplanar_edge_width'])
p.setFloat('Coplanar Threshold', pt['wireframe_coplanar_threshold'])
t.addProceduralTexture(p)
else:
raise TypeError("{0} is unknown procedural texture type".format(pt['use']))
return t
def material_placeholder(self, n=None, ):
if(n is None):
n = 'MATERIAL_PLACEHOLDER'
s = self.mxs
m = s.createMaterial(n)
l = m.addLayer()
b = l.addBSDF()
r = b.getReflectance()
a = Cattribute()
a.activeType = MAP_TYPE_BITMAP
t = CtextureMap()
mgr = CextensionManager.instance()
mgr.loadAllExtensions()
e = mgr.createDefaultTextureExtension('Checker')
ch = e.getExtensionData()
ch.setUInt('Number of elements U', 32)
ch.setUInt('Number of elements V', 32)
t.addProceduralTexture(ch)
a.textureMap = t
r.setAttribute('color', a)
return m
def material_default(self, n, ):
s = self.mxs
m = s.createMaterial(n)
l = m.addLayer()
b = l.addBSDF()
return m
def material_external(self, d, ):
s = self.mxs
p = d['path']
t = s.readMaterial(p)
t.setName(d['name'])
m = s.addMaterial(t)
if(not d['embed']):
m.setReference(1, p)
return m
def material_custom(self, d, ):
s = self.mxs
m = s.createMaterial(d['name'])
d = d['data']
def global_props(d, m):
# global properties
if(d['override_map']):
t = self.texture(d['override_map'])
if(t is not None):
m.setGlobalMap(t)
if(d['bump_map_enabled']):
a = Cattribute()
a.activeType = MAP_TYPE_BITMAP
t = self.texture(d['bump_map'])
if(t is not None):
a.textureMap = t
if(d['bump_map_use_normal']):
a.value = d['bump_normal']
else:
a.value = d['bump']
m.setAttribute('bump', a)
m.setNormalMapState(d['bump_map_use_normal'])
m.setDispersion(d['dispersion'])
m.setMatteShadow(d['shadow'])
m.setMatte(d['matte'])
m.setNestedPriority(d['priority'])
c = Crgb()
c.assign(*d['id'])
m.setColorID(c)
if(d['active_display_map']):
t = self.texture(d['active_display_map'])
m.setActiveDisplayMap(t)
def displacement(d, m):
if(not d['enabled']):
return
m.enableDisplacement(True)
if(d['map'] is not None):
t = self.texture(d['map'])
m.setDisplacementMap(t)
m.setDisplacementCommonParameters(d['type'], d['subdivision'], int(d['smoothing']), d['offset'], d['subdivision_method'], d['uv_interpolation'], )
m.setHeightMapDisplacementParameters(d['height'], d['height_units'], d['adaptive'], )
v = Cvector(*d['v3d_scale'])
m.setVectorDisplacementParameters(v, d['v3d_transform'], d['v3d_rgb_mapping'], d['v3d_preset'], )
def add_bsdf(d, l):
b = l.addBSDF()
b.setName(d['name'])
bp = d['bsdf_props']
# weight
if(bp['weight_map_enabled']):
a = Cattribute()
a.activeType = MAP_TYPE_BITMAP
t = self.texture(bp['weight_map'])
if(t is not None):
a.textureMap = t
a.value = bp['weight']
else:
a = Cattribute()
a.activeType = MAP_TYPE_VALUE
a.value = bp['weight']
b.setWeight(a)
# enabled
if(not bp['visible']):
b.setState(False)
# ior
r = b.getReflectance()
if(bp['ior'] == 1):
# measured data
r.setActiveIorMode(1)
r.setComplexIor(bp['complex_ior'])
else:
if(bp['reflectance_0_map_enabled']):
a = Cattribute()
a.activeType = MAP_TYPE_BITMAP
t = self.texture(bp['reflectance_0_map'])
if(t is not None):
a.textureMap = t
a.rgb.assign(*bp['reflectance_0'])
else:
a = Cattribute()
a.activeType = MAP_TYPE_RGB
a.rgb.assign(*bp['reflectance_0'])
r.setAttribute('color', a)
if(bp['reflectance_90_map_enabled']):
a = Cattribute()
a.activeType = MAP_TYPE_BITMAP
t = self.texture(bp['reflectance_90_map'])
if(t is not None):
a.textureMap = t
a.rgb.assign(*bp['reflectance_90'])
else:
a = Cattribute()
a.activeType = MAP_TYPE_RGB
a.rgb.assign(*bp['reflectance_90'])
r.setAttribute('color.tangential', a)
if(bp['transmittance_map_enabled']):
a = Cattribute()
a.activeType = MAP_TYPE_BITMAP
t = self.texture(bp['transmittance_map'])
if(t is not None):
a.textureMap = t
a.rgb.assign(*bp['transmittance'])
else:
a = Cattribute()
a.activeType = MAP_TYPE_RGB
a.rgb.assign(*bp['transmittance'])
r.setAttribute('transmittance.color', a)
r.setAbsorptionDistance(bp['attenuation_units'], bp['attenuation'])
r.setIOR(bp['nd'], bp['abbe'])
if(bp['force_fresnel']):
r.enableForceFresnel(True)
r.setConductor(bp['k'])
if(bp['r2_enabled']):
r.setFresnelCustom(bp['r2_falloff_angle'], bp['r2_influence'], True, )
# surface
if(bp['roughness_map_enabled']):
a = Cattribute()
a.activeType = MAP_TYPE_BITMAP
t = self.texture(bp['roughness_map'])
if(t is not None):
a.textureMap = t
a.value = bp['roughness']
else:
a = Cattribute()
a.activeType = MAP_TYPE_VALUE
a.value = bp['roughness']
b.setAttribute('roughness', a)
if(bp['bump_map_enabled']):
a = Cattribute()
a.activeType = MAP_TYPE_BITMAP
t = self.texture(bp['bump_map'])
if(t is not None):
a.textureMap = t
if(bp['bump_map_use_normal']):
a.value = bp['bump_normal']
else:
a.value = bp['bump']
else:
a = Cattribute()
a.activeType = MAP_TYPE_VALUE
if(bp['bump_map_use_normal']):
a.value = bp['bump_normal']
else:
a.value = bp['bump']
b.setAttribute('bump', a)
b.setNormalMapState(bp['bump_map_use_normal'])
if(bp['anisotropy_map_enabled']):
a = Cattribute()
a.activeType = MAP_TYPE_BITMAP
t = self.texture(bp['anisotropy_map'])
if(t is not None):
a.textureMap = t
a.value = bp['anisotropy']
else:
a = Cattribute()
a.activeType = MAP_TYPE_VALUE
a.value = bp['anisotropy']
b.setAttribute('anisotropy', a)
if(bp['anisotropy_angle_map_enabled']):
a = Cattribute()
a.activeType = MAP_TYPE_BITMAP
t = self.texture(bp['anisotropy_angle_map'])
if(t is not None):
a.textureMap = t
a.value = bp['anisotropy_angle']
else:
a = Cattribute()
a.activeType = MAP_TYPE_VALUE
a.value = bp['anisotropy_angle']
b.setAttribute('angle', a)
# subsurface
a = Cattribute()
a.activeType = MAP_TYPE_RGB
a.rgb.assign(*bp['scattering'])
r.setAttribute('scattering', a)
r.setScatteringParameters(bp['coef'], bp['asymmetry'], bp['single_sided'])
if(bp['single_sided']):
if(bp['single_sided_map_enabled']):
a = Cattribute()
a.activeType = MAP_TYPE_BITMAP
t = self.texture(bp['single_sided_map'])
if(t is not None):
a.textureMap = t
a.value = bp['single_sided_value']
else:
a = Cattribute()
a.activeType = MAP_TYPE_VALUE
a.value = bp['single_sided_value']
r.setScatteringThickness(a)
r.setScatteringThicknessRange(bp['single_sided_min'], bp['single_sided_max'])
# coating
cp = d['coating']
if(cp['enabled']):
c = b.addCoating()
if(cp['thickness_map_enabled']):
a = Cattribute()
a.activeType = MAP_TYPE_BITMAP
t = self.texture(cp['thickness_map'])
if(t is not None):
a.textureMap = t
a.value = cp['thickness']
else:
a = Cattribute()
a.activeType = MAP_TYPE_VALUE
a.value = cp['thickness']
c.setThickness(a)
c.setThicknessRange(cp['thickness_map_min'], cp['thickness_map_max'])
r = c.getReflectance()
if(cp['ior'] == 1):
# measured data
r.setActiveIorMode(1)
r.setComplexIor(cp['complex_ior'])
else:
if(cp['reflectance_0_map_enabled']):
a = Cattribute()
a.activeType = MAP_TYPE_BITMAP
t = self.texture(cp['reflectance_0_map'])
if(t is not None):
a.textureMap = t
a.rgb.assign(*cp['reflectance_0'])
else:
a = Cattribute()
a.activeType = MAP_TYPE_RGB
a.rgb.assign(*cp['reflectance_0'])
r.setAttribute('color', a)
if(cp['reflectance_90_map_enabled']):
a = Cattribute()
a.activeType = MAP_TYPE_BITMAP
t = self.texture(cp['reflectance_90_map'])
if(t is not None):
a.textureMap = t
a.rgb.assign(*cp['reflectance_90'])
else:
a = Cattribute()
a.activeType = MAP_TYPE_RGB
a.rgb.assign(*cp['reflectance_90'])
r.setAttribute('color.tangential', a)
r.setIOR(cp['nd'], 1.0, )
if(cp['force_fresnel']):
r.enableForceFresnel(True)
r.setConductor(cp['k'])
if(cp['r2_enabled']):
r.setFresnelCustom(cp['r2_falloff_angle'], 0.0, True, )
def add_emitter(d, l):
e = l.createEmitter()
if(d['type'] == 0):
e.setLobeType(EMISSION_LOBE_DEFAULT)
elif(d['type'] == 1):
e.setLobeType(EMISSION_LOBE_IES)
e.setLobeIES(d['ies_data'])
e.setIESLobeIntensity(d['ies_intensity'])
elif(d['type'] == 2):
e.setLobeType(EMISSION_LOBE_SPOTLIGHT)
if(d['spot_map'] is not None):
t = self.texture(d['spot_map'])
if(t is not None):
e.setLobeImageProjectedMap(d['spot_map_enabled'], t)
e.setSpotConeAngle(d['spot_cone_angle'])
e.setSpotFallOffAngle(d['spot_falloff_angle'])
e.setSpotFallOffType(d['spot_falloff_type'])
e.setSpotBlur(d['spot_blur'])
if(d['emission'] == 0):
e.setActiveEmissionType(EMISSION_TYPE_PAIR)
ep = CemitterPair()
c = Crgb()
c.assign(*d['color'])
ep.rgb.assign(c)
ep.temperature = d['color_black_body']
ep.watts = d['luminance_power']
ep.luminousEfficacy = d['luminance_efficacy']
ep.luminousPower = d['luminance_output']
ep.illuminance = d['luminance_output']
ep.luminousIntensity = d['luminance_output']
ep.luminance = d['luminance_output']
e.setPair(ep)
if(d['luminance'] == 0):
u = EMISSION_UNITS_WATTS_AND_LUMINOUS_EFFICACY
elif(d['luminance'] == 1):
u = EMISSION_UNITS_LUMINOUS_POWER
elif(d['luminance'] == 2):
u = EMISSION_UNITS_ILLUMINANCE
elif(d['luminance'] == 3):
u = EMISSION_UNITS_LUMINOUS_INTENSITY
elif(d['luminance'] == 4):
u = EMISSION_UNITS_LUMINANCE
if(d['color_black_body_enabled']):
e.setActivePair(EMISSION_COLOR_TEMPERATURE, u)
else:
e.setActivePair(EMISSION_RGB, u)
elif(d['emission'] == 1):
e.setActiveEmissionType(EMISSION_TYPE_TEMPERATURE)
e.setTemperature(d['temperature_value'])
elif(d['emission'] == 2):
e.setActiveEmissionType(EMISSION_TYPE_MXI)
a = Cattribute()
a.activeType = MAP_TYPE_BITMAP
t = self.texture(d['hdr_map'])
if(t is not None):
a.textureMap = t
a.value = d['hdr_intensity']
e.setMXI(a)
e.setState(True)
def add_layer(d, m):
l = m.addLayer()
l.setName(d['name'])
lpd = d['layer_props']
if(not lpd['visible']):
l.setEnabled(False)
if(lpd['blending'] == 1):
l.setStackedBlendingMode(1)
if(lpd['opacity_map_enabled']):
a = Cattribute()
a.activeType = MAP_TYPE_BITMAP
t = self.texture(lpd['opacity_map'])
if(t is not None):
a.textureMap = t
a.value = lpd['opacity']
else:
a = Cattribute()
a.activeType = MAP_TYPE_VALUE
a.value = lpd['opacity']
l.setAttribute('weight', a)
epd = d['emitter']
if(epd['enabled']):
add_emitter(epd, l)
for b in d['bsdfs']:
add_bsdf(b, l)
global_props(d['global_props'], m)
displacement(d['displacement'], m)
for layer in d['layers']:
add_layer(layer, m)
return m
def material(self, d, ):
s = self.mxs
if(d['subtype'] == 'EXTERNAL'):
if(d['path'] == ''):
m = self.material_placeholder(d['name'])
else:
m = self.material_external(d)
if(d['override']):
# global properties
if(d['override_map']):
t = self.texture(d['override_map'])
if(t is not None):
m.setGlobalMap(t)
if(d['bump_map_enabled']):
a = Cattribute()
a.activeType = MAP_TYPE_BITMAP
t = self.texture(d['bump_map'])
if(t is not None):
a.textureMap = t
if(d['bump_map_use_normal']):
a.value = d['bump_normal']
else:
a.value = d['bump']
m.setAttribute('bump', a)
m.setNormalMapState(d['bump_map_use_normal'])
m.setDispersion(d['dispersion'])
m.setMatteShadow(d['shadow'])
m.setMatte(d['matte'])
m.setNestedPriority(d['priority'])
c = Crgb()
c.assign(*d['id'])
m.setColorID(c)
elif(d['subtype'] == 'EXTENSION'):
if(d['use'] == 'EMITTER'):
m = s.createMaterial(d['name'])
l = m.addLayer()
e = l.createEmitter()
if(d['emitter_type'] == 0):
e.setLobeType(EMISSION_LOBE_DEFAULT)
elif(d['emitter_type'] == 1):
e.setLobeType(EMISSION_LOBE_IES)
e.setLobeIES(d['emitter_ies_data'])
e.setIESLobeIntensity(d['emitter_ies_intensity'])
elif(d['emitter_type'] == 2):
e.setLobeType(EMISSION_LOBE_SPOTLIGHT)
if(d['emitter_spot_map'] is not None):
t = self.texture(d['emitter_spot_map'])
if(t is not None):
e.setLobeImageProjectedMap(d['emitter_spot_map_enabled'], t)
e.setSpotConeAngle(d['emitter_spot_cone_angle'])
e.setSpotFallOffAngle(d['emitter_spot_falloff_angle'])
e.setSpotFallOffType(d['emitter_spot_falloff_type'])
e.setSpotBlur(d['emitter_spot_blur'])
if(d['emitter_emission'] == 0):
e.setActiveEmissionType(EMISSION_TYPE_PAIR)
ep = CemitterPair()
c = Crgb()
c.assign(*d['emitter_color'])
ep.rgb.assign(c)
ep.temperature = d['emitter_color_black_body']
ep.watts = d['emitter_luminance_power']
ep.luminousEfficacy = d['emitter_luminance_efficacy']
ep.luminousPower = d['emitter_luminance_output']
ep.illuminance = d['emitter_luminance_output']
ep.luminousIntensity = d['emitter_luminance_output']
ep.luminance = d['emitter_luminance_output']
e.setPair(ep)
if(d['emitter_luminance'] == 0):
u = EMISSION_UNITS_WATTS_AND_LUMINOUS_EFFICACY
elif(d['emitter_luminance'] == 1):
u = EMISSION_UNITS_LUMINOUS_POWER
elif(d['emitter_luminance'] == 2):
u = EMISSION_UNITS_ILLUMINANCE
elif(d['emitter_luminance'] == 3):
u = EMISSION_UNITS_LUMINOUS_INTENSITY
elif(d['emitter_luminance'] == 4):
u = EMISSION_UNITS_LUMINANCE
if(d['emitter_color_black_body_enabled']):
e.setActivePair(EMISSION_COLOR_TEMPERATURE, u)
else:
e.setActivePair(EMISSION_RGB, u)
elif(d['emitter_emission'] == 1):
e.setActiveEmissionType(EMISSION_TYPE_TEMPERATURE)
e.setTemperature(d['emitter_temperature_value'])
elif(d['emitter_emission'] == 2):
e.setActiveEmissionType(EMISSION_TYPE_MXI)
a = Cattribute()
a.activeType = MAP_TYPE_BITMAP
t = self.texture(d['emitter_hdr_map'])
if(t is not None):
a.textureMap = t
a.value = d['emitter_hdr_intensity']
e.setMXI(a)
e.setState(True)
def global_props(d, m):
# global properties
if(d['override_map']):
t = self.texture(d['override_map'])
if(t is not None):
m.setGlobalMap(t)
if(d['bump_map_enabled']):
a = Cattribute()
a.activeType = MAP_TYPE_BITMAP
t = self.texture(d['bump_map'])
if(t is not None):
a.textureMap = t
if(d['bump_map_use_normal']):
a.value = d['bump_normal']
else:
a.value = d['bump']
m.setAttribute('bump', a)
m.setNormalMapState(d['bump_map_use_normal'])
m.setDispersion(d['dispersion'])
m.setMatteShadow(d['shadow'])
m.setMatte(d['matte'])
m.setNestedPriority(d['priority'])
c = Crgb()
c.assign(*d['id'])
m.setColorID(c)
if(d['active_display_map']):
t = self.texture(d['active_display_map'])
m.setActiveDisplayMap(t)
global_props(d, m)
else:
m = CextensionManager.instance()
m.loadAllExtensions()
if(d['use'] == 'AGS'):
e = m.createDefaultMaterialModifierExtension('AGS')
p = e.getExtensionData()
c = Crgb()
c.assign(*d['ags_color'])
p.setRgb('Color', c)
p.setFloat('Reflection', d['ags_reflection'])
p.setUInt('Type', d['ags_type'])
elif(d['use'] == 'OPAQUE'):
e = m.createDefaultMaterialModifierExtension('Opaque')
p = e.getExtensionData()
p.setByte('Color Type', d['opaque_color_type'])
c = Crgb()
c.assign(*d['opaque_color'])
p.setRgb('Color', c)
self.texture_data_to_mxparams('Color Map', d['opaque_color_map'], p, )
p.setByte('Shininess Type', d['opaque_shininess_type'])
p.setFloat('Shininess', d['opaque_shininess'])
self.texture_data_to_mxparams('Shininess Map', d['opaque_shininess_map'], p, )
p.setByte('Roughness Type', d['opaque_roughness_type'])
p.setFloat('Roughness', d['opaque_roughness'])
self.texture_data_to_mxparams('Roughness Map', d['opaque_roughness_map'], p, )
p.setByte('Clearcoat', d['opaque_clearcoat'])
elif(d['use'] == 'TRANSPARENT'):
e = m.createDefaultMaterialModifierExtension('Transparent')
p = e.getExtensionData()
p.setByte('Color Type', d['transparent_color_type'])
c = Crgb()
c.assign(*d['transparent_color'])
p.setRgb('Color', c)
self.texture_data_to_mxparams('Color Map', d['transparent_color_map'], p, )
p.setFloat('Ior', d['transparent_ior'])
p.setFloat('Transparency', d['transparent_transparency'])
p.setByte('Roughness Type', d['transparent_roughness_type'])
p.setFloat('Roughness', d['transparent_roughness'])
self.texture_data_to_mxparams('Roughness Map', d['transparent_roughness_map'], p, )
p.setFloat('Specular Tint', d['transparent_specular_tint'])
p.setFloat('Dispersion', d['transparent_dispersion'])
p.setByte('Clearcoat', d['transparent_clearcoat'])
elif(d['use'] == 'METAL'):
e = m.createDefaultMaterialModifierExtension('Metal')
p = e.getExtensionData()
p.setUInt('IOR', d['metal_ior'])
p.setFloat('Tint', d['metal_tint'])
p.setByte('Color Type', d['metal_color_type'])
c = Crgb()
c.assign(*d['metal_color'])
p.setRgb('Color', c)
self.texture_data_to_mxparams('Color Map', d['metal_color_map'], p, )
p.setByte('Roughness Type', d['metal_roughness_type'])
p.setFloat('Roughness', d['metal_roughness'])
self.texture_data_to_mxparams('Roughness Map', d['metal_roughness_map'], p, )
p.setByte('Anisotropy Type', d['metal_anisotropy_type'])
p.setFloat('Anisotropy', d['metal_anisotropy'])
self.texture_data_to_mxparams('Anisotropy Map', d['metal_anisotropy_map'], p, )
p.setByte('Angle Type', d['metal_angle_type'])
p.setFloat('Angle', d['metal_angle'])
self.texture_data_to_mxparams('Angle Map', d['metal_angle_map'], p, )
p.setByte('Dust Type', d['metal_dust_type'])
p.setFloat('Dust', d['metal_dust'])
self.texture_data_to_mxparams('Dust Map', d['metal_dust_map'], p, )
p.setByte('Perforation Enabled', d['metal_perforation_enabled'])
self.texture_data_to_mxparams('Perforation Map', d['metal_perforation_map'], p, )
elif(d['use'] == 'TRANSLUCENT'):
e = m.createDefaultMaterialModifierExtension('Translucent')
p = e.getExtensionData()
p.setFloat('Scale', d['translucent_scale'])
p.setFloat('Ior', d['translucent_ior'])
p.setByte('Color Type', d['translucent_color_type'])
c = Crgb()
c.assign(*d['translucent_color'])
p.setRgb('Color', c)
self.texture_data_to_mxparams('Color Map', d['translucent_color_map'], p, )
p.setFloat('Hue Shift', d['translucent_hue_shift'])
p.setByte('Invert Hue', d['translucent_invert_hue'])
p.setFloat('Vibrance', d['translucent_vibrance'])
p.setFloat('Density', d['translucent_density'])
p.setFloat('Opacity', d['translucent_opacity'])
p.setByte('Roughness Type', d['translucent_roughness_type'])
p.setFloat('Roughness', d['translucent_roughness'])
self.texture_data_to_mxparams('Roughness Map', d['translucent_roughness_map'], p, )
p.setFloat('Specular Tint', d['translucent_specular_tint'])
p.setByte('Clearcoat', d['translucent_clearcoat'])
p.setFloat('Clearcoat Ior', d['translucent_clearcoat_ior'])
elif(d['use'] == 'CARPAINT'):
e = m.createDefaultMaterialModifierExtension('Car Paint')
p = e.getExtensionData()
c = Crgb()
c.assign(*d['carpaint_color'])
p.setRgb('Color', c)
p.setFloat('Metallic', d['carpaint_metallic'])
p.setFloat('Topcoat', d['carpaint_topcoat'])
elif(d['use'] == 'HAIR'):
e = m.createDefaultMaterialModifierExtension('Hair')
p = e.getExtensionData()
p.setByte('Color Type', d['hair_color_type'])
c = Crgb()
c.assign(*d['hair_color'])
p.setRgb('Color', c)
self.texture_data_to_mxparams('Color Map', d['hair_color_map'], p, )
self.texture_data_to_mxparams('Root-Tip Map', d['hair_root_tip_map'], p, )
p.setByte('Root-Tip Weight Type', d['hair_root_tip_weight_type'])
p.setFloat('Root-Tip Weight', d['hair_root_tip_weight'])
self.texture_data_to_mxparams('Root-Tip Weight Map', d['hair_root_tip_weight_map'], p, )
p.setFloat('Primary Highlight Strength', d['hair_primary_highlight_strength'])
p.setFloat('Primary Highlight Spread', d['hair_primary_highlight_spread'])
c = Crgb()
c.assign(*d['hair_primary_highlight_tint'])
p.setRgb('Primary Highlight Tint', c)
p.setFloat('Secondary Highlight Strength', d['hair_secondary_highlight_strength'])
p.setFloat('Secondary Highlight Spread', d['hair_secondary_highlight_spread'])
c = Crgb()
c.assign(*d['hair_secondary_highlight_tint'])
p.setRgb('Secondary Highlight Tint', c)
m = s.createMaterial(d['name'])
m.applyMaterialModifierExtension(p)
# global properties
if(d['override_map']):
t = self.texture(d['override_map'])
if(t is not None):
m.setGlobalMap(t)
if(d['bump_map_enabled']):
a = Cattribute()
a.activeType = MAP_TYPE_BITMAP
t = self.texture(d['bump_map'])
if(t is not None):
a.textureMap = t
if(d['bump_map_use_normal']):
a.value = d['bump_normal']
else:
a.value = d['bump']
m.setAttribute('bump', a)
m.setNormalMapState(d['bump_map_use_normal'])
m.setDispersion(d['dispersion'])
m.setMatteShadow(d['shadow'])
m.setMatte(d['matte'])
m.setNestedPriority(d['priority'])
c = Crgb()
c.assign(*d['id'])
m.setColorID(c)
if(d['active_display_map']):
t = texture(d['active_display_map'], s, )
m.setActiveDisplayMap(t)
def displacement(d, m):
if(not d['enabled']):
return
m.enableDisplacement(True)
if(d['map'] is not None):
t = self.texture(d['map'])
m.setDisplacementMap(t)
m.setDisplacementCommonParameters(d['type'], d['subdivision'], int(d['smoothing']), d['offset'], d['subdivision_method'], d['uv_interpolation'], )
m.setHeightMapDisplacementParameters(d['height'], d['height_units'], d['adaptive'], )
v = Cvector(*d['v3d_scale'])
m.setVectorDisplacementParameters(v, d['v3d_transform'], d['v3d_rgb_mapping'], d['v3d_preset'], )
try:
displacement(d['displacement'], m)
except KeyError:
pass
elif(d['subtype'] == 'CUSTOM'):
m = self.material_custom(d)
else:
raise TypeError("Material '{}' {} is unknown type".format(d['name'], d['subtype']))
return m
def get_material(self, n, ):
"""get material by name from scene, if material is missing, create and return placeholder"""
def get_material_names(s):
it = CmaxwellMaterialIterator()
o = it.first(s)
l = []
while not o.isNull():
name = o.getName()
l.append(name)
o = it.next()
return l
s = self.mxs
names = get_material_names(s)
m = None
if(n in names):
m = s.getMaterial(n)
if(m is None):
            # should not happen because material names are no longer changed, but kept here as a safeguard
m = self.material_placeholder()
return m
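    # Illustrative call of get_material() above (names are hypothetical):
    #   m = writer.get_material("Default Material")
    # where "writer" is an instance of the enclosing class with a loaded scene in self.mxs.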
class MXMEmitterCheck():
def __init__(self, path, ):
if(__name__ != "__main__"):
if(platform.system() == 'Darwin'):
raise ImportError("No pymaxwell for Mac OS X..")
log(self.__class__.__name__, 1, LogStyles.MESSAGE, prefix="* ", )
self.path = path
self.mxs = Cmaxwell(mwcallback)
self.emitter = False
m = self.mxs.readMaterial(self.path)
for i in range(m.getNumLayers()[0]):
l = m.getLayer(i)
e = l.getEmitter()
if(e.isNull()):
# no emitter layer
self.emitter = False
return
if(not e.getState()[0]):
                # there is an emitter layer, but it is disabled
self.emitter = False
return
# is emitter
self.emitter = True
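# Minimal usage sketch for MXMEmitterCheck above (the path is a hypothetical example):
#   check = MXMEmitterCheck("/path/to/material.mxm")
#   if(check.emitter):
#       print("material has at least one enabled emitter layer")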
class MXSReader():
def __init__(self, path, ):
if(__name__ != "__main__"):
if(platform.system() == 'Darwin'):
raise ImportError("No pymaxwell for Mac OS X..")
log(self.__class__.__name__, 1, LogStyles.MESSAGE, prefix="* ", )
self.path = path
self.mxs = Cmaxwell(mwcallback)
log("loading {}".format(self.path), 2, prefix="* ", )
self.mxs.readMXS(self.path)
if(self.mxs.isProtectionEnabled()):
raise RuntimeError("Protected MXS")
self._prepare()
def _mxs_get_objects_names(self):
s = self.mxs
it = CmaxwellObjectIterator()
o = it.first(s)
l = []
while not o.isNull():
name, _ = o.getName()
l.append(name)
o = it.next()
return l
def _mxs_object(self, o):
object_name, _ = o.getName()
is_instance, _ = o.isInstance()
is_mesh, _ = o.isMesh()
if(is_instance == 0 and is_mesh == 0):
log("{}: only empties, meshes and instances are supported..".format(object_name), 2, LogStyles.WARNING, )
return None
        # objects with uninitialized position/rotation/scale: warn only, skipping is disabled below
is_init, _ = o.isPosRotScaleInitialized()
if(not is_init):
# log("{}: object is not initialized, skipping..".format(object_name), 2, LogStyles.WARNING, )
log("{}: object is not initialized..".format(object_name), 2, LogStyles.WARNING, )
# return None
r = {'name': o.getName()[0],
'vertices': [],
'normals': [],
'triangles': [],
'trianglesUVW': [],
'matrix': (),
'parent': None,
'type': '',
'materials': [],
'nmats': 0,
'matnames': [], }
if(is_instance == 1):
io = o.getInstanced()
ion = io.getName()[0]
b, p = self._base_and_pivot(o)
r = {'name': o.getName()[0],
'base': b,
'pivot': p,
'parent': None,
'type': 'INSTANCE',
'instanced': ion, }
# no multi material instances, always one material per instance
m, _ = o.getMaterial()
if(m.isNull() == 1):
r['material'] = None
else:
                r['material'] = m.getName()
p, _ = o.getParent()
if(not p.isNull()):
r['parent'] = p.getName()[0]
cid, _ = o.getColorID()
rgb8 = cid.toRGB8()
col = [str(rgb8.r()), str(rgb8.g()), str(rgb8.b())]
r['colorid'] = ", ".join(col)
h = []
if(o.getHideToCamera()):
h.append("C")
if(o.getHideToGI()):
h.append("GI")
if(o.getHideToReflectionsRefractions()):
h.append("RR")
r['hidden'] = ", ".join(h)
r['referenced_mxs'] = False
r['referenced_mxs_path'] = None
rmp = io.getReferencedScenePath()
if(rmp != ""):
r['referenced_mxs'] = True
r['referenced_mxs_path'] = rmp
return r
# counts
nv, _ = o.getVerticesCount()
nn, _ = o.getNormalsCount()
nt, _ = o.getTrianglesCount()
nppv, _ = o.getPositionsPerVertexCount()
ppv = 0
r['referenced_mxs'] = False
r['referenced_mxs_path'] = None
if(nv > 0):
r['type'] = 'MESH'
cid, _ = o.getColorID()
rgb8 = cid.toRGB8()
col = [str(rgb8.r()), str(rgb8.g()), str(rgb8.b())]
r['colorid'] = ", ".join(col)
h = []
if(o.getHideToCamera()):
h.append("C")
if(o.getHideToGI()):
h.append("GI")
if(o.getHideToReflectionsRefractions()):
h.append("RR")
r['hidden'] = ", ".join(h)
else:
r['type'] = 'EMPTY'
rmp = o.getReferencedScenePath()
if(rmp != ""):
r['referenced_mxs'] = True
r['referenced_mxs_path'] = rmp
cid, _ = o.getColorID()
rgb8 = cid.toRGB8()
col = [str(rgb8.r()), str(rgb8.g()), str(rgb8.b())]
r['colorid'] = ", ".join(col)
if(nppv - 1 != ppv and nv != 0):
log("only one position per vertex is supported..", 2, LogStyles.WARNING, )
# vertices
for i in range(nv):
v, _ = o.getVertex(i, ppv)
# (float x, float y, float z)
r['vertices'].append((v.x(), v.y(), v.z()))
# normals
for i in range(nn):
v, _ = o.getNormal(i, ppv)
# (float x, float y, float z)
r['normals'].append((v.x(), v.y(), v.z()))
# triangles
for i in range(nt):
t = o.getTriangle(i)
# (int v1, int v2, int v3, int n1, int n2, int n3)
r['triangles'].append(t)
# materials
mats = []
for i in range(nt):
m, _ = o.getTriangleMaterial(i)
if(m.isNull() == 1):
n = None
else:
n = m.getName()
if(n not in mats):
mats.append(n)
r['materials'].append((i, n))
r['nmats'] = len(mats)
r['matnames'] = mats
# uv channels
ncuv, _ = o.getChannelsUVWCount()
for cuv in range(ncuv):
# uv triangles
r['trianglesUVW'].append([])
for i in range(nt):
t = o.getTriangleUVW(i, cuv)
# float u1, float v1, float w1, float u2, float v2, float w2, float u3, float v3, float w3
r['trianglesUVW'][cuv].append(t)
# base and pivot to matrix
b, p = self._base_and_pivot(o)
r['base'] = b
r['pivot'] = p
# parent
p, _ = o.getParent()
if(not p.isNull()):
r['parent'] = p.getName()[0]
return r
def _mxs_camera(self, c):
v = c.getValues()
v = {'name': v[0],
'nSteps': v[1],
'shutter': v[2],
'filmWidth': v[3],
'filmHeight': v[4],
'iso': v[5],
'pDiaphragmType': v[6],
'angle': v[7],
'nBlades': v[8],
'fps': v[9],
'xRes': v[10],
'yRes': v[11],
'pixelAspect': v[12],
'lensType': v[13], }
s = c.getStep(0)
o = s[0]
f = s[1]
u = s[2]
        # skip impossible cameras (zero focal length correction or zero focus distance)
flc = s[3]
co = s[0]
fp = s[1]
d = Cvector()
d.substract(fp, co)
fd = d.norm()
if(flc == 0.0 or fd == 0.0):
log("{}: impossible camera, skipping..".format(v['name']), 2, LogStyles.WARNING)
return None
r = {'name': v['name'],
'shutter': 1.0 / v['shutter'],
'iso': v['iso'],
'x_res': v['xRes'],
'y_res': v['yRes'],
'pixel_aspect': v['pixelAspect'],
'origin': (o.x(), o.y(), o.z()),
'focal_point': (f.x(), f.y(), f.z()),
'up': (u.x(), u.y(), u.z()),
'focal_length': self._uncorrect_focal_length(s) * 1000.0,
'f_stop': s[4],
'film_width': round(v['filmWidth'] * 1000.0, 3),
'film_height': round(v['filmHeight'] * 1000.0, 3),
'active': False,
'sensor_fit': None,
'shift_x': 0.0,
'shift_y': 0.0,
'zclip': False,
'zclip_near': 0.0,
'zclip_far': 1000000.0,
'type': 'CAMERA', }
if(r['film_width'] > r['film_height']):
r['sensor_fit'] = 'HORIZONTAL'
else:
r['sensor_fit'] = 'VERTICAL'
cp = c.getCutPlanes()
if(cp[2] is True):
r['zclip'] = True
r['zclip_near'] = cp[0]
r['zclip_far'] = cp[1]
sl = c.getShiftLens()
r['shift_x'] = sl[0]
r['shift_y'] = sl[1]
d = c.getDiaphragm()
r['diaphragm_type'] = d[0][0]
r['diaphragm_angle'] = d[1]
r['diaphragm_blades'] = d[2]
return r
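    # _base_and_pivot() below returns the object's base as [origin, xAxis, yAxis, zAxis]
    # (each a 3-item list); the pivot reported by the API is ignored and a constant
    # identity pivot is returned instead.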
def _base_and_pivot(self, o):
b, p, _ = o.getBaseAndPivot()
o = b.origin
x = b.xAxis
y = b.yAxis
z = b.zAxis
rb = [[o.x(), o.y(), o.z()], [x.x(), x.y(), x.z()], [y.x(), y.y(), y.z()], [z.x(), z.y(), z.z()]]
rp = ((0.0, 0.0, 0.0), (1.0, 0.0, 0.0), (0.0, 1.0, 0.0), (0.0, 0.0, 1.0), )
return rb, rp
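    # _uncorrect_focal_length() below appears to invert a thin-lens style correction:
    # with flc the corrected focal length and fd the focus distance (distance from the
    # camera origin to the focal point), it returns 1 / (1/flc - 1/fd). This reading is
    # inferred from the code, not from pymaxwell documentation.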
def _uncorrect_focal_length(self, step):
flc = step[3]
o = step[0]
fp = step[1]
d = Cvector()
d.substract(fp, o)
fd = d.norm()
fluc = 1.0 / (1.0 / flc - 1 / fd)
return fluc
def _prepare(self):
s = self.mxs
self.object_names = self._mxs_get_objects_names()
def _is_emitter(self, o):
is_instance, _ = o.isInstance()
is_mesh, _ = o.isMesh()
if(not is_mesh and not is_instance):
return False
if(is_mesh):
nt, _ = o.getTrianglesCount()
mats = []
for i in range(nt):
m, _ = o.getTriangleMaterial(i)
if(not m.isNull()):
if(m not in mats):
mats.append(m)
for m in mats:
nl, _ = m.getNumLayers()
for i in range(nl):
l = m.getLayer(i)
e = l.getEmitter()
if(not e.isNull()):
return True
if(is_instance):
m, _ = o.getMaterial()
if(not m.isNull()):
nl, _ = m.getNumLayers()
for i in range(nl):
l = m.getLayer(i)
e = l.getEmitter()
if(not e.isNull()):
return True
return False
def _global_transform(self, o):
cb, _ = o.getWorldTransform()
o = cb.origin
x = cb.xAxis
y = cb.yAxis
z = cb.zAxis
rb = [[o.x(), o.y(), o.z()], [x.x(), x.y(), x.z()], [y.x(), y.y(), y.z()], [z.x(), z.y(), z.z()]]
rp = ((0.0, 0.0, 0.0), (1.0, 0.0, 0.0), (0.0, 1.0, 0.0), (0.0, 0.0, 1.0), )
return rb, rp
def objects(self, only_emitters=False):
if(only_emitters):
s = self.mxs
data = []
log("converting emitters..", 2)
for n in self.object_names:
d = None
o = s.getObject(n)
if(self._is_emitter(o)):
d = self._mxs_object(o)
if(d is not None):
b, p = self._global_transform(o)
d['base'] = b
d['pivot'] = p
d['parent'] = None
data.append(d)
else:
s = self.mxs
data = []
log("converting empties, meshes and instances..", 2)
for n in self.object_names:
d = None
o = s.getObject(n)
d = self._mxs_object(o)
if(d is not None):
data.append(d)
return data
def cameras(self):
s = self.mxs
data = []
log("converting cameras..", 2)
nms = s.getCameraNames()
cams = []
if(type(nms) == list):
for n in nms:
cams.append(s.getCamera(n))
for c in cams:
d = self._mxs_camera(c)
if(d is not None):
data.append(d)
# set active camera
if(len(cams) > 1):
            # s.getActiveCamera() behaves badly when there is just one camera,
            # so use it only when there are two or more cameras..
active_cam = s.getActiveCamera()
active_cam_name = active_cam.getName()
for o in data:
if(o['type'] == 'CAMERA'):
if(o['name'] == active_cam_name):
o['active'] = True
else:
for o in data:
if(o['type'] == 'CAMERA'):
o['active'] = True
return data
def sun(self):
s = self.mxs
data = []
env = s.getEnvironment()
if(env.getSunProperties()[0] == 1):
log("converting sun..", 2)
if(env.getSunPositionType() == 2):
v, _ = env.getSunDirection()
else:
v, _ = env.getSunDirectionUsedForRendering()
d = {'name': "The Sun",
'xyz': (v.x(), v.y(), v.z()),
'type': 'SUN', }
data.append(d)
return data
class MXSSceneWrapper():
def __init__(self, load_extensions=True, ):
if(__name__ != "__main__"):
if(platform.system() == 'Darwin'):
raise ImportError("No pymaxwell directly in Blender on Mac OS X..")
log(self.__class__.__name__, 1, LogStyles.MESSAGE, prefix="* ", )
log("creating new scene..", 2, prefix="* ", )
self.mxs = Cmaxwell(mwcallback)
pid = utils.get_plugin_id()
if(pid != ""):
            # written here directly even though it is also part of the scene data; changing the api just for this is pointless..
self.mxs.setPluginID(pid)
self.mgr = None
if(load_extensions):
log("loadinf extensions..", 2, prefix="* ", )
self.mgr = CextensionManager.instance()
self.mgr.loadAllExtensions()
class MXMReader():
def __init__(self, mxm_path, ):
def texture(t):
if(t is None):
return None
if(t.isEmpty()):
return None
d = {'path': t.getPath(),
'use_global_map': t.useGlobalMap,
'channel': t.uvwChannelID,
'brightness': t.brightness * 100,
'contrast': t.contrast * 100,
'saturation': t.saturation * 100,
'hue': t.hue * 180,
'rotation': t.rotation,
'invert': t.invert,
'interpolation': t.typeInterpolation,
'use_alpha': t.useAlpha,
'repeat': [t.scale.x(), t.scale.y()],
'mirror': [t.uIsMirrored, t.vIsMirrored],
'offset': [t.offset.x(), t.offset.y()],
'clamp': [int(t.clampMin * 255), int(t.clampMax * 255)],
'tiling_units': t.useAbsoluteUnits,
'tiling_method': [t.uIsTiled, t.vIsTiled],
'normal_mapping_flip_red': t.normalMappingFlipRed,
'normal_mapping_flip_green': t.normalMappingFlipGreen,
'normal_mapping_full_range_blue': t.normalMappingFullRangeBlue, }
# t.cosA
# t.doGammaCorrection
# t.sinA
# t.theTextureExtensions
d['procedural'] = []
if(t.hasProceduralTextures()):
n = t.getProceduralTexturesCount()
for i in range(n):
pd = extension(None, None, t, i)
d['procedural'].append(pd)
return d
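        # material() below walks the MXM layer/BSDF structure and flattens it into a
        # plain dict: global and displacement properties, per-layer props and emitter
        # settings, and per-BSDF props and coating, starting from dicts of default
        # values defined inside it.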
def material(s, m):
data = {}
if(m.isNull()):
return data
# defaults
bsdfd = {'visible': True, 'weight': 100.0, 'weight_map_enabled': False, 'weight_map': None, 'ior': 0, 'complex_ior': "",
'reflectance_0': (0.6, 0.6, 0.6, ), 'reflectance_0_map_enabled': False, 'reflectance_0_map': None,
'reflectance_90': (1.0, 1.0, 1.0, ), 'reflectance_90_map_enabled': False, 'reflectance_90_map': None,
'transmittance': (0.0, 0.0, 0.0), 'transmittance_map_enabled': False, 'transmittance_map': None,
'attenuation': 1.0, 'attenuation_units': 0, 'nd': 3.0, 'force_fresnel': False, 'k': 0.0, 'abbe': 1.0,
'r2_enabled': False, 'r2_falloff_angle': 75.0, 'r2_influence': 0.0,
'roughness': 100.0, 'roughness_map_enabled': False, 'roughness_map': None,
'bump': 30.0, 'bump_map_enabled': False, 'bump_map': None, 'bump_map_use_normal': False, 'bump_normal': 100.0,
'anisotropy': 0.0, 'anisotropy_map_enabled': False, 'anisotropy_map': None,
'anisotropy_angle': 0.0, 'anisotropy_angle_map_enabled': False, 'anisotropy_angle_map': None,
'scattering': (0.5, 0.5, 0.5, ), 'coef': 0.0, 'asymmetry': 0.0,
'single_sided': False, 'single_sided_value': 1.0, 'single_sided_map_enabled': False, 'single_sided_map': None, 'single_sided_min': 0.001, 'single_sided_max': 10.0, }
coatingd = {'enabled': False,
'thickness': 500.0, 'thickness_map_enabled': False, 'thickness_map': None, 'thickness_map_min': 100.0, 'thickness_map_max': 1000.0,
'ior': 0, 'complex_ior': "",
'reflectance_0': (0.6, 0.6, 0.6, ), 'reflectance_0_map_enabled': False, 'reflectance_0_map': None,
'reflectance_90': (1.0, 1.0, 1.0, ), 'reflectance_90_map_enabled': False, 'reflectance_90_map': None,
'nd': 3.0, 'force_fresnel': False, 'k': 0.0, 'r2_enabled': False, 'r2_falloff_angle': 75.0, }
displacementd = {'enabled': False, 'map': None, 'type': 1, 'subdivision': 5, 'adaptive': False, 'subdivision_method': 0,
'offset': 0.5, 'smoothing': True, 'uv_interpolation': 2, 'height': 2.0, 'height_units': 0,
'v3d_preset': 0, 'v3d_transform': 0, 'v3d_rgb_mapping': 0, 'v3d_scale': (1.0, 1.0, 1.0), }
emitterd = {'enabled': False, 'type': 0, 'ies_data': "", 'ies_intensity': 1.0,
'spot_map_enabled': False, 'spot_map': "", 'spot_cone_angle': 45.0, 'spot_falloff_angle': 10.0, 'spot_falloff_type': 0, 'spot_blur': 1.0,
'emission': 0, 'color': (1.0, 1.0, 1.0, ), 'color_black_body_enabled': False, 'color_black_body': 6500.0,
'luminance': 0, 'luminance_power': 40.0, 'luminance_efficacy': 17.6, 'luminance_output': 100.0, 'temperature_value': 6500.0,
'hdr_map': None, 'hdr_intensity': 1.0, }
layerd = {'visible': True, 'opacity': 100.0, 'opacity_map_enabled': False, 'opacity_map': None, 'blending': 0, }
globald = {'override_map': None, 'bump': 30.0, 'bump_map_enabled': False, 'bump_map': None, 'bump_map_use_normal': False, 'bump_normal': 100.0,
'dispersion': False, 'shadow': False, 'matte': False, 'priority': 0, 'id': (0.0, 0.0, 0.0), 'active_display_map': None, }
# structure
structure = []
nl, _ = m.getNumLayers()
for i in range(nl):
l = m.getLayer(i)
ln, _ = l.getName()
nb, _ = l.getNumBSDFs()
bs = []
for j in range(nb):
b = l.getBSDF(j)
bn = b.getName()
bs.append([bn, b])
ls = [ln, l, bs]
structure.append(ls)
# default data
data['global_props'] = globald.copy()
data['displacement'] = displacementd.copy()
data['layers'] = []
for i, sl in enumerate(structure):
bsdfs = []
for j, sb in enumerate(sl[2]):
bsdfs.append({'name': sb[0],
'bsdf_props': bsdfd.copy(),
'coating': coatingd.copy(), })
layer = {'name': sl[0],
'layer_props': layerd.copy(),
'bsdfs': bsdfs,
'emitter': emitterd.copy(), }
data['layers'].append(layer)
# custom data
def global_props(m, d):
t, _ = m.getGlobalMap()
d['override_map'] = texture(t)
a, _ = m.getAttribute('bump')
if(a.activeType == MAP_TYPE_BITMAP):
d['bump_map_enabled'] = True
d['bump_map'] = texture(a.textureMap)
d['bump_map_use_normal'] = m.getNormalMapState()[0]
if(d['bump_map_use_normal']):
d['bump_normal'] = a.value
else:
d['bump'] = a.value
else:
d['bump_map_enabled'] = False
d['bump_map'] = None
d['bump_map_use_normal'] = m.getNormalMapState()[0]
if(d['bump_map_use_normal']):
d['bump_normal'] = a.value
else:
d['bump'] = a.value
d['dispersion'] = m.getDispersion()[0]
d['shadow'] = m.getMatteShadow()[0]
d['matte'] = m.getMatte()[0]
d['priority'] = m.getNestedPriority()[0]
c, _ = m.getColorID()
d['id'] = [c.r(), c.g(), c.b()]
return d
data['global_props'] = global_props(m, data['global_props'])
def displacement(m, d):
if(not m.isDisplacementEnabled()[0]):
return d
d['enabled'] = True
t, _ = m.getDisplacementMap()
d['map'] = texture(t)
displacementType, subdivisionLevel, smoothness, offset, subdivisionType, interpolationUvType, minLOD, maxLOD, _ = m.getDisplacementCommonParameters()
height, absoluteHeight, adaptive, _ = m.getHeightMapDisplacementParameters()
scale, transformType, mapping, preset, _ = m.getVectorDisplacementParameters()
d['type'] = displacementType
d['subdivision'] = subdivisionLevel
d['adaptive'] = adaptive
d['subdivision_method'] = subdivisionType
d['offset'] = offset
d['smoothing'] = bool(smoothness)
d['uv_interpolation'] = interpolationUvType
d['height'] = height
d['height_units'] = absoluteHeight
d['v3d_preset'] = preset
d['v3d_transform'] = transformType
d['v3d_rgb_mapping'] = mapping
d['v3d_scale'] = (scale.x(), scale.y(), scale.z(), )
return d
data['displacement'] = displacement(m, data['displacement'])
def cattribute_rgb(a):
if(a.activeType == MAP_TYPE_BITMAP):
c = (a.rgb.r(), a.rgb.g(), a.rgb.b())
e = True
m = texture(a.textureMap)
else:
c = (a.rgb.r(), a.rgb.g(), a.rgb.b())
e = False
m = None
return c, e, m
def cattribute_value(a):
if(a.activeType == MAP_TYPE_BITMAP):
v = a.value
e = True
m = texture(a.textureMap)
else:
v = a.value
e = False
m = None
return v, e, m
def layer_props(l, d):
d['visible'] = l.getEnabled()[0]
d['blending'] = l.getStackedBlendingMode()[0]
a, _ = l.getAttribute('weight')
if(a.activeType == MAP_TYPE_BITMAP):
d['opacity'] = a.value
d['opacity_map_enabled'] = True
d['opacity_map'] = texture(a.textureMap)
else:
d['opacity'] = a.value
d['opacity_map_enabled'] = False
d['opacity_map'] = None
return d
def emitter(l, d):
e = l.getEmitter()
if(e.isNull()):
d['enabled'] = False
return d
d['enabled'] = True
d['type'] = e.getLobeType()[0]
d['ies_data'] = e.getLobeIES()
d['ies_intensity'] = e.getIESLobeIntensity()[0]
t, _ = e.getLobeImageProjectedMap()
d['spot_map_enabled'] = (not t.isEmpty())
d['spot_map'] = texture(t)
d['spot_cone_angle'] = e.getSpotConeAngle()[0]
d['spot_falloff_angle'] = e.getSpotFallOffAngle()[0]
d['spot_falloff_type'] = e.getSpotFallOffType()[0]
d['spot_blur'] = e.getSpotBlur()[0]
d['emission'] = e.getActiveEmissionType()[0]
ep, _ = e.getPair()
colorType, units, _ = e.getActivePair()
d['color'] = (ep.rgb.r(), ep.rgb.g(), ep.rgb.b(), )
d['color_black_body'] = ep.temperature
d['luminance'] = units
if(units == EMISSION_UNITS_WATTS_AND_LUMINOUS_EFFICACY):
d['luminance_power'] = ep.watts
d['luminance_efficacy'] = ep.luminousEfficacy
elif(units == EMISSION_UNITS_LUMINOUS_POWER):
d['luminance_output'] = ep.luminousPower
elif(units == EMISSION_UNITS_ILLUMINANCE):
d['luminance_output'] = ep.illuminance
elif(units == EMISSION_UNITS_LUMINOUS_INTENSITY):
d['luminance_output'] = ep.luminousIntensity
elif(units == EMISSION_UNITS_LUMINANCE):
d['luminance_output'] = ep.luminance
if(colorType == EMISSION_COLOR_TEMPERATURE):
d['color_black_body_enabled'] = True
d['temperature_value'] = e.getTemperature()[0]
a, _ = e.getMXI()
if(a.activeType == MAP_TYPE_BITMAP):
d['hdr_map'] = texture(a.textureMap)
d['hdr_intensity'] = a.value
else:
d['hdr_map'] = None
d['hdr_intensity'] = a.value
return d
def bsdf_props(b, d):
d['visible'] = b.getState()[0]
a, _ = b.getWeight()
if(a.activeType == MAP_TYPE_BITMAP):
d['weight_map_enabled'] = True
d['weight'] = a.value
d['weight_map'] = texture(a.textureMap)
else:
d['weight_map_enabled'] = False
d['weight'] = a.value
d['weight_map'] = None
r = b.getReflectance()
d['ior'] = r.getActiveIorMode()[0]
d['complex_ior'] = r.getComplexIor()
d['reflectance_0'], d['reflectance_0_map_enabled'], d['reflectance_0_map'] = cattribute_rgb(r.getAttribute('color')[0])
d['reflectance_90'], d['reflectance_90_map_enabled'], d['reflectance_90_map'] = cattribute_rgb(r.getAttribute('color.tangential')[0])
d['transmittance'], d['transmittance_map_enabled'], d['transmittance_map'] = cattribute_rgb(r.getAttribute('transmittance.color')[0])
d['attenuation_units'], d['attenuation'] = r.getAbsorptionDistance()
d['nd'], d['abbe'], _ = r.getIOR()
d['force_fresnel'], _ = r.getForceFresnel()
d['k'], _ = r.getConductor()
d['r2_falloff_angle'], d['r2_influence'], d['r2_enabled'], _ = r.getFresnelCustom()
d['roughness'], d['roughness_map_enabled'], d['roughness_map'] = cattribute_value(b.getAttribute('roughness')[0])
d['bump_map_use_normal'] = b.getNormalMapState()[0]
if(d['bump_map_use_normal']):
d['bump_normal'], d['bump_map_enabled'], d['bump_map'] = cattribute_value(b.getAttribute('bump')[0])
else:
d['bump'], d['bump_map_enabled'], d['bump_map'] = cattribute_value(b.getAttribute('bump')[0])
d['anisotropy'], d['anisotropy_map_enabled'], d['anisotropy_map'] = cattribute_value(b.getAttribute('anisotropy')[0])
d['anisotropy_angle'], d['anisotropy_angle_map_enabled'], d['anisotropy_angle_map'] = cattribute_value(b.getAttribute('angle')[0])
a, _ = r.getAttribute('scattering')
d['scattering'] = (a.rgb.r(), a.rgb.g(), a.rgb.b(), )
d['coef'], d['asymmetry'], d['single_sided'], _ = r.getScatteringParameters()
d['single_sided_value'], d['single_sided_map_enabled'], d['single_sided_map'] = cattribute_value(r.getScatteringThickness()[0])
d['single_sided_min'], d['single_sided_max'], _ = r.getScatteringThicknessRange()
return d
def coating(b, d):
nc, _ = b.getNumCoatings()
if(nc > 0):
c = b.getCoating(0)
else:
d['enabled'] = False
return d
d['enabled'] = True
d['thickness'], d['thickness_map_enabled'], d['thickness_map'] = cattribute_value(c.getThickness()[0])
d['thickness_map_min'], d['thickness_map_max'], _ = c.getThicknessRange()
r = c.getReflectance()
d['ior'] = r.getActiveIorMode()[0]
d['complex_ior'] = r.getComplexIor()
d['reflectance_0'], d['reflectance_0_map_enabled'], d['reflectance_0_map'] = cattribute_rgb(r.getAttribute('color')[0])
d['reflectance_90'], d['reflectance_90_map_enabled'], d['reflectance_90_map'] = cattribute_rgb(r.getAttribute('color.tangential')[0])
d['nd'], _, _ = r.getIOR()
d['force_fresnel'], _ = r.getForceFresnel()
d['k'], _ = r.getConductor()
d['r2_falloff_angle'], _, d['r2_enabled'], _ = r.getFresnelCustom()
return d
for i, sl in enumerate(structure):
l = sl[1]
data['layers'][i]['layer_props'] = layer_props(l, data['layers'][i]['layer_props'])
data['layers'][i]['emitter'] = emitter(l, data['layers'][i]['emitter'])
for j, bs in enumerate(sl[2]):
b = bs[1]
data['layers'][i]['bsdfs'][j]['bsdf_props'] = bsdf_props(b, data['layers'][i]['bsdfs'][j]['bsdf_props'])
data['layers'][i]['bsdfs'][j]['coating'] = coating(b, data['layers'][i]['bsdfs'][j]['coating'])
return data
def extension(s, m, pt=None, pi=None, ):
def texture(t):
if(t is None):
return None
if(t.isEmpty()):
return None
d = {'path': t.getPath(),
'use_global_map': t.useGlobalMap,
'channel': t.uvwChannelID,
'brightness': t.brightness * 100,
'contrast': t.contrast * 100,
'saturation': t.saturation * 100,
'hue': t.hue * 180,
'rotation': t.rotation,
'invert': t.invert,
'interpolation': t.typeInterpolation,
'use_alpha': t.useAlpha,
'repeat': [t.scale.x(), t.scale.y()],
'mirror': [t.uIsMirrored, t.vIsMirrored],
'offset': [t.offset.x(), t.offset.y()],
'clamp': [int(t.clampMin * 255), int(t.clampMax * 255)],
'tiling_units': t.useAbsoluteUnits,
'tiling_method': [t.uIsTiled, t.vIsTiled],
'normal_mapping_flip_red': t.normalMappingFlipRed,
'normal_mapping_flip_green': t.normalMappingFlipGreen,
'normal_mapping_full_range_blue': t.normalMappingFullRangeBlue, }
return d
def mxparamlistarray(v):
return None
def rgb(v):
return (v.r(), v.g(), v.b())
if(pt is not None and pi is not None):
params = pt.getProceduralTexture(pi)
else:
params, _ = m.getMaterialModifierExtensionParams()
types = [(0, 'UCHAR', params.getByte, ),
(1, 'UINT', params.getUInt, ),
(2, 'INT', params.getInt, ),
(3, 'FLOAT', params.getFloat, ),
(4, 'DOUBLE', params.getDouble, ),
(5, 'STRING', params.getString, ),
(6, 'FLOATARRAY', params.getFloatArray, ),
(7, 'DOUBLEARRAY', params.getDoubleArray, ),
(8, 'BYTEARRAY', params.getByteArray, ),
(9, 'INTARRAY', params.getIntArray, ),
(10, 'MXPARAMLIST', params.getTextureMap, ),
(11, 'MXPARAMLISTARRAY', mxparamlistarray, ),
(12, 'RGB', params.getRgb, ), ]
d = {}
for i in range(params.getNumItems()):
name, data, _, _, data_type, _, data_count, _ = params.getByIndex(i)
_, _, f = types[data_type]
k = name
if(data_type not in [10, 11, 12]):
v, _ = f(name)
else:
if(data_type == 10):
v = texture(f(name)[0])
elif(data_type == 11):
pass
elif(data_type == 12):
v = rgb(f(name)[0])
d[k] = v
return d
log("{0} {1} {0}".format("-" * 30, self.__class__.__name__), 0, LogStyles.MESSAGE, prefix="", )
log("path: {}".format(mxm_path), 1, LogStyles.MESSAGE)
s = Cmaxwell(mwcallback)
m = s.readMaterial(mxm_path)
self.data = material(s, m)
if(m.hasMaterialModifier()):
self.data['extension'] = extension(s, m)
class MXSReferenceReader():
def __init__(self, path, ):
log("maxwell meshes to data:", 1)
log("reading mxs scene from: {0}".format(path), 2)
scene = Cmaxwell(mwcallback)
ok = scene.readMXS(path)
if(not ok):
raise RuntimeError("Error during reading scene {}".format(path))
nms = self.get_objects_names(scene)
data = []
log("reading meshes..", 2)
for n in nms:
d = None
o = scene.getObject(n)
if(not o.isNull()):
if(o.isMesh()[0] == 1 and o.isInstance()[0] == 0):
d = self.object(o)
if(d is not None):
data.append(d)
log("reading instances..", 2)
for n in nms:
d = None
o = scene.getObject(n)
if(not o.isNull()):
if(o.isMesh()[0] == 0 and o.isInstance()[0] == 1):
io = o.getInstanced()
ion = io.getName()[0]
for a in data:
if(a['name'] == ion):
b, p = self.global_transform(o)
d = {'name': o.getName()[0],
'base': b,
'pivot': p,
'vertices': a['vertices'][:], }
if(d is not None):
data.append(d)
self.data = data
log("done.", 2)
def get_objects_names(self, scene):
it = CmaxwellObjectIterator()
o = it.first(scene)
l = []
while not o.isNull():
name, _ = o.getName()
l.append(name)
o = it.next()
return l
def object(self, o):
is_instance, _ = o.isInstance()
is_mesh, _ = o.isMesh()
if(is_instance == 0 and is_mesh == 0):
return None
def get_verts(o):
vs = []
nv, _ = o.getVerticesCount()
for i in range(nv):
v, _ = o.getVertex(i, 0)
vs.append((v.x(), v.y(), v.z()))
return vs
b, p = self.global_transform(o)
r = {'name': o.getName()[0],
'base': b,
'pivot': p,
'vertices': [], }
if(is_instance == 1):
io = o.getInstanced()
r['vertices'] = get_verts(io)
else:
r['vertices'] = get_verts(o)
return r
def global_transform(self, o):
cb, _ = o.getWorldTransform()
o = cb.origin
x = cb.xAxis
y = cb.yAxis
z = cb.zAxis
rb = [[o.x(), o.y(), o.z()], [x.x(), x.y(), x.z()], [y.x(), y.y(), y.z()], [z.x(), z.y(), z.z()]]
rp = ((0.0, 0.0, 0.0), (1.0, 0.0, 0.0), (0.0, 1.0, 0.0), (0.0, 0.0, 1.0), )
return rb, rp
| gpl-2.0 | 3,793,325,070,926,812,000 | -2,792,134,590,545,383,400 | 43.840282 | 323 | 0.464107 | false |
mistercrunch/panoramix | superset/views/base_api.py | 2 | 21953 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import functools
import logging
from typing import Any, Callable, cast, Dict, List, Optional, Set, Tuple, Type, Union
from apispec import APISpec
from apispec.exceptions import DuplicateComponentNameError
from flask import Blueprint, g, Response
from flask_appbuilder import AppBuilder, Model, ModelRestApi
from flask_appbuilder.api import expose, protect, rison, safe
from flask_appbuilder.models.filters import BaseFilter, Filters
from flask_appbuilder.models.sqla.filters import FilterStartsWith
from flask_appbuilder.models.sqla.interface import SQLAInterface
from flask_babel import lazy_gettext as _
from marshmallow import fields, Schema
from sqlalchemy import and_, distinct, func
from sqlalchemy.orm.query import Query
from superset.extensions import db, event_logger, security_manager
from superset.models.core import FavStar
from superset.models.dashboard import Dashboard
from superset.models.slice import Slice
from superset.schemas import error_payload_content
from superset.sql_lab import Query as SqllabQuery
from superset.stats_logger import BaseStatsLogger
from superset.typing import FlaskResponse
from superset.utils.core import time_function
logger = logging.getLogger(__name__)
get_related_schema = {
"type": "object",
"properties": {
"page_size": {"type": "integer"},
"page": {"type": "integer"},
"include_ids": {"type": "array", "items": {"type": "integer"}},
"filter": {"type": "string"},
},
}
class RelatedResultResponseSchema(Schema):
value = fields.Integer(description="The related item identifier")
text = fields.String(description="The related item string representation")
class RelatedResponseSchema(Schema):
count = fields.Integer(description="The total number of related values")
result = fields.List(fields.Nested(RelatedResultResponseSchema))
class DistinctResultResponseSchema(Schema):
text = fields.String(description="The distinct item")
class DistincResponseSchema(Schema):
count = fields.Integer(description="The total number of distinct values")
result = fields.List(fields.Nested(DistinctResultResponseSchema))
def statsd_metrics(f: Callable[..., Any]) -> Callable[..., Any]:
"""
Handle sending all statsd metrics from the REST API
"""
def wraps(self: "BaseSupersetModelRestApi", *args: Any, **kwargs: Any) -> Response:
try:
duration, response = time_function(f, self, *args, **kwargs)
except Exception as ex:
self.incr_stats("error", f.__name__)
raise ex
self.send_stats_metrics(response, f.__name__, duration)
return response
return functools.update_wrapper(wraps, f)
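# Illustrative use of the statsd_metrics decorator above on an API method
# (the class and endpoint are hypothetical examples):
#   class ChartRestApi(BaseSupersetModelRestApi):
#       @expose("/", methods=["GET"])
#       @protect()
#       @safe
#       @statsd_metrics
#       def get_list(self, **kwargs):
#           ...
# A successful call increments "ChartRestApi.get_list.success" and records a timing metric.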
class RelatedFieldFilter:
# data class to specify what filter to use on a /related endpoint
# pylint: disable=too-few-public-methods
def __init__(self, field_name: str, filter_class: Type[BaseFilter]):
self.field_name = field_name
self.filter_class = filter_class
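    # Typical (illustrative) use of RelatedFieldFilter on an API subclass; the field
    # names are hypothetical examples:
    #   related_field_filters = {
    #       "owners": RelatedFieldFilter("first_name", FilterStartsWith),
    #   }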
class BaseFavoriteFilter(BaseFilter): # pylint: disable=too-few-public-methods
"""
Base Custom filter for the GET list that filters all dashboards, slices
that a user has favored or not
"""
name = _("Is favorite")
arg_name = ""
class_name = ""
""" The FavStar class_name to user """
model: Type[Union[Dashboard, Slice, SqllabQuery]] = Dashboard
""" The SQLAlchemy model """
def apply(self, query: Query, value: Any) -> Query:
# If anonymous user filter nothing
if security_manager.current_user is None:
return query
users_favorite_query = db.session.query(FavStar.obj_id).filter(
and_(
FavStar.user_id == g.user.get_id(),
FavStar.class_name == self.class_name,
)
)
if value:
return query.filter(and_(self.model.id.in_(users_favorite_query)))
return query.filter(and_(~self.model.id.in_(users_favorite_query)))
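# A concrete favorite filter is a small subclass of BaseFavoriteFilter above; a
# sketch (attribute values are illustrative):
#   class DashboardFavoriteFilter(BaseFavoriteFilter):
#       arg_name = "dashboard_is_favorite"
#       class_name = "Dashboard"
#       model = Dashboard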
class BaseSupersetModelRestApi(ModelRestApi):
"""
    Extends FAB's ModelRestApi to implement specific Superset generic functionality
"""
csrf_exempt = False
method_permission_name = {
"bulk_delete": "delete",
"data": "list",
"data_from_cache": "list",
"delete": "delete",
"distinct": "list",
"export": "mulexport",
"import_": "add",
"get": "show",
"get_list": "list",
"info": "list",
"post": "add",
"put": "edit",
"refresh": "edit",
"related": "list",
"related_objects": "list",
"schemas": "list",
"select_star": "list",
"table_metadata": "list",
"test_connection": "post",
"thumbnail": "list",
"viz_types": "list",
}
order_rel_fields: Dict[str, Tuple[str, str]] = {}
"""
Impose ordering on related fields query::
order_rel_fields = {
"<RELATED_FIELD>": ("<RELATED_FIELD_FIELD>", "<asc|desc>"),
...
}
""" # pylint: disable=pointless-string-statement
related_field_filters: Dict[str, Union[RelatedFieldFilter, str]] = {}
"""
Declare the filters for related fields::
        related_field_filters = {
            "<RELATED_FIELD>": <RelatedFieldFilter>
}
""" # pylint: disable=pointless-string-statement
filter_rel_fields: Dict[str, BaseFilter] = {}
"""
Declare the related field base filter::
        filter_rel_fields = {
            "<RELATED_FIELD>": "<FILTER>"
}
""" # pylint: disable=pointless-string-statement
allowed_rel_fields: Set[str] = set()
"""
Declare a set of allowed related fields that the `related` endpoint supports
""" # pylint: disable=pointless-string-statement
text_field_rel_fields: Dict[str, str] = {}
"""
Declare an alternative for the human readable representation of the Model object::
text_field_rel_fields = {
"<RELATED_FIELD>": "<RELATED_OBJECT_FIELD>"
}
""" # pylint: disable=pointless-string-statement
allowed_distinct_fields: Set[str] = set()
openapi_spec_component_schemas: Tuple[Type[Schema], ...] = tuple()
"""
Add extra schemas to the OpenAPI component schemas section
""" # pylint: disable=pointless-string-statement
add_columns: List[str]
edit_columns: List[str]
list_columns: List[str]
show_columns: List[str]
responses = {
"400": {"description": "Bad request", "content": error_payload_content},
"401": {"description": "Unauthorized", "content": error_payload_content},
"403": {"description": "Forbidden", "content": error_payload_content},
"404": {"description": "Not found", "content": error_payload_content},
"422": {
"description": "Could not process entity",
"content": error_payload_content,
},
"500": {"description": "Fatal error", "content": error_payload_content},
}
def __init__(self) -> None:
# Setup statsd
self.stats_logger = BaseStatsLogger()
# Add base API spec base query parameter schemas
if self.apispec_parameter_schemas is None: # type: ignore
self.apispec_parameter_schemas = {}
self.apispec_parameter_schemas["get_related_schema"] = get_related_schema
if self.openapi_spec_component_schemas is None:
self.openapi_spec_component_schemas = ()
self.openapi_spec_component_schemas = self.openapi_spec_component_schemas + (
RelatedResponseSchema,
DistincResponseSchema,
)
super().__init__()
def add_apispec_components(self, api_spec: APISpec) -> None:
"""
        Adds extra OpenApi schema spec components; these are declared
on the `openapi_spec_component_schemas` class property
"""
for schema in self.openapi_spec_component_schemas:
try:
api_spec.components.schema(
schema.__name__, schema=schema,
)
except DuplicateComponentNameError:
pass
super().add_apispec_components(api_spec)
def create_blueprint(
self, appbuilder: AppBuilder, *args: Any, **kwargs: Any
) -> Blueprint:
self.stats_logger = self.appbuilder.get_app.config["STATS_LOGGER"]
return super().create_blueprint(appbuilder, *args, **kwargs)
def _init_properties(self) -> None:
model_id = self.datamodel.get_pk_name()
if self.list_columns is None and not self.list_model_schema:
self.list_columns = [model_id]
if self.show_columns is None and not self.show_model_schema:
self.show_columns = [model_id]
if self.edit_columns is None and not self.edit_model_schema:
self.edit_columns = [model_id]
if self.add_columns is None and not self.add_model_schema:
self.add_columns = [model_id]
super()._init_properties()
def _get_related_filter(
self, datamodel: SQLAInterface, column_name: str, value: str
) -> Filters:
filter_field = self.related_field_filters.get(column_name)
if isinstance(filter_field, str):
filter_field = RelatedFieldFilter(cast(str, filter_field), FilterStartsWith)
filter_field = cast(RelatedFieldFilter, filter_field)
search_columns = [filter_field.field_name] if filter_field else None
filters = datamodel.get_filters(search_columns)
base_filters = self.filter_rel_fields.get(column_name)
if base_filters:
filters.add_filter_list(base_filters)
if value and filter_field:
filters.add_filter(
filter_field.field_name, filter_field.filter_class, value
)
return filters
def _get_distinct_filter(self, column_name: str, value: str) -> Filters:
filter_field = RelatedFieldFilter(column_name, FilterStartsWith)
filter_field = cast(RelatedFieldFilter, filter_field)
search_columns = [filter_field.field_name] if filter_field else None
filters = self.datamodel.get_filters(search_columns)
filters.add_filter_list(self.base_filters)
if value and filter_field:
filters.add_filter(
filter_field.field_name, filter_field.filter_class, value
)
return filters
def _get_text_for_model(self, model: Model, column_name: str) -> str:
if column_name in self.text_field_rel_fields:
model_column_name = self.text_field_rel_fields.get(column_name)
if model_column_name:
return getattr(model, model_column_name)
return str(model)
def _get_result_from_rows(
self, datamodel: SQLAInterface, rows: List[Model], column_name: str
) -> List[Dict[str, Any]]:
return [
{
"value": datamodel.get_pk_value(row),
"text": self._get_text_for_model(row, column_name),
}
for row in rows
]
def _add_extra_ids_to_result(
self,
datamodel: SQLAInterface,
column_name: str,
ids: List[int],
result: List[Dict[str, Any]],
) -> None:
if ids:
# Filter out already present values on the result
values = [row["value"] for row in result]
ids = [id_ for id_ in ids if id_ not in values]
pk_col = datamodel.get_pk()
# Fetch requested values from ids
extra_rows = db.session.query(datamodel.obj).filter(pk_col.in_(ids)).all()
result += self._get_result_from_rows(datamodel, extra_rows, column_name)
def incr_stats(self, action: str, func_name: str) -> None:
"""
        Proxy function for statsd.incr to impose a key structure for REST APIs
:param action: String with an action name eg: error, success
:param func_name: The function name
"""
self.stats_logger.incr(f"{self.__class__.__name__}.{func_name}.{action}")
def timing_stats(self, action: str, func_name: str, value: float) -> None:
"""
        Proxy function for statsd.timing to impose a key structure for REST APIs
:param action: String with an action name eg: error, success
:param func_name: The function name
:param value: A float with the time it took for the endpoint to execute
"""
self.stats_logger.timing(
f"{self.__class__.__name__}.{func_name}.{action}", value
)
def send_stats_metrics(
self, response: Response, key: str, time_delta: Optional[float] = None
) -> None:
"""
Helper function to handle sending statsd metrics
:param response: flask response object, will evaluate if it was an error
:param key: The function name
:param time_delta: Optional time it took for the endpoint to execute
"""
if 200 <= response.status_code < 400:
self.incr_stats("success", key)
else:
self.incr_stats("error", key)
if time_delta:
self.timing_stats("time", key, time_delta)
@event_logger.log_this_with_context(
action=lambda self, *args, **kwargs: f"{self.__class__.__name__}.info",
object_ref=False,
log_to_statsd=False,
)
def info_headless(self, **kwargs: Any) -> Response:
"""
Add statsd metrics to builtin FAB _info endpoint
"""
duration, response = time_function(super().info_headless, **kwargs)
self.send_stats_metrics(response, self.info.__name__, duration)
return response
@event_logger.log_this_with_context(
action=lambda self, *args, **kwargs: f"{self.__class__.__name__}.get",
object_ref=False,
log_to_statsd=False,
)
def get_headless(self, pk: int, **kwargs: Any) -> Response:
"""
Add statsd metrics to builtin FAB GET endpoint
"""
duration, response = time_function(super().get_headless, pk, **kwargs)
self.send_stats_metrics(response, self.get.__name__, duration)
return response
@event_logger.log_this_with_context(
action=lambda self, *args, **kwargs: f"{self.__class__.__name__}.get_list",
object_ref=False,
log_to_statsd=False,
)
def get_list_headless(self, **kwargs: Any) -> Response:
"""
Add statsd metrics to builtin FAB GET list endpoint
"""
duration, response = time_function(super().get_list_headless, **kwargs)
self.send_stats_metrics(response, self.get_list.__name__, duration)
return response
@event_logger.log_this_with_context(
action=lambda self, *args, **kwargs: f"{self.__class__.__name__}.post",
object_ref=False,
log_to_statsd=False,
)
def post_headless(self) -> Response:
"""
Add statsd metrics to builtin FAB POST endpoint
"""
duration, response = time_function(super().post_headless)
self.send_stats_metrics(response, self.post.__name__, duration)
return response
@event_logger.log_this_with_context(
action=lambda self, *args, **kwargs: f"{self.__class__.__name__}.put",
object_ref=False,
log_to_statsd=False,
)
def put_headless(self, pk: int) -> Response:
"""
Add statsd metrics to builtin FAB PUT endpoint
"""
duration, response = time_function(super().put_headless, pk)
self.send_stats_metrics(response, self.put.__name__, duration)
return response
@event_logger.log_this_with_context(
action=lambda self, *args, **kwargs: f"{self.__class__.__name__}.delete",
object_ref=False,
log_to_statsd=False,
)
def delete_headless(self, pk: int) -> Response:
"""
Add statsd metrics to builtin FAB DELETE endpoint
"""
duration, response = time_function(super().delete_headless, pk)
self.send_stats_metrics(response, self.delete.__name__, duration)
return response
@expose("/related/<column_name>", methods=["GET"])
@protect()
@safe
@statsd_metrics
@rison(get_related_schema)
def related(self, column_name: str, **kwargs: Any) -> FlaskResponse:
"""Get related fields data
---
get:
parameters:
- in: path
schema:
type: string
name: column_name
- in: query
name: q
content:
application/json:
schema:
$ref: '#/components/schemas/get_related_schema'
responses:
200:
description: Related column data
content:
application/json:
schema:
                    $ref: "#/components/schemas/RelatedResponseSchema"
400:
$ref: '#/components/responses/400'
401:
$ref: '#/components/responses/401'
404:
$ref: '#/components/responses/404'
500:
$ref: '#/components/responses/500'
"""
if column_name not in self.allowed_rel_fields:
self.incr_stats("error", self.related.__name__)
return self.response_404()
args = kwargs.get("rison", {})
# handle pagination
page, page_size = self._handle_page_args(args)
try:
datamodel = self.datamodel.get_related_interface(column_name)
except KeyError:
return self.response_404()
page, page_size = self._sanitize_page_args(page, page_size)
# handle ordering
order_field = self.order_rel_fields.get(column_name)
if order_field:
order_column, order_direction = order_field
else:
order_column, order_direction = "", ""
# handle filters
filters = self._get_related_filter(datamodel, column_name, args.get("filter"))
# Make the query
_, rows = datamodel.query(
filters, order_column, order_direction, page=page, page_size=page_size
)
# produce response
result = self._get_result_from_rows(datamodel, rows, column_name)
# If ids are specified make sure we fetch and include them on the response
ids = args.get("include_ids")
self._add_extra_ids_to_result(datamodel, column_name, ids, result)
return self.response(200, count=len(result), result=result)
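    # Illustrative request to the related endpoint above (resource and column names
    # are hypothetical; the q argument is rison-encoded):
    #   GET /api/v1/chart/related/owners?q=(filter:'jo',page:0,page_size:25)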
@expose("/distinct/<column_name>", methods=["GET"])
@protect()
@safe
@statsd_metrics
@rison(get_related_schema)
def distinct(self, column_name: str, **kwargs: Any) -> FlaskResponse:
"""Get distinct values from field data
---
get:
parameters:
- in: path
schema:
type: string
name: column_name
- in: query
name: q
content:
application/json:
schema:
$ref: '#/components/schemas/get_related_schema'
responses:
200:
description: Distinct field data
content:
application/json:
schema:
                    $ref: "#/components/schemas/DistincResponseSchema"
400:
$ref: '#/components/responses/400'
401:
$ref: '#/components/responses/401'
404:
$ref: '#/components/responses/404'
500:
$ref: '#/components/responses/500'
"""
if column_name not in self.allowed_distinct_fields:
self.incr_stats("error", self.related.__name__)
return self.response_404()
args = kwargs.get("rison", {})
# handle pagination
page, page_size = self._sanitize_page_args(*self._handle_page_args(args))
# Create generic base filters with added request filter
filters = self._get_distinct_filter(column_name, args.get("filter"))
# Make the query
query_count = self.appbuilder.get_session.query(
func.count(distinct(getattr(self.datamodel.obj, column_name)))
)
count = self.datamodel.apply_filters(query_count, filters).scalar()
if count == 0:
return self.response(200, count=count, result=[])
query = self.appbuilder.get_session.query(
distinct(getattr(self.datamodel.obj, column_name))
)
# Apply generic base filters with added request filter
query = self.datamodel.apply_filters(query, filters)
# Apply sort
query = self.datamodel.apply_order_by(query, column_name, "asc")
# Apply pagination
result = self.datamodel.apply_pagination(query, page, page_size).all()
# produce response
result = [
{"text": item[0], "value": item[0]}
for item in result
if item[0] is not None
]
return self.response(200, count=count, result=result)
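    # Illustrative request to the distinct endpoint above (resource and column names
    # are hypothetical): GET /api/v1/query/distinct/user?q=(filter:'a',page_size:10)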
| apache-2.0 | 31,853,287,170,667,510 | 4,475,438,319,307,166,700 | 36.398637 | 88 | 0.605475 | false |
kustodian/ansible | test/units/modules/network/f5/test_bigip_smtp.py | 22 | 4984 | # -*- coding: utf-8 -*-
#
# Copyright: (c) 2017, F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
import pytest
import sys
if sys.version_info < (2, 7):
pytestmark = pytest.mark.skip("F5 Ansible modules require Python >= 2.7")
from ansible.module_utils.basic import AnsibleModule
try:
from library.modules.bigip_smtp import ApiParameters
from library.modules.bigip_smtp import ModuleParameters
from library.modules.bigip_smtp import ModuleManager
from library.modules.bigip_smtp import ArgumentSpec
# In Ansible 2.8, Ansible changed import paths.
from test.units.compat import unittest
from test.units.compat.mock import Mock
from test.units.modules.utils import set_module_args
except ImportError:
from ansible.modules.network.f5.bigip_smtp import ApiParameters
from ansible.modules.network.f5.bigip_smtp import ModuleParameters
from ansible.modules.network.f5.bigip_smtp import ModuleManager
from ansible.modules.network.f5.bigip_smtp import ArgumentSpec
# Ansible 2.8 imports
from units.compat import unittest
from units.compat.mock import Mock
from units.modules.utils import set_module_args
fixture_path = os.path.join(os.path.dirname(__file__), 'fixtures')
fixture_data = {}
def load_fixture(name):
path = os.path.join(fixture_path, name)
if path in fixture_data:
return fixture_data[path]
with open(path) as f:
data = f.read()
try:
data = json.loads(data)
except Exception:
pass
fixture_data[path] = data
return data
class TestParameters(unittest.TestCase):
def test_module_parameters(self):
args = dict(
name='foo',
smtp_server='1.1.1.1',
smtp_server_port='25',
smtp_server_username='admin',
smtp_server_password='password',
local_host_name='smtp.mydomain.com',
encryption='tls',
update_password='always',
from_address='no-reply@mydomain.com',
authentication=True,
)
p = ModuleParameters(params=args)
assert p.name == 'foo'
assert p.smtp_server == '1.1.1.1'
assert p.smtp_server_port == 25
assert p.smtp_server_username == 'admin'
assert p.smtp_server_password == 'password'
assert p.local_host_name == 'smtp.mydomain.com'
assert p.encryption == 'tls'
assert p.update_password == 'always'
assert p.from_address == 'no-reply@mydomain.com'
assert p.authentication_disabled is None
assert p.authentication_enabled is True
def test_api_parameters(self):
p = ApiParameters(params=load_fixture('load_sys_smtp_server.json'))
assert p.name == 'foo'
assert p.smtp_server == 'mail.foo.bar'
assert p.smtp_server_port == 465
assert p.smtp_server_username == 'admin'
assert p.smtp_server_password == '$M$Ch$this-is-encrypted=='
assert p.local_host_name == 'mail-host.foo.bar'
assert p.encryption == 'ssl'
assert p.from_address == 'no-reply@foo.bar'
assert p.authentication_disabled is None
assert p.authentication_enabled is True
class TestManager(unittest.TestCase):
def setUp(self):
self.spec = ArgumentSpec()
    def test_create_smtp(self, *args):
set_module_args(dict(
name='foo',
smtp_server='1.1.1.1',
smtp_server_port='25',
smtp_server_username='admin',
smtp_server_password='password',
local_host_name='smtp.mydomain.com',
encryption='tls',
update_password='always',
from_address='no-reply@mydomain.com',
authentication=True,
partition='Common',
provider=dict(
server='localhost',
password='password',
user='admin'
)
))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode
)
# Override methods in the specific type of manager
mm = ModuleManager(module=module)
mm.exists = Mock(side_effect=[False, True])
mm.create_on_device = Mock(return_value=True)
results = mm.exec_module()
assert results['changed'] is True
assert results['encryption'] == 'tls'
assert results['smtp_server'] == '1.1.1.1'
assert results['smtp_server_port'] == 25
assert results['local_host_name'] == 'smtp.mydomain.com'
assert results['authentication'] is True
assert results['from_address'] == 'no-reply@mydomain.com'
assert 'smtp_server_username' not in results
assert 'smtp_server_password' not in results
| gpl-3.0 | -5,582,785,453,975,722,000 | -1,419,490,070,901,021,200 | 31.789474 | 91 | 0.62801 | false |
nzavagli/UnrealPy | UnrealPyEmbed/Development/Python/2015.08.07-Python2710-x64-Source-vs2015/Python27/Source/Jinja2-2.7.3/docs/jinjaext.py | 17 | 6953 | # -*- coding: utf-8 -*-
"""
Jinja Documentation Extensions
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Support for automatically documenting filters and tests.
:copyright: Copyright 2008 by Armin Ronacher.
:license: BSD.
"""
import collections
import os
import re
import inspect
import jinja2
from itertools import islice
from types import BuiltinFunctionType
from docutils import nodes
from docutils.statemachine import ViewList
from sphinx.ext.autodoc import prepare_docstring
from sphinx.application import TemplateBridge
from pygments.style import Style
from pygments.token import Keyword, Name, Comment, String, Error, \
Number, Operator, Generic
from jinja2 import Environment, FileSystemLoader
from jinja2.utils import next
def parse_rst(state, content_offset, doc):
node = nodes.section()
# hack around title style bookkeeping
surrounding_title_styles = state.memo.title_styles
surrounding_section_level = state.memo.section_level
state.memo.title_styles = []
state.memo.section_level = 0
state.nested_parse(doc, content_offset, node, match_titles=1)
state.memo.title_styles = surrounding_title_styles
state.memo.section_level = surrounding_section_level
return node.children
class JinjaStyle(Style):
title = 'Jinja Style'
default_style = ""
styles = {
Comment: 'italic #aaaaaa',
Comment.Preproc: 'noitalic #B11414',
Comment.Special: 'italic #505050',
Keyword: 'bold #B80000',
Keyword.Type: '#808080',
Operator.Word: 'bold #B80000',
Name.Builtin: '#333333',
Name.Function: '#333333',
Name.Class: 'bold #333333',
Name.Namespace: 'bold #333333',
Name.Entity: 'bold #363636',
Name.Attribute: '#686868',
Name.Tag: 'bold #686868',
Name.Decorator: '#686868',
String: '#AA891C',
Number: '#444444',
Generic.Heading: 'bold #000080',
Generic.Subheading: 'bold #800080',
Generic.Deleted: '#aa0000',
Generic.Inserted: '#00aa00',
Generic.Error: '#aa0000',
Generic.Emph: 'italic',
Generic.Strong: 'bold',
Generic.Prompt: '#555555',
Generic.Output: '#888888',
Generic.Traceback: '#aa0000',
Error: '#F00 bg:#FAA'
}
_sig_re = re.compile(r'^[a-zA-Z_][a-zA-Z0-9_]*(\(.*?\))')
def format_function(name, aliases, func):
lines = inspect.getdoc(func).splitlines()
signature = '()'
if isinstance(func, BuiltinFunctionType):
match = _sig_re.match(lines[0])
if match is not None:
del lines[:1 + bool(lines and not lines[0])]
signature = match.group(1)
else:
try:
argspec = inspect.getargspec(func)
if getattr(func, 'environmentfilter', False) or \
getattr(func, 'contextfilter', False) or \
getattr(func, 'evalcontextfilter', False):
del argspec[0][0]
signature = inspect.formatargspec(*argspec)
        except Exception:
pass
result = ['.. function:: %s%s' % (name, signature), '']
result.extend(' ' + line for line in lines)
if aliases:
result.extend(('', ' :aliases: %s' % ', '.join(
'``%s``' % x for x in sorted(aliases))))
return result
def dump_functions(mapping):
def directive(dirname, arguments, options, content, lineno,
content_offset, block_text, state, state_machine):
reverse_mapping = {}
for name, func in mapping.items():
reverse_mapping.setdefault(func, []).append(name)
filters = []
for func, names in reverse_mapping.items():
aliases = sorted(names, key=lambda x: len(x))
name = aliases.pop()
filters.append((name, aliases, func))
filters.sort()
result = ViewList()
for name, aliases, func in filters:
for item in format_function(name, aliases, func):
result.append(item, '<jinjaext>')
node = nodes.paragraph()
state.nested_parse(result, content_offset, node)
return node.children
return directive
from jinja2.defaults import DEFAULT_FILTERS, DEFAULT_TESTS
jinja_filters = dump_functions(DEFAULT_FILTERS)
jinja_tests = dump_functions(DEFAULT_TESTS)
def jinja_nodes(dirname, arguments, options, content, lineno,
content_offset, block_text, state, state_machine):
from jinja2.nodes import Node
doc = ViewList()
def walk(node, indent):
p = ' ' * indent
sig = ', '.join(node.fields)
doc.append(p + '.. autoclass:: %s(%s)' % (node.__name__, sig), '')
if node.abstract:
members = []
for key, name in node.__dict__.items():
if not key.startswith('_') and \
not hasattr(node.__base__, key) and isinstance(name, collections.Callable):
members.append(key)
if members:
members.sort()
doc.append('%s :members: %s' % (p, ', '.join(members)), '')
if node.__base__ != object:
doc.append('', '')
doc.append('%s :Node type: :class:`%s`' %
(p, node.__base__.__name__), '')
doc.append('', '')
children = node.__subclasses__()
children.sort(key=lambda x: x.__name__.lower())
for child in children:
walk(child, indent)
walk(Node, 0)
return parse_rst(state, content_offset, doc)
def inject_toc(app, doctree, docname):
titleiter = iter(doctree.traverse(nodes.title))
try:
# skip first title, we are not interested in that one
next(titleiter)
title = next(titleiter)
# and check if there is at least another title
next(titleiter)
except StopIteration:
return
tocnode = nodes.section('')
tocnode['classes'].append('toc')
toctitle = nodes.section('')
toctitle['classes'].append('toctitle')
toctitle.append(nodes.title(text='Table Of Contents'))
tocnode.append(toctitle)
tocnode += doctree.document.settings.env.get_toc_for(docname)[0][1]
title.parent.insert(title.parent.children.index(title), tocnode)
def setup(app):
app.add_directive('jinjafilters', jinja_filters, 0, (0, 0, 0))
app.add_directive('jinjatests', jinja_tests, 0, (0, 0, 0))
app.add_directive('jinjanodes', jinja_nodes, 0, (0, 0, 0))
# uncomment for inline toc. links are broken unfortunately
##app.connect('doctree-resolved', inject_toc)
| mit | 5,624,439,763,749,296,000 | -1,295,728,789,800,572,400 | 34.65641 | 94 | 0.567237 | false |
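# A small usage sketch (do_upper is hypothetical, not part of jinjaext.py):
# format_function() turns a callable plus its aliases into the reST lines that the
# jinjafilters/jinjatests directives feed to docutils through a ViewList.
def do_upper(s):
    """Convert a value to uppercase."""
    return s.upper()
rst_lines = format_function('upper', ['uppercase'], do_upper)
# rst_lines begins with a '.. function:: upper(...)' directive line, followed by the
# indented docstring and an ':aliases: ``uppercase``' entry.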
catherinemoresco/feedme | venv/lib/python2.7/site-packages/gunicorn/app/base.py | 24 | 4153 | # -*- coding: utf-8 -
#
# This file is part of gunicorn released under the MIT license.
# See the NOTICE for more information.
import os
import sys
import traceback
from gunicorn import util
from gunicorn.arbiter import Arbiter
from gunicorn.config import Config, get_default_config_file
from gunicorn import debug
from gunicorn.six import execfile_
class Application(object):
"""\
An application interface for configuring and loading
the various necessities for any given web framework.
"""
def __init__(self, usage=None, prog=None):
self.usage = usage
self.cfg = None
self.callable = None
self.prog = prog
self.logger = None
self.do_load_config()
def do_load_config(self):
try:
self.load_config()
except Exception as e:
sys.stderr.write("\nError: %s\n" % str(e))
sys.stderr.flush()
sys.exit(1)
def load_config_from_file(self, filename):
if not os.path.exists(filename):
raise RuntimeError("%r doesn't exist" % filename)
cfg = {
"__builtins__": __builtins__,
"__name__": "__config__",
"__file__": filename,
"__doc__": None,
"__package__": None
}
try:
execfile_(filename, cfg, cfg)
except Exception:
print("Failed to read config file: %s" % filename)
traceback.print_exc()
sys.exit(1)
for k, v in cfg.items():
# Ignore unknown names
if k not in self.cfg.settings:
continue
try:
self.cfg.set(k.lower(), v)
except:
sys.stderr.write("Invalid value for %s: %s\n\n" % (k, v))
raise
return cfg
def load_config(self):
# init configuration
self.cfg = Config(self.usage, prog=self.prog)
# parse console args
parser = self.cfg.parser()
args = parser.parse_args()
# optional settings from apps
cfg = self.init(parser, args, args.args)
# Load up the any app specific configuration
if cfg and cfg is not None:
for k, v in cfg.items():
self.cfg.set(k.lower(), v)
if args.config:
self.load_config_from_file(args.config)
else:
default_config = get_default_config_file()
if default_config is not None:
self.load_config_from_file(default_config)
# Lastly, update the configuration with any command line
# settings.
for k, v in args.__dict__.items():
if v is None:
continue
if k == "args":
continue
self.cfg.set(k.lower(), v)
def init(self, parser, opts, args):
raise NotImplementedError
def load(self):
raise NotImplementedError
def reload(self):
self.do_load_config()
if self.cfg.spew:
debug.spew()
def wsgi(self):
if self.callable is None:
self.callable = self.load()
return self.callable
def run(self):
if self.cfg.check_config:
try:
self.load()
except:
sys.stderr.write("\nError while loading the application:\n\n")
traceback.print_exc()
sys.stderr.flush()
sys.exit(1)
sys.exit(0)
if self.cfg.spew:
debug.spew()
if self.cfg.daemon:
util.daemonize(self.cfg.enable_stdio_inheritance)
# set python paths
if self.cfg.pythonpath and self.cfg.pythonpath is not None:
paths = self.cfg.pythonpath.split(",")
for path in paths:
pythonpath = os.path.abspath(path)
if pythonpath not in sys.path:
sys.path.insert(0, pythonpath)
try:
Arbiter(self).run()
except RuntimeError as e:
sys.stderr.write("\nError: %s\n\n" % e)
sys.stderr.flush()
sys.exit(1)
| gpl-2.0 | 6,882,300,172,783,695,000 | 7,822,619,249,659,260,000 | 27.251701 | 78 | 0.532145 | false |
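# A minimal sketch of how this base class is typically subclassed (the class name and
# the 'bind' setting below are illustrative): init() may return a dict of extra
# settings and load() must return the WSGI callable that the Arbiter's workers serve.
class HelloApplication(Application):
    def init(self, parser, opts, args):
        return {'bind': '127.0.0.1:8000'}
    def load(self):
        def wsgi_app(environ, start_response):
            start_response('200 OK', [('Content-Type', 'text/plain')])
            return [b'hello']
        return wsgi_app
# HelloApplication("%(prog)s [OPTIONS]").run()  # parses argv, then starts the Arbiter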
mezz64/home-assistant | homeassistant/components/xiaomi/device_tracker.py | 12 | 5680 | """Support for Xiaomi Mi routers."""
import logging
import requests
import voluptuous as vol
from homeassistant.components.device_tracker import (
DOMAIN,
PLATFORM_SCHEMA,
DeviceScanner,
)
from homeassistant.const import CONF_HOST, CONF_PASSWORD, CONF_USERNAME, HTTP_OK
import homeassistant.helpers.config_validation as cv
_LOGGER = logging.getLogger(__name__)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_HOST): cv.string,
vol.Required(CONF_USERNAME, default="admin"): cv.string,
vol.Required(CONF_PASSWORD): cv.string,
}
)
def get_scanner(hass, config):
"""Validate the configuration and return a Xiaomi Device Scanner."""
scanner = XiaomiDeviceScanner(config[DOMAIN])
return scanner if scanner.success_init else None
class XiaomiDeviceScanner(DeviceScanner):
"""This class queries a Xiaomi Mi router.
Adapted from Luci scanner.
"""
def __init__(self, config):
"""Initialize the scanner."""
self.host = config[CONF_HOST]
self.username = config[CONF_USERNAME]
self.password = config[CONF_PASSWORD]
self.last_results = {}
self.token = _get_token(self.host, self.username, self.password)
self.mac2name = None
self.success_init = self.token is not None
def scan_devices(self):
"""Scan for new devices and return a list with found device IDs."""
self._update_info()
return self.last_results
def get_device_name(self, device):
"""Return the name of the given device or None if we don't know."""
if self.mac2name is None:
result = self._retrieve_list_with_retry()
if result:
hosts = [x for x in result if "mac" in x and "name" in x]
mac2name_list = [(x["mac"].upper(), x["name"]) for x in hosts]
self.mac2name = dict(mac2name_list)
else:
# Error, handled in the _retrieve_list_with_retry
return
return self.mac2name.get(device.upper(), None)
def _update_info(self):
"""Ensure the information from the router are up to date.
Returns true if scanning successful.
"""
if not self.success_init:
return False
result = self._retrieve_list_with_retry()
if result:
self._store_result(result)
return True
return False
def _retrieve_list_with_retry(self):
"""Retrieve the device list with a retry if token is invalid.
Return the list if successful.
"""
_LOGGER.info("Refreshing device list")
result = _retrieve_list(self.host, self.token)
if result:
return result
_LOGGER.info("Refreshing token and retrying device list refresh")
self.token = _get_token(self.host, self.username, self.password)
return _retrieve_list(self.host, self.token)
def _store_result(self, result):
"""Extract and store the device list in self.last_results."""
self.last_results = []
for device_entry in result:
# Check if the device is marked as connected
if int(device_entry["online"]) == 1:
self.last_results.append(device_entry["mac"])
def _retrieve_list(host, token, **kwargs):
"""Get device list for the given host."""
url = "http://{}/cgi-bin/luci/;stok={}/api/misystem/devicelist"
url = url.format(host, token)
try:
res = requests.get(url, timeout=5, **kwargs)
except requests.exceptions.Timeout:
_LOGGER.exception("Connection to the router timed out at URL %s", url)
return
if res.status_code != HTTP_OK:
_LOGGER.exception("Connection failed with http code %s", res.status_code)
return
try:
result = res.json()
except ValueError:
# If json decoder could not parse the response
_LOGGER.exception("Failed to parse response from mi router")
return
try:
xiaomi_code = result["code"]
except KeyError:
_LOGGER.exception("No field code in response from mi router. %s", result)
return
if xiaomi_code == 0:
try:
return result["list"]
except KeyError:
_LOGGER.exception("No list in response from mi router. %s", result)
return
else:
_LOGGER.info(
"Receive wrong Xiaomi code %s, expected 0 in response %s",
xiaomi_code,
result,
)
return
def _get_token(host, username, password):
"""Get authentication token for the given host+username+password."""
url = f"http://{host}/cgi-bin/luci/api/xqsystem/login"
data = {"username": username, "password": password}
try:
res = requests.post(url, data=data, timeout=5)
except requests.exceptions.Timeout:
_LOGGER.exception("Connection to the router timed out")
return
if res.status_code == HTTP_OK:
try:
result = res.json()
except ValueError:
# If JSON decoder could not parse the response
_LOGGER.exception("Failed to parse response from mi router")
return
try:
return result["token"]
except KeyError:
error_message = (
"Xiaomi token cannot be refreshed, response from "
+ "url: [%s] \nwith parameter: [%s] \nwas: [%s]"
)
_LOGGER.exception(error_message, url, data, result)
return
else:
_LOGGER.error(
"Invalid response: [%s] at url: [%s] with data [%s]", res, url, data
)
| apache-2.0 | 5,886,470,307,679,928,000 | -536,491,059,793,231,700 | 32.023256 | 81 | 0.600176 | false |
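# Illustrative only (router address and credentials are placeholders): the
# module-level helpers above can be exercised directly. A token is fetched once and
# reused until a device-list request fails, at which point the scanner refreshes it.
token = _get_token('192.168.31.1', 'admin', 'router-password')
if token:
    devices = _retrieve_list('192.168.31.1', token) or []
    online_macs = [d['mac'] for d in devices if int(d['online']) == 1]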
xiaolonginfo/decode-Django | Django-1.5.1/tests/regressiontests/expressions_regress/tests.py | 46 | 15966 | """
Spanning tests for all the operations that F() expressions can perform.
"""
from __future__ import absolute_import
import datetime
from django.db import connection
from django.db.models import F
from django.test import TestCase, Approximate, skipUnlessDBFeature
from .models import Number, Experiment
class ExpressionsRegressTests(TestCase):
def setUp(self):
Number(integer=-1).save()
Number(integer=42).save()
Number(integer=1337).save()
self.assertEqual(Number.objects.update(float=F('integer')), 3)
def test_fill_with_value_from_same_object(self):
"""
We can fill a value in all objects with an other value of the
same object.
"""
self.assertQuerysetEqual(
Number.objects.all(),
[
'<Number: -1, -1.000>',
'<Number: 42, 42.000>',
'<Number: 1337, 1337.000>'
]
)
def test_increment_value(self):
"""
We can increment a value of all objects in a query set.
"""
self.assertEqual(
Number.objects.filter(integer__gt=0)
.update(integer=F('integer') + 1),
2)
self.assertQuerysetEqual(
Number.objects.all(),
[
'<Number: -1, -1.000>',
'<Number: 43, 42.000>',
'<Number: 1338, 1337.000>'
]
)
def test_filter_not_equals_other_field(self):
"""
We can filter for objects, where a value is not equals the value
of an other field.
"""
self.assertEqual(
Number.objects.filter(integer__gt=0)
.update(integer=F('integer') + 1),
2)
self.assertQuerysetEqual(
Number.objects.exclude(float=F('integer')),
[
'<Number: 43, 42.000>',
'<Number: 1338, 1337.000>'
]
)
def test_complex_expressions(self):
"""
Complex expressions of different connection types are possible.
"""
n = Number.objects.create(integer=10, float=123.45)
self.assertEqual(Number.objects.filter(pk=n.pk)
.update(float=F('integer') + F('float') * 2),
1)
self.assertEqual(Number.objects.get(pk=n.pk).integer, 10)
self.assertEqual(Number.objects.get(pk=n.pk).float, Approximate(256.900, places=3))
class ExpressionOperatorTests(TestCase):
def setUp(self):
self.n = Number.objects.create(integer=42, float=15.5)
def test_lefthand_addition(self):
# LH Addition of floats and integers
Number.objects.filter(pk=self.n.pk).update(
integer=F('integer') + 15,
float=F('float') + 42.7
)
self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 57)
self.assertEqual(Number.objects.get(pk=self.n.pk).float, Approximate(58.200, places=3))
def test_lefthand_subtraction(self):
# LH Subtraction of floats and integers
Number.objects.filter(pk=self.n.pk).update(integer=F('integer') - 15,
float=F('float') - 42.7)
self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 27)
self.assertEqual(Number.objects.get(pk=self.n.pk).float, Approximate(-27.200, places=3))
def test_lefthand_multiplication(self):
# Multiplication of floats and integers
Number.objects.filter(pk=self.n.pk).update(integer=F('integer') * 15,
float=F('float') * 42.7)
self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 630)
self.assertEqual(Number.objects.get(pk=self.n.pk).float, Approximate(661.850, places=3))
def test_lefthand_division(self):
# LH Division of floats and integers
Number.objects.filter(pk=self.n.pk).update(integer=F('integer') / 2,
float=F('float') / 42.7)
self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 21)
self.assertEqual(Number.objects.get(pk=self.n.pk).float, Approximate(0.363, places=3))
def test_lefthand_modulo(self):
# LH Modulo arithmetic on integers
Number.objects.filter(pk=self.n.pk).update(integer=F('integer') % 20)
self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 2)
self.assertEqual(Number.objects.get(pk=self.n.pk).float, Approximate(15.500, places=3))
def test_lefthand_bitwise_and(self):
# LH Bitwise ands on integers
Number.objects.filter(pk=self.n.pk).update(integer=F('integer').bitand(56))
self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 40)
self.assertEqual(Number.objects.get(pk=self.n.pk).float, Approximate(15.500, places=3))
@skipUnlessDBFeature('supports_bitwise_or')
def test_lefthand_bitwise_or(self):
# LH Bitwise or on integers
Number.objects.filter(pk=self.n.pk).update(integer=F('integer').bitor(48))
self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 58)
self.assertEqual(Number.objects.get(pk=self.n.pk).float, Approximate(15.500, places=3))
def test_right_hand_addition(self):
# Right hand operators
Number.objects.filter(pk=self.n.pk).update(integer=15 + F('integer'),
float=42.7 + F('float'))
# RH Addition of floats and integers
self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 57)
self.assertEqual(Number.objects.get(pk=self.n.pk).float, Approximate(58.200, places=3))
def test_right_hand_subtraction(self):
Number.objects.filter(pk=self.n.pk).update(integer=15 - F('integer'),
float=42.7 - F('float'))
# RH Subtraction of floats and integers
self.assertEqual(Number.objects.get(pk=self.n.pk).integer, -27)
self.assertEqual(Number.objects.get(pk=self.n.pk).float, Approximate(27.200, places=3))
def test_right_hand_multiplication(self):
# RH Multiplication of floats and integers
Number.objects.filter(pk=self.n.pk).update(integer=15 * F('integer'),
float=42.7 * F('float'))
self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 630)
self.assertEqual(Number.objects.get(pk=self.n.pk).float, Approximate(661.850, places=3))
def test_right_hand_division(self):
# RH Division of floats and integers
Number.objects.filter(pk=self.n.pk).update(integer=640 / F('integer'),
float=42.7 / F('float'))
self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 15)
self.assertEqual(Number.objects.get(pk=self.n.pk).float, Approximate(2.755, places=3))
def test_right_hand_modulo(self):
# RH Modulo arithmetic on integers
Number.objects.filter(pk=self.n.pk).update(integer=69 % F('integer'))
self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 27)
self.assertEqual(Number.objects.get(pk=self.n.pk).float, Approximate(15.500, places=3))
class FTimeDeltaTests(TestCase):
def setUp(self):
sday = datetime.date(2010, 6, 25)
stime = datetime.datetime(2010, 6, 25, 12, 15, 30, 747000)
midnight = datetime.time(0)
delta0 = datetime.timedelta(0)
delta1 = datetime.timedelta(microseconds=253000)
delta2 = datetime.timedelta(seconds=44)
delta3 = datetime.timedelta(hours=21, minutes=8)
delta4 = datetime.timedelta(days=10)
# Test data is set so that deltas and delays will be
# strictly increasing.
self.deltas = []
self.delays = []
self.days_long = []
# e0: started same day as assigned, zero duration
end = stime+delta0
e0 = Experiment.objects.create(name='e0', assigned=sday, start=stime,
end=end, completed=end.date())
self.deltas.append(delta0)
self.delays.append(e0.start-
datetime.datetime.combine(e0.assigned, midnight))
self.days_long.append(e0.completed-e0.assigned)
# e1: started one day after assigned, tiny duration, data
# set so that end time has no fractional seconds, which
# tests an edge case on sqlite. This Experiment is only
# included in the test data when the DB supports microsecond
# precision.
if connection.features.supports_microsecond_precision:
delay = datetime.timedelta(1)
end = stime + delay + delta1
e1 = Experiment.objects.create(name='e1', assigned=sday,
start=stime+delay, end=end, completed=end.date())
self.deltas.append(delta1)
self.delays.append(e1.start-
datetime.datetime.combine(e1.assigned, midnight))
self.days_long.append(e1.completed-e1.assigned)
# e2: started three days after assigned, small duration
end = stime+delta2
e2 = Experiment.objects.create(name='e2',
assigned=sday-datetime.timedelta(3), start=stime, end=end,
completed=end.date())
self.deltas.append(delta2)
self.delays.append(e2.start-
datetime.datetime.combine(e2.assigned, midnight))
self.days_long.append(e2.completed-e2.assigned)
# e3: started four days after assigned, medium duration
delay = datetime.timedelta(4)
end = stime + delay + delta3
e3 = Experiment.objects.create(name='e3',
assigned=sday, start=stime+delay, end=end, completed=end.date())
self.deltas.append(delta3)
self.delays.append(e3.start-
datetime.datetime.combine(e3.assigned, midnight))
self.days_long.append(e3.completed-e3.assigned)
# e4: started 10 days after assignment, long duration
end = stime + delta4
e4 = Experiment.objects.create(name='e4',
assigned=sday-datetime.timedelta(10), start=stime, end=end,
completed=end.date())
self.deltas.append(delta4)
self.delays.append(e4.start-
datetime.datetime.combine(e4.assigned, midnight))
self.days_long.append(e4.completed-e4.assigned)
self.expnames = [e.name for e in Experiment.objects.all()]
def test_delta_add(self):
for i in range(len(self.deltas)):
delta = self.deltas[i]
test_set = [e.name for e in
Experiment.objects.filter(end__lt=F('start')+delta)]
self.assertEqual(test_set, self.expnames[:i])
test_set = [e.name for e in
Experiment.objects.filter(end__lte=F('start')+delta)]
self.assertEqual(test_set, self.expnames[:i+1])
def test_delta_subtract(self):
for i in range(len(self.deltas)):
delta = self.deltas[i]
test_set = [e.name for e in
Experiment.objects.filter(start__gt=F('end')-delta)]
self.assertEqual(test_set, self.expnames[:i])
test_set = [e.name for e in
Experiment.objects.filter(start__gte=F('end')-delta)]
self.assertEqual(test_set, self.expnames[:i+1])
def test_exclude(self):
for i in range(len(self.deltas)):
delta = self.deltas[i]
test_set = [e.name for e in
Experiment.objects.exclude(end__lt=F('start')+delta)]
self.assertEqual(test_set, self.expnames[i:])
test_set = [e.name for e in
Experiment.objects.exclude(end__lte=F('start')+delta)]
self.assertEqual(test_set, self.expnames[i+1:])
def test_date_comparison(self):
for i in range(len(self.days_long)):
days = self.days_long[i]
test_set = [e.name for e in
Experiment.objects.filter(completed__lt=F('assigned')+days)]
self.assertEqual(test_set, self.expnames[:i])
test_set = [e.name for e in
Experiment.objects.filter(completed__lte=F('assigned')+days)]
self.assertEqual(test_set, self.expnames[:i+1])
@skipUnlessDBFeature("supports_mixed_date_datetime_comparisons")
def test_mixed_comparisons1(self):
for i in range(len(self.delays)):
delay = self.delays[i]
if not connection.features.supports_microsecond_precision:
delay = datetime.timedelta(delay.days, delay.seconds)
test_set = [e.name for e in
Experiment.objects.filter(assigned__gt=F('start')-delay)]
self.assertEqual(test_set, self.expnames[:i])
test_set = [e.name for e in
Experiment.objects.filter(assigned__gte=F('start')-delay)]
self.assertEqual(test_set, self.expnames[:i+1])
def test_mixed_comparisons2(self):
delays = [datetime.timedelta(delay.days) for delay in self.delays]
for i in range(len(delays)):
delay = delays[i]
test_set = [e.name for e in
Experiment.objects.filter(start__lt=F('assigned')+delay)]
self.assertEqual(test_set, self.expnames[:i])
test_set = [e.name for e in
Experiment.objects.filter(start__lte=F('assigned')+delay+
datetime.timedelta(1))]
self.assertEqual(test_set, self.expnames[:i+1])
def test_delta_update(self):
for i in range(len(self.deltas)):
delta = self.deltas[i]
exps = Experiment.objects.all()
expected_durations = [e.duration() for e in exps]
expected_starts = [e.start+delta for e in exps]
expected_ends = [e.end+delta for e in exps]
Experiment.objects.update(start=F('start')+delta, end=F('end')+delta)
exps = Experiment.objects.all()
new_starts = [e.start for e in exps]
new_ends = [e.end for e in exps]
new_durations = [e.duration() for e in exps]
self.assertEqual(expected_starts, new_starts)
self.assertEqual(expected_ends, new_ends)
self.assertEqual(expected_durations, new_durations)
def test_delta_invalid_op_mult(self):
raised = False
try:
r = repr(Experiment.objects.filter(end__lt=F('start')*self.deltas[0]))
except TypeError:
raised = True
self.assertTrue(raised, "TypeError not raised on attempt to multiply datetime by timedelta.")
def test_delta_invalid_op_div(self):
raised = False
try:
r = repr(Experiment.objects.filter(end__lt=F('start')/self.deltas[0]))
except TypeError:
raised = True
self.assertTrue(raised, "TypeError not raised on attempt to divide datetime by timedelta.")
def test_delta_invalid_op_mod(self):
raised = False
try:
r = repr(Experiment.objects.filter(end__lt=F('start')%self.deltas[0]))
except TypeError:
raised = True
self.assertTrue(raised, "TypeError not raised on attempt to modulo divide datetime by timedelta.")
def test_delta_invalid_op_and(self):
raised = False
try:
r = repr(Experiment.objects.filter(end__lt=F('start').bitand(self.deltas[0])))
except TypeError:
raised = True
self.assertTrue(raised, "TypeError not raised on attempt to binary and a datetime with a timedelta.")
def test_delta_invalid_op_or(self):
raised = False
try:
r = repr(Experiment.objects.filter(end__lt=F('start').bitor(self.deltas[0])))
except TypeError:
raised = True
self.assertTrue(raised, "TypeError not raised on attempt to binary or a datetime with a timedelta.")
| gpl-2.0 | 4,154,290,197,067,971,600 | 3,776,054,503,609,120,000 | 40.47013 | 109 | 0.595703 | false |
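# A condensed sketch of the query pattern these tests exercise (Number and Experiment
# are the test models imported above): F() references a column, so the arithmetic and
# comparisons below are evaluated in SQL rather than in Python.
Number.objects.filter(integer__gt=0).update(integer=F('integer') + 1)
Experiment.objects.filter(end__lt=F('start') + datetime.timedelta(days=1))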
yewang15215/django | tests/auth_tests/test_basic.py | 12 | 7419 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import warnings
from django.contrib.auth import get_user, get_user_model
from django.contrib.auth.models import AnonymousUser, User
from django.core.exceptions import ImproperlyConfigured
from django.db import IntegrityError
from django.http import HttpRequest
from django.test import TestCase, override_settings
from django.utils import translation
from .models import CustomUser
class BasicTestCase(TestCase):
def test_user(self):
"Users can be created and can set their password"
u = User.objects.create_user('testuser', 'test@example.com', 'testpw')
self.assertTrue(u.has_usable_password())
self.assertFalse(u.check_password('bad'))
self.assertTrue(u.check_password('testpw'))
# Check we can manually set an unusable password
u.set_unusable_password()
u.save()
self.assertFalse(u.check_password('testpw'))
self.assertFalse(u.has_usable_password())
u.set_password('testpw')
self.assertTrue(u.check_password('testpw'))
u.set_password(None)
self.assertFalse(u.has_usable_password())
# Check username getter
self.assertEqual(u.get_username(), 'testuser')
# Check authentication/permissions
self.assertFalse(u.is_anonymous)
self.assertTrue(u.is_authenticated)
self.assertFalse(u.is_staff)
self.assertTrue(u.is_active)
self.assertFalse(u.is_superuser)
# Check API-based user creation with no password
u2 = User.objects.create_user('testuser2', 'test2@example.com')
self.assertFalse(u2.has_usable_password())
def test_unicode_username(self):
User.objects.create_user('jörg')
User.objects.create_user('Григорий')
# Two equivalent unicode normalized usernames should be duplicates
omega_username = 'iamtheΩ' # U+03A9 GREEK CAPITAL LETTER OMEGA
ohm_username = 'iamtheΩ' # U+2126 OHM SIGN
User.objects.create_user(ohm_username)
with self.assertRaises(IntegrityError):
User.objects.create_user(omega_username)
def test_is_anonymous_authenticated_method_deprecation(self):
deprecation_message = (
'Using user.is_authenticated() and user.is_anonymous() as a '
'method is deprecated. Remove the parentheses to use it as an '
'attribute.'
)
u = User.objects.create_user('testuser', 'test@example.com', 'testpw')
# Backwards-compatibility callables
with warnings.catch_warnings(record=True) as warns:
warnings.simplefilter('always')
self.assertFalse(u.is_anonymous())
self.assertEqual(len(warns), 1)
self.assertEqual(str(warns[0].message), deprecation_message)
with warnings.catch_warnings(record=True) as warns:
warnings.simplefilter('always')
self.assertTrue(u.is_authenticated())
self.assertEqual(len(warns), 1)
self.assertEqual(str(warns[0].message), deprecation_message)
def test_user_no_email(self):
"Users can be created without an email"
u = User.objects.create_user('testuser1')
self.assertEqual(u.email, '')
u2 = User.objects.create_user('testuser2', email='')
self.assertEqual(u2.email, '')
u3 = User.objects.create_user('testuser3', email=None)
self.assertEqual(u3.email, '')
def test_anonymous_user(self):
"Check the properties of the anonymous user"
a = AnonymousUser()
self.assertIsNone(a.pk)
self.assertEqual(a.username, '')
self.assertEqual(a.get_username(), '')
self.assertTrue(a.is_anonymous)
self.assertFalse(a.is_authenticated)
self.assertFalse(a.is_staff)
self.assertFalse(a.is_active)
self.assertFalse(a.is_superuser)
self.assertEqual(a.groups.all().count(), 0)
self.assertEqual(a.user_permissions.all().count(), 0)
def test_anonymous_user_is_anonymous_authenticated_method_deprecation(self):
a = AnonymousUser()
deprecation_message = (
'Using user.is_authenticated() and user.is_anonymous() as a '
'method is deprecated. Remove the parentheses to use it as an '
'attribute.'
)
# Backwards-compatibility callables
with warnings.catch_warnings(record=True) as warns:
warnings.simplefilter('always') # prevent warnings from appearing as errors
self.assertTrue(a.is_anonymous())
self.assertEqual(len(warns), 1)
self.assertEqual(str(warns[0].message), deprecation_message)
with warnings.catch_warnings(record=True) as warns:
warnings.simplefilter('always') # prevent warnings from appearing as errors
self.assertFalse(a.is_authenticated())
self.assertEqual(len(warns), 1)
self.assertEqual(str(warns[0].message), deprecation_message)
def test_superuser(self):
"Check the creation and properties of a superuser"
super = User.objects.create_superuser('super', 'super@example.com', 'super')
self.assertTrue(super.is_superuser)
self.assertTrue(super.is_active)
self.assertTrue(super.is_staff)
def test_get_user_model(self):
"The current user model can be retrieved"
self.assertEqual(get_user_model(), User)
@override_settings(AUTH_USER_MODEL='auth_tests.CustomUser')
def test_swappable_user(self):
"The current user model can be swapped out for another"
self.assertEqual(get_user_model(), CustomUser)
with self.assertRaises(AttributeError):
User.objects.all()
@override_settings(AUTH_USER_MODEL='badsetting')
def test_swappable_user_bad_setting(self):
"The alternate user setting must point to something in the format app.model"
with self.assertRaises(ImproperlyConfigured):
get_user_model()
@override_settings(AUTH_USER_MODEL='thismodel.doesntexist')
def test_swappable_user_nonexistent_model(self):
"The current user model must point to an installed model"
with self.assertRaises(ImproperlyConfigured):
get_user_model()
def test_user_verbose_names_translatable(self):
"Default User model verbose names are translatable (#19945)"
with translation.override('en'):
self.assertEqual(User._meta.verbose_name, 'user')
self.assertEqual(User._meta.verbose_name_plural, 'users')
with translation.override('es'):
self.assertEqual(User._meta.verbose_name, 'usuario')
self.assertEqual(User._meta.verbose_name_plural, 'usuarios')
class TestGetUser(TestCase):
def test_get_user_anonymous(self):
request = HttpRequest()
request.session = self.client.session
user = get_user(request)
self.assertIsInstance(user, AnonymousUser)
def test_get_user(self):
created_user = User.objects.create_user('testuser', 'test@example.com', 'testpw')
self.client.login(username='testuser', password='testpw')
request = HttpRequest()
request.session = self.client.session
user = get_user(request)
self.assertIsInstance(user, User)
self.assertEqual(user.username, created_user.username)
| bsd-3-clause | 5,680,126,195,987,669,000 | -3,317,818,294,543,565,000 | 40.379888 | 89 | 0.658026 | false |
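# A short sketch of the API exercised above: get_user_model() resolves whatever
# AUTH_USER_MODEL points at (the default User or a swapped-in model), so calling code
# avoids importing django.contrib.auth.models.User directly.
from django.contrib.auth import get_user_model
UserModel = get_user_model()
user = UserModel.objects.create_user('alice', 'alice@example.com', 's3cret')
assert user.check_password('s3cret') and not user.is_superuser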
vdemeester/docker-py | tests/integration/regression_test.py | 4 | 2232 | import io
import random
import docker
import six
from .base import BaseAPIIntegrationTest, TEST_IMG
import pytest
class TestRegressions(BaseAPIIntegrationTest):
def test_443_handle_nonchunked_response_in_stream(self):
dfile = io.BytesIO()
with pytest.raises(docker.errors.APIError) as exc:
for line in self.client.build(fileobj=dfile, tag="a/b/c"):
pass
assert exc.value.is_error()
dfile.close()
def test_542_truncate_ids_client_side(self):
self.client.start(
self.client.create_container(TEST_IMG, ['true'])
)
result = self.client.containers(all=True, trunc=True)
assert len(result[0]['Id']) == 12
def test_647_support_doubleslash_in_image_names(self):
with pytest.raises(docker.errors.APIError):
self.client.inspect_image('gensokyo.jp//kirisame')
def test_649_handle_timeout_value_none(self):
self.client.timeout = None
ctnr = self.client.create_container(TEST_IMG, ['sleep', '2'])
self.client.start(ctnr)
self.client.stop(ctnr)
def test_715_handle_user_param_as_int_value(self):
ctnr = self.client.create_container(TEST_IMG, ['id', '-u'], user=1000)
self.client.start(ctnr)
self.client.wait(ctnr)
logs = self.client.logs(ctnr)
if six.PY3:
logs = logs.decode('utf-8')
assert logs == '1000\n'
def test_792_explicit_port_protocol(self):
tcp_port, udp_port = random.sample(range(9999, 32000), 2)
ctnr = self.client.create_container(
TEST_IMG, ['sleep', '9999'], ports=[2000, (2000, 'udp')],
host_config=self.client.create_host_config(
port_bindings={'2000/tcp': tcp_port, '2000/udp': udp_port}
)
)
self.tmp_containers.append(ctnr)
self.client.start(ctnr)
assert self.client.port(
ctnr, 2000
)[0]['HostPort'] == six.text_type(tcp_port)
assert self.client.port(
ctnr, '2000/tcp'
)[0]['HostPort'] == six.text_type(tcp_port)
assert self.client.port(
ctnr, '2000/udp'
)[0]['HostPort'] == six.text_type(udp_port)
| apache-2.0 | -3,033,671,684,242,907,000 | 2,460,057,830,817,599,500 | 33.338462 | 78 | 0.598118 | false |
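# Illustrative sketch of the port-binding call tested above (the client constructor,
# image name and host ports are assumptions; a reachable Docker daemon is required):
# plain 2000 (tcp) and '2000/udp' can be bound to different host ports.
client = docker.APIClient()
ctnr = client.create_container(
    'busybox', ['sleep', '9999'], ports=[2000, (2000, 'udp')],
    host_config=client.create_host_config(
        port_bindings={'2000/tcp': 12345, '2000/udp': 12346}))
client.start(ctnr)
print(client.port(ctnr, '2000/udp'))   # e.g. [{'HostIp': ..., 'HostPort': '12346'}]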
Workday/OpenFrame | tools/telemetry/catapult_base/refactor/offset_token.py | 16 | 3155 | # Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import collections
import itertools
import token
import tokenize
def _Pairwise(iterable):
"""s -> (None, s0), (s0, s1), (s1, s2), (s2, s3), ..."""
a, b = itertools.tee(iterable)
a = itertools.chain((None,), a)
return itertools.izip(a, b)
class OffsetToken(object):
"""A Python token with a relative position.
A token is represented by a type defined in Python's token module, a string
representing the content, and an offset. Using relative positions makes it
easy to insert and remove tokens.
"""
def __init__(self, token_type, string, offset):
self._type = token_type
self._string = string
self._offset = offset
@property
def type(self):
return self._type
@property
def type_name(self):
return token.tok_name[self._type]
@property
def string(self):
return self._string
@string.setter
def string(self, value):
self._string = value
@property
def offset(self):
return self._offset
def __str__(self):
return str((self.type_name, self.string, self.offset))
def Tokenize(f):
"""Read tokens from a file-like object.
Args:
f: Any object that has a readline method.
Returns:
A collections.deque containing OffsetTokens. Deques are cheaper and easier
to manipulate sequentially than lists.
"""
f.seek(0)
tokenize_tokens = tokenize.generate_tokens(f.readline)
offset_tokens = collections.deque()
for prev_token, next_token in _Pairwise(tokenize_tokens):
token_type, string, (srow, scol), _, _ = next_token
if not prev_token:
offset_tokens.append(OffsetToken(token_type, string, (0, 0)))
else:
erow, ecol = prev_token[3]
if erow == srow:
offset_tokens.append(OffsetToken(token_type, string, (0, scol-ecol)))
else:
offset_tokens.append(OffsetToken(token_type, string, (srow-erow, scol)))
return offset_tokens
def Untokenize(offset_tokens):
"""Return the string representation of an iterable of OffsetTokens."""
# Make a copy. Don't modify the original.
offset_tokens = collections.deque(offset_tokens)
# Strip leading NL tokens.
while offset_tokens[0].type == tokenize.NL:
offset_tokens.popleft()
# Strip leading vertical whitespace.
first_token = offset_tokens.popleft()
# Take care not to modify the existing token. Create a new one in its place.
first_token = OffsetToken(first_token.type, first_token.string,
(0, first_token.offset[1]))
offset_tokens.appendleft(first_token)
# Convert OffsetTokens to tokenize tokens.
tokenize_tokens = []
row = 1
col = 0
for t in offset_tokens:
offset_row, offset_col = t.offset
if offset_row == 0:
col += offset_col
else:
row += offset_row
col = offset_col
tokenize_tokens.append((t.type, t.string, (row, col), (row, col), None))
# tokenize can't handle whitespace before line continuations.
# So add a space.
return tokenize.untokenize(tokenize_tokens).replace('\\\n', ' \\\n')
| bsd-3-clause | 4,284,388,886,327,345,700 | -8,061,240,820,339,070,000 | 26.920354 | 80 | 0.674485 | false |
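# A minimal round-trip sketch (the sample source string is made up; the izip import
# above makes this a Python 2 module): Tokenize() accepts any object with a readline
# method and Untokenize() rebuilds equivalent source text from the relative offsets.
import StringIO
toks = Tokenize(StringIO.StringIO('x = 1\nif x:\n    x += 2\n'))
print Untokenize(toks)   # prints source equivalent to the input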
sippy/voiptests | test_cases/reinv_brkn2.py | 1 | 2000 | # Copyright (c) 2016 Sippy Software, Inc. All rights reserved.
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation and/or
# other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from test_cases.reinv_fail import a_test_reinv_fail
from test_cases.reinvite import b_test_reinvite
class a_test_reinv_brkn2(a_test_reinv_fail):
cld = 'bob_reinv_brkn2'
cli = 'alice_reinv_brkn2'
def reinvite(self, ua):
if not self.connect_done or self.disconnect_done:
return
sdp_body_bak = ua.lSDP
ua.lSDP = sdp_body_bak.getCopy()
for sect in ua.lSDP.content.sections:
sect.c_header = None
rval = a_test_reinv_fail.reinvite(self, ua)
ua.lSDP = sdp_body_bak
return rval
class b_test_reinv_brkn2(b_test_reinvite):
cli = 'bob_reinv_brkn2'
| bsd-2-clause | 6,130,052,858,248,842,000 | -2,564,852,962,747,822,000 | 43.444444 | 82 | 0.74 | false |
spacewalkproject/spacewalk | java/scripts/api/managechannel.py | 16 | 2371 | #!/usr/bin/python
"""
Script to:
 - create a unique channel for each given user
 - push content to each user's channel
"""
import os
import xmlrpclib
# Setup
SATELLITE_HOST = "test10-64.rhndev.redhat.com"
SATELLITE_URL = "http://%s/rpc/api" % SATELLITE_HOST
SATELLITE_LOGIN_HASH ={'prad03':'redhat', 'prad02' : 'redhat'}
SUFFIX_HASH = {'prad03' : '03', 'prad02' : '02'}
CHANNEL_INFO = {'label' : 'channel-',
'name' : 'channel-',
'summary' : 'dummy channel',
'archLabel' : 'channel-ia32',
'parentLabel' : ''}
PKG_CONTENT_DIR = '/tmp/upload/'
client = xmlrpclib.Server(SATELLITE_URL, verbose=0)
def getKeys(users):
"""
Generate session key for each user
"""
keylist = {}
for login,password in users.items():
sessionkey = client.auth.login(login, password)
keylist[login] = sessionkey
return keylist
def createChannels(keylist, info):
"""
Create unique channels per user
"""
channel_list = {}
for login,key in keylist.items():
# create channel under each org
# Channel label,name should be unique
label = info['label'] + SUFFIX_HASH[login]
name = info['name'] + SUFFIX_HASH[login]
try:
print "Creating Channel: ",label
client.channel.software.create(key, label, name, \
info['summary'], info['archLabel'], \
info['parentLabel'])
except xmlrpclib.Fault, e:
print e
channel_list[login] = label
return channel_list
def pushContent(users, channels):
"""
Invoke rhnpush to push packages to channels
"""
for login,password in users.items():
print "Pushing Content to %s" % channels[login]
push_cmd = 'rhnpush --server=%s/APP --username=%s --password=%s \
--dir=%s --channel=%s -vvvv --tolerant --nosig' % \
(SATELLITE_HOST, login, password, PKG_CONTENT_DIR, \
channels[login])
os.system(push_cmd)
def main():
# Create Session keys
keys = getKeys(SATELLITE_LOGIN_HASH)
# Create channels
channel_list = createChannels(keys, CHANNEL_INFO)
# push content to channels
pushContent(SATELLITE_LOGIN_HASH, channel_list)
if __name__ == '__main__':
main()
| gpl-2.0 | -2,832,070,907,003,094,000 | 340,170,304,439,722,500 | 27.566265 | 73 | 0.578237 | false |
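# Condensed sketch of the XML-RPC calls the script wraps (host, user and password are
# placeholders): one auth.login per user, then one channel.software.create per unique
# channel label.
api = xmlrpclib.Server('http://satellite.example.com/rpc/api', verbose=0)
key = api.auth.login('prad03', 'redhat')
api.channel.software.create(key, 'channel-03', 'channel-03', 'dummy channel',
                            'channel-ia32', '')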
puremourning/YouCompleteMe | python/ycm/client/completer_available_request.py | 7 | 1464 | # Copyright (C) 2013 Google Inc.
#
# This file is part of YouCompleteMe.
#
# YouCompleteMe is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# YouCompleteMe is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with YouCompleteMe. If not, see <http://www.gnu.org/licenses/>.
from ycm.client.base_request import BaseRequest, BuildRequestData
class CompleterAvailableRequest( BaseRequest ):
def __init__( self, filetypes ):
super( CompleterAvailableRequest, self ).__init__()
self.filetypes = filetypes
self._response = None
def Start( self ):
request_data = BuildRequestData()
request_data.update( { 'filetypes': self.filetypes } )
self._response = self.PostDataToHandler( request_data,
'semantic_completion_available' )
def Response( self ):
return self._response
def SendCompleterAvailableRequest( filetypes ):
request = CompleterAvailableRequest( filetypes )
# This is a blocking call.
request.Start()
return request.Response()
| gpl-3.0 | -5,846,964,543,519,370,000 | -431,297,076,498,451,900 | 33.046512 | 78 | 0.719945 | false |
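# Illustrative call (the filetype list is an example): the helper posts the request
# synchronously to the ycmd handler and returns the server's verdict on whether a
# semantic completer exists for those filetypes.
semantic_completion_available = SendCompleterAvailableRequest(['python'])
if semantic_completion_available:
    pass  # e.g. enable semantic triggers for this buffer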
rizar/attention-lvcsr | libs/blocks/tests/monitoring/test_monitored_quantity.py | 2 | 1638 | import numpy
import theano
from fuel.datasets import IterableDataset
from blocks.monitoring.evaluators import DatasetEvaluator
from blocks.monitoring.aggregation import MonitoredQuantity
from blocks.bricks.cost import CategoricalCrossEntropy
class CrossEntropy(MonitoredQuantity):
def __init__(self, **kwargs):
super(CrossEntropy, self).__init__(**kwargs)
def initialize(self):
self.total_cross_entropy, self.examples_seen = 0.0, 0
def accumulate(self, target, predicted):
import numpy
self.total_cross_entropy += -(target * numpy.log(predicted)).sum()
self.examples_seen += 1
def readout(self):
res = self.total_cross_entropy / self.examples_seen
return res
def test_dataset_evaluators():
X = theano.tensor.vector('X')
Y = theano.tensor.vector('Y')
data = [numpy.arange(1, 7, dtype=theano.config.floatX).reshape(3, 2),
numpy.arange(11, 17, dtype=theano.config.floatX).reshape(3, 2)]
data_stream = IterableDataset(dict(X=data[0],
Y=data[1])).get_example_stream()
validator = DatasetEvaluator([
CrossEntropy(requires=[X, Y],
name="monitored_cross_entropy0"),
# to test two same quantities and make sure that state will be reset
CrossEntropy(requires=[X, Y],
name="monitored_cross_entropy1"),
CategoricalCrossEntropy().apply(X, Y), ])
values = validator.evaluate(data_stream)
numpy.testing.assert_allclose(
values['monitored_cross_entropy1'],
values['categoricalcrossentropy_apply_cost'])
| mit | 701,178,117,307,698,200 | -1,313,076,827,511,507,500 | 34.608696 | 76 | 0.653846 | false |
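# Condensed sketch of the aggregation protocol exercised above (X and Y are the theano
# variables built inside the test): initialize() resets the state, accumulate() is fed
# one example at a time, and readout() returns the averaged value.
quantity = CrossEntropy(requires=[X, Y], name='xent')
quantity.initialize()
quantity.accumulate(numpy.array([1.0, 0.0]), numpy.array([0.8, 0.2]))
print(quantity.readout())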
cnrat/fail2ban | fail2ban/tests/action_d/test_badips.py | 19 | 2735 | # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: t -*-
# vi: set ft=python sts=4 ts=4 sw=4 noet :
# This file is part of Fail2Ban.
#
# Fail2Ban is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Fail2Ban is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Fail2Ban; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
import os
import unittest
import sys
from ..dummyjail import DummyJail
from ..utils import CONFIG_DIR
if sys.version_info >= (2,7):
class BadIPsActionTest(unittest.TestCase):
def setUp(self):
"""Call before every test case."""
self.jail = DummyJail()
self.jail.actions.add("test")
pythonModule = os.path.join(CONFIG_DIR, "action.d", "badips.py")
self.jail.actions.add("badips", pythonModule, initOpts={
'category': "ssh",
'banaction': "test",
})
self.action = self.jail.actions["badips"]
def tearDown(self):
"""Call after every test case."""
# Must cancel timer!
if self.action._timer:
self.action._timer.cancel()
def testCategory(self):
categories = self.action.getCategories()
self.assertTrue("ssh" in categories)
self.assertTrue(len(categories) >= 10)
self.assertRaises(
ValueError, setattr, self.action, "category",
"invalid-category")
# Not valid for reporting category...
self.assertRaises(
ValueError, setattr, self.action, "category", "mail")
# but valid for blacklisting.
self.action.bancategory = "mail"
def testScore(self):
self.assertRaises(ValueError, setattr, self.action, "score", -5)
self.action.score = 5
self.action.score = "5"
def testBanaction(self):
self.assertRaises(
ValueError, setattr, self.action, "banaction",
"invalid-action")
self.action.banaction = "test"
def testUpdateperiod(self):
self.assertRaises(
ValueError, setattr, self.action, "updateperiod", -50)
self.assertRaises(
ValueError, setattr, self.action, "updateperiod", 0)
self.action.updateperiod = 900
self.action.updateperiod = "900"
def testStart(self):
self.action.start()
self.assertTrue(len(self.action._bannedips) > 10)
def testStop(self):
self.testStart()
self.action.stop()
self.assertTrue(len(self.action._bannedips) == 0)
| gpl-2.0 | -2,077,587,286,221,556,500 | -9,063,945,158,101,880,000 | 29.388889 | 81 | 0.703108 | false |
nemesisdesign/django | tests/null_queries/tests.py | 55 | 2973 | from __future__ import unicode_literals
from django.core.exceptions import FieldError
from django.test import TestCase
from .models import Choice, Inner, OuterA, OuterB, Poll
class NullQueriesTests(TestCase):
def test_none_as_null(self):
"""
Regression test for the use of None as a query value.
None is interpreted as an SQL NULL, but only in __exact and __iexact
queries.
Set up some initial polls and choices
"""
p1 = Poll(question='Why?')
p1.save()
c1 = Choice(poll=p1, choice='Because.')
c1.save()
c2 = Choice(poll=p1, choice='Why Not?')
c2.save()
# Exact query with value None returns nothing ("is NULL" in sql,
# but every 'id' field has a value).
self.assertQuerysetEqual(Choice.objects.filter(choice__exact=None), [])
# The same behavior for iexact query.
self.assertQuerysetEqual(Choice.objects.filter(choice__iexact=None), [])
# Excluding the previous result returns everything.
self.assertQuerysetEqual(
Choice.objects.exclude(choice=None).order_by('id'),
[
'<Choice: Choice: Because. in poll Q: Why? >',
'<Choice: Choice: Why Not? in poll Q: Why? >'
]
)
# Valid query, but fails because foo isn't a keyword
with self.assertRaises(FieldError):
Choice.objects.filter(foo__exact=None)
# Can't use None on anything other than __exact and __iexact
with self.assertRaises(ValueError):
Choice.objects.filter(id__gt=None)
# Related managers use __exact=None implicitly if the object hasn't been saved.
p2 = Poll(question="How?")
self.assertEqual(repr(p2.choice_set.all()), '<QuerySet []>')
def test_reverse_relations(self):
"""
Querying across reverse relations and then another relation should
insert outer joins correctly so as not to exclude results.
"""
obj = OuterA.objects.create()
self.assertQuerysetEqual(
OuterA.objects.filter(inner__third=None),
['<OuterA: OuterA object>']
)
self.assertQuerysetEqual(
OuterA.objects.filter(inner__third__data=None),
['<OuterA: OuterA object>']
)
Inner.objects.create(first=obj)
self.assertQuerysetEqual(
Inner.objects.filter(first__inner__third=None),
['<Inner: Inner object>']
)
# Ticket #13815: check if <reverse>_isnull=False does not produce
# faulty empty lists
OuterB.objects.create(data="reverse")
self.assertQuerysetEqual(
OuterB.objects.filter(inner__isnull=False),
[]
)
Inner.objects.create(first=obj)
self.assertQuerysetEqual(
OuterB.objects.exclude(inner__isnull=False),
['<OuterB: OuterB object>']
)
| bsd-3-clause | 8,177,878,028,806,964,000 | 6,174,511,874,418,627,000 | 33.569767 | 87 | 0.597713 | false |
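# A condensed sketch of the semantics tested above (Choice is the test model imported
# there): only __exact and __iexact accept None (rendered as IS NULL); other lookups
# such as __gt reject it.
Choice.objects.filter(choice__exact=None)    # WHERE choice IS NULL
Choice.objects.exclude(choice=None)          # WHERE choice IS NOT NULL
# Choice.objects.filter(id__gt=None)         # raises ValueError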
renesugar/arrow | python/pyarrow/tests/test_jvm.py | 5 | 13848 | # -*- coding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import json
import os
import pyarrow as pa
import pyarrow.jvm as pa_jvm
import pytest
import six
import sys
import xml.etree.ElementTree as ET
jpype = pytest.importorskip("jpype")
@pytest.fixture(scope="session")
def root_allocator():
# This test requires Arrow Java to be built in the same source tree
pom_path = os.path.join(
os.path.dirname(__file__), '..', '..', '..',
'java', 'pom.xml')
tree = ET.parse(pom_path)
version = tree.getroot().find(
'POM:version',
namespaces={
'POM': 'http://maven.apache.org/POM/4.0.0'
}).text
jar_path = os.path.join(
os.path.dirname(__file__), '..', '..', '..',
'java', 'tools', 'target',
'arrow-tools-{}-jar-with-dependencies.jar'.format(version))
jar_path = os.getenv("ARROW_TOOLS_JAR", jar_path)
jpype.startJVM(jpype.getDefaultJVMPath(), "-Djava.class.path=" + jar_path)
return jpype.JPackage("org").apache.arrow.memory.RootAllocator(sys.maxsize)
def test_jvm_buffer(root_allocator):
# Create a buffer
jvm_buffer = root_allocator.buffer(8)
for i in range(8):
jvm_buffer.setByte(i, 8 - i)
# Convert to Python
buf = pa_jvm.jvm_buffer(jvm_buffer)
# Check its content
assert buf.to_pybytes() == b'\x08\x07\x06\x05\x04\x03\x02\x01'
def _jvm_field(jvm_spec):
om = jpype.JClass('com.fasterxml.jackson.databind.ObjectMapper')()
pojo_Field = jpype.JClass('org.apache.arrow.vector.types.pojo.Field')
return om.readValue(jvm_spec, pojo_Field)
def _jvm_schema(jvm_spec, metadata=None):
field = _jvm_field(jvm_spec)
schema_cls = jpype.JClass('org.apache.arrow.vector.types.pojo.Schema')
fields = jpype.JClass('java.util.ArrayList')()
fields.add(field)
if metadata:
dct = jpype.JClass('java.util.HashMap')()
for k, v in six.iteritems(metadata):
dct.put(k, v)
return schema_cls(fields, dct)
else:
return schema_cls(fields)
# In the following, we use the JSON serialization of the Field objects in Java.
# This ensures that we neither rely on the exact mechanics on how to construct
# them using Java code as well as enables us to define them as parameters
# without to invoke the JVM.
#
# The specifications were created using:
#
# om = jpype.JClass('com.fasterxml.jackson.databind.ObjectMapper')()
# field = … # Code to instantiate the field
# jvm_spec = om.writeValueAsString(field)
@pytest.mark.parametrize('pa_type,jvm_spec', [
(pa.null(), '{"name":"null"}'),
(pa.bool_(), '{"name":"bool"}'),
(pa.int8(), '{"name":"int","bitWidth":8,"isSigned":true}'),
(pa.int16(), '{"name":"int","bitWidth":16,"isSigned":true}'),
(pa.int32(), '{"name":"int","bitWidth":32,"isSigned":true}'),
(pa.int64(), '{"name":"int","bitWidth":64,"isSigned":true}'),
(pa.uint8(), '{"name":"int","bitWidth":8,"isSigned":false}'),
(pa.uint16(), '{"name":"int","bitWidth":16,"isSigned":false}'),
(pa.uint32(), '{"name":"int","bitWidth":32,"isSigned":false}'),
(pa.uint64(), '{"name":"int","bitWidth":64,"isSigned":false}'),
(pa.float16(), '{"name":"floatingpoint","precision":"HALF"}'),
(pa.float32(), '{"name":"floatingpoint","precision":"SINGLE"}'),
(pa.float64(), '{"name":"floatingpoint","precision":"DOUBLE"}'),
(pa.time32('s'), '{"name":"time","unit":"SECOND","bitWidth":32}'),
(pa.time32('ms'), '{"name":"time","unit":"MILLISECOND","bitWidth":32}'),
(pa.time64('us'), '{"name":"time","unit":"MICROSECOND","bitWidth":64}'),
(pa.time64('ns'), '{"name":"time","unit":"NANOSECOND","bitWidth":64}'),
(pa.timestamp('s'), '{"name":"timestamp","unit":"SECOND",'
'"timezone":null}'),
(pa.timestamp('ms'), '{"name":"timestamp","unit":"MILLISECOND",'
'"timezone":null}'),
(pa.timestamp('us'), '{"name":"timestamp","unit":"MICROSECOND",'
'"timezone":null}'),
(pa.timestamp('ns'), '{"name":"timestamp","unit":"NANOSECOND",'
'"timezone":null}'),
(pa.timestamp('ns', tz='UTC'), '{"name":"timestamp","unit":"NANOSECOND"'
',"timezone":"UTC"}'),
(pa.timestamp('ns', tz='Europe/Paris'), '{"name":"timestamp",'
'"unit":"NANOSECOND","timezone":"Europe/Paris"}'),
(pa.date32(), '{"name":"date","unit":"DAY"}'),
(pa.date64(), '{"name":"date","unit":"MILLISECOND"}'),
(pa.decimal128(19, 4), '{"name":"decimal","precision":19,"scale":4}'),
(pa.string(), '{"name":"utf8"}'),
(pa.binary(), '{"name":"binary"}'),
(pa.binary(10), '{"name":"fixedsizebinary","byteWidth":10}'),
# TODO(ARROW-2609): complex types that have children
# pa.list_(pa.int32()),
# pa.struct([pa.field('a', pa.int32()),
# pa.field('b', pa.int8()),
# pa.field('c', pa.string())]),
# pa.union([pa.field('a', pa.binary(10)),
# pa.field('b', pa.string())], mode=pa.lib.UnionMode_DENSE),
# pa.union([pa.field('a', pa.binary(10)),
# pa.field('b', pa.string())], mode=pa.lib.UnionMode_SPARSE),
# TODO: DictionaryType requires a vector in the type
# pa.dictionary(pa.int32(), pa.array(['a', 'b', 'c'])),
])
@pytest.mark.parametrize('nullable', [True, False])
def test_jvm_types(root_allocator, pa_type, jvm_spec, nullable):
spec = {
'name': 'field_name',
'nullable': nullable,
'type': json.loads(jvm_spec),
# TODO: This needs to be set for complex types
'children': []
}
jvm_field = _jvm_field(json.dumps(spec))
result = pa_jvm.field(jvm_field)
expected_field = pa.field('field_name', pa_type, nullable=nullable)
assert result == expected_field
jvm_schema = _jvm_schema(json.dumps(spec))
result = pa_jvm.schema(jvm_schema)
assert result == pa.schema([expected_field])
# Schema with custom metadata
jvm_schema = _jvm_schema(json.dumps(spec), {'meta': 'data'})
result = pa_jvm.schema(jvm_schema)
assert result == pa.schema([expected_field], {'meta': 'data'})
# These test parameters mostly use an integer range as an input as this is
# often the only type that is understood by both Python and Java
# implementations of Arrow.
@pytest.mark.parametrize('pa_type,py_data,jvm_type', [
(pa.bool_(), [True, False, True, True], 'BitVector'),
(pa.uint8(), list(range(128)), 'UInt1Vector'),
(pa.uint16(), list(range(128)), 'UInt2Vector'),
(pa.int32(), list(range(128)), 'IntVector'),
(pa.int64(), list(range(128)), 'BigIntVector'),
(pa.float32(), list(range(128)), 'Float4Vector'),
(pa.float64(), list(range(128)), 'Float8Vector'),
(pa.timestamp('s'), list(range(128)), 'TimeStampSecVector'),
(pa.timestamp('ms'), list(range(128)), 'TimeStampMilliVector'),
(pa.timestamp('us'), list(range(128)), 'TimeStampMicroVector'),
(pa.timestamp('ns'), list(range(128)), 'TimeStampNanoVector'),
# TODO(ARROW-2605): These types miss a conversion from pure Python objects
# * pa.time32('s')
# * pa.time32('ms')
# * pa.time64('us')
# * pa.time64('ns')
(pa.date32(), list(range(128)), 'DateDayVector'),
(pa.date64(), list(range(128)), 'DateMilliVector'),
# TODO(ARROW-2606): pa.decimal128(19, 4)
])
def test_jvm_array(root_allocator, pa_type, py_data, jvm_type):
# Create vector
cls = "org.apache.arrow.vector.{}".format(jvm_type)
jvm_vector = jpype.JClass(cls)("vector", root_allocator)
jvm_vector.allocateNew(len(py_data))
for i, val in enumerate(py_data):
jvm_vector.setSafe(i, val)
jvm_vector.setValueCount(len(py_data))
py_array = pa.array(py_data, type=pa_type)
jvm_array = pa_jvm.array(jvm_vector)
assert py_array.equals(jvm_array)
# These test parameters mostly use an integer range as an input as this is
# often the only type that is understood by both Python and Java
# implementations of Arrow.
@pytest.mark.parametrize('pa_type,py_data,jvm_type,jvm_spec', [
# TODO: null
(pa.bool_(), [True, False, True, True], 'BitVector', '{"name":"bool"}'),
(
pa.uint8(),
list(range(128)),
'UInt1Vector',
'{"name":"int","bitWidth":8,"isSigned":false}'
),
(
pa.uint16(),
list(range(128)),
'UInt2Vector',
'{"name":"int","bitWidth":16,"isSigned":false}'
),
(
pa.uint32(),
list(range(128)),
'UInt4Vector',
'{"name":"int","bitWidth":32,"isSigned":false}'
),
(
pa.uint64(),
list(range(128)),
'UInt8Vector',
'{"name":"int","bitWidth":64,"isSigned":false}'
),
(
pa.int8(),
list(range(128)),
'TinyIntVector',
'{"name":"int","bitWidth":8,"isSigned":true}'
),
(
pa.int16(),
list(range(128)),
'SmallIntVector',
'{"name":"int","bitWidth":16,"isSigned":true}'
),
(
pa.int32(),
list(range(128)),
'IntVector',
'{"name":"int","bitWidth":32,"isSigned":true}'
),
(
pa.int64(),
list(range(128)),
'BigIntVector',
'{"name":"int","bitWidth":64,"isSigned":true}'
),
# TODO: float16
(
pa.float32(),
list(range(128)),
'Float4Vector',
'{"name":"floatingpoint","precision":"SINGLE"}'
),
(
pa.float64(),
list(range(128)),
'Float8Vector',
'{"name":"floatingpoint","precision":"DOUBLE"}'
),
(
pa.timestamp('s'),
list(range(128)),
'TimeStampSecVector',
'{"name":"timestamp","unit":"SECOND","timezone":null}'
),
(
pa.timestamp('ms'),
list(range(128)),
'TimeStampMilliVector',
'{"name":"timestamp","unit":"MILLISECOND","timezone":null}'
),
(
pa.timestamp('us'),
list(range(128)),
'TimeStampMicroVector',
'{"name":"timestamp","unit":"MICROSECOND","timezone":null}'
),
(
pa.timestamp('ns'),
list(range(128)),
'TimeStampNanoVector',
'{"name":"timestamp","unit":"NANOSECOND","timezone":null}'
),
# TODO(ARROW-2605): These types miss a conversion from pure Python objects
# * pa.time32('s')
# * pa.time32('ms')
# * pa.time64('us')
# * pa.time64('ns')
(
pa.date32(),
list(range(128)),
'DateDayVector',
'{"name":"date","unit":"DAY"}'
),
(
pa.date64(),
list(range(128)),
'DateMilliVector',
'{"name":"date","unit":"MILLISECOND"}'
),
# TODO(ARROW-2606): pa.decimal128(19, 4)
])
def test_jvm_record_batch(root_allocator, pa_type, py_data, jvm_type,
jvm_spec):
# Create vector
cls = "org.apache.arrow.vector.{}".format(jvm_type)
jvm_vector = jpype.JClass(cls)("vector", root_allocator)
jvm_vector.allocateNew(len(py_data))
for i, val in enumerate(py_data):
jvm_vector.setSafe(i, val)
jvm_vector.setValueCount(len(py_data))
# Create field
spec = {
'name': 'field_name',
'nullable': False,
'type': json.loads(jvm_spec),
# TODO: This needs to be set for complex types
'children': []
}
jvm_field = _jvm_field(json.dumps(spec))
# Create VectorSchemaRoot
jvm_fields = jpype.JClass('java.util.ArrayList')()
jvm_fields.add(jvm_field)
jvm_vectors = jpype.JClass('java.util.ArrayList')()
jvm_vectors.add(jvm_vector)
jvm_vsr = jpype.JClass('org.apache.arrow.vector.VectorSchemaRoot')
jvm_vsr = jvm_vsr(jvm_fields, jvm_vectors, len(py_data))
py_record_batch = pa.RecordBatch.from_arrays(
[pa.array(py_data, type=pa_type)],
['col']
)
jvm_record_batch = pa_jvm.record_batch(jvm_vsr)
assert py_record_batch.equals(jvm_record_batch)
def _string_to_varchar_holder(ra, string):
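    # Build a JVM NullableVarCharHolder for ``string``; ``None`` marks a null value.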
nvch_cls = "org.apache.arrow.vector.holders.NullableVarCharHolder"
holder = jpype.JClass(nvch_cls)()
if string is None:
holder.isSet = 0
else:
holder.isSet = 1
        value = jpype.JClass("java.lang.String")(string)
std_charsets = jpype.JClass("java.nio.charset.StandardCharsets")
bytes_ = value.getBytes(std_charsets.UTF_8)
holder.buffer = ra.buffer(len(bytes_))
holder.buffer.setBytes(0, bytes_, 0, len(bytes_))
holder.start = 0
holder.end = len(bytes_)
return holder
# TODO(ARROW-2607)
@pytest.mark.xfail(reason="from_buffers is only supported for "
                          "primitive arrays so far")
def test_jvm_string_array(root_allocator):
data = [u"string", None, u"töst"]
cls = "org.apache.arrow.vector.VarCharVector"
jvm_vector = jpype.JClass(cls)("vector", root_allocator)
jvm_vector.allocateNew()
for i, string in enumerate(data):
        holder = _string_to_varchar_holder(root_allocator, string)
jvm_vector.setSafe(i, holder)
jvm_vector.setValueCount(i + 1)
py_array = pa.array(data, type=pa.string())
jvm_array = pa_jvm.array(jvm_vector)
assert py_array.equals(jvm_array)
| apache-2.0 | -1,467,900,548,875,081,000 | -2,842,929,155,579,767,000 | 34.68299 | 79 | 0.5974 | false |
pycroscopy/pycroscopy | tests/io/test_hdf_writer.py | 1 | 36224 | # -*- coding: utf-8 -*-
"""
Created on Tue Nov 3 15:07:16 2017
@author: Suhas Somnath
"""
from __future__ import division, print_function, unicode_literals, absolute_import
import unittest
import os
import h5py
import numpy as np
import sys
sys.path.append("../../../pycroscopy/")
from pycroscopy.io.virtual_data import VirtualGroup, VirtualDataset
from pycroscopy.io.hdf_writer import HDFwriter
from pyUSID.io.hdf_utils import get_attr, get_h5_obj_refs # Until an elegant solution presents itself
class TestHDFWriter(unittest.TestCase):
@staticmethod
def __delete_existing_file(file_path):
if os.path.exists(file_path):
os.remove(file_path)
def test_init_invalid_input(self):
with self.assertRaises(TypeError):
_ = HDFwriter(4)
def test_init_path_non_existant_file_01(self):
file_path = 'test.h5'
self.__delete_existing_file(file_path)
writer = HDFwriter(file_path)
self.assertIsInstance(writer, HDFwriter, "writer should be an HDFwriter")
writer.close()
os.remove(file_path)
def test_init_path_existing_file_01(self):
file_path = 'test.h5'
self.__delete_existing_file(file_path)
h5_f = h5py.File(file_path)
h5_f.close()
# Existing h5 file
writer = HDFwriter(file_path)
self.assertIsInstance(writer, HDFwriter, "writer should be an HDFwriter")
writer.close()
os.remove(file_path)
def test_init_h5_handle_r_01(self):
file_path = 'test.h5'
self.__delete_existing_file(file_path)
h5_f = h5py.File(file_path)
h5_f.close()
h5_f = h5py.File(file_path, mode='r')
# hdf handle but of mode r
with self.assertRaises(TypeError):
_ = HDFwriter(h5_f)
os.remove(file_path)
def test_init_h5_handle_r_plus_01(self):
file_path = 'test.h5'
self.__delete_existing_file(file_path)
h5_f = h5py.File(file_path)
h5_f.close()
h5_f = h5py.File(file_path, mode='r+')
# open h5 file handle or mode r+
writer = HDFwriter(h5_f)
self.assertIsInstance(writer, HDFwriter, "writer should be an HDFwriter")
writer.close()
os.remove(file_path)
def test_init_h5_handle_w_01(self):
file_path = 'test.h5'
self.__delete_existing_file(file_path)
h5_f = h5py.File(file_path)
h5_f.close()
h5_f = h5py.File(file_path, mode='w')
# open h5 file handle or mode w
writer = HDFwriter(h5_f)
self.assertIsInstance(writer, HDFwriter, "writer should be an HDFwriter")
writer.close()
os.remove(file_path)
def test_init_h5_handle_closed(self):
file_path = 'test.h5'
self.__delete_existing_file(file_path)
h5_f = h5py.File(file_path)
h5_f.close()
# Existing h5 file but closed
with self.assertRaises(ValueError):
_ = HDFwriter(h5_f)
os.remove(file_path)
def test_simple_dset_write_success_01(self):
file_path = 'test.h5'
self.__delete_existing_file(file_path)
with h5py.File(file_path) as h5_f:
dtype = np.uint16
dset_name = 'test'
data = np.random.randint(0, high=15, size=5, dtype=dtype)
microdset = VirtualDataset(dset_name, data)
writer = HDFwriter(h5_f)
h5_d = writer._create_simple_dset(h5_f, microdset)
self.assertIsInstance(h5_d, h5py.Dataset)
self.assertEqual(h5_d.parent, h5_f)
self.assertEqual(h5_d.name, '/' + dset_name)
self.assertEqual(h5_d.shape, data.shape)
self.assertTrue(np.allclose(h5_d[()], data))
self.assertEqual(h5_d.dtype, dtype)
os.remove(file_path)
def test_simple_dset_write_success_more_options_02(self):
file_path = 'test.h5'
self.__delete_existing_file(file_path)
with h5py.File(file_path) as h5_f:
dset_name = 'test'
data = np.random.rand(16, 1024)
dtype = data.dtype
compression = 'gzip'
            chunking = (1, 1024)
microdset = VirtualDataset(dset_name, data, dtype=dtype, compression=compression, chunking=chunking)
writer = HDFwriter(h5_f)
h5_d = writer._create_simple_dset(h5_f, microdset)
self.assertIsInstance(h5_d, h5py.Dataset)
self.assertEqual(h5_d.parent, h5_f)
self.assertEqual(h5_d.name, '/' + dset_name)
self.assertEqual(h5_d.shape, data.shape)
self.assertTrue(np.allclose(h5_d[()], data))
self.assertEqual(h5_d.dtype, dtype)
self.assertEqual(h5_d.compression, compression)
self.assertEqual(h5_d.chunks, chunking)
os.remove(file_path)
def test_simple_dset_write_success_more_options_03(self):
file_path = 'test.h5'
self.__delete_existing_file(file_path)
with h5py.File(file_path) as h5_f:
dset_name = 'test'
data = np.random.rand(16, 1024)
dtype = np.float16
compression = 'gzip'
            chunking = (1, 1024)
microdset = VirtualDataset(dset_name, data, dtype=dtype, compression=compression, chunking=chunking)
writer = HDFwriter(h5_f)
h5_d = writer._create_simple_dset(h5_f, microdset)
self.assertIsInstance(h5_d, h5py.Dataset)
self.assertEqual(h5_d.parent, h5_f)
self.assertEqual(h5_d.name, '/' + dset_name)
self.assertEqual(h5_d.shape, data.shape)
self.assertEqual(h5_d.dtype, dtype)
self.assertEqual(h5_d.compression, compression)
self.assertEqual(h5_d.chunks, chunking)
self.assertTrue(np.all(h5_d[()] - data < 1E-3))
os.remove(file_path)
def test_empty_dset_write_success_01(self):
file_path = 'test.h5'
self.__delete_existing_file(file_path)
with h5py.File(file_path) as h5_f:
dset_name = 'test'
maxshape = (16, 1024)
microdset = VirtualDataset(dset_name, None, maxshape=maxshape)
writer = HDFwriter(h5_f)
h5_d = writer._create_empty_dset(h5_f, microdset)
self.assertIsInstance(h5_d, h5py.Dataset)
self.assertEqual(h5_d.parent, h5_f)
self.assertEqual(h5_d.name, '/' + dset_name)
self.assertEqual(h5_d.shape, maxshape)
self.assertEqual(h5_d.maxshape, maxshape)
# dtype is assigned automatically by h5py. Not to be tested here
os.remove(file_path)
def test_empty_dset_write_success_w_options_02(self):
file_path = 'test.h5'
self.__delete_existing_file(file_path)
with h5py.File(file_path) as h5_f:
dset_name = 'test'
maxshape = (16, 1024)
chunking = (1, 1024)
compression = 'gzip'
dtype = np.float16
microdset = VirtualDataset(dset_name, None, maxshape=maxshape,
dtype=dtype, compression=compression, chunking=chunking)
writer = HDFwriter(h5_f)
h5_d = writer._create_empty_dset(h5_f, microdset)
self.assertIsInstance(h5_d, h5py.Dataset)
self.assertEqual(h5_d.parent, h5_f)
self.assertEqual(h5_d.name, '/' + dset_name)
self.assertEqual(h5_d.dtype, dtype)
self.assertEqual(h5_d.compression, compression)
self.assertEqual(h5_d.chunks, chunking)
self.assertEqual(h5_d.shape, maxshape)
self.assertEqual(h5_d.maxshape, maxshape)
os.remove(file_path)
def test_expandable_dset_write_success_01(self):
file_path = 'test.h5'
self.__delete_existing_file(file_path)
with h5py.File(file_path) as h5_f:
dset_name = 'test'
maxshape = (None, 1024)
data = np.random.rand(1, 1024)
microdset = VirtualDataset(dset_name, data, maxshape=maxshape)
writer = HDFwriter(h5_f)
h5_d = writer._create_resizeable_dset(h5_f, microdset)
self.assertIsInstance(h5_d, h5py.Dataset)
self.assertEqual(h5_d.parent, h5_f)
self.assertEqual(h5_d.name, '/' + dset_name)
self.assertEqual(h5_d.shape, data.shape)
self.assertEqual(h5_d.maxshape, maxshape)
self.assertTrue(np.allclose(h5_d[()], data))
# Now test to make sure that the dataset can be expanded:
# TODO: add this to the example!
expansion_axis = 0
h5_d.resize(h5_d.shape[expansion_axis] + 1, axis=expansion_axis)
self.assertEqual(h5_d.shape, (data.shape[0]+1, data.shape[1]))
self.assertEqual(h5_d.maxshape, maxshape)
# Finally try checking to see if this new data is also present in the file
new_data = np.random.rand(1024)
h5_d[1] = new_data
data = np.vstack((np.squeeze(data), new_data))
self.assertTrue(np.allclose(h5_d[()], data))
os.remove(file_path)
# TODO: will have to check to see if the parent is correctly declared for the group
def test_group_create_non_indexed_simple_01(self):
file_path = 'test.h5'
self.__delete_existing_file(file_path)
with h5py.File(file_path) as h5_f:
grp_name = 'test'
micro_group = VirtualGroup(grp_name)
writer = HDFwriter(h5_f)
h5_grp = writer._create_group(h5_f, micro_group)
self.assertIsInstance(h5_grp, h5py.Group)
self.assertEqual(h5_grp.parent, h5_f)
self.assertEqual(h5_grp.name, '/' + grp_name)
# self.assertEqual(len(h5_grp.items), 0)
os.remove(file_path)
def test_group_create_indexed_simple_01(self):
file_path = 'test.h5'
self.__delete_existing_file(file_path)
with h5py.File(file_path) as h5_f:
grp_name = 'test_'
micro_group = VirtualGroup(grp_name)
writer = HDFwriter(h5_f)
h5_grp = writer._create_group(h5_f, micro_group)
self.assertIsInstance(h5_grp, h5py.Group)
self.assertEqual(h5_grp.parent, h5_f)
self.assertEqual(h5_grp.name, '/' + grp_name + '000')
# self.assertEqual(len(h5_grp.items), 0)
os.remove(file_path)
def test_group_create_root_01(self):
file_path = 'test.h5'
self.__delete_existing_file(file_path)
with h5py.File(file_path) as h5_f:
grp_name = ''
micro_group = VirtualGroup(grp_name)
writer = HDFwriter(h5_f)
with self.assertRaises(ValueError):
_ = writer._create_group(h5_f, micro_group)
os.remove(file_path)
def test_group_create_indexed_nested_01(self):
file_path = 'test.h5'
self.__delete_existing_file(file_path)
with h5py.File(file_path) as h5_f:
outer_grp_name = 'outer_'
micro_group = VirtualGroup(outer_grp_name)
writer = HDFwriter(h5_f)
h5_outer_grp = writer._create_group(h5_f, micro_group)
self.assertIsInstance(h5_outer_grp, h5py.Group)
self.assertEqual(h5_outer_grp.parent, h5_f)
self.assertEqual(h5_outer_grp.name, '/' + outer_grp_name + '000')
inner_grp_name = 'inner_'
micro_group = VirtualGroup(inner_grp_name)
h5_inner_grp = writer._create_group(h5_outer_grp, micro_group)
self.assertIsInstance(h5_inner_grp, h5py.Group)
self.assertEqual(h5_inner_grp.parent, h5_outer_grp)
self.assertEqual(h5_inner_grp.name, h5_outer_grp.name + '/' + inner_grp_name + '000')
os.remove(file_path)
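    # Several tests below pass a 'labels' attribute mapping label names to slice
    # tuples; the writer is expected to store one HDF5 region reference per label
    # plus a 'labels' name list, which is why the tests assert
    # ``1 + len(attrs['labels'])`` attributes after writing.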
def test_write_legal_reg_ref_multi_dim_data(self):
file_path = 'test.h5'
self.__delete_existing_file(file_path)
with h5py.File(file_path) as h5_f:
writer = HDFwriter(h5_f)
data = np.random.rand(5, 7)
h5_dset = writer._create_simple_dset(h5_f, VirtualDataset('test', data))
self.assertIsInstance(h5_dset, h5py.Dataset)
attrs = {'labels': {'even_rows': (slice(0, None, 2), slice(None)),
'odd_rows': (slice(1, None, 2), slice(None))}}
writer._write_dset_attributes(h5_dset, attrs.copy())
h5_f.flush()
# two atts point to region references. one for labels
self.assertEqual(len(h5_dset.attrs), 1 + len(attrs['labels']))
# check if the labels attribute was written:
self.assertTrue(np.all([x in list(attrs['labels'].keys()) for x in get_attr(h5_dset, 'labels')]))
expected_data = [data[:None:2], data[1:None:2]]
written_data = [h5_dset[h5_dset.attrs['even_rows']], h5_dset[h5_dset.attrs['odd_rows']]]
for exp, act in zip(expected_data, written_data):
self.assertTrue(np.allclose(exp, act))
os.remove(file_path)
def test_write_legal_reg_ref_multi_dim_data_2nd_dim(self):
file_path = 'test.h5'
self.__delete_existing_file(file_path)
with h5py.File(file_path) as h5_f:
writer = HDFwriter(h5_f)
data = np.random.rand(5, 3)
h5_dset = writer._create_simple_dset(h5_f, VirtualDataset('test', data))
self.assertIsInstance(h5_dset, h5py.Dataset)
attrs = {'labels': {'even_rows': (slice(None), slice(0, None, 2)),
'odd_rows': (slice(None), slice(1, None, 2))}}
writer._write_dset_attributes(h5_dset, attrs.copy())
h5_f.flush()
# two atts point to region references. one for labels
self.assertEqual(len(h5_dset.attrs), 1 + len(attrs['labels']))
# check if the labels attribute was written:
self.assertTrue(np.all([x in list(attrs['labels'].keys()) for x in get_attr(h5_dset, 'labels')]))
expected_data = [data[:, 0:None:2], data[:, 1:None:2]]
written_data = [h5_dset[h5_dset.attrs['even_rows']], h5_dset[h5_dset.attrs['odd_rows']]]
for exp, act in zip(expected_data, written_data):
self.assertTrue(np.allclose(exp, act))
os.remove(file_path)
def test_write_legal_reg_ref_one_dim_data(self):
file_path = 'test.h5'
self.__delete_existing_file(file_path)
with h5py.File(file_path) as h5_f:
writer = HDFwriter(h5_f)
data = np.random.rand(7)
h5_dset = writer._create_simple_dset(h5_f, VirtualDataset('test', data))
self.assertIsInstance(h5_dset, h5py.Dataset)
attrs = {'labels': {'even_rows': (slice(0, None, 2)),
'odd_rows': (slice(1, None, 2))}}
writer._write_dset_attributes(h5_dset, attrs.copy())
h5_f.flush()
# two atts point to region references. one for labels
self.assertEqual(len(h5_dset.attrs), 1 + len(attrs['labels']))
# check if the labels attribute was written:
self.assertTrue(np.all([x in list(attrs['labels'].keys()) for x in get_attr(h5_dset, 'labels')]))
expected_data = [data[:None:2], data[1:None:2]]
written_data = [h5_dset[h5_dset.attrs['even_rows']], h5_dset[h5_dset.attrs['odd_rows']]]
for exp, act in zip(expected_data, written_data):
self.assertTrue(np.allclose(exp, act))
os.remove(file_path)
def test_generate_and_write_reg_ref_legal(self):
file_path = 'test.h5'
self.__delete_existing_file(file_path)
with h5py.File(file_path) as h5_f:
writer = HDFwriter(h5_f)
data = np.random.rand(2, 7)
h5_dset = writer._create_simple_dset(h5_f, VirtualDataset('test', data))
self.assertIsInstance(h5_dset, h5py.Dataset)
attrs = {'labels': ['row_1', 'row_2']}
if sys.version_info.major == 3:
with self.assertWarns(UserWarning):
writer._write_dset_attributes(h5_dset, attrs.copy())
else:
writer._write_dset_attributes(h5_dset, attrs.copy())
h5_f.flush()
# two atts point to region references. one for labels
self.assertEqual(len(h5_dset.attrs), 1 + len(attrs['labels']))
# check if the labels attribute was written:
self.assertTrue(np.all([x in list(attrs['labels']) for x in get_attr(h5_dset, 'labels')]))
expected_data = [data[0], data[1]]
written_data = [h5_dset[h5_dset.attrs['row_1']], h5_dset[h5_dset.attrs['row_2']]]
for exp, act in zip(expected_data, written_data):
self.assertTrue(np.allclose(np.squeeze(exp), np.squeeze(act)))
os.remove(file_path)
def test_generate_and_write_reg_ref_illegal(self):
file_path = 'test.h5'
self.__delete_existing_file(file_path)
with h5py.File(file_path) as h5_f:
writer = HDFwriter(h5_f)
data = np.random.rand(3, 7)
h5_dset = writer._create_simple_dset(h5_f, VirtualDataset('test', data))
self.assertIsInstance(h5_dset, h5py.Dataset)
# with self.assertWarns(UserWarning):
writer._write_dset_attributes(h5_dset, {'labels': ['row_1', 'row_2']})
self.assertEqual(len(h5_dset.attrs), 0)
h5_f.flush()
os.remove(file_path)
    def test_generate_and_write_reg_ref_illegal_types(self):
file_path = 'test.h5'
self.__delete_existing_file(file_path)
with h5py.File(file_path) as h5_f:
writer = HDFwriter(h5_f)
data = np.random.rand(2, 7)
h5_dset = writer._create_simple_dset(h5_f, VirtualDataset('test', data))
self.assertIsInstance(h5_dset, h5py.Dataset)
# with self.assertWarns(UserWarning):
with self.assertRaises(TypeError):
writer._write_dset_attributes(h5_dset, {'labels': [1, np.arange(3)]})
os.remove(file_path)
def test_write_illegal_reg_ref_too_many_slices(self):
file_path = 'test.h5'
self.__delete_existing_file(file_path)
with h5py.File(file_path) as h5_f:
writer = HDFwriter(h5_f)
data = np.random.rand(5, 7)
h5_dset = writer._create_simple_dset(h5_f, VirtualDataset('test', data))
self.assertIsInstance(h5_dset, h5py.Dataset)
attrs = {'labels': {'even_rows': (slice(0, None, 2), slice(None), slice(None)),
'odd_rows': (slice(1, None, 2), slice(None), slice(None))}}
with self.assertRaises(ValueError):
writer._write_dset_attributes(h5_dset, attrs.copy())
os.remove(file_path)
def test_write_illegal_reg_ref_too_few_slices(self):
file_path = 'test.h5'
self.__delete_existing_file(file_path)
with h5py.File(file_path) as h5_f:
writer = HDFwriter(h5_f)
data = np.random.rand(5, 7)
h5_dset = writer._create_simple_dset(h5_f, VirtualDataset('test', data))
self.assertIsInstance(h5_dset, h5py.Dataset)
attrs = {'labels': {'even_rows': (slice(0, None, 2)),
'odd_rows': (slice(1, None, 2))}}
with self.assertRaises(ValueError):
writer._write_dset_attributes(h5_dset, attrs.copy())
os.remove(file_path)
def test_write_reg_ref_slice_dim_larger_than_data(self):
file_path = 'test.h5'
self.__delete_existing_file(file_path)
with h5py.File(file_path) as h5_f:
writer = HDFwriter(h5_f)
data = np.random.rand(5, 7)
h5_dset = writer._create_simple_dset(h5_f, VirtualDataset('test', data))
self.assertIsInstance(h5_dset, h5py.Dataset)
attrs = {'labels': {'even_rows': (slice(0, 15, 2), slice(None)),
'odd_rows': (slice(1, 15, 2), slice(None))}}
writer._write_dset_attributes(h5_dset, attrs.copy())
h5_f.flush()
# two atts point to region references. one for labels
self.assertEqual(len(h5_dset.attrs), 1 + len(attrs['labels']))
# check if the labels attribute was written:
self.assertTrue(np.all([x in list(attrs['labels'].keys()) for x in get_attr(h5_dset, 'labels')]))
expected_data = [data[:None:2], data[1:None:2]]
written_data = [h5_dset[h5_dset.attrs['even_rows']], h5_dset[h5_dset.attrs['odd_rows']]]
for exp, act in zip(expected_data, written_data):
self.assertTrue(np.allclose(exp, act))
os.remove(file_path)
def test_write_illegal_reg_ref_not_slice_objs(self):
file_path = 'test.h5'
self.__delete_existing_file(file_path)
with h5py.File(file_path) as h5_f:
writer = HDFwriter(h5_f)
data = np.random.rand(5, 7)
h5_dset = writer._create_simple_dset(h5_f, VirtualDataset('test', data))
self.assertIsInstance(h5_dset, h5py.Dataset)
attrs = {'labels': {'even_rows': (slice(0, None, 2), 15),
'odd_rows': (slice(1, None, 2), 'hello')}}
with self.assertRaises(TypeError):
writer._write_dset_attributes(h5_dset, attrs.copy())
os.remove(file_path)
def test_write_simple_atts_reg_ref_to_dset(self):
file_path = 'test.h5'
self.__delete_existing_file(file_path)
with h5py.File(file_path) as h5_f:
writer = HDFwriter(h5_f)
data = np.random.rand(5, 7)
h5_dset = writer._create_simple_dset(h5_f, VirtualDataset('test', data))
self.assertIsInstance(h5_dset, h5py.Dataset)
attrs = {'att_1': 'string_val',
'att_2': 1.2345,
'att_3': [1, 2, 3, 4],
'att_4': ['str_1', 'str_2', 'str_3'],
'labels': {'even_rows': (slice(0, None, 2), slice(None)),
'odd_rows': (slice(1, None, 2), slice(None))}
}
writer._write_dset_attributes(h5_dset, attrs.copy())
reg_ref = attrs.pop('labels')
self.assertEqual(len(h5_dset.attrs), len(attrs) + 1 + len(reg_ref))
for key, expected_val in attrs.items():
self.assertTrue(np.all(get_attr(h5_dset, key) == expected_val))
self.assertTrue(np.all([x in list(reg_ref.keys()) for x in get_attr(h5_dset, 'labels')]))
expected_data = [data[:None:2], data[1:None:2]]
written_data = [h5_dset[h5_dset.attrs['even_rows']], h5_dset[h5_dset.attrs['odd_rows']]]
for exp, act in zip(expected_data, written_data):
self.assertTrue(np.allclose(exp, act))
os.remove(file_path)
def test_write_invalid_input(self):
file_path = 'test.h5'
self.__delete_existing_file(file_path)
with h5py.File(file_path) as h5_f:
writer = HDFwriter(h5_f)
with self.assertRaises(TypeError):
_ = writer.write(np.arange(5))
def test_write_dset_under_root(self):
file_path = 'test.h5'
self.__delete_existing_file(file_path)
with h5py.File(file_path) as h5_f:
writer = HDFwriter(h5_f)
data = np.random.rand(5, 7)
attrs = {'att_1': 'string_val',
'att_2': 1.2345,
'att_3': [1, 2, 3, 4],
'att_4': ['str_1', 'str_2', 'str_3'],
'labels': {'even_rows': (slice(0, None, 2), slice(None)),
'odd_rows': (slice(1, None, 2), slice(None))}
}
micro_dset = VirtualDataset('test', data)
micro_dset.attrs = attrs.copy()
[h5_dset] = writer.write(micro_dset)
self.assertIsInstance(h5_dset, h5py.Dataset)
reg_ref = attrs.pop('labels')
self.assertEqual(len(h5_dset.attrs), len(attrs) + 1 + len(reg_ref))
for key, expected_val in attrs.items():
self.assertTrue(np.all(get_attr(h5_dset, key) == expected_val))
self.assertTrue(np.all([x in list(reg_ref.keys()) for x in get_attr(h5_dset, 'labels')]))
expected_data = [data[:None:2], data[1:None:2]]
written_data = [h5_dset[h5_dset.attrs['even_rows']], h5_dset[h5_dset.attrs['odd_rows']]]
for exp, act in zip(expected_data, written_data):
self.assertTrue(np.allclose(exp, act))
os.remove(file_path)
def test_write_dset_under_existing_group(self):
file_path = 'test.h5'
self.__delete_existing_file(file_path)
with h5py.File(file_path) as h5_f:
writer = HDFwriter(h5_f)
h5_g = writer._create_group(h5_f, VirtualGroup('test_group'))
self.assertIsInstance(h5_g, h5py.Group)
data = np.random.rand(5, 7)
attrs = {'att_1': 'string_val',
'att_2': 1.2345,
'att_3': [1, 2, 3, 4],
'att_4': ['str_1', 'str_2', 'str_3'],
'labels': {'even_rows': (slice(0, None, 2), slice(None)),
'odd_rows': (slice(1, None, 2), slice(None))}
}
micro_dset = VirtualDataset('test', data, parent='/test_group')
micro_dset.attrs = attrs.copy()
[h5_dset] = writer.write(micro_dset)
self.assertIsInstance(h5_dset, h5py.Dataset)
self.assertEqual(h5_dset.parent, h5_g)
reg_ref = attrs.pop('labels')
self.assertEqual(len(h5_dset.attrs), len(attrs) + 1 + len(reg_ref))
for key, expected_val in attrs.items():
self.assertTrue(np.all(get_attr(h5_dset, key) == expected_val))
self.assertTrue(np.all([x in list(reg_ref.keys()) for x in get_attr(h5_dset, 'labels')]))
expected_data = [data[:None:2], data[1:None:2]]
written_data = [h5_dset[h5_dset.attrs['even_rows']], h5_dset[h5_dset.attrs['odd_rows']]]
for exp, act in zip(expected_data, written_data):
self.assertTrue(np.allclose(exp, act))
os.remove(file_path)
def test_write_dset_under_invalid_group(self):
file_path = 'test.h5'
self.__delete_existing_file(file_path)
with h5py.File(file_path) as h5_f:
writer = HDFwriter(h5_f)
with self.assertRaises(KeyError):
_ = writer.write(VirtualDataset('test', np.random.rand(5, 7), parent='/does_not_exist'))
os.remove(file_path)
def test_write_root(self):
file_path = 'test.h5'
self.__delete_existing_file(file_path)
with h5py.File(file_path) as h5_f:
attrs = {'att_1': 'string_val',
'att_2': 1.2345,
'att_3': [1, 2, 3, 4],
'att_4': ['str_1', 'str_2', 'str_3']}
micro_group = VirtualGroup('')
micro_group.attrs = attrs
writer = HDFwriter(h5_f)
[ret_val] = writer.write(micro_group)
self.assertIsInstance(ret_val, h5py.File)
self.assertEqual(h5_f, ret_val)
for key, expected_val in attrs.items():
self.assertTrue(np.all(get_attr(h5_f, key) == expected_val))
os.remove(file_path)
def test_write_single_group(self):
file_path = 'test.h5'
self.__delete_existing_file(file_path)
with h5py.File(file_path) as h5_f:
attrs = {'att_1': 'string_val',
'att_2': 1.2345,
'att_3': [1, 2, 3, 4],
'att_4': ['str_1', 'str_2', 'str_3']}
micro_group = VirtualGroup('Test_')
micro_group.attrs = attrs
writer = HDFwriter(h5_f)
[h5_group] = writer.write(micro_group)
for key, expected_val in attrs.items():
self.assertTrue(np.all(get_attr(h5_group, key) == expected_val))
os.remove(file_path)
def test_group_indexing_sequential(self):
file_path = 'test.h5'
self.__delete_existing_file(file_path)
with h5py.File(file_path) as h5_f:
writer = HDFwriter(h5_f)
micro_group_0 = VirtualGroup('Test_', attrs={'att_1': 'string_val', 'att_2': 1.2345})
[h5_group_0] = writer.write(micro_group_0)
_ = writer.write(VirtualGroup('blah'))
self.assertIsInstance(h5_group_0, h5py.Group)
self.assertEqual(h5_group_0.name, '/Test_000')
for key, expected_val in micro_group_0.attrs.items():
self.assertTrue(np.all(get_attr(h5_group_0, key) == expected_val))
micro_group_1 = VirtualGroup('Test_', attrs={'att_3': [1, 2, 3, 4], 'att_4': ['str_1', 'str_2', 'str_3']})
[h5_group_1] = writer.write(micro_group_1)
self.assertIsInstance(h5_group_1, h5py.Group)
self.assertEqual(h5_group_1.name, '/Test_001')
for key, expected_val in micro_group_1.attrs.items():
self.assertTrue(np.all(get_attr(h5_group_1, key) == expected_val))
os.remove(file_path)
def test_group_indexing_simultaneous(self):
file_path = 'test.h5'
self.__delete_existing_file(file_path)
with h5py.File(file_path) as h5_f:
            micro_group_0 = VirtualGroup('Test_', attrs={'att_1': 'string_val', 'att_2': 1.2345})
micro_group_1 = VirtualGroup('Test_', attrs={'att_3': [1, 2, 3, 4], 'att_4': ['str_1', 'str_2', 'str_3']})
root_group = VirtualGroup('', children=[VirtualGroup('blah'), micro_group_0,
VirtualGroup('meh'), micro_group_1])
writer = HDFwriter(h5_f)
h5_refs_list = writer.write(root_group)
[h5_group_1] = get_h5_obj_refs(['Test_001'], h5_refs_list)
[h5_group_0] = get_h5_obj_refs(['Test_000'], h5_refs_list)
self.assertIsInstance(h5_group_0, h5py.Group)
self.assertEqual(h5_group_0.name, '/Test_000')
for key, expected_val in micro_group_0.attrs.items():
self.assertTrue(np.all(get_attr(h5_group_0, key) == expected_val))
self.assertIsInstance(h5_group_1, h5py.Group)
self.assertEqual(h5_group_1.name, '/Test_001')
for key, expected_val in micro_group_1.attrs.items():
self.assertTrue(np.all(get_attr(h5_group_1, key) == expected_val))
os.remove(file_path)
def test_write_simple_tree(self):
file_path = 'test.h5'
self.__delete_existing_file(file_path)
with h5py.File(file_path) as h5_f:
inner_dset_data = np.random.rand(5, 7)
inner_dset_attrs = {'att_1': 'string_val',
'att_2': 1.2345,
'att_3': [1, 2, 3, 4],
'att_4': ['str_1', 'str_2', 'str_3'],
'labels': {'even_rows': (slice(0, None, 2), slice(None)),
'odd_rows': (slice(1, None, 2), slice(None))}
}
inner_dset = VirtualDataset('inner_dset', inner_dset_data)
inner_dset.attrs = inner_dset_attrs.copy()
attrs_inner_grp = {'att_1': 'string_val',
'att_2': 1.2345,
'att_3': [1, 2, 3, 4],
'att_4': ['str_1', 'str_2', 'str_3']}
inner_group = VirtualGroup('indexed_inner_group_')
inner_group.attrs = attrs_inner_grp
inner_group.add_children(inner_dset)
outer_dset_data = np.random.rand(5, 7)
outer_dset_attrs = {'att_1': 'string_val',
'att_2': 1.2345,
'att_3': [1, 2, 3, 4],
'att_4': ['str_1', 'str_2', 'str_3'],
'labels': {'even_rows': (slice(0, None, 2), slice(None)),
'odd_rows': (slice(1, None, 2), slice(None))}
}
outer_dset = VirtualDataset('test', outer_dset_data, parent='/test_group')
outer_dset.attrs = outer_dset_attrs.copy()
attrs_outer_grp = {'att_1': 'string_val',
'att_2': 1.2345,
'att_3': [1, 2, 3, 4],
'att_4': ['str_1', 'str_2', 'str_3']}
outer_group = VirtualGroup('unindexed_outer_group')
outer_group.attrs = attrs_outer_grp
outer_group.add_children([inner_group, outer_dset])
writer = HDFwriter(h5_f)
h5_refs_list = writer.write(outer_group)
# I don't know of a more elegant way to do this:
[h5_outer_dset] = get_h5_obj_refs([outer_dset.name], h5_refs_list)
[h5_inner_dset] = get_h5_obj_refs([inner_dset.name], h5_refs_list)
[h5_outer_group] = get_h5_obj_refs([outer_group.name], h5_refs_list)
[h5_inner_group] = get_h5_obj_refs(['indexed_inner_group_000'], h5_refs_list)
self.assertIsInstance(h5_outer_dset, h5py.Dataset)
self.assertIsInstance(h5_inner_dset, h5py.Dataset)
self.assertIsInstance(h5_outer_group, h5py.Group)
self.assertIsInstance(h5_inner_group, h5py.Group)
# check assertions for the inner dataset first
self.assertEqual(h5_inner_dset.parent, h5_inner_group)
reg_ref = inner_dset_attrs.pop('labels')
self.assertEqual(len(h5_inner_dset.attrs), len(inner_dset_attrs) + 1 + len(reg_ref))
for key, expected_val in inner_dset_attrs.items():
self.assertTrue(np.all(get_attr(h5_inner_dset, key) == expected_val))
self.assertTrue(np.all([x in list(reg_ref.keys()) for x in get_attr(h5_inner_dset, 'labels')]))
expected_data = [inner_dset_data[:None:2], inner_dset_data[1:None:2]]
written_data = [h5_inner_dset[h5_inner_dset.attrs['even_rows']], h5_inner_dset[h5_inner_dset.attrs['odd_rows']]]
for exp, act in zip(expected_data, written_data):
self.assertTrue(np.allclose(exp, act))
# check assertions for the inner data group next:
self.assertEqual(h5_inner_group.parent, h5_outer_group)
for key, expected_val in attrs_inner_grp.items():
self.assertTrue(np.all(get_attr(h5_inner_group, key) == expected_val))
# check the outer dataset next:
self.assertEqual(h5_outer_dset.parent, h5_outer_group)
reg_ref = outer_dset_attrs.pop('labels')
self.assertEqual(len(h5_outer_dset.attrs), len(outer_dset_attrs) + 1 + len(reg_ref))
for key, expected_val in outer_dset_attrs.items():
self.assertTrue(np.all(get_attr(h5_outer_dset, key) == expected_val))
self.assertTrue(np.all([x in list(reg_ref.keys()) for x in get_attr(h5_outer_dset, 'labels')]))
expected_data = [outer_dset_data[:None:2], outer_dset_data[1:None:2]]
written_data = [h5_outer_dset[h5_outer_dset.attrs['even_rows']],
h5_outer_dset[h5_outer_dset.attrs['odd_rows']]]
for exp, act in zip(expected_data, written_data):
self.assertTrue(np.allclose(exp, act))
# Finally check the outer group:
self.assertEqual(h5_outer_group.parent, h5_f)
for key, expected_val in attrs_outer_grp.items():
self.assertTrue(np.all(get_attr(h5_outer_group, key) == expected_val))
os.remove(file_path)
if __name__ == '__main__':
unittest.main()
| mit | -6,319,937,040,119,715,000 | 3,721,989,523,288,231,400 | 39.026519 | 124 | 0.552203 | false |
gmalmquist/pants | src/python/pants/backend/python/interpreter_cache.py | 5 | 8302 | # coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import os
import shutil
from pex.interpreter import PythonIdentity, PythonInterpreter
from pex.package import EggPackage, Package, SourcePackage
from pex.resolver import resolve
from pants.util.dirutil import safe_concurrent_creation, safe_mkdir
# TODO(wickman) Create a safer version of this and add to twitter.common.dirutil
def _safe_link(src, dst):
try:
os.unlink(dst)
except OSError:
pass
os.symlink(src, dst)
class PythonInterpreterCache(object):
@staticmethod
def _matches(interpreter, filters):
return any(interpreter.identity.matches(filt) for filt in filters)
@classmethod
def _matching(cls, interpreters, filters):
for interpreter in interpreters:
if cls._matches(interpreter, filters):
yield interpreter
@classmethod
def select_interpreter(cls, compatibilities, allow_multiple=False):
"""Given a set of interpreters, either return them all if ``allow_multiple`` is ``True``;
otherwise, return the lowest compatible interpreter.
"""
if allow_multiple:
return compatibilities
return [min(compatibilities)] if compatibilities else []
def __init__(self, python_setup, python_repos, logger=None):
self._python_setup = python_setup
self._python_repos = python_repos
self._cache_dir = python_setup.interpreter_cache_dir
safe_mkdir(self._cache_dir)
self._interpreters = set()
self._logger = logger or (lambda msg: True)
self._default_filters = (python_setup.interpreter_requirement or b'',)
@property
def interpreters(self):
"""Returns the set of cached interpreters."""
return self._interpreters
def _interpreter_from_path(self, path, filters):
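    # The basename of a cache entry encodes the interpreter identity; the
    # 'python' symlink inside it points at the real binary.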
interpreter_dir = os.path.basename(path)
identity = PythonIdentity.from_path(interpreter_dir)
try:
executable = os.readlink(os.path.join(path, 'python'))
except OSError:
return None
interpreter = PythonInterpreter(executable, identity)
if self._matches(interpreter, filters):
return self._resolve(interpreter)
return None
def _setup_interpreter(self, interpreter, cache_target_path):
with safe_concurrent_creation(cache_target_path) as safe_path:
os.mkdir(safe_path) # Parent will already have been created by safe_concurrent_creation.
os.symlink(interpreter.binary, os.path.join(safe_path, 'python'))
return self._resolve(interpreter, safe_path)
def _setup_cached(self, filters):
"""Find all currently-cached interpreters."""
for interpreter_dir in os.listdir(self._cache_dir):
path = os.path.join(self._cache_dir, interpreter_dir)
pi = self._interpreter_from_path(path, filters)
if pi:
self._logger('Detected interpreter {}: {}'.format(pi.binary, str(pi.identity)))
self._interpreters.add(pi)
def _setup_paths(self, paths, filters):
"""Find interpreters under paths, and cache them."""
for interpreter in self._matching(PythonInterpreter.all(paths), filters):
identity_str = str(interpreter.identity)
cache_path = os.path.join(self._cache_dir, identity_str)
pi = self._interpreter_from_path(cache_path, filters)
if pi is None:
self._setup_interpreter(interpreter, cache_path)
pi = self._interpreter_from_path(cache_path, filters)
if pi is None:
continue
self._interpreters.add(pi)
def matched_interpreters(self, filters):
"""Given some filters, yield any interpreter that matches at least one of them.
:param filters: A sequence of strings that constrain the interpreter compatibility for this
      cache, using the Requirement-style format, e.g. ``'CPython>=3'``, or just ``['>=2.7','<3']``
for requirements agnostic to interpreter class.
"""
for match in self._matching(self._interpreters, filters):
yield match
def setup(self, paths=(), force=False, filters=(b'',)):
"""Sets up a cache of python interpreters.
NB: Must be called prior to accessing the ``interpreters`` property or the ``matches`` method.
:param paths: The paths to search for a python interpreter; the system ``PATH`` by default.
:param bool force: When ``True`` the interpreter cache is always re-built.
:param filters: A sequence of strings that constrain the interpreter compatibility for this
      cache, using the Requirement-style format, e.g. ``'CPython>=3'``, or just ``['>=2.7','<3']``
for requirements agnostic to interpreter class.
"""
filters = self._default_filters if not any(filters) else filters
setup_paths = paths or os.getenv('PATH').split(os.pathsep)
self._setup_cached(filters)
def unsatisfied_filters():
return filter(lambda filt: len(list(self._matching(self._interpreters, [filt]))) == 0, filters)
if force or len(unsatisfied_filters()) > 0:
self._setup_paths(setup_paths, filters)
for filt in unsatisfied_filters():
self._logger('No valid interpreters found for {}!'.format(filt))
matches = list(self.matched_interpreters(filters))
if len(matches) == 0:
self._logger('Found no valid interpreters!')
return matches
def _resolve(self, interpreter, interpreter_dir=None):
"""Resolve and cache an interpreter with a setuptools and wheel capability."""
interpreter = self._resolve_interpreter(interpreter, interpreter_dir,
self._python_setup.setuptools_requirement())
if interpreter:
return self._resolve_interpreter(interpreter, interpreter_dir,
self._python_setup.wheel_requirement())
def _resolve_interpreter(self, interpreter, interpreter_dir, requirement):
"""Given a :class:`PythonInterpreter` and a requirement, return an interpreter with the
capability of resolving that requirement or ``None`` if it's not possible to install a
suitable requirement.
If interpreter_dir is unspecified, operates on the default location.
"""
if interpreter.satisfies([requirement]):
return interpreter
if not interpreter_dir:
interpreter_dir = os.path.join(self._cache_dir, str(interpreter.identity))
target_link = os.path.join(interpreter_dir, requirement.key)
bdist = self._resolve_and_link(interpreter, requirement, target_link)
if bdist:
return interpreter.with_extra(bdist.name, bdist.raw_version, bdist.path)
else:
self._logger('Failed to resolve requirement {} for {}'.format(requirement, interpreter))
def _resolve_and_link(self, interpreter, requirement, target_link):
# Short-circuit if there is a local copy.
if os.path.exists(target_link) and os.path.exists(os.path.realpath(target_link)):
bdist = Package.from_href(os.path.realpath(target_link))
if bdist.satisfies(requirement):
return bdist
# Since we're resolving to bootstrap a bare interpreter, we won't have wheel available.
# Explicitly set the precedence to avoid resolution of wheels or distillation of sdists into
# wheels.
precedence = (EggPackage, SourcePackage)
distributions = resolve(requirements=[requirement],
fetchers=self._python_repos.get_fetchers(),
interpreter=interpreter,
context=self._python_repos.get_network_context(),
precedence=precedence)
if not distributions:
return None
assert len(distributions) == 1, ('Expected exactly 1 distribution to be resolved for {}, '
'found:\n\t{}'.format(requirement,
'\n\t'.join(map(str, distributions))))
dist_location = distributions[0].location
target_location = os.path.join(os.path.dirname(target_link), os.path.basename(dist_location))
shutil.move(dist_location, target_location)
_safe_link(target_location, target_link)
self._logger(' installed {}'.format(target_location))
return Package.from_href(target_location)
| apache-2.0 | -4,356,344,351,835,639,000 | 240,028,551,233,318,560 | 42.239583 | 101 | 0.683691 | false |
Vauxoo/maintainer-tools | tools/set_repo_labels.py | 13 | 2539 | # -*- coding: utf-8 -*-
"""
Create and modify labels on GitHub so that all repositories share the same set
of labels with the same colors.
"""
from .github_login import login
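# Assumed invocation (not stated in the original file): run as a module from the
# repository root, e.g. ``python -m tools.set_repo_labels``, so that the
# package-relative import above resolves.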
REPO_TO_IGNORE = [
'odoo-community.org',
'community-data-files',
'contribute-md-template',
'website',
]
# here is the list of labels we need in each repo
all_labels = {
'7.0': '000000',
'8.0': '000000',
'bug': 'fc2929',
'duplicate': 'cccccc',
'enhancement': '84b6eb',
'help wanted': '159818',
'invalid': 'e6e6e6',
'question': 'cc317c',
'needs fixing': 'eb6420',
'needs review': 'fbca04',
'work in progress': '0052cc',
'wontfix': 'ffffff',
}
def main():
gh = login()
all_repos = gh.iter_user_repos('OCA')
for repo in all_repos:
if repo.name in REPO_TO_IGNORE:
continue
labels = repo.iter_labels()
existing_labels = dict((l.name, l.color) for l in labels)
to_create = []
to_change_color = []
for needed_label in all_labels:
if needed_label not in existing_labels.keys():
to_create.append(needed_label)
elif existing_labels[needed_label] != all_labels[needed_label]:
to_change_color.append(needed_label)
extra_labels = [l for l in existing_labels if l not in all_labels]
if to_create:
print ('Repo %s - Create %s missing labels'
% (repo.name, len(to_create)))
for label_name in to_create:
success = repo.create_label(label_name, all_labels[label_name])
if not success:
print ("Failed to create a label on '%s'!"
" Please check you access right to this repository."
% repo.name)
if to_change_color:
print ('Repo %s - Update %s labels with wrong color'
% (repo.name, len(to_change_color)))
for label_name in to_change_color:
success = repo.update_label(label_name, all_labels[label_name])
if not success:
print ("Failed to update a label on '%s'!"
" Please check you access right to this repository."
% repo.name)
if extra_labels:
print ('Repo %s - Found %s extra labels'
% (repo.name, len(extra_labels)))
for label_name in extra_labels:
                print(label_name)
if __name__ == '__main__':
main()
| agpl-3.0 | 9,148,278,775,692,358,000 | 2,295,757,566,944,960,500 | 29.22619 | 79 | 0.530918 | false |
ppizarror/Hero-of-Antair | data/images/pil/ImageChops.py | 2 | 7410 | #
# The Python Imaging Library.
# $Id$
#
# standard channel operations
#
# History:
# 1996-03-24 fl Created
# 1996-08-13 fl Added logical operations (for "1" images)
# 2000-10-12 fl Added offset method (from Image.py)
#
# Copyright (c) 1997-2000 by Secret Labs AB
# Copyright (c) 1996-2000 by Fredrik Lundh
#
# See the README file for information on usage and redistribution.
#
import Image
##
# The <b>ImageChops</b> module contains a number of arithmetical image
# operations, called <i>channel operations</i> ("chops"). These can be
# used for various purposes, including special effects, image
# compositions, algorithmic painting, and more.
# <p>
# At this time, channel operations are only implemented for 8-bit
# images (e.g. "L" and "RGB").
# <p>
# Most channel operations take one or two image arguments and return
# a new image. Unless otherwise noted, the result of a channel
# operation is always clipped to the range 0 to MAX (which is 255 for
# all modes supported by the operations in this module).
##
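#
# A minimal usage sketch (added for illustration, not part of the original
# module; the file names are hypothetical):
#
#     import Image
#     import ImageChops
#
#     im1 = Image.open("a.png").convert("L")
#     im2 = Image.open("b.png").convert("L")
#     diff = ImageChops.difference(im1, im2)      # abs(im1 - im2)
#     avg = ImageChops.add(im1, im2, scale=2.0)   # (im1 + im2) / 2
#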
##
# Return an image with the same size as the given image, but filled
# with the given pixel value.
#
# @param image Reference image.
# @param value Pixel value.
# @return An image object.
def constant(image, value):
"Fill a channel with a given grey level"
return Image.new("L", image.size, value)
##
# Copy image.
#
# @param image Source image.
# @return A copy of the source image.
def duplicate(image):
"Create a copy of a channel"
return image.copy()
##
# Inverts an image
# (MAX - image).
#
# @param image Source image.
# @return An image object.
def invert(image):
"Invert a channel"
image.load()
return image._new(image.im.chop_invert())
##
# Compare images, and return lighter pixel value
# (max(image1, image2)).
# <p>
# Compares the two images, pixel by pixel, and returns a new image
# containing the lighter values.
#
# @param image1 First image.
# @param image2 Second image.
# @return An image object.
def lighter(image1, image2):
"Select the lighter pixels from each image"
image1.load()
image2.load()
return image1._new(image1.im.chop_lighter(image2.im))
##
# Compare images, and return darker pixel value
# (min(image1, image2)).
# <p>
# Compares the two images, pixel by pixel, and returns a new image
# containing the darker values.
#
# @param image1 First image.
# @param image2 Second image.
# @return An image object.
def darker(image1, image2):
"Select the darker pixels from each image"
image1.load()
image2.load()
return image1._new(image1.im.chop_darker(image2.im))
##
# Calculate absolute difference
# (abs(image1 - image2)).
# <p>
# Returns the absolute value of the difference between the two images.
#
# @param image1 First image.
# @param image2 Second image.
# @return An image object.
def difference(image1, image2):
"Subtract one image from another"
image1.load()
image2.load()
return image1._new(image1.im.chop_difference(image2.im))
##
# Superimpose positive images
# (image1 * image2 / MAX).
# <p>
# Superimposes two images on top of each other. If you multiply an
# image with a solid black image, the result is black. If you multiply
# with a solid white image, the image is unaffected.
#
# @param image1 First image.
# @param image2 Second image.
# @return An image object.
def multiply(image1, image2):
"Superimpose two positive images"
image1.load()
image2.load()
return image1._new(image1.im.chop_multiply(image2.im))
##
# Superimpose negative images
# (MAX - ((MAX - image1) * (MAX - image2) / MAX)).
# <p>
# Superimposes two inverted images on top of each other.
#
# @param image1 First image.
# @param image2 Second image.
# @return An image object.
def screen(image1, image2):
"Superimpose two negative images"
image1.load()
image2.load()
return image1._new(image1.im.chop_screen(image2.im))
##
# Add images
# ((image1 + image2) / scale + offset).
# <p>
# Adds two images, dividing the result by scale and adding the
# offset. If omitted, scale defaults to 1.0, and offset to 0.0.
#
# @param image1 First image.
# @param image2 Second image.
# @return An image object.
def add(image1, image2, scale=1.0, offset=0):
"Add two images"
image1.load()
image2.load()
return image1._new(image1.im.chop_add(image2.im, scale, offset))
##
# Subtract images
# ((image1 - image2) / scale + offset).
# <p>
# Subtracts two images, dividing the result by scale and adding the
# offset. If omitted, scale defaults to 1.0, and offset to 0.0.
#
# @param image1 First image.
# @param image2 Second image.
# @return An image object.
def subtract(image1, image2, scale=1.0, offset=0):
"Subtract two images"
image1.load()
image2.load()
return image1._new(image1.im.chop_subtract(image2.im, scale, offset))
##
# Add images without clipping
# ((image1 + image2) % MAX).
# <p>
# Adds two images, without clipping the result.
#
# @param image1 First image.
# @param image2 Second image.
# @return An image object.
def add_modulo(image1, image2):
"Add two images without clipping"
image1.load()
image2.load()
return image1._new(image1.im.chop_add_modulo(image2.im))
##
# Subtract images without clipping
# ((image1 - image2) % MAX).
# <p>
# Subtracts two images, without clipping the result.
#
# @param image1 First image.
# @param image2 Second image.
# @return An image object.
def subtract_modulo(image1, image2):
"Subtract two images without clipping"
image1.load()
image2.load()
return image1._new(image1.im.chop_subtract_modulo(image2.im))
##
# Logical AND
# (image1 and image2).
def logical_and(image1, image2):
"Logical and between two images"
image1.load()
image2.load()
return image1._new(image1.im.chop_and(image2.im))
##
# Logical OR
# (image1 or image2).
def logical_or(image1, image2):
"Logical or between two images"
image1.load()
image2.load()
return image1._new(image1.im.chop_or(image2.im))
##
# Logical XOR
# (image1 xor image2).
def logical_xor(image1, image2):
"Logical xor between two images"
image1.load()
image2.load()
return image1._new(image1.im.chop_xor(image2.im))
##
# Blend images using constant transparency weight.
# <p>
# Same as the <b>blend</b> function in the <b>Image</b> module.
def blend(image1, image2, alpha):
"Blend two images using a constant transparency weight"
return Image.blend(image1, image2, alpha)
##
# Create composite using transparency mask.
# <p>
# Same as the <b>composite</b> function in the <b>Image</b> module.
def composite(image1, image2, mask):
"Create composite image by blending images using a transparency mask"
return Image.composite(image1, image2, mask)
##
# Offset image data.
# <p>
# Returns a copy of the image where data has been offset by the given
# distances. Data wraps around the edges. If yoffset is omitted, it
# is assumed to be equal to xoffset.
#
# @param image Source image.
# @param xoffset The horizontal distance.
# @param yoffset The vertical distance. If omitted, both
# distances are set to the same value.
# @return An Image object.
def offset(image, xoffset, yoffset=None):
"Offset image in horizontal and/or vertical direction"
if yoffset is None:
yoffset = xoffset
image.load()
return image._new(image.im.offset(xoffset, yoffset))
| gpl-2.0 | 7,500,253,920,044,948,000 | -7,875,623,671,337,241,000 | 23.61794 | 73 | 0.695682 | false |
BMJHayward/numpy | numpy/polynomial/hermite_e.py | 49 | 57120 | """
Objects for dealing with Hermite_e series.
This module provides a number of objects (mostly functions) useful for
dealing with Hermite_e series, including a `HermiteE` class that
encapsulates the usual arithmetic operations. (General information
on how this module represents and works with such polynomials is in the
docstring for its "parent" sub-package, `numpy.polynomial`).
Constants
---------
- `hermedomain` -- Hermite_e series default domain, [-1,1].
- `hermezero` -- Hermite_e series that evaluates identically to 0.
- `hermeone` -- Hermite_e series that evaluates identically to 1.
- `hermex` -- Hermite_e series for the identity map, ``f(x) = x``.
Arithmetic
----------
- `hermemulx` -- multiply a Hermite_e series in ``P_i(x)`` by ``x``.
- `hermeadd` -- add two Hermite_e series.
- `hermesub` -- subtract one Hermite_e series from another.
- `hermemul` -- multiply two Hermite_e series.
- `hermediv` -- divide one Hermite_e series by another.
- `hermeval` -- evaluate a Hermite_e series at given points.
- `hermeval2d` -- evaluate a 2D Hermite_e series at given points.
- `hermeval3d` -- evaluate a 3D Hermite_e series at given points.
- `hermegrid2d` -- evaluate a 2D Hermite_e series on a Cartesian product.
- `hermegrid3d` -- evaluate a 3D Hermite_e series on a Cartesian product.
Calculus
--------
- `hermeder` -- differentiate a Hermite_e series.
- `hermeint` -- integrate a Hermite_e series.
Misc Functions
--------------
- `hermefromroots` -- create a Hermite_e series with specified roots.
- `hermeroots` -- find the roots of a Hermite_e series.
- `hermevander` -- Vandermonde-like matrix for Hermite_e polynomials.
- `hermevander2d` -- Vandermonde-like matrix for 2D power series.
- `hermevander3d` -- Vandermonde-like matrix for 3D power series.
- `hermegauss` -- Gauss-Hermite_e quadrature, points and weights.
- `hermeweight` -- Hermite_e weight function.
- `hermecompanion` -- symmetrized companion matrix in Hermite_e form.
- `hermefit` -- least-squares fit returning a Hermite_e series.
- `hermetrim` -- trim leading coefficients from a Hermite_e series.
- `hermeline` -- Hermite_e series of given straight line.
- `herme2poly` -- convert a Hermite_e series to a polynomial.
- `poly2herme` -- convert a polynomial to a Hermite_e series.
Classes
-------
- `HermiteE` -- A Hermite_e series class.
See also
--------
`numpy.polynomial`
"""
from __future__ import division, absolute_import, print_function
import warnings
import numpy as np
import numpy.linalg as la
from . import polyutils as pu
from ._polybase import ABCPolyBase
__all__ = [
'hermezero', 'hermeone', 'hermex', 'hermedomain', 'hermeline',
'hermeadd', 'hermesub', 'hermemulx', 'hermemul', 'hermediv',
'hermepow', 'hermeval', 'hermeder', 'hermeint', 'herme2poly',
'poly2herme', 'hermefromroots', 'hermevander', 'hermefit', 'hermetrim',
'hermeroots', 'HermiteE', 'hermeval2d', 'hermeval3d', 'hermegrid2d',
'hermegrid3d', 'hermevander2d', 'hermevander3d', 'hermecompanion',
'hermegauss', 'hermeweight']
hermetrim = pu.trimcoef
def poly2herme(pol):
"""
poly2herme(pol)
Convert a polynomial to a Hermite series.
Convert an array representing the coefficients of a polynomial (relative
to the "standard" basis) ordered from lowest degree to highest, to an
array of the coefficients of the equivalent Hermite series, ordered
from lowest to highest degree.
Parameters
----------
pol : array_like
1-D array containing the polynomial coefficients
Returns
-------
c : ndarray
1-D array containing the coefficients of the equivalent Hermite
series.
See Also
--------
herme2poly
Notes
-----
The easy way to do conversions between polynomial basis sets
is to use the convert method of a class instance.
Examples
--------
>>> from numpy.polynomial.hermite_e import poly2herme
>>> poly2herme(np.arange(4))
array([ 2., 10., 2., 3.])
"""
[pol] = pu.as_series([pol])
deg = len(pol) - 1
res = 0
for i in range(deg, -1, -1):
res = hermeadd(hermemulx(res), pol[i])
return res
def herme2poly(c):
"""
Convert a Hermite series to a polynomial.
Convert an array representing the coefficients of a Hermite series,
ordered from lowest degree to highest, to an array of the coefficients
of the equivalent polynomial (relative to the "standard" basis) ordered
from lowest to highest degree.
Parameters
----------
c : array_like
1-D array containing the Hermite series coefficients, ordered
from lowest order term to highest.
Returns
-------
pol : ndarray
1-D array containing the coefficients of the equivalent polynomial
(relative to the "standard" basis) ordered from lowest order term
to highest.
See Also
--------
poly2herme
Notes
-----
The easy way to do conversions between polynomial basis sets
is to use the convert method of a class instance.
Examples
--------
>>> from numpy.polynomial.hermite_e import herme2poly
>>> herme2poly([ 2., 10., 2., 3.])
array([ 0., 1., 2., 3.])
"""
from .polynomial import polyadd, polysub, polymulx
[c] = pu.as_series([c])
n = len(c)
if n == 1:
return c
if n == 2:
return c
else:
c0 = c[-2]
c1 = c[-1]
# i is the current degree of c1
for i in range(n - 1, 1, -1):
tmp = c0
c0 = polysub(c[i - 2], c1*(i - 1))
c1 = polyadd(tmp, polymulx(c1))
return polyadd(c0, polymulx(c1))
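#
# Added note (sketch, not part of the original module): the conversions above
# can also be done through the class interface mentioned in the docstrings, e.g.
#
#     >>> from numpy.polynomial import Polynomial, HermiteE
#     >>> HermiteE([2., 10., 2., 3.]).convert(kind=Polynomial)
#     Polynomial([ 0.,  1.,  2.,  3.], ...)
#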
#
# These constant arrays are of integer type so as to be compatible
# with the widest range of other types, such as Decimal.
#
# Hermite
hermedomain = np.array([-1, 1])
# Hermite coefficients representing zero.
hermezero = np.array([0])
# Hermite coefficients representing one.
hermeone = np.array([1])
# Hermite coefficients representing the identity x.
hermex = np.array([0, 1])
def hermeline(off, scl):
"""
Hermite series whose graph is a straight line.
Parameters
----------
off, scl : scalars
The specified line is given by ``off + scl*x``.
Returns
-------
y : ndarray
This module's representation of the Hermite series for
``off + scl*x``.
See Also
--------
polyline, chebline
Examples
--------
    >>> from numpy.polynomial.hermite_e import hermeline, hermeval
>>> hermeval(0,hermeline(3, 2))
3.0
>>> hermeval(1,hermeline(3, 2))
5.0
"""
if scl != 0:
return np.array([off, scl])
else:
return np.array([off])
def hermefromroots(roots):
"""
Generate a HermiteE series with given roots.
The function returns the coefficients of the polynomial
.. math:: p(x) = (x - r_0) * (x - r_1) * ... * (x - r_n),
in HermiteE form, where the `r_n` are the roots specified in `roots`.
If a zero has multiplicity n, then it must appear in `roots` n times.
For instance, if 2 is a root of multiplicity three and 3 is a root of
multiplicity 2, then `roots` looks something like [2, 2, 2, 3, 3]. The
roots can appear in any order.
If the returned coefficients are `c`, then
.. math:: p(x) = c_0 + c_1 * He_1(x) + ... + c_n * He_n(x)
The coefficient of the last term is not generally 1 for monic
polynomials in HermiteE form.
Parameters
----------
roots : array_like
Sequence containing the roots.
Returns
-------
out : ndarray
1-D array of coefficients. If all roots are real then `out` is a
real array, if some of the roots are complex, then `out` is complex
even if all the coefficients in the result are real (see Examples
below).
See Also
--------
polyfromroots, legfromroots, lagfromroots, hermfromroots,
chebfromroots.
Examples
--------
>>> from numpy.polynomial.hermite_e import hermefromroots, hermeval
>>> coef = hermefromroots((-1, 0, 1))
>>> hermeval((-1, 0, 1), coef)
array([ 0., 0., 0.])
>>> coef = hermefromroots((-1j, 1j))
>>> hermeval((-1j, 1j), coef)
array([ 0.+0.j, 0.+0.j])
"""
if len(roots) == 0:
return np.ones(1)
else:
[roots] = pu.as_series([roots], trim=False)
roots.sort()
p = [hermeline(-r, 1) for r in roots]
n = len(p)
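        # Combine the linear factors pairwise (a balanced, divide-and-conquer
        # product), which keeps the intermediate series short.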
while n > 1:
m, r = divmod(n, 2)
tmp = [hermemul(p[i], p[i+m]) for i in range(m)]
if r:
tmp[0] = hermemul(tmp[0], p[-1])
p = tmp
n = m
return p[0]
def hermeadd(c1, c2):
"""
Add one Hermite series to another.
Returns the sum of two Hermite series `c1` + `c2`. The arguments
are sequences of coefficients ordered from lowest order term to
highest, i.e., [1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``.
Parameters
----------
c1, c2 : array_like
1-D arrays of Hermite series coefficients ordered from low to
high.
Returns
-------
out : ndarray
Array representing the Hermite series of their sum.
See Also
--------
hermesub, hermemul, hermediv, hermepow
Notes
-----
Unlike multiplication, division, etc., the sum of two Hermite series
is a Hermite series (without having to "reproject" the result onto
the basis set) so addition, just like that of "standard" polynomials,
is simply "component-wise."
Examples
--------
>>> from numpy.polynomial.hermite_e import hermeadd
>>> hermeadd([1, 2, 3], [1, 2, 3, 4])
array([ 2., 4., 6., 4.])
"""
# c1, c2 are trimmed copies
[c1, c2] = pu.as_series([c1, c2])
if len(c1) > len(c2):
c1[:c2.size] += c2
ret = c1
else:
c2[:c1.size] += c1
ret = c2
return pu.trimseq(ret)
def hermesub(c1, c2):
"""
Subtract one Hermite series from another.
Returns the difference of two Hermite series `c1` - `c2`. The
sequences of coefficients are from lowest order term to highest, i.e.,
[1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``.
Parameters
----------
c1, c2 : array_like
1-D arrays of Hermite series coefficients ordered from low to
high.
Returns
-------
out : ndarray
Of Hermite series coefficients representing their difference.
See Also
--------
hermeadd, hermemul, hermediv, hermepow
Notes
-----
Unlike multiplication, division, etc., the difference of two Hermite
series is a Hermite series (without having to "reproject" the result
onto the basis set) so subtraction, just like that of "standard"
polynomials, is simply "component-wise."
Examples
--------
>>> from numpy.polynomial.hermite_e import hermesub
>>> hermesub([1, 2, 3, 4], [1, 2, 3])
array([ 0., 0., 0., 4.])
"""
# c1, c2 are trimmed copies
[c1, c2] = pu.as_series([c1, c2])
if len(c1) > len(c2):
c1[:c2.size] -= c2
ret = c1
else:
c2 = -c2
c2[:c1.size] += c1
ret = c2
return pu.trimseq(ret)
def hermemulx(c):
"""Multiply a Hermite series by x.
Multiply the Hermite series `c` by x, where x is the independent
variable.
Parameters
----------
c : array_like
1-D array of Hermite series coefficients ordered from low to
high.
Returns
-------
out : ndarray
Array representing the result of the multiplication.
Notes
-----
The multiplication uses the recursion relationship for Hermite
polynomials in the form
    .. math:: xP_i(x) = (P_{i + 1}(x) + iP_{i - 1}(x))
Examples
--------
>>> from numpy.polynomial.hermite_e import hermemulx
>>> hermemulx([1, 2, 3])
array([ 2., 7., 2., 3.])
"""
# c is a trimmed copy
[c] = pu.as_series([c])
# The zero series needs special treatment
if len(c) == 1 and c[0] == 0:
return c
prd = np.empty(len(c) + 1, dtype=c.dtype)
prd[0] = c[0]*0
prd[1] = c[0]
for i in range(1, len(c)):
prd[i + 1] = c[i]
prd[i - 1] += c[i]*i
return prd
def hermemul(c1, c2):
"""
Multiply one Hermite series by another.
Returns the product of two Hermite series `c1` * `c2`. The arguments
are sequences of coefficients, from lowest order "term" to highest,
e.g., [1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``.
Parameters
----------
c1, c2 : array_like
1-D arrays of Hermite series coefficients ordered from low to
high.
Returns
-------
out : ndarray
Of Hermite series coefficients representing their product.
See Also
--------
hermeadd, hermesub, hermediv, hermepow
Notes
-----
In general, the (polynomial) product of two C-series results in terms
that are not in the Hermite polynomial basis set. Thus, to express
the product as a Hermite series, it is necessary to "reproject" the
product onto said basis set, which may produce "unintuitive" (but
correct) results; see Examples section below.
Examples
--------
>>> from numpy.polynomial.hermite_e import hermemul
>>> hermemul([1, 2, 3], [0, 1, 2])
array([ 14., 15., 28., 7., 6.])
"""
# s1, s2 are trimmed copies
[c1, c2] = pu.as_series([c1, c2])
if len(c1) > len(c2):
c = c2
xs = c1
else:
c = c1
xs = c2
if len(c) == 1:
c0 = c[0]*xs
c1 = 0
elif len(c) == 2:
c0 = c[0]*xs
c1 = c[1]*xs
else:
nd = len(c)
c0 = c[-2]*xs
c1 = c[-1]*xs
for i in range(3, len(c) + 1):
tmp = c0
nd = nd - 1
c0 = hermesub(c[-i]*xs, c1*(nd - 1))
c1 = hermeadd(tmp, hermemulx(c1))
return hermeadd(c0, hermemulx(c1))
def hermediv(c1, c2):
"""
Divide one Hermite series by another.
Returns the quotient-with-remainder of two Hermite series
`c1` / `c2`. The arguments are sequences of coefficients from lowest
order "term" to highest, e.g., [1,2,3] represents the series
``P_0 + 2*P_1 + 3*P_2``.
Parameters
----------
c1, c2 : array_like
1-D arrays of Hermite series coefficients ordered from low to
high.
Returns
-------
[quo, rem] : ndarrays
Of Hermite series coefficients representing the quotient and
remainder.
See Also
--------
hermeadd, hermesub, hermemul, hermepow
Notes
-----
In general, the (polynomial) division of one Hermite series by another
results in quotient and remainder terms that are not in the Hermite
polynomial basis set. Thus, to express these results as a Hermite
series, it is necessary to "reproject" the results onto the Hermite
basis set, which may produce "unintuitive" (but correct) results; see
Examples section below.
Examples
--------
>>> from numpy.polynomial.hermite_e import hermediv
>>> hermediv([ 14., 15., 28., 7., 6.], [0, 1, 2])
(array([ 1., 2., 3.]), array([ 0.]))
>>> hermediv([ 15., 17., 28., 7., 6.], [0, 1, 2])
(array([ 1., 2., 3.]), array([ 1., 2.]))
"""
# c1, c2 are trimmed copies
[c1, c2] = pu.as_series([c1, c2])
if c2[-1] == 0:
raise ZeroDivisionError()
lc1 = len(c1)
lc2 = len(c2)
if lc1 < lc2:
return c1[:1]*0, c1
elif lc2 == 1:
return c1/c2[-1], c1[:1]*0
else:
quo = np.empty(lc1 - lc2 + 1, dtype=c1.dtype)
rem = c1
        for i in range(lc1 - lc2, -1, -1):
p = hermemul([0]*i + [1], c2)
q = rem[-1]/p[-1]
rem = rem[:-1] - q*p[:-1]
quo[i] = q
return quo, pu.trimseq(rem)
def hermepow(c, pow, maxpower=16):
"""Raise a Hermite series to a power.
Returns the Hermite series `c` raised to the power `pow`. The
argument `c` is a sequence of coefficients ordered from low to high.
i.e., [1,2,3] is the series ``P_0 + 2*P_1 + 3*P_2.``
Parameters
----------
c : array_like
1-D array of Hermite series coefficients ordered from low to
high.
pow : integer
Power to which the series will be raised
maxpower : integer, optional
Maximum power allowed. This is mainly to limit growth of the series
to unmanageable size. Default is 16
Returns
-------
coef : ndarray
Hermite series of power.
See Also
--------
hermeadd, hermesub, hermemul, hermediv
Examples
--------
>>> from numpy.polynomial.hermite_e import hermepow
>>> hermepow([1, 2, 3], 2)
array([ 23., 28., 46., 12., 9.])
"""
# c is a trimmed copy
[c] = pu.as_series([c])
power = int(pow)
if power != pow or power < 0:
raise ValueError("Power must be a non-negative integer.")
elif maxpower is not None and power > maxpower:
raise ValueError("Power is too large")
elif power == 0:
return np.array([1], dtype=c.dtype)
elif power == 1:
return c
else:
# This can be made more efficient by using powers of two
# in the usual way.
prd = c
for i in range(2, power + 1):
prd = hermemul(prd, c)
return prd
def hermeder(c, m=1, scl=1, axis=0):
"""
Differentiate a Hermite_e series.
Returns the series coefficients `c` differentiated `m` times along
`axis`. At each iteration the result is multiplied by `scl` (the
scaling factor is for use in a linear change of variable). The argument
`c` is an array of coefficients from low to high degree along each
axis, e.g., [1,2,3] represents the series ``1*He_0 + 2*He_1 + 3*He_2``
while [[1,2],[1,2]] represents ``1*He_0(x)*He_0(y) + 1*He_1(x)*He_0(y)
+ 2*He_0(x)*He_1(y) + 2*He_1(x)*He_1(y)`` if axis=0 is ``x`` and axis=1
is ``y``.
Parameters
----------
c : array_like
Array of Hermite_e series coefficients. If `c` is multidimensional
the different axis correspond to different variables with the
degree in each axis given by the corresponding index.
m : int, optional
Number of derivatives taken, must be non-negative. (Default: 1)
scl : scalar, optional
Each differentiation is multiplied by `scl`. The end result is
multiplication by ``scl**m``. This is for use in a linear change of
variable. (Default: 1)
axis : int, optional
Axis over which the derivative is taken. (Default: 0).
.. versionadded:: 1.7.0
Returns
-------
der : ndarray
Hermite series of the derivative.
See Also
--------
hermeint
Notes
-----
In general, the result of differentiating a Hermite series does not
resemble the same operation on a power series. Thus the result of this
function may be "unintuitive," albeit correct; see Examples section
below.
Examples
--------
>>> from numpy.polynomial.hermite_e import hermeder
>>> hermeder([ 1., 1., 1., 1.])
array([ 1., 2., 3.])
>>> hermeder([-0.25, 1., 1./2., 1./3., 1./4 ], m=2)
array([ 1., 2., 3.])
"""
c = np.array(c, ndmin=1, copy=1)
if c.dtype.char in '?bBhHiIlLqQpP':
c = c.astype(np.double)
cnt, iaxis = [int(t) for t in [m, axis]]
if cnt != m:
raise ValueError("The order of derivation must be integer")
if cnt < 0:
raise ValueError("The order of derivation must be non-negative")
if iaxis != axis:
raise ValueError("The axis must be integer")
if not -c.ndim <= iaxis < c.ndim:
raise ValueError("The axis is out of range")
if iaxis < 0:
iaxis += c.ndim
if cnt == 0:
return c
c = np.rollaxis(c, iaxis)
n = len(c)
if cnt >= n:
return c[:1]*0
else:
for i in range(cnt):
n = n - 1
c *= scl
der = np.empty((n,) + c.shape[1:], dtype=c.dtype)
for j in range(n, 0, -1):
der[j - 1] = j*c[j]
c = der
c = np.rollaxis(c, 0, iaxis + 1)
return c
def hermeint(c, m=1, k=[], lbnd=0, scl=1, axis=0):
"""
Integrate a Hermite_e series.
Returns the Hermite_e series coefficients `c` integrated `m` times from
`lbnd` along `axis`. At each iteration the resulting series is
**multiplied** by `scl` and an integration constant, `k`, is added.
The scaling factor is for use in a linear change of variable. ("Buyer
beware": note that, depending on what one is doing, one may want `scl`
to be the reciprocal of what one might expect; for more information,
see the Notes section below.) The argument `c` is an array of
coefficients from low to high degree along each axis, e.g., [1,2,3]
represents the series ``H_0 + 2*H_1 + 3*H_2`` while [[1,2],[1,2]]
represents ``1*H_0(x)*H_0(y) + 1*H_1(x)*H_0(y) + 2*H_0(x)*H_1(y) +
2*H_1(x)*H_1(y)`` if axis=0 is ``x`` and axis=1 is ``y``.
Parameters
----------
c : array_like
Array of Hermite_e series coefficients. If c is multidimensional
the different axis correspond to different variables with the
degree in each axis given by the corresponding index.
m : int, optional
Order of integration, must be positive. (Default: 1)
k : {[], list, scalar}, optional
Integration constant(s). The value of the first integral at
``lbnd`` is the first value in the list, the value of the second
integral at ``lbnd`` is the second value, etc. If ``k == []`` (the
default), all constants are set to zero. If ``m == 1``, a single
scalar can be given instead of a list.
lbnd : scalar, optional
The lower bound of the integral. (Default: 0)
scl : scalar, optional
Following each integration the result is *multiplied* by `scl`
before the integration constant is added. (Default: 1)
axis : int, optional
Axis over which the integral is taken. (Default: 0).
.. versionadded:: 1.7.0
Returns
-------
S : ndarray
Hermite_e series coefficients of the integral.
Raises
------
ValueError
If ``m < 0``, ``len(k) > m``, ``np.isscalar(lbnd) == False``, or
``np.isscalar(scl) == False``.
See Also
--------
hermeder
Notes
-----
Note that the result of each integration is *multiplied* by `scl`.
Why is this important to note? Say one is making a linear change of
variable :math:`u = ax + b` in an integral relative to `x`. Then
    :math:`dx = du/a`, so one will need to set `scl` equal to
:math:`1/a` - perhaps not what one would have first thought.
Also note that, in general, the result of integrating a C-series needs
to be "reprojected" onto the C-series basis set. Thus, typically,
the result of this function is "unintuitive," albeit correct; see
Examples section below.
Examples
--------
>>> from numpy.polynomial.hermite_e import hermeint
>>> hermeint([1, 2, 3]) # integrate once, value 0 at 0.
array([ 1., 1., 1., 1.])
>>> hermeint([1, 2, 3], m=2) # integrate twice, value & deriv 0 at 0
array([-0.25 , 1. , 0.5 , 0.33333333, 0.25 ])
>>> hermeint([1, 2, 3], k=1) # integrate once, value 1 at 0.
array([ 2., 1., 1., 1.])
>>> hermeint([1, 2, 3], lbnd=-1) # integrate once, value 0 at -1
array([-1., 1., 1., 1.])
>>> hermeint([1, 2, 3], m=2, k=[1, 2], lbnd=-1)
array([ 1.83333333, 0. , 0.5 , 0.33333333, 0.25 ])
"""
c = np.array(c, ndmin=1, copy=1)
if c.dtype.char in '?bBhHiIlLqQpP':
c = c.astype(np.double)
if not np.iterable(k):
k = [k]
cnt, iaxis = [int(t) for t in [m, axis]]
if cnt != m:
raise ValueError("The order of integration must be integer")
if cnt < 0:
raise ValueError("The order of integration must be non-negative")
if len(k) > cnt:
raise ValueError("Too many integration constants")
if iaxis != axis:
raise ValueError("The axis must be integer")
if not -c.ndim <= iaxis < c.ndim:
raise ValueError("The axis is out of range")
if iaxis < 0:
iaxis += c.ndim
if cnt == 0:
return c
c = np.rollaxis(c, iaxis)
k = list(k) + [0]*(cnt - len(k))
for i in range(cnt):
n = len(c)
c *= scl
if n == 1 and np.all(c[0] == 0):
c[0] += k[i]
else:
tmp = np.empty((n + 1,) + c.shape[1:], dtype=c.dtype)
tmp[0] = c[0]*0
tmp[1] = c[0]
for j in range(1, n):
tmp[j + 1] = c[j]/(j + 1)
tmp[0] += k[i] - hermeval(lbnd, tmp)
c = tmp
c = np.rollaxis(c, 0, iaxis + 1)
return c
def hermeval(x, c, tensor=True):
"""
Evaluate an HermiteE series at points x.
If `c` is of length `n + 1`, this function returns the value:
.. math:: p(x) = c_0 * He_0(x) + c_1 * He_1(x) + ... + c_n * He_n(x)
The parameter `x` is converted to an array only if it is a tuple or a
list, otherwise it is treated as a scalar. In either case, either `x`
or its elements must support multiplication and addition both with
themselves and with the elements of `c`.
If `c` is a 1-D array, then `p(x)` will have the same shape as `x`. If
`c` is multidimensional, then the shape of the result depends on the
value of `tensor`. If `tensor` is true the shape will be c.shape[1:] +
x.shape. If `tensor` is false the shape will be c.shape[1:]. Note that
scalars have shape (,).
Trailing zeros in the coefficients will be used in the evaluation, so
they should be avoided if efficiency is a concern.
Parameters
----------
x : array_like, compatible object
If `x` is a list or tuple, it is converted to an ndarray, otherwise
it is left unchanged and treated as a scalar. In either case, `x`
        or its elements must support addition and multiplication with
        themselves and with the elements of `c`.
c : array_like
Array of coefficients ordered so that the coefficients for terms of
degree n are contained in c[n]. If `c` is multidimensional the
remaining indices enumerate multiple polynomials. In the two
dimensional case the coefficients may be thought of as stored in
the columns of `c`.
tensor : boolean, optional
If True, the shape of the coefficient array is extended with ones
on the right, one for each dimension of `x`. Scalars have dimension 0
for this action. The result is that every column of coefficients in
`c` is evaluated for every element of `x`. If False, `x` is broadcast
over the columns of `c` for the evaluation. This keyword is useful
when `c` is multidimensional. The default value is True.
.. versionadded:: 1.7.0
Returns
-------
values : ndarray, algebra_like
The shape of the return value is described above.
See Also
--------
hermeval2d, hermegrid2d, hermeval3d, hermegrid3d
Notes
-----
The evaluation uses Clenshaw recursion, aka synthetic division.
Examples
--------
>>> from numpy.polynomial.hermite_e import hermeval
>>> coef = [1,2,3]
>>> hermeval(1, coef)
3.0
>>> hermeval([[1,2],[3,4]], coef)
array([[ 3., 14.],
[ 31., 54.]])
"""
c = np.array(c, ndmin=1, copy=0)
if c.dtype.char in '?bBhHiIlLqQpP':
c = c.astype(np.double)
if isinstance(x, (tuple, list)):
x = np.asarray(x)
if isinstance(x, np.ndarray) and tensor:
c = c.reshape(c.shape + (1,)*x.ndim)
if len(c) == 1:
c0 = c[0]
c1 = 0
elif len(c) == 2:
c0 = c[0]
c1 = c[1]
else:
nd = len(c)
c0 = c[-2]
c1 = c[-1]
for i in range(3, len(c) + 1):
tmp = c0
nd = nd - 1
c0 = c[-i] - c1*(nd - 1)
c1 = tmp + c1*x
return c0 + c1*x
def hermeval2d(x, y, c):
"""
Evaluate a 2-D HermiteE series at points (x, y).
This function returns the values:
.. math:: p(x,y) = \\sum_{i,j} c_{i,j} * He_i(x) * He_j(y)
The parameters `x` and `y` are converted to arrays only if they are
tuples or a lists, otherwise they are treated as a scalars and they
must have the same shape after conversion. In either case, either `x`
and `y` or their elements must support multiplication and addition both
with themselves and with the elements of `c`.
If `c` is a 1-D array a one is implicitly appended to its shape to make
it 2-D. The shape of the result will be c.shape[2:] + x.shape.
Parameters
----------
x, y : array_like, compatible objects
The two dimensional series is evaluated at the points `(x, y)`,
where `x` and `y` must have the same shape. If `x` or `y` is a list
or tuple, it is first converted to an ndarray, otherwise it is left
unchanged and if it isn't an ndarray it is treated as a scalar.
c : array_like
Array of coefficients ordered so that the coefficient of the term
of multi-degree i,j is contained in ``c[i,j]``. If `c` has
dimension greater than two the remaining indices enumerate multiple
sets of coefficients.
Returns
-------
values : ndarray, compatible object
The values of the two dimensional polynomial at points formed with
pairs of corresponding values from `x` and `y`.
See Also
--------
hermeval, hermegrid2d, hermeval3d, hermegrid3d
Notes
-----
.. versionadded::1.7.0
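    Examples
    --------
    A minimal illustrative sketch with made-up coefficients; ``c[i, j]``
    multiplies ``He_i(x)*He_j(y)``, so the value at ``(0, 2)`` is
    ``1 + 2*2 = 5``.
    >>> from numpy.polynomial.hermite_e import hermeval2d
    >>> c = [[1., 2.], [3., 4.]]
    >>> hermeval2d(0., 2., c)
    5.0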
"""
try:
x, y = np.array((x, y), copy=0)
except:
raise ValueError('x, y are incompatible')
c = hermeval(x, c)
c = hermeval(y, c, tensor=False)
return c
def hermegrid2d(x, y, c):
"""
Evaluate a 2-D HermiteE series on the Cartesian product of x and y.
This function returns the values:
.. math:: p(a,b) = \sum_{i,j} c_{i,j} * H_i(a) * H_j(b)
where the points `(a, b)` consist of all pairs formed by taking
`a` from `x` and `b` from `y`. The resulting points form a grid with
`x` in the first dimension and `y` in the second.
The parameters `x` and `y` are converted to arrays only if they are
tuples or a lists, otherwise they are treated as a scalars. In either
case, either `x` and `y` or their elements must support multiplication
and addition both with themselves and with the elements of `c`.
If `c` has fewer than two dimensions, ones are implicitly appended to
its shape to make it 2-D. The shape of the result will be c.shape[2:] +
x.shape.
Parameters
----------
x, y : array_like, compatible objects
The two dimensional series is evaluated at the points in the
Cartesian product of `x` and `y`. If `x` or `y` is a list or
tuple, it is first converted to an ndarray, otherwise it is left
unchanged and, if it isn't an ndarray, it is treated as a scalar.
c : array_like
Array of coefficients ordered so that the coefficients for terms of
degree i,j are contained in ``c[i,j]``. If `c` has dimension
greater than two the remaining indices enumerate multiple sets of
coefficients.
Returns
-------
values : ndarray, compatible object
The values of the two dimensional polynomial at points in the Cartesian
product of `x` and `y`.
See Also
--------
hermeval, hermeval2d, hermeval3d, hermegrid3d
Notes
-----
.. versionadded::1.7.0
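    Examples
    --------
    A small sketch with made-up coefficients; the result has one value for
    every pair formed from `x` and `y`, so its shape is
    ``x.shape + y.shape``.
    >>> from numpy.polynomial.hermite_e import hermegrid2d
    >>> c = [[1., 2.], [3., 4.]]
    >>> hermegrid2d([0., 1.], [2., 3.], c).shape
    (2, 2)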
"""
c = hermeval(x, c)
c = hermeval(y, c)
return c
def hermeval3d(x, y, z, c):
"""
Evaluate a 3-D Hermite_e series at points (x, y, z).
This function returns the values:
.. math:: p(x,y,z) = \\sum_{i,j,k} c_{i,j,k} * He_i(x) * He_j(y) * He_k(z)
The parameters `x`, `y`, and `z` are converted to arrays only if
they are tuples or a lists, otherwise they are treated as a scalars and
they must have the same shape after conversion. In either case, either
`x`, `y`, and `z` or their elements must support multiplication and
addition both with themselves and with the elements of `c`.
If `c` has fewer than 3 dimensions, ones are implicitly appended to its
shape to make it 3-D. The shape of the result will be c.shape[3:] +
x.shape.
Parameters
----------
x, y, z : array_like, compatible object
The three dimensional series is evaluated at the points
`(x, y, z)`, where `x`, `y`, and `z` must have the same shape. If
any of `x`, `y`, or `z` is a list or tuple, it is first converted
to an ndarray, otherwise it is left unchanged and if it isn't an
ndarray it is treated as a scalar.
c : array_like
Array of coefficients ordered so that the coefficient of the term of
multi-degree i,j,k is contained in ``c[i,j,k]``. If `c` has dimension
greater than 3 the remaining indices enumerate multiple sets of
coefficients.
Returns
-------
values : ndarray, compatible object
The values of the multidimensional polynomial on points formed with
triples of corresponding values from `x`, `y`, and `z`.
See Also
--------
hermeval, hermeval2d, hermegrid2d, hermegrid3d
Notes
-----
.. versionadded::1.7.0
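    Examples
    --------
    A minimal sketch with all coefficients equal to one, so that
    ``p(x, y, z) = (1 + x)*(1 + y)*(1 + z)``.
    >>> from numpy.polynomial.hermite_e import hermeval3d
    >>> c = np.ones((2, 2, 2))
    >>> hermeval3d(1., 2., 3., c)
    24.0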
"""
try:
x, y, z = np.array((x, y, z), copy=0)
except:
raise ValueError('x, y, z are incompatible')
c = hermeval(x, c)
c = hermeval(y, c, tensor=False)
c = hermeval(z, c, tensor=False)
return c
def hermegrid3d(x, y, z, c):
"""
Evaluate a 3-D HermiteE series on the Cartesian product of x, y, and z.
This function returns the values:
.. math:: p(a,b,c) = \\sum_{i,j,k} c_{i,j,k} * He_i(a) * He_j(b) * He_k(c)
where the points `(a, b, c)` consist of all triples formed by taking
`a` from `x`, `b` from `y`, and `c` from `z`. The resulting points form
a grid with `x` in the first dimension, `y` in the second, and `z` in
the third.
The parameters `x`, `y`, and `z` are converted to arrays only if they
are tuples or a lists, otherwise they are treated as a scalars. In
either case, either `x`, `y`, and `z` or their elements must support
multiplication and addition both with themselves and with the elements
of `c`.
If `c` has fewer than three dimensions, ones are implicitly appended to
its shape to make it 3-D. The shape of the result will be c.shape[3:] +
x.shape + y.shape + z.shape.
Parameters
----------
x, y, z : array_like, compatible objects
The three dimensional series is evaluated at the points in the
Cartesian product of `x`, `y`, and `z`. If `x`,`y`, or `z` is a
list or tuple, it is first converted to an ndarray, otherwise it is
left unchanged and, if it isn't an ndarray, it is treated as a
scalar.
c : array_like
Array of coefficients ordered so that the coefficients for terms of
degree i,j are contained in ``c[i,j]``. If `c` has dimension
greater than two the remaining indices enumerate multiple sets of
coefficients.
Returns
-------
values : ndarray, compatible object
The values of the two dimensional polynomial at points in the Cartesian
product of `x` and `y`.
See Also
--------
hermeval, hermeval2d, hermegrid2d, hermeval3d
Notes
-----
.. versionadded::1.7.0
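    Examples
    --------
    A small sketch showing only the output shape, which is
    ``x.shape + y.shape + z.shape``.
    >>> from numpy.polynomial.hermite_e import hermegrid3d
    >>> c = np.ones((2, 2, 2))
    >>> hermegrid3d([0., 1.], [0., 1., 2.], [0., 1.], c).shape
    (2, 3, 2)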
"""
c = hermeval(x, c)
c = hermeval(y, c)
c = hermeval(z, c)
return c
def hermevander(x, deg):
"""Pseudo-Vandermonde matrix of given degree.
Returns the pseudo-Vandermonde matrix of degree `deg` and sample points
`x`. The pseudo-Vandermonde matrix is defined by
.. math:: V[..., i] = He_i(x),
where `0 <= i <= deg`. The leading indices of `V` index the elements of
`x` and the last index is the degree of the HermiteE polynomial.
If `c` is a 1-D array of coefficients of length `n + 1` and `V` is the
array ``V = hermevander(x, n)``, then ``np.dot(V, c)`` and
``hermeval(x, c)`` are the same up to roundoff. This equivalence is
useful both for least squares fitting and for the evaluation of a large
number of HermiteE series of the same degree and sample points.
Parameters
----------
x : array_like
Array of points. The dtype is converted to float64 or complex128
depending on whether any of the elements are complex. If `x` is
scalar it is converted to a 1-D array.
deg : int
Degree of the resulting matrix.
Returns
-------
vander : ndarray
The pseudo-Vandermonde matrix. The shape of the returned matrix is
        ``x.shape + (deg + 1,)``, where the last index is the degree of the
corresponding HermiteE polynomial. The dtype will be the same as
the converted `x`.
Examples
--------
>>> from numpy.polynomial.hermite_e import hermevander
>>> x = np.array([-1, 0, 1])
>>> hermevander(x, 3)
array([[ 1., -1., 0., 2.],
[ 1., 0., -1., -0.],
[ 1., 1., 0., -2.]])
"""
ideg = int(deg)
if ideg != deg:
raise ValueError("deg must be integer")
if ideg < 0:
raise ValueError("deg must be non-negative")
x = np.array(x, copy=0, ndmin=1) + 0.0
dims = (ideg + 1,) + x.shape
dtyp = x.dtype
v = np.empty(dims, dtype=dtyp)
v[0] = x*0 + 1
if ideg > 0:
v[1] = x
for i in range(2, ideg + 1):
v[i] = (v[i-1]*x - v[i-2]*(i - 1))
return np.rollaxis(v, 0, v.ndim)
def hermevander2d(x, y, deg):
"""Pseudo-Vandermonde matrix of given degrees.
Returns the pseudo-Vandermonde matrix of degrees `deg` and sample
points `(x, y)`. The pseudo-Vandermonde matrix is defined by
.. math:: V[..., deg[1]*i + j] = He_i(x) * He_j(y),
where `0 <= i <= deg[0]` and `0 <= j <= deg[1]`. The leading indices of
`V` index the points `(x, y)` and the last index encodes the degrees of
the HermiteE polynomials.
If ``V = hermevander2d(x, y, [xdeg, ydeg])``, then the columns of `V`
correspond to the elements of a 2-D coefficient array `c` of shape
(xdeg + 1, ydeg + 1) in the order
.. math:: c_{00}, c_{01}, c_{02} ... , c_{10}, c_{11}, c_{12} ...
and ``np.dot(V, c.flat)`` and ``hermeval2d(x, y, c)`` will be the same
up to roundoff. This equivalence is useful both for least squares
fitting and for the evaluation of a large number of 2-D HermiteE
series of the same degrees and sample points.
Parameters
----------
x, y : array_like
Arrays of point coordinates, all of the same shape. The dtypes
will be converted to either float64 or complex128 depending on
whether any of the elements are complex. Scalars are converted to
1-D arrays.
deg : list of ints
List of maximum degrees of the form [x_deg, y_deg].
Returns
-------
vander2d : ndarray
The shape of the returned matrix is ``x.shape + (order,)``, where
        :math:`order = (deg[0]+1)*(deg[1]+1)`. The dtype will be the same
as the converted `x` and `y`.
See Also
--------
    hermevander, hermevander3d, hermeval2d, hermeval3d
Notes
-----
.. versionadded::1.7.0
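    Examples
    --------
    A minimal sketch with made-up points; for degrees ``[1, 2]`` the matrix
    has ``(1 + 1)*(2 + 1) = 6`` columns.
    >>> from numpy.polynomial.hermite_e import hermevander2d
    >>> x = np.array([-1., 0., 1.])
    >>> y = np.array([0., 1., 2.])
    >>> hermevander2d(x, y, [1, 2]).shape
    (3, 6)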
"""
ideg = [int(d) for d in deg]
is_valid = [id == d and id >= 0 for id, d in zip(ideg, deg)]
if is_valid != [1, 1]:
raise ValueError("degrees must be non-negative integers")
degx, degy = ideg
x, y = np.array((x, y), copy=0) + 0.0
vx = hermevander(x, degx)
vy = hermevander(y, degy)
v = vx[..., None]*vy[..., None,:]
return v.reshape(v.shape[:-2] + (-1,))
def hermevander3d(x, y, z, deg):
"""Pseudo-Vandermonde matrix of given degrees.
Returns the pseudo-Vandermonde matrix of degrees `deg` and sample
points `(x, y, z)`. If `l, m, n` are the given degrees in `x, y, z`,
    then the pseudo-Vandermonde matrix is defined by
.. math:: V[..., (m+1)(n+1)i + (n+1)j + k] = He_i(x)*He_j(y)*He_k(z),
    where `0 <= i <= l`, `0 <= j <= m`, and `0 <= k <= n`. The leading
indices of `V` index the points `(x, y, z)` and the last index encodes
the degrees of the HermiteE polynomials.
If ``V = hermevander3d(x, y, z, [xdeg, ydeg, zdeg])``, then the columns
of `V` correspond to the elements of a 3-D coefficient array `c` of
shape (xdeg + 1, ydeg + 1, zdeg + 1) in the order
.. math:: c_{000}, c_{001}, c_{002},... , c_{010}, c_{011}, c_{012},...
and ``np.dot(V, c.flat)`` and ``hermeval3d(x, y, z, c)`` will be the
same up to roundoff. This equivalence is useful both for least squares
fitting and for the evaluation of a large number of 3-D HermiteE
series of the same degrees and sample points.
Parameters
----------
x, y, z : array_like
Arrays of point coordinates, all of the same shape. The dtypes will
be converted to either float64 or complex128 depending on whether
any of the elements are complex. Scalars are converted to 1-D
arrays.
deg : list of ints
List of maximum degrees of the form [x_deg, y_deg, z_deg].
Returns
-------
vander3d : ndarray
The shape of the returned matrix is ``x.shape + (order,)``, where
        :math:`order = (deg[0]+1)*(deg[1]+1)*(deg[2]+1)`. The dtype will
be the same as the converted `x`, `y`, and `z`.
See Also
--------
    hermevander, hermevander2d, hermeval2d, hermeval3d
Notes
-----
.. versionadded::1.7.0
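    Examples
    --------
    A minimal sketch with made-up points; for degrees ``[1, 1, 2]`` the
    matrix has ``2*2*3 = 12`` columns.
    >>> from numpy.polynomial.hermite_e import hermevander3d
    >>> x = y = z = np.array([-1., 0., 1.])
    >>> hermevander3d(x, y, z, [1, 1, 2]).shape
    (3, 12)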
"""
ideg = [int(d) for d in deg]
is_valid = [id == d and id >= 0 for id, d in zip(ideg, deg)]
if is_valid != [1, 1, 1]:
raise ValueError("degrees must be non-negative integers")
degx, degy, degz = ideg
x, y, z = np.array((x, y, z), copy=0) + 0.0
vx = hermevander(x, degx)
vy = hermevander(y, degy)
vz = hermevander(z, degz)
v = vx[..., None, None]*vy[..., None,:, None]*vz[..., None, None,:]
return v.reshape(v.shape[:-3] + (-1,))
def hermefit(x, y, deg, rcond=None, full=False, w=None):
"""
Least squares fit of Hermite series to data.
Return the coefficients of a HermiteE series of degree `deg` that is
the least squares fit to the data values `y` given at points `x`. If
`y` is 1-D the returned coefficients will also be 1-D. If `y` is 2-D
multiple fits are done, one for each column of `y`, and the resulting
coefficients are stored in the corresponding columns of a 2-D return.
The fitted polynomial(s) are in the form
.. math:: p(x) = c_0 + c_1 * He_1(x) + ... + c_n * He_n(x),
where `n` is `deg`.
Parameters
----------
x : array_like, shape (M,)
x-coordinates of the M sample points ``(x[i], y[i])``.
y : array_like, shape (M,) or (M, K)
y-coordinates of the sample points. Several data sets of sample
points sharing the same x-coordinates can be fitted at once by
passing in a 2D-array that contains one dataset per column.
deg : int
Degree of the fitting polynomial
rcond : float, optional
Relative condition number of the fit. Singular values smaller than
this relative to the largest singular value will be ignored. The
default value is len(x)*eps, where eps is the relative precision of
the float type, about 2e-16 in most cases.
full : bool, optional
Switch determining nature of return value. When it is False (the
default) just the coefficients are returned, when True diagnostic
information from the singular value decomposition is also returned.
w : array_like, shape (`M`,), optional
Weights. If not None, the contribution of each point
``(x[i],y[i])`` to the fit is weighted by `w[i]`. Ideally the
weights are chosen so that the errors of the products ``w[i]*y[i]``
all have the same variance. The default value is None.
Returns
-------
    coef : ndarray, shape (deg + 1,) or (deg + 1, K)
Hermite coefficients ordered from low to high. If `y` was 2-D,
the coefficients for the data in column k of `y` are in column
`k`.
[residuals, rank, singular_values, rcond] : list
These values are only returned if `full` = True
resid -- sum of squared residuals of the least squares fit
rank -- the numerical rank of the scaled Vandermonde matrix
sv -- singular values of the scaled Vandermonde matrix
rcond -- value of `rcond`.
For more details, see `linalg.lstsq`.
Warns
-----
RankWarning
The rank of the coefficient matrix in the least-squares fit is
deficient. The warning is only raised if `full` = False. The
warnings can be turned off by
>>> import warnings
>>> warnings.simplefilter('ignore', RankWarning)
See Also
--------
    chebfit, legfit, polyfit, hermfit
hermeval : Evaluates a Hermite series.
hermevander : pseudo Vandermonde matrix of Hermite series.
hermeweight : HermiteE weight function.
linalg.lstsq : Computes a least-squares fit from the matrix.
scipy.interpolate.UnivariateSpline : Computes spline fits.
Notes
-----
The solution is the coefficients of the HermiteE series `p` that
minimizes the sum of the weighted squared errors
.. math:: E = \\sum_j w_j^2 * |y_j - p(x_j)|^2,
where the :math:`w_j` are the weights. This problem is solved by
setting up the (typically) overdetermined matrix equation
.. math:: V(x) * c = w * y,
where `V` is the pseudo Vandermonde matrix of `x`, the elements of `c`
are the coefficients to be solved for, and the elements of `y` are the
observed values. This equation is then solved using the singular value
decomposition of `V`.
If some of the singular values of `V` are so small that they are
neglected, then a `RankWarning` will be issued. This means that the
coefficient values may be poorly determined. Using a lower order fit
will usually get rid of the warning. The `rcond` parameter can also be
set to a value smaller than its default, but the resulting fit may be
spurious and have large contributions from roundoff error.
Fits using HermiteE series are probably most useful when the data can
be approximated by ``sqrt(w(x)) * p(x)``, where `w(x)` is the HermiteE
    weight. In that case the weight ``sqrt(w(x[i]))`` should be used
    together with data values ``y[i]/sqrt(w(x[i]))``. The weight function is
available as `hermeweight`.
References
----------
.. [1] Wikipedia, "Curve fitting",
http://en.wikipedia.org/wiki/Curve_fitting
Examples
--------
    >>> from numpy.polynomial.hermite_e import hermefit, hermeval
>>> x = np.linspace(-10, 10)
>>> err = np.random.randn(len(x))/10
>>> y = hermeval(x, [1, 2, 3]) + err
>>> hermefit(x, y, 2)
array([ 1.01690445, 1.99951418, 2.99948696])
"""
order = int(deg) + 1
x = np.asarray(x) + 0.0
y = np.asarray(y) + 0.0
# check arguments.
if deg < 0:
raise ValueError("expected deg >= 0")
if x.ndim != 1:
raise TypeError("expected 1D vector for x")
if x.size == 0:
raise TypeError("expected non-empty vector for x")
if y.ndim < 1 or y.ndim > 2:
raise TypeError("expected 1D or 2D array for y")
if len(x) != len(y):
raise TypeError("expected x and y to have same length")
# set up the least squares matrices in transposed form
lhs = hermevander(x, deg).T
rhs = y.T
if w is not None:
w = np.asarray(w) + 0.0
if w.ndim != 1:
raise TypeError("expected 1D vector for w")
if len(x) != len(w):
raise TypeError("expected x and w to have same length")
# apply weights. Don't use inplace operations as they
# can cause problems with NA.
lhs = lhs * w
rhs = rhs * w
# set rcond
if rcond is None:
rcond = len(x)*np.finfo(x.dtype).eps
# Determine the norms of the design matrix columns.
if issubclass(lhs.dtype.type, np.complexfloating):
scl = np.sqrt((np.square(lhs.real) + np.square(lhs.imag)).sum(1))
else:
scl = np.sqrt(np.square(lhs).sum(1))
scl[scl == 0] = 1
# Solve the least squares problem.
c, resids, rank, s = la.lstsq(lhs.T/scl, rhs.T, rcond)
c = (c.T/scl).T
# warn on rank reduction
if rank != order and not full:
msg = "The fit may be poorly conditioned"
warnings.warn(msg, pu.RankWarning)
if full:
return c, [resids, rank, s, rcond]
else:
return c
def hermecompanion(c):
"""
Return the scaled companion matrix of c.
The basis polynomials are scaled so that the companion matrix is
symmetric when `c` is an HermiteE basis polynomial. This provides
better eigenvalue estimates than the unscaled case and for basis
polynomials the eigenvalues are guaranteed to be real if
`numpy.linalg.eigvalsh` is used to obtain them.
Parameters
----------
c : array_like
1-D array of HermiteE series coefficients ordered from low to high
degree.
Returns
-------
mat : ndarray
Scaled companion matrix of dimensions (deg, deg).
Notes
-----
.. versionadded::1.7.0
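    Examples
    --------
    A small sketch: the companion matrix of ``He_2`` (coefficients
    ``[0, 0, 1]``) has the roots of ``He_2``, namely -1 and 1, as its
    eigenvalues.
    >>> from numpy.polynomial.hermite_e import hermecompanion
    >>> m = hermecompanion([0, 0, 1])
    >>> np.allclose(np.linalg.eigvalsh(m), [-1., 1.])
    True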
"""
# c is a trimmed copy
[c] = pu.as_series([c])
if len(c) < 2:
raise ValueError('Series must have maximum degree of at least 1.')
if len(c) == 2:
return np.array([[-c[0]/c[1]]])
n = len(c) - 1
mat = np.zeros((n, n), dtype=c.dtype)
scl = np.hstack((1., 1./np.sqrt(np.arange(n - 1, 0, -1))))
scl = np.multiply.accumulate(scl)[::-1]
top = mat.reshape(-1)[1::n+1]
bot = mat.reshape(-1)[n::n+1]
top[...] = np.sqrt(np.arange(1, n))
bot[...] = top
mat[:, -1] -= scl*c[:-1]/c[-1]
return mat
def hermeroots(c):
"""
Compute the roots of a HermiteE series.
Return the roots (a.k.a. "zeros") of the polynomial
.. math:: p(x) = \\sum_i c[i] * He_i(x).
Parameters
----------
c : 1-D array_like
1-D array of coefficients.
Returns
-------
out : ndarray
Array of the roots of the series. If all the roots are real,
then `out` is also real, otherwise it is complex.
See Also
--------
polyroots, legroots, lagroots, hermroots, chebroots
Notes
-----
The root estimates are obtained as the eigenvalues of the companion
    matrix. Roots far from the origin of the complex plane may have large
errors due to the numerical instability of the series for such
values. Roots with multiplicity greater than 1 will also show larger
errors as the value of the series near such points is relatively
insensitive to errors in the roots. Isolated roots near the origin can
be improved by a few iterations of Newton's method.
The HermiteE series basis polynomials aren't powers of `x` so the
results of this function may seem unintuitive.
Examples
--------
>>> from numpy.polynomial.hermite_e import hermeroots, hermefromroots
>>> coef = hermefromroots([-1, 0, 1])
>>> coef
array([ 0., 2., 0., 1.])
>>> hermeroots(coef)
array([-1., 0., 1.])
"""
# c is a trimmed copy
[c] = pu.as_series([c])
if len(c) <= 1:
return np.array([], dtype=c.dtype)
if len(c) == 2:
return np.array([-c[0]/c[1]])
m = hermecompanion(c)
r = la.eigvals(m)
r.sort()
return r
def _normed_hermite_e_n(x, n):
"""
Evaluate a normalized HermiteE polynomial.
Compute the value of the normalized HermiteE polynomial of degree ``n``
at the points ``x``.
Parameters
----------
x : ndarray of double.
Points at which to evaluate the function
n : int
Degree of the normalized HermiteE function to be evaluated.
Returns
-------
values : ndarray
        The computed values, with the same shape as ``x``.
Notes
-----
.. versionadded:: 1.10.0
This function is needed for finding the Gauss points and integration
weights for high degrees. The values of the standard HermiteE functions
overflow when n >= 207.
"""
if n == 0:
return np.ones(x.shape)/np.sqrt(np.sqrt(2*np.pi))
c0 = 0.
c1 = 1./np.sqrt(np.sqrt(2*np.pi))
nd = float(n)
for i in range(n - 1):
tmp = c0
c0 = -c1*np.sqrt((nd - 1.)/nd)
c1 = tmp + c1*x*np.sqrt(1./nd)
nd = nd - 1.0
return c0 + c1*x
def hermegauss(deg):
"""
Gauss-HermiteE quadrature.
Computes the sample points and weights for Gauss-HermiteE quadrature.
These sample points and weights will correctly integrate polynomials of
    degree :math:`2*deg - 1` or less over the interval :math:`[-\infty, \infty]`
with the weight function :math:`f(x) = \exp(-x^2/2)`.
Parameters
----------
deg : int
Number of sample points and weights. It must be >= 1.
Returns
-------
x : ndarray
1-D ndarray containing the sample points.
    w : ndarray
1-D ndarray containing the weights.
Notes
-----
.. versionadded::1.7.0
The results have only been tested up to degree 100, higher degrees may
be problematic. The weights are determined by using the fact that
.. math:: w_k = c / (He'_n(x_k) * He_{n-1}(x_k))
where :math:`c` is a constant independent of :math:`k` and :math:`x_k`
is the k'th root of :math:`He_n`, and then scaling the results to get
the right value when integrating 1.
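    Examples
    --------
    A small sketch: a three point rule integrates ``x**2`` against the
    HermiteE weight exactly, giving ``sqrt(2*pi)``.
    >>> from numpy.polynomial.hermite_e import hermegauss
    >>> x, w = hermegauss(3)
    >>> np.allclose(np.dot(w, x**2), np.sqrt(2*np.pi))
    True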
"""
ideg = int(deg)
if ideg != deg or ideg < 1:
raise ValueError("deg must be a non-negative integer")
# first approximation of roots. We use the fact that the companion
# matrix is symmetric in this case in order to obtain better zeros.
c = np.array([0]*deg + [1])
m = hermecompanion(c)
x = la.eigvalsh(m)
x.sort()
# improve roots by one application of Newton
dy = _normed_hermite_e_n(x, ideg)
df = _normed_hermite_e_n(x, ideg - 1) * np.sqrt(ideg)
x -= dy/df
# compute the weights. We scale the factor to avoid possible numerical
# overflow.
fm = _normed_hermite_e_n(x, ideg - 1)
fm /= np.abs(fm).max()
w = 1/(fm * fm)
# for Hermite_e we can also symmetrize
w = (w + w[::-1])/2
x = (x - x[::-1])/2
# scale w to get the right value
w *= np.sqrt(2*np.pi) / w.sum()
return x, w
def hermeweight(x):
"""Weight function of the Hermite_e polynomials.
The weight function is :math:`\exp(-x^2/2)` and the interval of
    integration is :math:`[-\infty, \infty]`. The HermiteE polynomials are
orthogonal, but not normalized, with respect to this weight function.
Parameters
----------
x : array_like
Values at which the weight function will be computed.
Returns
-------
w : ndarray
The weight function at `x`.
Notes
-----
.. versionadded::1.7.0
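    Examples
    --------
    A minimal sketch; the weight equals one at the origin and decays like a
    Gaussian away from it.
    >>> from numpy.polynomial.hermite_e import hermeweight
    >>> hermeweight(0.)
    1.0
    >>> np.allclose(hermeweight(2.), np.exp(-2.))
    True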
"""
w = np.exp(-.5*x**2)
return w
#
# HermiteE series class
#
class HermiteE(ABCPolyBase):
"""An HermiteE series class.
The HermiteE class provides the standard Python numerical methods
'+', '-', '*', '//', '%', 'divmod', '**', and '()' as well as the
attributes and methods listed in the `ABCPolyBase` documentation.
Parameters
----------
coef : array_like
        HermiteE coefficients in order of increasing degree, i.e.,
        ``(1, 2, 3)`` gives ``1*He_0(x) + 2*He_1(x) + 3*He_2(x)``.
domain : (2,) array_like, optional
Domain to use. The interval ``[domain[0], domain[1]]`` is mapped
to the interval ``[window[0], window[1]]`` by shifting and scaling.
The default value is [-1, 1].
window : (2,) array_like, optional
Window, see `domain` for its use. The default value is [-1, 1].
.. versionadded:: 1.6.0
"""
# Virtual Functions
_add = staticmethod(hermeadd)
_sub = staticmethod(hermesub)
_mul = staticmethod(hermemul)
_div = staticmethod(hermediv)
_pow = staticmethod(hermepow)
_val = staticmethod(hermeval)
_int = staticmethod(hermeint)
_der = staticmethod(hermeder)
_fit = staticmethod(hermefit)
_line = staticmethod(hermeline)
_roots = staticmethod(hermeroots)
_fromroots = staticmethod(hermefromroots)
# Virtual properties
nickname = 'herme'
domain = np.array(hermedomain)
window = np.array(hermedomain)
| bsd-3-clause | 2,271,353,671,344,061,000 | -4,729,777,818,410,633,000 | 30.247265 | 79 | 0.597164 | false |
yl565/statsmodels | statsmodels/stats/contingency_tables.py | 4 | 43623 | """
Methods for analyzing two-way contingency tables (i.e. frequency
tables for observations that are cross-classified with respect to two
categorical variables).
The main classes are:
* Table : implements methods that can be applied to any two-way
contingency table.
* SquareTable : implements methods that can be applied to a square
two-way contingency table.
* Table2x2 : implements methods that can be applied to a 2x2
contingency table.
* StratifiedTable : implements methods that can be applied to a
collection of contingency tables.
Also contains functions for conducting McNemar's test and Cochran's Q
test.
Note that the inference procedures may depend on how the data were
sampled. In general the observed units are independent and
identically distributed.
"""
from __future__ import division
from statsmodels.tools.decorators import cache_readonly, resettable_cache
import numpy as np
from scipy import stats
import pandas as pd
from statsmodels import iolib
from statsmodels.tools.sm_exceptions import SingularMatrixWarning
def _make_df_square(table):
"""
Reindex a pandas DataFrame so that it becomes square, meaning that
the row and column indices contain the same values, in the same
order. The row and column index are extended to achieve this.
"""
if not isinstance(table, pd.DataFrame):
return table
# If the table is not square, make it square
if table.shape[0] != table.shape[1]:
ix = list(set(table.index) | set(table.columns))
table = table.reindex(ix, axis=0)
table = table.reindex(ix, axis=1)
# Ensures that the rows and columns are in the same order.
table = table.reindex(table.columns)
return table
class _Bunch(object):
def __repr__(self):
return "<bunch object containing statsmodels results>"
class Table(object):
"""
Analyses that can be performed on a two-way contingency table.
Parameters
----------
table : array-like
A contingency table.
shift_zeros : boolean
If True and any cell count is zero, add 0.5 to all values
in the table.
Attributes
----------
table_orig : array-like
The original table is cached as `table_orig`.
marginal_probabilities : tuple of two ndarrays
The estimated row and column marginal distributions.
independence_probabilities : ndarray
Estimated cell probabilities under row/column independence.
fittedvalues : ndarray
Fitted values under independence.
resid_pearson : ndarray
The Pearson residuals under row/column independence.
standardized_resids : ndarray
Residuals for the independent row/column model with approximate
unit variance.
chi2_contribs : ndarray
The contribution of each cell to the chi^2 statistic.
local_logodds_ratios : ndarray
The local log odds ratios are calculated for each 2x2 subtable
formed from adjacent rows and columns.
local_oddsratios : ndarray
The local odds ratios are calculated from each 2x2 subtable
formed from adjacent rows and columns.
cumulative_log_oddsratios : ndarray
The cumulative log odds ratio at a given pair of thresholds is
calculated by reducing the table to a 2x2 table based on
dichotomizing the rows and columns at the given thresholds.
The table of cumulative log odds ratios presents all possible
cumulative log odds ratios that can be formed from a given
table.
cumulative_oddsratios : ndarray
The cumulative odds ratios are calculated by reducing the
table to a 2x2 table based on cutting the rows and columns at
a given point. The table of cumulative odds ratios presents
all possible cumulative odds ratios that can be formed from a
given table.
See also
--------
statsmodels.graphics.mosaicplot.mosaic
scipy.stats.chi2_contingency
Notes
-----
The inference procedures used here are all based on a sampling
model in which the units are independent and identically
distributed, with each unit being classified with respect to two
categorical variables.
References
----------
Definitions of residuals:
https://onlinecourses.science.psu.edu/stat504/node/86
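    Examples
    --------
    A minimal sketch using a small table of made-up counts:
    >>> import numpy as np
    >>> from statsmodels.stats.contingency_tables import Table
    >>> counts = np.asarray([[10, 20, 30], [15, 15, 30]])
    >>> tab = Table(counts)
    >>> rslt = tab.test_nominal_association()
    >>> rslt.df
    2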
"""
def __init__(self, table, shift_zeros=True):
self.table_orig = table
self.table = np.asarray(table, dtype=np.float64)
if shift_zeros and (self.table.min() == 0):
self.table = self.table + 0.5
@classmethod
def from_data(cls, data, shift_zeros=True):
"""
Construct a Table object from data.
Parameters
----------
data : array-like
The raw data, from which a contingency table is constructed
using the first two columns.
shift_zeros : boolean
If True and any cell count is zero, add 0.5 to all values
in the table.
Returns
-------
A Table instance.
"""
if isinstance(data, pd.DataFrame):
table = pd.crosstab(data.iloc[:, 0], data.iloc[:, 1])
else:
table = pd.crosstab(data[:, 0], data[:, 1])
return cls(table, shift_zeros)
def test_nominal_association(self):
"""
Assess independence for nominal factors.
Assessment of independence between rows and columns using
chi^2 testing. The rows and columns are treated as nominal
(unordered) categorical variables.
Returns
-------
A bunch containing the following attributes:
statistic : float
The chi^2 test statistic.
df : integer
The degrees of freedom of the reference distribution
pvalue : float
The p-value for the test.
"""
statistic = np.asarray(self.chi2_contribs).sum()
df = np.prod(np.asarray(self.table.shape) - 1)
pvalue = 1 - stats.chi2.cdf(statistic, df)
b = _Bunch()
b.statistic = statistic
b.df = df
b.pvalue = pvalue
return b
def test_ordinal_association(self, row_scores=None, col_scores=None):
"""
Assess independence between two ordinal variables.
This is the 'linear by linear' association test, which uses
weights or scores to target the test to have more power
against ordered alternatives.
Parameters
----------
row_scores : array-like
An array of numeric row scores
col_scores : array-like
An array of numeric column scores
Returns
-------
A bunch with the following attributes:
statistic : float
The test statistic.
null_mean : float
The expected value of the test statistic under the null
hypothesis.
null_sd : float
The standard deviation of the test statistic under the
null hypothesis.
zscore : float
The Z-score for the test statistic.
pvalue : float
The p-value for the test.
Notes
-----
The scores define the trend to which the test is most sensitive.
Using the default row and column scores gives the
Cochran-Armitage trend test.
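        Examples
        --------
        An illustrative call on a small table of made-up counts, using the
        default equally spaced scores:
        >>> import numpy as np
        >>> from statsmodels.stats.contingency_tables import Table
        >>> tab = Table(np.asarray([[10, 12, 14], [8, 10, 22]]))
        >>> rslt = tab.test_ordinal_association()
        >>> rslt.statistic
        54.0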
"""
if row_scores is None:
row_scores = np.arange(self.table.shape[0])
if col_scores is None:
col_scores = np.arange(self.table.shape[1])
if len(row_scores) != self.table.shape[0]:
raise ValueError("The length of `row_scores` must match the first dimension of `table`.")
if len(col_scores) != self.table.shape[1]:
raise ValueError("The length of `col_scores` must match the second dimension of `table`.")
# The test statistic
statistic = np.dot(row_scores, np.dot(self.table, col_scores))
# Some needed quantities
n_obs = self.table.sum()
rtot = self.table.sum(1)
um = np.dot(row_scores, rtot)
u2m = np.dot(row_scores**2, rtot)
ctot = self.table.sum(0)
vn = np.dot(col_scores, ctot)
v2n = np.dot(col_scores**2, ctot)
# The null mean and variance of the test statistic
e_stat = um * vn / n_obs
v_stat = (u2m - um**2 / n_obs) * (v2n - vn**2 / n_obs) / (n_obs - 1)
sd_stat = np.sqrt(v_stat)
zscore = (statistic - e_stat) / sd_stat
pvalue = 2 * stats.norm.cdf(-np.abs(zscore))
b = _Bunch()
b.statistic = statistic
b.null_mean = e_stat
b.null_sd = sd_stat
b.zscore = zscore
b.pvalue = pvalue
return b
@cache_readonly
def marginal_probabilities(self):
# docstring for cached attributes in init above
n = self.table.sum()
row = self.table.sum(1) / n
col = self.table.sum(0) / n
if isinstance(self.table_orig, pd.DataFrame):
row = pd.Series(row, self.table_orig.index)
col = pd.Series(col, self.table_orig.columns)
return row, col
@cache_readonly
def independence_probabilities(self):
# docstring for cached attributes in init above
row, col = self.marginal_probabilities
itab = np.outer(row, col)
if isinstance(self.table_orig, pd.DataFrame):
itab = pd.DataFrame(itab, self.table_orig.index,
self.table_orig.columns)
return itab
@cache_readonly
def fittedvalues(self):
# docstring for cached attributes in init above
probs = self.independence_probabilities
fit = self.table.sum() * probs
return fit
@cache_readonly
def resid_pearson(self):
# docstring for cached attributes in init above
fit = self.fittedvalues
resids = (self.table - fit) / np.sqrt(fit)
return resids
@cache_readonly
def standardized_resids(self):
# docstring for cached attributes in init above
row, col = self.marginal_probabilities
sresids = self.resid_pearson / np.sqrt(np.outer(1 - row, 1 - col))
return sresids
@cache_readonly
def chi2_contribs(self):
# docstring for cached attributes in init above
return self.resid_pearson**2
@cache_readonly
def local_log_oddsratios(self):
# docstring for cached attributes in init above
ta = self.table.copy()
a = ta[0:-1, 0:-1]
b = ta[0:-1, 1:]
c = ta[1:, 0:-1]
d = ta[1:, 1:]
tab = np.log(a) + np.log(d) - np.log(b) - np.log(c)
rslt = np.empty(self.table.shape, np.float64)
rslt *= np.nan
rslt[0:-1, 0:-1] = tab
if isinstance(self.table_orig, pd.DataFrame):
rslt = pd.DataFrame(rslt, index=self.table_orig.index,
columns=self.table_orig.columns)
return rslt
@cache_readonly
def local_oddsratios(self):
# docstring for cached attributes in init above
return np.exp(self.local_log_oddsratios)
@cache_readonly
def cumulative_log_oddsratios(self):
# docstring for cached attributes in init above
ta = self.table.cumsum(0).cumsum(1)
a = ta[0:-1, 0:-1]
b = ta[0:-1, -1:] - a
c = ta[-1:, 0:-1] - a
d = ta[-1, -1] - (a + b + c)
tab = np.log(a) + np.log(d) - np.log(b) - np.log(c)
rslt = np.empty(self.table.shape, np.float64)
rslt *= np.nan
rslt[0:-1, 0:-1] = tab
if isinstance(self.table_orig, pd.DataFrame):
rslt = pd.DataFrame(rslt, index=self.table_orig.index,
columns=self.table_orig.columns)
return rslt
@cache_readonly
def cumulative_oddsratios(self):
# docstring for cached attributes in init above
return np.exp(self.cumulative_log_oddsratios)
class SquareTable(Table):
"""
Methods for analyzing a square contingency table.
Parameters
----------
table : array-like
A square contingency table, or DataFrame that is converted
to a square form.
shift_zeros : boolean
If True and any cell count is zero, add 0.5 to all values
in the table.
These methods should only be used when the rows and columns of the
table have the same categories. If `table` is provided as a
Pandas DataFrame, the row and column indices will be extended to
create a square table. Otherwise the table should be provided in
a square form, with the (implicit) row and column categories
appearing in the same order.
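    Examples
    --------
    A minimal sketch with a symmetric table of made-up counts, for which
    the Bowker symmetry statistic is zero:
    >>> import numpy as np
    >>> from statsmodels.stats.contingency_tables import SquareTable
    >>> sq = SquareTable(np.asarray([[10, 2, 3], [2, 10, 3], [3, 3, 10]]))
    >>> rslt = sq.symmetry()
    >>> rslt.statistic
    0.0
    >>> rslt.df
    3.0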
"""
def __init__(self, table, shift_zeros=True):
table = _make_df_square(table) # Non-pandas passes through
k1, k2 = table.shape
if k1 != k2:
raise ValueError('table must be square')
super(SquareTable, self).__init__(table, shift_zeros)
def symmetry(self, method="bowker"):
"""
Test for symmetry of a joint distribution.
This procedure tests the null hypothesis that the joint
distribution is symmetric around the main diagonal, that is
        .. math:: p_{i, j} = p_{j, i} \\quad \\text{for all} \\quad i, j
Returns
-------
A bunch with attributes:
statistic : float
chisquare test statistic
        pvalue : float
p-value of the test statistic based on chisquare distribution
df : int
degrees of freedom of the chisquare distribution
Notes
-----
The implementation is based on the SAS documentation. R includes
        it in `mcnemar.test` if the table is not 2 by 2. However, a more
        direct generalization of the McNemar test to larger tables is
        provided by the homogeneity test (`SquareTable.homogeneity`).
The p-value is based on the chi-square distribution which requires
that the sample size is not very small to be a good approximation
of the true distribution. For 2x2 contingency tables the exact
distribution can be obtained with `mcnemar`
See Also
--------
mcnemar
homogeneity
"""
if method.lower() != "bowker":
raise ValueError("method for symmetry testing must be 'bowker'")
k = self.table.shape[0]
upp_idx = np.triu_indices(k, 1)
tril = self.table.T[upp_idx] # lower triangle in column order
triu = self.table[upp_idx] # upper triangle in row order
statistic = ((tril - triu)**2 / (tril + triu + 1e-20)).sum()
df = k * (k-1) / 2.
pvalue = stats.chi2.sf(statistic, df)
b = _Bunch()
b.statistic = statistic
b.pvalue = pvalue
b.df = df
return b
def homogeneity(self, method="stuart_maxwell"):
"""
Compare row and column marginal distributions.
Parameters
----------
method : string
Either 'stuart_maxwell' or 'bhapkar', leading to two different
estimates of the covariance matrix for the estimated
difference between the row margins and the column margins.
        Returns
        -------
        A bunch with attributes:
statistic : float
The chi^2 test statistic
pvalue : float
The p-value of the test statistic
df : integer
The degrees of freedom of the reference distribution
Notes
-----
For a 2x2 table this is equivalent to McNemar's test. More
generally the procedure tests the null hypothesis that the
marginal distribution of the row factor is equal to the
marginal distribution of the column factor. For this to be
meaningful, the two factors must have the same sample space
(i.e. the same categories).
"""
if self.table.shape[0] < 1:
raise ValueError('table is empty')
elif self.table.shape[0] == 1:
b = _Bunch()
b.statistic = 0
b.pvalue = 1
b.df = 0
return b
method = method.lower()
if method not in ["bhapkar", "stuart_maxwell"]:
raise ValueError("method '%s' for homogeneity not known" % method)
n_obs = self.table.sum()
pr = self.table.astype(np.float64) / n_obs
# Compute margins, eliminate last row/column so there is no
# degeneracy
row = pr.sum(1)[0:-1]
col = pr.sum(0)[0:-1]
pr = pr[0:-1, 0:-1]
# The estimated difference between row and column margins.
d = col - row
# The degrees of freedom of the chi^2 reference distribution.
df = pr.shape[0]
if method == "bhapkar":
vmat = -(pr + pr.T) - np.outer(d, d)
dv = col + row - 2*np.diag(pr) - d**2
np.fill_diagonal(vmat, dv)
elif method == "stuart_maxwell":
vmat = -(pr + pr.T)
dv = row + col - 2*np.diag(pr)
np.fill_diagonal(vmat, dv)
try:
statistic = n_obs * np.dot(d, np.linalg.solve(vmat, d))
except np.linalg.LinAlgError:
import warnings
warnings.warn("Unable to invert covariance matrix",
SingularMatrixWarning)
b = _Bunch()
b.statistic = np.nan
b.pvalue = np.nan
b.df = df
return b
pvalue = 1 - stats.chi2.cdf(statistic, df)
b = _Bunch()
b.statistic = statistic
b.pvalue = pvalue
b.df = df
return b
def summary(self, alpha=0.05, float_format="%.3f"):
"""
Produce a summary of the analysis.
Parameters
----------
alpha : float
`1 - alpha` is the nominal coverage probability of the interval.
float_format : string
Used to format numeric values in the table.
"""
fmt = float_format
headers = ["Statistic", "P-value", "DF"]
stubs = ["Symmetry", "Homogeneity"]
sy = self.symmetry()
hm = self.homogeneity()
data = [[fmt % sy.statistic, fmt % sy.pvalue, '%d' % sy.df],
[fmt % hm.statistic, fmt % hm.pvalue, '%d' % hm.df]]
tab = iolib.SimpleTable(data, headers, stubs, data_aligns="r",
table_dec_above='')
return tab
class Table2x2(SquareTable):
"""
Analyses that can be performed on a 2x2 contingency table.
Parameters
----------
table : array-like
A 2x2 contingency table
shift_zeros : boolean
If true, 0.5 is added to all cells of the table if any cell is
equal to zero.
Attributes
----------
log_oddsratio : float
The log odds ratio of the table.
log_oddsratio_se : float
The asymptotic standard error of the estimated log odds ratio.
oddsratio : float
The odds ratio of the table.
riskratio : float
The ratio between the risk in the first row and the risk in
the second row. Column 0 is interpreted as containing the
        number of occurrences of the event of interest.
log_riskratio : float
The estimated log risk ratio for the table.
log_riskratio_se : float
The standard error of the estimated log risk ratio for the
table.
Notes
-----
The inference procedures used here are all based on a sampling
model in which the units are independent and identically
distributed, with each unit being classified with respect to two
categorical variables.
Note that for the risk ratio, the analysis is not symmetric with
respect to the rows and columns of the contingency table. The two
rows define population subgroups, column 0 is the number of
'events', and column 1 is the number of 'non-events'.
"""
def __init__(self, table, shift_zeros=True):
if (table.ndim != 2) or (table.shape[0] != 2) or (table.shape[1] != 2):
raise ValueError("Table2x2 takes a 2x2 table as input.")
super(Table2x2, self).__init__(table, shift_zeros)
@classmethod
def from_data(cls, data, shift_zeros=True):
"""
Construct a Table object from data.
Parameters
----------
data : array-like
The raw data, the first column defines the rows and the
second column defines the columns.
shift_zeros : boolean
If True, and if there are any zeros in the contingency
table, add 0.5 to all four cells of the table.
"""
if isinstance(data, pd.DataFrame):
table = pd.crosstab(data.iloc[:, 0], data.iloc[:, 1])
else:
table = pd.crosstab(data[:, 0], data[:, 1])
return cls(table, shift_zeros)
@cache_readonly
def log_oddsratio(self):
# docstring for cached attributes in init above
f = self.table.flatten()
return np.dot(np.log(f), np.r_[1, -1, -1, 1])
@cache_readonly
def oddsratio(self):
# docstring for cached attributes in init above
return self.table[0, 0] * self.table[1, 1] / (self.table[0, 1] * self.table[1, 0])
@cache_readonly
def log_oddsratio_se(self):
# docstring for cached attributes in init above
return np.sqrt(np.sum(1 / self.table))
def oddsratio_pvalue(self, null=1):
"""
P-value for a hypothesis test about the odds ratio.
Parameters
----------
null : float
The null value of the odds ratio.
"""
return self.log_oddsratio_pvalue(np.log(null))
def log_oddsratio_pvalue(self, null=0):
"""
P-value for a hypothesis test about the log odds ratio.
Parameters
----------
null : float
The null value of the log odds ratio.
"""
zscore = (self.log_oddsratio - null) / self.log_oddsratio_se
pvalue = 2 * stats.norm.cdf(-np.abs(zscore))
return pvalue
def log_oddsratio_confint(self, alpha=0.05, method="normal"):
"""
A confidence interval for the log odds ratio.
Parameters
----------
alpha : float
`1 - alpha` is the nominal coverage probability of the
confidence interval.
method : string
The method for producing the confidence interval. Currently
must be 'normal' which uses the normal approximation.
"""
f = -stats.norm.ppf(alpha / 2)
lor = self.log_oddsratio
se = self.log_oddsratio_se
lcb = lor - f * se
ucb = lor + f * se
return lcb, ucb
def oddsratio_confint(self, alpha=0.05, method="normal"):
"""
A confidence interval for the odds ratio.
Parameters
----------
alpha : float
`1 - alpha` is the nominal coverage probability of the
confidence interval.
method : string
The method for producing the confidence interval. Currently
must be 'normal' which uses the normal approximation.
"""
lcb, ucb = self.log_oddsratio_confint(alpha, method=method)
return np.exp(lcb), np.exp(ucb)
@cache_readonly
def riskratio(self):
# docstring for cached attributes in init above
p = self.table[:, 0] / self.table.sum(1)
return p[0] / p[1]
@cache_readonly
def log_riskratio(self):
# docstring for cached attributes in init above
return np.log(self.riskratio)
@cache_readonly
def log_riskratio_se(self):
# docstring for cached attributes in init above
n = self.table.sum(1)
p = self.table[:, 0] / n
va = np.sum((1 - p) / (n*p))
return np.sqrt(va)
def riskratio_pvalue(self, null=1):
"""
p-value for a hypothesis test about the risk ratio.
Parameters
----------
null : float
The null value of the risk ratio.
"""
return self.log_riskratio_pvalue(np.log(null))
def log_riskratio_pvalue(self, null=0):
"""
p-value for a hypothesis test about the log risk ratio.
Parameters
----------
null : float
The null value of the log risk ratio.
"""
zscore = (self.log_riskratio - null) / self.log_riskratio_se
pvalue = 2 * stats.norm.cdf(-np.abs(zscore))
return pvalue
def log_riskratio_confint(self, alpha=0.05, method="normal"):
"""
A confidence interval for the log risk ratio.
Parameters
----------
alpha : float
`1 - alpha` is the nominal coverage probability of the
confidence interval.
method : string
The method for producing the confidence interval. Currently
must be 'normal' which uses the normal approximation.
"""
f = -stats.norm.ppf(alpha / 2)
lrr = self.log_riskratio
se = self.log_riskratio_se
lcb = lrr - f * se
ucb = lrr + f * se
return lcb, ucb
def riskratio_confint(self, alpha=0.05, method="normal"):
"""
A confidence interval for the risk ratio.
Parameters
----------
alpha : float
`1 - alpha` is the nominal coverage probability of the
confidence interval.
method : string
The method for producing the confidence interval. Currently
must be 'normal' which uses the normal approximation.
"""
lcb, ucb = self.log_riskratio_confint(alpha, method=method)
return np.exp(lcb), np.exp(ucb)
def summary(self, alpha=0.05, float_format="%.3f", method="normal"):
"""
Summarizes results for a 2x2 table analysis.
Parameters
----------
alpha : float
`1 - alpha` is the nominal coverage probability of the confidence
intervals.
float_format : string
Used to format the numeric values in the table.
method : string
The method for producing the confidence interval. Currently
must be 'normal' which uses the normal approximation.
"""
def fmt(x):
if type(x) is str:
return x
return float_format % x
headers = ["Estimate", "SE", "LCB", "UCB", "p-value"]
stubs = ["Odds ratio", "Log odds ratio", "Risk ratio", "Log risk ratio"]
lcb1, ucb1 = self.oddsratio_confint(alpha, method)
lcb2, ucb2 = self.log_oddsratio_confint(alpha, method)
lcb3, ucb3 = self.riskratio_confint(alpha, method)
lcb4, ucb4 = self.log_riskratio_confint(alpha, method)
data = [[fmt(x) for x in [self.oddsratio, "", lcb1, ucb1, self.oddsratio_pvalue()]],
[fmt(x) for x in [self.log_oddsratio, self.log_oddsratio_se, lcb2, ucb2,
self.oddsratio_pvalue()]],
[fmt(x) for x in [self.riskratio, "", lcb3, ucb3, self.riskratio_pvalue()]],
[fmt(x) for x in [self.log_riskratio, self.log_riskratio_se, lcb4, ucb4,
self.riskratio_pvalue()]]]
tab = iolib.SimpleTable(data, headers, stubs, data_aligns="r",
table_dec_above='')
return tab
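# Illustrative usage sketch (added commentary, not part of the original
# module): a 2x2 analysis end-to-end. The counts are invented for the example.
#
#     tab = np.asarray([[25, 9], [12, 20]], dtype=np.float64)
#     t22 = Table2x2(tab, shift_zeros=True)
#     t22.oddsratio, t22.log_oddsratio_se       # point estimate and its SE
#     t22.oddsratio_confint(alpha=0.05)         # normal-approximation CI
#     t22.riskratio_pvalue(null=1)              # test that the risk ratio is 1
#     print(t22.summary(alpha=0.05))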
class StratifiedTable(object):
"""
Analyses for a collection of 2x2 contingency tables.
Such a collection may arise by stratifying a single 2x2 table with
respect to another factor. This class implements the
'Cochran-Mantel-Haenszel' and 'Breslow-Day' procedures for
analyzing collections of 2x2 contingency tables.
Parameters
----------
tables : list or ndarray
Either a list containing several 2x2 contingency tables, or
a 2x2xk ndarray in which each slice along the third axis is a
2x2 contingency table.
Attributes
----------
logodds_pooled : float
An estimate of the pooled log odds ratio. This is the
Mantel-Haenszel estimate of an odds ratio that is common to
all the tables.
logodds_pooled_se : float
The estimated standard error of the pooled log odds ratio,
following Robins, Breslow and Greenland (Biometrics
42:311-323).
oddsratio_pooled : float
An estimate of the pooled odds ratio. This is the
Mantel-Haenszel estimate of an odds ratio that is common to
all tables.
risk_pooled : float
An estimate of the pooled risk ratio. This is an estimate of
a risk ratio that is common to all the tables.
Notes
-----
These results are based on a sampling model in which the units are
independent both within and between strata.
"""
def __init__(self, tables, shift_zeros=False):
if isinstance(tables, np.ndarray):
sp = tables.shape
if (len(sp) != 3) or (sp[0] != 2) or (sp[1] != 2):
raise ValueError("If an ndarray, argument must be 2x2xn")
table = tables
else:
# Create a data cube
table = np.dstack(tables).astype(np.float64)
if shift_zeros:
zx = (table == 0).sum(0).sum(0)
ix = np.flatnonzero(zx > 0)
if len(ix) > 0:
table = table.copy()
table[:, :, ix] += 0.5
self.table = table
self._cache = resettable_cache()
# Quantities to precompute. Table entries are [[a, b], [c,
# d]], 'ad' is 'a * d', 'apb' is 'a + b', 'dma' is 'd - a',
# etc.
self._apb = table[0, 0, :] + table[0, 1, :]
self._apc = table[0, 0, :] + table[1, 0, :]
self._bpd = table[0, 1, :] + table[1, 1, :]
self._cpd = table[1, 0, :] + table[1, 1, :]
self._ad = table[0, 0, :] * table[1, 1, :]
self._bc = table[0, 1, :] * table[1, 0, :]
self._apd = table[0, 0, :] + table[1, 1, :]
self._dma = table[1, 1, :] - table[0, 0, :]
self._n = table.sum(0).sum(0)
@classmethod
def from_data(cls, var1, var2, strata, data):
"""
Construct a StratifiedTable object from data.
Parameters
----------
var1 : int or string
The column index or name of `data` containing the variable
defining the rows of the contingency table. The variable
must have only two distinct values.
var2 : int or string
The column index or name of `data` containing the variable
defining the columns of the contingency table. The variable
must have only two distinct values.
strata : int or string
The column index or name of `data` containing the variable
defining the strata.
data : array-like
The raw data. A cross-table for analysis is constructed
from the first two columns.
Returns
-------
A StratifiedTable instance.
"""
if not isinstance(data, pd.DataFrame):
data1 = pd.DataFrame(index=np.arange(data.shape[0]), columns=[var1, var2, strata])
data1.loc[:, var1] = data[:, var1]
data1.loc[:, var2] = data[:, var2]
data1.loc[:, strata] = data[:, strata]
else:
data1 = data[[var1, var2, strata]]
gb = data1.groupby(strata).groups
tables = []
for g in gb:
ii = gb[g]
tab = pd.crosstab(data1.loc[ii, var1], data1.loc[ii, var2])
tables.append(tab)
return cls(tables)
def test_null_odds(self, correction=False):
"""
Test that all tables have odds ratio equal to 1.
This is the 'Mantel-Haenszel' test.
Parameters
----------
correction : boolean
If True, use the continuity correction when calculating the
test statistic.
Returns
-------
A bunch containing the chi^2 test statistic and p-value.
"""
statistic = np.sum(self.table[0, 0, :] - self._apb * self._apc / self._n)
statistic = np.abs(statistic)
if correction:
statistic -= 0.5
statistic = statistic**2
denom = self._apb * self._apc * self._bpd * self._cpd
denom /= (self._n**2 * (self._n - 1))
denom = np.sum(denom)
statistic /= denom
# df is always 1
pvalue = 1 - stats.chi2.cdf(statistic, 1)
b = _Bunch()
b.statistic = statistic
b.pvalue = pvalue
return b
@cache_readonly
def oddsratio_pooled(self):
# doc for cached attributes in init above
odds_ratio = np.sum(self._ad / self._n) / np.sum(self._bc / self._n)
return odds_ratio
@cache_readonly
def logodds_pooled(self):
# doc for cached attributes in init above
return np.log(self.oddsratio_pooled)
@cache_readonly
def risk_pooled(self):
# doc for cached attributes in init above
acd = self.table[0, 0, :] * self._cpd
cab = self.table[1, 0, :] * self._apb
rr = np.sum(acd / self._n) / np.sum(cab / self._n)
return rr
@cache_readonly
def logodds_pooled_se(self):
# doc for cached attributes in init above
adns = np.sum(self._ad / self._n)
bcns = np.sum(self._bc / self._n)
lor_va = np.sum(self._apd * self._ad / self._n**2) / adns**2
mid = self._apd * self._bc / self._n**2
mid += (1 - self._apd / self._n) * self._ad / self._n
mid = np.sum(mid)
mid /= (adns * bcns)
lor_va += mid
lor_va += np.sum((1 - self._apd / self._n) * self._bc / self._n) / bcns**2
lor_va /= 2
lor_se = np.sqrt(lor_va)
return lor_se
def logodds_pooled_confint(self, alpha=0.05, method="normal"):
"""
A confidence interval for the pooled log odds ratio.
Parameters
----------
alpha : float
`1 - alpha` is the nominal coverage probability of the
interval.
method : string
The method for producing the confidence interval. Currently
must be 'normal' which uses the normal approximation.
Returns
-------
lcb : float
The lower confidence limit.
ucb : float
The upper confidence limit.
"""
lor = np.log(self.oddsratio_pooled)
lor_se = self.logodds_pooled_se
f = -stats.norm.ppf(alpha / 2)
lcb = lor - f * lor_se
ucb = lor + f * lor_se
return lcb, ucb
def oddsratio_pooled_confint(self, alpha=0.05, method="normal"):
"""
A confidence interval for the pooled odds ratio.
Parameters
----------
alpha : float
`1 - alpha` is the nominal coverage probability of the
interval.
method : string
The method for producing the confidence interval. Currently
must be 'normal' which uses the normal approximation.
Returns
-------
lcb : float
The lower confidence limit.
ucb : float
The upper confidence limit.
"""
lcb, ucb = self.logodds_pooled_confint(alpha, method=method)
lcb = np.exp(lcb)
ucb = np.exp(ucb)
return lcb, ucb
def test_equal_odds(self, adjust=False):
"""
Test that all odds ratios are identical.
This is the 'Breslow-Day' testing procedure.
Parameters
----------
adjust : boolean
Use the 'Tarone' adjustment to achieve the chi^2
asymptotic distribution.
Returns
-------
A bunch containing the following attributes:
statistic : float
The chi^2 test statistic.
pvalue : float
The p-value for the test.
"""
table = self.table
r = self.oddsratio_pooled
a = 1 - r
b = r * (self._apb + self._apc) + self._dma
c = -r * self._apb * self._apc
# Expected value of first cell
e11 = (-b + np.sqrt(b**2 - 4*a*c)) / (2*a)
# Variance of the first cell
v11 = 1 / e11 + 1 / (self._apc - e11) + 1 / (self._apb - e11) + 1 / (self._dma + e11)
v11 = 1 / v11
statistic = np.sum((table[0, 0, :] - e11)**2 / v11)
if adjust:
adj = table[0, 0, :].sum() - e11.sum()
adj = adj**2
adj /= np.sum(v11)
statistic -= adj
pvalue = 1 - stats.chi2.cdf(statistic, table.shape[2] - 1)
b = _Bunch()
b.statistic = statistic
b.pvalue = pvalue
return b
def summary(self, alpha=0.05, float_format="%.3f", method="normal"):
"""
A summary of all the main results.
Parameters
----------
alpha : float
`1 - alpha` is the nominal coverage probability of the
confidence intervals.
float_format : string
Used for formatting numeric values in the summary.
method : string
The method for producing the confidence interval. Currently
must be 'normal' which uses the normal approximation.
"""
def fmt(x):
if type(x) is str:
return x
return float_format % x
co_lcb, co_ucb = self.oddsratio_pooled_confint(alpha=alpha, method=method)
clo_lcb, clo_ucb = self.logodds_pooled_confint(alpha=alpha, method=method)
headers = ["Estimate", "LCB", "UCB"]
stubs = ["Pooled odds", "Pooled log odds", "Pooled risk ratio", ""]
data = [[fmt(x) for x in [self.oddsratio_pooled, co_lcb, co_ucb]],
[fmt(x) for x in [self.logodds_pooled, clo_lcb, clo_ucb]],
[fmt(x) for x in [self.risk_pooled, "", ""]],
['', '', '']]
tab1 = iolib.SimpleTable(data, headers, stubs, data_aligns="r",
table_dec_above='')
headers = ["Statistic", "P-value", ""]
stubs = ["Test of OR=1", "Test constant OR"]
rslt1 = self.test_null_odds()
rslt2 = self.test_equal_odds()
data = [[fmt(x) for x in [rslt1.statistic, rslt1.pvalue, ""]],
[fmt(x) for x in [rslt2.statistic, rslt2.pvalue, ""]]]
tab2 = iolib.SimpleTable(data, headers, stubs, data_aligns="r")
tab1.extend(tab2)
headers = ["", "", ""]
stubs = ["Number of tables", "Min n", "Max n", "Avg n", "Total n"]
ss = self.table.sum(0).sum(0)
data = [["%d" % self.table.shape[2], '', ''],
["%d" % min(ss), '', ''],
["%d" % max(ss), '', ''],
["%.0f" % np.mean(ss), '', ''],
["%d" % sum(ss), '', '', '']]
tab3 = iolib.SimpleTable(data, headers, stubs, data_aligns="r")
tab1.extend(tab3)
return tab1
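# Illustrative usage sketch (added commentary, not part of the original
# module): pooling several 2x2 tables and running the Mantel-Haenszel and
# Breslow-Day procedures. The three strata are invented for the example.
#
#     strata = [np.asarray([[10, 4], [3, 12]]),
#               np.asarray([[8, 5], [4, 10]]),
#               np.asarray([[12, 3], [5, 9]])]
#     st = StratifiedTable(strata)
#     st.oddsratio_pooled                   # Mantel-Haenszel common odds ratio
#     st.test_null_odds(correction=True)    # CMH test of OR == 1 across strata
#     st.test_equal_odds(adjust=True)       # Breslow-Day with Tarone adjustment
#     print(st.summary())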
def mcnemar(table, exact=True, correction=True):
"""
McNemar test of homogeneity.
Parameters
----------
table : array-like
A square contingency table.
exact : bool
If exact is true, then the binomial distribution will be used.
If exact is false, then the chisquare distribution will be
used, which is the approximation to the distribution of the
test statistic for large sample sizes.
correction : bool
If true, then a continuity correction is used for the chisquare
distribution (if exact is false.)
Returns
-------
A bunch with attributes:
statistic : float or int, array
The test statistic is the chisquare statistic if exact is
false. If the exact binomial distribution is used, then this
contains the min(n1, n2), where n1, n2 are cases that are zero
in one sample but one in the other sample.
pvalue : float or array
p-value of the null hypothesis of equal marginal distributions.
Notes
-----
This is a special case of Cochran's Q test, and of the homogeneity
test. The results when the chisquare distribution is used are
identical, except for continuity correction.
"""
table = _make_df_square(table)
table = np.asarray(table, dtype=np.float64)
n1, n2 = table[0, 1], table[1, 0]
if exact:
statistic = np.minimum(n1, n2)
# binom is symmetric with p=0.5
pvalue = stats.binom.cdf(statistic, n1 + n2, 0.5) * 2
pvalue = np.minimum(pvalue, 1) # limit to 1 if n1==n2
else:
corr = int(correction) # convert bool to 0 or 1
statistic = (np.abs(n1 - n2) - corr)**2 / (1. * (n1 + n2))
df = 1
pvalue = stats.chi2.sf(statistic, df)
b = _Bunch()
b.statistic = statistic
b.pvalue = pvalue
return b
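# Illustrative usage sketch (added commentary, not part of the original
# module): McNemar's test on paired binary outcomes. The counts are invented;
# only the off-diagonal cells (5 and 15 here) drive the test.
#
#     paired = np.asarray([[30, 5], [15, 40]])
#     exact_res = mcnemar(paired, exact=True)     # binomial reference distribution
#     approx_res = mcnemar(paired, exact=False)   # chi^2 with continuity correction
#     # exact_res.statistic == min(5, 15); approx_res.statistic is the chi^2 value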
def cochrans_q(x, return_object=True):
"""
Cochran's Q test for identical binomial proportions.
Parameters
----------
x : array_like, 2d (N, k)
data with N cases and k variables
return_object : boolean
Return values as bunch instead of as individual values.
Returns
-------
Returns a bunch containing the following attributes, or the
individual values according to the value of `return_object`.
statistic : float
test statistic
pvalue : float
pvalue from the chisquare distribution
Notes
-----
Cochran's Q is a k-sample extension of the McNemar test. If there
are only two groups, then Cochran's Q test and the McNemar test
are equivalent.
The procedure tests that the probability of success is the same
for every group. The alternative hypothesis is that at least two
groups have a different probability of success.
In Wikipedia terminology, rows are blocks and columns are
treatments. The number of rows, N, should be large for the
chisquare distribution to be a good approximation.
The Null hypothesis of the test is that all treatments have the
same effect.
References
----------
http://en.wikipedia.org/wiki/Cochran_test
SAS Manual for NPAR TESTS
"""
x = np.asarray(x, dtype=np.float64)
gruni = np.unique(x)
N, k = x.shape
count_row_success = (x == gruni[-1]).sum(1, float)
count_col_success = (x == gruni[-1]).sum(0, float)
count_row_ss = count_row_success.sum()
count_col_ss = count_col_success.sum()
assert count_row_ss == count_col_ss #just a calculation check
# From the SAS manual
q_stat = (k-1) * (k * np.sum(count_col_success**2) - count_col_ss**2) \
/ (k * count_row_ss - np.sum(count_row_success**2))
# Note: the denominator looks just like k times the variance of
# the columns
# Wikipedia uses a different, but equivalent expression
#q_stat = (k-1) * (k * np.sum(count_row_success**2) - count_row_ss**2) \
# / (k * count_col_ss - np.sum(count_col_success**2))
df = k - 1
pvalue = stats.chi2.sf(q_stat, df)
if return_object:
b = _Bunch()
b.statistic = q_stat
b.df = df
b.pvalue = pvalue
return b
return q_stat, pvalue, df
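# Illustrative usage sketch (added commentary, not part of the original
# module): Cochran's Q on an N x k matrix of 0/1 outcomes, where rows are
# blocks/subjects and columns are treatments. The data are invented.
#
#     x = np.asarray([[1, 1, 0],
#                     [1, 0, 0],
#                     [1, 1, 1],
#                     [0, 1, 0],
#                     [1, 1, 0]])
#     res = cochrans_q(x, return_object=True)
#     # res.statistic, res.df (= k - 1) and res.pvalue summarize the test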
| bsd-3-clause | 8,634,953,560,428,819,000 | -5,313,961,210,848,505,000 | 30.070513 | 102 | 0.577746 | false |
Gui13/CouchPotatoServer | couchpotato/core/media/_base/media/main.py | 2 | 16591 | import traceback
from string import ascii_lowercase
from CodernityDB.database import RecordNotFound
from couchpotato import tryInt, get_db
from couchpotato.api import addApiView
from couchpotato.core.event import fireEvent, fireEventAsync, addEvent
from couchpotato.core.helpers.encoding import toUnicode
from couchpotato.core.helpers.variable import splitString, getImdb, getTitle
from couchpotato.core.logger import CPLog
from couchpotato.core.media import MediaBase
from .index import MediaIndex, MediaStatusIndex, MediaTypeIndex, TitleSearchIndex, TitleIndex, StartsWithIndex, MediaChildrenIndex
log = CPLog(__name__)
class MediaPlugin(MediaBase):
_database = {
'media': MediaIndex,
'media_search_title': TitleSearchIndex,
'media_status': MediaStatusIndex,
'media_by_type': MediaTypeIndex,
'media_title': TitleIndex,
'media_startswith': StartsWithIndex,
'media_children': MediaChildrenIndex,
}
def __init__(self):
addApiView('media.refresh', self.refresh, docs = {
'desc': 'Refresh any media type by ID',
'params': {
'id': {'desc': 'Movie, Show, Season or Episode ID(s) you want to refresh.', 'type': 'int (comma separated)'},
}
})
addApiView('media.list', self.listView, docs = {
'desc': 'List media',
'params': {
'type': {'type': 'string', 'desc': 'Media type to filter on.'},
'status': {'type': 'array or csv', 'desc': 'Filter movie by status. Example:"active,done"'},
'release_status': {'type': 'array or csv', 'desc': 'Filter movie by status of its releases. Example:"snatched,available"'},
'limit_offset': {'desc': 'Limit and offset the movie list. Examples: "50" or "50,30"'},
'starts_with': {'desc': 'Starts with these characters. Example: "a" returns all movies starting with the letter "a"'},
'search': {'desc': 'Search movie title'},
},
'return': {'type': 'object', 'example': """{
'success': True,
'empty': bool, any movies returned or not,
'media': array, media found,
}"""}
})
addApiView('media.get', self.getView, docs = {
'desc': 'Get media by id',
'params': {
'id': {'desc': 'The id of the media'},
}
})
addApiView('media.delete', self.deleteView, docs = {
'desc': 'Delete a media from the wanted list',
'params': {
'id': {'desc': 'Media ID(s) you want to delete.', 'type': 'int (comma separated)'},
'delete_from': {'desc': 'Delete media from this page', 'type': 'string: all (default), wanted, manage'},
}
})
addApiView('media.available_chars', self.charView)
addEvent('app.load', self.addSingleRefreshView, priority = 100)
addEvent('app.load', self.addSingleListView, priority = 100)
addEvent('app.load', self.addSingleCharView, priority = 100)
addEvent('app.load', self.addSingleDeleteView, priority = 100)
addEvent('media.get', self.get)
addEvent('media.with_status', self.withStatus)
addEvent('media.with_identifiers', self.withIdentifiers)
addEvent('media.list', self.list)
addEvent('media.delete', self.delete)
addEvent('media.restatus', self.restatus)
def refresh(self, id = '', **kwargs):
handlers = []
ids = splitString(id)
for x in ids:
refresh_handler = self.createRefreshHandler(x)
if refresh_handler:
handlers.append(refresh_handler)
fireEvent('notify.frontend', type = 'media.busy', data = {'_id': ids})
fireEventAsync('schedule.queue', handlers = handlers)
return {
'success': True,
}
def createRefreshHandler(self, media_id):
try:
media = get_db().get('id', media_id)
event = '%s.update_info' % media.get('type')
def handler():
fireEvent(event, media_id = media_id, on_complete = self.createOnComplete(media_id))
return handler
except:
log.error('Refresh handler for non existing media: %s', traceback.format_exc())
def addSingleRefreshView(self):
for media_type in fireEvent('media.types', merge = True):
addApiView('%s.refresh' % media_type, self.refresh)
def get(self, media_id):
try:
db = get_db()
imdb_id = getImdb(str(media_id))
if imdb_id:
media = db.get('media', 'imdb-%s' % imdb_id, with_doc = True)['doc']
else:
media = db.get('id', media_id)
if media:
# Attach category
try: media['category'] = db.get('id', media.get('category_id'))
except: pass
media['releases'] = fireEvent('release.for_media', media['_id'], single = True)
return media
except RecordNotFound:
log.error('Media with id "%s" not found', media_id)
except:
raise
def getView(self, id = None, **kwargs):
media = self.get(id) if id else None
return {
'success': media is not None,
'media': media,
}
def withStatus(self, status, with_doc = True):
db = get_db()
status = list(status if isinstance(status, (list, tuple)) else [status])
for s in status:
for ms in db.get_many('media_status', s, with_doc = with_doc):
yield ms['doc'] if with_doc else ms
def withIdentifiers(self, identifiers, with_doc = False):
db = get_db()
for x in identifiers:
try:
media = db.get('media', '%s-%s' % (x, identifiers[x]), with_doc = with_doc)
return media
except:
pass
log.debug('No media found with identifiers: %s', identifiers)
def list(self, types = None, status = None, release_status = None, status_or = False, limit_offset = None, starts_with = None, search = None):
db = get_db()
# Make a list from string
if status and not isinstance(status, (list, tuple)):
status = [status]
if release_status and not isinstance(release_status, (list, tuple)):
release_status = [release_status]
if types and not isinstance(types, (list, tuple)):
types = [types]
# query media ids
if types:
all_media_ids = set()
for media_type in types:
all_media_ids = all_media_ids.union(set([x['_id'] for x in db.get_many('media_by_type', media_type)]))
else:
all_media_ids = set([x['_id'] for x in db.all('media')])
media_ids = list(all_media_ids)
filter_by = {}
# Filter on movie status
if status and len(status) > 0:
filter_by['media_status'] = set()
for media_status in fireEvent('media.with_status', status, with_doc = False, single = True):
filter_by['media_status'].add(media_status.get('_id'))
# Filter on release status
if release_status and len(release_status) > 0:
filter_by['release_status'] = set()
for release_status in fireEvent('release.with_status', release_status, with_doc = False, single = True):
filter_by['release_status'].add(release_status.get('media_id'))
# Add search filters
if starts_with:
filter_by['starts_with'] = set()
starts_with = toUnicode(starts_with.lower())[0]
starts_with = starts_with if starts_with in ascii_lowercase else '#'
filter_by['starts_with'] = [x['_id'] for x in db.get_many('media_startswith', starts_with)]
# Filter with search query
if search:
filter_by['search'] = [x['_id'] for x in db.get_many('media_search_title', search)]
if status_or and 'media_status' in filter_by and 'release_status' in filter_by:
filter_by['status'] = list(filter_by['media_status']) + list(filter_by['release_status'])
del filter_by['media_status']
del filter_by['release_status']
# Filter by combining ids
for x in filter_by:
media_ids = [n for n in media_ids if n in filter_by[x]]
total_count = len(media_ids)
if total_count == 0:
return 0, []
offset = 0
limit = -1
if limit_offset:
splt = splitString(limit_offset) if isinstance(limit_offset, (str, unicode)) else limit_offset
limit = tryInt(splt[0])
offset = tryInt(0 if len(splt) == 1 else splt[1])
# List movies based on title order
medias = []
for m in db.all('media_title'):
media_id = m['_id']
if media_id not in media_ids: continue
if offset > 0:
offset -= 1
continue
media = fireEvent('media.get', media_id, single = True)
# Merge releases with movie dict
medias.append(media)
# remove from media ids
media_ids.remove(media_id)
if len(media_ids) == 0 or len(medias) == limit: break
return total_count, medias
def listView(self, **kwargs):
total_movies, movies = self.list(
types = splitString(kwargs.get('type')),
status = splitString(kwargs.get('status')),
release_status = splitString(kwargs.get('release_status')),
status_or = kwargs.get('status_or') is not None,
limit_offset = kwargs.get('limit_offset'),
starts_with = kwargs.get('starts_with'),
search = kwargs.get('search')
)
return {
'success': True,
'empty': len(movies) == 0,
'total': total_movies,
'movies': movies,
}
def addSingleListView(self):
for media_type in fireEvent('media.types', merge = True):
def tempList(*args, **kwargs):
return self.listView(types = media_type, **kwargs)
addApiView('%s.list' % media_type, tempList)
def availableChars(self, types = None, status = None, release_status = None):
db = get_db()
# Make a list from string
if status and not isinstance(status, (list, tuple)):
status = [status]
if release_status and not isinstance(release_status, (list, tuple)):
release_status = [release_status]
if types and not isinstance(types, (list, tuple)):
types = [types]
# query media ids
if types:
all_media_ids = set()
for media_type in types:
all_media_ids = all_media_ids.union(set([x['_id'] for x in db.get_many('media_by_type', media_type)]))
else:
all_media_ids = set([x['_id'] for x in db.all('media')])
media_ids = all_media_ids
filter_by = {}
# Filter on movie status
if status and len(status) > 0:
filter_by['media_status'] = set()
for media_status in fireEvent('media.with_status', status, with_doc = False, single = True):
filter_by['media_status'].add(media_status.get('_id'))
# Filter on release status
if release_status and len(release_status) > 0:
filter_by['release_status'] = set()
for release_status in fireEvent('release.with_status', release_status, with_doc = False, single = True):
filter_by['release_status'].add(release_status.get('media_id'))
# Filter by combining ids
for x in filter_by:
media_ids = [n for n in media_ids if n in filter_by[x]]
chars = set()
for x in db.all('media_startswith'):
if x['_id'] in media_ids:
chars.add(x['key'])
if len(chars) == 25:
break
return list(chars)
def charView(self, **kwargs):
type = splitString(kwargs.get('type', 'movie'))
status = splitString(kwargs.get('status', None))
release_status = splitString(kwargs.get('release_status', None))
chars = self.availableChars(type, status, release_status)
return {
'success': True,
'empty': len(chars) == 0,
'chars': chars,
}
def addSingleCharView(self):
for media_type in fireEvent('media.types', merge = True):
def tempChar(*args, **kwargs):
return self.charView(types = media_type, **kwargs)
addApiView('%s.available_chars' % media_type, tempChar)
def delete(self, media_id, delete_from = None):
try:
db = get_db()
media = db.get('id', media_id)
if media:
deleted = False
media_releases = fireEvent('release.for_media', media['_id'], single = True)
if delete_from == 'all':
# Delete connected releases
for release in media_releases:
db.delete(release)
db.delete(media)
deleted = True
else:
total_releases = len(media_releases)
total_deleted = 0
new_media_status = None
for release in media_releases:
if delete_from in ['wanted', 'snatched', 'late']:
if release.get('status') != 'done':
db.delete(release)
total_deleted += 1
new_media_status = 'done'
elif delete_from == 'manage':
if release.get('status') == 'done':
db.delete(release)
total_deleted += 1
if (total_releases == total_deleted and media['status'] != 'active') or (delete_from == 'wanted' and media['status'] == 'active') or (not new_media_status and delete_from == 'late'):
db.delete(media)
deleted = True
elif new_media_status:
media['status'] = new_media_status
db.update(media)
else:
fireEvent('media.restatus', media.get('_id'), single = True)
if deleted:
fireEvent('notify.frontend', type = 'media.deleted', data = media)
except:
log.error('Failed deleting media: %s', traceback.format_exc())
return True
def deleteView(self, id = '', **kwargs):
ids = splitString(id)
for media_id in ids:
self.delete(media_id, delete_from = kwargs.get('delete_from', 'all'))
return {
'success': True,
}
def addSingleDeleteView(self):
for media_type in fireEvent('media.types', merge = True):
def tempDelete(*args, **kwargs):
return self.deleteView(types = media_type, *args, **kwargs)
addApiView('%s.delete' % media_type, tempDelete)
def restatus(self, media_id):
try:
db = get_db()
m = db.get('id', media_id)
previous_status = m['status']
log.debug('Changing status for %s', getTitle(m))
if not m['profile_id']:
m['status'] = 'done'
else:
move_to_wanted = True
profile = db.get('id', m['profile_id'])
media_releases = fireEvent('release.for_media', m['_id'], single = True)
for q_identifier in profile['qualities']:
index = profile['qualities'].index(q_identifier)
for release in media_releases:
if q_identifier == release['quality'] and (release.get('status') == 'done' and profile['finish'][index]):
move_to_wanted = False
m['status'] = 'active' if move_to_wanted else 'done'
# Only update when status has changed
if previous_status != m['status']:
db.update(m)
return True
except:
log.error('Failed restatus: %s', traceback.format_exc())
| gpl-3.0 | 4,680,839,189,092,532,000 | 6,823,340,988,465,396,000 | 34.989154 | 202 | 0.536255 | false |
gorcz/security_monkey | security_monkey/watchers/iam/iam_group.py | 2 | 6319 | # Copyright 2014 Netflix, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
.. module: security_monkey.watchers.iam.iam_group
:platform: Unix
.. version:: $$VERSION$$
.. moduleauthor:: Patrick Kelley <pkelley@netflix.com> @monkeysecurity
"""
from security_monkey.watcher import Watcher
from security_monkey.watcher import ChangeItem
from security_monkey.exceptions import InvalidAWSJSON
from security_monkey.exceptions import BotoConnectionIssue
from security_monkey import app
import json
import urllib
def all_managed_policies(conn):
managed_policies = {}
for policy in conn.policies.all():
for attached_group in policy.attached_groups.all():
# Use a distinct name so the outer loop variable `policy` is not clobbered
# (otherwise a policy attached to several groups breaks on the next pass).
policy_dict = {
"name": policy.policy_name,
"arn": policy.arn,
"version": policy.default_version_id
}
if attached_group.arn not in managed_policies:
managed_policies[attached_group.arn] = [policy_dict]
else:
managed_policies[attached_group.arn].append(policy_dict)
return managed_policies
class IAMGroup(Watcher):
index = 'iamgroup'
i_am_singular = 'IAM Group'
i_am_plural = 'IAM Groups'
def __init__(self, accounts=None, debug=False):
super(IAMGroup, self).__init__(accounts=accounts, debug=debug)
def get_all_groups(self, conn):
all_groups = []
marker = None
while True:
groups_response = self.wrap_aws_rate_limited_call(
conn.get_all_groups,
marker=marker
)
all_groups.extend(groups_response.groups)
if hasattr(groups_response, 'marker'):
marker = groups_response.marker
else:
break
return all_groups
def get_all_group_policies(self, conn, group_name):
all_group_policies = []
marker = None
while True:
group_policies = self.wrap_aws_rate_limited_call(
conn.get_all_group_policies,
group_name,
marker=marker
)
all_group_policies.extend(group_policies.policy_names)
if hasattr(group_policies, 'marker'):
marker = group_policies.marker
else:
break
return all_group_policies
def get_all_group_users(self, conn, group_name):
all_group_users = []
marker = None
while True:
group_users_response = self.wrap_aws_rate_limited_call(
conn.get_group,
group_name,
marker=marker
)
all_group_users.extend(group_users_response.users)
if hasattr(group_users_response, 'marker'):
marker = group_users_response.marker
else:
break
return all_group_users
def slurp(self):
"""
:returns: item_list - list of IAM Groups.
:returns: exception_map - A dict where the keys are a tuple containing the
location of the exception and the value is the actual exception
"""
self.prep_for_slurp()
item_list = []
exception_map = {}
from security_monkey.common.sts_connect import connect
for account in self.accounts:
try:
iam_b3 = connect(account, 'iam_boto3')
managed_policies = all_managed_policies(iam_b3)
iam = connect(account, 'iam')
groups = self.get_all_groups(iam)
except Exception as e:
exc = BotoConnectionIssue(str(e), 'iamgroup', account, None)
self.slurp_exception((self.index, account, 'universal'), exc, exception_map)
continue
for group in groups:
app.logger.debug("Slurping %s (%s) from %s" % (self.i_am_singular, group.group_name, account))
if self.check_ignore_list(group.group_name):
continue
item_config = {
'group': dict(group),
'grouppolicies': {},
'users': {}
}
if group.arn in managed_policies:
item_config['managed_policies'] = managed_policies.get(group.arn)
### GROUP POLICIES ###
group_policies = self.get_all_group_policies(iam, group.group_name)
for policy_name in group_policies:
policy = self.wrap_aws_rate_limited_call(iam.get_group_policy, group.group_name, policy_name)
policy = policy.policy_document
policy = urllib.unquote(policy)
try:
policydict = json.loads(policy)
except:
exc = InvalidAWSJSON(policy)
self.slurp_exception((self.index, account, 'universal', group.group_name), exc, exception_map)
item_config['grouppolicies'][policy_name] = dict(policydict)
### GROUP USERS ###
group_users = self.get_all_group_users(iam, group['group_name'])
for user in group_users:
item_config['users'][user.arn] = user.user_name
item = IAMGroupItem(account=account, name=group.group_name, config=item_config)
item_list.append(item)
return item_list, exception_map
class IAMGroupItem(ChangeItem):
def __init__(self, account=None, name=None, config={}):
super(IAMGroupItem, self).__init__(
index=IAMGroup.index,
region='universal',
account=account,
name=name,
new_config=config)
| apache-2.0 | 6,726,698,740,485,639,000 | 992,242,384,232,594,700 | 32.611702 | 118 | 0.565596 | false |
andmos/ansible | test/units/modules/network/netvisor/test_pn_stp.py | 9 | 2167 | # Copyright: (c) 2018, Pluribus Networks
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import json
from units.compat.mock import patch
from ansible.modules.network.netvisor import pn_stp
from units.modules.utils import set_module_args
from .nvos_module import TestNvosModule, load_fixture
class TestStpModule(TestNvosModule):
module = pn_stp
def setUp(self):
self.mock_run_nvos_commands = patch('ansible.modules.network.netvisor.pn_stp.run_cli')
self.run_nvos_commands = self.mock_run_nvos_commands.start()
def tearDown(self):
self.mock_run_nvos_commands.stop()
def run_cli_patch(self, module, cli, state_map):
if state_map['update'] == 'stp-modify':
results = dict(
changed=True,
cli_cmd=cli
)
module.exit_json(**results)
def load_fixtures(self, commands=None, state=None, transport='cli'):
self.run_nvos_commands.side_effect = self.run_cli_patch
def test_stp_modify_t1(self):
set_module_args({'pn_cliswitch': 'sw01', 'pn_hello_time': '3',
'pn_stp_mode': 'rstp', 'state': 'update'})
result = self.execute_module(changed=True, state='update')
expected_cmd = '/usr/bin/cli --quiet -e --no-login-prompt switch sw01 stp-modify hello-time 3 root-guard-wait-time 20 mst-max-hops 20 max-age 20 '
expected_cmd += 'stp-mode rstp forwarding-delay 15 bridge-priority 32768'
self.assertEqual(result['cli_cmd'], expected_cmd)
def test_stp_modify_t2(self):
set_module_args({'pn_cliswitch': 'sw01', 'pn_root_guard_wait_time': '50',
'state': 'update'})
result = self.execute_module(changed=True, state='update')
expected_cmd = '/usr/bin/cli --quiet -e --no-login-prompt switch sw01 stp-modify hello-time 2 root-guard-wait-time 50 mst-max-hops 20 '
expected_cmd += 'max-age 20 forwarding-delay 15 bridge-priority 32768'
self.assertEqual(result['cli_cmd'], expected_cmd)
| gpl-3.0 | -6,472,018,951,955,823,000 | -456,518,944,179,800,100 | 41.490196 | 156 | 0.6479 | false |
csmart/jockey-yum | setup.py | 1 | 1204 | #!/usr/bin/env python
# (c) 2007 Canonical Ltd.
# Author: Martin Pitt <martin.pitt@ubuntu.com>
# This script needs python-distutils-extra, an extension to the standard
# distutils which provides i18n, icon support, etc.
# https://launchpad.net/python-distutils-extra
from glob import glob
from distutils.version import StrictVersion
try:
import DistUtilsExtra.auto
except ImportError:
import sys
print >> sys.stderr, 'To build Jockey you need https://launchpad.net/python-distutils-extra'
sys.exit(1)
assert StrictVersion(DistUtilsExtra.auto.__version__) >= '2.4', 'needs DistUtilsExtra.auto >= 2.4'
DistUtilsExtra.auto.setup(
name='jockey',
version='0.9.3',
description='UI for managing third-party and non-free drivers',
url='https://launchpad.net/jockey',
license='GPL v2 or later',
author='Martin Pitt',
author_email='martin.pitt@ubuntu.com',
data_files = [
('share/jockey', ['backend/jockey-backend']),
('share/jockey', ['gtk/jockey-gtk.ui']), # bug in DistUtilsExtra.auto 2.2
('share/jockey', glob('kde/*.ui')), # don't use pykdeuic4
],
scripts = ['gtk/jockey-gtk', 'kde/jockey-kde', 'text/jockey-text'],
)
| gpl-2.0 | -2,184,311,690,796,095,200 | -7,347,241,227,767,246,000 | 31.540541 | 98 | 0.680233 | false |
gersolar/stations | stations_configuration/settings.py | 1 | 5198 | # Only Celery settings for stations project.
#import djcelery
#djcelery.setup_loader()
#BROKER_TRANSPORT = 'amqplib'
#BROKER_URL = 'django://'
##CELERY_RESULT_BACKEND = 'database'
#CELERY_DEFAULT_QUEUE = "default"
#CELERY_QUEUES = {
# "default": {
# "binding_key": "task.#",
# },
# "mailer": {
# "binding_key": "task.#",
# },
#}
#CELERY_ROUTES = {'downloader.tasks.check_email_schedule': {'queue': 'mailer'}}
#CELERY_TIMEZONE = 'UTC'
#CELERY_CONCURRENCY = 7
# Django settings for stations project.
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
# ('Your Name', 'your_email@example.com'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': 'stations.sqlite3',
}
}
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'UTC' # 'America/Buenos_Aires'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = ''
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = ''
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = 'fax%_3d9oshwed$!3s)jdn876jpj#5u&50m$6naau#&=zpyn%0'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'sslify.middleware.SSLifyMiddleware',
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
# Uncomment the next line for simple clickjacking protection:
# 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'stations_configuration.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'stations_configuration.wsgi.application'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'polymorphic',
# Uncomment the next line to enable the admin:
'django.contrib.admin',
# Uncomment the next line to enable admin documentation:
'django.contrib.admindocs',
'stations',
)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
TEMPLATE_DIRS='templates'
| mit | 4,339,668,899,069,129,700 | -5,321,426,587,229,996,000 | 28.039106 | 85 | 0.729127 | false |
chrisseto/modular-odm | tests/test_foreign.py | 4 | 1849 | #!/usr/bin/env python
# encoding: utf-8
from nose.tools import *
from tests.base import ModularOdmTestCase, TestObject
from modularodm import fields
class TestForeignList(ModularOdmTestCase):
def define_objects(self):
class Foo(TestObject):
_id = fields.IntegerField()
bars = fields.ForeignField('bar', list=True)
class Bar(TestObject):
_id = fields.IntegerField()
return Foo, Bar
def set_up_objects(self):
self.foo = self.Foo(_id=1)
self.bars = []
for idx in range(5):
self.bars.append(self.Bar(_id=idx))
self.bars[idx].save()
self.foo.bars = self.bars
self.foo.save()
def test_get_item(self):
assert_equal(self.bars[2], self.foo.bars[2])
def test_get_slice(self):
assert_equal(self.bars[:3], list(self.foo.bars[:3]))
def test_get_slice_extended(self):
assert_equal(self.bars[::-1], list(self.foo.bars[::-1]))
class TestAbstractForeignList(ModularOdmTestCase):
def define_objects(self):
class Foo(TestObject):
_id = fields.IntegerField()
bars = fields.AbstractForeignField(list=True)
class Bar(TestObject):
_id = fields.IntegerField()
return Foo, Bar
def set_up_objects(self):
self.foo = self.Foo(_id=1)
self.bars = []
for idx in range(5):
self.bars.append(self.Bar(_id=idx))
self.bars[idx].save()
self.foo.bars = self.bars
self.foo.save()
def test_get_item(self):
assert_equal(self.bars[2], self.foo.bars[2])
def test_get_slice(self):
assert_equal(self.bars[:3], list(self.foo.bars[:3]))
def test_get_slice_extended(self):
assert_equal(self.bars[::-1], list(self.foo.bars[::-1]))
| apache-2.0 | 4,276,372,318,259,174,400 | 7,743,754,531,197,981,000 | 22.1125 | 64 | 0.586263 | false |
ycl2045/nova-master | nova/api/openstack/compute/plugins/v3/keypairs.py | 10 | 6309 | # Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Keypair management extension."""
import webob
import webob.exc
from nova.api.openstack.compute.schemas.v3 import keypairs
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova.api import validation
from nova.compute import api as compute_api
from nova import exception
from nova.openstack.common.gettextutils import _
ALIAS = 'keypairs'
authorize = extensions.extension_authorizer('compute', 'v3:' + ALIAS)
soft_authorize = extensions.soft_extension_authorizer('compute', 'v3:' + ALIAS)
class KeypairController(object):
"""Keypair API controller for the OpenStack API."""
def __init__(self):
self.api = compute_api.KeypairAPI()
def _filter_keypair(self, keypair, **attrs):
clean = {
'name': keypair.name,
'public_key': keypair.public_key,
'fingerprint': keypair.fingerprint,
}
for attr in attrs:
clean[attr] = keypair[attr]
return clean
@extensions.expected_errors((400, 409, 413))
@wsgi.response(201)
@validation.schema(keypairs.create)
def create(self, req, body):
"""Create or import keypair.
Sending name will generate a key and return private_key
and fingerprint.
You can send a public_key to add an existing ssh key
params: keypair object with:
name (required) - string
public_key (optional) - string
"""
context = req.environ['nova.context']
authorize(context, action='create')
params = body['keypair']
name = params['name']
try:
if 'public_key' in params:
keypair = self.api.import_key_pair(context,
context.user_id, name,
params['public_key'])
keypair = self._filter_keypair(keypair, user_id=True)
else:
keypair, private_key = self.api.create_key_pair(
context, context.user_id, name)
keypair = self._filter_keypair(keypair, user_id=True)
keypair['private_key'] = private_key
return {'keypair': keypair}
except exception.KeypairLimitExceeded:
msg = _("Quota exceeded, too many key pairs.")
raise webob.exc.HTTPRequestEntityTooLarge(
explanation=msg,
headers={'Retry-After': 0})
except exception.InvalidKeypair as exc:
raise webob.exc.HTTPBadRequest(explanation=exc.format_message())
except exception.KeyPairExists as exc:
raise webob.exc.HTTPConflict(explanation=exc.format_message())
@wsgi.response(204)
@extensions.expected_errors(404)
def delete(self, req, id):
"""Delete a keypair with a given name."""
context = req.environ['nova.context']
authorize(context, action='delete')
try:
self.api.delete_key_pair(context, context.user_id, id)
except exception.KeypairNotFound:
raise webob.exc.HTTPNotFound()
@extensions.expected_errors(404)
def show(self, req, id):
"""Return data for the given key name."""
context = req.environ['nova.context']
authorize(context, action='show')
try:
keypair = self.api.get_key_pair(context, context.user_id, id)
except exception.KeypairNotFound:
raise webob.exc.HTTPNotFound()
return {'keypair': self._filter_keypair(keypair)}
@extensions.expected_errors(())
def index(self, req):
"""List of keypairs for a user."""
context = req.environ['nova.context']
authorize(context, action='index')
key_pairs = self.api.get_key_pairs(context, context.user_id)
rval = []
for key_pair in key_pairs:
rval.append({'keypair': self._filter_keypair(key_pair)})
return {'keypairs': rval}
class Controller(wsgi.Controller):
def _add_key_name(self, req, servers):
for server in servers:
db_server = req.get_db_instance(server['id'])
# server['id'] is guaranteed to be in the cache due to
# the core API adding it in its 'show'/'detail' methods.
server['key_name'] = db_server['key_name']
def _show(self, req, resp_obj):
if 'server' in resp_obj.obj:
server = resp_obj.obj['server']
self._add_key_name(req, [server])
@wsgi.extends
def show(self, req, resp_obj, id):
context = req.environ['nova.context']
if soft_authorize(context):
self._show(req, resp_obj)
@wsgi.extends
def detail(self, req, resp_obj):
context = req.environ['nova.context']
if 'servers' in resp_obj.obj and soft_authorize(context):
servers = resp_obj.obj['servers']
self._add_key_name(req, servers)
class Keypairs(extensions.V3APIExtensionBase):
"""Keypair Support."""
name = "Keypairs"
alias = ALIAS
version = 1
def get_resources(self):
resources = [
extensions.ResourceExtension('keypairs',
KeypairController())]
return resources
def get_controller_extensions(self):
controller = Controller()
extension = extensions.ControllerExtension(self, 'servers', controller)
return [extension]
# use nova.api.extensions.server.extensions entry point to modify
# server create kwargs
def server_create(self, server_dict, create_kwargs):
create_kwargs['key_name'] = server_dict.get('key_name')
| apache-2.0 | -2,847,100,527,475,154,400 | -277,516,942,678,089,380 | 33.664835 | 79 | 0.612934 | false |
liamgh/liamgreenhughes-sl4a-tf101 | python/src/Lib/test/test_pwd.py | 58 | 3352 | import unittest
from test import test_support
import pwd
class PwdTest(unittest.TestCase):
def test_values(self):
entries = pwd.getpwall()
entriesbyname = {}
entriesbyuid = {}
for e in entries:
self.assertEqual(len(e), 7)
self.assertEqual(e[0], e.pw_name)
self.assert_(isinstance(e.pw_name, basestring))
self.assertEqual(e[1], e.pw_passwd)
self.assert_(isinstance(e.pw_passwd, basestring))
self.assertEqual(e[2], e.pw_uid)
self.assert_(isinstance(e.pw_uid, int))
self.assertEqual(e[3], e.pw_gid)
self.assert_(isinstance(e.pw_gid, int))
self.assertEqual(e[4], e.pw_gecos)
self.assert_(isinstance(e.pw_gecos, basestring))
self.assertEqual(e[5], e.pw_dir)
self.assert_(isinstance(e.pw_dir, basestring))
self.assertEqual(e[6], e.pw_shell)
self.assert_(isinstance(e.pw_shell, basestring))
# The following won't work, because of duplicate entries
# for one uid
# self.assertEqual(pwd.getpwuid(e.pw_uid), e)
# instead of this collect all entries for one uid
# and check afterwards
entriesbyname.setdefault(e.pw_name, []).append(e)
entriesbyuid.setdefault(e.pw_uid, []).append(e)
if len(entries) > 1000: # Huge passwd file (NIS?) -- skip the rest
return
# check whether the entry returned by getpwuid()
# for each uid is among those from getpwall() for this uid
for e in entries:
if not e[0] or e[0] == '+':
continue # skip NIS entries etc.
self.assert_(pwd.getpwnam(e.pw_name) in entriesbyname[e.pw_name])
self.assert_(pwd.getpwuid(e.pw_uid) in entriesbyuid[e.pw_uid])
def test_errors(self):
self.assertRaises(TypeError, pwd.getpwuid)
self.assertRaises(TypeError, pwd.getpwnam)
self.assertRaises(TypeError, pwd.getpwall, 42)
# try to get some errors
bynames = {}
byuids = {}
for (n, p, u, g, gecos, d, s) in pwd.getpwall():
bynames[n] = u
byuids[u] = n
allnames = bynames.keys()
namei = 0
fakename = allnames[namei]
while fakename in bynames:
chars = list(fakename)
for i in xrange(len(chars)):
if chars[i] == 'z':
chars[i] = 'A'
break
elif chars[i] == 'Z':
continue
else:
chars[i] = chr(ord(chars[i]) + 1)
break
else:
namei = namei + 1
try:
fakename = allnames[namei]
except IndexError:
# should never happen... if so, just forget it
break
fakename = ''.join(chars)
self.assertRaises(KeyError, pwd.getpwnam, fakename)
# Choose a non-existent uid.
fakeuid = 4127
while fakeuid in byuids:
fakeuid = (fakeuid * 3) % 0x10000
self.assertRaises(KeyError, pwd.getpwuid, fakeuid)
def test_main():
test_support.run_unittest(PwdTest)
if __name__ == "__main__":
test_main()
| apache-2.0 | -997,044,723,181,891,800 | -7,344,502,966,086,025,000 | 33.556701 | 77 | 0.531026 | false |
m8ttyB/socorro | webapp-django/crashstats/supersearch/tests/test_utils.py | 3 | 1142 | import datetime
from nose.tools import eq_
from django.utils.timezone import utc
from crashstats.crashstats.tests.test_views import BaseTestViews
from crashstats.topcrashers.views import get_date_boundaries
class TestDateBoundaries(BaseTestViews):
def test_get_date_boundaries(self):
# Simple test.
start, end = get_date_boundaries({
'date': [
'>2010-03-01T12:12:12',
'<=2010-03-10T00:00:00',
]
})
eq_(
start,
datetime.datetime(2010, 3, 1, 12, 12, 12).replace(tzinfo=utc)
)
eq_(end, datetime.datetime(2010, 3, 10).replace(tzinfo=utc))
# Test with messy dates.
start, end = get_date_boundaries({
'date': [
'>2010-03-01T12:12:12',
'>2009-01-01T12:12:12',
'<2010-03-11T00:00:00',
'<=2010-03-10T00:00:00',
]
})
eq_(
start,
datetime.datetime(2009, 1, 1, 12, 12, 12).replace(tzinfo=utc)
)
eq_(end, datetime.datetime(2010, 3, 11).replace(tzinfo=utc))
| mpl-2.0 | -3,660,108,555,765,755,000 | -2,332,832,443,631,500,300 | 28.282051 | 73 | 0.531524 | false |
ojengwa/grr | client/client_actions/standard.py | 2 | 20847 | #!/usr/bin/env python
"""Standard actions that happen on the client."""
import cStringIO as StringIO
import ctypes
import gzip
import hashlib
import os
import platform
import socket
import sys
import time
import zlib
import psutil
import logging
from grr.client import actions
from grr.client import client_utils_common
from grr.client import vfs
from grr.client.client_actions import tempfiles
from grr.lib import config_lib
from grr.lib import flags
from grr.lib import rdfvalue
from grr.lib import utils
from grr.lib.rdfvalues import crypto
# We do not send larger buffers than this:
MAX_BUFFER_SIZE = 640 * 1024
class ReadBuffer(actions.ActionPlugin):
"""Reads a buffer from a file and returns it to a server callback."""
in_rdfvalue = rdfvalue.BufferReference
out_rdfvalue = rdfvalue.BufferReference
def Run(self, args):
"""Reads a buffer on the client and sends it to the server."""
# Make sure we limit the size of our output
if args.length > MAX_BUFFER_SIZE:
raise RuntimeError("Can not read buffers this large.")
try:
fd = vfs.VFSOpen(args.pathspec, progress_callback=self.Progress)
fd.Seek(args.offset)
offset = fd.Tell()
data = fd.Read(args.length)
except (IOError, OSError), e:
self.SetStatus(rdfvalue.GrrStatus.ReturnedStatus.IOERROR, e)
return
# Now return the data to the server
self.SendReply(offset=offset, data=data,
length=len(data), pathspec=fd.pathspec)
HASH_CACHE = utils.FastStore(100)
class TransferBuffer(actions.ActionPlugin):
"""Reads a buffer from a file and returns it to the server efficiently."""
in_rdfvalue = rdfvalue.BufferReference
out_rdfvalue = rdfvalue.BufferReference
def Run(self, args):
"""Reads a buffer on the client and sends it to the server."""
# Make sure we limit the size of our output
if args.length > MAX_BUFFER_SIZE:
raise RuntimeError("Can not read buffers this large.")
data = vfs.ReadVFS(args.pathspec, args.offset, args.length,
progress_callback=self.Progress)
result = rdfvalue.DataBlob(
data=zlib.compress(data),
compression=rdfvalue.DataBlob.CompressionType.ZCOMPRESSION)
digest = hashlib.sha256(data).digest()
# Ensure that the buffer is counted against this response. Check network
# send limit.
self.ChargeBytesToSession(len(data))
# Now return the data to the server into the special TransferStore well
# known flow.
self.grr_worker.SendReply(
result, session_id=rdfvalue.SessionID(flow_name="TransferStore"))
# Now report the hash of this blob to our flow as well as the offset and
# length.
self.SendReply(offset=args.offset, length=len(data),
data=digest)
class HashBuffer(actions.ActionPlugin):
"""Hash a buffer from a file and returns it to the server efficiently."""
in_rdfvalue = rdfvalue.BufferReference
out_rdfvalue = rdfvalue.BufferReference
def Run(self, args):
"""Reads a buffer on the client and sends it to the server."""
# Make sure we limit the size of our output
if args.length > MAX_BUFFER_SIZE:
raise RuntimeError("Can not read buffers this large.")
data = vfs.ReadVFS(args.pathspec, args.offset, args.length)
digest = hashlib.sha256(data).digest()
# Now report the hash of this blob to our flow as well as the offset and
# length.
self.SendReply(offset=args.offset, length=len(data),
data=digest)
class CopyPathToFile(actions.ActionPlugin):
"""Copy contents of a pathspec to a file on disk."""
in_rdfvalue = rdfvalue.CopyPathToFileRequest
out_rdfvalue = rdfvalue.CopyPathToFileRequest
BLOCK_SIZE = 10 * 1024 * 1024
def _Copy(self, dest_fd):
"""Copy from VFS to file until no more data or self.length is reached.
Args:
dest_fd: file object to write to
Returns:
self.written: bytes written
"""
while self.written < self.length:
to_read = min(self.length - self.written, self.BLOCK_SIZE)
data = self.src_fd.read(to_read)
if not data:
break
dest_fd.write(data)
self.written += len(data)
# Send heartbeats for long files.
self.Progress()
return self.written
def Run(self, args):
"""Read from a VFS file and write to a GRRTempFile on disk.
If file writing doesn't complete files won't be cleaned up.
Args:
args: see CopyPathToFile in jobs.proto
"""
self.src_fd = vfs.VFSOpen(args.src_path, progress_callback=self.Progress)
self.src_fd.Seek(args.offset)
offset = self.src_fd.Tell()
self.length = args.length or (1024 ** 4) # 1 TB
self.written = 0
suffix = ".gz" if args.gzip_output else ""
self.dest_fd = tempfiles.CreateGRRTempFile(directory=args.dest_dir,
lifetime=args.lifetime,
suffix=suffix)
self.dest_file = self.dest_fd.name
with self.dest_fd:
if args.gzip_output:
gzip_fd = gzip.GzipFile(self.dest_file, "wb", 9, self.dest_fd)
# Gzip filehandle needs its own close method called
with gzip_fd:
self._Copy(gzip_fd)
else:
self._Copy(self.dest_fd)
pathspec_out = rdfvalue.PathSpec(
path=self.dest_file, pathtype=rdfvalue.PathSpec.PathType.OS)
self.SendReply(offset=offset, length=self.written, src_path=args.src_path,
dest_dir=args.dest_dir, dest_path=pathspec_out,
gzip_output=args.gzip_output)
class ListDirectory(ReadBuffer):
"""Lists all the files in a directory."""
in_rdfvalue = rdfvalue.ListDirRequest
out_rdfvalue = rdfvalue.StatEntry
def Run(self, args):
"""Lists a directory."""
try:
directory = vfs.VFSOpen(args.pathspec, progress_callback=self.Progress)
except (IOError, OSError), e:
self.SetStatus(rdfvalue.GrrStatus.ReturnedStatus.IOERROR, e)
return
files = list(directory.ListFiles())
files.sort(key=lambda x: x.pathspec.path)
for response in files:
self.SendReply(response)
class IteratedListDirectory(actions.IteratedAction):
"""Lists a directory as an iterator."""
in_rdfvalue = rdfvalue.ListDirRequest
out_rdfvalue = rdfvalue.StatEntry
def Iterate(self, request, client_state):
"""Restores its way through the directory using an Iterator."""
try:
fd = vfs.VFSOpen(request.pathspec, progress_callback=self.Progress)
except (IOError, OSError), e:
self.SetStatus(rdfvalue.GrrStatus.ReturnedStatus.IOERROR, e)
return
files = list(fd.ListFiles())
files.sort(key=lambda x: x.pathspec.path)
index = client_state.get("index", 0)
length = request.iterator.number
for response in files[index:index + length]:
self.SendReply(response)
# Update the state
client_state["index"] = index + length
class SuspendableListDirectory(actions.SuspendableAction):
"""Lists a directory as a suspendable client action."""
in_rdfvalue = rdfvalue.ListDirRequest
out_rdfvalue = rdfvalue.StatEntry
def Iterate(self):
try:
fd = vfs.VFSOpen(self.request.pathspec, progress_callback=self.Progress)
except (IOError, OSError), e:
self.SetStatus(rdfvalue.GrrStatus.ReturnedStatus.IOERROR, e)
return
length = self.request.iterator.number
for group in utils.Grouper(fd.ListFiles(), length):
for response in group:
self.SendReply(response)
self.Suspend()
class StatFile(ListDirectory):
"""Sends a StatResponse for a single file."""
in_rdfvalue = rdfvalue.ListDirRequest
out_rdfvalue = rdfvalue.StatEntry
def Run(self, args):
"""Sends a StatResponse for a single file."""
try:
fd = vfs.VFSOpen(args.pathspec, progress_callback=self.Progress)
res = fd.Stat()
self.SendReply(res)
except (IOError, OSError), e:
self.SetStatus(rdfvalue.GrrStatus.ReturnedStatus.IOERROR, e)
return
class ExecuteCommand(actions.ActionPlugin):
"""Executes one of the predefined commands."""
in_rdfvalue = rdfvalue.ExecuteRequest
out_rdfvalue = rdfvalue.ExecuteResponse
def Run(self, command):
"""Run."""
cmd = command.cmd
args = command.args
time_limit = command.time_limit
res = client_utils_common.Execute(cmd, args, time_limit)
(stdout, stderr, status, time_used) = res
# Limit output to 10MB so our response doesn't get too big.
stdout = stdout[:10 * 1024 * 1024]
stderr = stderr[:10 * 1024 * 1024]
result = rdfvalue.ExecuteResponse(
request=command,
stdout=stdout,
stderr=stderr,
exit_status=status,
# We have to return microseconds.
time_used=int(1e6 * time_used))
self.SendReply(result)
class ExecuteBinaryCommand(actions.ActionPlugin):
"""Executes a command from a passed in binary.
  Obviously this is a dangerous function: it provides for arbitrary code exec by
the server running as root/SYSTEM.
This is protected by the CONFIG[PrivateKeys.executable_signing_private_key],
which should be stored offline and well protected.
This method can be utilized as part of an autoupdate mechanism if necessary.
NOTE: If the binary is too large to fit inside a single request, the request
will have the more_data flag enabled, indicating more data is coming.
"""
in_rdfvalue = rdfvalue.ExecuteBinaryRequest
out_rdfvalue = rdfvalue.ExecuteBinaryResponse
suffix = ""
def WriteBlobToFile(self, request, suffix=""):
"""Writes the blob to a file and returns its path."""
lifetime = 0
# Only set the lifetime thread on the last chunk written.
if not request.more_data:
lifetime = request.time_limit
# Keep the file for at least 5 seconds after execution.
if lifetime > 0:
lifetime += 5
# First chunk truncates the file, later chunks append.
if request.offset == 0:
mode = "w+b"
else:
mode = "r+b"
temp_file = tempfiles.CreateGRRTempFile(filename=request.write_path,
suffix=suffix, mode=mode)
with temp_file:
path = temp_file.name
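      # Seek to the current end of the file and make sure this chunk starts
      # exactly where the previous one stopped before appending it.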
temp_file.seek(0, 2)
if temp_file.tell() != request.offset:
raise IOError("Chunks out of order Error.")
# Write the new chunk.
temp_file.write(request.executable.data)
return path
def CleanUp(self, path):
"""Removes the temp file."""
try:
if os.path.exists(path):
os.remove(path)
except (OSError, IOError), e:
logging.info("Failed to remove temporary file %s. Err: %s", path, e)
def Run(self, args):
"""Run."""
# Verify the executable blob.
args.executable.Verify(config_lib.CONFIG[
"Client.executable_signing_public_key"])
path = self.WriteBlobToFile(args, self.suffix)
# Only actually run the file on the last chunk.
if not args.more_data:
self.ProcessFile(path, args)
self.CleanUp(path)
def ProcessFile(self, path, args):
res = client_utils_common.Execute(path, args.args, args.time_limit,
bypass_whitelist=True)
(stdout, stderr, status, time_used) = res
# Limit output to 10MB so our response doesn't get too big.
stdout = stdout[:10 * 1024 * 1024]
stderr = stderr[:10 * 1024 * 1024]
result = rdfvalue.ExecuteBinaryResponse(
stdout=stdout,
stderr=stderr,
exit_status=status,
# We have to return microseconds.
time_used=int(1e6 * time_used))
self.SendReply(result)
class ExecutePython(actions.ActionPlugin):
"""Executes python code with exec.
  Obviously this is a dangerous function: it provides for arbitrary code exec by
the server running as root/SYSTEM.
This is protected by CONFIG[PrivateKeys.executable_signing_private_key], which
should be stored offline and well protected.
"""
in_rdfvalue = rdfvalue.ExecutePythonRequest
out_rdfvalue = rdfvalue.ExecutePythonResponse
def Run(self, args):
"""Run."""
time_start = time.time()
class StdOutHook(object):
def __init__(self, buf):
self.buf = buf
def write(self, text):
self.buf.write(text)
args.python_code.Verify(config_lib.CONFIG[
"Client.executable_signing_public_key"])
# The execed code can assign to this variable if it wants to return data.
logging.debug("exec for python code %s", args.python_code.data[0:100])
context = globals().copy()
context["py_args"] = args.py_args.ToDict()
context["magic_return_str"] = ""
# Export the Progress function to allow python hacks to call it.
context["Progress"] = self.Progress
stdout = StringIO.StringIO()
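    # Temporarily replace sys.stdout so anything the exec'ed code prints is
    # captured in the StringIO buffer and can be returned to the server.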
with utils.Stubber(sys, "stdout", StdOutHook(stdout)):
exec(args.python_code.data, context) # pylint: disable=exec-used
stdout_output = stdout.getvalue()
magic_str_output = context.get("magic_return_str")
if stdout_output and magic_str_output:
output = "Stdout: %s\nMagic Str:%s\n" % (stdout_output, magic_str_output)
else:
output = stdout_output or magic_str_output
time_used = time.time() - time_start
# We have to return microseconds.
result = rdfvalue.ExecutePythonResponse(
time_used=int(1e6 * time_used),
return_val=utils.SmartStr(output))
self.SendReply(result)
class Segfault(actions.ActionPlugin):
"""This action is just for debugging. It induces a segfault."""
in_rdfvalue = None
out_rdfvalue = None
def Run(self, unused_args):
"""Does the segfaulting."""
if flags.FLAGS.debug:
logging.warning("Segfault action requested :(")
print ctypes.cast(1, ctypes.POINTER(ctypes.c_void_p)).contents
else:
logging.warning("Segfault requested but not running in debug mode.")
class ListProcesses(actions.ActionPlugin):
"""This action lists all the processes running on a machine."""
in_rdfvalue = None
out_rdfvalue = rdfvalue.Process
def Run(self, unused_arg):
# psutil will cause an active loop on Windows 2000
if platform.system() == "Windows" and platform.version().startswith("5.0"):
raise RuntimeError("ListProcesses not supported on Windows 2000")
for proc in psutil.process_iter():
response = rdfvalue.Process()
process_fields = ["pid", "ppid", "name", "exe", "username", "terminal"]
for field in process_fields:
try:
value = getattr(proc, field)
if value is None:
continue
if callable(value):
value = value()
if not isinstance(value, (int, long)):
value = utils.SmartUnicode(value)
setattr(response, field, value)
except (psutil.NoSuchProcess, psutil.AccessDenied, AttributeError):
pass
try:
for arg in proc.cmdline():
response.cmdline.append(utils.SmartUnicode(arg))
except (psutil.NoSuchProcess, psutil.AccessDenied):
pass
try:
response.nice = proc.nice()
except (psutil.NoSuchProcess, psutil.AccessDenied):
pass
try:
# Not available on Windows.
if hasattr(proc, "uids"):
(response.real_uid, response.effective_uid,
response.saved_uid) = proc.uids()
(response.real_gid, response.effective_gid,
response.saved_gid) = proc.gids()
except (psutil.NoSuchProcess, psutil.AccessDenied):
pass
try:
response.ctime = long(proc.create_time() * 1e6)
response.status = str(proc.status())
except (psutil.NoSuchProcess, psutil.AccessDenied):
pass
try:
# Not available on OSX.
if hasattr(proc, "cwd"):
response.cwd = utils.SmartUnicode(proc.cwd())
except (psutil.NoSuchProcess, psutil.AccessDenied):
pass
try:
response.num_threads = proc.num_threads()
except (psutil.NoSuchProcess, psutil.AccessDenied, RuntimeError):
pass
try:
(response.user_cpu_time,
response.system_cpu_time) = proc.cpu_times()
# This is very time consuming so we do not collect cpu_percent here.
# response.cpu_percent = proc.get_cpu_percent()
except (psutil.NoSuchProcess, psutil.AccessDenied):
pass
try:
response.RSS_size, response.VMS_size = proc.memory_info()
response.memory_percent = proc.memory_percent()
except (psutil.NoSuchProcess, psutil.AccessDenied):
pass
# Due to a bug in psutil, this function is disabled for now
# (https://github.com/giampaolo/psutil/issues/340)
# try:
# for f in proc.open_files():
# response.open_files.append(utils.SmartUnicode(f.path))
# except (psutil.NoSuchProcess, psutil.AccessDenied):
# pass
try:
for c in proc.connections():
conn = response.connections.Append(family=c.family,
type=c.type,
pid=proc.pid)
try:
conn.state = c.status
except ValueError:
logging.info("Encountered unknown connection status (%s).",
c.status)
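          # Newer psutil versions expose laddr/raddr; fall back to the older
          # local_address/remote_address attributes when they are missing.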
try:
conn.local_address.ip, conn.local_address.port = c.laddr
# Could be in state LISTEN.
if c.raddr:
conn.remote_address.ip, conn.remote_address.port = c.raddr
except AttributeError:
conn.local_address.ip, conn.local_address.port = c.local_address
# Could be in state LISTEN.
if c.remote_address:
(conn.remote_address.ip,
conn.remote_address.port) = c.remote_address
except (psutil.NoSuchProcess, psutil.AccessDenied):
pass
self.SendReply(response)
# Reading information here is slow so we heartbeat between processes.
self.Progress()
class SendFile(actions.ActionPlugin):
"""This action encrypts and sends a file to a remote listener."""
in_rdfvalue = rdfvalue.SendFileRequest
out_rdfvalue = rdfvalue.StatEntry
BLOCK_SIZE = 1024 * 1024 * 10 # 10 MB
def Send(self, sock, msg):
totalsent = 0
n = len(msg)
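    # socket.send() may transmit fewer bytes than requested, so keep sending
    # until the whole message has gone out.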
while totalsent < n:
sent = sock.send(msg[totalsent:])
if sent == 0:
raise RuntimeError("socket connection broken")
totalsent += sent
def Run(self, args):
"""Run."""
# Open the file.
fd = vfs.VFSOpen(args.pathspec, progress_callback=self.Progress)
if args.address_family == rdfvalue.NetworkAddress.Family.INET:
family = socket.AF_INET
elif args.address_family == rdfvalue.NetworkAddress.Family.INET6:
family = socket.AF_INET6
else:
raise RuntimeError("Socket address family not supported.")
s = socket.socket(family, socket.SOCK_STREAM)
try:
s.connect((args.host, args.port))
except socket.error as e:
raise RuntimeError(str(e))
cipher = crypto.AES128CBCCipher(args.key, args.iv,
crypto.Cipher.OP_ENCRYPT)
while True:
data = fd.read(self.BLOCK_SIZE)
if not data:
break
self.Send(s, cipher.Update(data))
# Send heartbeats for long files.
self.Progress()
self.Send(s, cipher.Final())
s.close()
self.SendReply(fd.Stat())
class StatFS(actions.ActionPlugin):
"""Call os.statvfs for a given list of paths. OS X and Linux only.
Note that a statvfs call for a network filesystem (e.g. NFS) that is
unavailable, e.g. due to no network, will result in the call blocking.
"""
in_rdfvalue = rdfvalue.StatFSRequest
out_rdfvalue = rdfvalue.Volume
def Run(self, args):
if platform.system() == "Windows":
raise RuntimeError("os.statvfs not available on Windows")
for path in args.path_list:
try:
fd = vfs.VFSOpen(rdfvalue.PathSpec(path=path, pathtype=args.pathtype),
progress_callback=self.Progress)
st = fd.StatFS()
mount_point = fd.GetMountPoint()
except (IOError, OSError), e:
self.SetStatus(rdfvalue.GrrStatus.ReturnedStatus.IOERROR, e)
continue
unix = rdfvalue.UnixVolume(mount_point=mount_point)
# On linux pre 2.6 kernels don't have frsize, so we fall back to bsize.
# The actual_available_allocation_units attribute is set to blocks
# available to the unprivileged user, root may have some additional
# reserved space.
result = rdfvalue.Volume(bytes_per_sector=(st.f_frsize or st.f_bsize),
sectors_per_allocation_unit=1,
total_allocation_units=st.f_blocks,
actual_available_allocation_units=st.f_bavail,
unix=unix)
self.SendReply(result)
| apache-2.0 | -6,459,234,914,647,858,000 | 6,273,272,390,985,213,000 | 30.161435 | 80 | 0.651365 | false |
taiyuanfang/gyp | test/win/gyptest-cl-buffer-security-check.py | 344 | 1612 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Make sure buffer security check setting is extracted properly.
"""
import TestGyp
import sys
if sys.platform == 'win32':
test = TestGyp.TestGyp(formats=['msvs', 'ninja'])
CHDIR = 'compiler-flags'
test.run_gyp('buffer-security-check.gyp', chdir=CHDIR)
test.build('buffer-security-check.gyp', chdir=CHDIR)
def GetDisassemblyOfMain(exe):
# The standard library uses buffer security checks independent of our
# buffer security settings, so we extract just our code (i.e. main()) to
# check against.
full_path = test.built_file_path(exe, chdir=CHDIR)
output = test.run_dumpbin('/disasm', full_path)
result = []
in_main = False
for line in output.splitlines():
if line == '_main:':
in_main = True
elif in_main:
# Disassembly of next function starts.
if line.startswith('_'):
break
result.append(line)
return '\n'.join(result)
# Buffer security checks are on by default, make sure security_cookie
# appears in the disassembly of our code.
if 'security_cookie' not in GetDisassemblyOfMain('test_bsc_unset.exe'):
test.fail_test()
# Explicitly on.
if 'security_cookie' not in GetDisassemblyOfMain('test_bsc_on.exe'):
test.fail_test()
# Explicitly off, shouldn't be a reference to the security cookie.
if 'security_cookie' in GetDisassemblyOfMain('test_bsc_off.exe'):
test.fail_test()
test.pass_test()
| bsd-3-clause | 1,780,257,169,554,671,900 | 1,497,067,708,219,554,800 | 29.415094 | 76 | 0.680521 | false |
valentin-krasontovitsch/ansible | lib/ansible/modules/network/enos/enos_config.py | 42 | 11179 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# (C) 2017 Red Hat Inc.
# Copyright (C) 2017 Lenovo.
#
# GNU General Public License v3.0+
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
# Module to configure Lenovo Switches.
# Lenovo Networking
#
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = """
---
module: enos_config
version_added: "2.5"
author: "Anil Kumar Muraleedharan (@amuraleedhar)"
short_description: Manage Lenovo ENOS configuration sections
description:
- Lenovo ENOS configurations use a simple block indent file syntax
for segmenting configuration into sections. This module provides
an implementation for working with ENOS configuration sections in
a deterministic way.
extends_documentation_fragment: enos
notes:
- Tested against ENOS 8.4.1
options:
lines:
description:
- The ordered set of commands that should be configured in the
section. The commands must be the exact same commands as found
in the device running-config. Be sure to note the configuration
command syntax as some commands are automatically modified by the
device config parser.
aliases: ['commands']
parents:
description:
- The ordered set of parents that uniquely identify the section
the commands should be checked against. If the parents argument
is omitted, the commands are checked against the set of top
level or global commands.
src:
description:
- Specifies the source path to the file that contains the configuration
or configuration template to load. The path to the source file can
either be the full path on the Ansible control host or a relative
path from the playbook or role root directory. This argument is
mutually exclusive with I(lines), I(parents).
before:
description:
- The ordered set of commands to push on to the command stack if
a change needs to be made. This allows the playbook designer
the opportunity to perform configuration commands prior to pushing
any changes without affecting how the set of commands are matched
against the system.
after:
description:
- The ordered set of commands to append to the end of the command
stack if a change needs to be made. Just like with I(before) this
allows the playbook designer to append a set of commands to be
executed after the command set.
match:
description:
- Instructs the module on the way to perform the matching of
the set of commands against the current device config. If
match is set to I(line), commands are matched line by line. If
match is set to I(strict), command lines are matched with respect
to position. If match is set to I(exact), command lines
must be an equal match. Finally, if match is set to I(none), the
module will not attempt to compare the source configuration with
the running configuration on the remote device.
default: line
choices: ['line', 'strict', 'exact', 'none']
replace:
description:
- Instructs the module on the way to perform the configuration
on the device. If the replace argument is set to I(line) then
the modified lines are pushed to the device in configuration
mode. If the replace argument is set to I(block) then the entire
command block is pushed to the device in configuration mode if any
line is not correct.
default: line
choices: ['line', 'block', 'config']
config:
description:
- The module, by default, will connect to the remote device and
retrieve the current running-config to use as a base for comparing
against the contents of source. There are times when it is not
desirable to have the task get the current running-config for
every task in a playbook. The I(config) argument allows the
implementer to pass in the configuration to use as the base
config for comparison.
backup:
description:
- This argument will cause the module to create a full backup of
the current C(running-config) from the remote device before any
changes are made. If the C(backup_options) value is not given,
the backup file is written to the C(backup) folder in the playbook
root directory. If the directory does not exist, it is created.
type: bool
default: 'no'
comment:
description:
- Allows a commit description to be specified to be included
when the configuration is committed. If the configuration is
not changed or committed, this argument is ignored.
default: 'configured by enos_config'
admin:
description:
- Enters into administration configuration mode for making config
changes to the device.
type: bool
default: 'no'
backup_options:
description:
- This is a dict object containing configurable options related to backup file path.
        The value of this option is read only when C(backup) is set to I(yes); if C(backup) is set
to I(no) this option will be silently ignored.
suboptions:
filename:
description:
          - The filename to be used to store the backup configuration. If the filename
is not given it will be generated based on the hostname, current time and date
in format defined by <hostname>_config.<current-date>@<current-time>
dir_path:
description:
- This option provides the path ending with directory name in which the backup
configuration file will be stored. If the directory does not exist it will be first
created and the filename is either the value of C(filename) or default filename
as described in C(filename) options description. If the path value is not given
in that case a I(backup) directory will be created in the current working directory
and backup configuration will be copied in C(filename) within I(backup) directory.
type: path
type: dict
version_added: "2.8"
"""
EXAMPLES = """
- name: configure top level configuration
  enos_config:
    lines: hostname {{ inventory_hostname }}
- name: configure interface settings
enos_config:
lines:
- enable
- ip ospf enable
parents: interface ip 13
- name: load a config from disk and replace the current config
enos_config:
src: config.cfg
backup: yes
- name: configurable backup path
enos_config:
src: config.cfg
backup: yes
backup_options:
filename: backup.cfg
dir_path: /home/user
"""
RETURN = """
updates:
description: The set of commands that will be pushed to the remote device
returned: Only when lines is specified.
type: list
sample: ['...', '...']
backup_path:
description: The full path to the backup file
returned: when backup is yes
type: str
sample: /playbooks/ansible/backup/enos01.2016-07-16@22:28:34
"""
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.enos.enos import load_config, get_config
from ansible.module_utils.network.enos.enos import enos_argument_spec
from ansible.module_utils.network.enos.enos import check_args
from ansible.module_utils.network.common.config import NetworkConfig, dumps
DEFAULT_COMMIT_COMMENT = 'configured by enos_config'
def get_running_config(module):
contents = module.params['config']
if not contents:
contents = get_config(module)
return NetworkConfig(indent=1, contents=contents)
def get_candidate(module):
candidate = NetworkConfig(indent=1)
if module.params['src']:
candidate.load(module.params['src'])
elif module.params['lines']:
parents = module.params['parents'] or list()
candidate.add(module.params['lines'], parents=parents)
return candidate
def run(module, result):
match = module.params['match']
replace = module.params['replace']
replace_config = replace == 'config'
path = module.params['parents']
comment = module.params['comment']
admin = module.params['admin']
check_mode = module.check_mode
candidate = get_candidate(module)
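    # Unless the whole config is being replaced (or matching is disabled),
    # only push the lines that differ from the current running config.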
if match != 'none' and replace != 'config':
contents = get_running_config(module)
configobj = NetworkConfig(contents=contents, indent=1)
commands = candidate.difference(configobj, path=path, match=match,
replace=replace)
else:
commands = candidate.items
if commands:
commands = dumps(commands, 'commands').split('\n')
if any((module.params['lines'], module.params['src'])):
if module.params['before']:
commands[:0] = module.params['before']
if module.params['after']:
commands.extend(module.params['after'])
result['commands'] = commands
diff = load_config(module, commands)
if diff:
result['diff'] = dict(prepared=diff)
result['changed'] = True
def main():
"""main entry point for module execution
"""
backup_spec = dict(
filename=dict(),
dir_path=dict(type='path')
)
argument_spec = dict(
src=dict(type='path'),
lines=dict(aliases=['commands'], type='list'),
parents=dict(type='list'),
before=dict(type='list'),
after=dict(type='list'),
match=dict(default='line', choices=['line', 'strict', 'exact', 'none']),
replace=dict(default='line', choices=['line', 'block', 'config']),
config=dict(),
backup=dict(type='bool', default=False),
backup_options=dict(type='dict', options=backup_spec),
comment=dict(default=DEFAULT_COMMIT_COMMENT),
admin=dict(type='bool', default=False)
)
argument_spec.update(enos_argument_spec)
mutually_exclusive = [('lines', 'src'),
('parents', 'src')]
required_if = [('match', 'strict', ['lines']),
('match', 'exact', ['lines']),
('replace', 'block', ['lines']),
('replace', 'config', ['src'])]
module = AnsibleModule(argument_spec=argument_spec,
mutually_exclusive=mutually_exclusive,
required_if=required_if,
supports_check_mode=True)
warnings = list()
check_args(module, warnings)
result = dict(changed=False, warnings=warnings)
if module.params['backup']:
result['__backup__'] = get_config(module)
run(module, result)
module.exit_json(**result)
if __name__ == '__main__':
main()
| gpl-3.0 | 214,183,534,716,526,530 | -2,761,737,565,834,270,000 | 35.06129 | 98 | 0.65963 | false |
lizardsystem/lizard-measure | lizard_measure/migrations/0010_auto__del_score__del_measuringrod__del_field_measurestatusmoment_is_pl.py | 1 | 23606 | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Deleting model 'Score'
db.delete_table('lizard_measure_score')
# Deleting model 'MeasuringRod'
db.delete_table('lizard_measure_measuringrod')
# Deleting field 'MeasureStatusMoment.is_planning'
db.delete_column('lizard_measure_measurestatusmoment', 'is_planning')
# Deleting field 'MeasureStatusMoment.date'
db.delete_column('lizard_measure_measurestatusmoment', 'date')
# Adding field 'MeasureStatusMoment.planning_date'
db.add_column('lizard_measure_measurestatusmoment', 'planning_date', self.gf('django.db.models.fields.DateField')(null=True, blank=True), keep_default=False)
# Adding field 'MeasureStatusMoment.realisation_date'
db.add_column('lizard_measure_measurestatusmoment', 'realisation_date', self.gf('django.db.models.fields.DateField')(null=True, blank=True), keep_default=False)
# Deleting field 'Measure.total_costs'
db.delete_column('lizard_measure_measure', 'total_costs')
# Adding field 'Measure.valid'
db.add_column('lizard_measure_measure', 'valid', self.gf('django.db.models.fields.NullBooleanField')(default=False, null=True, blank=True), keep_default=False)
# Adding field 'Measure.geom'
db.add_column('lizard_measure_measure', 'geom', self.gf('django.contrib.gis.db.models.fields.GeometryField')(null=True, blank=True), keep_default=False)
def backwards(self, orm):
# Adding model 'Score'
db.create_table('lizard_measure_score', (
('gep', self.gf('django.db.models.fields.FloatField')(null=True, blank=True)),
('area_ident', self.gf('django.db.models.fields.CharField')(max_length=32, null=True, blank=True)),
('ascending', self.gf('django.db.models.fields.NullBooleanField')(null=True, blank=True)),
('mep', self.gf('django.db.models.fields.FloatField')(null=True, blank=True)),
('measuring_rod', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['lizard_measure.MeasuringRod'])),
('limit_bad_insufficient', self.gf('django.db.models.fields.FloatField')(null=True, blank=True)),
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('area', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['lizard_area.Area'], null=True, blank=True)),
('target_2027', self.gf('django.db.models.fields.FloatField')(null=True, blank=True)),
('limit_insufficient_moderate', self.gf('django.db.models.fields.FloatField')(null=True, blank=True)),
('target_2015', self.gf('django.db.models.fields.FloatField')(null=True, blank=True)),
))
db.send_create_signal('lizard_measure', ['Score'])
# Adding model 'MeasuringRod'
db.create_table('lizard_measure_measuringrod', (
('group', self.gf('django.db.models.fields.CharField')(max_length=128, null=True, blank=True)),
('sign', self.gf('django.db.models.fields.CharField')(max_length=128, null=True, blank=True)),
('sub_measuring_rod', self.gf('django.db.models.fields.CharField')(max_length=128, null=True, blank=True)),
('measuring_rod', self.gf('django.db.models.fields.CharField')(max_length=128, null=True, blank=True)),
('id', self.gf('django.db.models.fields.IntegerField')(primary_key=True)),
('unit', self.gf('django.db.models.fields.CharField')(max_length=128, null=True, blank=True)),
))
db.send_create_signal('lizard_measure', ['MeasuringRod'])
# Adding field 'MeasureStatusMoment.is_planning'
db.add_column('lizard_measure_measurestatusmoment', 'is_planning', self.gf('django.db.models.fields.BooleanField')(default=False), keep_default=False)
# Adding field 'MeasureStatusMoment.date'
db.add_column('lizard_measure_measurestatusmoment', 'date', self.gf('django.db.models.fields.DateField')(null=True, blank=True), keep_default=False)
# Deleting field 'MeasureStatusMoment.planning_date'
db.delete_column('lizard_measure_measurestatusmoment', 'planning_date')
# Deleting field 'MeasureStatusMoment.realisation_date'
db.delete_column('lizard_measure_measurestatusmoment', 'realisation_date')
# Adding field 'Measure.total_costs'
db.add_column('lizard_measure_measure', 'total_costs', self.gf('django.db.models.fields.IntegerField')(null=True, blank=True), keep_default=False)
# Deleting field 'Measure.valid'
db.delete_column('lizard_measure_measure', 'valid')
# Deleting field 'Measure.geom'
db.delete_column('lizard_measure_measure', 'geom')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'lizard_area.area': {
'Meta': {'ordering': "('name',)", 'object_name': 'Area', '_ormbases': ['lizard_area.Communique']},
'area_class': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'communique_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['lizard_area.Communique']", 'unique': 'True', 'primary_key': 'True'}),
'data_administrator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_area.DataAdministrator']"}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_area.Area']", 'null': 'True', 'blank': 'True'})
},
'lizard_area.communique': {
'Meta': {'object_name': 'Communique', '_ormbases': ['lizard_geo.GeoObject']},
'code': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'default': "''"}),
'geoobject_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['lizard_geo.GeoObject']", 'unique': 'True', 'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'})
},
'lizard_area.dataadministrator': {
'Meta': {'object_name': 'DataAdministrator'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128'})
},
'lizard_geo.geoobject': {
'Meta': {'object_name': 'GeoObject'},
'geo_object_group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_geo.GeoObjectGroup']"}),
'geometry': ('django.contrib.gis.db.models.fields.GeometryField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'max_length': '80'})
},
'lizard_geo.geoobjectgroup': {
'Meta': {'object_name': 'GeoObjectGroup'},
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50', 'db_index': 'True'}),
'source_log': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'})
},
'lizard_measure.fundingorganization': {
'Meta': {'object_name': 'FundingOrganization'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'measure': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_measure.Measure']"}),
'organization': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_measure.Organization']"}),
'percentage': ('django.db.models.fields.FloatField', [], {})
},
'lizard_measure.krwstatus': {
'Meta': {'object_name': 'KRWStatus'},
'code': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'valid': ('django.db.models.fields.NullBooleanField', [], {'default': 'None', 'null': 'True', 'blank': 'True'})
},
'lizard_measure.krwwatertype': {
'Meta': {'object_name': 'KRWWatertype'},
'code': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'group': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'valid': ('django.db.models.fields.NullBooleanField', [], {'default': 'None', 'null': 'True', 'blank': 'True'})
},
'lizard_measure.measure': {
'Meta': {'ordering': "('id',)", 'object_name': 'Measure'},
'aggregation_type': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'areas': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'area_measure_set'", 'blank': 'True', 'to': "orm['lizard_area.Area']"}),
'categories': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['lizard_measure.MeasureCategory']", 'symmetrical': 'False', 'blank': 'True'}),
'datetime_in_source': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '512', 'null': 'True', 'blank': 'True'}),
'executive': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'executive_measure_set'", 'null': 'True', 'to': "orm['lizard_measure.Organization']"}),
'exploitation_costs': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'funding_organizations': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['lizard_measure.Organization']", 'through': "orm['lizard_measure.FundingOrganization']", 'symmetrical': 'False'}),
'geom': ('django.contrib.gis.db.models.fields.GeometryField', [], {'null': 'True', 'blank': 'True'}),
'geometry': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_geo.GeoObject']", 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'max_length': '64', 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'import_raw': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'import_source': ('django.db.models.fields.IntegerField', [], {'default': '3'}),
'initiator': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'initiator_measure_set'", 'null': 'True', 'to': "orm['lizard_measure.Organization']"}),
'investment_costs': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'is_KRW_measure': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'is_indicator': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'measure_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_measure.MeasureType']"}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_measure.Measure']", 'null': 'True', 'blank': 'True'}),
'period': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_measure.MeasurePeriod']", 'null': 'True', 'blank': 'True'}),
'read_only': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'responsible_department': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'status_moments': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['lizard_measure.MeasureStatus']", 'through': "orm['lizard_measure.MeasureStatusMoment']", 'symmetrical': 'False'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'unit': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_measure.Unit']"}),
'valid': ('django.db.models.fields.NullBooleanField', [], {'default': 'False', 'null': 'True', 'blank': 'True'}),
'value': ('django.db.models.fields.FloatField', [], {}),
'waterbodies': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['lizard_measure.WaterBody']", 'symmetrical': 'False', 'blank': 'True'})
},
'lizard_measure.measurecategory': {
'Meta': {'object_name': 'MeasureCategory'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'valid': ('django.db.models.fields.NullBooleanField', [], {'default': 'None', 'null': 'True', 'blank': 'True'})
},
'lizard_measure.measureperiod': {
'Meta': {'ordering': "('start_date', 'end_date')", 'object_name': 'MeasurePeriod'},
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'end_date': ('django.db.models.fields.DateField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'start_date': ('django.db.models.fields.DateField', [], {}),
'valid': ('django.db.models.fields.NullBooleanField', [], {'default': 'None', 'null': 'True', 'blank': 'True'})
},
'lizard_measure.measurestatus': {
'Meta': {'ordering': "('-value',)", 'object_name': 'MeasureStatus'},
'color': ('lizard_map.models.ColorField', [], {'max_length': '8'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'valid': ('django.db.models.fields.NullBooleanField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'value': ('django.db.models.fields.FloatField', [], {'default': '0.0'})
},
'lizard_measure.measurestatusmoment': {
'Meta': {'ordering': "('measure__id', 'status__value')", 'object_name': 'MeasureStatusMoment'},
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'exploitation_expenditure': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'investment_expenditure': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'measure': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_measure.Measure']"}),
'planning_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'realisation_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_measure.MeasureStatus']"})
},
'lizard_measure.measuretype': {
'Meta': {'ordering': "('code',)", 'object_name': 'MeasureType'},
'code': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'combined_name': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {}),
'group': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'harmonisation': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'klass': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'subcategory': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'units': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['lizard_measure.Unit']", 'symmetrical': 'False', 'blank': 'True'}),
'valid': ('django.db.models.fields.NullBooleanField', [], {'default': 'None', 'null': 'True', 'blank': 'True'})
},
'lizard_measure.organization': {
'Meta': {'ordering': "('description',)", 'unique_together': "(('source', 'code'),)", 'object_name': 'Organization'},
'code': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'group': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'source': ('django.db.models.fields.IntegerField', [], {'default': '5'}),
'valid': ('django.db.models.fields.NullBooleanField', [], {'default': 'None', 'null': 'True', 'blank': 'True'})
},
'lizard_measure.steeringparameter': {
'Meta': {'object_name': 'SteeringParameter'},
'area': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_area.Area']"}),
'fews_parameter': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'target_maximum': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'target_minimum': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'})
},
'lizard_measure.unit': {
'Meta': {'object_name': 'Unit'},
'code': ('django.db.models.fields.CharField', [], {'max_length': '20', 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'conversion_factor': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'dimension': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'blank': 'True'}),
'group': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'valid': ('django.db.models.fields.NullBooleanField', [], {'default': 'None', 'null': 'True', 'blank': 'True'})
},
'lizard_measure.waterbody': {
'Meta': {'object_name': 'WaterBody'},
'area': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_area.Area']", 'null': 'True', 'blank': 'True'}),
'area_ident': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'krw_status': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_measure.KRWStatus']", 'null': 'True', 'blank': 'True'}),
'krw_watertype': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_measure.KRWWatertype']", 'null': 'True', 'blank': 'True'})
}
}
complete_apps = ['lizard_measure']
| gpl-3.0 | 6,286,981,118,181,760,000 | 231,448,089,987,500,480 | 77.949833 | 219 | 0.57604 | false |
konrad/kufpybio | kufpybiotools/generate_igr_gff.py | 1 | 1881 | #!/usr/bin/env python
__description__ = ""
__author__ = "Konrad Foerstner <konrad@foerstner.org>"
__copyright__ = "2013 by Konrad Foerstner <konrad@foerstner.org>"
__license__ = "ISC license"
__email__ = "konrad@foerstner.org"
__version__ = ""
import argparse
import csv
import sys
sys.path.append(".")
from kufpybio.gff3 import Gff3Parser, Gff3Entry
from kufpybio.gene import Gene
from kufpybio.igrfinder import IGRFinder
parser = argparse.ArgumentParser(description=__description__)
parser.add_argument("gff_file", type=argparse.FileType("r"))
parser.add_argument("output_file", type=argparse.FileType("w"))
parser.add_argument("--margin", type=int, default=0)
parser.add_argument("--plus_only", default=False, action="store_true")
args = parser.parse_args()
# Build gene list
gene_list = []
gff_parser = Gff3Parser()
region_entry = None
for entry in gff_parser.entries(args.gff_file):
if entry.feature == "region":
region_entry = entry
continue
gene_list.append(Gene(
entry.seq_id, "", "", entry.start, entry.end,
entry.strand))
# Find IGRs and generate GFF file
igr_finder = IGRFinder()
args.output_file.write("##gff-version 3\n")
strands = ["+", "-"]
if args.plus_only is True:
strands = ["+"]
for start, end in igr_finder.find_igrs(gene_list, region_entry.end):
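    # Shrink each intergenic region by the given margin on both sides and skip
    # regions that collapse to zero or negative length.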
start = start + args.margin
end = end - args.margin
if end <= start:
continue
for strand in strands:
gff3_entry = Gff3Entry({
"seq_id" : region_entry.seq_id,
"source" : "IGR",
"feature" : "IGR",
"start" : start,
"end" : end,
"score" : ".",
"strand" : strand,
"phase" : ".",
"attributes" : "ID=IGR_%s_%s_to_%s" % (
region_entry.seq_id, start, end)})
args.output_file.write(str(gff3_entry) + "\n")
| isc | 3,772,476,471,545,913,000 | 2,789,608,438,891,745,300 | 30.35 | 70 | 0.61563 | false |
ininex/geofire-python | resource/lib/python2.7/site-packages/gcloud/bigquery/client.py | 3 | 10779 | # Copyright 2015 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Client for interacting with the Google BigQuery API."""
from gcloud.client import JSONClient
from gcloud.bigquery.connection import Connection
from gcloud.bigquery.dataset import Dataset
from gcloud.bigquery.job import CopyJob
from gcloud.bigquery.job import ExtractTableToStorageJob
from gcloud.bigquery.job import LoadTableFromStorageJob
from gcloud.bigquery.job import QueryJob
from gcloud.bigquery.query import QueryResults
class Client(JSONClient):
"""Client to bundle configuration needed for API requests.
:type project: str
:param project: the project which the client acts on behalf of. Will be
passed when creating a dataset / job. If not passed,
falls back to the default inferred from the environment.
:type credentials: :class:`oauth2client.client.OAuth2Credentials` or
:class:`NoneType`
:param credentials: The OAuth2 Credentials to use for the connection
owned by this client. If not passed (and if no ``http``
object is passed), falls back to the default inferred
from the environment.
:type http: :class:`httplib2.Http` or class that defines ``request()``.
:param http: An optional HTTP object to make requests. If not passed, an
``http`` object is created that is bound to the
``credentials`` for the current object.
"""
_connection_class = Connection
def list_datasets(self, include_all=False, max_results=None,
page_token=None):
"""List datasets for the project associated with this client.
See:
https://cloud.google.com/bigquery/docs/reference/v2/datasets/list
:type include_all: boolean
:param include_all: True if results include hidden datasets.
:type max_results: int
:param max_results: maximum number of datasets to return, If not
passed, defaults to a value set by the API.
:type page_token: str
:param page_token: opaque marker for the next "page" of datasets. If
not passed, the API will return the first page of
datasets.
:rtype: tuple, (list, str)
:returns: list of :class:`gcloud.bigquery.dataset.Dataset`, plus a
"next page token" string: if the token is not None,
indicates that more datasets can be retrieved with another
call (pass that value as ``page_token``).
"""
params = {}
if include_all:
params['all'] = True
if max_results is not None:
params['maxResults'] = max_results
if page_token is not None:
params['pageToken'] = page_token
path = '/projects/%s/datasets' % (self.project,)
resp = self.connection.api_request(method='GET', path=path,
query_params=params)
datasets = [Dataset.from_api_repr(resource, self)
for resource in resp.get('datasets', ())]
return datasets, resp.get('nextPageToken')
def dataset(self, dataset_name):
"""Construct a dataset bound to this client.
:type dataset_name: str
:param dataset_name: Name of the dataset.
:rtype: :class:`gcloud.bigquery.dataset.Dataset`
:returns: a new ``Dataset`` instance
"""
return Dataset(dataset_name, client=self)
def job_from_resource(self, resource):
"""Detect correct job type from resource and instantiate.
:type resource: dict
:param resource: one job resource from API response
:rtype: One of:
:class:`gcloud.bigquery.job.LoadTableFromStorageJob`,
:class:`gcloud.bigquery.job.CopyJob`,
:class:`gcloud.bigquery.job.ExtractTableToStorageJob`,
:class:`gcloud.bigquery.job.QueryJob`,
:class:`gcloud.bigquery.job.RunSyncQueryJob`
:returns: the job instance, constructed via the resource
"""
config = resource['configuration']
if 'load' in config:
return LoadTableFromStorageJob.from_api_repr(resource, self)
elif 'copy' in config:
return CopyJob.from_api_repr(resource, self)
elif 'extract' in config:
return ExtractTableToStorageJob.from_api_repr(resource, self)
elif 'query' in config:
return QueryJob.from_api_repr(resource, self)
raise ValueError('Cannot parse job resource')
def list_jobs(self, max_results=None, page_token=None, all_users=None,
state_filter=None):
"""List jobs for the project associated with this client.
See:
https://cloud.google.com/bigquery/docs/reference/v2/jobs/list
:type max_results: int
:param max_results: maximum number of jobs to return, If not
passed, defaults to a value set by the API.
:type page_token: str
:param page_token: opaque marker for the next "page" of jobs. If
not passed, the API will return the first page of
jobs.
:type all_users: boolean
:param all_users: if true, include jobs owned by all users in the
project.
:type state_filter: str
:param state_filter: if passed, include only jobs matching the given
state. One of
* ``"done"``
* ``"pending"``
* ``"running"``
:rtype: tuple, (list, str)
:returns: list of job instances, plus a "next page token" string:
if the token is not ``None``, indicates that more jobs can be
retrieved with another call, passing that value as
``page_token``).
"""
params = {'projection': 'full'}
if max_results is not None:
params['maxResults'] = max_results
if page_token is not None:
params['pageToken'] = page_token
if all_users is not None:
params['allUsers'] = all_users
if state_filter is not None:
params['stateFilter'] = state_filter
path = '/projects/%s/jobs' % (self.project,)
resp = self.connection.api_request(method='GET', path=path,
query_params=params)
jobs = [self.job_from_resource(resource) for resource in resp['jobs']]
return jobs, resp.get('nextPageToken')
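    # Editor's addition (hedged sketch): listing only currently running jobs;
    # the ``name`` attribute is assumed from the job classes, not defined here.
    #
    #   running, _ = client.list_jobs(state_filter='running')
    #   for job in running:
    #       print(job.name)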
def load_table_from_storage(self, job_name, destination, *source_uris):
"""Construct a job for loading data into a table from CloudStorage.
See:
https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.load
:type job_name: str
:param job_name: Name of the job.
:type destination: :class:`gcloud.bigquery.table.Table`
:param destination: Table into which data is to be loaded.
:type source_uris: sequence of string
:param source_uris: URIs of data files to be loaded; in format
``gs://<bucket_name>/<object_name_or_glob>``.
:rtype: :class:`gcloud.bigquery.job.LoadTableFromStorageJob`
:returns: a new ``LoadTableFromStorageJob`` instance
"""
return LoadTableFromStorageJob(job_name, destination, source_uris,
client=self)
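    # Editor's addition (hedged sketch): typical use constructs the job and
    # then starts it. The dataset/table/bucket names are hypothetical, and
    # ``Dataset.table()`` and ``begin()`` are assumed from the wider library,
    # not defined in this module.
    #
    #   table = client.dataset('my_dataset').table('my_table')
    #   job = client.load_table_from_storage(
    #       'load-job-1', table, 'gs://my-bucket/data-*.csv')
    #   job.begin()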
def copy_table(self, job_name, destination, *sources):
"""Construct a job for copying one or more tables into another table.
See:
https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.copy
:type job_name: str
:param job_name: Name of the job.
:type destination: :class:`gcloud.bigquery.table.Table`
:param destination: Table into which data is to be copied.
:type sources: sequence of :class:`gcloud.bigquery.table.Table`
:param sources: tables to be copied.
:rtype: :class:`gcloud.bigquery.job.CopyJob`
:returns: a new ``CopyJob`` instance
"""
return CopyJob(job_name, destination, sources, client=self)
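    # Editor's addition (hedged sketch; the table objects and ``begin()`` are
    # assumed from the wider library):
    #
    #   job = client.copy_table('copy-job-1', dest_table, source_table)
    #   job.begin()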
def extract_table_to_storage(self, job_name, source, *destination_uris):
"""Construct a job for extracting a table into Cloud Storage files.
See:
https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.extract
:type job_name: str
:param job_name: Name of the job.
:type source: :class:`gcloud.bigquery.table.Table`
:param source: table to be extracted.
:type destination_uris: sequence of string
:param destination_uris: URIs of CloudStorage file(s) into which
table data is to be extracted; in format
``gs://<bucket_name>/<object_name_or_glob>``.
:rtype: :class:`gcloud.bigquery.job.ExtractTableToStorageJob`
:returns: a new ``ExtractTableToStorageJob`` instance
"""
return ExtractTableToStorageJob(job_name, source, destination_uris,
client=self)
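    # Editor's addition (hedged sketch; the bucket URI is hypothetical and
    # ``begin()`` is assumed from the job API):
    #
    #   job = client.extract_table_to_storage(
    #       'extract-job-1', source_table, 'gs://my-bucket/export-*.csv')
    #   job.begin()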
def run_async_query(self, job_name, query):
"""Construct a job for running a SQL query asynchronously.
See:
https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.query
:type job_name: str
:param job_name: Name of the job.
:type query: str
:param query: SQL query to be executed
:rtype: :class:`gcloud.bigquery.job.QueryJob`
:returns: a new ``QueryJob`` instance
"""
return QueryJob(job_name, query, client=self)
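    # Editor's addition (hedged sketch): start the query and let it run in the
    # background; ``begin()`` is assumed from the job API, not defined here.
    #
    #   query_job = client.run_async_query(
    #       'query-job-1', 'SELECT COUNT(*) FROM my_dataset.my_table')
    #   query_job.begin()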
def run_sync_query(self, query):
"""Run a SQL query synchronously.
:type query: str
:param query: SQL query to be executed
:rtype: :class:`gcloud.bigquery.query.QueryResults`
:returns: a new ``QueryResults`` instance
"""
return QueryResults(query, client=self)
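    # Editor's addition (hedged sketch): ``run()`` and ``rows`` are assumed
    # attributes of ``QueryResults`` and are not defined in this module.
    #
    #   results = client.run_sync_query('SELECT 1')
    #   results.run()
    #   print(results.rows)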
| mit | -4,223,986,437,829,316,600 | -6,636,682,711,477,721,000 | 38.196364 | 86 | 0.608127 | false |
jakobmoss/tsa | utils/makeweights.py | 1 | 2350 | # -*- coding: utf-8 -*-
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Time Series Analysis -- Generate statistical weights from scatter
#
# Author: Jakob Rørsted Mosumgaard
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
###############################################################################
# Modules
###############################################################################
from __future__ import print_function, with_statement, division
import numpy as np
import bottleneck as bn
###############################################################################
# Functions
###############################################################################
def genweight(datname, dpath, wpath):
"""
Combine time series with statistical weights calculated from scatter
Arguments:
- `datname`: Identifier of data file
- `dpath` : Path to data file (time series).
- `wpath` : Path to scatter file (with same time points!)
"""
# Pretty print
print('Generating weights for {0} !'.format(dpath))
# Load data and weights
t, d = np.loadtxt(dpath, unpack=True)
tt, sig = np.loadtxt(wpath, unpack=True)
# Check that times are indeed the same
tdif = t - tt
if tdif.any() != 0:
print('Error! Not the same time points! Quitting!')
exit()
# Moving variance (Hans: M = 50 - 100)
M = 70
movstd = bn.move_std(sig, M, min_count=1)
movvar = np.square(movstd)
# Remove first point
x = 1
t = t[x:]
d = d[x:]
movvar = movvar[x:]
# Calculate weights from scatter (1 / variance)
w = np.divide(1.0, movvar)
# Save
    outfile = datname + '_with-weights.txt'
np.savetxt(outfile, np.transpose([t, d, w]), fmt='%.15e', delimiter='\t')
# Done!
print('Done!\n')
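# Editor's addition (hedged sketch): the weighting step above in isolation --
# a moving standard deviation turned into 1/variance weights. The input array
# is synthetic example data, not part of the original script.
#
#   import numpy as np
#   import bottleneck as bn
#   sig = np.random.normal(size=1000)
#   w = 1.0 / np.square(bn.move_std(sig, 70, min_count=1))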
###############################################################################
# Script
###############################################################################
if __name__ == "__main__":
# Definitions
datdir = '../../data/'
ext = '.txt'
append = '-high'
# Run for star 1
star = 'star01'
genweight(star, datdir + star + ext, star + append + ext)
# Run for star 2
star = 'star02'
genweight(star, datdir + star + ext, star + append + ext)
| mit | 7,564,760,995,588,954,000 | -914,473,936,628,255,900 | 28.3625 | 79 | 0.43593 | false |
erstis-go-botting/sexy-bot | misc.py | 1 | 1888 | import os
# Checks whether settings.ini should be generated. If universe, username and
# password are not given, it generates a settings.ini with the default account.
# The generator will only write a universe 82 config if the flag argument is
# given as True (to make sure that universe 82 is really intended).
def settings_generator(universe = 82, username = 'defaultName', password = 'defaultPassword', flag=False):
path = os.path.normcase('settings/settings.ini')
if (os.path.isfile('settings/settings.ini')):
print("settings file found, stopping now.")
return
if (universe == 82 and not(flag)) or (username == 'defaultName') or (password == 'defaultPassword'):
print("Not all fields specified, fallback on default configuration")
universe = 82
username = 'defaultName'
password = 'defaultPassword'
if not (os.path.isdir('settings')):
        os.makedirs('settings')
with open(path,'w') as foo:
foo.write('[credentials]\nuniverse = '+ str(universe) +'\npassword = '+password+'\nusername = '+username)
print("Settings.ini generated")
def force_settings_generator(universe = 82, username = 'defaultName', password = 'defaultPassword', flag=False):
path = os.path.normcase('settings/settings.ini')
if not (os.path.isfile('settings/settings.ini')):
settings_generator(universe, username, password, flag)
return
if (universe == 82 and not(flag)) or (username == 'defaultName') or (password == 'defaultPassword'):
print("Not all fields specified, fallback on default configuration")
universe = 82
username = 'defaultName'
password = 'defaultPassword'
with open(path,'w') as foo:
foo.write('[credentials]\nuniverse = '+ str(universe) +'\npassword = '+password+'\nusername = '+username)
print("Settings.ini generated")
#settings_generator()
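# Editor's addition (hedged examples): the credentials below are placeholders,
# not real accounts.
#
#   settings_generator(universe=82, username='someName', password='somePassword', flag=True)
#   force_settings_generator(universe=5, username='someName', password='somePassword')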
| mit | 538,324,773,478,887,040 | -6,320,503,792,804,443,000 | 50.027027 | 146 | 0.678496 | false |
ActiveState/code | recipes/Python/275366_Email_address_leech/recipe-275366.py | 1 | 1624 | import re
def test():
text = \
''' You can contact us at myname@server.site.com
or at yourname AT server DOT site DOT com.
Also at o u r n a m e @ s e r v e r dot s i t e dot c o m
and t.h.e.i.r.n.a.m.e at server dot s/i/t/e DOT COM.
'''
for email in emailLeech(text): print email
DOMAINS = ["com","edu","net","org","gov","us"] #.. and so on
FLAGS = re.IGNORECASE | re.VERBOSE
AT = r'(?: @ | \b A \s* T \b)'
ADDRESSPART = r'\b (?: \w+ | \w (?:(?:\s+|\W) \w)*) \b'
DOMAIN = r'(?:%s)' % '|'.join([r"(?:\s*|\W)".join(domain) for domain in DOMAINS])
NONWORD = re.compile(r'\W+')
DOT_REGEX = re.compile(r'(?: \. | \b D \s* O \s* T \b)', FLAGS)
EMAIL_REGEX = re.compile(
(r'(?P<name>%s) \W* %s \W*' % (ADDRESSPART,AT)) +
r'(?P<site>(?: %s \W* %s \W*)+)' % (ADDRESSPART, DOT_REGEX.pattern) +
r'(?P<domain>%s)' % DOMAIN, FLAGS)
def emailLeech(text):
''' An iterator over recognized email addresses within text'''
while (True):
match = EMAIL_REGEX.search(text)
if not match: break
parts = [match.group("name")] + \
DOT_REGEX.split(match.group("site")) + \
[match.group("domain")]
# discard non word chars
parts = [NONWORD.sub('',part) for part in parts]
# discard all empty parts and make lowercase
parts = [part.lower() for part in parts if len(part)>0]
# join the parts
yield "%s@%s.%s" % (parts[0], '.'.join(parts[1:-1]), parts[-1])
text = text[match.end():]
if __name__ == '__main__': test()
| mit | 9,141,161,035,293,001,000 | 6,562,280,396,227,097,000 | 35.088889 | 80 | 0.513547 | false |
mosdef-hub/foyer | foyer/tests/test_forcefield_parameters.py | 1 | 10029 | import numpy as np
import pytest
from foyer import Forcefield, forcefields
from foyer.exceptions import MissingForceError, MissingParametersError
from foyer.forcefield import get_available_forcefield_loaders
from foyer.tests.base_test import BaseTest
from foyer.tests.utils import get_fn
@pytest.mark.skipif(
condition="load_GAFF"
not in map(lambda func: func.__name__, get_available_forcefield_loaders()),
reason="GAFF Plugin is not installed",
)
class TestForcefieldParameters(BaseTest):
@pytest.fixture(scope="session")
def gaff(self):
return forcefields.load_GAFF()
def test_gaff_missing_group(self, gaff):
with pytest.raises(ValueError):
gaff.get_parameters("missing", key=[])
def test_gaff_non_string_keys(self, gaff):
with pytest.raises(TypeError):
gaff.get_parameters("atoms", key=1)
def test_gaff_bond_parameters_gaff(self, gaff):
bond_params = gaff.get_parameters("harmonic_bonds", ["br", "ca"])
assert np.isclose(bond_params["length"], 0.19079)
assert np.isclose(bond_params["k"], 219827.36)
def test_gaff_bond_params_reversed(self, gaff):
assert gaff.get_parameters(
"harmonic_bonds", ["ca", "br"]
        ) == gaff.get_parameters("harmonic_bonds", ["br", "ca"])
def test_gaff_missing_bond_parameters(self, gaff):
with pytest.raises(MissingParametersError):
gaff.get_parameters("harmonic_bonds", ["str1", "str2"])
def test_gaff_angle_parameters(self, gaff):
angle_params = gaff.get_parameters("harmonic_angles", ["f", "c1", "f"])
assert np.allclose(
[angle_params["theta"], angle_params["k"]],
[3.141592653589793, 487.0176],
)
def test_gaff_angle_parameters_reversed(self, gaff):
assert np.allclose(
list(
gaff.get_parameters(
"harmonic_angles", ["f", "c2", "ha"]
).values()
),
list(
gaff.get_parameters(
"harmonic_angles", ["ha", "c2", "f"]
).values()
),
)
def test_gaff_missing_angle_parameters(self, gaff):
with pytest.raises(MissingParametersError):
gaff.get_parameters("harmonic_angles", ["1", "2", "3"])
def test_gaff_periodic_proper_parameters(self, gaff):
periodic_proper_params = gaff.get_parameters(
"periodic_propers", ["c3", "c", "sh", "hs"]
)
assert np.allclose(periodic_proper_params["periodicity"], [2.0, 1.0])
assert np.allclose(
periodic_proper_params["k"], [9.414, 5.4392000000000005]
)
assert np.allclose(
periodic_proper_params["phase"],
[3.141592653589793, 3.141592653589793],
)
def test_gaff_periodic_proper_parameters_reversed(self, gaff):
assert np.allclose(
list(
gaff.get_parameters(
"periodic_propers", ["c3", "c", "sh", "hs"]
).values()
),
list(
gaff.get_parameters(
"periodic_propers", ["hs", "sh", "c", "c3"]
).values()
),
)
def test_gaff_periodic_improper_parameters(self, gaff):
periodic_improper_params = gaff.get_parameters(
"periodic_impropers", ["c", "", "o", "o"]
)
assert np.allclose(periodic_improper_params["periodicity"], [2.0])
assert np.allclose(periodic_improper_params["k"], [4.6024])
assert np.allclose(
periodic_improper_params["phase"], [3.141592653589793]
)
def test_gaff_periodic_improper_parameters_reversed(self, gaff):
assert np.allclose(
list(
gaff.get_parameters(
"periodic_impropers", ["c", "", "o", "o"]
).values()
),
list(
gaff.get_parameters(
"periodic_impropers", ["c", "o", "", "o"]
).values()
),
)
def test_gaff_proper_params_missing(self, gaff):
with pytest.raises(MissingParametersError):
gaff.get_parameters("periodic_impropers", ["a", "b", "c", "d"])
def test_gaff_scaling_factors(self, gaff):
assert gaff.lj14scale == 0.5
assert np.isclose(gaff.coulomb14scale, 0.833333333)
def test_opls_get_parameters_atoms(self, oplsaa):
atom_params = oplsaa.get_parameters("atoms", "opls_145")
assert atom_params["sigma"] == 0.355
assert atom_params["epsilon"] == 0.29288
def test_opls_get_parameters_atoms_list(self, oplsaa):
atom_params = oplsaa.get_parameters("atoms", ["opls_145"])
assert atom_params["sigma"] == 0.355
assert atom_params["epsilon"] == 0.29288
def test_opls_get_parameters_atom_class(self, oplsaa):
atom_params = oplsaa.get_parameters(
"atoms", "CA", keys_are_atom_classes=True
)
assert atom_params["sigma"] == 0.355
assert atom_params["epsilon"] == 0.29288
def test_opls_get_parameters_bonds(self, oplsaa):
bond_params = oplsaa.get_parameters(
"harmonic_bonds", ["opls_760", "opls_145"]
)
assert bond_params["length"] == 0.146
assert bond_params["k"] == 334720.0
def test_opls_get_parameters_bonds_reversed(self, oplsaa):
assert np.allclose(
list(
oplsaa.get_parameters(
"harmonic_bonds", ["opls_760", "opls_145"]
).values()
),
list(
oplsaa.get_parameters(
"harmonic_bonds", ["opls_145", "opls_760"]
).values()
),
)
def test_opls_get_parameters_bonds_atom_classes_reversed(self, oplsaa):
assert np.allclose(
list(
oplsaa.get_parameters(
"harmonic_bonds", ["C_2", "O_2"], True
).values()
),
list(
oplsaa.get_parameters(
"harmonic_bonds", ["O_2", "C_2"], True
).values()
),
)
def test_opls_get_parameters_angle(self, oplsaa):
angle_params = oplsaa.get_parameters(
"harmonic_angles", ["opls_166", "opls_772", "opls_167"]
)
assert np.allclose(
[angle_params["theta"], angle_params["k"]], [2.0943950239, 585.76]
)
def test_opls_get_parameters_angle_reversed(self, oplsaa):
assert np.allclose(
list(
oplsaa.get_parameters(
"harmonic_angles", ["opls_166", "opls_772", "opls_167"]
).values()
),
list(
oplsaa.get_parameters(
"harmonic_angles", ["opls_167", "opls_772", "opls_166"]
).values()
),
)
def test_opls_get_parameters_angle_atom_classes(self, oplsaa):
angle_params = oplsaa.get_parameters(
"harmonic_angles", ["CA", "C_2", "CA"], keys_are_atom_classes=True
)
assert np.allclose(
[angle_params["theta"], angle_params["k"]], [2.09439510239, 711.28]
)
def test_opls_get_parameters_angle_atom_classes_reversed(self, oplsaa):
assert np.allclose(
list(
oplsaa.get_parameters(
"harmonic_angles",
["CA", "C", "O"],
keys_are_atom_classes=True,
).values()
),
list(
oplsaa.get_parameters(
"harmonic_angles",
["O", "C", "CA"],
keys_are_atom_classes=True,
).values()
),
)
def test_opls_get_parameters_rb_proper(self, oplsaa):
proper_params = oplsaa.get_parameters(
"rb_propers", ["opls_215", "opls_215", "opls_235", "opls_269"]
)
assert np.allclose(
[
proper_params["c0"],
proper_params["c1"],
proper_params["c2"],
proper_params["c3"],
proper_params["c4"],
proper_params["c5"],
],
[2.28446, 0.0, -2.28446, 0.0, 0.0, 0.0],
)
def test_get_parameters_rb_proper_reversed(self, oplsaa):
assert np.allclose(
list(
oplsaa.get_parameters(
"rb_propers",
["opls_215", "opls_215", "opls_235", "opls_269"],
).values()
),
list(
oplsaa.get_parameters(
"rb_propers",
["opls_269", "opls_235", "opls_215", "opls_215"],
).values()
),
)
def test_opls_get_parameters_wildcard(self, oplsaa):
proper_params = oplsaa.get_parameters(
"rb_propers", ["", "opls_235", "opls_544", ""]
)
assert np.allclose(
[
proper_params["c0"],
proper_params["c1"],
proper_params["c2"],
proper_params["c3"],
proper_params["c4"],
proper_params["c5"],
],
[30.334, 0.0, -30.334, 0.0, 0.0, 0.0],
)
def test_opls_missing_force(self, oplsaa):
with pytest.raises(MissingForceError):
oplsaa.get_parameters("periodic_propers", key=["a", "b", "c", "d"])
def test_opls_scaling_factors(self, oplsaa):
assert oplsaa.lj14scale == 0.5
assert oplsaa.coulomb14scale == 0.5
def test_missing_scaling_factors(self):
ff = Forcefield(forcefield_files=(get_fn("validate_customtypes.xml")))
with pytest.raises(AttributeError):
assert ff.lj14scale
with pytest.raises(AttributeError):
assert ff.coulomb14scale
| mit | 1,481,030,475,830,956,800 | 1,089,676,041,681,139,800 | 33.582759 | 79 | 0.5172 | false |
kamilmowinski/nao_gesture | scripts/nao.py | 2 | 1999 | #!/usr/bin/env python
import rospy
import math
from naoqi import ALProxy
from my_kinnect.msg import NaoCoords
class NaoMonkey:
PART = {
'LShoulder': ['LShoulderPitch', 'LShoulderRoll'],
'RShoulder': ['RShoulderPitch', 'RShoulderRoll'],
'LElbow': ['LElbowYaw', 'LElbowRoll'],
'RElbow': ['RElbowYaw', 'RElbowRoll'],
'Head': ['HeadYaw', 'HeadPitch'],
}
LIMITS = {
'Head': [[-2.0, 2.0], [-0.67, 0.51]],
'LShoulder': [[-2.0, 2.0], [-0.31, 1.32]],
'RShoulder': [[-2.0, 2.0], [-1.32, 0.31]],
'LElbow': [[-2.0, 2.0], [-1.54, -0.03]],
'RElbow': [[-2.0, 2.0], [0.03, 1.54]],
}
def __init__(self):
rospy.init_node('nao_mykinect', anonymous=True)
self.listener = rospy.Subscriber('nao', NaoCoords, self.move)
ip = rospy.get_param('~ip', '10.104.16.141')
port = int(rospy.get_param('~port', '9559'))
self.al = ALProxy("ALAutonomousLife", ip, port)
self.postureProxy = ALProxy("ALRobotPosture", ip, port)
self.motionProxy = ALProxy("ALMotion", ip, port)
self.al.setState("disabled")
for part in ["Head", "LArm", "RArm"]:
self.motionProxy.setStiffnesses(part, 1.0)
rospy.loginfo(self.motionProxy.getSummary())
def move(self, coords):
part = coords.Part.data
angles1 = coords.Angles1
angles2 = coords.Angles2
angles = [float(angles1.data), float(angles2.data)]
speed = 1.0
if part not in NaoMonkey.PART:
error_msg = 'Wat? I Do not have ' + str(part)
rospy.loginfo(error_msg)
return
if len(NaoMonkey.PART[part]) != len(angles):
error_msg = 'Wat? What shall i do with rest joint?'
rospy.loginfo(error_msg)
return
angles = map(lambda x: float(x)*math.pi/180.0, angles)
        for limit, angle in zip(NaoMonkey.LIMITS[part], angles):
            if angle < limit[0] or angle > limit[1]:
                error_msg = 'Wat? Limits man!'
                rospy.loginfo(error_msg)
                return
        self.motionProxy.setAngles(NaoMonkey.PART[part], angles, speed)
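    # Editor's addition (hedged sketch): the message this node expects, based
    # only on how ``move`` reads it -- ``Part`` names a key of ``PART`` and
    # ``Angles1``/``Angles2`` carry angles in degrees. The exact field types of
    # the custom ``NaoCoords`` message are an assumption.
    #
    #   msg = NaoCoords()
    #   msg.Part.data = 'Head'
    #   msg.Angles1.data = '10'    # degrees; ``move`` converts to radians
    #   msg.Angles2.data = '5'
    #   rospy.Publisher('nao', NaoCoords, queue_size=10).publish(msg)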
if __name__ == '__main__':
try:
NaoMonkey()
rospy.spin()
except rospy.ROSInterruptException:
pass
| gpl-2.0 | 1,216,695,103,929,815,800 | 8,070,011,872,356,621,000 | 28.835821 | 66 | 0.637819 | false |