code | repo_name | path | language | license | size
stringlengths 3–1.05M | stringlengths 5–104 | stringlengths 4–251 | stringclasses 1 value | stringclasses 15 values | int64 3–1.05M
---|---|---|---|---|---|
# threadlocals middleware
try:
from threading import local
except ImportError:
from django.utils._threading_local import local
_thread_locals = local()
def get_current_user():
return getattr(_thread_locals, 'user', None)
def get_current_ip():
return getattr(_thread_locals, 'ip', None)
class ThreadLocals(object):
"""Middleware that gets various objects from the
request object and saves them in thread local storage."""
def process_request(self, request):
_thread_locals.user = getattr(request, 'user', None)
        _thread_locals.ip = request.META.get('REMOTE_ADDR', None)
| MehmetNuri/ozgurlukicin | middleware/threadlocals.py | Python | gpl-3.0 | 615 |
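# --- Illustrative usage (not part of the original file) ---
# A minimal sketch exercising the middleware above with a stand-in request
# object; FakeRequest and its attribute values are hypothetical, not Django API.
class FakeRequest(object):
    user = 'alice'
    META = {'REMOTE_ADDR': '127.0.0.1'}

ThreadLocals().process_request(FakeRequest())
assert get_current_user() == 'alice'
assert get_current_ip() == '127.0.0.1'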
import flask
import os
import hashlib
from infosystem import database
from infosystem import request
from infosystem import subsystem as subsystem_module
from infosystem import scheduler
POLICYLESS_ROUTES = [
('POST', '/users/reset'),
('GET', '/users/<id>'),
('GET', '/users/routes')
]
SYSADMIN_RESOURCES = [
('POST', '/domains'),
('PUT', '/domains/<id>'),
('DELETE', '/domains/<id>'),
('LIST', '/domains'),
('POST', '/roles'),
('PUT', '/roles/<id>'),
('DELETE', '/roles/<id>'),
('POST', '/capabilities'),
('PUT', '/capabilities/<id>'),
('DELETE', '/capabilities/<id>')
]
class System(flask.Flask):
request_class = request.Request
def __init__(self, *args, **kwargs):
super().__init__(__name__, static_folder=None)
self.configure()
self.init_database()
self.after_init_database()
subsystem_list = subsystem_module.all + list(
kwargs.values()) + list(args)
self.subsystems = {s.name: s for s in subsystem_list}
self.inject_dependencies()
for subsystem in self.subsystems.values():
self.register_blueprint(subsystem)
# Add version in the root URL
self.add_url_rule('/', view_func=self.version, methods=['GET'])
self.scheduler = scheduler.Scheduler()
self.schedule_jobs()
self.bootstrap()
self.before_request(
request.RequestManager(self.subsystems).before_request)
def configure(self):
self.config['BASEDIR'] = os.path.abspath(os.path.dirname(__file__))
self.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = True
self.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///:memory:'
def init_database(self):
database.db.init_app(self)
with self.app_context():
database.db.create_all()
def after_init_database(self):
pass
def version(self):
return '1.0.0'
def schedule_jobs(self):
pass
def inject_dependencies(self):
# api = lambda: None
        def api():
            pass
for name, subsystem in self.subsystems.items():
setattr(api, name, subsystem.router.controller.manager)
# Dependency injection
for subsystem in self.subsystems.values():
subsystem.router.controller.manager.api = api
def register_all_routes(self, default_domain_id, sysadmin_role_id):
# Register all system routes and all non-admin
# routes as capabilities in the default domain
for subsystem in self.subsystems.values():
for route in subsystem.router.routes:
route_url = route['url']
route_method = route['method']
bypass_param = route.get('bypass', False)
sysadmin_param = route.get('sysadmin', False)
if (route_method, route_url) in SYSADMIN_RESOURCES:
sysadmin_param = True
route_ref = self.subsystems['routes'].manager.create(
name=route['action'], url=route_url,
method=route['method'], bypass=bypass_param,
sysadmin=sysadmin_param)
# TODO(samueldmq): duplicate the line above here and
# see what breaks, it's probably the SQL
# session management!
if not route_ref.sysadmin:
cap_mng = self.subsystems['capabilities'].manager
capability = cap_mng.create(
domain_id=default_domain_id, route_id=route_ref.id)
                # TODO(fdoliveira) define why BYPASS attribute for URLs
# if (route_ref.method, route_ref.url) not in \
# POLICYLESS_ROUTES:
self.subsystems['policies'].manager.create(
capability_id=capability.id,
role_id=sysadmin_role_id)
def create_default_domain(self):
# Create DEFAULT domain
default_domain = self.subsystems['domains'].manager.create(
name='default')
        # Create SYSADMIN role
sysadmin_role = self.subsystems['roles'].manager.create(
name='sysadmin', domain_id=default_domain.id)
# Create SYSADMIN user
pass256 = hashlib.sha256(b"123456").hexdigest()
sysadmin_user = self.subsystems['users'].manager.create(
domain_id=default_domain.id, name='sysadmin', password=pass256,
email="sysadmin@example.com")
# Grant SYSADMIN role to SYSADMIN user
self.subsystems['grants'].manager.create(
user_id=sysadmin_user.id, role_id=sysadmin_role.id)
self.register_all_routes(default_domain.id, sysadmin_role.id)
def bootstrap(self):
"""Bootstrap the system.
- routes;
- TODO(samueldmq): sysadmin;
- default domain with admin and capabilities.
"""
with self.app_context():
if not self.subsystems['domains'].manager.list():
self.create_default_domain()
| samueldmq/infosystem | infosystem/__init__.py | Python | apache-2.0 | 5,110 |
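# --- Standalone sketch (not part of the original package) ---
# The dependency-injection trick in System.inject_dependencies() above: a bare
# function object is used as a namespace so every manager can reach any other
# manager as api.<name>. The Subsystem/Manager classes below are hypothetical
# stand-ins for the real subsystems.
class Manager(object):
    def __init__(self):
        self.api = None

class Subsystem(object):
    def __init__(self, name):
        self.name = name
        self.manager = Manager()

subsystems = {s.name: s for s in (Subsystem('users'), Subsystem('roles'))}

def api():
    pass

for name, subsystem in subsystems.items():
    setattr(api, name, subsystem.manager)
for subsystem in subsystems.values():
    subsystem.manager.api = api

assert api.users is subsystems['users'].manager
assert subsystems['roles'].manager.api.users is api.users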
from Hogwash.Results import ResultsFile #type for file created by hw job
from Hogwash.Action import Action #supertype for runnable objects
from Hogwash.Errors import BadExitCode #error if the program crashed
#get a job-specific name
from Hogwash.Helpers import make_job_output_filename, get_cpu_bitness
from waterworks.Processes import bettersystem #run an external command
from evaluate import readClMat, Eval
from Classifiers import readDataFile
from analysis.chatStats import readChat, copyChat, nonSys
from analysis.distMat import getLoc, get121, getF
from path import path
from StringIO import StringIO #store output of process
from itertools import izip
#class representing an experiment to run
class Experiment(Action):
def __init__(self, solverCmd, solvers, mfile):
self.solverCmd = solverCmd.abspath()
self.solvers = solvers
self.mfile = mfile.abspath()
def run(self, hogwash_job):
on64 = (get_cpu_bitness() == 64)
if on64:
par = self.solverCmd.parent
self.solverCmd = par/"../bin64"/self.solverCmd.basename()
outfile = make_job_output_filename(hogwash_job, "output")
cmd = "%s %s %s > %s" % (self.solverCmd, " ".join(self.solvers),
self.mfile, outfile)
print cmd
status = bettersystem(cmd)
if status != 0:
raise BadExitCode(status)
return ResultsFile(outfile)
class Evaluation(Action):
def __init__(self, exp, gold, mat):
self.exp = exp
self.gold = gold.abspath()
self.mat = mat.abspath()
def run(self, hogwash_job):
self.replace_actions(hogwash_job) #bug in hogwash
proposal = readDataFile(file(self.exp))
goldFile = file(self.gold)
readDataFile(goldFile) #discard training data
gold = readDataFile(goldFile)
matrix = readClMat(file(self.mat))
evaluator = Eval(gold, proposal, matrix, self.exp)
return evaluator
class ChatEvaluation(Evaluation):
def __init__(self, exp, nodeVectorGold, chatGolds, mat):
Evaluation.__init__(self, exp, nodeVectorGold, mat)
self.chatGolds = [x.abspath() for x in chatGolds]
def run(self, hogwash_job):
res = Evaluation.run(self, hogwash_job)
golds = []
for chat in self.chatGolds:
print "Reading", chat
golds.append([x for x in readChat(chat)])
relabel = copyChat(golds[0])
proposal = readDataFile(file(self.exp)) #opens twice, whatever
assert(len(proposal) == len(nonSys(relabel)))
for line,prop in izip(nonSys(relabel), proposal):
line.thread = prop
scores = {}
for metric in [get121, getF, getLoc]:
print "Computing", metric.__name__
mscores = [metric(gold, relabel) for gold in golds]
scores[metric.__name__] = mscores
res.chatScores = scores
return res
| pathak22/corr_clustering | correlation-distr/script/DistributedExperiment.py | Python | gpl-2.0 | 2,961 |
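# --- Standalone sketch (not part of the original experiment) ---
# The Action pattern used above, reduced to its core: a runnable object whose
# run() shells out and turns a non-zero exit status into an exception.
# EchoAction and this local BadExitCode stand-in are hypothetical; a POSIX
# `echo` binary is assumed to exist.
import subprocess

class BadExitCode(Exception):
    pass

class EchoAction(object):
    def __init__(self, message):
        self.message = message

    def run(self):
        status = subprocess.call(['echo', self.message])
        if status != 0:
            raise BadExitCode(status)
        return status

EchoAction('hello').run()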
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Wsgi helper utilities for trove"""
import math
import re
import time
import traceback
import uuid
import eventlet.wsgi
import jsonschema
from oslo_log import log as logging
from oslo_serialization import jsonutils
from oslo_service import service
from oslo_utils import encodeutils
import paste.urlmap
import webob
import webob.dec
import webob.exc
from trove.common import base_wsgi
from trove.common import cfg
from trove.common import context as rd_context
from trove.common import exception
from trove.common.i18n import _
from trove.common import pastedeploy
from trove.common import utils
CONTEXT_KEY = 'trove.context'
Router = base_wsgi.Router
Debug = base_wsgi.Debug
Middleware = base_wsgi.Middleware
JSONDictSerializer = base_wsgi.JSONDictSerializer
RequestDeserializer = base_wsgi.RequestDeserializer
CONF = cfg.CONF
# Raise the default from 8192 to accommodate large tokens
eventlet.wsgi.MAX_HEADER_LINE = CONF.max_header_line
eventlet.patcher.monkey_patch(all=False, socket=True)
LOG = logging.getLogger('trove.common.wsgi')
def versioned_urlmap(*args, **kwargs):
urlmap = paste.urlmap.urlmap_factory(*args, **kwargs)
return VersionedURLMap(urlmap)
def launch(app_name, port, paste_config_file, data={},
host='0.0.0.0', backlog=128, threads=1000, workers=None):
"""Launches a wsgi server based on the passed in paste_config_file.
    Launch provides an easy way to create a paste app from the config
file and launch it via the service launcher. It takes care of
all of the plumbing. The only caveat is that the paste_config_file
must be a file that paste.deploy can find and handle. There is
a helper method in cfg.py that finds files.
Example:
conf_file = CONF.find_file(CONF.api_paste_config)
launcher = wsgi.launch('myapp', CONF.bind_port, conf_file)
launcher.wait()
"""
LOG.debug("Trove started on %s", host)
app = pastedeploy.paste_deploy_app(paste_config_file, app_name, data)
server = base_wsgi.Service(app, port, host=host,
backlog=backlog, threads=threads)
return service.launch(CONF, server, workers)
# Note: taken from Nova
def serializers(**serializers):
"""Attaches serializers to a method.
This decorator associates a dictionary of serializers with a
method. Note that the function attributes are directly
manipulated; the method is not wrapped.
"""
def decorator(func):
if not hasattr(func, 'wsgi_serializers'):
func.wsgi_serializers = {}
func.wsgi_serializers.update(serializers)
return func
return decorator
class TroveMiddleware(Middleware):
# Note: taken from nova
@classmethod
def factory(cls, global_config, **local_config):
"""Used for paste app factories in paste.deploy config files.
Any local configuration (that is, values under the [filter:APPNAME]
section of the paste config) will be passed into the `__init__` method
as kwargs.
A hypothetical configuration would look like:
[filter:analytics]
redis_host = 127.0.0.1
paste.filter_factory = nova.api.analytics:Analytics.factory
which would result in a call to the `Analytics` class as
import nova.api.analytics
analytics.Analytics(app_from_paste, redis_host='127.0.0.1')
You could of course re-implement the `factory` method in subclasses,
but using the kwarg passing it shouldn't be necessary.
"""
def _factory(app):
return cls(app, **local_config)
return _factory
class VersionedURLMap(object):
def __init__(self, urlmap):
self.urlmap = urlmap
def __call__(self, environ, start_response):
req = Request(environ)
if req.url_version is None and req.accept_version is not None:
version = "/v" + req.accept_version
http_exc = webob.exc.HTTPNotAcceptable(_("version not supported"))
app = self.urlmap.get(version, Fault(http_exc))
else:
app = self.urlmap
return app(environ, start_response)
class Router(base_wsgi.Router):
# Original router did not allow for serialization of the 404 error.
# To fix this the _dispatch was modified to use Fault() objects.
@staticmethod
@webob.dec.wsgify
def _dispatch(req):
"""
Called by self._router after matching the incoming request to a route
and putting the information into req.environ. Either returns 404
or the routed WSGI app's response.
"""
match = req.environ['wsgiorg.routing_args'][1]
if not match:
return Fault(webob.exc.HTTPNotFound())
app = match['controller']
return app
class Request(base_wsgi.Request):
@property
def params(self):
return utils.stringify_keys(super(Request, self).params)
def best_match_content_type(self, supported_content_types=None):
"""Determine the most acceptable content-type.
Based on the query extension then the Accept header.
"""
parts = self.path.rsplit('.', 1)
if len(parts) > 1:
format = parts[1]
if format in ['json']:
return 'application/{0}'.format(parts[1])
ctypes = {
'application/vnd.openstack.trove+json': "application/json",
'application/json': "application/json",
}
bm = self.accept.best_match(ctypes.keys())
return ctypes.get(bm, 'application/json')
@utils.cached_property
def accept_version(self):
accept_header = self.headers.get('ACCEPT', "")
accept_version_re = re.compile(".*?application/vnd.openstack.trove"
"(\+.+?)?;"
"version=(?P<version_no>\d+\.?\d*)")
match = accept_version_re.search(accept_header)
return match.group("version_no") if match else None
@utils.cached_property
def url_version(self):
versioned_url_re = re.compile("/v(?P<version_no>\d+\.?\d*)")
match = versioned_url_re.search(self.path)
return match.group("version_no") if match else None
class Result(object):
"""A result whose serialization is compatible with JSON."""
def __init__(self, data, status=200):
self._data = data
self.status = status
def data(self, serialization_type):
"""Return an appropriate serialized type for the body.
serialization_type is not used presently, but may be
in the future, so it stays.
"""
if hasattr(self._data, "data_for_json"):
return self._data.data_for_json()
return self._data
class Resource(base_wsgi.Resource):
def __init__(self, controller, deserializer, serializer,
exception_map=None):
exception_map = exception_map or {}
self.model_exception_map = self._invert_dict_list(exception_map)
super(Resource, self).__init__(controller, deserializer, serializer)
@webob.dec.wsgify(RequestClass=Request)
def __call__(self, request):
return super(Resource, self).__call__(request)
def execute_action(self, action, request, **action_args):
if getattr(self.controller, action, None) is None:
return Fault(webob.exc.HTTPNotFound())
try:
self.controller.validate_request(action, action_args)
result = super(Resource, self).execute_action(
action,
request,
**action_args)
if type(result) is dict:
result = Result(result)
return result
except exception.TroveError as trove_error:
LOG.debug(traceback.format_exc())
LOG.debug("Caught Trove Error %s", trove_error)
httpError = self._get_http_error(trove_error)
LOG.debug("Mapped Error to %s", httpError)
return Fault(httpError(str(trove_error), request=request))
except webob.exc.HTTPError as http_error:
LOG.debug(traceback.format_exc())
return Fault(http_error)
except Exception as error:
exception_uuid = str(uuid.uuid4())
LOG.exception(exception_uuid + ": " + str(error))
return Fault(webob.exc.HTTPInternalServerError(
"Internal Server Error. Please keep this ID to help us "
"figure out what went wrong: (%s)." % exception_uuid,
request=request))
def _get_http_error(self, error):
return self.model_exception_map.get(type(error),
webob.exc.HTTPBadRequest)
def _invert_dict_list(self, exception_dict):
"""Flattens values of keys and inverts keys and values.
Example:
{'x': [1, 2, 3], 'y': [4, 5, 6]} converted to
{1: 'x', 2: 'x', 3: 'x', 4: 'y', 5: 'y', 6: 'y'}
"""
inverted_dict = {}
for key, value_list in exception_dict.items():
for value in value_list:
inverted_dict[value] = key
return inverted_dict
def serialize_response(self, action, action_result, accept):
# If an exception is raised here in the base class, it is swallowed,
# and the action_result is returned as-is. For us, that's bad news -
# we never want that to happen except in the case of webob types.
# So we override the behavior here so we can at least log it.
try:
return super(Resource, self).serialize_response(
action, action_result, accept)
except Exception:
# execute_action either returns the results or a Fault object.
# If action_result is not a Fault then there really was a
# serialization error which we log. Otherwise return the Fault.
if not isinstance(action_result, Fault):
LOG.exception(_("Unserializable result detected."))
raise
return action_result
class Controller(object):
"""Base controller that creates a Resource with default serializers."""
exception_map = {
webob.exc.HTTPUnprocessableEntity: [
exception.UnprocessableEntity,
],
webob.exc.HTTPUnauthorized: [
exception.Forbidden,
exception.SwiftAuthError,
],
webob.exc.HTTPForbidden: [
exception.ReplicaSourceDeleteForbidden,
exception.BackupTooLarge,
exception.ModuleAccessForbidden,
exception.ModuleAppliedToInstance,
exception.PolicyNotAuthorized,
exception.LogAccessForbidden,
],
webob.exc.HTTPBadRequest: [
exception.InvalidModelError,
exception.BadRequest,
exception.CannotResizeToSameSize,
exception.BadValue,
exception.DatabaseAlreadyExists,
exception.UserAlreadyExists,
exception.LocalStorageNotSpecified,
exception.ModuleAlreadyExists,
],
webob.exc.HTTPNotFound: [
exception.NotFound,
exception.ComputeInstanceNotFound,
exception.ModelNotFoundError,
exception.UserNotFound,
exception.DatabaseNotFound,
exception.QuotaResourceUnknown,
exception.BackupFileNotFound,
exception.ClusterNotFound,
exception.DatastoreNotFound,
exception.SwiftNotFound,
exception.ModuleTypeNotFound,
],
webob.exc.HTTPConflict: [
exception.BackupNotCompleteError,
exception.RestoreBackupIntegrityError,
],
webob.exc.HTTPRequestEntityTooLarge: [
exception.OverLimit,
exception.QuotaExceeded,
exception.VolumeQuotaExceeded,
],
webob.exc.HTTPServerError: [
exception.VolumeCreationFailure,
exception.UpdateGuestError,
],
webob.exc.HTTPNotImplemented: [
exception.VolumeNotSupported,
exception.LocalStorageNotSupported,
exception.DatastoreOperationNotSupported,
exception.ClusterInstanceOperationNotSupported,
exception.ClusterDatastoreNotSupported
],
}
schemas = {}
@classmethod
def get_schema(cls, action, body):
LOG.debug("Getting schema for %s:%s" %
(cls.__class__.__name__, action))
if cls.schemas:
matching_schema = cls.schemas.get(action, {})
if matching_schema:
LOG.debug(
"Found Schema: %s" % matching_schema.get("name",
matching_schema))
return matching_schema
@staticmethod
def format_validation_msg(errors):
# format path like object['field1'][i]['subfield2']
messages = []
for error in errors:
path = list(error.path)
f_path = "%s%s" % (path[0],
''.join(['[%r]' % i for i in path[1:]]))
messages.append("%s %s" % (f_path, error.message))
for suberror in sorted(error.context, key=lambda e: e.schema_path):
messages.append(suberror.message)
error_msg = "; ".join(messages)
return "Validation error: %s" % error_msg
def validate_request(self, action, action_args):
body = action_args.get('body', {})
schema = self.get_schema(action, body)
if schema:
validator = jsonschema.Draft4Validator(schema)
if not validator.is_valid(body):
errors = sorted(validator.iter_errors(body),
key=lambda e: e.path)
error_msg = self.format_validation_msg(errors)
LOG.info(error_msg)
raise exception.BadRequest(message=error_msg)
def create_resource(self):
return Resource(
self,
RequestDeserializer(),
TroveResponseSerializer(),
self.exception_map)
def _extract_limits(self, params):
return {key: params[key] for key in params.keys()
if key in ["limit", "marker"]}
class TroveResponseSerializer(base_wsgi.ResponseSerializer):
def serialize_body(self, response, data, content_type, action):
"""Overrides body serialization in base_wsgi.ResponseSerializer.
If the "data" argument is the Result class, its data
method is called and *that* is passed to the superclass implementation
instead of the actual data.
"""
if isinstance(data, Result):
data = data.data(content_type)
super(TroveResponseSerializer, self).serialize_body(
response,
data,
content_type,
action)
def serialize_headers(self, response, data, action):
super(TroveResponseSerializer, self).serialize_headers(
response,
data,
action)
if isinstance(data, Result):
response.status = data.status
class Fault(webob.exc.HTTPException):
"""Error codes for API faults."""
code_wrapper = {
400: webob.exc.HTTPBadRequest,
401: webob.exc.HTTPUnauthorized,
403: webob.exc.HTTPUnauthorized,
404: webob.exc.HTTPNotFound,
}
resp_codes = [int(code) for code in code_wrapper.keys()]
def __init__(self, exception):
"""Create a Fault for the given webob.exc.exception."""
self.wrapped_exc = exception
@staticmethod
def _get_error_name(exc):
# Displays a Red Dwarf specific error name instead of a webob exc name.
named_exceptions = {
'HTTPBadRequest': 'badRequest',
'HTTPUnauthorized': 'unauthorized',
'HTTPForbidden': 'forbidden',
'HTTPNotFound': 'itemNotFound',
'HTTPMethodNotAllowed': 'badMethod',
'HTTPRequestEntityTooLarge': 'overLimit',
'HTTPUnsupportedMediaType': 'badMediaType',
'HTTPInternalServerError': 'instanceFault',
'HTTPNotImplemented': 'notImplemented',
'HTTPServiceUnavailable': 'serviceUnavailable',
}
name = exc.__class__.__name__
if name in named_exceptions:
return named_exceptions[name]
# If the exception isn't in our list, at least strip off the
# HTTP from the name, and then drop the case on the first letter.
name = name.split("HTTP").pop()
name = name[:1].lower() + name[1:]
return name
@webob.dec.wsgify(RequestClass=Request)
def __call__(self, req):
"""Generate a WSGI response based on the exception passed to ctor."""
# Replace the body with fault details.
fault_name = Fault._get_error_name(self.wrapped_exc)
fault_data = {
fault_name: {
'code': self.wrapped_exc.status_int,
}
}
if self.wrapped_exc.detail:
fault_data[fault_name]['message'] = self.wrapped_exc.detail
else:
fault_data[fault_name]['message'] = self.wrapped_exc.explanation
content_type = req.best_match_content_type()
serializer = {
'application/json': base_wsgi.JSONDictSerializer(),
}[content_type]
self.wrapped_exc.body = serializer.serialize(fault_data, content_type)
self.wrapped_exc.content_type = content_type
return self.wrapped_exc
class ContextMiddleware(base_wsgi.Middleware):
def __init__(self, application):
self.admin_roles = CONF.admin_roles
super(ContextMiddleware, self).__init__(application)
def _extract_limits(self, params):
return {key: params[key] for key in params.keys()
if key in ["limit", "marker"]}
def process_request(self, request):
service_catalog = None
catalog_header = request.headers.get('X-Service-Catalog', None)
if catalog_header:
try:
service_catalog = jsonutils.loads(catalog_header)
except ValueError:
raise webob.exc.HTTPInternalServerError(
_('Invalid service catalog json.'))
tenant_id = request.headers.get('X-Tenant-Id', None)
auth_token = request.headers["X-Auth-Token"]
user_id = request.headers.get('X-User-ID', None)
roles = request.headers.get('X-Role', '').split(',')
is_admin = False
for role in roles:
if role.lower() in self.admin_roles:
is_admin = True
break
limits = self._extract_limits(request.params)
context = rd_context.TroveContext(auth_token=auth_token,
tenant=tenant_id,
user=user_id,
is_admin=is_admin,
limit=limits.get('limit'),
marker=limits.get('marker'),
service_catalog=service_catalog,
roles=roles)
request.environ[CONTEXT_KEY] = context
@classmethod
def factory(cls, global_config, **local_config):
def _factory(app):
LOG.debug("Created context middleware with config: %s" %
local_config)
return cls(app)
return _factory
class FaultWrapper(base_wsgi.Middleware):
"""Calls down the middleware stack, making exceptions into faults."""
@webob.dec.wsgify(RequestClass=base_wsgi.Request)
def __call__(self, req):
try:
resp = req.get_response(self.application)
if resp.status_int in Fault.resp_codes:
for (header, value) in resp._headerlist:
if header == "Content-Type" and \
value == "text/plain; charset=UTF-8":
return Fault(Fault.code_wrapper[resp.status_int]())
return resp
return resp
except Exception as ex:
LOG.exception(_("Caught error: %s."),
encodeutils.exception_to_unicode(ex))
exc = webob.exc.HTTPInternalServerError()
return Fault(exc)
@classmethod
def factory(cls, global_config, **local_config):
def _factory(app):
return cls(app)
return _factory
# ported from Nova
class OverLimitFault(webob.exc.HTTPException):
"""
Rate-limited request response.
"""
def __init__(self, message, details, retry_time):
"""
Initialize new `OverLimitFault` with relevant information.
"""
hdrs = OverLimitFault._retry_after(retry_time)
self.wrapped_exc = webob.exc.HTTPRequestEntityTooLarge(headers=hdrs)
self.content = {"overLimit": {"code": self.wrapped_exc.status_int,
"message": message,
"details": details,
"retryAfter": hdrs['Retry-After'],
},
}
@staticmethod
def _retry_after(retry_time):
delay = int(math.ceil(retry_time - time.time()))
retry_after = delay if delay > 0 else 0
headers = {'Retry-After': '%d' % retry_after}
return headers
@webob.dec.wsgify(RequestClass=Request)
def __call__(self, request):
"""
Return the wrapped exception with a serialized body conforming to our
error format.
"""
content_type = request.best_match_content_type()
serializer = {'application/json': JSONDictSerializer(),
}[content_type]
content = serializer.serialize(self.content)
self.wrapped_exc.body = content
self.wrapped_exc.content_type = content_type
return self.wrapped_exc
class ActionDispatcher(object):
"""Maps method name to local methods through action name."""
def dispatch(self, *args, **kwargs):
"""Find and call local method."""
action = kwargs.pop('action', 'default')
action_method = getattr(self, str(action), self.default)
return action_method(*args, **kwargs)
def default(self, data):
raise NotImplementedError()
class DictSerializer(ActionDispatcher):
"""Default request body serialization."""
def serialize(self, data, action='default'):
return self.dispatch(data, action=action)
def default(self, data):
return ""
class JSONDictSerializer(DictSerializer):
"""Default JSON request body serialization."""
def default(self, data):
return jsonutils.dump_as_bytes(data)
| hplustree/trove | trove/common/wsgi.py | Python | apache-2.0 | 23,631 |
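# --- Standalone sketch (not part of the original module) ---
# Resource._invert_dict_list() above flattens {http_error: [model_errors]}
# into a {model_error: http_error} lookup; the same idea in isolation, with
# illustrative string keys standing in for exception classes:
exception_map = {
    'HTTPNotFound': ['UserNotFound', 'DatabaseNotFound'],
    'HTTPBadRequest': ['BadValue'],
}
inverted = {}
for http_error, model_errors in exception_map.items():
    for model_error in model_errors:
        inverted[model_error] = http_error

assert inverted['BadValue'] == 'HTTPBadRequest'
assert inverted['UserNotFound'] == 'HTTPNotFound'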
import random
depth = 0
track = None
pos = None
result = 0
def EvaluateTree(track, pos, tree):
global result
setTrack(track)
setPos(pos)
result = 0
if not isinstance(tree, BaseNode): raise TypeError("Tree is not a BaseNode")
try:
        tree.getResult()
except StepException:
pass
return result
def setPos(ppos):
global pos
pos = ppos
def getPos():
return pos
def setTrack(ttrack):
global track
track = ttrack
class BaseNode:
parent = None
def __init__(self, nr):
self.depth = nr
def __str__(self):
return "BaseNode"
def getResult(self):
return False
class AndNode(BaseNode):
a = None
b = None
def __init__(self, nr):
self.depth = nr
self.a = randomizeNodes()
self.b = randomizeNodes()
def __str__(self):
return "(" + str(self.a) + " && " + str(self.b) + ")"
def getResult(self):
ares = self.a.getResult()
bres = self.b.getResult()
return ares and bres
class OrNode(BaseNode):
a = None
b = None
def __init__(self, nr):
self.depth = nr
self.a = randomizeNodes()
self.b = randomizeNodes()
def __str__(self):
return "(" + str(self.a) + " || " + str(self.b) + ")"
def getResult(self):
return self.a.getResult() or self.b.getResult()
class NotNode(BaseNode):
a = None
def __init__(self, nr):
self.depth = nr
self.a = randomizeNodes()
def __str__(self):
return "!(" + str(self.a) + ")"
def getResult(self):
return not(self.a.getResult())
class IfNode(BaseNode):
a = None
b = None
c = None
def __init__(self, nr):
self.depth = nr
self.a = randomizeNodes()
self.b = randomizeNodes()
self.c = randomizeNodes()
def __str__(self):
return "((" + str(self.a) + ")?(" + str(self.b) + "):(" + str(self.c) + "))"
def getResult(self):
return self.b.getResult() if self.a.getResult() else self.c.getResult()
class SensorNode(BaseNode):
    #directions = dict(N = 1, NE = 2, E = 3, SE = 4, S = 5, SW = 6, W = 7, NW = 8)
    directions = ["N", "NE", "E", "SE", "S", "SW", "W", "NW"]
direction = 0
def __init__(self, nr, d):
self.depth = nr
if d < 0 or d > len(self.directions) - 1:
raise ValueError("0 > " + str(d) + " < " + str(len(self.directions) - 1))
self.direction = d
def __str__(self):
return "S[" + self.directions[self.direction] + "]"
def getResult(self):
global pos, track
if self.direction == 0:
return 1 if track[pos.y - 1][pos.x + 0] == -1 else 0
elif self.direction == 1:
return 1 if track[pos.y - 1][pos.x + 1] == -1 else 0
elif self.direction == 2:
return 1 if track[pos.y + 0][pos.x + 1] == -1 else 0
elif self.direction == 3:
return 1 if track[pos.y + 1][pos.x + 1] == -1 else 0
elif self.direction == 4:
return 1 if track[pos.y + 1][pos.x + 0] == -1 else 0
elif self.direction == 5:
return 1 if track[pos.y + 1][pos.x - 1] == -1 else 0
elif self.direction == 6:
return 1 if track[pos.y + 0][pos.x - 1] == -1 else 0
elif self.direction == 7:
return 1 if track[pos.y - 1][pos.x - 1] == -1 else 0
class MoveNode(BaseNode):
directions = ["N", "E", "S", "W"]
direction = 0
def __init__(self, nr, d):
self.depth = nr
if d < 0 or d > len(self.directions) - 1:
raise ValueError("0 > " + str(d) + " < " + str(len(self.directions) - 1))
self.direction = d
def __str__(self):
return "M[" + self.directions[self.direction] + "]"
def getResult(self):
global pos, track, result
#print "Move dir=%d from (%d, %d)" % (self.direction, pos.x, pos.y)
if self.direction == 0:
pos.y = pos.y - 1
elif self.direction == 1:
pos.x = pos.x + 1
elif self.direction == 2:
pos.y = pos.y + 1
elif self.direction == 3:
pos.x = pos.x - 1
result = track[pos.y][pos.x]
if result in [4, 8]:
result = 0
if result == 1:
track[pos.y][pos.x] = 8
else:
track[pos.y][pos.x] = 4
raise StepException()
def randomizeNodes():
global depth
minRand = 0
depth = depth + 1
if depth > 60:
minRand = 4
    rnd = random.randint(minRand, 15)  # beyond depth 60, allow only terminal (sensor/move) nodes
if rnd == 0:
return AndNode(depth)
elif rnd == 1:
return OrNode(depth)
elif rnd == 2:
return NotNode(depth)
elif rnd == 3:
return IfNode(depth)
elif rnd >= 4 and rnd <= 11:
return SensorNode(depth, random.randint(0,7))
elif rnd >= 12 and rnd <= 15:
return MoveNode(depth, random.randint(0,3))
def generateNodes():
global depth
depth = 0
return randomizeNodes()
def merge(node1, node2):
return (node1, node2)
class StepException(Exception):
pass
class coord():
x = 0
y = 0
def __init__(self, x = 0, y = 0):
self.x = x
self.y = y
def __str__(self):
return "(x=%d, y=%d)" % (self.x, self.y)
| tb2johm/GeneticWalker | src/Nodes.py | Python | agpl-3.0 | 5,313 |
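# --- Hypothetical smoke test (not part of the original file) ---
# Grow one random tree and evaluate it on a tiny track. The grid conventions
# are assumed from SensorNode/MoveNode above: -1 marks walls, 1 marks track
# cells; seed and track contents are arbitrary.
if __name__ == '__main__':
    random.seed(1)
    tiny_track = [[-1, -1, -1, -1],
                  [-1,  1,  1, -1],
                  [-1,  1,  1, -1],
                  [-1, -1, -1, -1]]
    tree = generateNodes()
    print(str(tree))
    print(EvaluateTree(tiny_track, coord(1, 1), tree))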
# Prerequisites: none, just digits in the range 0-9.
# A five-digit Armstrong number equals the sum of its digits,
# each raised to the power of 5.
def armstrong5():
print("These are the five digit Armstrong numbers:")
for first in range (1, 10):
for second in range (0, 10):
for third in range (0, 10):
for fourth in range (0, 10):
for fifth in range (0, 10):
number = 10000* first + 1000* second + 100* third \
+ 10* fourth + fifth
armstrong = first**5 + second**5 + third**5 \
+ fourth**5 + fifth**5
if number == armstrong:
print(number)
armstrong5()
def armstrong5a():
    print("Here are the five digit Armstrong numbers:")
    for number in range(10000, 100000):
        unitsDigit = number % 10
        tensDigit = number // 10 % 10
        hundredsDigit = number // 100 % 10
        thousandsDigit = number // 1000 % 10
        tenThousandsDigit = number // 10000
        armstrong = tenThousandsDigit ** 5 + thousandsDigit ** 5 \
            + hundredsDigit ** 5 + tensDigit ** 5 + unitsDigit ** 5
        if number == armstrong:
            print(number)
armstrong5a()
| khoanguyen0791/cs170 | CS170_homework/Armstrong.py | Python | apache-2.0 | 1,312 |
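# --- Alternative sketch (not part of the original file) ---
# The same check without nested loops or manual digit arithmetic: a number is
# an Armstrong (narcissistic) number of order n when the sum of its digits,
# each raised to n, equals the number itself.
def is_armstrong(number, power=5):
    digits = [int(d) for d in str(number)]
    return number == sum(d ** power for d in digits)

print([n for n in range(10000, 100000) if is_armstrong(n)])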
import sys
import os
import os.path
import fnmatch
import shutil
import glob
#-------------------------------------------------------------------------------
# the main function
# cd tools
# python elxReplaceCopyrightNotice.py
def main() :
# The path to the source files relative to this script
srcdir = os.path.join( "..")
# Get a list of all files
cpppatterns = ( '*.h', '*.hxx', '*.cxx', '*.cuh', '*.cu', '*.h.in' )
cppmatches = []
cmakepatterns = ( 'CMakeLists.txt', '*.cmake')
cmakematches = []
skipped = []
for root, dirnames, filenames in os.walk( srcdir ):
if ".git" in root:
continue
print(root)
for filename in filenames:
found = False
for pattern in cpppatterns:
if fnmatch.fnmatch(filename,pattern):
print("[cpp] " + filename)
cppmatches.append(os.path.join( root, filename ))
found = True
break
if not found:
for pattern in cmakepatterns:
if fnmatch.fnmatch(filename,pattern):
print("[cmake] " + filename)
cmakematches.append(os.path.join( root, filename ))
found = True
break
if not found:
print("[skip] " + filename)
skipped.append(os.path.join( root, filename ))
manualcheckfiles=[]
with open( "CppCopyrightNotice_Apache.txt" ) as noticefile:
noticelines = noticefile.readlines()
for filename in cppmatches:
matchlines = None
with open( filename, 'r' ) as matchfile:
matchlines = matchfile.readlines()
#scan for any other copyright usages
skip = False
for line in matchlines:
if (("COPYRIGHT" in line) or ("Copyright" in line) or ("copyright" in line)):
manualcheckfiles.append(filename)
skip = True
#print("[copyright] " + filename)
            if not skip:
with open( filename, 'w') as overwritefile:
overwritefile.writelines(noticelines)
overwritefile.writelines(matchlines)
with open( "CmakeCopyrightNotice_Apache.txt" ) as noticefile:
noticelines = noticefile.readlines()
for filename in cmakematches:
matchlines = None
with open( filename, 'r' ) as matchfile:
matchlines = matchfile.readlines()
#scan for any other copyright usages
skip = False
for line in matchlines:
if (("COPYRIGHT" in line) or ("Copyright" in line) or ("copyright" in line)):
manualcheckfiles.append(filename)
skip = True
#print("[copyright] " + filename)
if not skip:
with open( filename, 'w') as overwritefile:
overwritefile.writelines(noticelines)
overwritefile.writelines(matchlines)
print("Other Copyright notice found in (please check manually): ")
print(manualcheckfiles)
# Exit
return 0
#-------------------------------------------------------------------------------
if __name__ == '__main__':
main()
| kaspermarstal/SuperElastix | Tools/elxReplaceCopyrightNotice.py | Python | apache-2.0 | 3,468 |
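# --- Illustrative check (not part of the original tool) ---
# The classification in main() relies on fnmatch patterns; the same matching
# logic in isolation, with made-up filenames:
import fnmatch
cpppatterns = ('*.h', '*.hxx', '*.cxx', '*.cuh', '*.cu', '*.h.in')
assert any(fnmatch.fnmatch('itkImage.hxx', p) for p in cpppatterns)
assert any(fnmatch.fnmatch('config.h.in', p) for p in cpppatterns)
assert not any(fnmatch.fnmatch('README.md', p) for p in cpppatterns)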
# Copyright 2011 Brendan Kidwell <brendan@glump.net>.
#
# This file is part of caliload.
#
# caliload is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# caliload is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with caliload. If not, see <http://www.gnu.org/licenses/>.
"""Interface to metadata.opf ."""
from caliload.configobject import ConfigObject
from caliload.optionsobject import OptionsObject
from glob import glob
from uuid import uuid1
from xml.dom import minidom
import os
import subprocess
config = ConfigObject()
options = OptionsObject()
import logging
log = logging.getLogger(__name__)
def get_text(nodelist):
rc = []
for node in nodelist:
if node.nodeType == node.TEXT_NODE:
rc.append(node.data)
return ''.join(rc)
def get_book_filename(dir):
file = None
for ext in ("epub", "pdf"):
if not file is None: break
for n in glob(os.path.join(dir, "*.%s" % ext)):
if not n[0] == '_':
file = n
filetype = ext
break
if file is None:
raise RuntimeError("Can't find an ebook file.")
return (file, filetype)
class Metadata:
def __init__(self, useCached=True):
self.xmlfilename = os.path.join(options.dir, 'metadata.opf')
self.bookfile, self.type = get_book_filename(options.dir)
if not useCached or not os.path.exists(self.xmlfilename):
if useCached:
log.info("metadata.opf not found. Extracting from %s." % os.path.basename(self.bookfile))
else:
log.info("Loading metadata.opf from %s." % os.path.basename(self.bookfile))
cmd = [
config.ebookmeta_cmd, self.bookfile, '--to-opf=%s' % self.xmlfilename
]
subprocess.check_output(cmd)
log.info("Reading %s." % os.path.basename(self.xmlfilename))
self.xml = minidom.parse(self.xmlfilename)
self.metadata = self.xml.getElementsByTagName('metadata')[0]
self.uuidfile = os.path.join(options.dir, 'uuid')
self.id = None
for e in self.metadata.getElementsByTagName('dc:identifier'):
scheme = e.getAttribute('opf:scheme')
if scheme.lower() == 'caliload': self.id = get_text(e.childNodes)
if self.id is None:
self.recover_or_generateId()
if not os.path.exists(self.uuidfile):
f = open(self.uuidfile, 'w')
f.write(self.id + '\n')
f.close()
def recover_or_generateId(self):
"""Load UUID from uuid file or generate a new UUID; store UUID in metadata.opf ."""
if os.path.exists(self.uuidfile):
log.info("Found ID in uuid file. Writing to %s." % os.path.basename(self.xmlfilename))
f = open(self.uuidfile, 'r')
self.id = f.read().strip()
f.close()
else:
log.info("ID not found. Creating and saving a new ID.")
self.id = str(uuid1())
f = open(self.uuidfile, 'w')
f.write(self.id + '\n')
f.close()
# write data to XML doc
e = self.xml.createElement('dc:identifier')
e.setAttribute('opf:scheme', 'caliload')
textNode = self.xml.createTextNode(str(self.id))
e.appendChild(textNode)
self.metadata.appendChild(e)
# save metadata.opf
f = open(self.xmlfilename, 'w')
f.write(self.xml.toprettyxml(indent='', newl='', encoding='utf-8'))
f.close()
def write_to_book(self):
"""Write metadata.opf to ebook file."""
log.info("Writing metadata.opf to book file.")
# erase old tags
cmd = [
config.ebookmeta_cmd, self.bookfile, "--tags="
]
subprocess.check_output(cmd)
# write all metadata from opf file
cmd = [
config.ebookmeta_cmd, self.bookfile, '--from-opf=%s' % self.xmlfilename
]
subprocess.check_output(cmd)
| bkidwell/caliload | caliload/metadata.py | Python | gpl-3.0 | 3,883 |
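# --- Illustrative check (not part of the original module) ---
# get_text() above concatenates the text nodes of an element. A throwaway DOM
# demonstrates the behaviour; the XML snippet is made up for illustration.
from xml.dom import minidom

doc = minidom.parseString('<dc:title xmlns:dc="urn:example">A Sample Title</dc:title>')
print(get_text(doc.documentElement.childNodes))  # -> 'A Sample Title'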
# -*- coding: utf-8 -*-
"""
Created on Mon Oct 10 23:58:35 2016
@author: Subhajit
"""
import numpy as np
import scipy.io
import h5py
def load_matfile(filename='./data/indian_pines_data.mat'):
f = h5py.File(filename)
#print f['X_r'].shape
if 'pca' in filename:
X=np.asarray(f['X_r'],dtype='float32')
else:
X=np.asarray(f['X'],dtype='float32')
y=np.asarray(f['labels'],dtype='uint8')
gt=np.asarray(f['ip_gt'],dtype='uint8')
#im=np.asarray(f['im'],dtype='uint8')
f.close()
X=X.transpose(3,2,1,0)
y=np.squeeze(y)-1
gt=gt.transpose(1,0)
#im=im.transpose(2,1,0)
return X,y,gt
if __name__=='__main__':
    X, y, gt = load_matfile(filename='./data/Indian_pines_pca.mat')
| Shashank00/testdata | prepData.py | Python | mit | 784 |
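# --- Shape sanity check (not part of the original script) ---
# h5py exposes MATLAB arrays with the axis order reversed, hence the
# transpose/squeeze in load_matfile(); the sizes below are arbitrary examples.
import numpy as np
X = np.zeros((10, 3, 5, 7), dtype='float32')  # axis order as read via h5py
print(X.transpose(3, 2, 1, 0).shape)          # -> (7, 5, 3, 10)
y = np.ones((1, 42), dtype='uint8')
print(np.squeeze(y).shape)                    # -> (42,)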
__author__ = 'tobin'
from otee.mouth.mouth import random_mouth
| skyczhao/silver | AOTee/otee/mouth/__init__.py | Python | gpl-2.0 | 64 |
# -*- coding: utf-8 -*-
from src import icons
from src.actions import StashWorkflowAction, StashFilterableMenu, PROJECT_AVATAR_DIR, PULL_REQUESTS_OPEN_CACHE_KEY, \
UPDATE_INTERVAL_OPEN_PULL_REQUESTS, UPDATE_INTERVAL_MY_PULL_REQUESTS, UPDATE_INTERVAL_CREATED_PULL_REQUESTS, \
PULL_REQUESTS_REVIEW_CACHE_KEY, PULL_REQUESTS_CREATED_CACHE_KEY, get_data_from_cache
from src.util import workflow, call_alfred
class PullRequestFilterableMenu(StashFilterableMenu):
def __init__(self, args, update_interval, cache_key):
super(PullRequestFilterableMenu, self).__init__(entity_name='pull requests',
update_interval=update_interval,
cache_key=cache_key,
args=args)
def _add_to_result_list(self, pull_request):
workflow().add_item(title=u'{} #{}: {} → {}'.format(pull_request.repo_name, pull_request.pull_request_id,
pull_request.from_branch, pull_request.to_branch),
subtitle=pull_request.title,
arg=':pullrequests ' + pull_request.link,
largetext=pull_request.title,
valid=True,
icon=workflow().cachefile('{}/{}'.format(PROJECT_AVATAR_DIR, pull_request.project_key)))
def _get_result_filter(self):
return lambda pr: u' '.join([pr.repo_name, pr.title])
def _add_item_after_last_result(self):
workflow().add_item(
'Back',
autocomplete=':pullrequests ', icon=icons.GO_BACK
)
_counters = {
0: u'',
1: u'①',
2: u'②',
3: u'③',
4: u'④',
5: u'⑤',
6: u'⑥',
7: u'⑦',
8: u'⑧',
9: u'⑨',
10: u'⑩',
11: u'⑪',
12: u'⑫',
13: u'⑬',
14: u'⑭',
15: u'⑮',
16: u'⑯',
17: u'⑰',
18: u'⑱',
19: u'⑲',
20: u'⑳'
}
_pull_request_modes = {
'open': (PULL_REQUESTS_OPEN_CACHE_KEY, UPDATE_INTERVAL_OPEN_PULL_REQUESTS),
'review': (PULL_REQUESTS_REVIEW_CACHE_KEY, UPDATE_INTERVAL_MY_PULL_REQUESTS),
'created': (PULL_REQUESTS_CREATED_CACHE_KEY, UPDATE_INTERVAL_CREATED_PULL_REQUESTS)
}
def _num_pull_requests(mode):
cache_key, update_interval = _pull_request_modes[mode]
data = get_data_from_cache(cache_key, update_interval)
return _counters.get(len(data), u'⑳⁺') if data else ''
class PullRequestWorkflowAction(StashWorkflowAction):
def menu(self, args):
if len(args) == 2:
workflow().add_item(
u'All open pull requests {}'.format(_num_pull_requests('open')),
'Search in all open pull requests',
arg=':pullrequests open',
icon=icons.OPEN,
valid=True
)
workflow().add_item(
u'Your pull requests to review {}'.format(_num_pull_requests('review')),
'Search in pull requests you have to review',
arg=':pullrequests review',
icon=icons.REVIEW,
valid=True
)
workflow().add_item(
u'Your created pull requests {}'.format(_num_pull_requests('created')),
'Search in pull requests you created',
arg=':pullrequests created',
icon=icons.CREATED,
valid=True
)
workflow().add_item(
'Main menu',
autocomplete='', icon=icons.GO_BACK
)
else:
cache_key, update_interval = _pull_request_modes[args[-2]]
pull_request_menu = PullRequestFilterableMenu(args, update_interval, cache_key)
return pull_request_menu.run()
def execute(self, args, cmd_pressed, shift_pressed):
if args[-1] in _pull_request_modes.keys():
pull_request_mode = args[-1]
call_alfred('stash:pullrequests {} '.format(pull_request_mode))
else:
import webbrowser
webbrowser.open(args[-1])
| f440/alfred-stash-workflow | workflow/src/actions/pull_requests.py | Python | mit | 4,218 |
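# --- Illustrative check (not part of the original workflow) ---
# The badge logic in _num_pull_requests() maps a result count to a circled
# digit; the same lookup in isolation (dictionary truncated for brevity):
_sample_counters = {0: u'', 1: u'①', 2: u'②', 3: u'③'}

def _badge(items):
    return _sample_counters.get(len(items), u'⑳⁺') if items else ''

assert _badge([]) == ''
assert _badge(['a', 'b']) == u'②'
assert _badge(list(range(99))) == u'⑳⁺'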
# -*- coding: utf-8 -*-
from dec2str import dec2str as d2s
class Matrix():
    def __init__(self, rows=None, cols=None):
        self.totaltitle = 'Total'
        self.minsize = len(self.totaltitle)
        # Avoid mutable default arguments: a shared list would leak state
        # between Matrix instances.
        self.rows = rows if rows is not None else []
        self.cols = cols if cols is not None else []
self.vals = {}
# Totals
self.trows = {}
for el in self.rows:
self.trows[el] = 0
self.tcols = {}
for el in self.cols:
self.tcols[el] = 0
self.total = 0
def addcol(self, col):
if col not in self.cols:
self.cols.append(col)
self.tcols[col] = 0
def addcolv(self, col, dval):
self.addcol(col)
for key in sorted(dval.keys()):
self.addrow(key)
self.setval(key, col, dval[key])
self.rows.sort()
self.cols.sort()
def addrow(self, row):
if row not in self.rows:
self.rows.append(row)
self.trows[row] = 0
def setval(self, row, col, val):
assert row in self.rows
assert col in self.cols
        # If val is zero, do nothing.
if val == 0:
return
self.vals[row] = self.vals.get(row, {})
self.vals[row][col] = self.vals[row].get(col, 0)
        # If a value already exists, subtract it from the running totals.
if self.vals[row][col] != 0:
oldv = self.vals[row][col]
self.trows[row] -= oldv
self.tcols[col] -= oldv
self.total -= oldv
self.vals[row][col] = val
self.trows[row] += val
self.tcols[col] += val
self.total += val
def getval(self, row, col):
assert row in self.rows
assert col in self.cols
val1 = self.vals.get(row, {col: 0})
return val1.get(col, 0)
def getcol(self, col):
val = []
for row in self.rows:
val.append(self.getval(row, col))
return val
def getcold(self, col):
dval = {}
for row in self.rows:
dval[row] = self.getval(row, col)
return dval
def sumcol(self, col):
assert col in self.cols
return self.tcols[col]
def colsize(self):
csiz = {}
for col in self.cols:
            csiz[col] = len(str(col))  # start with the width of the column name
tsiz = len(d2s(self.sumcol(col)))
if csiz[col] < tsiz:
csiz[col] = tsiz
return csiz
def colformat(self):
csiz = self.colsize()
cformat = {}
for key in csiz.keys():
cformat[key] = '%%%ss ' % csiz[key]
return cformat
def totalsformat(self):
lent = len(d2s(self.total))
if lent < self.minsize:
lent = self.minsize
return '%%%ss \n' % lent
def rowtitlesformat(self):
maxr = max([len(str(i)) for i in self.rows])
if maxr < self.minsize:
maxr = self.minsize
return '%%%ss ' % maxr
def sumrow(self, row):
assert row in self.rows
return self.trows[row]
def __str__(self):
cformat = self.colformat()
ttitl = self.totaltitle
hrow = self.rowtitlesformat()
htot = self.totalsformat()
ast = hrow % ' '
# print titles
for col in self.cols:
ast += cformat[col] % col
ast += htot % ttitl
for row in self.rows:
ast += hrow % row
for col in self.cols:
ast += cformat[col] % d2s(self.getval(row, col))
ast += htot % d2s(self.sumrow(row))
ast += hrow % ttitl
for col in self.cols:
scol = self.sumcol(col)
ast += cformat[col] % d2s(scol)
ast += htot % d2s(self.total)
return ast
if __name__ == '__main__':
m = Matrix(['d1', 'd2', 'd3', 'd4', 'd5'], ['alpha', 'b', 'c'])
m.setval('d1', 'alpha', 584526.78)
m.setval('d1', 'c', 50)
m.setval('d2', 'alpha', 662538.44)
m.setval('d2', 'alpha', 5)
m.setval('d3', 'b', 100.45)
m.setval('d3', 'c', 80362.44)
m.addcol('theta')
m.addrow('d36')
m.setval('d36', 'alpha', 765344.22)
m.setval('d36', 'theta', 65)
m.setval('d1', 'theta', 44)
k = Matrix()
k.addcolv('thermansi', {'d1': 275, 'd3': 340, 'd4': 300, 'd5': 85})
k.addcolv('asanser', {'d1': 320, 'd2': 260, 'd4': 320, 'd5': 100})
print(k)
dist = k.getcold('asanser')
from distd import distd
print(distd(50, dist))
| tedlaz/pyted | functions/matrix.py | Python | gpl-3.0 | 4,582 |
#
# Katello Repos actions
# Copyright (c) 2012 Red Hat, Inc.
#
# This software is licensed to you under the GNU General Public License,
# version 2 (GPLv2). There is NO WARRANTY for this software, express or
# implied, including the implied warranties of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. You should have received a copy of GPLv2
# along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
#
# Red Hat trademarks are not licensed under GPLv2. No permission is
# granted to use or replicate Red Hat trademarks that are incorporated
# in this software or its documentation.
#
import os
from katello.client.api.package import PackageAPI
from katello.client.cli.base import opt_parser_add_product, opt_parser_add_org, opt_parser_add_environment
from katello.client.core.base import BaseAction, Command
from katello.client.api.utils import get_repo
from katello.client.utils import printer
# base package action --------------------------------------------------------
class PackageAction(BaseAction):
def __init__(self):
super(PackageAction, self).__init__()
self.api = PackageAPI()
# package actions ------------------------------------------------------------
class Info(PackageAction):
description = _('list information about a package')
def setup_parser(self, parser):
# always provide --id option for create, even on registered clients
parser.add_option('--id', dest='id',
help=_("package id, string value (required)"))
parser.add_option('--repo_id', dest='repo_id',
help=_("repository id"))
parser.add_option('--repo', dest='repo',
help=_("repository name"))
opt_parser_add_org(parser)
opt_parser_add_environment(parser, default=_("Library"))
opt_parser_add_product(parser)
def check_options(self, validator):
validator.require('id')
if not validator.exists('repo_id'):
validator.require(('repo', 'org', 'product'))
def run(self):
packId = self.get_option('id')
repoId = self.get_option('repo_id')
repoName = self.get_option('repo')
orgName = self.get_option('org')
envName = self.get_option('environment')
prodName = self.get_option('product')
if not repoId:
repo = get_repo(orgName, prodName, repoName, envName)
repoId = repo["id"]
pack = self.api.package(packId, repoId)
self.printer.add_column('id')
self.printer.add_column('name')
self.printer.add_column('filename')
self.printer.add_column('arch')
self.printer.add_column('release')
self.printer.add_column('version')
self.printer.add_column('vendor')
self.printer.add_column('download_url', show_with=printer.VerboseStrategy)
self.printer.add_column('description', multiline=True, show_with=printer.VerboseStrategy)
self.printer.add_column('provides', multiline=True, show_with=printer.VerboseStrategy)
self.printer.add_column('requires', multiline=True, show_with=printer.VerboseStrategy)
self.printer.set_header(_("Package Information"))
self.printer.print_item(pack)
return os.EX_OK
# package actions ------------------------------------------------------------
class List(PackageAction):
description = _('list packages in a repository')
def setup_parser(self, parser):
parser.add_option('--repo_id', dest='repo_id',
help=_("repository id"))
parser.add_option('--repo', dest='repo',
help=_("repository name"))
opt_parser_add_org(parser)
opt_parser_add_environment(parser, default=_("Library"))
opt_parser_add_product(parser)
def check_options(self, validator):
if not validator.exists('repo_id'):
validator.require(('repo', 'org', 'product'))
def run(self):
repoId = self.get_repo_id()
if not repoId:
return os.EX_DATAERR
self.printer.set_header(_("Package List For Repo %s") % repoId)
packages = self.api.packages_by_repo(repoId)
self.print_packages(packages)
return os.EX_OK
def get_repo_id(self):
repoId = self.get_option('repo_id')
repoName = self.get_option('repo')
orgName = self.get_option('org')
envName = self.get_option('environment')
prodName = self.get_option('product')
if not repoId:
repo = get_repo(orgName, prodName, repoName, envName)
            if repo is not None:
repoId = repo["id"]
return repoId
def print_packages(self, packages):
self.printer.add_column('id')
self.printer.add_column('name')
self.printer.add_column('filename')
self.printer.print_items(packages)
class Search(List):
description = _('search packages in a repository')
def setup_parser(self, parser):
super(Search, self).setup_parser(parser)
parser.add_option('--query', dest='query',
help=_("query string for searching packages, e.g. 'kernel*','kernel-3.3.0-4.el6.x86_64'"))
def check_options(self, validator):
super(Search, self).check_options(validator)
validator.require('query')
def run(self):
repoId = self.get_repo_id()
if not repoId:
return os.EX_DATAERR
query = self.get_option('query')
self.printer.set_header(_("Package List For Repo %s and Query %s") % (repoId, query))
packages = self.api.search(query, repoId)
self.print_packages(packages)
return os.EX_OK
# package command ------------------------------------------------------------
class Package(Command):
description = _('package specific actions in the katello server')
| iNecas/katello | cli/src/katello/client/core/package.py | Python | gpl-2.0 | 5,937 |
import json
from flask_login import UserMixin, AnonymousUserMixin
import hashlib
import logging
import os
import threading
import time
import datetime
import itertools
from funcy import project
import peewee
from passlib.apps import custom_app_context as pwd_context
from playhouse.postgres_ext import ArrayField, DateTimeTZField
from permissions import has_access, view_only
from redash import utils, settings, redis_connection
from redash.query_runner import get_query_runner, get_configuration_schema_for_type
from redash.metrics.database import MeteredPostgresqlExtDatabase, MeteredModel
from redash.utils import generate_token
from redash.utils.configuration import ConfigurationContainer
class Database(object):
def __init__(self):
self.database_config = dict(settings.DATABASE_CONFIG)
self.database_config['register_hstore'] = False
self.database_name = self.database_config.pop('name')
self.database = MeteredPostgresqlExtDatabase(self.database_name, **self.database_config)
self.app = None
self.pid = os.getpid()
def init_app(self, app):
self.app = app
self.register_handlers()
def connect_db(self):
self._check_pid()
self.database.reset_metrics()
self.database.connect()
def close_db(self, exc):
self._check_pid()
if not self.database.is_closed():
self.database.close()
def _check_pid(self):
current_pid = os.getpid()
if self.pid != current_pid:
logging.info("New pid detected (%d!=%d); resetting database lock.", self.pid, current_pid)
self.pid = os.getpid()
self.database._conn_lock = threading.Lock()
def register_handlers(self):
self.app.before_request(self.connect_db)
self.app.teardown_request(self.close_db)
db = Database()
# Support for cast operation on database fields
@peewee.Node.extend()
def cast(self, as_type):
return peewee.Expression(self, '::', peewee.SQL(as_type))
class JSONField(peewee.TextField):
def db_value(self, value):
return json.dumps(value)
def python_value(self, value):
return json.loads(value)
class BaseModel(MeteredModel):
class Meta:
database = db.database
@classmethod
def get_by_id(cls, model_id):
return cls.get(cls.id == model_id)
def pre_save(self, created):
pass
def post_save(self, created):
# Handler for post_save operations. Overriding if needed.
pass
def save(self, *args, **kwargs):
pk_value = self._get_pk_value()
created = kwargs.get('force_insert', False) or not bool(pk_value)
self.pre_save(created)
super(BaseModel, self).save(*args, **kwargs)
self.post_save(created)
def update_instance(self, **kwargs):
for k, v in kwargs.items():
# setattr(model_instance, field_name, field_obj.python_value(value))
setattr(self, k, v)
# We have to run pre-save before calculating dirty_fields. We end up running it twice,
# but pre_save calls should be very quick so it's not big of an issue.
# An alternative can be to recalculate dirty_fields, but it felt more error prone.
self.pre_save(False)
self.save(only=self.dirty_fields)
class ModelTimestampsMixin(BaseModel):
updated_at = DateTimeTZField(default=datetime.datetime.now)
created_at = DateTimeTZField(default=datetime.datetime.now)
def pre_save(self, created):
super(ModelTimestampsMixin, self).pre_save(created)
self.updated_at = datetime.datetime.now()
class BelongsToOrgMixin(object):
@classmethod
def get_by_id_and_org(cls, object_id, org):
return cls.get(cls.id == object_id, cls.org == org)
class PermissionsCheckMixin(object):
def has_permission(self, permission):
return self.has_permissions((permission,))
def has_permissions(self, permissions):
has_permissions = reduce(lambda a, b: a and b,
map(lambda permission: permission in self.permissions,
permissions),
True)
return has_permissions
class AnonymousUser(AnonymousUserMixin, PermissionsCheckMixin):
@property
def permissions(self):
return []
class ApiUser(UserMixin, PermissionsCheckMixin):
def __init__(self, api_key, org, groups):
self.id = api_key
self.groups = groups
self.org = org
def __repr__(self):
return u"<ApiUser: {}>".format(self.id)
@property
def permissions(self):
return ['view_query']
class Organization(ModelTimestampsMixin, BaseModel):
SETTING_GOOGLE_APPS_DOMAINS = 'google_apps_domains'
SETTING_IS_PUBLIC = "is_public"
id = peewee.PrimaryKeyField()
name = peewee.CharField()
slug = peewee.CharField(unique=True)
settings = JSONField()
class Meta:
db_table = 'organizations'
def __repr__(self):
return u"<Organization: {}, {}>".format(self.id, self.name)
# When Organization is used with LocalProxy (like the current_org helper), peewee doesn't recognize it as a Model
# and might call int() on it. This method makes sure it works.
def __int__(self):
return self.id
@classmethod
def get_by_slug(cls, slug):
return cls.get(cls.slug == slug)
@property
def default_group(self):
return self.groups.where(Group.name=='default', Group.type==Group.BUILTIN_GROUP).first()
@property
def google_apps_domains(self):
return self.settings.get(self.SETTING_GOOGLE_APPS_DOMAINS, [])
@property
def is_public(self):
return self.settings.get(self.SETTING_IS_PUBLIC, False)
@property
def admin_group(self):
return self.groups.where(Group.name=='admin', Group.type==Group.BUILTIN_GROUP).first()
class Group(BaseModel, BelongsToOrgMixin):
DEFAULT_PERMISSIONS = ['create_dashboard', 'create_query', 'edit_dashboard', 'edit_query',
'view_query', 'view_source', 'execute_query', 'list_users', 'schedule_query']
BUILTIN_GROUP = 'builtin'
REGULAR_GROUP = 'regular'
id = peewee.PrimaryKeyField()
org = peewee.ForeignKeyField(Organization, related_name="groups")
type = peewee.CharField(default=REGULAR_GROUP)
name = peewee.CharField(max_length=100)
permissions = ArrayField(peewee.CharField, default=DEFAULT_PERMISSIONS)
created_at = DateTimeTZField(default=datetime.datetime.now)
class Meta:
db_table = 'groups'
def to_dict(self):
return {
'id': self.id,
'name': self.name,
'permissions': self.permissions,
'type': self.type,
'created_at': self.created_at
}
@classmethod
def all(cls, org):
return cls.select().where(cls.org==org)
@classmethod
def members(cls, group_id):
return User.select().where(peewee.SQL("%s = ANY(groups)", group_id))
def __unicode__(self):
return unicode(self.id)
class User(ModelTimestampsMixin, BaseModel, BelongsToOrgMixin, UserMixin, PermissionsCheckMixin):
id = peewee.PrimaryKeyField()
org = peewee.ForeignKeyField(Organization, related_name="users")
name = peewee.CharField(max_length=320)
email = peewee.CharField(max_length=320)
password_hash = peewee.CharField(max_length=128, null=True)
groups = ArrayField(peewee.IntegerField, null=True)
api_key = peewee.CharField(max_length=40, unique=True)
class Meta:
db_table = 'users'
indexes = (
(('org', 'email'), True),
)
def __init__(self, *args, **kwargs):
super(User, self).__init__(*args, **kwargs)
def to_dict(self, with_api_key=False):
d = {
'id': self.id,
'name': self.name,
'email': self.email,
'gravatar_url': self.gravatar_url,
'groups': self.groups,
'updated_at': self.updated_at,
'created_at': self.created_at
}
if self.password_hash is None:
d['auth_type'] = 'external'
else:
d['auth_type'] = 'password'
if with_api_key:
d['api_key'] = self.api_key
return d
def pre_save(self, created):
super(User, self).pre_save(created)
if not self.api_key:
self.api_key = generate_token(40)
@property
def gravatar_url(self):
email_md5 = hashlib.md5(self.email.lower()).hexdigest()
return "https://www.gravatar.com/avatar/%s?s=40" % email_md5
@property
def permissions(self):
# TODO: this should be cached.
return list(itertools.chain(*[g.permissions for g in
Group.select().where(Group.id << self.groups)]))
@classmethod
def get_by_email_and_org(cls, email, org):
return cls.get(cls.email == email, cls.org == org)
@classmethod
def get_by_api_key_and_org(cls, api_key, org):
return cls.get(cls.api_key == api_key, cls.org == org)
@classmethod
def all(cls, org):
return cls.select().where(cls.org == org)
def __unicode__(self):
return u'%s (%s)' % (self.name, self.email)
def hash_password(self, password):
self.password_hash = pwd_context.encrypt(password)
def verify_password(self, password):
return self.password_hash and pwd_context.verify(password, self.password_hash)
class ConfigurationField(peewee.TextField):
def db_value(self, value):
return value.to_json()
def python_value(self, value):
return ConfigurationContainer.from_json(value)
class DataSource(BelongsToOrgMixin, BaseModel):
id = peewee.PrimaryKeyField()
org = peewee.ForeignKeyField(Organization, related_name="data_sources")
name = peewee.CharField()
type = peewee.CharField()
options = ConfigurationField()
queue_name = peewee.CharField(default="queries")
scheduled_queue_name = peewee.CharField(default="scheduled_queries")
created_at = DateTimeTZField(default=datetime.datetime.now)
class Meta:
db_table = 'data_sources'
indexes = (
(('org', 'name'), True),
)
def to_dict(self, all=False, with_permissions=False):
d = {
'id': self.id,
'name': self.name,
'type': self.type,
'syntax': self.query_runner.syntax
}
if all:
schema = get_configuration_schema_for_type(self.type)
self.options.set_schema(schema)
d['options'] = self.options.to_dict(mask_secrets=True)
d['queue_name'] = self.queue_name
d['scheduled_queue_name'] = self.scheduled_queue_name
d['groups'] = self.groups
if with_permissions:
d['view_only'] = self.data_source_groups.view_only
return d
def __unicode__(self):
return self.name
@classmethod
def create_with_group(cls, *args, **kwargs):
data_source = cls.create(*args, **kwargs)
DataSourceGroup.create(data_source=data_source, group=data_source.org.default_group)
return data_source
def get_schema(self, refresh=False):
key = "data_source:schema:{}".format(self.id)
cache = None
if not refresh:
cache = redis_connection.get(key)
if cache is None:
query_runner = self.query_runner
schema = sorted(query_runner.get_schema(get_stats=refresh), key=lambda t: t['name'])
redis_connection.set(key, json.dumps(schema))
else:
schema = json.loads(cache)
return schema
def add_group(self, group, view_only=False):
dsg = DataSourceGroup.create(group=group, data_source=self, view_only=view_only)
setattr(self, 'data_source_groups', dsg)
def remove_group(self, group):
DataSourceGroup.delete().where(DataSourceGroup.group==group, DataSourceGroup.data_source==self).execute()
def update_group_permission(self, group, view_only):
dsg = DataSourceGroup.get(DataSourceGroup.group==group, DataSourceGroup.data_source==self)
dsg.view_only = view_only
dsg.save()
setattr(self, 'data_source_groups', dsg)
@property
def query_runner(self):
return get_query_runner(self.type, self.options)
@classmethod
def all(cls, org, groups=None):
data_sources = cls.select().where(cls.org==org).order_by(cls.id.asc())
if groups:
data_sources = data_sources.join(DataSourceGroup).where(DataSourceGroup.group << groups)
return data_sources
@property
def groups(self):
groups = DataSourceGroup.select().where(DataSourceGroup.data_source==self)
return dict(map(lambda g: (g.group_id, g.view_only), groups))
class DataSourceGroup(BaseModel):
data_source = peewee.ForeignKeyField(DataSource)
group = peewee.ForeignKeyField(Group, related_name="data_sources")
view_only = peewee.BooleanField(default=False)
class Meta:
db_table = "data_source_groups"
class QueryResult(BaseModel, BelongsToOrgMixin):
id = peewee.PrimaryKeyField()
org = peewee.ForeignKeyField(Organization)
data_source = peewee.ForeignKeyField(DataSource)
query_hash = peewee.CharField(max_length=32, index=True)
query = peewee.TextField()
data = peewee.TextField()
runtime = peewee.FloatField()
retrieved_at = DateTimeTZField()
class Meta:
db_table = 'query_results'
def to_dict(self):
return {
'id': self.id,
'query_hash': self.query_hash,
'query': self.query,
'data': json.loads(self.data),
'data_source_id': self.data_source_id,
'runtime': self.runtime,
'retrieved_at': self.retrieved_at
}
@classmethod
def unused(cls, days=7):
age_threshold = datetime.datetime.now() - datetime.timedelta(days=days)
unused_results = cls.select().where(Query.id == None, cls.retrieved_at < age_threshold)\
.join(Query, join_type=peewee.JOIN_LEFT_OUTER)
return unused_results
@classmethod
def get_latest(cls, data_source, query, max_age=0):
query_hash = utils.gen_query_hash(query)
if max_age == -1:
query = cls.select().where(cls.query_hash == query_hash,
cls.data_source == data_source).order_by(cls.retrieved_at.desc())
else:
query = cls.select().where(cls.query_hash == query_hash, cls.data_source == data_source,
peewee.SQL("retrieved_at + interval '%s second' >= now() at time zone 'utc'",
max_age)).order_by(cls.retrieved_at.desc())
return query.first()
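    # Illustrative call (added note, not in the original source): fetch a
    # cached result for this query text at most five minutes old. A max_age
    # of -1 accepts a result of any age, and None is returned when nothing
    # matches. `data_source` stands for whatever DataSource row you hold.
    #
    #   cached = QueryResult.get_latest(data_source, "SELECT 1", max_age=300)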
@classmethod
def store_result(cls, org_id, data_source_id, query_hash, query, data, run_time, retrieved_at):
query_result = cls.create(org=org_id,
query_hash=query_hash,
query=query,
runtime=run_time,
data_source=data_source_id,
retrieved_at=retrieved_at,
data=data)
logging.info("Inserted query (%s) data; id=%s", query_hash, query_result.id)
sql = "UPDATE queries SET latest_query_data_id = %s WHERE query_hash = %s AND data_source_id = %s RETURNING id"
query_ids = [row[0] for row in db.database.execute_sql(sql, params=(query_result.id, query_hash, data_source_id))]
# TODO: when peewee with update & returning support is released, we can get back to using this code:
# updated_count = Query.update(latest_query_data=query_result).\
# where(Query.query_hash==query_hash, Query.data_source==data_source_id).\
# execute()
logging.info("Updated %s queries with result (%s).", len(query_ids), query_hash)
return query_result, query_ids
def __unicode__(self):
return u"%d | %s | %s" % (self.id, self.query_hash, self.retrieved_at)
@property
def groups(self):
return self.data_source.groups
def should_schedule_next(previous_iteration, now, schedule):
if schedule.isdigit():
ttl = int(schedule)
next_iteration = previous_iteration + datetime.timedelta(seconds=ttl)
else:
hour, minute = schedule.split(':')
hour, minute = int(hour), int(minute)
# The following logic is needed for cases like the following:
# - The query scheduled to run at 23:59.
# - The scheduler wakes up at 00:01.
# - Using naive implementation of comparing timestamps, it will skip the execution.
normalized_previous_iteration = previous_iteration.replace(hour=hour, minute=minute)
if normalized_previous_iteration > previous_iteration:
previous_iteration = normalized_previous_iteration - datetime.timedelta(days=1)
next_iteration = (previous_iteration + datetime.timedelta(days=1)).replace(hour=hour, minute=minute)
return now > next_iteration
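# Worked example (illustrative, not part of the original module) of the
# wraparound guard above, with assumed values:
#
#   >>> import datetime
#   >>> prev = datetime.datetime(2015, 1, 1, 22, 0)  # last run, before 23:59
#   >>> now = datetime.datetime(2015, 1, 2, 0, 1)    # scheduler wakes up
#   >>> should_schedule_next(prev, now, "23:59")
#   True
#
# Without the normalization step, next_iteration would be computed as
# 2015-01-02 23:59 and the 23:59 run would be skipped for a whole day.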
class Query(ModelTimestampsMixin, BaseModel, BelongsToOrgMixin):
id = peewee.PrimaryKeyField()
org = peewee.ForeignKeyField(Organization, related_name="queries")
data_source = peewee.ForeignKeyField(DataSource, null=True)
latest_query_data = peewee.ForeignKeyField(QueryResult, null=True)
name = peewee.CharField(max_length=255)
description = peewee.CharField(max_length=4096, null=True)
query = peewee.TextField()
query_hash = peewee.CharField(max_length=32)
api_key = peewee.CharField(max_length=40)
user_email = peewee.CharField(max_length=360, null=True)
user = peewee.ForeignKeyField(User)
last_modified_by = peewee.ForeignKeyField(User, null=True, related_name="modified_queries")
is_archived = peewee.BooleanField(default=False, index=True)
schedule = peewee.CharField(max_length=10, null=True)
class Meta:
db_table = 'queries'
def to_dict(self, with_stats=False, with_visualizations=False, with_user=True, with_last_modified_by=True):
d = {
'id': self.id,
'latest_query_data_id': self._data.get('latest_query_data', None),
'name': self.name,
'description': self.description,
'query': self.query,
'query_hash': self.query_hash,
'schedule': self.schedule,
'api_key': self.api_key,
'is_archived': self.is_archived,
'updated_at': self.updated_at,
'created_at': self.created_at,
'data_source_id': self.data_source_id
}
if with_user:
d['user'] = self.user.to_dict()
else:
d['user_id'] = self.user_id
if with_last_modified_by:
d['last_modified_by'] = self.last_modified_by.to_dict() if self.last_modified_by is not None else None
else:
d['last_modified_by_id'] = self.last_modified_by_id
if with_stats:
d['retrieved_at'] = self.retrieved_at
d['runtime'] = self.runtime
if with_visualizations:
d['visualizations'] = [vis.to_dict(with_query=False)
for vis in self.visualizations]
return d
def archive(self):
self.is_archived = True
self.schedule = None
for vis in self.visualizations:
for w in vis.widgets:
w.delete_instance()
self.save()
@classmethod
def all_queries(cls, groups):
q = Query.select(Query, User, QueryResult.retrieved_at, QueryResult.runtime)\
.join(QueryResult, join_type=peewee.JOIN_LEFT_OUTER)\
.switch(Query).join(User)\
.join(DataSourceGroup, on=(Query.data_source==DataSourceGroup.data_source))\
.where(Query.is_archived==False)\
.group_by(Query.id, User.id, QueryResult.id, QueryResult.retrieved_at, QueryResult.runtime)\
.order_by(cls.created_at.desc())
return q
@classmethod
def outdated_queries(cls):
queries = cls.select(cls, QueryResult.retrieved_at, DataSource)\
.join(QueryResult)\
.switch(Query).join(DataSource)\
.where(cls.schedule != None)
now = utils.utcnow()
outdated_queries = {}
for query in queries:
if should_schedule_next(query.latest_query_data.retrieved_at, now, query.schedule):
key = "{}:{}".format(query.query_hash, query.data_source.id)
outdated_queries[key] = query
return outdated_queries.values()
@classmethod
def search(cls, term, groups):
# TODO: This is very naive implementation of search, to be replaced with PostgreSQL full-text-search solution.
where = (cls.name**u"%{}%".format(term)) | (cls.description**u"%{}%".format(term))
if term.isdigit():
where |= cls.id == term
where &= cls.is_archived == False
return cls.select()\
.join(DataSourceGroup, on=(Query.data_source==DataSourceGroup.data_source)) \
.where(where) \
.where(DataSourceGroup.group << groups)\
.order_by(cls.created_at.desc())
@classmethod
def recent(cls, groups, user_id=None, limit=20):
query = cls.select(Query, User).where(Event.created_at > peewee.SQL("current_date - 7")).\
join(Event, on=(Query.id == Event.object_id.cast('integer'))). \
join(DataSourceGroup, on=(Query.data_source==DataSourceGroup.data_source)). \
switch(Query).join(User).\
where(Event.action << ('edit', 'execute', 'edit_name', 'edit_description', 'view_source')).\
where(~(Event.object_id >> None)).\
where(Event.object_type == 'query'). \
where(DataSourceGroup.group << groups).\
where(cls.is_archived == False).\
group_by(Event.object_id, Query.id, User.id).\
order_by(peewee.SQL("count(0) desc"))
if user_id:
query = query.where(Event.user == user_id)
query = query.limit(limit)
return query
def pre_save(self, created):
super(Query, self).pre_save(created)
self.query_hash = utils.gen_query_hash(self.query)
self._set_api_key()
if self.last_modified_by is None:
self.last_modified_by = self.user
def post_save(self, created):
if created:
self._create_default_visualizations()
def _create_default_visualizations(self):
table_visualization = Visualization(query=self, name="Table",
description='',
type="TABLE", options="{}")
table_visualization.save()
def _set_api_key(self):
if not self.api_key:
self.api_key = hashlib.sha1(
u''.join((str(time.time()), self.query, str(self.user_id), self.name)).encode('utf-8')).hexdigest()
@property
def runtime(self):
return self.latest_query_data.runtime
@property
def retrieved_at(self):
return self.latest_query_data.retrieved_at
@property
def groups(self):
if self.data_source is None:
return {}
return self.data_source.groups
def __unicode__(self):
return unicode(self.id)
@classmethod
    def get_by_hash(cls, query_hash):
return cls.get(cls.query_hash == query_hash)
class Alert(ModelTimestampsMixin, BaseModel):
UNKNOWN_STATE = 'unknown'
OK_STATE = 'ok'
TRIGGERED_STATE = 'triggered'
id = peewee.PrimaryKeyField()
name = peewee.CharField()
query = peewee.ForeignKeyField(Query, related_name='alerts')
user = peewee.ForeignKeyField(User, related_name='alerts')
options = JSONField()
state = peewee.CharField(default=UNKNOWN_STATE)
last_triggered_at = DateTimeTZField(null=True)
rearm = peewee.IntegerField(null=True)
class Meta:
db_table = 'alerts'
@classmethod
def all(cls, groups):
return cls.select(Alert, User, Query)\
.join(Query)\
.join(DataSourceGroup, on=(Query.data_source==DataSourceGroup.data_source))\
.where(DataSourceGroup.group << groups)\
.switch(Alert)\
.join(User)
@classmethod
def get_by_id_and_org(cls, id, org):
return cls.select(Alert, User, Query).join(Query).switch(Alert).join(User).where(cls.id==id, Query.org==org).get()
def to_dict(self, full=True):
d = {
'id': self.id,
'name': self.name,
'options': self.options,
'state': self.state,
'last_triggered_at': self.last_triggered_at,
'updated_at': self.updated_at,
'created_at': self.created_at,
'rearm': self.rearm
}
if full:
d['query'] = self.query.to_dict()
d['user'] = self.user.to_dict()
else:
d['query_id'] = self.query_id
d['user_id'] = self.user_id
return d
def evaluate(self):
data = json.loads(self.query.latest_query_data.data)
# todo: safe guard for empty
value = data['rows'][0][self.options['column']]
op = self.options['op']
if op == 'greater than' and value > self.options['value']:
new_state = self.TRIGGERED_STATE
elif op == 'less than' and value < self.options['value']:
new_state = self.TRIGGERED_STATE
elif op == 'equals' and value == self.options['value']:
new_state = self.TRIGGERED_STATE
else:
new_state = self.OK_STATE
return new_state
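    # The options payload evaluate() reads looks like this (illustrative
    # values; the keys are taken directly from the code above):
    #
    #   {'column': 'error_count', 'op': 'greater than', 'value': 5}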
def subscribers(self):
return User.select().join(AlertSubscription).where(AlertSubscription.alert==self)
@property
def groups(self):
return self.query.groups
class AlertSubscription(ModelTimestampsMixin, BaseModel):
user = peewee.ForeignKeyField(User)
alert = peewee.ForeignKeyField(Alert)
class Meta:
db_table = 'alert_subscriptions'
def to_dict(self):
return {
'user': self.user.to_dict(),
'alert_id': self.alert_id
}
@classmethod
def all(cls, alert_id):
return AlertSubscription.select(AlertSubscription, User).join(User).where(AlertSubscription.alert==alert_id)
@classmethod
def unsubscribe(cls, alert_id, user_id):
query = AlertSubscription.delete().where(AlertSubscription.alert==alert_id).where(AlertSubscription.user==user_id)
return query.execute()
class Dashboard(ModelTimestampsMixin, BaseModel, BelongsToOrgMixin):
id = peewee.PrimaryKeyField()
org = peewee.ForeignKeyField(Organization, related_name="dashboards")
slug = peewee.CharField(max_length=140, index=True)
name = peewee.CharField(max_length=100)
user_email = peewee.CharField(max_length=360, null=True)
user = peewee.ForeignKeyField(User)
layout = peewee.TextField()
groups = ArrayField(peewee.IntegerField, null=True)
dashboard_filters_enabled = peewee.BooleanField(default=False)
is_archived = peewee.BooleanField(default=False, index=True)
class Meta:
db_table = 'dashboards'
def to_dict(self, with_widgets=False, user=None):
layout = json.loads(self.layout)
if with_widgets:
widget_list = Widget.select(Widget, Visualization, Query, User)\
.where(Widget.dashboard == self.id)\
.join(Visualization, join_type=peewee.JOIN_LEFT_OUTER)\
.join(Query, join_type=peewee.JOIN_LEFT_OUTER)\
.join(User, join_type=peewee.JOIN_LEFT_OUTER)
widgets = {}
for w in widget_list:
if w.visualization_id is None:
widgets[w.id] = w.to_dict()
elif user and has_access(w.visualization.query.groups, user, view_only):
widgets[w.id] = w.to_dict()
else:
widgets[w.id] = project(w.to_dict(),
('id', 'width', 'dashboard_id', 'options', 'created_at', 'updated_at'))
widgets[w.id]['restricted'] = True
# The following is a workaround for cases when the widget object gets deleted without the dashboard layout
# updated. This happens for users with old databases that didn't have a foreign key relationship between
# visualizations and widgets.
            # It's temporary until a better solution is implemented (we should
            # probably move the position information to the widget).
widgets_layout = []
for row in layout:
new_row = []
for widget_id in row:
widget = widgets.get(widget_id, None)
if widget:
new_row.append(widget)
widgets_layout.append(new_row)
# widgets_layout = map(lambda row: map(lambda widget_id: widgets.get(widget_id, None), row), layout)
else:
widgets_layout = None
return {
'id': self.id,
'slug': self.slug,
'name': self.name,
'user_id': self.user_id,
'layout': layout,
'groups': self.groups,
'dashboard_filters_enabled': self.dashboard_filters_enabled,
'widgets': widgets_layout,
'is_archived': self.is_archived,
'updated_at': self.updated_at,
'created_at': self.created_at
}
@classmethod
def all(cls, org=None):
if org:
return cls.select().where(cls.org==org, cls.is_archived==False)
else:
return cls.select().where(cls.is_archived==False)
@classmethod
def get_by_slug_and_org(cls, slug, org):
return cls.get(cls.slug == slug, cls.org==org)
@classmethod
def recent(cls, org, user_id=None, limit=20):
query = cls.select().where(Event.created_at > peewee.SQL("current_date - 7")). \
join(Event, on=(Dashboard.id == Event.object_id.cast('integer'))). \
where(Event.action << ('edit', 'view')).\
where(~(Event.object_id >> None)). \
where(Event.object_type == 'dashboard'). \
where(Dashboard.is_archived == False). \
where(Dashboard.org == org).\
group_by(Event.object_id, Dashboard.id). \
order_by(peewee.SQL("count(0) desc"))
if user_id:
query = query.where(Event.user == user_id)
query = query.limit(limit)
return query
def save(self, *args, **kwargs):
if not self.slug:
self.slug = utils.slugify(self.name)
tries = 1
while self.select().where(Dashboard.slug == self.slug).first() is not None:
self.slug = utils.slugify(self.name) + "_{0}".format(tries)
tries += 1
super(Dashboard, self).save(*args, **kwargs)
def __unicode__(self):
return u"%s=%s" % (self.id, self.name)
@classmethod
    def get_by_id(cls, id):
        return cls.get(cls.id==id, cls.is_archived==False)
    @classmethod
    def get_by_groups(cls, groups):
        return cls.select().where(cls.groups.contains_any(groups))
    @classmethod
    def get_by_slug(cls, slug):
        return cls.get(cls.slug == slug)
    @classmethod
    def get_by_slug_group(cls, slug, groups):
        return cls.select().where(cls.slug == slug, cls.groups.contains_any(groups))
class Visualization(ModelTimestampsMixin, BaseModel):
id = peewee.PrimaryKeyField()
type = peewee.CharField(max_length=100)
query = peewee.ForeignKeyField(Query, related_name='visualizations')
name = peewee.CharField(max_length=255)
description = peewee.CharField(max_length=4096, null=True)
options = peewee.TextField()
class Meta:
db_table = 'visualizations'
def to_dict(self, with_query=True):
d = {
'id': self.id,
'type': self.type,
'name': self.name,
'description': self.description,
'options': json.loads(self.options),
'updated_at': self.updated_at,
'created_at': self.created_at
}
if with_query:
d['query'] = self.query.to_dict()
return d
@classmethod
def get_by_id_and_org(cls, visualization_id, org):
return cls.select(Visualization, Query).join(Query).where(cls.id == visualization_id,
Query.org == org).get()
def __unicode__(self):
return u"%s %s" % (self.id, self.type)
class Widget(ModelTimestampsMixin, BaseModel):
id = peewee.PrimaryKeyField()
visualization = peewee.ForeignKeyField(Visualization, related_name='widgets', null=True)
text = peewee.TextField(null=True)
width = peewee.IntegerField()
options = peewee.TextField()
dashboard = peewee.ForeignKeyField(Dashboard, related_name='widgets', index=True)
    # unused; kept for backward compatibility:
type = peewee.CharField(max_length=100, null=True)
query_id = peewee.IntegerField(null=True)
class Meta:
db_table = 'widgets'
def to_dict(self):
d = {
'id': self.id,
'width': self.width,
'options': json.loads(self.options),
'dashboard_id': self.dashboard_id,
'text': self.text,
'updated_at': self.updated_at,
'created_at': self.created_at
}
if self.visualization and self.visualization.id:
d['visualization'] = self.visualization.to_dict()
return d
def __unicode__(self):
return u"%s" % self.id
@classmethod
def get_by_id_and_org(cls, widget_id, org):
return cls.select(cls, Dashboard).join(Dashboard).where(cls.id == widget_id, Dashboard.org == org).get()
def delete_instance(self, *args, **kwargs):
layout = json.loads(self.dashboard.layout)
layout = map(lambda row: filter(lambda w: w != self.id, row), layout)
layout = filter(lambda row: len(row) > 0, layout)
self.dashboard.layout = json.dumps(layout)
self.dashboard.save()
super(Widget, self).delete_instance(*args, **kwargs)
class Event(BaseModel):
org = peewee.ForeignKeyField(Organization, related_name="events")
user = peewee.ForeignKeyField(User, related_name="events", null=True)
action = peewee.CharField()
object_type = peewee.CharField()
object_id = peewee.CharField(null=True)
additional_properties = peewee.TextField(null=True)
created_at = DateTimeTZField(default=datetime.datetime.now)
class Meta:
db_table = 'events'
def __unicode__(self):
return u"%s,%s,%s,%s" % (self.user_id, self.action, self.object_type, self.object_id)
@classmethod
def record(cls, event):
org = event.pop('org_id')
user = event.pop('user_id')
action = event.pop('action')
object_type = event.pop('object_type')
object_id = event.pop('object_id', None)
created_at = datetime.datetime.utcfromtimestamp(event.pop('timestamp'))
additional_properties = json.dumps(event)
event = cls.create(org=org, user=user, action=action, object_type=object_type, object_id=object_id,
additional_properties=additional_properties, created_at=created_at)
return event
all_models = (Organization, Group, DataSource, DataSourceGroup, User, QueryResult, Query, Alert, AlertSubscription, Dashboard, Visualization, Widget, Event)
def init_db():
default_org = Organization.create(name="Default", slug='default', settings={})
admin_group = Group.create(name='admin', permissions=['admin', 'super_admin'], org=default_org, type=Group.BUILTIN_GROUP)
default_group = Group.create(name='default', permissions=Group.DEFAULT_PERMISSIONS, org=default_org, type=Group.BUILTIN_GROUP)
return default_org, admin_group, default_group
def create_db(create_tables, drop_tables):
db.connect_db()
for model in all_models:
if drop_tables and model.table_exists():
model.drop_table(cascade=True)
if create_tables and not model.table_exists():
model.create_table()
db.close_db(None)
| olivetree123/redash-x | redash/models.py | Python | bsd-2-clause | 36,856 |
import angr
class InterlockedExchange(angr.SimProcedure):
def run(self, target, value): #pylint:disable=arguments-differ
if not self.state.solver.symbolic(target):
old_value = self.state.memory.load(target, 4, endness=self.state.arch.memory_endness)
self.state.memory.store(target, value)
else:
old_value = self.state.solver.Unconstrained("unconstrained_ret_%s" % self.display_name, self.state.arch.bits, key=('api', 'InterlockedExchange'))
return old_value
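# Hooking sketch (illustrative; the binary name is an assumption, but
# hook_symbol() is the standard angr API for installing a SimProcedure):
#
#   import angr
#   proj = angr.Project('app.exe', auto_load_libs=False)
#   proj.hook_symbol('InterlockedExchange', InterlockedExchange())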
| angr/angr | angr/procedures/win32/InterlockedExchange.py | Python | bsd-2-clause | 525 |
#!/usr/bin/python3
# -*- coding: utf-8 -*-
from PySide.QtGui import QWidget, QBoxLayout, QFileDialog, QPushButton, QLabel, QFrame
import os
from correlate import correlate
class MainWindow(QWidget, object):
# noinspection PyUnresolvedReferences
def __init__(self):
super(MainWindow, self).__init__()
self.clip_filename = None
self.audio_filename = None
layout = QBoxLayout(QBoxLayout.TopToBottom)
clip_row = QBoxLayout(QBoxLayout.LeftToRight)
self.clip_button = QPushButton()
self.clip_button.clicked.connect(self.open_clip)
self.clip_button.setText(self.tr("Open clip"))
clip_row.addWidget(self.clip_button)
self.clip_view = QLabel()
clip_row.addWidget(self.clip_view)
clip_frame = QFrame()
clip_frame.setLayout(clip_row)
layout.addWidget(clip_frame)
audio_row = QBoxLayout(QBoxLayout.LeftToRight)
self.audio_button = QPushButton()
self.audio_button.clicked.connect(self.open_audio)
self.audio_button.setText(self.tr("Open audio"))
audio_row.addWidget(self.audio_button)
self.audio_view = QLabel()
audio_row.addWidget(self.audio_view)
audio_frame = QFrame()
audio_frame.setLayout(audio_row)
layout.addWidget(audio_frame)
save_row = QBoxLayout(QBoxLayout.LeftToRight)
self.save_button = QPushButton()
self.save_button.clicked.connect(self.save)
self.save_button.setText(self.tr("Save synced clip"))
save_row.addWidget(self.save_button)
save_frame = QFrame()
save_frame.setLayout(save_row)
layout.addWidget(save_frame)
self.update_save_button()
layout.addStretch()
self.setLayout(layout)
self.show()
def open_clip(self):
self.clip_filename = QFileDialog.getOpenFileName(
self,
self.tr("Open Clip"),
os.getcwd(),
self.tr("Video Files [.mp4, .ogv, .mkv, .avi, .mov] (*.mp4 *.ogv *.mkv *.avi *.mov);;" +
"Audio Files [.flac, .ogg, .wav, .mp3] (*.flac *.ogg *.wav *.mp3)"))[0]
self.clip_view.setText(self.clip_filename)
self.update_save_button()
def open_audio(self):
self.audio_filename = QFileDialog.getOpenFileName(
self,
self.tr("Open Audio"),
os.getcwd(),
self.tr("Audio Files [.flac, .ogg, .wav, .mp3] (*.flac *.ogg *.wav *.mp3)"))[0]
self.audio_view.setText(self.audio_filename)
self.update_save_button()
def update_save_button(self):
self.save_button.setEnabled(bool(self.clip_filename and self.audio_filename))
def save(self):
dialog = QFileDialog(self)
dialog.setFileMode(QFileDialog.AnyFile)
dialog.setAcceptMode(QFileDialog.AcceptSave)
dialog.setDefaultSuffix("avi")
dialog.setDirectory(os.getcwd())
dialog.setNameFilter(self.tr("Video Files [.avi] (*.avi)"))
if dialog.exec_():
output_file_name = dialog.selectedFiles()[0]
correlate(self.clip_filename, self.audio_filename, output_file_name)
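# Minimal launcher (a sketch added for completeness; the original file only
# defines MainWindow, so how it is started elsewhere is an assumption):
if __name__ == "__main__":
    import sys
    from PySide.QtGui import QApplication
    app = QApplication(sys.argv)  # Qt requires exactly one QApplication
    window = MainWindow()         # __init__ already calls show()
    sys.exit(app.exec_())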
| pinae/Audiosyncer | gui.py | Python | gpl-3.0 | 3,171 |
# (c) 2013, Daniel Hokka Zakrisson <daniel@hozac.com>
# (c) 2014, Serge van Ginderachter <serge@vanginderachter.be>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#############################################
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
from ansible import constants as C
from ansible.errors import AnsibleError
from ansible.utils.vars import combine_vars
from ansible.module_utils._text import to_native
#FIXME: make into plugins
from ansible.inventory.ini import InventoryParser as InventoryINIParser
from ansible.inventory.yaml import InventoryParser as InventoryYAMLParser
from ansible.inventory.script import InventoryScript
__all__ = ['get_file_parser']
def get_file_parser(hostsfile, groups, loader):
# check to see if the specified file starts with a
# shebang (#!/), so if an error is raised by the parser
# class we can show a more apropos error
shebang_present = False
processed = False
myerr = []
parser = None
try:
with open(hostsfile, 'rb') as inv_file:
initial_chars = inv_file.read(2)
if initial_chars.startswith(b'#!'):
shebang_present = True
except:
pass
#FIXME: make this 'plugin loop'
# script
if loader.is_executable(hostsfile):
try:
parser = InventoryScript(loader=loader, groups=groups, filename=hostsfile)
processed = True
except Exception as e:
myerr.append('Attempted to execute "%s" as inventory script: %s' % (hostsfile, to_native(e)))
elif shebang_present:
myerr.append("The inventory file \'%s\' looks like it should be an executable inventory script, but is not marked executable. "
"Perhaps you want to correct this with `chmod +x %s`?" % (hostsfile, hostsfile))
# YAML/JSON
if not processed and not shebang_present and os.path.splitext(hostsfile)[-1] in C.YAML_FILENAME_EXTENSIONS:
try:
parser = InventoryYAMLParser(loader=loader, groups=groups, filename=hostsfile)
processed = True
except Exception as e:
myerr.append('Attempted to read "%s" as YAML: %s' % (to_native(hostsfile), to_native(e)))
# ini
if not processed and not shebang_present:
try:
parser = InventoryINIParser(loader=loader, groups=groups, filename=hostsfile)
processed = True
except Exception as e:
myerr.append('Attempted to read "%s" as ini file: %s ' % (to_native(hostsfile), to_native(e)))
if not processed and myerr:
raise AnsibleError('\n'.join(myerr))
return parser
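# Illustrative use (a sketch; assumes Ansible's DataLoader, which is what
# callers of this module pass in as `loader`):
#
#   from ansible.parsing.dataloader import DataLoader
#   parser = get_file_parser('/etc/ansible/hosts', {}, DataLoader())
#   if parser is not None:
#       print(parser.groups.keys())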
class InventoryDirectory(object):
''' Host inventory parser for ansible using a directory of inventories. '''
def __init__(self, loader, groups=None, filename=C.DEFAULT_HOST_LIST):
if groups is None:
groups = dict()
self.names = os.listdir(filename)
self.names.sort()
self.directory = filename
self.parsers = []
self.hosts = {}
self.groups = groups
self._loader = loader
for i in self.names:
# Skip files that end with certain extensions or characters
if any(i.endswith(ext) for ext in C.DEFAULT_INVENTORY_IGNORE):
continue
# Skip hidden files
if i.startswith('.') and not i.startswith('./'):
continue
# These are things inside of an inventory basedir
if i in ("host_vars", "group_vars", "vars_plugins"):
continue
fullpath = os.path.join(self.directory, i)
if os.path.isdir(fullpath):
parser = InventoryDirectory(loader=loader, groups=groups, filename=fullpath)
else:
parser = get_file_parser(fullpath, self.groups, loader)
if parser is None:
#FIXME: needs to use display
import warnings
                warnings.warn("Could not find parser for %s, skipping" % fullpath)
continue
self.parsers.append(parser)
            # retrieve all groups and hosts from the parser and add them to
# self, don't look at group lists yet, to avoid
# recursion trouble, but just make sure all objects exist in self
newgroups = parser.groups.values()
for group in newgroups:
for host in group.hosts:
self._add_host(host)
for group in newgroups:
self._add_group(group)
# now check the objects lists so they contain only objects from
# self; membership data in groups is already fine (except all &
# ungrouped, see later), but might still reference objects not in self
for group in self.groups.values():
# iterate on a copy of the lists, as those lists get changed in
# the loop
# list with group's child group objects:
for child in group.child_groups[:]:
if child != self.groups[child.name]:
group.child_groups.remove(child)
group.child_groups.append(self.groups[child.name])
# list with group's parent group objects:
for parent in group.parent_groups[:]:
if parent != self.groups[parent.name]:
group.parent_groups.remove(parent)
group.parent_groups.append(self.groups[parent.name])
# list with group's host objects:
for host in group.hosts[:]:
if host != self.hosts[host.name]:
group.hosts.remove(host)
group.hosts.append(self.hosts[host.name])
# also check here that the group that contains host, is
# also contained in the host's group list
if group not in self.hosts[host.name].groups:
self.hosts[host.name].groups.append(group)
# extra checks on special groups all and ungrouped
# remove hosts from 'ungrouped' if they became member of other groups
if 'ungrouped' in self.groups:
ungrouped = self.groups['ungrouped']
# loop on a copy of ungrouped hosts, as we want to change that list
for host in frozenset(ungrouped.hosts):
if len(host.groups) > 1:
host.groups.remove(ungrouped)
ungrouped.hosts.remove(host)
# remove hosts from 'all' if they became member of other groups
# all should only contain direct children, not grandchildren
        # direct children should have depth == 1
if 'all' in self.groups:
allgroup = self.groups['all' ]
# loop on a copy of all's child groups, as we want to change that list
for group in allgroup.child_groups[:]:
                # groups might once have been added to all, and later be added
                # to another group: we need to remove the link with all then
if len(group.parent_groups) > 1 and allgroup in group.parent_groups:
# real children of all have just 1 parent, all
# this one has more, so not a direct child of all anymore
group.parent_groups.remove(allgroup)
allgroup.child_groups.remove(group)
elif allgroup not in group.parent_groups:
# this group was once added to all, but doesn't list it as
# a parent any more; the info in the group is the correct
# info
allgroup.child_groups.remove(group)
def _add_group(self, group):
""" Merge an existing group or add a new one;
Track parent and child groups, and hosts of the new one """
if group.name not in self.groups:
# it's brand new, add him!
self.groups[group.name] = group
# the Group class does not (yet) implement __eq__/__ne__,
# so unlike Host we do a regular comparison here
if self.groups[group.name] != group:
# different object, merge
self._merge_groups(self.groups[group.name], group)
def _add_host(self, host):
if host.name not in self.hosts:
# Papa's got a brand new host
self.hosts[host.name] = host
# because the __eq__/__ne__ methods in Host() compare the
# name fields rather than references, we use id() here to
# do the object comparison for merges
if self.hosts[host.name] != host:
# different object, merge
self._merge_hosts(self.hosts[host.name], host)
def _merge_groups(self, group, newgroup):
""" Merge all of instance newgroup into group,
update parent/child relationships
        group lists may still contain group objects that exist in self with
        the same name but were instantiated as different objects in some
        other inventory parser; these are handled later """
# name
if group.name != newgroup.name:
raise AnsibleError("Cannot merge inventory group %s with %s" % (group.name, newgroup.name))
# depth
group.depth = max([group.depth, newgroup.depth])
# hosts list (host objects are by now already added to self.hosts)
for host in newgroup.hosts:
grouphosts = dict([(h.name, h) for h in group.hosts])
if host.name in grouphosts:
# same host name but different object, merge
self._merge_hosts(grouphosts[host.name], host)
else:
# new membership, add host to group from self
# group from self will also be added again to host.groups, but
# as different object
group.add_host(self.hosts[host.name])
# now remove this the old object for group in host.groups
for hostgroup in [g for g in host.groups]:
if hostgroup.name == group.name and hostgroup != self.groups[group.name]:
self.hosts[host.name].groups.remove(hostgroup)
# group child membership relation
for newchild in newgroup.child_groups:
# dict with existing child groups:
childgroups = dict([(g.name, g) for g in group.child_groups])
# check if child of new group is already known as a child
if newchild.name not in childgroups:
self.groups[group.name].add_child_group(newchild)
# group parent membership relation
for newparent in newgroup.parent_groups:
# dict with existing parent groups:
parentgroups = dict([(g.name, g) for g in group.parent_groups])
# check if parent of new group is already known as a parent
if newparent.name not in parentgroups:
if newparent.name not in self.groups:
# group does not exist yet in self, import him
self.groups[newparent.name] = newparent
# group now exists but not yet as a parent here
self.groups[newparent.name].add_child_group(group)
# variables
group.vars = combine_vars(group.vars, newgroup.vars)
    def _merge_hosts(self, host, newhost):
""" Merge all of instance newhost into host """
# name
if host.name != newhost.name:
raise AnsibleError("Cannot merge host %s with %s" % (host.name, newhost.name))
# group membership relation
for newgroup in newhost.groups:
# dict with existing groups:
hostgroups = dict([(g.name, g) for g in host.groups])
# check if new group is already known as a group
if newgroup.name not in hostgroups:
if newgroup.name not in self.groups:
# group does not exist yet in self, import him
self.groups[newgroup.name] = newgroup
# group now exists but doesn't have host yet
self.groups[newgroup.name].add_host(host)
# variables
host.vars = combine_vars(host.vars, newhost.vars)
def get_host_variables(self, host):
""" Gets additional host variables from all inventories """
vars = {}
for i in self.parsers:
vars.update(i.get_host_variables(host))
return vars
| j00bar/ansible | lib/ansible/inventory/dir.py | Python | gpl-3.0 | 13,251 |
# -*- coding: utf-8 -*-
import unittest
import inspect
from structures.prefix_tree import TrieNode, PrefixTree
class TrieNodeTest(unittest.TestCase):
def test_insert(self):
trie = TrieNode()
values = ['amy', 'ann', 'emma', 'rob', 'roger']
for value in values:
trie.insert(value)
print(trie)
print(trie.nodes)
self.assertEqual(trie.total_words(), 5)
self.assertEqual(trie.total_tags(), 15)
# Test insert same value and check total counters
trie.insert('amy')
self.assertEqual(trie.total_words(), 5)
self.assertEqual(trie.total_tags(), 15)
def test_lookup(self):
trie = TrieNode()
values = ['amy', 'ann', 'emma', 'rob', 'roger']
for value in values:
trie.insert(value)
# True assertions
for val in values:
self.assertTrue(trie.lookup(val), 'Not found: {}'.format(val))
# Test fuzzy
self.assertEqual(sorted(list(trie.lookup('a'))),
sorted(['amy', 'ann']))
# False assertions with disabled fuzzy option
for val in ['am', 'an', 'johm', 'max']:
self.assertFalse(list(trie.lookup(val, fuzzy=False)))
# Test empty search
self.assertEqual(list(trie.lookup('non-existed')), [])
def test_delete(self):
trie = TrieNode()
values = ['amy', 'ann', 'anne', 'emma', 'rob', 'roger', 'anna']
for value in values:
trie.insert(value)
total_words = trie.total_words()
total_tags = trie.total_tags()
self.assertEqual(total_words, 7)
# Test delete end value
self.assertEqual(trie.remove('amy'), 0)
self.assertEqual(trie.total_words(), 6)
self.assertEqual(trie.total_tags(), total_tags - 1)
# Check that nothing to delete
self.assertEqual(trie.remove('amy'), -1)
self.assertEqual(trie.total_words(), 6)
self.assertEqual(trie.total_tags(), total_tags - 1)
# Test delete the value but keep the node (and number of tags)
self.assertEqual(trie.remove('ann'), 1)
self.assertEqual(trie.total_words(), 5)
self.assertEqual(trie.total_tags(), total_tags - 1)
class PrefixTreeTest(unittest.TestCase):
def test_tree(self):
prefix_tree = PrefixTree()
values = ['amy', 'ann', 'anne', 'emma', 'rob', 'roger', 'anna']
# Test setter
for value in values:
prefix_tree[value] = value
# Test getter
result = prefix_tree['ann']
self.assertTrue(result)
self.assertTrue(inspect.isgenerator(result))
# expect 'ann', 'anne' and 'anna'
self.assertEqual(len(list(result)), 3)
# Test containment
self.assertTrue('amy' in prefix_tree)
self.assertFalse('am' in prefix_tree)
| prawn-cake/data_structures | structures/tests/test_prefix_tree.py | Python | mit | 2,876 |
class Grids:
"""All Grid logic, including static (station), ship, and single block."""
def __init__(self, sbc_world_filename):
# grids are in sbs
# import Sbc
# self.sbc = Sbc.Sbc(sbc_world_filename)
pass
def get(self):
print('STUB')
return False
| mccorkle/seds-utils | Grids.py | Python | gpl-3.0 | 309 |
# -*- coding: utf-8 -*-
mapping = (
"abvhgdezyijklmnoprstuf'ABVHGDEZYIJKLMNOPRSTUF'",
"абвгґдезиійклмнопрстуфьАБВГҐДЕЗИІЙКЛМНОПРСТУФЬ",
)
reversed_specific_mapping = (
"ьЬ",
"''"
)
pre_processor_mapping = {
"ye": "є",
"zh": "ж",
"yi": "ї",
"kh": "х",
"ts": "ц",
"ch": "ч",
"sh": "ш",
"shch": "щ",
"ju": "ю",
"ja": "я",
"Ye": "Є",
"Zh": "Ж",
"Yi": "Ї",
"Kh": "Х",
"Ts": "Ц",
"Ch": "Ч",
"Sh": "Ш",
"Shch": "Щ",
"Ju": "Ю",
"Ja": "Я"
}
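# Usage sketch (illustrative; translit() is the public API of the
# transliterate package that consumes these tables):
#
#   from transliterate import translit
#   translit(u"Привіт", "uk", reversed=True)  # -> u'Pryvit'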
| akosiaris/transliterate | src/transliterate/contrib/languages/uk/data/python32.py | Python | gpl-2.0 | 597 |
#!/usr/bin/python
###############################################################################
#
# This script produces state labels for all ionic states in MDAnalysis output
# based on coordination integers and groups these according to a passed
# regular expression. Though, the passed state_labels could be any numerical
# label at a time step. This script merely outputs statistics for each traj
# and then across the entire dataset.
#
# Example: For 13/26/39/...-column data with type like:
# 1.0 -0.13 -0.193 0.522 0.0 1.0 0.0 0.0 0.0 2.0 9.0 2.0 1748.0
#
# python State_Grouping.py -f f2.out f3.out -m 3 -c 13 -remove 2000
# -i "(.[^-0+]|[^-0+].)[-0][-0][-0][-0]"
# "\+\+[-0][-0][-0][-0]"
# "(.[^-+0]|[^-+0].)(.[^-+0]|[^-+0].)[-0][-0]"
# "\+\+(.[^-0+]|[^-0+].)[-0][-0]"
# -sf 5 6
#
# This would read in columns 5 and 6 for each ion at a timestep,
# produce state labels like ++0100, 101010, ++00--, 1010-- and then
# match them to the four passed regular expressions. This produces
# data like this where there is N+2 lists where N is the number of
# files passed in the -f argument above:
#
# [[2.0, 0.43, 0.53, 0.01, 0.02, 0.00, 0.48],
# [3.0, 0.13, 0.29, 0.16, 0.40, 0.00, 0.87],
# ['MEAN', 0.28, 0.41, 0.09, 0.21, 0.00, 0.67],
# ['STDERR', 0.14, 0.11, 0.07, 0.18, 0.00, 0.19]]
#
# By Chris Ing, 2013 for Python 2.7
#
###############################################################################
from argparse import ArgumentParser
from numpy import mean
from scipy.stats import sem
from collections import defaultdict
from re import match
from ChannelAnalysis.CoordAnalysis.Preprocessor import *
# This function counts state occupancy in each trajectory. It will return the
# state populations for each trajectory and then the associated stats.
def state_counter(data_floats, data_states, all_possible_states, traj_col=11):
# This is an epic datatype that I will use to quickly build a
# dict of dicts where the 1st key is a trajectory number
# and the second key is the state index and the value is a
# count of how many times that state was observed.
count_totals=defaultdict(lambda: defaultdict(int))
# the data_states datatype is a list of tuples: (state_label, state_int)
data_state_ids = [data[1] for data in data_states]
data_state_labels = [data[0] for data in data_states]
for line, state_label, state_label2 in zip(data_floats, data_state_ids, data_state_labels):
traj_id = line[traj_col]
#if state_label == 13:
# print line, state_label2
count_totals[traj_id][state_label] += 1
# This fills zero in for all known occupancy states for all
# trajectories. This is useful because we do a mean
# across all trajectories later.
for traj_id in count_totals.keys():
for known_state in all_possible_states:
count_totals[traj_id][known_state] += 0
# Return the list of list, the mean and standard error of mean
# for each trajectory in the input.
return count_totals_to_percents(count_totals)
# This is a helper function that takes the datatype generated in
# *_counter functions (trajnum dict -> state_id -> integer counts)
# and converts this to populations in a list without weighting like
# the occupancy count function.
def count_totals_to_percents(count_totals):
# Here's the return datatype that stores the percentage of occupancy
# in a given channel/sf state which can be paired with the indices
ion_count_percents = defaultdict(list)
ion_count_indices = defaultdict(list)
for traj_id, count_dict in count_totals.iteritems():
traj_total_lines = float(sum(count_dict.values()))
for ion_state, ion_count in count_dict.iteritems():
ion_count_percents[traj_id].append(ion_count/traj_total_lines)
ion_count_indices[traj_id].append(ion_state)
# Append a little statistics, sorry if this is confusing...
avgs_by_state=defaultdict(list)
for traj_id, percents in ion_count_percents.iteritems():
state_ids = ion_count_indices[traj_id]
for state_id, percent in zip(state_ids, percents):
avgs_by_state[state_id].append(percent)
for state_id, avg in avgs_by_state.iteritems():
ion_count_percents['MEAN'].append(mean(avg))
ion_count_indices['MEAN'].append(state_id)
ion_count_percents['STDERR'].append(sem(avg))
ion_count_indices['STDERR'].append(state_id)
return (dict(ion_count_percents), dict(ion_count_indices))
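# Toy walk-through (illustrative, matching the datatypes above): trajectory
# 1 spends 3 of 4 frames in state 0 and 1 of 4 frames in state 1.
#
#   >>> counts = defaultdict(lambda: defaultdict(int))
#   >>> counts[1][0] = 3; counts[1][1] = 1
#   >>> percents, indices = count_totals_to_percents(counts)
#   >>> sorted(zip(indices[1], percents[1]))
#   [(0, 0.75), (1, 0.25)]
#
# 'MEAN' and 'STDERR' entries are appended alongside the per-trajectory keys.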
if __name__ == '__main__':
parser = ArgumentParser(
description='This script takes regex expressions for a state label\
and outputs how many of your states are classified by that label\
as well as the population of those states in the dataset')
parser.add_argument(
'-f', dest='filenames', type=str, nargs="+", required=True,
help='a filename of coordination data from MDAnalysis trajectory data')
parser.add_argument(
'-m', dest='max_ions', type=int, required=True,
help='the maximum number of ions in the channel to consider')
parser.add_argument(
'-c', dest='num_cols', type=int, default=13,
help='the number of columns per ion in the input')
parser.add_argument(
'-remove', dest='remove_frames', type=int, default=0,
help='this is a number of frames to remove from the start of the data')
parser.add_argument(
'-s', dest='sort_col', type=int, default=3,
help='a zero inclusive column number to sort your row on, typically x,y,z')
parser.add_argument(
'-sc', dest='sort_cut', type=float, default=0.0,
help='a value on the sort_col range to classify zero coordinated data')
parser.add_argument(
'-sf', dest='sf_col', type=int, nargs="+", default=[5,6],
help='the coordination integer columns that define the selectivity filter')
parser.add_argument(
'-t', dest='traj_col', type=int, default=11,
help='a zero inclusive column number that contains the run number')
parser.add_argument(
'-o', dest='outfile', type=str, default=None,
help='the file to output the sorted padding output of all input files')
parser.add_argument(
'--addtime', dest='add_time', action="store_true", default=False,
help='an optional argument to add time columns to each ion grouping')
parser.add_argument(
'-i', dest='regex', type=str, nargs="+",
help='a list of regex values in quotes')
parser.add_argument(
'-r', dest='resid_col', type=int, default=12,
help='a zero inclusive column number that contains the ion resid')
args = parser.parse_args()
data_f_padded = process_input(filenames=args.filenames,
num_cols=args.num_cols,
max_ions=args.max_ions,
remove_frames=args.remove_frames,
traj_col=args.traj_col,
sort_col=args.sort_col,
add_time=args.add_time,
padded=True)
    # Here you can choose to compute regular expression occupancy states
    # or species occupancy states depending on your fancy.
if False:
data_f_regex = regex_columns(data_f_padded, regex_strings=args.regex,
num_cols=args.num_cols,
sort_col=args.sort_col,
sort_cut=args.sort_cut,
sf_col=args.sf_col,
max_ions=args.max_ions)
print "Regex Macrostate Occupancy"
print state_counter(data_f_padded, data_f_regex, range(len(args.regex)),
traj_col=args.traj_col)
else:
data_species = species_columns(filenames=args.filenames,
resid_col=args.resid_col,
sort_col=args.sort_col,
max_ions=args.max_ions,
num_cols=args.num_cols,
remove_frames=args.remove_frames,
add_time=args.add_time,
sf_col=None)
print "Species State Occupancy hardcoded for 2 ion species"
all_species_orders = ["-"*args.max_ions]
for ion_occ in range(args.max_ions):
temp_orders = product([0,1], repeat=ion_occ+1)
for order in temp_orders:
order_str = "".join([str(col) for col in order])
for filler in range(args.max_ions-len(order_str)):
order_str += "-"
all_species_orders.append(order_str)
print "Possible ion species arrangements: ", all_species_orders
species_results = state_counter(data_f_padded, data_species,
range(len(all_species_orders)),
traj_col=args.traj_col)
for traj_id, block in species_results[0].iteritems():
print traj_id,
for percent in block:
print "%.3f" % (percent),
print "\n",
| cing/ChannelAnalysis | ChannelAnalysis/CoordAnalysis/Grouping.py | Python | mit | 9,522 |
import math
import random
#The Red Queen's dragon is trapped in a cage waiting for the day to kill Alice, who is supposed to bring the White Queen back in charge of Wonderland. You are Alice and preparing for the day when you can finally bring peace to Wonderland. You have 5 objects to choose from for the weapons you can use to kill the dragon.
#First, you need to be ready...
#Second, you need to be in the forest which is the area where the dragon is going to be.
#Now the game has started...
#Then Alice, you need to have enough energy (like food). This is where this script shows that it returns a boolean expression and integers.
#Your hair needs to be out of your face or behind your ears.
#I have shown the strength of each weapon you can choose from. The scale is on a 1-10 rating of each weapon. As demonstrated below, the sword is the best choice.
#I have shown the different kinds of attacks that you should look out for from the dragon. The moves the dragon uses on you is on a scale from 1-10.
#Then you need thin clothes and you need to have more than one waterbottle beside you to be hydrated.
#Then it asks you what position your lower body is in. Either you have your feet together or you have wide legs.
#Lastly, it asks you if you are right handed or left handed. If you are left handed then you lose because you didn't pay attention to the hint in the beginning. But if you are right handed then you win because you paid attention.
#If Alice is in a good spot to beat the dragon, has a good amount of energy, your hair is behind your face, hydrated, thin clothes, a strong dominant arm, good stance, and you are right handed...THEN YOU HAVE A HIGH CHANCE OF WINNING!
print "Hint: The dragon you are about to fight against can beat left handed people!"
def main():
ready = raw_input("Are you ready?: no or yes ")
area = raw_input("Where are you?: ")
output = """
You are {}, ready?
-YOU BETTER BE READY!!!Don't be scared! You better get your act together and get some confidence to beat this dragon!...
You are in the {}!
-You can't be anywhere else than the forest!
You need to try your best to win!
GET READY...GET SET...
GO!!!
""".format(ready, area)
print output
energy = int(raw_input("How many items of food did you eat this morning?: hint = choose a number higher than 0! "))
enough(energy)
hair = raw_input("What condition is your hair in?: behind your ears, out of your face: ")
your(hair)
weapon = raw_input("What weapon would you like?: torch, stick, whip, bowling ball, sword: ")
yourdecision(weapon)
move = raw_input("What is the move that the dragon has chosen to use?: breathing fire, breaths out bad breath, steps on your toes, whips tail, or grabs you: ")
dragonattacks(move)
clothes = raw_input("Do you have thin clothes on?: yes or no: ")
waterbottle = raw_input("How many waterbottles do you have?: max = 3/ minimum = 1 ")
clothing(howmuch(clothes, waterbottle))
lowerbody = raw_input("What position is your lower body in right now?: wide legs or feet together: ")
strengthof(lowerbody)
upperbody = raw_input("HURRY! THIS IS YOUR LAST SHOT! This is the most important question that you have to answer. If you choose the correct answer you are going to win. But if you choose the wrong answer...I'm sorry, go yell at your Momma! Here it is...What is your dominant hand?: left handed or right handed: ")
strengthofyour(upperbody)
def strength(strongenough):
if strongenough == 88:
return True
else:
return False
def youneed(enoughenergy):
if not enoughenergy <= int("0"):
return 88
else:
return 99
def enough(energy):
if strength(youneed(energy)):
print "Yay! You have had enough food! Way to go! NEXT..."
def your(hair):
if hair == "behind your ears" or hair == "out of your face":
print "Yay! You have your hair out of your face or behind your ears. Either way you are on the right track. NEXT..."
def yourdecision(weapon):
if weapon == "torch":
print "You are going to lose this battle! But if you choose the next move that your dragon makes correctly...you still have a chance!"
strength = 0
elif weapon == "stick":
print "You are going to get injured! But if you choose the next move that your dragon makes correctly...you still have a chance!"
strength = 3
elif weapon == "whip":
print "You can stike the dragon only a few times. But if you choose the next move that your dragon makes correctly...you still have a chance!"
strength = 6
elif weapon == "bowling ball":
print "You are going to do pretty well! If you try hard enough...you might win! YAY!"
strength = 8
elif weapon == "sword":
print "You've got this, Alice! You are capable of beating this dragon! I believe in you!"
strength = 10
else:
strength = random.randint(0,10)
return strength * random.random()
#random = random float from 0-1
def dragonattacks(move):
if move == "breathing fire":
print "You are going to lose to this dragon! You still have a chance if you answer the next few questions correctly!"
enemystrength = 10
elif move == "breaths out bad breath":
print "You have a chance to beat this dragon! GO GO GO!"
enemystrength = 3
elif move == "steps on your toes":
print "You could lose... You still have a chance if you answer the next few questions correctly!"
enemystrength = 7
elif move == "whips tail":
print "The dragon is probably going to beat you.. You still have a chance if you answer the next few questions correctly! But don't get your hopes up high because you have a fierce competitor! :("
enemystrength = 8
elif move == "grabs you":
print "Oh no! The dragon has caught you! You are probably going to die most likely! Hurry do something! You still have a chance if you answer the next few questions correctly!"
enemystrength = 9
else:
enemystrength = random.randint(0,10)
return enemystrength * random.random()
def clothing(lightness):
print lightness
def howmuch(clothes, waterbottle):
if clothes == "yes" and int(waterbottle) > 1:
return "PERFECT! You have thin clothes and more than one water bottle! YAYY! We are moving on to the next question..."
else:
return "NOOO!!! Your clothes can't be any thicker and you should have more than one water bottle! Don't worry you have a few more questions to answer! GO GO GO!"
def strengthof(lowerbody):
if lowerbody == "wide legs":
print "You are strong enough to fight this dragon because your feet are wide."
else:
print "You are too weak to fight this dragon. HURRY YOU MUST TELL ME WHAT YOUR UPPER BODY IS LIKE BEFORE I TELL YOU IF YOU WON OR LOST TO THIS DRAGON!"
def strengthofyour(upperbody):
if upperbody == "left handed":
print "You LOST!!! The dragon beat you! I am disapointed in you! You didn't read or you forgot the hint that I have you at the start of this game!"
elif upperbody == "right handed":
print "CONGRATULATIONS!!! YOU HAVE BEATEN THE DRAGON! Thank your Momma for coming out as a right handed person!"
main()
| hannah2306-cmis/hannah2306-cmis-cs2 | conditionals.py | Python | cc0-1.0 | 6,999 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('core', '0006_matchrulesinline'),
]
operations = [
migrations.AlterField(
model_name='match',
name='description',
field=models.TextField(max_length=255, null=True, verbose_name=b'Beschreibung', blank=True),
),
]
| LordNeodym/intranet | src/core/migrations/0007_auto_20150829_2047.py | Python | gpl-3.0 | 455 |
__author__ = 'Tony Beltramelli www.tonybeltramelli.com - 06/09/2015'
import numpy as np
class Label:
def __init__(self, path):
file_path = "{}labels.csv".format(path)
try:
data = np.genfromtxt(file_path, delimiter=',', skip_header=1,
names=['timestamp', 'label'], dtype=[("timestamp", long), ('label', int)])
self.has_label = True
except IOError:
self.has_label = False
return
self.timestamp = data['timestamp']
label = data['label']
self.label = []
for i in range(0, len(label)):
self.label.append(chr(int(label[i])))
self.diff = np.diff(self.timestamp)
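# Usage sketch (the path is an assumption; expects "<path>labels.csv" with a
# 'timestamp,label' header as parsed above):
#
#   label = Label("./data/session_1/")
#   if label.has_label:
#       print label.label[0], label.diff[:5]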
| tonybeltramelli/Deep-Spying | server/analytics/modules/label/Label.py | Python | apache-2.0 | 725 |
import StringIO
import socket
import cv2
import numpy as np
EMSG_ROBOT_NOT_FOUND = 'Could not connect to the robot at %s:%s'
class EZBv4Video(object):
# Default IP Address and Port for the JD Humanoid Robot.
ConnectedEndPointAddress = '192.168.1.1'
ConnectedEndPointPort = 24
Res160x120 = 0
Res320x240 = 1
Res640x480 = 2
START = 3
STOP = 4
RESERVED_compression = 10
RESERVED_brightness = 11
FreqAuto = 12
RESERVED_ExposureMode = 13
RESERVED_SetReg = 14
RESERVED_SetRetryCount = 15
RESERVED_SetBufferSize = 16
MirrorEnable = 21
MirrorDisable = 22
Freq50hz = 23
Freq60hz = 24
BacklightOff = 25
BacklightOn = 26
IndoorAuto = 27
IndoorForce = 28
RESERVED_LED_Red = 29
RESERVED_LED_Green = 30
RESERVED_LED_Blue = 31
Auto0 = 0
Auto1 = 1
Auto2 = 2
Auto3 = 3
Auto4 = 4
BlacknWhite = 0x18
ColorNegative = 0x40
BlacknWhiteNegative = 0x58
Normal = 0x00
BUFFER_SIZE = 1024
def __init__(self, ip = ConnectedEndPointAddress, port = ConnectedEndPointPort):
self.ip = ip
self.port = port
self.sock = None
self.isConnected = False
self.connect()
def connect(self):
try:
            # NOTE: socket timeouts are given in seconds, so 3000 waits a very
            # long time; a small value such as 3.0 is more typical here.
            self.sock = socket.create_connection((self.ip, self.port), timeout = 3000)
self.isConnected = True
except:
raise RuntimeError(EMSG_ROBOT_NOT_FOUND % (self.ip, self.port))
def getImage(self):
chunks = []
bytes_recd = 0
while bytes_recd < EZBv4Video.BUFFER_SIZE:
chunk = self.sock.recv(min(EZBv4Video.BUFFER_SIZE - bytes_recd, 2048))
if chunk == '':
raise RuntimeError("socket connection broken")
chunks.append(chunk)
bytes_recd = bytes_recd + len(chunk)
return ''.join(chunks)
def getImages(self):
chunks = []
foundStart = False
self.stopped = False
self.sock.send(bytearray([EZBv4Video.START]))
while not self.stopped:
self.sock.send(bytearray([EZBv4Video.Res160x120]))
# fill the buffer
chunk = self.sock.recv(EZBv4Video.BUFFER_SIZE)
if chunk == '':
raise RuntimeError("socket connection broken")
# try to find image start
try:
foundStart = chunk.index('EZIMG')
except ValueError:
foundStart = -1
# if image start was found let the image start with the magic numbers
if foundStart == -1:
chunks.append(chunk)
else:
# append to prev image
chunks.append(chunk[:foundStart])
# process buffer
self.processImage(''.join(chunks))
# create new buffer
chunks = [ chunk[foundStart:] ]
self.sock.close()
self.isConnected = False
@staticmethod
def createOpencvImageFromStringio(img_stream, cv2_img_flag = 0):
img_stream.seek(0)
return cv2.imdecode( np.asarray(bytearray(img_stream.read()), dtype = np.uint8),
cv2_img_flag )
def processImage(self, _buffer):
# not an image
if not _buffer.startswith('EZIMG'):
return
        # skip the 5-byte 'EZIMG' magic plus what appears to be a 4-byte size field
        image = self.createOpencvImageFromStringio(StringIO.StringIO(_buffer[9:]), 1)
self.openCVImageHook(image)
@staticmethod
def openCVImageHook(image):
cv2.imshow('JD', image)
        # placeholder hook: waitKey keeps the window responsive; a real
        # implementation could stop the stream when 'q' is pressed
        if cv2.waitKey(1) & 0xFF == ord('q'):
            pass
def __del__(self):
if self.isConnected and self.sock:
self.sock.close()
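if __name__ == '__main__':
    # Minimal usage sketch (illustrative): connect to a robot at the default
    # address and stream frames into the OpenCV window opened by the hook.
    robot = EZBv4Video()
    try:
        robot.getImages()
    except RuntimeError as e:
        print e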
| BrutusTT/pyJD | pyJD/pyEZB/EZBv4Video.py | Python | agpl-3.0 | 4,187 |
# SPDX-License-Identifier: GPL-2.0+
# Copyright 2018 Google, Inc
# Written by Simon Glass <sjg@chromium.org>
#
# Holds and modifies the state information held by binman
#
import hashlib
import re
from dtoc import fdt
import os
from patman import tools
from patman import tout
# Records the device-tree files known to binman, keyed by entry type (e.g.
# 'u-boot-spl-dtb'). These are the output FDT files, which can be updated by
# binman. They have been copied to <xxx>.out files.
#
# key: entry type
# value: tuple:
# Fdt object
# Filename
# Entry object, or None if not known
output_fdt_info = {}
# Prefix to add to an fdtmap path to turn it into a path to the /binman node
fdt_path_prefix = ''
# Arguments passed to binman to provide arguments to entries
entry_args = {}
# True to use fake device-tree files for testing (see U_BOOT_DTB_DATA in
# ftest.py)
use_fake_dtb = False
# The DTB which contains the full image information
main_dtb = None
# Allow entries to expand after they have been packed. This is detected and
# forces a re-pack. If not allowed, any attempted expansion causes an error in
# Entry.ProcessContentsUpdate()
allow_entry_expansion = True
# Don't allow entries to contract after they have been packed. Instead just
# leave some wasted space. If allowed, this is detected and forces a re-pack,
# but may result in entries that oscillate in size, thus causing a pack error.
# An example is a compressed device tree where the original offset values
# result in a larger compressed size than the new ones, but then after updating
# to the new ones, the compressed size increases, etc.
allow_entry_contraction = False
def GetFdtForEtype(etype):
"""Get the Fdt object for a particular device-tree entry
Binman keeps track of at least one device-tree file called u-boot.dtb but
can also have others (e.g. for SPL). This function looks up the given
entry and returns the associated Fdt object.
Args:
etype: Entry type of device tree (e.g. 'u-boot-dtb')
Returns:
Fdt object associated with the entry type
"""
    value = output_fdt_info.get(etype)
if not value:
return None
return value[0]
def GetFdtPath(etype):
"""Get the full pathname of a particular Fdt object
Similar to GetFdtForEtype() but returns the pathname associated with the
Fdt.
Args:
etype: Entry type of device tree (e.g. 'u-boot-dtb')
Returns:
Full path name to the associated Fdt
"""
return output_fdt_info[etype][0]._fname
def GetFdtContents(etype='u-boot-dtb'):
"""Looks up the FDT pathname and contents
This is used to obtain the Fdt pathname and contents when needed by an
entry. It supports a 'fake' dtb, allowing tests to substitute test data for
the real dtb.
Args:
etype: Entry type to look up (e.g. 'u-boot.dtb').
Returns:
tuple:
pathname to Fdt
Fdt data (as bytes)
"""
if etype not in output_fdt_info:
return None, None
if not use_fake_dtb:
pathname = GetFdtPath(etype)
data = GetFdtForEtype(etype).GetContents()
else:
fname = output_fdt_info[etype][1]
pathname = tools.GetInputFilename(fname)
data = tools.ReadFile(pathname)
return pathname, data
def UpdateFdtContents(etype, data):
"""Update the contents of a particular device tree
The device tree is updated and written back to its file. This affects what
    is returned from future calls to GetFdtContents(), etc.
Args:
etype: Entry type (e.g. 'u-boot-dtb')
data: Data to replace the DTB with
"""
dtb, fname, entry = output_fdt_info[etype]
dtb_fname = dtb.GetFilename()
tools.WriteFile(dtb_fname, data)
dtb = fdt.FdtScan(dtb_fname)
output_fdt_info[etype] = [dtb, fname, entry]
def SetEntryArgs(args):
"""Set the value of the entry args
This sets up the entry_args dict which is used to supply entry arguments to
entries.
Args:
args: List of entry arguments, each in the format "name=value"
"""
global entry_args
entry_args = {}
if args:
for arg in args:
m = re.match('([^=]*)=(.*)', arg)
if not m:
raise ValueError("Invalid entry arguemnt '%s'" % arg)
entry_args[m.group(1)] = m.group(2)
def GetEntryArg(name):
"""Get the value of an entry argument
Args:
name: Name of argument to retrieve
Returns:
String value of argument
"""
return entry_args.get(name)
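# Example (illustrative) of how entry arguments flow through these helpers:
#
#     SetEntryArgs(['cros-ec-rw-path=ec.RW.bin'])
#     GetEntryArg('cros-ec-rw-path')   # -> 'ec.RW.bin'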
def Prepare(images, dtb):
"""Get device tree files ready for use
This sets up a set of device tree files that can be retrieved by
GetAllFdts(). This includes U-Boot proper and any SPL device trees.
Args:
images: List of images being used
dtb: Main dtb
"""
global output_fdt_info, main_dtb, fdt_path_prefix
# Import these here in case libfdt.py is not available, in which case
# the above help option still works.
from dtoc import fdt
from dtoc import fdt_util
# If we are updating the DTBs we need to put these updated versions
# where Entry_blob_dtb can find them. We can ignore 'u-boot.dtb'
# since it is assumed to be the one passed in with options.dt, and
# was handled just above.
main_dtb = dtb
output_fdt_info.clear()
fdt_path_prefix = ''
output_fdt_info['u-boot-dtb'] = [dtb, 'u-boot.dtb', None]
output_fdt_info['u-boot-spl-dtb'] = [dtb, 'spl/u-boot-spl.dtb', None]
output_fdt_info['u-boot-tpl-dtb'] = [dtb, 'tpl/u-boot-tpl.dtb', None]
if not use_fake_dtb:
fdt_set = {}
for image in images.values():
fdt_set.update(image.GetFdts())
for etype, other in fdt_set.items():
entry, other_fname = other
infile = tools.GetInputFilename(other_fname)
other_fname_dtb = fdt_util.EnsureCompiled(infile)
out_fname = tools.GetOutputFilename('%s.out' %
os.path.split(other_fname)[1])
tools.WriteFile(out_fname, tools.ReadFile(other_fname_dtb))
other_dtb = fdt.FdtScan(out_fname)
output_fdt_info[etype] = [other_dtb, out_fname, entry]
def PrepareFromLoadedData(image):
"""Get device tree files ready for use with a loaded image
Loaded images are different from images that are being created by binman,
since there is generally already an fdtmap and we read the description from
that. This provides the position and size of every entry in the image with
no calculation required.
This function uses the same output_fdt_info[] as Prepare(). It finds the
device tree files, adds a reference to the fdtmap and sets the FDT path
prefix to translate from the fdtmap (where the root node is the image node)
to the normal device tree (where the image node is under a /binman node).
Args:
images: List of images being used
"""
global output_fdt_info, main_dtb, fdt_path_prefix
tout.Info('Preparing device trees')
output_fdt_info.clear()
fdt_path_prefix = ''
output_fdt_info['fdtmap'] = [image.fdtmap_dtb, 'u-boot.dtb', None]
main_dtb = None
tout.Info(" Found device tree type 'fdtmap' '%s'" % image.fdtmap_dtb.name)
for etype, value in image.GetFdts().items():
entry, fname = value
out_fname = tools.GetOutputFilename('%s.dtb' % entry.etype)
tout.Info(" Found device tree type '%s' at '%s' path '%s'" %
(etype, out_fname, entry.GetPath()))
entry._filename = entry.GetDefaultFilename()
data = entry.ReadData()
tools.WriteFile(out_fname, data)
dtb = fdt.Fdt(out_fname)
dtb.Scan()
image_node = dtb.GetNode('/binman')
if 'multiple-images' in image_node.props:
image_node = dtb.GetNode('/binman/%s' % image.image_node)
fdt_path_prefix = image_node.path
output_fdt_info[etype] = [dtb, None, entry]
tout.Info(" FDT path prefix '%s'" % fdt_path_prefix)
def GetAllFdts():
"""Yield all device tree files being used by binman
Yields:
Device trees being used (U-Boot proper, SPL, TPL)
"""
if main_dtb:
yield main_dtb
for etype in output_fdt_info:
dtb = output_fdt_info[etype][0]
if dtb != main_dtb:
yield dtb
def GetUpdateNodes(node, for_repack=False):
"""Yield all the nodes that need to be updated in all device trees
The property referenced by this node is added to any device trees which
    have the given node. Due to removal of unwanted nodes, SPL and TPL may
not have this node.
Args:
node: Node object in the main device tree to look up
for_repack: True if we want only nodes which need 'repack' properties
added to them (e.g. 'orig-offset'), False to return all nodes. We
don't add repack properties to SPL/TPL device trees.
Yields:
Node objects in each device tree that is in use (U-Boot proper, which
is node, SPL and TPL)
"""
yield node
for dtb, fname, entry in output_fdt_info.values():
if dtb != node.GetFdt():
if for_repack and entry.etype != 'u-boot-dtb':
continue
other_node = dtb.GetNode(fdt_path_prefix + node.path)
#print(' try', fdt_path_prefix + node.path, other_node)
if other_node:
yield other_node
def AddZeroProp(node, prop, for_repack=False):
"""Add a new property to affected device trees with an integer value of 0.
Args:
        prop: Name of property
        for_repack: True if this property is only needed for repacking
"""
for n in GetUpdateNodes(node, for_repack):
n.AddZeroProp(prop)
def AddSubnode(node, name):
"""Add a new subnode to a node in affected device trees
Args:
node: Node to add to
name: name of node to add
Returns:
New subnode that was created in main tree
"""
first = None
for n in GetUpdateNodes(node):
subnode = n.AddSubnode(name)
if not first:
first = subnode
return first
def AddString(node, prop, value):
"""Add a new string property to affected device trees
Args:
        prop: Name of property
value: String value (which will be \0-terminated in the DT)
"""
for n in GetUpdateNodes(node):
n.AddString(prop, value)
def SetInt(node, prop, value, for_repack=False):
"""Update an integer property in affected device trees with an integer value
This is not allowed to change the size of the FDT.
Args:
        prop: Name of property
        value: Integer value to set
        for_repack: True if this property is only needed for repacking
"""
for n in GetUpdateNodes(node, for_repack):
tout.Detail("File %s: Update node '%s' prop '%s' to %#x" %
(n.GetFdt().name, n.path, prop, value))
n.SetInt(prop, value)
def CheckAddHashProp(node):
hash_node = node.FindNode('hash')
if hash_node:
algo = hash_node.props.get('algo')
if not algo:
return "Missing 'algo' property for hash node"
if algo.value == 'sha256':
size = 32
else:
return "Unknown hash algorithm '%s'" % algo
for n in GetUpdateNodes(hash_node):
n.AddEmptyProp('value', size)
def CheckSetHashValue(node, get_data_func):
hash_node = node.FindNode('hash')
if hash_node:
algo = hash_node.props.get('algo').value
if algo == 'sha256':
m = hashlib.sha256()
m.update(get_data_func())
data = m.digest()
for n in GetUpdateNodes(hash_node):
n.SetData('value', data)
def SetAllowEntryExpansion(allow):
"""Set whether post-pack expansion of entries is allowed
Args:
allow: True to allow expansion, False to raise an exception
"""
global allow_entry_expansion
allow_entry_expansion = allow
def AllowEntryExpansion():
"""Check whether post-pack expansion of entries is allowed
Returns:
True if expansion should be allowed, False if an exception should be
raised
"""
return allow_entry_expansion
def SetAllowEntryContraction(allow):
"""Set whether post-pack contraction of entries is allowed
Args:
allow: True to allow contraction, False to raise an exception
"""
global allow_entry_contraction
allow_entry_contraction = allow
def AllowEntryContraction():
"""Check whether post-pack contraction of entries is allowed
Returns:
True if contraction should be allowed, False if an exception should be
raised
"""
return allow_entry_contraction
| CTSRD-CHERI/u-boot | tools/binman/state.py | Python | gpl-2.0 | 12,809 |
# 1222, Fri 18 Dec 2015 (NZDT)
#
# build-cpldns.py: makes pypy cpldns module
#
# Copyright (C) 2016 by Nevil Brownlee, U Auckland | WAND
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from cffi import FFI
ffi = FFI()
# Our C functions
ffi.cdef("int get_ldns_info(struct ldns_info *ldi, uint8_t *dns_msg, int dns_len);")
ffi.cdef("int ldns_ok(struct ldns_info *ldi);")
ffi.cdef("uint16_t get_short(uint8_t *bp, uint16_t x);")
ffi.set_source("cpldns", # .so to be created
""" // passed to the real C compiler
#include <ldns/ldns.h>
struct ldns_info {
struct ldns_struct_pkt *ldpkt; /* ldns_pkt typedef in ldns/packet.h */
int status; /* enum in ldns */
};
int get_ldns_info(struct ldns_info *ldi, uint8_t *dns_msg, int dns_len) {
ldi->status = ldns_wire2pkt(&ldi->ldpkt, dns_msg, dns_len);
return ldi->ldpkt != NULL; /* True if all's well */
}
int ldns_ok(struct ldns_info *ldi) {
return ldi->status == LDNS_STATUS_OK;
}
uint16_t get_short(uint8_t *bp, uint16_t x) {
uint16_t v = *(uint16_t *)&bp[x];
return ntohs(v);
}
""",
libraries=["ldns", "c"]) # list of libraries to link with
ffi.cdef(
"""
struct ldns_info {
struct ldns_struct_pkt *ldpkt; /* ldns_pkt typedef in ldns/packet.h */
int status; /* enum in ldns */
};
extern const char *ldns_pkt_opcode2str(int rcode); /* enum */
extern const char *ldns_pkt_rcode2str(int rcode); /* enum */
extern const char *ldns_rr_type2str(int type); /* enum */
extern const char *ldns_get_errorstr_by_id(int errnbr); /* enum */
extern uint16_t ldns_pkt_id(struct ldns_struct_pkt *p);
extern bool ldns_pkt_qr(struct ldns_struct_pkt *p);
extern int ldns_pkt_get_opcode(struct ldns_struct_pkt *p); /* enum */
extern int ldns_pkt_get_rcode(struct ldns_struct_pkt *p); /* enum */
extern uint16_t ldns_pkt_qdcount(struct ldns_struct_pkt *p);
extern struct ldns_struct_rr_list *ldns_pkt_question(
struct ldns_struct_pkt *p);
extern struct ldns_struct_rr_list *ldns_pkt_answer(
struct ldns_struct_pkt *p);
extern struct ldns_struct_rr_list *ldns_pkt_authority(
struct ldns_struct_pkt *p);
extern struct ldns_struct_rr_list *ldns_pkt_additional(
struct ldns_struct_pkt *p);
extern int ldns_rr_list_rr_count(struct ldns_struct_rr_list *rr_list);
extern struct ldns_struct_rr *ldns_rr_list_rr(
struct ldns_struct_rr_list *rr_list, size_t nr);
extern int ldns_rr_get_type(struct ldns_struct_rr *rr);
extern size_t ldns_rdf_size(struct ldns_struct_rdf *rd);
extern int ldns_rdf_get_type(struct ldns_struct_rdf *rd);
extern struct ldns_struct_rdf *ldns_rr_owner(struct ldns_struct_rr *rr);
extern char *ldns_rdf2str(struct ldns_struct_rdf *rdf);
extern size_t ldns_rr_rd_count(struct ldns_struct_rr *rr);
// get number of rdata fields
extern uint32_t ldns_rr_ttl(struct ldns_struct_rr *rr);
extern char *ldns_rr2str(struct ldns_struct_rr *rr);
extern struct ldns_struct_rdf *ldns_rr_rdf(struct ldns_struct_rr *rr, size_t n);
    // get nth rdf from rr
struct ldns_struct_rdf { /* From ldata/rdata.h */
size_t _size; /* The size of the data (in octets) */
int _type; /* enum: The type of the data */
void *_data; /* Pointer to the data (raw octets) */
};
""")
if __name__ == "__main__":
ffi.compile()
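# Illustrative usage of the compiled module (names follow the cdefs above):
#
#     import cpldns
#     ldi = cpldns.ffi.new("struct ldns_info *")
#     ok = cpldns.lib.get_ldns_info(ldi, dns_msg_bytes, len(dns_msg_bytes))
#     if ok and cpldns.lib.ldns_ok(ldi):
#         print("DNS id = %d" % cpldns.lib.ldns_pkt_id(ldi.ldpkt))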
| nevil-brownlee/pypy-libtrace | lib/pldns/build-cpldns.py | Python | gpl-3.0 | 3,848 |
# -*- coding: utf-8 -*-
"""
Type inference for NumPy binary ufuncs and their methods.
"""
from __future__ import print_function, division, absolute_import
import numpy as np
from numba import *
from numba.minivect import minitypes
from numba import typesystem
from numba.type_inference.module_type_inference import (module_registry,
register,
register_inferer,
register_unbound)
from numba.typesystem import get_type
from numba.type_inference.modules.numpymodule import (get_dtype,
array_from_type,
promote,
promote_to_array,
demote_to_scalar)
#----------------------------------------------------------------------------
# Utilities
#----------------------------------------------------------------------------
def array_of_dtype(a, dtype, static_dtype, out):
if out is not None:
return out
a = array_from_type(a)
if not a.is_object:
dtype = _dtype(a, dtype, static_dtype)
if dtype is not None:
return a.copy(dtype=dtype)
def _dtype(a, dtype, static_dtype):
if static_dtype:
return static_dtype
elif dtype:
return dtype.dtype
elif a.is_array:
return a.dtype
elif not a.is_object:
return a
else:
return None
#------------------------------------------------------------------------
# Ufunc Type Strings
#------------------------------------------------------------------------
def numba_type_from_sig(ufunc_signature):
"""
Convert ufunc type signature string (e.g. 'dd->d') to a FunctionType
"""
args, ret = ufunc_signature.split('->')
to_numba = lambda c: minitypes.map_dtype(np.dtype(c))
signature = to_numba(ret)(*map(to_numba, args))
return signature
def find_signature(args, signatures):
for signature in signatures:
if signature.args == args:
return signature
def find_ufunc_signature(context, argtypes, signatures):
"""
Map (float_, double) and [double(double, double),
int_(int_, int_),
...]
to double(double, double)
"""
signature = find_signature(tuple(argtypes), signatures)
if signature is not None:
return signature
argtype = reduce(context.promote_types, argtypes)
if not argtype.is_object:
args = (argtype,) * len(argtypes)
return find_signature(args, signatures)
return None
class UfuncTypeInferer(object):
"Infer types for arbitrary ufunc"
def __init__(self, ufunc):
self.ufunc = ufunc
self.signatures = set(map(numba_type_from_sig, ufunc.types))
def infer(self, context, argtypes):
signature = find_ufunc_signature(context, argtypes, self.signatures)
if signature is None:
return None
else:
return signature.return_type
def register_arbitrary_ufunc(ufunc):
"Type inference for arbitrary ufuncs"
ufunc_infer = UfuncTypeInferer(ufunc)
def infer(context, *args, **kwargs):
if len(args) != ufunc.nin:
return object_
# Find the right ufunc signature
argtypes = [type.dtype if type.is_array else type for type in args]
result_type = ufunc_infer.infer(context, argtypes)
if result_type is None:
return object_
# Determine output ndim
ndim = 0
for argtype in args:
if argtype.is_array:
ndim = max(argtype.ndim, ndim)
return typesystem.array(result_type, ndim)
module_registry.register_value(ufunc, infer)
# module_registry.register_unbound_dotted_value
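# Example (illustrative): enable inference for a ufunc not covered by the
# specific registrations below, e.g.
#
#     register_arbitrary_ufunc(np.hypot)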
#----------------------------------------------------------------------------
# Ufunc type inference
#----------------------------------------------------------------------------
def binary_map(context, a, b, out):
if out is not None:
return out
return promote(context, a, b)
def binary_map_bool(context, a, b, out):
type = binary_map(context, a, b, out)
if type.is_array:
return type.copy(dtype=bool_)
else:
return bool_
def reduce_(a, axis, dtype, out, static_dtype=None):
if out is not None:
return out
dtype_type = _dtype(a, dtype, static_dtype)
if axis is None:
# Return the scalar type
return dtype_type
if dtype_type:
# Handle the axis parameter
if axis.is_tuple and axis.is_sized:
# axis=(tuple with a constant size)
return typesystem.array(dtype_type, a.ndim - axis.size)
elif axis.is_int:
# axis=1
return typesystem.array(dtype_type, a.ndim - 1)
else:
# axis=(something unknown)
return object_
def reduce_bool(a, axis, dtype, out):
return reduce_(a, axis, dtype, out, bool_)
def accumulate(a, axis, dtype, out, static_dtype=None):
return demote_to_scalar(array_of_dtype(a, dtype, static_dtype, out))
def accumulate_bool(a, axis, dtype, out):
return accumulate(a, axis, dtype, out, bool_)
def reduceat(a, indices, axis, dtype, out, static_dtype=None):
return accumulate(a, axis, dtype, out, static_dtype)
def reduceat_bool(a, indices, axis, dtype, out):
return reduceat(a, indices, axis, dtype, out, bool_)
def outer(context, a, b, static_dtype=None):
a = array_of_dtype(a, None, static_dtype, out=None)
if a and a.is_array:
return a.dtype[:, :]
def outer_bool(context, a, b):
return outer(context, a, b, bool_)
#------------------------------------------------------------------------
# Binary Ufuncs
#------------------------------------------------------------------------
binary_ufuncs_compare = (
# Comparisons
'greater',
'greater_equal',
'less',
'less_equal',
'not_equal',
'equal',
)
binary_ufuncs_logical = (
# Logical ufuncs
'logical_and',
'logical_or',
'logical_xor',
'logical_not',
)
binary_ufuncs_bitwise = (
# Bitwise ufuncs
'bitwise_and',
'bitwise_or',
'bitwise_xor',
'left_shift',
'right_shift',
)
binary_ufuncs_arithmetic = (
# Arithmetic ufuncs
'add',
'subtract',
'multiply',
'true_divide',
'floor_divide',
)
if not PY3:
binary_ufuncs_arithmetic = binary_ufuncs_arithmetic + ('divide', )
#------------------------------------------------------------------------
# Register our type functions
#------------------------------------------------------------------------
register_inferer(np, 'sum', reduce_)
register_inferer(np, 'prod', reduce_)
def register_arithmetic_ufunc(register_inferer, register_unbound, binary_ufunc):
register_inferer(np, binary_ufunc, binary_map)
register_unbound(np, binary_ufunc, "reduce", reduce_)
register_unbound(np, binary_ufunc, "accumulate", accumulate)
register_unbound(np, binary_ufunc, "reduceat", reduceat)
register_unbound(np, binary_ufunc, "outer", outer)
def register_bool_ufunc(register_inferer, register_unbound, binary_ufunc):
register_inferer(np, binary_ufunc, binary_map_bool)
register_unbound(np, binary_ufunc, "reduce", reduce_bool)
register_unbound(np, binary_ufunc, "accumulate", accumulate_bool)
register_unbound(np, binary_ufunc, "reduceat", reduceat_bool)
register_unbound(np, binary_ufunc, "outer", outer_bool)
for binary_ufunc in binary_ufuncs_bitwise + binary_ufuncs_arithmetic:
register_arithmetic_ufunc(register_inferer, register_unbound, binary_ufunc)
for binary_ufunc in binary_ufuncs_compare + binary_ufuncs_logical:
register_bool_ufunc(register_inferer, register_unbound, binary_ufunc)
| shiquanwang/numba | numba/type_inference/modules/numpyufuncs.py | Python | bsd-2-clause | 7,943 |
# Authors:
# Rob Crittenden <rcritten@redhat.com>
#
# Copyright (C) 2009,2013 Red Hat
# see file 'COPYING' for use and warranty information
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Test the `ipalib/plugins/cert.py` module against a RA.
"""
import sys
import os
import shutil
from nose.tools import raises, assert_raises # pylint: disable=E0611
from ipatests.test_xmlrpc.xmlrpc_test import XMLRPC_test, assert_attr_equal
from ipalib import api
from ipalib import errors
from ipalib import x509
import tempfile
from ipapython import ipautil
import nose
import base64
from ipaplatform.paths import paths
from ipapython.dn import DN
# So we can save the cert from issuance and compare it later
cert = None
newcert = None
def is_db_configured():
"""
Raise an exception if we are testing against lite-server and the
developer cert database is configured.
"""
aliasdir = api.env.dot_ipa + os.sep + 'alias' + os.sep + '.pwd'
if (api.env.xmlrpc_uri == u'http://localhost:8888/ipa/xml' and
not ipautil.file_exists(aliasdir)):
raise nose.SkipTest('developer CA not configured in %s' % aliasdir)
# Test setup
#
# This test needs a configured CA behind it in order to work properly
#
# To test against Apache directly then no changes are required. Just be
# sure the xmlrpc_uri in ~/.ipa/default.conf points to Apache.
#
# To test against Dogtag CA in the lite-server:
#
# - Copy the 3 NSS db files from /etc/httpd/alias to ~/.ipa/alias
# - Copy /etc/httpd/alias/pwdfile.txt to ~/.ipa/alias/.pwd.
# - Change ownership of these files to be readable by you.
#
# The API tested depends on the value of ~/.ipa/default/ra_plugin when
# running as the lite-server.
class test_cert(XMLRPC_test):
    """
    Test the `cert` plugin.
    """
@classmethod
def setup_class(cls):
super(test_cert, cls).setup_class()
if 'cert_request' not in api.Command:
raise nose.SkipTest('cert_request not registered')
is_db_configured()
def run_certutil(self, args, stdin=None):
new_args = [paths.CERTUTIL, "-d", self.reqdir]
new_args = new_args + args
return ipautil.run(new_args, stdin)
def setup(self):
self.reqdir = tempfile.mkdtemp(prefix = "tmp-")
self.reqfile = self.reqdir + "/test.csr"
self.pwname = self.reqdir + "/pwd"
# Create an empty password file
fp = open(self.pwname, "w")
fp.write("\n")
fp.close()
# Create our temporary NSS database
self.run_certutil(["-N", "-f", self.pwname])
self.subject = DN(('CN', self.host_fqdn), x509.subject_base())
def teardown(self):
shutil.rmtree(self.reqdir, ignore_errors=True)
def generateCSR(self, subject):
self.run_certutil(["-R", "-s", subject,
"-o", self.reqfile,
"-z", paths.GROUP,
"-f", self.pwname,
"-a",
])
fp = open(self.reqfile, "r")
data = fp.read()
fp.close()
return data
"""
Test the `cert` plugin.
"""
host_fqdn = u'ipatestcert.%s' % api.env.domain
service_princ = u'test/%s@%s' % (host_fqdn, api.env.realm)
def test_0001_cert_add(self):
"""
Test the `xmlrpc.cert_request` method without --add.
This should fail because the service principal doesn't exist
"""
# First create the host that will use this policy
        res = api.Command['host_add'](self.host_fqdn, force=True)['result']
csr = unicode(self.generateCSR(str(self.subject)))
with assert_raises(errors.NotFound):
res = api.Command['cert_request'](csr, principal=self.service_princ)
def test_0002_cert_add(self):
"""
Test the `xmlrpc.cert_request` method with --add.
"""
# Our host should exist from previous test
global cert
csr = unicode(self.generateCSR(str(self.subject)))
res = api.Command['cert_request'](csr, principal=self.service_princ, add=True)['result']
assert DN(res['subject']) == self.subject
# save the cert for the service_show/find tests
cert = res['certificate']
def test_0003_service_show(self):
"""
Verify that service-show has the right certificate using service-show.
"""
global cert
res = api.Command['service_show'](self.service_princ)['result']
assert base64.b64encode(res['usercertificate'][0]) == cert
def test_0004_service_find(self):
"""
Verify that service-find has the right certificate using service-find.
"""
global cert
# Assume there is only one service
res = api.Command['service_find'](self.service_princ)['result']
assert base64.b64encode(res[0]['usercertificate'][0]) == cert
def test_0005_cert_renew(self):
"""
Issue a new certificate for a service
"""
global newcert
csr = unicode(self.generateCSR(str(self.subject)))
res = api.Command['cert_request'](csr, principal=self.service_princ)['result']
assert DN(res['subject']) == self.subject
# save the cert for the service_show/find tests
newcert = res['certificate']
def test_0006_service_show(self):
"""
Verify the new certificate with service-show.
"""
global cert, newcert
res = api.Command['service_show'](self.service_princ)['result']
# Both the old and the new certs should be listed as certificates now
certs_encoded = (base64.b64encode(cert) for cert in res['usercertificate'])
assert set(certs_encoded) == set([cert, newcert])
def test_0007_cleanup(self):
"""
Clean up cert test data
"""
# Now clean things up
api.Command['host_del'](self.host_fqdn)
# Verify that the service is gone
res = api.Command['service_find'](self.service_princ)
assert res['count'] == 0
class test_cert_find(XMLRPC_test):
    """
    Test the `cert-find` command.
    """
@classmethod
def setup_class(cls):
super(test_cert_find, cls).setup_class()
if 'cert_find' not in api.Command:
raise nose.SkipTest('cert_find not registered')
if api.env.ra_plugin != 'dogtag':
raise nose.SkipTest('cert_find for dogtag CA only')
is_db_configured()
"""
Test the `cert-find` command.
"""
short = api.env.host.replace('.' + api.env.domain, '')
def test_0001_find_all(self):
"""
Search for all certificates.
We don't know how many we'll get but there should be at least 10
by default.
"""
res = api.Command['cert_find']()
assert 'count' in res and res['count'] >= 10
def test_0002_find_CA(self):
"""
Search for the CA certificate.
"""
res = api.Command['cert_find'](subject=u'Certificate Authority')
assert 'count' in res and res['count'] == 1
def test_0003_find_OCSP(self):
"""
Search for the OCSP certificate.
"""
        res = api.Command['cert_find'](subject=u'OCSP Subsystem')
        assert 'count' in res and res['count'] == 1
def test_0004_find_this_host(self):
"""
Find all certificates for this IPA server
"""
res = api.Command['cert_find'](subject=api.env.host)
assert 'count' in res and res['count'] > 1
def test_0005_find_this_host_exact(self):
"""
Find all certificates for this IPA server (exact)
"""
res = api.Command['cert_find'](subject=api.env.host, exactly=True)
assert 'count' in res and res['count'] > 1
def test_0006_find_this_short_host_exact(self):
"""
Find all certificates for this IPA server short name (exact)
"""
res = api.Command['cert_find'](subject=self.short, exactly=True)
assert 'count' in res and res['count'] == 0
def test_0007_find_revocation_reason_0(self):
"""
Find all certificates with revocation reason 0
"""
res = api.Command['cert_find'](revocation_reason=0)
assert 'count' in res and res['count'] == 0
def test_0008_find_revocation_reason_1(self):
"""
Find all certificates with revocation reason 1
"""
res = api.Command['cert_find'](revocation_reason=1)
assert 'count' in res and res['count'] == 0
def test_0009_find_revocation_reason_2(self):
"""
Find all certificates with revocation reason 2
"""
res = api.Command['cert_find'](revocation_reason=2)
assert 'count' in res and res['count'] == 0
def test_0010_find_revocation_reason_3(self):
"""
Find all certificates with revocation reason 3
"""
res = api.Command['cert_find'](revocation_reason=3)
assert 'count' in res and res['count'] == 0
def test_0011_find_revocation_reason_4(self):
"""
Find all certificates with revocation reason 4
There is no way to know in advance how many revoked certificates
we'll have but in the context of make-test we'll have at least one.
"""
res = api.Command['cert_find'](revocation_reason=4)
assert 'count' in res and res['count'] >= 1
def test_0012_find_revocation_reason_5(self):
"""
Find all certificates with revocation reason 5
"""
res = api.Command['cert_find'](revocation_reason=5)
assert 'count' in res and res['count'] == 0
def test_0013_find_revocation_reason_6(self):
"""
Find all certificates with revocation reason 6
"""
res = api.Command['cert_find'](revocation_reason=6)
assert 'count' in res and res['count'] == 0
# There is no revocation reason #7
def test_0014_find_revocation_reason_8(self):
"""
Find all certificates with revocation reason 8
"""
res = api.Command['cert_find'](revocation_reason=8)
assert 'count' in res and res['count'] == 0
def test_0015_find_revocation_reason_9(self):
"""
Find all certificates with revocation reason 9
"""
res = api.Command['cert_find'](revocation_reason=9)
assert 'count' in res and res['count'] == 0
def test_0016_find_revocation_reason_10(self):
"""
Find all certificates with revocation reason 10
"""
res = api.Command['cert_find'](revocation_reason=10)
assert 'count' in res and res['count'] == 0
def test_0017_find_by_issuedon(self):
"""
Find all certificates issued since 2008
"""
res = api.Command['cert_find'](issuedon_from=u'2008-01-01',
sizelimit=10)
assert 'count' in res and res['count'] == 10
def test_0018_find_through_issuedon(self):
"""
Find all certificates issued through 2008
"""
res = api.Command['cert_find'](issuedon_to=u'2008-01-01',
sizelimit=10)
assert 'count' in res and res['count'] == 0
def test_0019_find_notvalid_before(self):
"""
Find all certificates valid not before 2008
"""
res = api.Command['cert_find'](validnotbefore_from=u'2008-01-01',
sizelimit=10)
assert 'count' in res and res['count'] == 10
def test_0020_find_notvalid_before(self):
"""
Find all certificates valid not before to 2100
"""
res = api.Command['cert_find'](validnotbefore_to=u'2100-01-01',
sizelimit=10)
assert 'count' in res and res['count'] == 10
def test_0021_find_notvalid_before(self):
"""
Find all certificates valid not before 2100
"""
res = api.Command['cert_find'](validnotbefore_from=u'2100-01-01',
sizelimit=10)
assert 'count' in res and res['count'] == 0
def test_0022_find_notvalid_before(self):
"""
Find all certificates valid not before to 2008
"""
res = api.Command['cert_find'](validnotbefore_to=u'2008-01-01',
sizelimit=10)
assert 'count' in res and res['count'] == 0
def test_0023_find_notvalid_after(self):
"""
Find all certificates valid not after 2008
"""
res = api.Command['cert_find'](validnotafter_from=u'2008-01-01',
sizelimit=10)
assert 'count' in res and res['count'] == 10
def test_0024_find_notvalid_after(self):
"""
Find all certificates valid not after to 2100
"""
res = api.Command['cert_find'](validnotafter_to=u'2100-01-01',
sizelimit=10)
assert 'count' in res and res['count'] == 10
def test_0025_find_notvalid_after(self):
"""
Find all certificates valid not after 2100
"""
res = api.Command['cert_find'](validnotafter_from=u'2100-01-01',
sizelimit=10)
assert 'count' in res and res['count'] == 0
def test_0026_find_notvalid_after(self):
"""
Find all certificates valid not after to 2008
"""
res = api.Command['cert_find'](validnotafter_to=u'2008-01-01',
sizelimit=10)
assert 'count' in res and res['count'] == 0
def test_0027_sizelimit_zero(self):
"""
Search with a sizelimit of 0
"""
res = api.Command['cert_find'](sizelimit=0)
assert 'count' in res and res['count'] == 0
@raises(errors.ValidationError)
def test_0028_find_negative_size(self):
"""
Search with a negative sizelimit
"""
res = api.Command['cert_find'](sizelimit=-100)
def test_0029_search_for_notfound(self):
"""
Search for a host that isn't there.
"""
res = api.Command['cert_find'](subject=u'notfound')
assert 'count' in res and res['count'] == 0
def test_0030_search_for_testcerts(self):
"""
Search for certs created in other tests
"""
res = api.Command['cert_find'](subject=u'ipatestcert.%s' % api.env.domain)
assert 'count' in res and res['count'] >= 1
@raises(errors.ValidationError)
def test_0031_search_on_invalid_date(self):
"""
Search using invalid date format
"""
res = api.Command['cert_find'](issuedon_from=u'xyz')
| pspacek/freeipa | ipatests/test_xmlrpc/test_cert_plugin.py | Python | gpl-3.0 | 15,240 |
# Copyright (c) 2013 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from blazar.plugins import base
class DummyVMPlugin(base.BasePlugin):
"""Plugin for VM resource that does nothing."""
resource_type = 'virtual:instance'
title = 'Dummy VM Plugin'
description = 'This plugin does nothing.'
def get(self, resource_id):
return None
def reserve_resource(self, reservation_id, values):
return None
def list_allocations(self, query, detail=False):
"""List resource allocations."""
pass
def query_allocations(self, resource_id_list, lease_id=None,
reservation_id=None):
return None
def allocation_candidates(self, lease_values):
return None
def update_reservation(self, reservation_id, values):
return None
def on_start(self, resource_id, lease=None):
"""Dummy VM plugin does nothing."""
        return 'VM %s should be woken up at this moment.' % resource_id
def on_end(self, resource_id, lease=None):
"""Dummy VM plugin does nothing."""
        return 'VM %s should be deleted at this moment.' % resource_id
| ChameleonCloud/blazar | blazar/plugins/dummy_vm_plugin.py | Python | apache-2.0 | 1,668 |
import os
from ifaint import cmd_arg
"""Script be passed to Faint with --run by test-svg-suite.py.
Requires that the type is passed with --arg as well, e.g.
> Faint --run test-save.py --arg png
"""
outdir = os.path.join(os.getcwd(), 'out', 'suite', cmd_arg)
active = get_active_image()
filename = active.get_filename()
if filename is not None:
ext = "." + cmd_arg
fn = os.path.basename(active.get_filename())
fn = fn.replace('.svgz', ext)
fn = fn.replace('.svg', ext)
app.save_backup(get_active_image(), os.path.join(outdir,fn))
| lukas-ke/faint-graphics-editor | py/test/faint_scripts/test-save-suite.py | Python | apache-2.0 | 572 |
# -*- coding: utf-8 -*-
import string, os, re
from random import choice
from django.db import connection, transaction
from django.conf import settings
from django.core.mail import EmailMultiAlternatives
def get_random_string(length=8, chars=string.letters + string.digits):
return ''.join([choice(chars) for i in xrange(length)])
def get_url(context):
return context.META["HTTP_HOST"] + context.META["PATH_INFO"]
DEFAULT_IMAGE_PATH = "images/omg"
def get_image_path(path, table_name):
"""
    Returns an upload_to callback that stores files under ``path`` with a
    random 8-character name. Falling back to DEFAULT_IMAGE_PATH should happen
    once in a lifetime: only if three random names in a row already exist in
    ``table_name``.
"""
def upload_callback(instance, filename):
if not filename or len(filename) == 0:
filename = instance.image.name
cursor = connection.cursor()
ext = os.path.splitext(filename)[1]
ext = ext.lower()
random_filename = get_random_string(length=8)
tries = 0
while tries < 3:
cursor.execute('SELECT * FROM ' + table_name + ' WHERE image = %s', [random_filename])
row = cursor.fetchone()
if row:
tries += 1
random_filename = get_random_string(length=8)
else:
break
if tries == 3:
return DEFAULT_IMAGE_PATH
# instance.original_filename = filename
return '%s/%s' % (path, random_filename + ext)
return upload_callback
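# Example (illustrative) model field using the callback above:
#
#     class Player(models.Model):
#         image = models.ImageField(upload_to=get_image_path("images/players", "myapp_player"))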
# Taken from django_hitcount.
# This is not intended to be an all-knowing IP address regex.
IP_RE = re.compile(r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}')
def get_ip(request):
"""
Retrieves the remote IP address from the request data. If the user is
behind a proxy, they may have a comma-separated list of IP addresses, so
we need to account for that. In such a case, only the first IP in the
list will be retrieved. Also, some hosts that use a proxy will put the
REMOTE_ADDR into HTTP_X_FORWARDED_FOR. This will handle pulling back the
IP from the proper place.
**NOTE** This function was taken from django-tracking (MIT LICENSE)
http://code.google.com/p/django-tracking/
"""
# if neither header contain a value, just use local loopback
ip_address = request.META.get('HTTP_X_FORWARDED_FOR',
request.META.get('REMOTE_ADDR', '127.0.0.1'))
if ip_address:
# make sure we have one and only one IP
try:
ip_address = IP_RE.match(ip_address)
if ip_address:
ip_address = ip_address.group(0)
else:
# no IP, probably from some dirty proxy or other device
# throw in some bogus IP
ip_address = '10.0.0.1'
except IndexError:
pass
return ip_address
def post_url(post):
if post.category:
return settings.SITE_URL + "/" + post.category.slug + "/" + post.slug + "/"
else:
return settings.SITE_URL + "/" + post.slug + "/"
def game_url(game, season):
return settings.SITE_URL + "/games/" + season + "/" + game.slug + "/" + game.datetime_played.strftime("%Y-%m-%d") + "/"
def player_url(player):
return settings.SITE_URL + "/player/" + player.slug + "/"
def replace(string, args):
search = args.split(args[0])[1]
replace = args.split(args[0])[2]
return re.sub(search, replace, string)
def get_attribute(value, arg):
    """Gets an attribute of an object dynamically from a string name"""
    numeric_test = re.compile(r"^\d+$")
if hasattr(value, str(arg)):
return getattr(value, arg)
elif hasattr(value, 'has_key') and value.has_key(arg):
return value[arg]
elif numeric_test.match(str(arg)) and len(value) > int(arg):
return value[int(arg)]
else:
return ""
def contains(value, arg):
expression = re.compile(arg)
if expression.search(value):
return True
return False
def send_mass_email(sender, to=None, bcc=None, subject=None, txt=None, html=None, attachment=None):
"""
    This is the common email send function for KK Velike Lašče.
We always send in html and txt form.
sender example: 'Sender name <sender@internet.net>'
receiver: ['email-1@email.net', ['email-2@email.net', ...]
"""
message = EmailMultiAlternatives(subject, txt, sender, to, bcc, headers={'Reply-To': sender})
message.attach_alternative(html, "text/html")
message.content_subtype = "html"
message.send()
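# Example (illustrative):
#
#     send_mass_email('KK Velike Lasce <noreply@example.com>',
#                     to=['member@example.com'],
#                     subject='Training', txt='Plain text', html='<b>HTML</b>')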
| rokj/django_basketball | common/functions.py | Python | mit | 4,465 |
from flask import current_app
from elasticsearch import Elasticsearch
class Elastic(object):
"""
A thin wrapper around elasticsearch.Elasticsearch()
"""
def __init__(self, app=None, **kwargs):
if app is not None:
self.init_app(app, **kwargs)
def init_app(self, app, **kwargs):
app.config.setdefault('ELASTICSEARCH_URL', 'http://localhost:9200/')
# using the app factory pattern _app_ctx_stack.top is None so what
# do we register on? app.extensions looks a little hackish (I don't
# know flask well enough to be sure), but that's how it's done in
# flask-pymongo so let's use it for now.
app.extensions['elastic'] = \
Elasticsearch(app.config['ELASTICSEARCH_URL'], **kwargs)
def __getattr__(self, item):
        if 'elastic' not in current_app.extensions:
raise Exception(
'not initialised, did you forget to call init_app?')
return getattr(current_app.extensions['elastic'], item)
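# Minimal usage sketch (illustrative; assumes Elasticsearch on localhost):
#
#     from flask import Flask
#     app = Flask(__name__)
#     elastic = Elastic(app)
#
#     with app.app_context():
#         print(elastic.info())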
| marceltschoppch/flask-elastic | flask_elastic.py | Python | mit | 1,061 |
# -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2004-2011 Pexego Sistemas Informáticos. All Rights Reserved
# $Omar Castiñeira Saavedra$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
"name" : "Bom formulas",
"description" : """Add possibility for add formulas to boms eval on mrp context""",
"version" : "1.0",
"author" : "Pexego",
"depends" : ["base","mrp"],
"category" : "Mrp/Bom Formulas",
"init_xml" : [],
"update_xml" : ['data/mrp_bom_data.xml',
'data/product_fields_data.xml',
'mrp_bom_view.xml',
'mrp_production_view.xml',
'product_view.xml',
'stock_view.xml',
'security/ir.model.access.csv'],
'demo_xml': [],
'installable': False,
'active': False,
}
| Pexego/sale_commission | __unported__/mrp_bom_formula/__openerp__.py | Python | agpl-3.0 | 1,607 |
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import frappe
import datetime
from frappe.utils import formatdate, fmt_money, flt, cstr, cint, format_datetime, format_time, format_duration
from frappe.model.meta import get_field_currency, get_field_precision
import re
from six import string_types
def format_value(value, df=None, doc=None, currency=None, translated=False):
'''Format value based on given fieldtype, document reference, currency reference.
If docfield info (df) is not given, it will try and guess based on the datatype of the value'''
if isinstance(df, string_types):
df = frappe._dict(fieldtype=df)
if not df:
df = frappe._dict()
if isinstance(value, datetime.datetime):
df.fieldtype = 'Datetime'
elif isinstance(value, datetime.date):
df.fieldtype = 'Date'
elif isinstance(value, datetime.timedelta):
df.fieldtype = 'Time'
elif isinstance(value, int):
df.fieldtype = 'Int'
elif isinstance(value, float):
df.fieldtype = 'Float'
else:
df.fieldtype = 'Data'
elif (isinstance(df, dict)):
# Convert dict to object if necessary
df = frappe._dict(df)
if value is None:
value = ""
elif translated:
value = frappe._(value)
if not df:
return value
elif df.get("fieldtype")=="Date":
return formatdate(value)
elif df.get("fieldtype")=="Datetime":
return format_datetime(value)
elif df.get("fieldtype")=="Time":
return format_time(value)
elif value==0 and df.get("fieldtype") in ("Int", "Float", "Currency", "Percent") and df.get("print_hide_if_no_value"):
# this is required to show 0 as blank in table columns
return ""
elif df.get("fieldtype") == "Currency":
default_currency = frappe.db.get_default("currency")
currency = currency or get_field_currency(df, doc) or default_currency
return fmt_money(value, precision=get_field_precision(df, doc), currency=currency)
elif df.get("fieldtype") == "Float":
precision = get_field_precision(df, doc)
# I don't know why we support currency option for float
currency = currency or get_field_currency(df, doc)
# show 1.000000 as 1
# options should not specified
if not df.options and value is not None:
temp = cstr(value).split(".")
if len(temp)==1 or cint(temp[1])==0:
precision = 0
return fmt_money(value, precision=precision, currency=currency)
elif df.get("fieldtype") == "Percent":
return "{}%".format(flt(value, 2))
elif df.get("fieldtype") in ("Text", "Small Text"):
if not re.search(r"(<br|<div|<p)", value):
return frappe.safe_decode(value).replace("\n", "<br>")
elif df.get("fieldtype") == "Markdown Editor":
return frappe.utils.markdown(value)
elif df.get("fieldtype") == "Table MultiSelect":
meta = frappe.get_meta(df.options)
link_field = [df for df in meta.fields if df.fieldtype == 'Link'][0]
		values = [v.get(link_field.fieldname, '') for v in value]
return ', '.join(values)
elif df.get("fieldtype") == "Duration":
hide_days = df.hide_days
return format_duration(value, hide_days)
elif df.get("fieldtype") == "Text Editor":
return "<div class='ql-snow'>{}</div>".format(value)
return value
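# Examples (illustrative):
#
#     format_value(42.5, df={'fieldtype': 'Percent'})   # -> '42.5%'
#     format_value(0, df={'fieldtype': 'Int', 'print_hide_if_no_value': True})   # -> ''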
| saurabh6790/frappe | frappe/utils/formatters.py | Python | mit | 3,202 |
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import time
from odoo.tests.common import TransactionCase, Form
from odoo.tools import mute_logger
class TestSaleMrpProcurement(TransactionCase):
def test_sale_mrp(self):
warehouse0 = self.env.ref('stock.warehouse0')
# In order to test the sale_mrp module in OpenERP, I start by creating a new product 'Slider Mobile'
# I define product category Mobile Products Sellable.
with mute_logger('odoo.tests.common.onchange'):
# Suppress warning on "Changing your cost method" when creating a
# product category
pc = Form(self.env['product.category'])
pc.name = 'Mobile Products Sellable'
product_category_allproductssellable0 = pc.save()
uom_unit = self.env.ref('uom.product_uom_unit')
self.assertIn("seller_ids", self.env['product.template'].fields_get())
# I define product for Slider Mobile.
product = Form(self.env['product.template'])
product.categ_id = product_category_allproductssellable0
product.list_price = 200.0
product.name = 'Slider Mobile'
product.standard_price = 189.0
product.type = 'product'
product.uom_id = uom_unit
product.uom_po_id = uom_unit
product.route_ids.clear()
product.route_ids.add(warehouse0.manufacture_pull_id.route_id)
product.route_ids.add(warehouse0.mto_pull_id.route_id)
product_template_slidermobile0 = product.save()
with Form(self.env['mrp.bom']) as bom:
bom.product_tmpl_id = product_template_slidermobile0
# I create a sale order for product Slider mobile
so_form = Form(self.env['sale.order'])
so_form.partner_id = self.env.ref('base.res_partner_4')
with so_form.order_line.new() as line:
line.product_id = product_template_slidermobile0.product_variant_ids
line.price_unit = 200
line.product_uom_qty = 500.0
line.customer_lead = 7.0
sale_order_so0 = so_form.save()
# I confirm the sale order
sale_order_so0.action_confirm()
# I verify that a manufacturing order has been generated, and that its name and reference are correct
mo = self.env['mrp.production'].search([('origin', 'like', sale_order_so0.name)], limit=1)
self.assertTrue(mo, 'Manufacturing order has not been generated')
| maxive/erp | addons/sale_mrp/tests/test_sale_mrp_procurement.py | Python | agpl-3.0 | 2,492 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# this is a virtual module that is entirely implemented server side
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'core'}
DOCUMENTATION = r'''
---
module: win_template
version_added: "1.9.2"
short_description: Templates a file out to a remote server
description:
- Templates are processed by the Jinja2 templating language
(U(http://jinja.pocoo.org/docs/)) - documentation on the template
formatting can be found in the Template Designer Documentation
(U(http://jinja.pocoo.org/docs/templates/)).
- "Additional variables can be used in templates: C(ansible_managed)
(configurable via the C(defaults) section of C(ansible.cfg)) contains a string
which can be used to describe the template name, host, modification time of the
template file and the owner uid."
- "C(template_host) contains the node name of the template's machine."
- "C(template_uid) the owner."
- "C(template_path) the absolute path of the template."
- "C(template_fullpath) is the absolute path of the template."
- "C(template_destpath) is the path of the template on the remote system (added in 2.8)."
- "C(template_run_date) is the date that the template was rendered."
- "Note that including a string that uses a date in the template will result in the template being marked 'changed' each time."
- For other platforms you can use M(template) which uses '\n' as C(newline_sequence).
options:
src:
description:
- Path of a Jinja2 formatted template on the local server. This can be a relative or absolute path.
type: path
required: yes
dest:
description:
- Location to render the template to on the remote machine.
type: path
required: yes
backup:
description:
- Determine whether a backup should be created.
- When set to C(yes), create a backup file including the timestamp information
so you can get the original file back if you somehow clobbered it incorrectly.
type: bool
default: no
version_added: '2.8'
newline_sequence:
description:
- Specify the newline sequence to use for templating files.
type: str
choices: [ '\n', '\r', '\r\n' ]
default: '\r\n'
version_added: '2.4'
block_start_string:
description:
- The string marking the beginning of a block.
type: str
default: '{%'
version_added: '2.4'
block_end_string:
description:
- The string marking the end of a block.
type: str
default: '%}'
version_added: '2.4'
variable_start_string:
description:
- The string marking the beginning of a print statement.
type: str
default: '{{'
version_added: '2.4'
variable_end_string:
description:
- The string marking the end of a print statement.
type: str
default: '}}'
version_added: '2.4'
trim_blocks:
description:
- If this is set to C(yes) the first newline after a block is removed (block, not variable tag!).
type: bool
default: no
version_added: '2.4'
force:
description:
- If C(yes), will replace the remote file when contents are different
from the source.
- If C(no), the file will only be transferred if the destination does
not exist.
type: bool
default: yes
version_added: '2.4'
notes:
- Templates are loaded with C(trim_blocks=yes).
- Beware fetching files from windows machines when creating templates
because certain tools, such as Powershell ISE, and regedit's export facility
add a Byte Order Mark as the first character of the file, which can cause tracebacks.
- To find Byte Order Marks in files, use C(Format-Hex <file> -Count 16) on Windows, and use C(od -a -t x1 -N 16 <file>) on Linux.
- "Also, you can override jinja2 settings by adding a special header to template file.
i.e. C(#jinja2:variable_start_string:'[%', variable_end_string:'%]', trim_blocks: no)
which changes the variable interpolation markers to [% var %] instead of {{ var }}.
This is the best way to prevent evaluation of things that look like, but should not be Jinja2.
raw/endraw in Jinja2 will not work as you expect because templates in Ansible are recursively evaluated."
- You can use the M(win_copy) module with the C(content:) option if you prefer the template inline,
as part of the playbook.
seealso:
- module: template
- module: win_copy
author:
- Jon Hawkesworth (@jhawkesworth)
'''
EXAMPLES = r'''
- name: Create a file from a Jinja2 template
win_template:
src: /mytemplates/file.conf.j2
dest: C:\Temp\file.conf
- name: Create a Unix-style file from a Jinja2 template
win_template:
src: unix/config.conf.j2
dest: C:\share\unix\config.conf
newline_sequence: '\n'
backup: yes
'''
RETURN = r'''
backup_file:
description: Name of the backup file that was created.
returned: if backup=yes
type: str
sample: C:\Path\To\File.txt.11540.20150212-220915.bak
'''
| pilou-/ansible | lib/ansible/modules/windows/win_template.py | Python | gpl-3.0 | 5,199 |
"""Total Generalized Variation denoising using PDHG.
Solves the optimization problem
min_x ||x - d||_2^2 + alpha TGV_2(x)
where ``d`` is the given noisy data and TGV_2 is the second order total generalized
variation of ``x``, defined as
TGV_2(x) = min_y ||Gx - y||_1 + beta ||Ey||_1
where ``G`` is the spatial gradient operating on the scalar-field ``x`` and
``E`` is the symmetrized gradient operating on the vector-field ``y``.
Both one-norms ||.||_1 take the one-norm globally and the two-norm locally,
i.e. ||y||_1 := sum_i sqrt(sum_j y_i(j)^2) where y_i(j) is the j-th value
of the vector y_i at location i.
The problem is rewritten as
min_{x, y} ||x - d||_2^2 + alpha ||Gx - y||_1 + alpha * beta ||Ey||_1
which can then be solved with PDHG.
References
----------
[1] K. Bredies and M. Holler. *A TGV-based framework for variational image
decompression, zooming and reconstruction. Part II: Numerics.* SIAM Journal
on Imaging Sciences, 8(4):2851-2886, 2015.
"""
import numpy as np
import odl
# --- Set up the forward operator (identity) --- #
# Reconstruction space: discretized functions on the rectangle
# [-20, 20]^2 with 300 samples per dimension.
U = odl.uniform_discr(min_pt=[-20, -20], max_pt=[20, 20], shape=[300, 300],
dtype='float32')
# Create the forward operator
A = odl.IdentityOperator(U)
# --- Generate artificial data --- #
# Create phantom
phantom = odl.phantom.tgv_phantom(U)
phantom.show(title='Phantom')
# Create sinogram of forward projected phantom with noise
data = A(phantom)
data += odl.phantom.white_noise(A.range) * np.mean(data) * 0.1
data.show(title='Simulated Data')
# --- Set up the inverse problem --- #
# Initialize gradient operator
G = odl.Gradient(U, method='forward', pad_mode='symmetric')
V = G.range
Dx = odl.PartialDerivative(U, 0, method='backward', pad_mode='symmetric')
Dy = odl.PartialDerivative(U, 1, method='backward', pad_mode='symmetric')
# Create symmetrized operator and weighted space.
# TODO: As the weighted space is currently not supported in ODL we find a
# workaround.
# W = odl.ProductSpace(U, 3, weighting=[1, 1, 2])
# sym_gradient = odl.operator.ProductSpaceOperator(
# [[Dx, 0], [0, Dy], [0.5*Dy, 0.5*Dx]], range=W)
E = odl.operator.ProductSpaceOperator(
[[Dx, 0], [0, Dy], [0.5 * Dy, 0.5 * Dx], [0.5 * Dy, 0.5 * Dx]])
W = E.range
# Create the domain of the problem, given by the reconstruction space and the
# range of the gradient on the reconstruction space.
domain = odl.ProductSpace(U, V)
# Column vector of three operators defined as:
# 1. Computes ``Ax``
# 2. Computes ``Gx - y``
# 3. Computes ``Ey``
op = odl.BroadcastOperator(
A * odl.ComponentProjection(domain, 0),
odl.ReductionOperator(G, odl.ScalingOperator(V, -1)),
E * odl.ComponentProjection(domain, 1))
# Do not use the f functional, set it to zero.
f = odl.solvers.ZeroFunctional(domain)
# l2-squared data matching
l2_norm = odl.solvers.L2NormSquared(A.range).translated(data)
# parameters
alpha = 1e-1
beta = 1
# The l1-norms scaled by the regularization parameters
l1_norm_1 = alpha * odl.solvers.L1Norm(V)
l1_norm_2 = alpha * beta * odl.solvers.L1Norm(W)
# Combine functionals, order must correspond to the operator K
g = odl.solvers.SeparableSum(l2_norm, l1_norm_1, l1_norm_2)
# --- Select solver parameters and solve using PDHG --- #
# Estimated operator norm, add 10 percent to ensure ||K||_2^2 * sigma * tau < 1
op_norm = 1.1 * odl.power_method_opnorm(op)
niter = 400 # Number of iterations
tau = 1.0 / op_norm # Step size for the primal variable
sigma = 1.0 / op_norm # Step size for the dual variable
# Optionally pass callback to the solver to display intermediate results
callback = (odl.solvers.CallbackPrintIteration() &
odl.solvers.CallbackShow(step=10, indices=0))
# Choose a starting point
x = op.domain.zero()
# Run the algorithm
odl.solvers.pdhg(x, f, g, op, niter=niter, tau=tau, sigma=sigma,
callback=callback)
# Display images
x[0].show(title='TGV Reconstruction')
x[1].show(title='Derivatives', force_show=True)
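# --- Optional sanity check (a hedged sketch, assuming the objects above) ---
# Evaluate the objective functional at the reconstruction and at the
# noise-free phantom paired with its own gradient; after convergence the
# reconstruction typically attains the lower value.
phantom_pair = domain.element([phantom, G(phantom)])
print('objective at reconstruction:', g(op(x)))
print('objective at phantom:', g(op(phantom_pair)))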
| kohr-h/odl | examples/solvers/pdhg_denoising_tgv.py | Python | mpl-2.0 | 4,074 |
import sys
import pytest
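# Hedged usage note: run as `python -m pohmm.tests`, optionally forwarding
# extra pytest arguments, e.g. `python -m pohmm.tests -v`.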
if __name__ == '__main__':
# Exit with correct code
sys.exit(pytest.main(["--pyargs", "pohmm.tests"] + sys.argv[1:]))
| vmonaco/pohmm | pohmm/tests/__main__.py | Python | bsd-3-clause | 153 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django_otp.util
import two_factor.models
class Migration(migrations.Migration):
dependencies = [
('two_factor', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='phonedevice',
name='key',
field=models.CharField(default=django_otp.util.random_hex, help_text=b'Hex-encoded secret key', max_length=40, validators=[two_factor.models.key_validator]),
),
]
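# Hedged usage note: this migration is applied with
# `python manage.py migrate two_factor`; it re-declares the default,
# help text and validators on PhoneDevice.key (largely a Python-level change).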
| mathspace/django-two-factor-auth | two_factor/migrations/0002_auto_20150110_0810.py | Python | mit | 563 |
from bottle import route, run, template
from pymongo import MongoClient
@route('/hello/<name>')
def index(name):
return template('<b>Hello {{name}}</b>!', name=name)
@route('/hell')
def hello_from_db():
    # Connect to the database
    connection = MongoClient('localhost', 27017)
    db = connection.test
    # Handle to the names collection
    names = db.names
    item = names.find_one()
    # Fall back gracefully if the collection is empty
    name = item['name'] if item else 'stranger'
    return template('<b>Hello {{name}}</b>!', name=name)
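def seed_example_data():
    # Hedged helper sketch: run once, manually, to give /hell a document to
    # read. Collection and field names match the handler above; the sample
    # value and the pymongo >= 3 insert_one API are assumptions.
    client = MongoClient('localhost', 27017)
    client.test.names.insert_one({'name': 'World'})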
run(host='localhost', port=8080)
| nesterione/experiments-of-programming | MongoDB/Python/Week1/bottleframework/bottle_mongo.py | Python | apache-2.0 | 516 |
import json
class BayonetError(Exception):
"""All errors related to making an API request extend this."""
def __init__(self, message=None,
request_body=None, request_headers=None,
http_response_code=None, http_response_message=None):
super(BayonetError, self).__init__(message)
self.request_body = request_body
self.request_headers = request_headers
self.http_response_code = http_response_code
self.http_response_message = http_response_message
        # Get reason_code, reason_message and status from the response
        try:
            response_as_json = json.loads(http_response_message)
            self.reason_code = response_as_json.get('reason_code')
            self.reason_message = response_as_json.get('reason_message')
            self.status = response_as_json.get('status')
        except (ValueError, TypeError, AttributeError):
            # The body was missing, not valid JSON, or not a JSON object
            self.reason_code = None
            self.reason_message = None
            self.status = None
class InvalidClientSetupError(Exception):
def __init__(self, message=None):
super(InvalidClientSetupError, self).__init__(message)
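if __name__ == "__main__":
    # Minimal usage sketch; the payload shape below is an assumption that
    # matches the fields parsed by BayonetError above.
    err = BayonetError(
        message="API request failed",
        request_body='{"api_key": "..."}',
        request_headers={"Content-Type": "application/json"},
        http_response_code=400,
        http_response_message='{"status": "error", "reason_code": 100,'
                              ' "reason_message": "Invalid api key"}')
    print(err.reason_code, err.reason_message, err.status)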
| Bayonet-Client/bayonet-python | bayonet/exceptions.py | Python | mit | 1,465 |
# Copyright 2016 Hardcoded Software (http://www.hardcoded.net)
#
# This software is licensed under the "GPLv3" License as described in the "LICENSE" file,
# which should be included with this package. The terms are also available at
# http://www.gnu.org/licenses/gpl-3.0.html
from PyQt5.QtWidgets import QFormLayout
from PyQt5.QtCore import Qt
from hscommon.trans import trget
from hscommon.plat import ISLINUX
from qtlib.radio_box import RadioBox
from core.scanner import ScanType
from core.app import AppMode
from ..preferences_dialog import PreferencesDialogBase
tr = trget("ui")
class PreferencesDialog(PreferencesDialogBase):
def _setupPreferenceWidgets(self):
self._setupFilterHardnessBox()
self.widgetsVLayout.addLayout(self.filterHardnessHLayout)
self._setupAddCheckbox("matchScaledBox", tr("Match pictures of different dimensions"))
self.widgetsVLayout.addWidget(self.matchScaledBox)
self._setupAddCheckbox("mixFileKindBox", tr("Can mix file kind"))
self.widgetsVLayout.addWidget(self.mixFileKindBox)
self._setupAddCheckbox("useRegexpBox", tr("Use regular expressions when filtering"))
self.widgetsVLayout.addWidget(self.useRegexpBox)
self._setupAddCheckbox("removeEmptyFoldersBox", tr("Remove empty folders on delete or move"))
self.widgetsVLayout.addWidget(self.removeEmptyFoldersBox)
self._setupAddCheckbox(
"ignoreHardlinkMatches",
tr("Ignore duplicates hardlinking to the same file"),
)
self.widgetsVLayout.addWidget(self.ignoreHardlinkMatches)
self._setupAddCheckbox("debugModeBox", tr("Debug mode (restart required)"))
self.widgetsVLayout.addWidget(self.debugModeBox)
self.cacheTypeRadio = RadioBox(self, items=["Sqlite", "Shelve"], spread=False)
cache_form = QFormLayout()
cache_form.setLabelAlignment(Qt.AlignLeft)
cache_form.addRow(tr("Picture cache mode:"), self.cacheTypeRadio)
self.widgetsVLayout.addLayout(cache_form)
self._setupBottomPart()
def _setupDisplayPage(self):
super()._setupDisplayPage()
self._setupAddCheckbox("details_dialog_override_theme_icons", tr("Override theme icons in viewer toolbar"))
self.details_dialog_override_theme_icons.setToolTip(
tr("Use our own internal icons instead of those provided by the theme engine")
)
# Prevent changing this on platforms where themes are unpredictable
        self.details_dialog_override_theme_icons.setEnabled(ISLINUX)
# Insert this right after the vertical title bar option
index = self.details_groupbox_layout.indexOf(self.details_dialog_vertical_titlebar)
self.details_groupbox_layout.insertWidget(index + 1, self.details_dialog_override_theme_icons)
self._setupAddCheckbox("details_dialog_viewers_show_scrollbars", tr("Show scrollbars in image viewers"))
self.details_dialog_viewers_show_scrollbars.setToolTip(
tr(
"When the image displayed doesn't fit the viewport, \
show scrollbars to span the view around"
)
)
self.details_groupbox_layout.insertWidget(index + 2, self.details_dialog_viewers_show_scrollbars)
def _load(self, prefs, setchecked, section):
setchecked(self.matchScaledBox, prefs.match_scaled)
self.cacheTypeRadio.selected_index = 1 if prefs.picture_cache_type == "shelve" else 0
# Update UI state based on selected scan type
scan_type = prefs.get_scan_type(AppMode.PICTURE)
fuzzy_scan = scan_type == ScanType.FUZZYBLOCK
self.filterHardnessSlider.setEnabled(fuzzy_scan)
setchecked(self.details_dialog_override_theme_icons, prefs.details_dialog_override_theme_icons)
setchecked(self.details_dialog_viewers_show_scrollbars, prefs.details_dialog_viewers_show_scrollbars)
def _save(self, prefs, ischecked):
prefs.match_scaled = ischecked(self.matchScaledBox)
prefs.picture_cache_type = "shelve" if self.cacheTypeRadio.selected_index == 1 else "sqlite"
prefs.details_dialog_override_theme_icons = ischecked(self.details_dialog_override_theme_icons)
prefs.details_dialog_viewers_show_scrollbars = ischecked(self.details_dialog_viewers_show_scrollbars)
| arsenetar/dupeguru | qt/pe/preferences_dialog.py | Python | gpl-3.0 | 4,332 |
"""
ToolbarWidget
:Authors:
Berend Klein Haneveld
"""
import sys
from PySide.QtGui import QWidget
from PySide.QtGui import QLabel
from PySide.QtGui import QHBoxLayout
from PySide.QtGui import QToolButton
from PySide.QtGui import QPushButton
from PySide.QtGui import QMainWindow
from PySide.QtGui import QFont
from PySide.QtGui import QAction
from PySide.QtGui import QSizePolicy
from PySide.QtGui import QIcon
from PySide.QtCore import Qt
from PySide.QtCore import QSize
class ToolbarWidget(QWidget):
"""
ToolbarWidget
"""
def __init__(self):
super(ToolbarWidget, self).__init__()
# Make sure the widget expands over the whole toolbar
self.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
# Create main layout that will contain the layouts for each section
self.mainLayout = QHBoxLayout()
self.mainLayout.setContentsMargins(0, 0, 0, 0)
self.mainLayout.setSpacing(0)
# Layout on the left side
self.leftLayout = QHBoxLayout()
self.leftLayout.setContentsMargins(0, 0, 0, 0)
self.leftLayout.setSpacing(0)
self.leftLayout.setAlignment(Qt.AlignVCenter | Qt.AlignLeft)
# Layout in the center
self.centerLayout = QHBoxLayout()
self.centerLayout.setContentsMargins(0, 0, 0, 0)
self.centerLayout.setSpacing(0)
self.centerLayout.setAlignment(Qt.AlignVCenter | Qt.AlignHCenter)
# Layout on the right side
self.rightLayout = QHBoxLayout()
self.rightLayout.setContentsMargins(0, 0, 0, 0)
self.rightLayout.setSpacing(0)
self.rightLayout.setAlignment(Qt.AlignVCenter | Qt.AlignRight)
self.setLayout(self.mainLayout)
self.leftWidget = QWidget()
self.leftWidget.setLayout(self.leftLayout)
self.centerWidget = QWidget()
self.centerWidget.setLayout(self.centerLayout)
self.rightWidget = QWidget()
self.rightWidget.setLayout(self.rightLayout)
self.mainLayout.addWidget(self.leftWidget)
self.mainLayout.addWidget(self.centerWidget)
self.mainLayout.addWidget(self.rightWidget)
    def setText(self, text):
        # Create the center label lazily so that setText always has a
        # widget to write to.
        if not hasattr(self, 'label'):
            self.label = QLabel()
            self.addCenterItem(self.label)
        self.label.setText(text)
def addActionLeft(self, action):
toolButton = CreateFlatButton(action)
self.addLeftItem(toolButton)
def addActionCenter(self, action):
toolButton = CreateFlatButton(action)
self.addCenterItem(toolButton)
def addActionRight(self, action):
toolButton = CreateFlatButton(action)
self.addRightItem(toolButton)
def addLeftItem(self, widget):
self.leftLayout.addWidget(widget)
def addCenterItem(self, widget):
self.centerLayout.addWidget(widget)
def addRightItem(self, widget):
self.rightLayout.addWidget(widget)
def CreateFlatButton(action):
"""
Create a custom flat button and style
it so that it will look good on all platforms.
"""
toolButton = QToolButton()
toolButton.setIcon(action.icon())
toolButton.setText(action.text())
toolButton.setAutoRaise(True)
toolButton.setIconSize(QSize(32, 32))
toolButton.setToolButtonStyle(Qt.ToolButtonTextUnderIcon)
if sys.platform.startswith('darwin'):
# Bug for Mac: QToolButtons do not react to setAutoRaise so
# can't be made flat when they are not used inside a toolbar.
# Setting a custom style sheet with border to none fixes this.
        # But then it loses all its highlight and pressed visual cues
# so some extra styling needs to be done to provide some nice
# visual feedback on hover and pressed states.
toolButton.setStyleSheet("QToolButton {"
"border: none;"
"} "
"QToolButton:hover {"
"background-color: qradialgradient(cx: 0.5, cy: 0.5,"
"fx: 0.5, fy: 0.5,"
"radius: 0.5, "
"stop: 0 rgba(255, 255, 255, 100), "
"stop: 1 rgba(0, 0, 0, 0));"
"}"
"QToolButton:pressed {"
"background-color: qradialgradient(cx: 0.5, cy: 0.5,"
"fx: 0.5, fy: 0.5,"
"radius: 0.5, "
"stop: 0 rgba(255, 255, 255, 200), "
"stop: 1 rgba(0, 0, 0, 0));"
"}")
font = QFont()
font.setPixelSize(10)
toolButton.setFont(font)
# Connect the clicked signal to the action trigger
def pushed():
toolButton.action.triggered.emit()
setattr(toolButton, "pushed", pushed)
toolButton.clicked.connect(toolButton.pushed)
setattr(toolButton, "action", action)
return toolButton
if __name__ == '__main__':
import os
from PySide.QtGui import QApplication
from PySide.QtGui import QVBoxLayout
app = QApplication([])
mainWindow = QMainWindow()
mainWindow.setUnifiedTitleAndToolBarOnMac(True)
toolbar = mainWindow.addToolBar("Main")
toolbar.setMovable(False)
dirPath = os.path.dirname(__file__)
basePath = os.path.join(dirPath, "../../resources/images/")
icon = QIcon(basePath + "UserTransformButton.png")
toolButtonLeft1 = CreateFlatButton(QAction(icon, "Left", mainWindow))
toolButtonLeft2 = CreateFlatButton(QAction(icon, "2nd left", mainWindow))
toolButtonRight = CreateFlatButton(QAction(icon, "Right", mainWindow))
toolButtonCenter = QPushButton()
toolButtonCenter.setIcon(QIcon(basePath + "LandmarkTransformButton.png"))
toolButtonCenter.setText("Center")
toolButtonCenter.setMinimumWidth(200)
barWidget = ToolbarWidget()
barWidget.addLeftItem(toolButtonLeft1)
barWidget.addLeftItem(toolButtonLeft2)
barWidget.addCenterItem(toolButtonCenter)
barWidget.addRightItem(toolButtonRight)
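    # Hedged demo addition: the QAction-based helpers wrap CreateFlatButton
    # internally, so a QAction can be added to a section directly.
    barWidget.addActionRight(QAction(icon, "Far right", mainWindow))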
toolbar.addWidget(barWidget)
layout = QVBoxLayout()
layout.setSpacing(0)
layout.setContentsMargins(0, 0, 0, 0)
layout.setAlignment(Qt.AlignVCenter | Qt.AlignHCenter)
layout.addWidget(QLabel("Test toolbar widget"))
widget = QWidget()
widget.setLayout(layout)
mainWindow.setCentralWidget(widget)
mainWindow.setGeometry(100, 100, 500, 300)
mainWindow.show()
app.exec_()
| berendkleinhaneveld/Registrationshop | ui/widgets/ToolbarWidget.py | Python | mit | 5,515 |
###################################################################################
#
# Copyright (c) 2017-2019 MuK IT GmbH.
#
# This file is part of MuK Backend Theme
# (see https://mukit.at).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###################################################################################
from odoo import api, SUPERUSER_ID
from . import models
#----------------------------------------------------------
# Hooks
#----------------------------------------------------------
XML_ID = "muk_web_theme._assets_primary_variables"
SCSS_URL = "/muk_web_theme/static/src/scss/colors.scss"
def _uninstall_reset_changes(cr, registry):
env = api.Environment(cr, SUPERUSER_ID, {})
    env['muk_utils.scss_editor'].reset_values(SCSS_URL, XML_ID)
| muk-it/muk_web | muk_web_theme/__init__.py | Python | lgpl-3.0 | 1,428 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import argparse
import sys
from marcopolo.bindings import marco
from marcopolo.marco_conf.utils import Node
TIMEOUT = 4000
def main(args=None):
parser = argparse.ArgumentParser(description="Discovery of MarcoPolo nodes in the subnet")
    parser.add_argument('-d', '--discover', dest="address", type=str, help="Multicast group in which to perform the discovery", nargs='?', default="224.0.0.1")
parser.add_argument('-s', '--service', dest="service", type=str, help="Name of the service to look for", nargs='?')
parser.add_argument('-S', '--services', dest="services", help="Discover all services in a node", nargs='?')
parser.add_argument('-n', '--node', dest="node", help="Perform the discovery on only one node, identified by its ip/dns name", nargs="?")
    parser.add_argument('--sh', '--shell', dest="shell", help="Print output so it can be used as an iterable list in a shell", nargs='?')
#parser.add_argument('-v', '--verbose', dest="verbose", help="Verbose mode")
args = parser.parse_args(args)
if args.service:
m = marco.Marco()
try:
nodes = m.request_for(args.service)
except marco.MarcoTimeOutException:
print("No connection to the resolver")
sys.exit(1)
if len(nodes) > 0:
cadena = ""
for node in nodes:
                cadena += node.address + ("\n" if not args.shell else " ")
print(cadena[:-1])
else:
print("There are no nodes available for the requested query")
sys.exit(0)
else:
m = marco.Marco()
try:
nodes = m.marco()
except marco.MarcoTimeOutException:
print("No connection to the resolver")
sys.exit(1)
cadena = ""
if len(nodes) > 0:
for node in nodes:
                cadena += node.address + ("\n" if not args.shell else " ")
print(cadena[:-1])
else:
print("There are no nodes available for the requested query")
if __name__ == "__main__":
    # argparse expects the argument list without the program name
    main(sys.argv[1:])
| Alternhuman/marcopolo-shell | marcopolo/utils/marcodiscover.py | Python | mpl-2.0 | 2,164 |
from south.db import db
from django.db import models
# ``_`` is not pulled in by the star import below (star imports skip
# leading-underscore names), so import it explicitly; ugettext_lazy is an
# assumption matching django-cms 2.0-era code.
from django.utils.translation import ugettext_lazy as _
from cms.models import *
import datetime
class Migration:
def forwards(self, orm):
# Changing field 'Title.language'
db.alter_column('cms_title', 'language', models.CharField(_("language"), max_length=5, db_index=True))
# Changing field 'CMSPlugin.language'
db.alter_column('cms_cmsplugin', 'language', models.CharField(_("language"), db_index=True, max_length=5, editable=False, blank=False))
def backwards(self, orm):
# Changing field 'Title.language'
db.alter_column('cms_title', 'language', models.CharField(_("language"), max_length=3, db_index=True))
# Changing field 'CMSPlugin.language'
db.alter_column('cms_cmsplugin', 'language', models.CharField(_("language"), blank=False, max_length=3, editable=False, db_index=True))
models = {
'sites.site': {
'Meta': {'ordering': "('domain',)", 'db_table': "'django_site'"},
'_stub': True,
'id': ('models.AutoField', [], {'primary_key': 'True'})
},
'auth.user': {
'_stub': True,
'id': ('models.AutoField', [], {'primary_key': 'True'})
},
'cms.pagepermission': {
'can_change_softroot': ('models.BooleanField', ['_("can change soft-root")'], {'default': 'False'}),
'can_edit': ('models.BooleanField', ['_("can edit")'], {'default': 'True'}),
'can_publish': ('models.BooleanField', ['_("can publish")'], {'default': 'True'}),
'everybody': ('models.BooleanField', ['_("everybody")'], {'default': 'False'}),
'group': ('models.ForeignKey', ['Group'], {'null': 'True', 'blank': 'True'}),
'id': ('models.AutoField', [], {'primary_key': 'True'}),
'page': ('models.ForeignKey', ['Page'], {'null': 'True', 'blank': 'True'}),
'type': ('models.IntegerField', ['_("type")'], {'default': '0'}),
'user': ('models.ForeignKey', ['User'], {'null': 'True', 'blank': 'True'})
},
'cms.cmsplugin': {
'creation_date': ('models.DateTimeField', ['_("creation date")'], {'default': 'datetime.datetime.now', 'editable': 'False'}),
'id': ('models.AutoField', [], {'primary_key': 'True'}),
'language': ('models.CharField', ['_("language")'], {'db_index': 'True', 'max_length': '5', 'editable': 'False', 'blank': 'False'}),
'level': ('models.PositiveIntegerField', [],{'db_index':'True', 'editable':'False'}),
'lft': ('models.PositiveIntegerField', [],{'db_index':'True', 'editable':'False'}),
'page': ('models.ForeignKey', ['Page'], {'editable': 'False'}),
'parent': ('models.ForeignKey', ['CMSPlugin'], {'null': 'True', 'editable': 'False', 'blank': 'True'}),
'placeholder': ('models.CharField', ['_("slot")'], {'max_length': '50', 'editable': 'False', 'db_index': 'True'}),
'plugin_type': ('models.CharField', ['_("plugin_name")'], {'max_length': '50', 'editable': 'False', 'db_index': 'True'}),
'position': ('models.PositiveSmallIntegerField', ['_("position")'], {'null': 'True', 'editable': 'False', 'blank': 'True'}),
'rght': ('models.PositiveIntegerField', [],{'db_index':'True', 'editable':'False'}),
'tree_id': ('models.PositiveIntegerField', [],{'db_index':'True', 'editable':'False'}),
},
'cms.title': {
'Meta': {'unique_together': "(('language','page'),)"},
'application_urls': ('models.CharField', ["_('application')"], {'blank': 'True', 'max_length': '200', 'null': 'True', 'db_index': 'True'}),
'creation_date': ('models.DateTimeField', ['_("creation date")'], {'default': 'datetime.datetime.now', 'editable': 'False'}),
'has_url_overwrite': ('models.BooleanField', ['_("has url overwrite")'], {'default': 'False', 'editable': 'False', 'db_index': 'True'}),
'id': ('models.AutoField', [], {'primary_key': 'True'}),
'language': ('models.CharField', ['_("language")'], {'max_length': '5', 'db_index': 'True'}),
'meta_description': ('models.TextField', ['_("description")'], {'max_length': '255', 'blank': 'True', 'null':'True'}),
'meta_keywords': ('models.CharField', ['_("keywords")'], {'max_length': '255', 'blank': 'True', 'null':'True'}),
'page': ('models.ForeignKey', ['Page'], {'related_name': '"title_set"'}),
'path': ('models.CharField', ['_("path")'], {'max_length': '255', 'db_index': 'True'}),
'redirect': ('models.CharField', ['_("redirect")'], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'slug': ('models.SlugField', ['_("slug")'], {'unique': 'False', 'max_length': '255', 'db_index': 'True'}),
'title': ('models.CharField', ['_("title")'], {'max_length': '255'})
},
'cms.page': {
'Meta': {'ordering': "('tree_id','lft')"},
'author': ('models.ForeignKey', ['User'], {'limit_choices_to': "{'page__isnull':False}"}),
'creation_date': ('models.DateTimeField', [], {'default': 'datetime.datetime.now', 'editable': 'False'}),
'id': ('models.AutoField', [], {'primary_key': 'True'}),
'in_navigation': ('models.BooleanField', ['_("in navigation")'], {'default': 'True', 'db_index': 'True'}),
'level': ('models.PositiveIntegerField', [],{'db_index':'True', 'editable':'False'}),
'lft': ('models.PositiveIntegerField', [],{'db_index':'True', 'editable':'False'}),
'login_required': ('models.BooleanField', ["_('login required')"], {'default': 'False'}),
'navigation_extenders': ('models.CharField', ['_("navigation extenders")'], {'blank': 'True', 'max_length': '80', 'null': 'True', 'db_index': 'True'}),
'parent': ('models.ForeignKey', ['Page'], {'db_index': 'True', 'related_name': "'children'", 'null': 'True', 'blank': 'True'}),
'publication_date': ('models.DateTimeField', ['_("publication date")'], {'null': 'True', 'db_index': 'True', 'blank': 'True'}),
'publication_end_date': ('models.DateTimeField', ['_("publication end date")'], {'null': 'True', 'db_index': 'True', 'blank': 'True'}),
'reverse_id': ('models.CharField', ['_("id")'], {'blank': 'True', 'max_length': '40', 'null': 'True', 'db_index': 'True'}),
'rght': ('models.PositiveIntegerField', [],{'db_index':'True', 'editable':'False'}),
'sites': ('models.ManyToManyField', ['Site'], {}),
'soft_root': ('models.BooleanField', ['_("soft root")'], {'default': 'False', 'db_index': 'True'}),
'status': ('models.IntegerField', ['_("status")'], {'default': '0', 'db_index': 'True'}),
'template': ('models.CharField', ['_("template")'], {'max_length': '100'}),
'tree_id': ('models.PositiveIntegerField', [],{'db_index':'True', 'editable':'False'}),
},
'auth.group': {
'_stub': True,
'id': ('models.AutoField', [], {'primary_key': 'True'})
}
}
complete_apps = ['cms']
| emiquelito/django-cms-2.0 | cms/migrations/0010_5char_language.py | Python | bsd-3-clause | 7,237 |